path: root/fluent-bit/lib/librdkafka-2.1.0/src
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:54:23 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-07-24 09:54:44 +0000
commit     836b47cb7e99a977c5a23b059ca1d0b5065d310e (patch)
tree       1604da8f482d02effa033c94a84be42bc0c848c3 /fluent-bit/lib/librdkafka-2.1.0/src
parent     Releasing debian version 1.44.3-2. (diff)
download   netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.tar.xz
           netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.zip
Merging upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fluent-bit/lib/librdkafka-2.1.0/src')
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt | 364
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/Makefile | 97
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c | 2834
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h | 398
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c | 430
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h | 38
-rwxr-xr-x fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh | 66
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png | bin 93796 -> 0 bytes
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4.c | 2498
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4.h | 774
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c | 1899
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h | 623
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h | 47
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c | 1615
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h | 413
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/queue.h | 850
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rd.h | 436
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c | 255
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h | 203
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h | 226
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h | 259
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c | 210
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h | 250
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c | 1880
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h | 373
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c | 114
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h | 170
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rddl.c | 179
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rddl.h | 43
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h | 174
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h | 67
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c | 113
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h | 35
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c | 120
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h | 46
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c | 721
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h | 87
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c | 511
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h | 83
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h | 159
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c | 5026
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h | 9340
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c | 6668
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h | 482
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c | 968
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h | 73
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c | 1065
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h | 212
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c | 278
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h | 120
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c | 221
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c | 5867
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h | 607
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c | 530
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h | 1407
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c | 552
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h | 61
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c | 5969
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h | 383
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c | 4362
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h | 650
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h | 97
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c | 623
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h | 132
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c | 228
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h | 80
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c | 426
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h | 118
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c | 460
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h | 102
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c | 1145
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h | 41
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c | 220
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h | 76
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c | 807
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h | 144
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h | 1054
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c | 819
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h | 104
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c | 450
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h | 49
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c | 1468
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h | 212
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c | 836
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c | 2585
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h | 373
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c | 687
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c | 2218
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h | 538
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c | 2517
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h | 583
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h | 62
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h | 82
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c | 1794
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c | 1445
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c | 1548
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h | 135
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c | 928
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h | 778
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c | 4301
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h | 1058
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c | 228
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h | 70
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c | 213
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h | 41
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h | 655
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h | 120
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c | 1085
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h | 1171
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c | 138
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c | 5378
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h | 463
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c | 123
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c | 522
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h | 63
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c | 720
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h | 89
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c | 1825
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h | 52
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c | 604
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h | 37
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c | 142
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c | 973
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c | 548
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c | 1841
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h | 57
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c | 3428
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c | 278
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c | 384
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h | 114
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c | 1900
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h | 311
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c | 1295
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h | 94
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h | 100
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c | 3249
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h | 171
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c | 226
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h | 57
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c | 546
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h | 421
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c | 89
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h | 41
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c | 487
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h | 487
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c | 167
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h | 35
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdports.c | 61
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdports.h | 38
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h | 250
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c | 70
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h | 43
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c | 156
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h | 43
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h | 57
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c | 629
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h | 93
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h | 404
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h | 309
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h | 86
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c | 529
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h | 230
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c | 134
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h | 165
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h | 382
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c | 1187
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h | 372
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/regexp.c | 1347
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/regexp.h | 41
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/snappy.c | 1866
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/snappy.h | 62
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h | 138
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json | 444
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c | 932
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h | 503
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c | 175
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h | 208
-rw-r--r-- fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h | 58
178 files changed, 0 insertions, 137874 deletions
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt
deleted file mode 100644
index 37b43c499..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt
+++ /dev/null
@@ -1,364 +0,0 @@
-set(LIBVER 1)
-
-set(
- sources
- crc32c.c
- rdaddr.c
- rdavl.c
- rdbuf.c
- rdcrc32.c
- rdfnv1a.c
- rdkafka.c
- rdkafka_assignor.c
- rdkafka_broker.c
- rdkafka_buf.c
- rdkafka_cgrp.c
- rdkafka_conf.c
- rdkafka_event.c
- rdkafka_feature.c
- rdkafka_lz4.c
- rdkafka_metadata.c
- rdkafka_metadata_cache.c
- rdkafka_msg.c
- rdkafka_msgset_reader.c
- rdkafka_msgset_writer.c
- rdkafka_offset.c
- rdkafka_op.c
- rdkafka_partition.c
- rdkafka_pattern.c
- rdkafka_queue.c
- rdkafka_range_assignor.c
- rdkafka_request.c
- rdkafka_roundrobin_assignor.c
- rdkafka_sasl.c
- rdkafka_sasl_plain.c
- rdkafka_sticky_assignor.c
- rdkafka_subscription.c
- rdkafka_assignment.c
- rdkafka_timer.c
- rdkafka_topic.c
- rdkafka_transport.c
- rdkafka_interceptor.c
- rdkafka_header.c
- rdkafka_admin.c
- rdkafka_aux.c
- rdkafka_background.c
- rdkafka_idempotence.c
- rdkafka_txnmgr.c
- rdkafka_cert.c
- rdkafka_coord.c
- rdkafka_mock.c
- rdkafka_mock_handlers.c
- rdkafka_mock_cgrp.c
- rdkafka_error.c
- rdkafka_fetcher.c
- rdlist.c
- rdlog.c
- rdmurmur2.c
- rdports.c
- rdrand.c
- rdregex.c
- rdstring.c
- rdunittest.c
- rdvarint.c
- rdmap.c
- snappy.c
- tinycthread.c
- tinycthread_extra.c
- rdxxhash.c
- cJSON.c
-)
-
-if(WITH_SSL)
- list(APPEND sources rdkafka_ssl.c)
-endif()
-
-if(WITH_CURL)
- list(APPEND sources rdhttp.c)
-endif()
-
-if(WITH_HDRHISTOGRAM)
- list(APPEND sources rdhdrhistogram.c)
-endif()
-
-if(WITH_LIBDL OR WIN32)
- list(APPEND sources rddl.c)
-endif()
-
-if(WITH_PLUGINS)
- list(APPEND sources rdkafka_plugin.c)
-endif()
-
-if(WIN32)
- list(APPEND sources rdkafka_sasl_win32.c)
-elseif(WITH_SASL_CYRUS)
- list(APPEND sources rdkafka_sasl_cyrus.c)
-endif()
-
-if(WITH_SASL_SCRAM)
- list(APPEND sources rdkafka_sasl_scram.c)
-endif()
-
-if(WITH_SASL_OAUTHBEARER)
- list(APPEND sources rdkafka_sasl_oauthbearer.c)
-endif()
-
-if(WITH_OAUTHBEARER_OIDC)
- list(APPEND sources rdkafka_sasl_oauthbearer_oidc.c)
-endif()
-
-if(WITH_ZLIB)
- list(APPEND sources rdgz.c)
-endif()
-
-if(WITH_ZSTD)
- list(APPEND sources rdkafka_zstd.c)
-endif()
-
-if(NOT WITH_LZ4_EXT)
- list(APPEND sources lz4.c lz4frame.c lz4hc.c)
-endif()
-
-if(NOT HAVE_REGEX)
- list(APPEND sources regexp.c)
-endif()
-
-# Define flags with cmake instead of defining them in win32_config.h
-if(WITHOUT_WIN32_CONFIG)
- list(APPEND rdkafka_compile_definitions WITHOUT_WIN32_CONFIG)
- if(WITH_SSL)
- list(APPEND rdkafka_compile_definitions WITH_SSL=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_SSL=0)
- endif(WITH_SSL)
- if(WITH_ZLIB)
- list(APPEND rdkafka_compile_definitions WITH_ZLIB=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_ZLIB=0)
- endif(WITH_ZLIB)
- if(WITH_SNAPPY)
- list(APPEND rdkafka_compile_definitions WITH_SNAPPY=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_SNAPPY=0)
- endif(WITH_SNAPPY)
- if(WITH_ZSTD)
- list(APPEND rdkafka_compile_definitions WITH_ZSTD=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_ZSTD=0)
- endif(WITH_ZSTD)
- if(WITH_SASL_SCRAM)
- list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM=0)
- endif(WITH_SASL_SCRAM)
- if(WITH_SASL_OAUTHBEARER)
- list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER=0)
- endif(WITH_SASL_OAUTHBEARER)
- if(ENABLE_DEVEL)
- list(APPEND rdkafka_compile_definitions ENABLE_DEVEL=1)
- else()
- list(APPEND rdkafka_compile_definitions ENABLE_DEVEL=0)
- endif(ENABLE_DEVEL)
- if(WITH_PLUGINS)
- list(APPEND rdkafka_compile_definitions WITH_PLUGINS=1)
- else()
- list(APPEND rdkafka_compile_definitions WITH_PLUGINS=0)
- endif(WITH_PLUGINS)
-endif()
-
-if(RDKAFKA_BUILD_STATIC)
- set(CMAKE_POSITION_INDEPENDENT_CODE ON)
- set(RDKAFKA_BUILD_MODE STATIC)
-else()
- set(RDKAFKA_BUILD_MODE SHARED)
-endif()
-
-add_library(rdkafka ${RDKAFKA_BUILD_MODE} ${sources})
-if(NOT RDKAFKA_BUILD_STATIC)
- set_property(TARGET rdkafka PROPERTY SOVERSION ${LIBVER})
-endif()
-
-if(MINGW)
- # Target Windows 8.1 to match the VS projects (MinGW defaults to an older WinAPI version)
- list(APPEND rdkafka_compile_definitions WINVER=0x0603 _WIN32_WINNT=0x0603 UNICODE)
-endif(MINGW)
-
-# Support '#include <rdkafka.h>'
-target_include_directories(rdkafka PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
-target_compile_definitions(rdkafka PUBLIC ${rdkafka_compile_definitions})
-if(RDKAFKA_BUILD_STATIC)
- target_compile_definitions(rdkafka PUBLIC LIBRDKAFKA_STATICLIB)
-endif()
-
-# We need a 'dummy' directory to support the `#include "../config.h"` path
-set(dummy "${GENERATED_DIR}/dummy")
-file(MAKE_DIRECTORY "${dummy}")
-target_include_directories(rdkafka PUBLIC "$<BUILD_INTERFACE:${dummy}>")
-
-if(WITH_CURL)
- find_package(CURL REQUIRED)
- target_include_directories(rdkafka PUBLIC ${CURL_INCLUDE_DIRS})
- target_link_libraries(rdkafka PUBLIC ${CURL_LIBRARIES})
-endif()
-
-if(WITH_HDRHISTOGRAM)
- target_link_libraries(rdkafka PUBLIC m)
-endif()
-
-if(WITH_ZLIB)
- find_package(ZLIB REQUIRED)
- target_include_directories(rdkafka PRIVATE ${ZLIB_INCLUDE_DIRS})
- target_link_libraries(rdkafka PUBLIC ZLIB::ZLIB)
-endif()
-
-if(WITH_ZSTD)
- target_link_libraries(rdkafka PRIVATE ${ZSTD_LIBRARY})
- target_include_directories(rdkafka PRIVATE ${ZSTD_INCLUDE_DIR})
- message(STATUS "Found ZSTD: ${ZSTD_LIBRARY}")
-endif()
-
-if(WITH_SSL)
- if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
- if(NOT TARGET bundled-ssl)
- message(FATAL_ERROR "bundled-ssl target does not exist")
- endif()
- target_include_directories(rdkafka BEFORE PRIVATE ${BUNDLED_SSL_INCLUDE_DIR})
- target_link_libraries(rdkafka PUBLIC ${BUNDLED_SSL_LIBRARIES})
- add_dependencies(rdkafka bundled-ssl)
- else()
- find_package(OpenSSL REQUIRED)
- target_include_directories(rdkafka PRIVATE ${OPENSSL_INCLUDE_DIR})
- target_link_libraries(rdkafka PUBLIC OpenSSL::SSL OpenSSL::Crypto)
- get_target_property(OPENSSL_TARGET_TYPE OpenSSL::SSL TYPE)
- if(OPENSSL_CRYPTO_LIBRARY MATCHES "\\.a$")
- target_compile_definitions(rdkafka PUBLIC WITH_STATIC_LIB_libcrypto)
- endif()
- endif()
-endif()
-
-if(LINK_ATOMIC)
- target_link_libraries(rdkafka PUBLIC "-latomic")
-endif()
-
-find_package(Threads REQUIRED)
-target_link_libraries(rdkafka PUBLIC Threads::Threads)
-
-if(WITH_SASL_CYRUS)
- target_include_directories(rdkafka PRIVATE ${SASL_INCLUDE_DIRS})
- target_link_libraries(rdkafka PUBLIC ${SASL_LIBRARIES})
-endif()
-
-if(WITH_LIBDL)
- target_link_libraries(rdkafka PUBLIC ${CMAKE_DL_LIBS})
-endif()
-
-if(WITH_LZ4_EXT)
- target_include_directories(rdkafka PRIVATE ${LZ4_INCLUDE_DIRS})
- target_link_libraries(rdkafka PUBLIC LZ4::LZ4)
-endif()
-
-if(WIN32)
- if(WITH_SSL)
- target_link_libraries(rdkafka PUBLIC crypt32)
- endif()
-
- target_link_libraries(rdkafka PUBLIC ws2_32 secur32)
- if(NOT RDKAFKA_BUILD_STATIC)
- target_compile_definitions(rdkafka PRIVATE LIBRDKAFKA_EXPORTS)
- endif()
-endif()
-
-# Generate pkg-config file
-set(PKG_CONFIG_VERSION "${PROJECT_VERSION}")
-set(PKG_CONFIG_REQUIRES "")
-if (WIN32)
- set(PKG_CONFIG_LIBS_PRIVATE "-lws2_32 -lsecur32 -lcrypt32")
-else()
- set(PKG_CONFIG_LIBS_PRIVATE "-lpthread")
- find_library(RT_LIBRARY rt)
- if(RT_LIBRARY)
- string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lrt")
- endif()
-
- if(WITH_LIBDL)
- string(APPEND PKG_CONFIG_LIBS_PRIVATE " -ldl")
- endif()
-
- if(WITH_HDRHISTOGRAM)
- string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lm")
- endif()
-endif()
-
-if(NOT RDKAFKA_BUILD_STATIC)
- set(PKG_CONFIG_NAME "librdkafka")
- set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library")
-
- if(WITH_CURL)
- string(APPEND PKG_CONFIG_REQUIRES "curl ")
- endif()
-
- if(WITH_ZLIB)
- string(APPEND PKG_CONFIG_REQUIRES "zlib ")
- endif()
-
- if(WITH_SSL)
- string(APPEND PKG_CONFIG_REQUIRES "libssl ")
- endif()
-
- if(WITH_SASL_CYRUS)
- string(APPEND PKG_CONFIG_REQUIRES "libsasl2 ")
- endif()
-
- if(WITH_ZSTD)
- string(APPEND PKG_CONFIG_REQUIRES "libzstd ")
- endif()
-
- if(WITH_LZ4_EXT)
- string(APPEND PKG_CONFIG_REQUIRES "liblz4 ")
- endif()
-
- set(PKG_CONFIG_CFLAGS "-I\${includedir}")
- set(PKG_CONFIG_LIBS "-L\${libdir} -lrdkafka")
-
- configure_file(
- "../packaging/cmake/rdkafka.pc.in"
- "${GENERATED_DIR}/rdkafka.pc"
- @ONLY
- )
- install(
- FILES ${GENERATED_DIR}/rdkafka.pc
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
- )
-else()
- set(PKG_CONFIG_NAME "librdkafka-static")
- set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library (static)")
- set(PKG_CONFIG_CFLAGS "-I\${includedir} -DLIBRDKAFKA_STATICLIB")
- set(PKG_CONFIG_LIBS "-L\${libdir} \${libdir}/librdkafka.a")
- string(APPEND PKG_CONFIG_LIBS " ${PKG_CONFIG_LIBS_PRIVATE}")
- set(PKG_CONFIG_LIBS_PRIVATE "")
- configure_file(
- "../packaging/cmake/rdkafka.pc.in"
- "${GENERATED_DIR}/rdkafka-static.pc"
- @ONLY
- )
- install(
- FILES ${GENERATED_DIR}/rdkafka-static.pc
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
- )
-endif()
-
-install(
- TARGETS rdkafka
- EXPORT "${targets_export_name}"
- LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
- ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
- RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
- INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
-)
-
-install(
- FILES "rdkafka.h" "rdkafka_mock.h"
- DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka"
-)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/Makefile b/fluent-bit/lib/librdkafka-2.1.0/src/Makefile
deleted file mode 100644
index 26df5723b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/Makefile
+++ /dev/null
@@ -1,97 +0,0 @@
-PKGNAME= librdkafka
-LIBNAME= librdkafka
-LIBVER= 1
-
--include ../Makefile.config
-
-ifneq ($(wildcard ../.git),)
-# Add librdkafka version string from git tag if this is a git checkout
-CPPFLAGS += -DLIBRDKAFKA_GIT_VERSION="\"$(shell git describe --abbrev=6 --dirty --tags 2>/dev/null)\""
-endif
-
-SRCS_$(WITH_SASL_CYRUS) += rdkafka_sasl_cyrus.c
-SRCS_$(WITH_SASL_SCRAM) += rdkafka_sasl_scram.c
-SRCS_$(WITH_SASL_OAUTHBEARER) += rdkafka_sasl_oauthbearer.c
-SRCS_$(WITH_SNAPPY) += snappy.c
-SRCS_$(WITH_ZLIB) += rdgz.c
-SRCS_$(WITH_ZSTD) += rdkafka_zstd.c
-SRCS_$(WITH_HDRHISTOGRAM) += rdhdrhistogram.c
-SRCS_$(WITH_SSL) += rdkafka_ssl.c
-SRCS_$(WITH_CURL) += rdhttp.c
-SRCS_$(WITH_OAUTHBEARER_OIDC) += rdkafka_sasl_oauthbearer_oidc.c
-
-SRCS_LZ4 = rdxxhash.c
-ifneq ($(WITH_LZ4_EXT), y)
-# Use built-in liblz4
-SRCS_LZ4 += lz4.c lz4frame.c lz4hc.c
-endif
-SRCS_y += rdkafka_lz4.c $(SRCS_LZ4)
-
-SRCS_$(WITH_LIBDL) += rddl.c
-SRCS_$(WITH_PLUGINS) += rdkafka_plugin.c
-
-ifneq ($(HAVE_REGEX), y)
-SRCS_y += regexp.c
-endif
-
-SRCS= rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \
- rdkafka_conf.c rdkafka_timer.c rdkafka_offset.c \
- rdkafka_transport.c rdkafka_buf.c rdkafka_queue.c rdkafka_op.c \
- rdkafka_request.c rdkafka_cgrp.c rdkafka_pattern.c \
- rdkafka_partition.c rdkafka_subscription.c \
- rdkafka_assignment.c \
- rdkafka_assignor.c rdkafka_range_assignor.c \
- rdkafka_roundrobin_assignor.c rdkafka_sticky_assignor.c \
- rdkafka_feature.c \
- rdcrc32.c crc32c.c rdmurmur2.c rdfnv1a.c cJSON.c \
- rdaddr.c rdrand.c rdlist.c \
- tinycthread.c tinycthread_extra.c \
- rdlog.c rdstring.c rdkafka_event.c rdkafka_metadata.c \
- rdregex.c rdports.c rdkafka_metadata_cache.c rdavl.c \
- rdkafka_sasl.c rdkafka_sasl_plain.c rdkafka_interceptor.c \
- rdkafka_msgset_writer.c rdkafka_msgset_reader.c \
- rdkafka_header.c rdkafka_admin.c rdkafka_aux.c \
- rdkafka_background.c rdkafka_idempotence.c rdkafka_cert.c \
- rdkafka_txnmgr.c rdkafka_coord.c \
- rdvarint.c rdbuf.c rdmap.c rdunittest.c \
- rdkafka_mock.c rdkafka_mock_handlers.c rdkafka_mock_cgrp.c \
- rdkafka_error.c rdkafka_fetcher.c \
- $(SRCS_y)
-
-HDRS= rdkafka.h rdkafka_mock.h
-
-OBJS= $(SRCS:.c=.o)
-
-
-all: lib check
-
-include ../mklove/Makefile.base
-
-CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a
-
-file-check: lib
-check: file-check
- @(printf "%-30s " "Symbol visibility" ; \
- (($(SYMDUMPER) $(LIBFILENAME) | grep rd_kafka_new >/dev/null) && \
- ($(SYMDUMPER) $(LIBFILENAME) | grep -v rd_kafka_destroy >/dev/null) && \
- printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n") || \
- printf "$(MKL_RED)FAILED$(MKL_CLR_RESET)\n")
-
-install: lib-install
-uninstall: lib-uninstall
-
-clean: lib-clean
-
-# Compile LZ4 with -O3
-$(SRCS_LZ4:.c=.o): CFLAGS:=$(CFLAGS) -O3
-
-ifeq ($(WITH_LDS),y)
-# Enable linker script if supported by platform
-LIB_LDFLAGS+= $(LDFLAG_LINKERSCRIPT)$(LIBNAME_LDS)
-
-$(LIBNAME_LDS): $(HDRS)
- @(printf "$(MKL_YELLOW)Generating linker script $@ from $(HDRS)$(MKL_CLR_RESET)\n" ; \
- cat $(HDRS) | ../lds-gen.py > $@)
-endif
-
--include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c b/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c
deleted file mode 100644
index 9aec18469..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c
+++ /dev/null
@@ -1,2834 +0,0 @@
-/*
- Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-*/
-
-/* cJSON */
-/* JSON parser in C. */
-
-/* disable warnings about old C89 functions in MSVC */
-#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
-#define _CRT_SECURE_NO_DEPRECATE
-#endif
-
-#ifdef __GNUC__
-#pragma GCC visibility push(default)
-#endif
-#if defined(_MSC_VER)
-#pragma warning(push)
-/* disable warning about single line comments in system headers */
-#pragma warning(disable : 4001)
-#endif
-
-#include <string.h>
-#include <stdio.h>
-#include <math.h>
-#include <stdlib.h>
-#include <limits.h>
-#include <ctype.h>
-#include <float.h>
-
-#ifdef ENABLE_LOCALES
-#include <locale.h>
-#endif
-
-#if defined(_MSC_VER)
-#pragma warning(pop)
-#endif
-#ifdef __GNUC__
-#pragma GCC visibility pop
-#endif
-
-#include "cJSON.h"
-
-/* define our own boolean type */
-#ifdef true
-#undef true
-#endif
-#define true ((cJSON_bool)1)
-
-#ifdef false
-#undef false
-#endif
-#define false ((cJSON_bool)0)
-
-/* define isnan and isinf for ANSI C; in C99 and above, isnan and isinf have
- * been defined in math.h */
-#ifndef isinf
-#define isinf(d) (isnan((d - d)) && !isnan(d))
-#endif
-#ifndef isnan
-#define isnan(d) (d != d)
-#endif
-
-#ifndef NAN
-#define NAN 0.0 / 0.0
-#endif
-
-typedef struct {
- const unsigned char *json;
- size_t position;
-} error;
-static error global_error = {NULL, 0};
-
-CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) {
- return (const char *)(global_error.json + global_error.position);
-}
-
-CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item) {
- if (!cJSON_IsString(item)) {
- return NULL;
- }
-
- return item->valuestring;
-}
-
-CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item) {
- if (!cJSON_IsNumber(item)) {
- return (double)NAN;
- }
-
- return item->valuedouble;
-}
-
-/* This is a safeguard to prevent copy-pasters from using incompatible C and
- * header files */
-#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || \
- (CJSON_VERSION_PATCH != 14)
-#error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
-#endif
-
-CJSON_PUBLIC(const char *) cJSON_Version(void) {
- static char version[15];
- sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR,
- CJSON_VERSION_PATCH);
-
- return version;
-}
-
-/* Case insensitive string comparison, doesn't consider two NULL pointers equal
- * though */
-static int case_insensitive_strcmp(const unsigned char *string1,
- const unsigned char *string2) {
- if ((string1 == NULL) || (string2 == NULL)) {
- return 1;
- }
-
- if (string1 == string2) {
- return 0;
- }
-
- for (; tolower(*string1) == tolower(*string2);
- (void)string1++, string2++) {
- if (*string1 == '\0') {
- return 0;
- }
- }
-
- return tolower(*string1) - tolower(*string2);
-}
-
-typedef struct internal_hooks {
- void *(CJSON_CDECL *allocate)(size_t size);
- void(CJSON_CDECL *deallocate)(void *pointer);
- void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
-} internal_hooks;
-
-#if defined(_MSC_VER)
-/* work around MSVC error C2322: '...' address of dllimport '...' is not static
- */
-static void *CJSON_CDECL internal_malloc(size_t size) {
- return malloc(size);
-}
-static void CJSON_CDECL internal_free(void *pointer) {
- free(pointer);
-}
-static void *CJSON_CDECL internal_realloc(void *pointer, size_t size) {
- return realloc(pointer, size);
-}
-#else
-#define internal_malloc malloc
-#define internal_free free
-#define internal_realloc realloc
-#endif
-
-/* strlen of character literals resolved at compile time */
-#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
-
-static internal_hooks global_hooks = {internal_malloc, internal_free,
- internal_realloc};
-
-static unsigned char *cJSON_strdup(const unsigned char *string,
- const internal_hooks *const hooks) {
- size_t length = 0;
- unsigned char *copy = NULL;
-
- if (string == NULL) {
- return NULL;
- }
-
- length = strlen((const char *)string) + sizeof("");
- copy = (unsigned char *)hooks->allocate(length);
- if (copy == NULL) {
- return NULL;
- }
- memcpy(copy, string, length);
-
- return copy;
-}
-
-CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks) {
- if (hooks == NULL) {
- /* Reset hooks */
- global_hooks.allocate = malloc;
- global_hooks.deallocate = free;
- global_hooks.reallocate = realloc;
- return;
- }
-
- global_hooks.allocate = malloc;
- if (hooks->malloc_fn != NULL) {
- global_hooks.allocate = hooks->malloc_fn;
- }
-
- global_hooks.deallocate = free;
- if (hooks->free_fn != NULL) {
- global_hooks.deallocate = hooks->free_fn;
- }
-
- /* use realloc only if both free and malloc are used */
- global_hooks.reallocate = NULL;
- if ((global_hooks.allocate == malloc) &&
- (global_hooks.deallocate == free)) {
- global_hooks.reallocate = realloc;
- }
-}
-
-/* Internal constructor. */
-static cJSON *cJSON_New_Item(const internal_hooks *const hooks) {
- cJSON *node = (cJSON *)hooks->allocate(sizeof(cJSON));
- if (node) {
- memset(node, '\0', sizeof(cJSON));
- }
-
- return node;
-}
-
-/* Delete a cJSON structure. */
-CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) {
- cJSON *next = NULL;
- while (item != NULL) {
- next = item->next;
- if (!(item->type & cJSON_IsReference) &&
- (item->child != NULL)) {
- cJSON_Delete(item->child);
- }
- if (!(item->type & cJSON_IsReference) &&
- (item->valuestring != NULL)) {
- global_hooks.deallocate(item->valuestring);
- }
- if (!(item->type & cJSON_StringIsConst) &&
- (item->string != NULL)) {
- global_hooks.deallocate(item->string);
- }
- global_hooks.deallocate(item);
- item = next;
- }
-}
-
-/* get the decimal point character of the current locale */
-static unsigned char get_decimal_point(void) {
-#ifdef ENABLE_LOCALES
- struct lconv *lconv = localeconv();
- return (unsigned char)lconv->decimal_point[0];
-#else
- return '.';
-#endif
-}
-
-typedef struct {
- const unsigned char *content;
- size_t length;
- size_t offset;
- size_t depth; /* How deeply nested (in arrays/objects) is the input at
- the current offset. */
- internal_hooks hooks;
-} parse_buffer;
-
-/* check if the given size is left to read in a given parse buffer (starting
- * with 1) */
-#define can_read(buffer, size) \
- ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length))
-/* check if the buffer can be accessed at the given index (starting with 0) */
-#define can_access_at_index(buffer, index) \
- ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length))
-#define cannot_access_at_index(buffer, index) \
- (!can_access_at_index(buffer, index))
-/* get a pointer to the buffer at the position */
-#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset)
-
-/* Parse the input text to generate a number, and populate the result into item.
- */
-static cJSON_bool parse_number(cJSON *const item,
- parse_buffer *const input_buffer) {
- double number = 0;
- unsigned char *after_end = NULL;
- unsigned char number_c_string[64];
- unsigned char decimal_point = get_decimal_point();
- size_t i = 0;
-
- if ((input_buffer == NULL) || (input_buffer->content == NULL)) {
- return false;
- }
-
- /* copy the number into a temporary buffer and replace '.' with the
- * decimal point of the current locale (for strtod)
- * This also takes care of '\0' not necessarily being available for
- * marking the end of the input */
- for (i = 0; (i < (sizeof(number_c_string) - 1)) &&
- can_access_at_index(input_buffer, i);
- i++) {
- switch (buffer_at_offset(input_buffer)[i]) {
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- case '+':
- case '-':
- case 'e':
- case 'E':
- number_c_string[i] = buffer_at_offset(input_buffer)[i];
- break;
-
- case '.':
- number_c_string[i] = decimal_point;
- break;
-
- default:
- goto loop_end;
- }
- }
-loop_end:
- number_c_string[i] = '\0';
-
- number = strtod((const char *)number_c_string, (char **)&after_end);
- if (number_c_string == after_end) {
- return false; /* parse_error */
- }
-
- item->valuedouble = number;
-
- /* use saturation in case of overflow */
- if (number >= INT_MAX) {
- item->valueint = INT_MAX;
- } else if (number <= (double)INT_MIN) {
- item->valueint = INT_MIN;
- } else {
- item->valueint = (int)number;
- }
-
- item->type = cJSON_Number;
-
- input_buffer->offset += (size_t)(after_end - number_c_string);
- return true;
-}
-
-/* don't ask me, but the original cJSON_SetNumberValue returns an integer or
- * double */
-CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) {
- if (number >= INT_MAX) {
- object->valueint = INT_MAX;
- } else if (number <= (double)INT_MIN) {
- object->valueint = INT_MIN;
- } else {
- object->valueint = (int)number;
- }
-
- return object->valuedouble = number;
-}
-
-CJSON_PUBLIC(char *)
-cJSON_SetValuestring(cJSON *object, const char *valuestring) {
- char *copy = NULL;
- /* if object's type is not cJSON_String or is cJSON_IsReference, it
- * should not set valuestring */
- if (!(object->type & cJSON_String) ||
- (object->type & cJSON_IsReference)) {
- return NULL;
- }
- if (strlen(valuestring) <= strlen(object->valuestring)) {
- strcpy(object->valuestring, valuestring);
- return object->valuestring;
- }
- copy = (char *)cJSON_strdup((const unsigned char *)valuestring,
- &global_hooks);
- if (copy == NULL) {
- return NULL;
- }
- if (object->valuestring != NULL) {
- cJSON_free(object->valuestring);
- }
- object->valuestring = copy;
-
- return copy;
-}
-
-typedef struct {
- unsigned char *buffer;
- size_t length;
- size_t offset;
- size_t depth; /* current nesting depth (for formatted printing) */
- cJSON_bool noalloc;
- cJSON_bool format; /* is this print a formatted print */
- internal_hooks hooks;
-} printbuffer;
-
-/* realloc printbuffer if necessary to have at least "needed" bytes more */
-static unsigned char *ensure(printbuffer *const p, size_t needed) {
- unsigned char *newbuffer = NULL;
- size_t newsize = 0;
-
- if ((p == NULL) || (p->buffer == NULL)) {
- return NULL;
- }
-
- if ((p->length > 0) && (p->offset >= p->length)) {
- /* make sure that offset is valid */
- return NULL;
- }
-
- if (needed > INT_MAX) {
- /* sizes bigger than INT_MAX are currently not supported */
- return NULL;
- }
-
- needed += p->offset + 1;
- if (needed <= p->length) {
- return p->buffer + p->offset;
- }
-
- if (p->noalloc) {
- return NULL;
- }
-
- /* calculate new buffer size */
- if (needed > (INT_MAX / 2)) {
- /* overflow of int, use INT_MAX if possible */
- if (needed <= INT_MAX) {
- newsize = INT_MAX;
- } else {
- return NULL;
- }
- } else {
- newsize = needed * 2;
- }
-
- if (p->hooks.reallocate != NULL) {
- /* reallocate with realloc if available */
- newbuffer =
- (unsigned char *)p->hooks.reallocate(p->buffer, newsize);
- if (newbuffer == NULL) {
- p->hooks.deallocate(p->buffer);
- p->length = 0;
- p->buffer = NULL;
-
- return NULL;
- }
- } else {
- /* otherwise reallocate manually */
- newbuffer = (unsigned char *)p->hooks.allocate(newsize);
- if (!newbuffer) {
- p->hooks.deallocate(p->buffer);
- p->length = 0;
- p->buffer = NULL;
-
- return NULL;
- }
- if (newbuffer) {
- memcpy(newbuffer, p->buffer, p->offset + 1);
- }
- p->hooks.deallocate(p->buffer);
- }
- p->length = newsize;
- p->buffer = newbuffer;
-
- return newbuffer + p->offset;
-}
-
-/* calculate the new length of the string in a printbuffer and update the offset
- */
-static void update_offset(printbuffer *const buffer) {
- const unsigned char *buffer_pointer = NULL;
- if ((buffer == NULL) || (buffer->buffer == NULL)) {
- return;
- }
- buffer_pointer = buffer->buffer + buffer->offset;
-
- buffer->offset += strlen((const char *)buffer_pointer);
-}
-
-/* secure comparison of floating-point variables */
-static cJSON_bool compare_double(double a, double b) {
- double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
- return (fabs(a - b) <= maxVal * DBL_EPSILON);
-}
-
-/* Render the number nicely from the given item into a string. */
-static cJSON_bool print_number(const cJSON *const item,
- printbuffer *const output_buffer) {
- unsigned char *output_pointer = NULL;
- double d = item->valuedouble;
- int length = 0;
- size_t i = 0;
- unsigned char number_buffer[26] = {
- 0}; /* temporary buffer to print the number into */
- unsigned char decimal_point = get_decimal_point();
- double test = 0.0;
-
- if (output_buffer == NULL) {
- return false;
- }
-
- /* This checks for NaN and Infinity */
- if (isnan(d) || isinf(d)) {
- length = sprintf((char *)number_buffer, "null");
- } else {
- /* Try 15 decimal places of precision to avoid nonsignificant
- * nonzero digits */
- length = sprintf((char *)number_buffer, "%1.15g", d);
-
- /* Check whether the original double can be recovered */
- if ((sscanf((char *)number_buffer, "%lg", &test) != 1) ||
- !compare_double((double)test, d)) {
- /* If not, print with 17 decimal places of precision */
- length = sprintf((char *)number_buffer, "%1.17g", d);
- }
- }
-
- /* sprintf failed or buffer overrun occurred */
- if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) {
- return false;
- }
-
- /* reserve appropriate space in the output */
- output_pointer = ensure(output_buffer, (size_t)length + sizeof(""));
- if (output_pointer == NULL) {
- return false;
- }
-
- /* copy the printed number to the output and replace locale
- * dependent decimal point with '.' */
- for (i = 0; i < ((size_t)length); i++) {
- if (number_buffer[i] == decimal_point) {
- output_pointer[i] = '.';
- continue;
- }
-
- output_pointer[i] = number_buffer[i];
- }
- output_pointer[i] = '\0';
-
- output_buffer->offset += (size_t)length;
-
- return true;
-}
-
-/* parse 4 digit hexadecimal number */
-static unsigned parse_hex4(const unsigned char *const input) {
- unsigned int h = 0;
- size_t i = 0;
-
- for (i = 0; i < 4; i++) {
- /* parse digit */
- if ((input[i] >= '0') && (input[i] <= '9')) {
- h += (unsigned int)input[i] - '0';
- } else if ((input[i] >= 'A') && (input[i] <= 'F')) {
- h += (unsigned int)10 + input[i] - 'A';
- } else if ((input[i] >= 'a') && (input[i] <= 'f')) {
- h += (unsigned int)10 + input[i] - 'a';
- } else /* invalid */
- {
- return 0;
- }
-
- if (i < 3) {
- /* shift left to make place for the next nibble */
- h = h << 4;
- }
- }
-
- return h;
-}
-
-/* converts a UTF-16 literal to UTF-8
- * A literal can be one or two sequences of the form \uXXXX */
-static unsigned char
-utf16_literal_to_utf8(const unsigned char *const input_pointer,
- const unsigned char *const input_end,
- unsigned char **output_pointer) {
- long unsigned int codepoint = 0;
- unsigned int first_code = 0;
- const unsigned char *first_sequence = input_pointer;
- unsigned char utf8_length = 0;
- unsigned char utf8_position = 0;
- unsigned char sequence_length = 0;
- unsigned char first_byte_mark = 0;
-
- if ((input_end - first_sequence) < 6) {
- /* input ends unexpectedly */
- goto fail;
- }
-
- /* get the first utf16 sequence */
- first_code = parse_hex4(first_sequence + 2);
-
- /* check that the code is valid */
- if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) {
- goto fail;
- }
-
- /* UTF16 surrogate pair */
- if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) {
- const unsigned char *second_sequence = first_sequence + 6;
- unsigned int second_code = 0;
- sequence_length = 12; /* \uXXXX\uXXXX */
-
- if ((input_end - second_sequence) < 6) {
- /* input ends unexpectedly */
- goto fail;
- }
-
- if ((second_sequence[0] != '\\') ||
- (second_sequence[1] != 'u')) {
- /* missing second half of the surrogate pair */
- goto fail;
- }
-
- /* get the second utf16 sequence */
- second_code = parse_hex4(second_sequence + 2);
- /* check that the code is valid */
- if ((second_code < 0xDC00) || (second_code > 0xDFFF)) {
- /* invalid second half of the surrogate pair */
- goto fail;
- }
-
-
- /* calculate the unicode codepoint from the surrogate pair */
- codepoint = 0x10000 + (((first_code & 0x3FF) << 10) |
- (second_code & 0x3FF));
- } else {
- sequence_length = 6; /* \uXXXX */
- codepoint = first_code;
- }
-
- /* encode as UTF-8
- * takes at maximum 4 bytes to encode:
- * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
- if (codepoint < 0x80) {
- /* normal ascii, encoding 0xxxxxxx */
- utf8_length = 1;
- } else if (codepoint < 0x800) {
- /* two bytes, encoding 110xxxxx 10xxxxxx */
- utf8_length = 2;
- first_byte_mark = 0xC0; /* 11000000 */
- } else if (codepoint < 0x10000) {
- /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */
- utf8_length = 3;
- first_byte_mark = 0xE0; /* 11100000 */
- } else if (codepoint <= 0x10FFFF) {
- /* four bytes, encoding 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
- utf8_length = 4;
- first_byte_mark = 0xF0; /* 11110000 */
- } else {
- /* invalid unicode codepoint */
- goto fail;
- }
-
- /* encode as utf8 */
- for (utf8_position = (unsigned char)(utf8_length - 1);
- utf8_position > 0; utf8_position--) {
- /* 10xxxxxx */
- (*output_pointer)[utf8_position] =
- (unsigned char)((codepoint | 0x80) & 0xBF);
- codepoint >>= 6;
- }
- /* encode first byte */
- if (utf8_length > 1) {
- (*output_pointer)[0] =
- (unsigned char)((codepoint | first_byte_mark) & 0xFF);
- } else {
- (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F);
- }
-
- *output_pointer += utf8_length;
-
- return sequence_length;
-
-fail:
- return 0;
-}
-
-/* Parse the input text into an unescaped C string, and populate item. */
-static cJSON_bool parse_string(cJSON *const item,
- parse_buffer *const input_buffer) {
- const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1;
- const unsigned char *input_end = buffer_at_offset(input_buffer) + 1;
- unsigned char *output_pointer = NULL;
- unsigned char *output = NULL;
-
- /* not a string */
- if (buffer_at_offset(input_buffer)[0] != '\"') {
- goto fail;
- }
-
- {
- /* calculate approximate size of the output (overestimate) */
- size_t allocation_length = 0;
- size_t skipped_bytes = 0;
- while (((size_t)(input_end - input_buffer->content) <
- input_buffer->length) &&
- (*input_end != '\"')) {
- /* is escape sequence */
- if (input_end[0] == '\\') {
- if ((size_t)(input_end + 1 -
- input_buffer->content) >=
- input_buffer->length) {
- /* prevent buffer overflow when last
- * input character is a backslash */
- goto fail;
- }
- skipped_bytes++;
- input_end++;
- }
- input_end++;
- }
- if (((size_t)(input_end - input_buffer->content) >=
- input_buffer->length) ||
- (*input_end != '\"')) {
- goto fail; /* string ended unexpectedly */
- }
-
- /* This is at most how much we need for the output */
- allocation_length =
- (size_t)(input_end - buffer_at_offset(input_buffer)) -
- skipped_bytes;
- output = (unsigned char *)input_buffer->hooks.allocate(
- allocation_length + sizeof(""));
- if (output == NULL) {
- goto fail; /* allocation failure */
- }
- }
-
- output_pointer = output;
- /* loop through the string literal */
- while (input_pointer < input_end) {
- if (*input_pointer != '\\') {
- *output_pointer++ = *input_pointer++;
- }
- /* escape sequence */
- else {
- unsigned char sequence_length = 2;
- if ((input_end - input_pointer) < 1) {
- goto fail;
- }
-
- switch (input_pointer[1]) {
- case 'b':
- *output_pointer++ = '\b';
- break;
- case 'f':
- *output_pointer++ = '\f';
- break;
- case 'n':
- *output_pointer++ = '\n';
- break;
- case 'r':
- *output_pointer++ = '\r';
- break;
- case 't':
- *output_pointer++ = '\t';
- break;
- case '\"':
- case '\\':
- case '/':
- *output_pointer++ = input_pointer[1];
- break;
-
- /* UTF-16 literal */
- case 'u':
- sequence_length = utf16_literal_to_utf8(
- input_pointer, input_end, &output_pointer);
- if (sequence_length == 0) {
- /* failed to convert UTF16-literal to
- * UTF-8 */
- goto fail;
- }
- break;
-
- default:
- goto fail;
- }
- input_pointer += sequence_length;
- }
- }
-
- /* zero terminate the output */
- *output_pointer = '\0';
-
- item->type = cJSON_String;
- item->valuestring = (char *)output;
-
- input_buffer->offset = (size_t)(input_end - input_buffer->content);
- input_buffer->offset++;
-
- return true;
-
-fail:
- if (output != NULL) {
- input_buffer->hooks.deallocate(output);
- }
-
- if (input_pointer != NULL) {
- input_buffer->offset =
- (size_t)(input_pointer - input_buffer->content);
- }
-
- return false;
-}
-
-/* Render the cstring provided to an escaped version that can be printed. */
-static cJSON_bool print_string_ptr(const unsigned char *const input,
- printbuffer *const output_buffer) {
- const unsigned char *input_pointer = NULL;
- unsigned char *output = NULL;
- unsigned char *output_pointer = NULL;
- size_t output_length = 0;
- /* number of additional characters needed for escaping */
- size_t escape_characters = 0;
-
- if (output_buffer == NULL) {
- return false;
- }
-
- /* empty string */
- if (input == NULL) {
- output = ensure(output_buffer, sizeof("\"\""));
- if (output == NULL) {
- return false;
- }
- strcpy((char *)output, "\"\"");
-
- return true;
- }
-
- /* count how many characters need to be escaped */
- for (input_pointer = input; *input_pointer; input_pointer++) {
- switch (*input_pointer) {
- case '\"':
- case '\\':
- case '\b':
- case '\f':
- case '\n':
- case '\r':
- case '\t':
- /* one character escape sequence */
- escape_characters++;
- break;
- default:
- if (*input_pointer < 32) {
- /* UTF-16 escape sequence \uXXXX */
- escape_characters += 5;
- }
- break;
- }
- }
- output_length = (size_t)(input_pointer - input) + escape_characters;
-
- output = ensure(output_buffer, output_length + sizeof("\"\""));
- if (output == NULL) {
- return false;
- }
-
- /* no characters have to be escaped */
- if (escape_characters == 0) {
- output[0] = '\"';
- memcpy(output + 1, input, output_length);
- output[output_length + 1] = '\"';
- output[output_length + 2] = '\0';
-
- return true;
- }
-
- output[0] = '\"';
- output_pointer = output + 1;
- /* copy the string */
- for (input_pointer = input; *input_pointer != '\0';
- (void)input_pointer++, output_pointer++) {
- if ((*input_pointer > 31) && (*input_pointer != '\"') &&
- (*input_pointer != '\\')) {
- /* normal character, copy */
- *output_pointer = *input_pointer;
- } else {
- /* character needs to be escaped */
- *output_pointer++ = '\\';
- switch (*input_pointer) {
- case '\\':
- *output_pointer = '\\';
- break;
- case '\"':
- *output_pointer = '\"';
- break;
- case '\b':
- *output_pointer = 'b';
- break;
- case '\f':
- *output_pointer = 'f';
- break;
- case '\n':
- *output_pointer = 'n';
- break;
- case '\r':
- *output_pointer = 'r';
- break;
- case '\t':
- *output_pointer = 't';
- break;
- default:
- /* escape and print as unicode codepoint */
- sprintf((char *)output_pointer, "u%04x",
- *input_pointer);
- output_pointer += 4;
- break;
- }
- }
- }
- output[output_length + 1] = '\"';
- output[output_length + 2] = '\0';
-
- return true;
-}
-
-/* Invoke print_string_ptr (which is useful) on an item. */
-static cJSON_bool print_string(const cJSON *const item, printbuffer *const p) {
- return print_string_ptr((unsigned char *)item->valuestring, p);
-}
-
-/* Predeclare these prototypes. */
-static cJSON_bool parse_value(cJSON *const item,
- parse_buffer *const input_buffer);
-static cJSON_bool print_value(const cJSON *const item,
- printbuffer *const output_buffer);
-static cJSON_bool parse_array(cJSON *const item,
- parse_buffer *const input_buffer);
-static cJSON_bool print_array(const cJSON *const item,
- printbuffer *const output_buffer);
-static cJSON_bool parse_object(cJSON *const item,
- parse_buffer *const input_buffer);
-static cJSON_bool print_object(const cJSON *const item,
- printbuffer *const output_buffer);
-
-/* Utility to jump whitespace and cr/lf */
-static parse_buffer *buffer_skip_whitespace(parse_buffer *const buffer) {
- if ((buffer == NULL) || (buffer->content == NULL)) {
- return NULL;
- }
-
- if (cannot_access_at_index(buffer, 0)) {
- return buffer;
- }
-
- while (can_access_at_index(buffer, 0) &&
- (buffer_at_offset(buffer)[0] <= 32)) {
- buffer->offset++;
- }
-
- if (buffer->offset == buffer->length) {
- buffer->offset--;
- }
-
- return buffer;
-}
-
-/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */
-static parse_buffer *skip_utf8_bom(parse_buffer *const buffer) {
- if ((buffer == NULL) || (buffer->content == NULL) ||
- (buffer->offset != 0)) {
- return NULL;
- }
-
- if (can_access_at_index(buffer, 4) &&
- (strncmp((const char *)buffer_at_offset(buffer), "\xEF\xBB\xBF",
- 3) == 0)) {
- buffer->offset += 3;
- }
-
- return buffer;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_ParseWithOpts(const char *value,
- const char **return_parse_end,
- cJSON_bool require_null_terminated) {
- size_t buffer_length;
-
- if (NULL == value) {
- return NULL;
- }
-
- /* Adding null character size due to require_null_terminated. */
- buffer_length = strlen(value) + sizeof("");
-
- return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end,
- require_null_terminated);
-}
-
-/* Parse an object - create a new root, and populate. */
-CJSON_PUBLIC(cJSON *)
-cJSON_ParseWithLengthOpts(const char *value,
- size_t buffer_length,
- const char **return_parse_end,
- cJSON_bool require_null_terminated) {
- parse_buffer buffer = {0, 0, 0, 0, {0, 0, 0}};
- cJSON *item = NULL;
-
- /* reset error position */
- global_error.json = NULL;
- global_error.position = 0;
-
- if (value == NULL || 0 == buffer_length) {
- goto fail;
- }
-
- buffer.content = (const unsigned char *)value;
- buffer.length = buffer_length;
- buffer.offset = 0;
- buffer.hooks = global_hooks;
-
- item = cJSON_New_Item(&global_hooks);
- if (item == NULL) /* memory fail */
- {
- goto fail;
- }
-
- if (!parse_value(item,
- buffer_skip_whitespace(skip_utf8_bom(&buffer)))) {
- /* parse failure. ep is set. */
- goto fail;
- }
-
- /* if we require null-terminated JSON without appended garbage, skip and
- * then check for a null terminator */
- if (require_null_terminated) {
- buffer_skip_whitespace(&buffer);
- if ((buffer.offset >= buffer.length) ||
- buffer_at_offset(&buffer)[0] != '\0') {
- goto fail;
- }
- }
- if (return_parse_end) {
- *return_parse_end = (const char *)buffer_at_offset(&buffer);
- }
-
- return item;
-
-fail:
- if (item != NULL) {
- cJSON_Delete(item);
- }
-
- if (value != NULL) {
- error local_error;
- local_error.json = (const unsigned char *)value;
- local_error.position = 0;
-
- if (buffer.offset < buffer.length) {
- local_error.position = buffer.offset;
- } else if (buffer.length > 0) {
- local_error.position = buffer.length - 1;
- }
-
- if (return_parse_end != NULL) {
- *return_parse_end = (const char *)local_error.json +
- local_error.position;
- }
-
- global_error = local_error;
- }
-
- return NULL;
-}
-
-/* Default options for cJSON_Parse */
-CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) {
- return cJSON_ParseWithOpts(value, 0, 0);
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_ParseWithLength(const char *value, size_t buffer_length) {
- return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0);
-}
-
-#define cjson_min(a, b) (((a) < (b)) ? (a) : (b))
-
-static unsigned char *print(const cJSON *const item,
- cJSON_bool format,
- const internal_hooks *const hooks) {
- static const size_t default_buffer_size = 256;
- printbuffer buffer[1];
- unsigned char *printed = NULL;
-
- memset(buffer, 0, sizeof(buffer));
-
- /* create buffer */
- buffer->buffer = (unsigned char *)hooks->allocate(default_buffer_size);
- buffer->length = default_buffer_size;
- buffer->format = format;
- buffer->hooks = *hooks;
- if (buffer->buffer == NULL) {
- goto fail;
- }
-
- /* print the value */
- if (!print_value(item, buffer)) {
- goto fail;
- }
- update_offset(buffer);
-
- /* check if reallocate is available */
- if (hooks->reallocate != NULL) {
- printed = (unsigned char *)hooks->reallocate(
- buffer->buffer, buffer->offset + 1);
- if (printed == NULL) {
- goto fail;
- }
- buffer->buffer = NULL;
- } else /* otherwise copy the JSON over to a new buffer */
- {
- printed = (unsigned char *)hooks->allocate(buffer->offset + 1);
- if (printed == NULL) {
- goto fail;
- }
- memcpy(printed, buffer->buffer,
- cjson_min(buffer->length, buffer->offset + 1));
- printed[buffer->offset] = '\0'; /* just to be sure */
-
- /* free the buffer */
- hooks->deallocate(buffer->buffer);
- }
-
- return printed;
-
-fail:
- if (buffer->buffer != NULL) {
- hooks->deallocate(buffer->buffer);
- }
-
- if (printed != NULL) {
- hooks->deallocate(printed);
- }
-
- return NULL;
-}
-
-/* Render a cJSON item/entity/structure to text. */
-CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) {
- return (char *)print(item, true, &global_hooks);
-}
-
-CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) {
- return (char *)print(item, false, &global_hooks);
-}
-
-CJSON_PUBLIC(char *)
-cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) {
- printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}};
-
- if (prebuffer < 0) {
- return NULL;
- }
-
- p.buffer = (unsigned char *)global_hooks.allocate((size_t)prebuffer);
- if (!p.buffer) {
- return NULL;
- }
-
- p.length = (size_t)prebuffer;
- p.offset = 0;
- p.noalloc = false;
- p.format = fmt;
- p.hooks = global_hooks;
-
- if (!print_value(item, &p)) {
- global_hooks.deallocate(p.buffer);
- return NULL;
- }
-
- return (char *)p.buffer;
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_PrintPreallocated(cJSON *item,
- char *buffer,
- const int length,
- const cJSON_bool format) {
- printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}};
-
- if ((length < 0) || (buffer == NULL)) {
- return false;
- }
-
- p.buffer = (unsigned char *)buffer;
- p.length = (size_t)length;
- p.offset = 0;
- p.noalloc = true;
- p.format = format;
- p.hooks = global_hooks;
-
- return print_value(item, &p);
-}
-
-/* Parser core - when encountering text, process appropriately. */
-static cJSON_bool parse_value(cJSON *const item,
- parse_buffer *const input_buffer) {
- if ((input_buffer == NULL) || (input_buffer->content == NULL)) {
- return false; /* no input */
- }
-
- /* parse the different types of values */
- /* null */
- if (can_read(input_buffer, 4) &&
- (strncmp((const char *)buffer_at_offset(input_buffer), "null", 4) ==
- 0)) {
- item->type = cJSON_NULL;
- input_buffer->offset += 4;
- return true;
- }
- /* false */
- if (can_read(input_buffer, 5) &&
- (strncmp((const char *)buffer_at_offset(input_buffer), "false",
- 5) == 0)) {
- item->type = cJSON_False;
- input_buffer->offset += 5;
- return true;
- }
- /* true */
- if (can_read(input_buffer, 4) &&
- (strncmp((const char *)buffer_at_offset(input_buffer), "true", 4) ==
- 0)) {
- item->type = cJSON_True;
- item->valueint = 1;
- input_buffer->offset += 4;
- return true;
- }
- /* string */
- if (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == '\"')) {
- return parse_string(item, input_buffer);
- }
- /* number */
- if (can_access_at_index(input_buffer, 0) &&
- ((buffer_at_offset(input_buffer)[0] == '-') ||
- ((buffer_at_offset(input_buffer)[0] >= '0') &&
- (buffer_at_offset(input_buffer)[0] <= '9')))) {
- return parse_number(item, input_buffer);
- }
- /* array */
- if (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == '[')) {
- return parse_array(item, input_buffer);
- }
- /* object */
- if (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == '{')) {
- return parse_object(item, input_buffer);
- }
-
- return false;
-}
-
-/* Render a value to text. */
-static cJSON_bool print_value(const cJSON *const item,
- printbuffer *const output_buffer) {
- unsigned char *output = NULL;
-
- if ((item == NULL) || (output_buffer == NULL)) {
- return false;
- }
-
- switch ((item->type) & 0xFF) {
- case cJSON_NULL:
- output = ensure(output_buffer, 5);
- if (output == NULL) {
- return false;
- }
- strcpy((char *)output, "null");
- return true;
-
- case cJSON_False:
- output = ensure(output_buffer, 6);
- if (output == NULL) {
- return false;
- }
- strcpy((char *)output, "false");
- return true;
-
- case cJSON_True:
- output = ensure(output_buffer, 5);
- if (output == NULL) {
- return false;
- }
- strcpy((char *)output, "true");
- return true;
-
- case cJSON_Number:
- return print_number(item, output_buffer);
-
- case cJSON_Raw: {
- size_t raw_length = 0;
- if (item->valuestring == NULL) {
- return false;
- }
-
- raw_length = strlen(item->valuestring) + sizeof("");
- output = ensure(output_buffer, raw_length);
- if (output == NULL) {
- return false;
- }
- memcpy(output, item->valuestring, raw_length);
- return true;
- }
-
- case cJSON_String:
- return print_string(item, output_buffer);
-
- case cJSON_Array:
- return print_array(item, output_buffer);
-
- case cJSON_Object:
- return print_object(item, output_buffer);
-
- default:
- return false;
- }
-}
-
-/* Build an array from input text. */
-static cJSON_bool parse_array(cJSON *const item,
- parse_buffer *const input_buffer) {
- cJSON *head = NULL; /* head of the linked list */
- cJSON *current_item = NULL;
-
- if (input_buffer->depth >= CJSON_NESTING_LIMIT) {
-                return false; /* too deeply nested */
- }
- input_buffer->depth++;
-
- if (buffer_at_offset(input_buffer)[0] != '[') {
- /* not an array */
- goto fail;
- }
-
- input_buffer->offset++;
- buffer_skip_whitespace(input_buffer);
- if (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == ']')) {
- /* empty array */
- goto success;
- }
-
- /* check if we skipped to the end of the buffer */
- if (cannot_access_at_index(input_buffer, 0)) {
- input_buffer->offset--;
- goto fail;
- }
-
- /* step back to character in front of the first element */
- input_buffer->offset--;
-        /* loop through the comma-separated array elements */
- do {
- /* allocate next item */
- cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
- if (new_item == NULL) {
- goto fail; /* allocation failure */
- }
-
- /* attach next item to list */
- if (head == NULL) {
- /* start the linked list */
- current_item = head = new_item;
- } else {
- /* add to the end and advance */
- current_item->next = new_item;
- new_item->prev = current_item;
- current_item = new_item;
- }
-
- /* parse next value */
- input_buffer->offset++;
- buffer_skip_whitespace(input_buffer);
- if (!parse_value(current_item, input_buffer)) {
- goto fail; /* failed to parse value */
- }
- buffer_skip_whitespace(input_buffer);
- } while (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == ','));
-
- if (cannot_access_at_index(input_buffer, 0) ||
- buffer_at_offset(input_buffer)[0] != ']') {
- goto fail; /* expected end of array */
- }
-
-success:
- input_buffer->depth--;
-
- if (head != NULL) {
- head->prev = current_item;
- }
-
- item->type = cJSON_Array;
- item->child = head;
-
- input_buffer->offset++;
-
- return true;
-
-fail:
- if (head != NULL) {
- cJSON_Delete(head);
- }
-
- return false;
-}
-
-/* Render an array to text */
-static cJSON_bool print_array(const cJSON *const item,
- printbuffer *const output_buffer) {
- unsigned char *output_pointer = NULL;
- size_t length = 0;
- cJSON *current_element = item->child;
-
- if (output_buffer == NULL) {
- return false;
- }
-
- /* Compose the output array. */
- /* opening square bracket */
- output_pointer = ensure(output_buffer, 1);
- if (output_pointer == NULL) {
- return false;
- }
-
- *output_pointer = '[';
- output_buffer->offset++;
- output_buffer->depth++;
-
- while (current_element != NULL) {
- if (!print_value(current_element, output_buffer)) {
- return false;
- }
- update_offset(output_buffer);
- if (current_element->next) {
- length = (size_t)(output_buffer->format ? 2 : 1);
- output_pointer = ensure(output_buffer, length + 1);
- if (output_pointer == NULL) {
- return false;
- }
- *output_pointer++ = ',';
- if (output_buffer->format) {
- *output_pointer++ = ' ';
- }
- *output_pointer = '\0';
- output_buffer->offset += length;
- }
- current_element = current_element->next;
- }
-
- output_pointer = ensure(output_buffer, 2);
- if (output_pointer == NULL) {
- return false;
- }
- *output_pointer++ = ']';
- *output_pointer = '\0';
- output_buffer->depth--;
-
- return true;
-}
-
-/* Build an object from the text. */
-static cJSON_bool parse_object(cJSON *const item,
- parse_buffer *const input_buffer) {
- cJSON *head = NULL; /* linked list head */
- cJSON *current_item = NULL;
-
- if (input_buffer->depth >= CJSON_NESTING_LIMIT) {
-                return false; /* too deeply nested */
- }
- input_buffer->depth++;
-
- if (cannot_access_at_index(input_buffer, 0) ||
- (buffer_at_offset(input_buffer)[0] != '{')) {
- goto fail; /* not an object */
- }
-
- input_buffer->offset++;
- buffer_skip_whitespace(input_buffer);
- if (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == '}')) {
- goto success; /* empty object */
- }
-
- /* check if we skipped to the end of the buffer */
- if (cannot_access_at_index(input_buffer, 0)) {
- input_buffer->offset--;
- goto fail;
- }
-
- /* step back to character in front of the first element */
- input_buffer->offset--;
-        /* loop through the comma-separated object members */
- do {
- /* allocate next item */
- cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
- if (new_item == NULL) {
- goto fail; /* allocation failure */
- }
-
- /* attach next item to list */
- if (head == NULL) {
- /* start the linked list */
- current_item = head = new_item;
- } else {
- /* add to the end and advance */
- current_item->next = new_item;
- new_item->prev = current_item;
- current_item = new_item;
- }
-
- /* parse the name of the child */
- input_buffer->offset++;
- buffer_skip_whitespace(input_buffer);
- if (!parse_string(current_item, input_buffer)) {
- goto fail; /* failed to parse name */
- }
- buffer_skip_whitespace(input_buffer);
-
- /* swap valuestring and string, because we parsed the name */
- current_item->string = current_item->valuestring;
- current_item->valuestring = NULL;
-
- if (cannot_access_at_index(input_buffer, 0) ||
- (buffer_at_offset(input_buffer)[0] != ':')) {
- goto fail; /* invalid object */
- }
-
- /* parse the value */
- input_buffer->offset++;
- buffer_skip_whitespace(input_buffer);
- if (!parse_value(current_item, input_buffer)) {
- goto fail; /* failed to parse value */
- }
- buffer_skip_whitespace(input_buffer);
- } while (can_access_at_index(input_buffer, 0) &&
- (buffer_at_offset(input_buffer)[0] == ','));
-
- if (cannot_access_at_index(input_buffer, 0) ||
- (buffer_at_offset(input_buffer)[0] != '}')) {
- goto fail; /* expected end of object */
- }
-
-success:
- input_buffer->depth--;
-
- if (head != NULL) {
- head->prev = current_item;
- }
-
- item->type = cJSON_Object;
- item->child = head;
-
- input_buffer->offset++;
- return true;
-
-fail:
- if (head != NULL) {
- cJSON_Delete(head);
- }
-
- return false;
-}
-
-/* Render an object to text. */
-static cJSON_bool print_object(const cJSON *const item,
- printbuffer *const output_buffer) {
- unsigned char *output_pointer = NULL;
- size_t length = 0;
- cJSON *current_item = item->child;
-
- if (output_buffer == NULL) {
- return false;
- }
-
- /* Compose the output: */
- length = (size_t)(output_buffer->format ? 2 : 1); /* fmt: {\n */
- output_pointer = ensure(output_buffer, length + 1);
- if (output_pointer == NULL) {
- return false;
- }
-
- *output_pointer++ = '{';
- output_buffer->depth++;
- if (output_buffer->format) {
- *output_pointer++ = '\n';
- }
- output_buffer->offset += length;
-
- while (current_item) {
- if (output_buffer->format) {
- size_t i;
- output_pointer =
- ensure(output_buffer, output_buffer->depth);
- if (output_pointer == NULL) {
- return false;
- }
- for (i = 0; i < output_buffer->depth; i++) {
- *output_pointer++ = '\t';
- }
- output_buffer->offset += output_buffer->depth;
- }
-
- /* print key */
- if (!print_string_ptr((unsigned char *)current_item->string,
- output_buffer)) {
- return false;
- }
- update_offset(output_buffer);
-
- length = (size_t)(output_buffer->format ? 2 : 1);
- output_pointer = ensure(output_buffer, length);
- if (output_pointer == NULL) {
- return false;
- }
- *output_pointer++ = ':';
- if (output_buffer->format) {
- *output_pointer++ = '\t';
- }
- output_buffer->offset += length;
-
- /* print value */
- if (!print_value(current_item, output_buffer)) {
- return false;
- }
- update_offset(output_buffer);
-
- /* print comma if not last */
- length = ((size_t)(output_buffer->format ? 1 : 0) +
- (size_t)(current_item->next ? 1 : 0));
- output_pointer = ensure(output_buffer, length + 1);
- if (output_pointer == NULL) {
- return false;
- }
- if (current_item->next) {
- *output_pointer++ = ',';
- }
-
- if (output_buffer->format) {
- *output_pointer++ = '\n';
- }
- *output_pointer = '\0';
- output_buffer->offset += length;
-
- current_item = current_item->next;
- }
-
- output_pointer =
- ensure(output_buffer,
- output_buffer->format ? (output_buffer->depth + 1) : 2);
- if (output_pointer == NULL) {
- return false;
- }
- if (output_buffer->format) {
- size_t i;
- for (i = 0; i < (output_buffer->depth - 1); i++) {
- *output_pointer++ = '\t';
- }
- }
- *output_pointer++ = '}';
- *output_pointer = '\0';
- output_buffer->depth--;
-
- return true;
-}
-
-/* Get Array size/item / object item. */
-CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) {
- cJSON *child = NULL;
- size_t size = 0;
-
- if (array == NULL) {
- return 0;
- }
-
- child = array->child;
-
- while (child != NULL) {
- size++;
- child = child->next;
- }
-
- /* FIXME: Can overflow here. Cannot be fixed without breaking the API */
-
- return (int)size;
-}
-
-static cJSON *get_array_item(const cJSON *array, size_t index) {
- cJSON *current_child = NULL;
-
- if (array == NULL) {
- return NULL;
- }
-
- current_child = array->child;
- while ((current_child != NULL) && (index > 0)) {
- index--;
- current_child = current_child->next;
- }
-
- return current_child;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) {
- if (index < 0) {
- return NULL;
- }
-
- return get_array_item(array, (size_t)index);
-}
-
-static cJSON *get_object_item(const cJSON *const object,
- const char *const name,
- const cJSON_bool case_sensitive) {
- cJSON *current_element = NULL;
-
- if ((object == NULL) || (name == NULL)) {
- return NULL;
- }
-
- current_element = object->child;
- if (case_sensitive) {
- while ((current_element != NULL) &&
- (current_element->string != NULL) &&
- (strcmp(name, current_element->string) != 0)) {
- current_element = current_element->next;
- }
- } else {
- while ((current_element != NULL) &&
- (case_insensitive_strcmp(
- (const unsigned char *)name,
- (const unsigned char *)(current_element->string)) !=
- 0)) {
- current_element = current_element->next;
- }
- }
-
- if ((current_element == NULL) || (current_element->string == NULL)) {
- return NULL;
- }
-
- return current_element;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_GetObjectItem(const cJSON *const object, const char *const string) {
- return get_object_item(object, string, false);
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
- const char *const string) {
- return get_object_item(object, string, true);
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_HasObjectItem(const cJSON *object, const char *string) {
- return cJSON_GetObjectItem(object, string) ? 1 : 0;
-}
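-
-/* Illustrative sketch (not part of the original source): the default lookup
- * is case insensitive, while the CaseSensitive variant matches keys exactly:
- *
- *     cJSON *root = cJSON_Parse("{\"Name\":\"kafka\"}");
- *     cJSON *a = cJSON_GetObjectItem(root, "name");              (found)
- *     cJSON *b = cJSON_GetObjectItemCaseSensitive(root, "name"); (NULL)
- *     if (cJSON_IsString(a)) {
- *             printf("%s\n", cJSON_GetStringValue(a));
- *     }
- *     cJSON_Delete(root);
- */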
-
-/* Utility for array list handling. */
-static void suffix_object(cJSON *prev, cJSON *item) {
- prev->next = item;
- item->prev = prev;
-}
-
-/* Utility for handling references. */
-static cJSON *create_reference(const cJSON *item,
- const internal_hooks *const hooks) {
- cJSON *reference = NULL;
- if (item == NULL) {
- return NULL;
- }
-
- reference = cJSON_New_Item(hooks);
- if (reference == NULL) {
- return NULL;
- }
-
- memcpy(reference, item, sizeof(cJSON));
- reference->string = NULL;
- reference->type |= cJSON_IsReference;
- reference->next = reference->prev = NULL;
- return reference;
-}
-
-static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) {
- cJSON *child = NULL;
-
- if ((item == NULL) || (array == NULL) || (array == item)) {
- return false;
- }
-
- child = array->child;
-        /*
-         * To find the last item in the array quickly, the first child's
-         * prev pointer always points at the last item.
-         */
- if (child == NULL) {
- /* list is empty, start new one */
- array->child = item;
- item->prev = item;
- item->next = NULL;
- } else {
- /* append to the end */
- if (child->prev) {
- suffix_object(child->prev, item);
- array->child->prev = item;
- }
- }
-
- return true;
-}
-
-/* Add item to array/object. */
-CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) {
- return add_item_to_array(array, item);
-}
-
-#if defined(__clang__) || \
- (defined(__GNUC__) && \
- ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
-#pragma GCC diagnostic push
-#endif
-#ifdef __GNUC__
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif
-/* helper function to cast away const */
-static void *cast_away_const(const void *string) {
- return (void *)string;
-}
-#if defined(__clang__) || \
- (defined(__GNUC__) && \
- ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
-#pragma GCC diagnostic pop
-#endif
-
-
-static cJSON_bool add_item_to_object(cJSON *const object,
- const char *const string,
- cJSON *const item,
- const internal_hooks *const hooks,
- const cJSON_bool constant_key) {
- char *new_key = NULL;
- int new_type = cJSON_Invalid;
-
- if ((object == NULL) || (string == NULL) || (item == NULL) ||
- (object == item)) {
- return false;
- }
-
- if (constant_key) {
- new_key = (char *)cast_away_const(string);
- new_type = item->type | cJSON_StringIsConst;
- } else {
- new_key =
- (char *)cJSON_strdup((const unsigned char *)string, hooks);
- if (new_key == NULL) {
- return false;
- }
-
- new_type = item->type & ~cJSON_StringIsConst;
- }
-
- if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) {
- hooks->deallocate(item->string);
- }
-
- item->string = new_key;
- item->type = new_type;
-
- return add_item_to_array(object, item);
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) {
- return add_item_to_object(object, string, item, &global_hooks, false);
-}
-
-/* Add an item to an object with constant string as key */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) {
- return add_item_to_object(object, string, item, &global_hooks, true);
-}
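-
-/* Illustrative sketch (not part of the original source):
- * cJSON_AddItemToObject() takes ownership of the item, so it is freed
- * together with the object. The CS variant avoids copying the key but
- * requires a key that outlives the object, e.g. a string literal:
- *
- *     cJSON *root = cJSON_CreateObject();
- *     cJSON_AddItemToObject(root, "broker", cJSON_CreateString("localhost"));
- *     cJSON_AddItemToObjectCS(root, "port", cJSON_CreateNumber(9092));
- *     cJSON_Delete(root);   (also frees both added items)
- */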
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) {
- if (array == NULL) {
- return false;
- }
-
- return add_item_to_array(array, create_reference(item, &global_hooks));
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) {
- if ((object == NULL) || (string == NULL)) {
- return false;
- }
-
- return add_item_to_object(object, string,
- create_reference(item, &global_hooks),
- &global_hooks, false);
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddNullToObject(cJSON *const object, const char *const name) {
- cJSON *null = cJSON_CreateNull();
- if (add_item_to_object(object, name, null, &global_hooks, false)) {
- return null;
- }
-
- cJSON_Delete(null);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddTrueToObject(cJSON *const object, const char *const name) {
- cJSON *true_item = cJSON_CreateTrue();
- if (add_item_to_object(object, name, true_item, &global_hooks, false)) {
- return true_item;
- }
-
- cJSON_Delete(true_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddFalseToObject(cJSON *const object, const char *const name) {
- cJSON *false_item = cJSON_CreateFalse();
- if (add_item_to_object(object, name, false_item, &global_hooks,
- false)) {
- return false_item;
- }
-
- cJSON_Delete(false_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddBoolToObject(cJSON *const object,
- const char *const name,
- const cJSON_bool boolean) {
- cJSON *bool_item = cJSON_CreateBool(boolean);
- if (add_item_to_object(object, name, bool_item, &global_hooks, false)) {
- return bool_item;
- }
-
- cJSON_Delete(bool_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddNumberToObject(cJSON *const object,
- const char *const name,
- const double number) {
- cJSON *number_item = cJSON_CreateNumber(number);
- if (add_item_to_object(object, name, number_item, &global_hooks,
- false)) {
- return number_item;
- }
-
- cJSON_Delete(number_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddStringToObject(cJSON *const object,
- const char *const name,
- const char *const string) {
- cJSON *string_item = cJSON_CreateString(string);
- if (add_item_to_object(object, name, string_item, &global_hooks,
- false)) {
- return string_item;
- }
-
- cJSON_Delete(string_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddRawToObject(cJSON *const object,
- const char *const name,
- const char *const raw) {
- cJSON *raw_item = cJSON_CreateRaw(raw);
- if (add_item_to_object(object, name, raw_item, &global_hooks, false)) {
- return raw_item;
- }
-
- cJSON_Delete(raw_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddObjectToObject(cJSON *const object, const char *const name) {
- cJSON *object_item = cJSON_CreateObject();
- if (add_item_to_object(object, name, object_item, &global_hooks,
- false)) {
- return object_item;
- }
-
- cJSON_Delete(object_item);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_AddArrayToObject(cJSON *const object, const char *const name) {
- cJSON *array = cJSON_CreateArray();
- if (add_item_to_object(object, name, array, &global_hooks, false)) {
- return array;
- }
-
- cJSON_Delete(array);
- return NULL;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item) {
- if ((parent == NULL) || (item == NULL)) {
- return NULL;
- }
-
- if (item != parent->child) {
- /* not the first element */
- item->prev->next = item->next;
- }
- if (item->next != NULL) {
- /* not the last element */
- item->next->prev = item->prev;
- }
-
- if (item == parent->child) {
- /* first element */
- parent->child = item->next;
- } else if (item->next == NULL) {
- /* last element */
- parent->child->prev = item->prev;
- }
-
- /* make sure the detached item doesn't point anywhere anymore */
- item->prev = NULL;
- item->next = NULL;
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) {
- if (which < 0) {
- return NULL;
- }
-
- return cJSON_DetachItemViaPointer(array,
- get_array_item(array, (size_t)which));
-}
-
-CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) {
- cJSON_Delete(cJSON_DetachItemFromArray(array, which));
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_DetachItemFromObject(cJSON *object, const char *string) {
- cJSON *to_detach = cJSON_GetObjectItem(object, string);
-
- return cJSON_DetachItemViaPointer(object, to_detach);
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) {
- cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string);
-
- return cJSON_DetachItemViaPointer(object, to_detach);
-}
-
-CJSON_PUBLIC(void)
-cJSON_DeleteItemFromObject(cJSON *object, const char *string) {
- cJSON_Delete(cJSON_DetachItemFromObject(object, string));
-}
-
-CJSON_PUBLIC(void)
-cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) {
- cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string));
-}
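-
-/* Illustrative sketch (not part of the original source; root, "keep-me" and
- * "drop-me" are hypothetical names): Detach unlinks an item and hands
- * ownership back to the caller, while Delete unlinks and frees in one call:
- *
- *     cJSON *kept = cJSON_DetachItemFromObject(root, "keep-me");
- *     cJSON_DeleteItemFromObject(root, "drop-me");
- *     ...
- *     cJSON_Delete(kept);   (the caller now owns the detached item)
- */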
-
-/* Insert or replace array/object items with new ones. */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) {
- cJSON *after_inserted = NULL;
-
- if (which < 0) {
- return false;
- }
-
- after_inserted = get_array_item(array, (size_t)which);
- if (after_inserted == NULL) {
- return add_item_to_array(array, newitem);
- }
-
- newitem->next = after_inserted;
- newitem->prev = after_inserted->prev;
- after_inserted->prev = newitem;
- if (after_inserted == array->child) {
- array->child = newitem;
- } else {
- newitem->prev->next = newitem;
- }
- return true;
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemViaPointer(cJSON *const parent,
- cJSON *const item,
- cJSON *replacement) {
- if ((parent == NULL) || (replacement == NULL) || (item == NULL)) {
- return false;
- }
-
- if (replacement == item) {
- return true;
- }
-
- replacement->next = item->next;
- replacement->prev = item->prev;
-
- if (replacement->next != NULL) {
- replacement->next->prev = replacement;
- }
- if (parent->child == item) {
- if (parent->child->prev == parent->child) {
- replacement->prev = replacement;
- }
- parent->child = replacement;
-        } else {
-                /*
-                 * The replaced item was not the parent's first child, so fix
-                 * up the sibling links here; the first child's prev pointer
-                 * is what lets us reach the last item quickly.
-                 */
- if (replacement->prev != NULL) {
- replacement->prev->next = replacement;
- }
- if (replacement->next == NULL) {
- parent->child->prev = replacement;
- }
- }
-
- item->next = NULL;
- item->prev = NULL;
- cJSON_Delete(item);
-
- return true;
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) {
- if (which < 0) {
- return false;
- }
-
- return cJSON_ReplaceItemViaPointer(
- array, get_array_item(array, (size_t)which), newitem);
-}
-
-static cJSON_bool replace_item_in_object(cJSON *object,
- const char *string,
- cJSON *replacement,
- cJSON_bool case_sensitive) {
- if ((replacement == NULL) || (string == NULL)) {
- return false;
- }
-
- /* replace the name in the replacement */
- if (!(replacement->type & cJSON_StringIsConst) &&
- (replacement->string != NULL)) {
- cJSON_free(replacement->string);
- }
- replacement->string =
- (char *)cJSON_strdup((const unsigned char *)string, &global_hooks);
- replacement->type &= ~cJSON_StringIsConst;
-
- return cJSON_ReplaceItemViaPointer(
- object, get_object_item(object, string, case_sensitive),
- replacement);
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) {
- return replace_item_in_object(object, string, newitem, false);
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
- const char *string,
- cJSON *newitem) {
- return replace_item_in_object(object, string, newitem, true);
-}
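-
-/* Illustrative sketch (not part of the original source): replacing a member
- * frees the old item and transfers ownership of the replacement:
- *
- *     cJSON *cfg = cJSON_Parse("{\"retries\":1}");
- *     if (cfg != NULL) {
- *             cJSON_ReplaceItemInObject(cfg, "retries",
- *                                       cJSON_CreateNumber(5));
- *             cJSON_Delete(cfg);
- *     }
- */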
-
-/* Create basic types: */
-CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_NULL;
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_True;
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_False;
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = boolean ? cJSON_True : cJSON_False;
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_Number;
- item->valuedouble = num;
-
- /* use saturation in case of overflow */
- if (num >= INT_MAX) {
- item->valueint = INT_MAX;
- } else if (num <= (double)INT_MIN) {
- item->valueint = INT_MIN;
- } else {
- item->valueint = (int)num;
- }
- }
-
- return item;
-}
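-
-/* Illustrative sketch (not part of the original source): valueint saturates
- * instead of overflowing, while valuedouble keeps the exact value; the
- * cJSON_SetNumberValue() macro updates both fields consistently:
- *
- *     cJSON *n = cJSON_CreateNumber(1e12);
- *     if (n != NULL) {
- *             double d = n->valuedouble;      (exact: 1e12)
- *             int i = n->valueint;            (saturated: INT_MAX)
- *             cJSON_SetNumberValue(n, 3.0);   (updates both fields)
- *     }
- *     cJSON_Delete(n);
- */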
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_String;
- item->valuestring = (char *)cJSON_strdup(
- (const unsigned char *)string, &global_hooks);
- if (!item->valuestring) {
- cJSON_Delete(item);
- return NULL;
- }
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item != NULL) {
- item->type = cJSON_String | cJSON_IsReference;
- item->valuestring = (char *)cast_away_const(string);
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item != NULL) {
- item->type = cJSON_Object | cJSON_IsReference;
- item->child = (cJSON *)cast_away_const(child);
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item != NULL) {
- item->type = cJSON_Array | cJSON_IsReference;
- item->child = (cJSON *)cast_away_const(child);
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_Raw;
- item->valuestring = (char *)cJSON_strdup(
- (const unsigned char *)raw, &global_hooks);
- if (!item->valuestring) {
- cJSON_Delete(item);
- return NULL;
- }
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_Array;
- }
-
- return item;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) {
- cJSON *item = cJSON_New_Item(&global_hooks);
- if (item) {
- item->type = cJSON_Object;
- }
-
- return item;
-}
-
-/* Create Arrays: */
-CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) {
- size_t i = 0;
- cJSON *n = NULL;
- cJSON *p = NULL;
- cJSON *a = NULL;
-
- if ((count < 0) || (numbers == NULL)) {
- return NULL;
- }
-
- a = cJSON_CreateArray();
- for (i = 0; a && (i < (size_t)count); i++) {
- n = cJSON_CreateNumber(numbers[i]);
- if (!n) {
- cJSON_Delete(a);
- return NULL;
- }
- if (!i) {
- a->child = n;
- } else {
- suffix_object(p, n);
- }
- p = n;
- }
-        if (a && a->child) {
-                a->child->prev = n;
-        }
-
- return a;
-}
-
-CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) {
- size_t i = 0;
- cJSON *n = NULL;
- cJSON *p = NULL;
- cJSON *a = NULL;
-
- if ((count < 0) || (numbers == NULL)) {
- return NULL;
- }
-
- a = cJSON_CreateArray();
-
- for (i = 0; a && (i < (size_t)count); i++) {
- n = cJSON_CreateNumber((double)numbers[i]);
- if (!n) {
- cJSON_Delete(a);
- return NULL;
- }
- if (!i) {
- a->child = n;
- } else {
- suffix_object(p, n);
- }
- p = n;
- }
-        if (a && a->child) {
-                a->child->prev = n;
-        }
-
- return a;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_CreateDoubleArray(const double *numbers, int count) {
- size_t i = 0;
- cJSON *n = NULL;
- cJSON *p = NULL;
- cJSON *a = NULL;
-
- if ((count < 0) || (numbers == NULL)) {
- return NULL;
- }
-
- a = cJSON_CreateArray();
-
- for (i = 0; a && (i < (size_t)count); i++) {
- n = cJSON_CreateNumber(numbers[i]);
- if (!n) {
- cJSON_Delete(a);
- return NULL;
- }
- if (!i) {
- a->child = n;
- } else {
- suffix_object(p, n);
- }
- p = n;
- }
-        if (a && a->child) {
-                a->child->prev = n;
-        }
-
- return a;
-}
-
-CJSON_PUBLIC(cJSON *)
-cJSON_CreateStringArray(const char *const *strings, int count) {
- size_t i = 0;
- cJSON *n = NULL;
- cJSON *p = NULL;
- cJSON *a = NULL;
-
- if ((count < 0) || (strings == NULL)) {
- return NULL;
- }
-
- a = cJSON_CreateArray();
-
- for (i = 0; a && (i < (size_t)count); i++) {
- n = cJSON_CreateString(strings[i]);
- if (!n) {
- cJSON_Delete(a);
- return NULL;
- }
- if (!i) {
- a->child = n;
- } else {
- suffix_object(p, n);
- }
- p = n;
- }
-        if (a && a->child) {
-                a->child->prev = n;
-        }
-
- return a;
-}
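-
-/* Illustrative sketch (not part of the original source): count must not
- * exceed the number of elements in the source buffer, and the values are
- * copied, so the source may go out of scope afterwards:
- *
- *     const int ports[] = {9092, 9093, 9094};
- *     cJSON *arr = cJSON_CreateIntArray(ports, 3);
- *     (renders as [9092,9093,9094])
- *     cJSON_Delete(arr);
- */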
-
-/* Duplication */
-CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) {
- cJSON *newitem = NULL;
- cJSON *child = NULL;
- cJSON *next = NULL;
- cJSON *newchild = NULL;
-
- /* Bail on bad ptr */
- if (!item) {
- goto fail;
- }
- /* Create new item */
- newitem = cJSON_New_Item(&global_hooks);
- if (!newitem) {
- goto fail;
- }
- /* Copy over all vars */
- newitem->type = item->type & (~cJSON_IsReference);
- newitem->valueint = item->valueint;
- newitem->valuedouble = item->valuedouble;
- if (item->valuestring) {
- newitem->valuestring = (char *)cJSON_strdup(
- (unsigned char *)item->valuestring, &global_hooks);
- if (!newitem->valuestring) {
- goto fail;
- }
- }
- if (item->string) {
- newitem->string =
- (item->type & cJSON_StringIsConst)
- ? item->string
- : (char *)cJSON_strdup((unsigned char *)item->string,
- &global_hooks);
- if (!newitem->string) {
- goto fail;
- }
- }
- /* If non-recursive, then we're done! */
- if (!recurse) {
- return newitem;
- }
- /* Walk the ->next chain for the child. */
- child = item->child;
- while (child != NULL) {
- newchild = cJSON_Duplicate(
- child, true); /* Duplicate (with recurse) each item in the
- ->next chain */
- if (!newchild) {
- goto fail;
- }
- if (next != NULL) {
- /* If newitem->child already set, then crosswire ->prev
- * and ->next and move on */
- next->next = newchild;
- newchild->prev = next;
- next = newchild;
- } else {
- /* Set newitem->child and move to it */
- newitem->child = newchild;
- next = newchild;
- }
- child = child->next;
- }
- if (newitem && newitem->child) {
- newitem->child->prev = newchild;
- }
-
- return newitem;
-
-fail:
- if (newitem != NULL) {
- cJSON_Delete(newitem);
- }
-
- return NULL;
-}
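-
-/* Illustrative sketch (not part of the original source; "original" is a
- * hypothetical, previously built item): a recursive duplicate is a deep,
- * independent copy, so deleting the original leaves the copy intact:
- *
- *     cJSON *copy = cJSON_Duplicate(original, 1);
- *     cJSON_Delete(original);
- *     ...   (copy is still valid and must be deleted separately)
- *     cJSON_Delete(copy);
- */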
-
-static void skip_oneline_comment(char **input) {
- *input += static_strlen("//");
-
- for (; (*input)[0] != '\0'; ++(*input)) {
- if ((*input)[0] == '\n') {
- *input += static_strlen("\n");
- return;
- }
- }
-}
-
-static void skip_multiline_comment(char **input) {
- *input += static_strlen("/*");
-
- for (; (*input)[0] != '\0'; ++(*input)) {
- if (((*input)[0] == '*') && ((*input)[1] == '/')) {
- *input += static_strlen("*/");
- return;
- }
- }
-}
-
-static void minify_string(char **input, char **output) {
- (*output)[0] = (*input)[0];
- *input += static_strlen("\"");
- *output += static_strlen("\"");
-
-
- for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) {
- (*output)[0] = (*input)[0];
-
- if ((*input)[0] == '\"') {
- (*output)[0] = '\"';
- *input += static_strlen("\"");
- *output += static_strlen("\"");
- return;
- } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) {
- (*output)[1] = (*input)[1];
- *input += static_strlen("\"");
- *output += static_strlen("\"");
- }
- }
-}
-
-CJSON_PUBLIC(void) cJSON_Minify(char *json) {
- char *into = json;
-
- if (json == NULL) {
- return;
- }
-
- while (json[0] != '\0') {
- switch (json[0]) {
- case ' ':
- case '\t':
- case '\r':
- case '\n':
- json++;
- break;
-
- case '/':
- if (json[1] == '/') {
- skip_oneline_comment(&json);
- } else if (json[1] == '*') {
- skip_multiline_comment(&json);
- } else {
- json++;
- }
- break;
-
- case '\"':
- minify_string(&json, (char **)&into);
- break;
-
- default:
- into[0] = json[0];
- json++;
- into++;
- }
- }
-
- /* and null-terminate. */
- *into = '\0';
-}
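-
-/* Illustrative sketch (not part of the original source): minification works
- * in place, so the input must be writable (an array, not a pointer to a
- * string literal):
- *
- *     char json[] = "{ \"a\" : 1 ,\n \"b\" : 2 }";
- *     cJSON_Minify(json);
- *     (json now holds {"a":1,"b":2})
- */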
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_Invalid;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_False;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xff) == cJSON_True;
-}
-
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & (cJSON_True | cJSON_False)) != 0;
-}
-CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_NULL;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_Number;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_String;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_Array;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_Object;
-}
-
-CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item) {
- if (item == NULL) {
- return false;
- }
-
- return (item->type & 0xFF) == cJSON_Raw;
-}
-
-CJSON_PUBLIC(cJSON_bool)
-cJSON_Compare(const cJSON *const a,
- const cJSON *const b,
- const cJSON_bool case_sensitive) {
- if ((a == NULL) || (b == NULL) ||
- ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) {
- return false;
- }
-
- /* check if type is valid */
- switch (a->type & 0xFF) {
- case cJSON_False:
- case cJSON_True:
- case cJSON_NULL:
- case cJSON_Number:
- case cJSON_String:
- case cJSON_Raw:
- case cJSON_Array:
- case cJSON_Object:
- break;
-
- default:
- return false;
- }
-
- /* identical objects are equal */
- if (a == b) {
- return true;
- }
-
- switch (a->type & 0xFF) {
-        /* in these cases an equal type is enough */
- case cJSON_False:
- case cJSON_True:
- case cJSON_NULL:
- return true;
-
- case cJSON_Number:
- if (compare_double(a->valuedouble, b->valuedouble)) {
- return true;
- }
- return false;
-
- case cJSON_String:
- case cJSON_Raw:
- if ((a->valuestring == NULL) || (b->valuestring == NULL)) {
- return false;
- }
- if (strcmp(a->valuestring, b->valuestring) == 0) {
- return true;
- }
-
- return false;
-
- case cJSON_Array: {
- cJSON *a_element = a->child;
- cJSON *b_element = b->child;
-
- for (; (a_element != NULL) && (b_element != NULL);) {
- if (!cJSON_Compare(a_element, b_element,
- case_sensitive)) {
- return false;
- }
-
- a_element = a_element->next;
- b_element = b_element->next;
- }
-
- /* one of the arrays is longer than the other */
- if (a_element != b_element) {
- return false;
- }
-
- return true;
- }
-
- case cJSON_Object: {
- cJSON *a_element = NULL;
- cJSON *b_element = NULL;
- cJSON_ArrayForEach(a_element, a) {
- /* TODO This has O(n^2) runtime, which is horrible! */
- b_element = get_object_item(b, a_element->string,
- case_sensitive);
- if (b_element == NULL) {
- return false;
- }
-
- if (!cJSON_Compare(a_element, b_element,
- case_sensitive)) {
- return false;
- }
- }
-
-                /* check the other direction too, b against a, so that a
-                 * being a subset of b does not compare as equal
-                 * TODO: Do this the proper way, this is just a fix for now */
- cJSON_ArrayForEach(b_element, b) {
- a_element = get_object_item(a, b_element->string,
- case_sensitive);
- if (a_element == NULL) {
- return false;
- }
-
- if (!cJSON_Compare(b_element, a_element,
- case_sensitive)) {
- return false;
- }
- }
-
- return true;
- }
-
- default:
- return false;
- }
-}
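-
-/* Illustrative sketch (not part of the original source): comparison is
- * structural, so object member order does not matter; case_sensitive
- * selects how keys are matched:
- *
- *     cJSON *a = cJSON_Parse("{\"x\":1,\"y\":2}");
- *     cJSON *b = cJSON_Parse("{\"y\":2,\"x\":1}");
- *     cJSON_bool equal = cJSON_Compare(a, b, 1);   (true)
- *     cJSON_Delete(a);
- *     cJSON_Delete(b);
- */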
-
-CJSON_PUBLIC(void *) cJSON_malloc(size_t size) {
- return global_hooks.allocate(size);
-}
-
-CJSON_PUBLIC(void) cJSON_free(void *object) {
- global_hooks.deallocate(object);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h b/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h
deleted file mode 100644
index 1b5655c7b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-*/
-
-#ifndef cJSON__h
-#define cJSON__h
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if !defined(__WINDOWS__) && \
- (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
-#define __WINDOWS__
-#endif
-
-#ifdef __WINDOWS__
-
-/* When compiling for windows, we specify a specific calling convention to avoid
-issues where we are being called from a project with a different default calling
-convention. For windows you have 3 define options:
-
-CJSON_HIDE_SYMBOLS   - Define this in the case where you don't want to ever
-                       dllexport symbols
-CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport
-                       symbols (default)
-CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbols
-
-For *nix builds that support the visibility attribute, you can define similar
-behavior by setting default visibility to hidden, by adding
-    -fvisibility=hidden (for gcc) or
-    -xldscope=hidden (for sun cc)
-to CFLAGS, and then using the CJSON_API_VISIBILITY flag to "export" the same
-symbols the way CJSON_EXPORT_SYMBOLS does
-
-*/
-
-#define CJSON_CDECL __cdecl
-#define CJSON_STDCALL __stdcall
-
-/* export symbols by default, this is necessary for copy-pasting the C and
- * header files */
-#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \
- !defined(CJSON_EXPORT_SYMBOLS)
-#define CJSON_EXPORT_SYMBOLS
-#endif
-
-#if defined(CJSON_HIDE_SYMBOLS)
-#define CJSON_PUBLIC(type) type CJSON_STDCALL
-#elif defined(CJSON_EXPORT_SYMBOLS)
-#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
-#elif defined(CJSON_IMPORT_SYMBOLS)
-#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
-#endif
-#else /* !__WINDOWS__ */
-#define CJSON_CDECL
-#define CJSON_STDCALL
-
-#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \
- defined(CJSON_API_VISIBILITY)
-#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
-#else
-#define CJSON_PUBLIC(type) type
-#endif
-#endif
-
-/* project version */
-#define CJSON_VERSION_MAJOR 1
-#define CJSON_VERSION_MINOR 7
-#define CJSON_VERSION_PATCH 14
-
-#include <stddef.h>
-
-/* cJSON Types: */
-#define cJSON_Invalid (0)
-#define cJSON_False (1 << 0)
-#define cJSON_True (1 << 1)
-#define cJSON_NULL (1 << 2)
-#define cJSON_Number (1 << 3)
-#define cJSON_String (1 << 4)
-#define cJSON_Array (1 << 5)
-#define cJSON_Object (1 << 6)
-#define cJSON_Raw (1 << 7) /* raw json */
-
-#define cJSON_IsReference 256
-#define cJSON_StringIsConst 512
-
-/* The cJSON structure: */
-typedef struct cJSON {
- /* next/prev allow you to walk array/object chains. Alternatively, use
- * GetArraySize/GetArrayItem/GetObjectItem */
- struct cJSON *next;
- struct cJSON *prev;
- /* An array or object item will have a child pointer pointing to a chain
- * of the items in the array/object. */
- struct cJSON *child;
-
- /* The type of the item, as above. */
- int type;
-
-        /* The item's string, if type==cJSON_String or type == cJSON_Raw */
- char *valuestring;
- /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead
- */
- int valueint;
- /* The item's number, if type==cJSON_Number */
- double valuedouble;
-
- /* The item's name string, if this item is the child of, or is in the
- * list of subitems of an object. */
- char *string;
-} cJSON;
-
-typedef struct cJSON_Hooks {
- /* malloc/free are CDECL on Windows regardless of the default calling
- * convention of the compiler, so ensure the hooks allow passing those
- * functions directly. */
- void *(CJSON_CDECL *malloc_fn)(size_t sz);
- void(CJSON_CDECL *free_fn)(void *ptr);
-} cJSON_Hooks;
-
-typedef int cJSON_bool;
-
-/* Limits how deeply nested arrays/objects can be before cJSON refuses to parse
- * them. This is to prevent stack overflows. */
-#ifndef CJSON_NESTING_LIMIT
-#define CJSON_NESTING_LIMIT 1000
-#endif
-
-/* returns the version of cJSON as a string */
-CJSON_PUBLIC(const char *) cJSON_Version(void);
-
-/* Supply malloc, realloc and free functions to cJSON */
-CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks);
-
-/* Memory Management: the caller is always responsible for freeing the results
- * of all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with
- * stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The
- * exception is cJSON_PrintPreallocated, where the caller has full
- * responsibility for the buffer. */
-/* Supply a block of JSON, and this returns a cJSON object you can interrogate.
- */
-CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
-CJSON_PUBLIC(cJSON *)
-cJSON_ParseWithLength(const char *value, size_t buffer_length);
-/* ParseWithOpts allows you to require (and check) that the JSON is null
- * terminated, and to retrieve the pointer to the final byte parsed. */
-/* If you supply a ptr in return_parse_end and parsing fails, then
- * return_parse_end will contain a pointer to the error so will match
- * cJSON_GetErrorPtr(). */
-CJSON_PUBLIC(cJSON *)
-cJSON_ParseWithOpts(const char *value,
- const char **return_parse_end,
- cJSON_bool require_null_terminated);
-CJSON_PUBLIC(cJSON *)
-cJSON_ParseWithLengthOpts(const char *value,
- size_t buffer_length,
- const char **return_parse_end,
- cJSON_bool require_null_terminated);
-
-/* Render a cJSON entity to text for transfer/storage. */
-CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
-/* Render a cJSON entity to text for transfer/storage without any formatting. */
-CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
-/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess
- * at the final size. guessing well reduces reallocation. fmt=0 gives
- * unformatted, =1 gives formatted */
-CJSON_PUBLIC(char *)
-cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
-/* Render a cJSON entity to text using a buffer already allocated in memory with
- * given length. Returns 1 on success and 0 on failure. */
-/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will
- * use, so to be safe allocate 5 bytes more than you actually need */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_PrintPreallocated(cJSON *item,
- char *buffer,
- const int length,
- const cJSON_bool format);
-/* Delete a cJSON entity and all subentities. */
-CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
-
-/* Returns the number of items in an array (or object). */
-CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
-/* Retrieve item number "index" from array "array". Returns NULL if
- * unsuccessful. */
-CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
-/* Get item "string" from object. Case insensitive. */
-CJSON_PUBLIC(cJSON *)
-cJSON_GetObjectItem(const cJSON *const object, const char *const string);
-CJSON_PUBLIC(cJSON *)
-cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
- const char *const string);
-CJSON_PUBLIC(cJSON_bool)
-cJSON_HasObjectItem(const cJSON *object, const char *string);
-/* For analysing failed parses. This returns a pointer to the parse error.
- * You'll probably need to look a few chars back to make sense of it. It is
- * set when cJSON_Parse() returns NULL and is NULL when cJSON_Parse()
- * succeeds. */
-CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
-
-/* Check item type and return its value */
-CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item);
-CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item);
-
-/* These functions check the type of an item */
-CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item);
-
-/* These calls create a cJSON item of the appropriate type. */
-CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
-CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
-CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
-CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
-CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
-CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
-/* raw json */
-CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
-CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
-CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);
-
-/* Create a string where valuestring references a string so
- * it will not be freed by cJSON_Delete */
-CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
-/* Create an object/array that only references its elements so
- * they will not be freed by cJSON_Delete */
-CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
-CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
-
-/* These utilities create an Array of count items.
- * The parameter count cannot be greater than the number of elements in the
- * source array, otherwise array access will be out of bounds. */
-CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
-CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
-CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
-CJSON_PUBLIC(cJSON *)
-cJSON_CreateStringArray(const char *const *strings, int count);
-
-/* Append item to the specified array/object. */
-CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
-/* Use this when string is definitely const (i.e. a literal, or as good as) and
- * will definitely survive the cJSON object. WARNING: when this function has
- * been used, always check that (item->type & cJSON_StringIsConst) is zero
- * before writing to `item->string` */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
-/* Append reference to item to the specified array/object. Use this when you
- * want to add an existing cJSON to a new cJSON, but don't want to corrupt your
- * existing cJSON. */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
-CJSON_PUBLIC(cJSON_bool)
-cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
-
-/* Remove/Detach items from Arrays/Objects. */
-CJSON_PUBLIC(cJSON *)
-cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item);
-CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
-CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
-CJSON_PUBLIC(cJSON *)
-cJSON_DetachItemFromObject(cJSON *object, const char *string);
-CJSON_PUBLIC(cJSON *)
-cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
-CJSON_PUBLIC(void)
-cJSON_DeleteItemFromObject(cJSON *object, const char *string);
-CJSON_PUBLIC(void)
-cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
-
-/* Update array items. */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_InsertItemInArray(
- cJSON *array,
- int which,
- cJSON *newitem); /* Shifts pre-existing items to the right. */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemViaPointer(cJSON *const parent,
- cJSON *const item,
- cJSON *replacement);
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem);
-CJSON_PUBLIC(cJSON_bool)
-cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
- const char *string,
- cJSON *newitem);
-
-/* Duplicate a cJSON item */
-CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
-/* Duplicate will create a new, identical cJSON item to the one you pass, in new
- * memory that will need to be released. With recurse!=0, it will duplicate any
- * children connected to the item.
- * The item->next and ->prev pointers are always zero on return from Duplicate.
- */
-/* Recursively compare two cJSON items for equality. If either a or b is NULL or
- * invalid, they will be considered unequal.
- * case_sensitive determines if object keys are treated case sensitive (1) or
- * case insensitive (0) */
-CJSON_PUBLIC(cJSON_bool)
-cJSON_Compare(const cJSON *const a,
- const cJSON *const b,
- const cJSON_bool case_sensitive);
-
-/* Minify a string: remove whitespace characters (such as ' ', '\t', '\r',
- * '\n') and comments. The input pointer json must not point to a read-only
- * address area, such as a string constant, but must point to a readable and
- * writable address area. */
-CJSON_PUBLIC(void) cJSON_Minify(char *json);
-
-/* Helper functions for creating and adding items to an object at the same time.
- * They return the added item or NULL on failure. */
-CJSON_PUBLIC(cJSON *)
-cJSON_AddNullToObject(cJSON *const object, const char *const name);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddTrueToObject(cJSON *const object, const char *const name);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddFalseToObject(cJSON *const object, const char *const name);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddBoolToObject(cJSON *const object,
- const char *const name,
- const cJSON_bool boolean);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddNumberToObject(cJSON *const object,
- const char *const name,
- const double number);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddStringToObject(cJSON *const object,
- const char *const name,
- const char *const string);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddRawToObject(cJSON *const object,
- const char *const name,
- const char *const raw);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddObjectToObject(cJSON *const object, const char *const name);
-CJSON_PUBLIC(cJSON *)
-cJSON_AddArrayToObject(cJSON *const object, const char *const name);
-
-/* When assigning an integer value, it needs to be propagated to valuedouble
- * too. */
-#define cJSON_SetIntValue(object, number) \
- ((object) ? (object)->valueint = (object)->valuedouble = (number) \
- : (number))
-/* helper for the cJSON_SetNumberValue macro */
-CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
-#define cJSON_SetNumberValue(object, number) \
- ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) \
- : (number))
-/* Change the valuestring of a cJSON_String object; only takes effect when the
- * type of the object is cJSON_String */
-CJSON_PUBLIC(char *)
-cJSON_SetValuestring(cJSON *object, const char *valuestring);
-
-/* Macro for iterating over an array or object */
-#define cJSON_ArrayForEach(element, array) \
- for (element = (array != NULL) ? (array)->child : NULL; \
- element != NULL; element = element->next)
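-
-/* Illustrative sketch (not from the upstream documentation; "config" is a
- * hypothetical, previously parsed object): the macro walks the child list of
- * an array or object; element->string holds the key when iterating objects:
- *
- *     cJSON *element = NULL;
- *     cJSON_ArrayForEach(element, config) {
- *             if (cJSON_IsNumber(element)) {
- *                     printf("%s=%g\n", element->string,
- *                            cJSON_GetNumberValue(element));
- *             }
- *     }
- */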
-
-/* malloc/free objects using the malloc/free functions that have been set with
- * cJSON_InitHooks */
-CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
-CJSON_PUBLIC(void) cJSON_free(void *object);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c b/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c
deleted file mode 100644
index f1a716dc6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/* Copied from http://stackoverflow.com/a/17646775/1821055
- * with the following modifications:
- * * remove test code
- * * global hw/sw initialization to be called once per process
- * * HW support is determined by configure's WITH_CRC32C_HW
- * * Windows porting (no hardware support on Windows yet)
- *
- * FIXME:
- * * Hardware support on Windows (MSVC assembler)
- * * Hardware support on ARM
- */
-
-/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
- * Copyright (C) 2013 Mark Adler
- * Version 1.1 1 Aug 2013 Mark Adler
- */
-
-/*
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the author be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Mark Adler
- madler@alumni.caltech.edu
- */
-
-/* Use hardware CRC instruction on Intel SSE 4.2 processors. This computes a
- CRC-32C, *not* the CRC-32 used by Ethernet and zip, gzip, etc. A software
- version is provided as a fall-back, as well as for speed comparisons. */
-
-/* Version history:
- 1.0 10 Feb 2013 First version
- 1.1 1 Aug 2013 Correct comments on why three crc instructions in parallel
- */
-
-#include "rd.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#ifndef _WIN32
-#include <unistd.h>
-#endif
-
-#include "rdunittest.h"
-#include "rdendian.h"
-
-#include "crc32c.h"
-
-/* CRC-32C (iSCSI) polynomial in reversed bit order. */
-#define POLY 0x82f63b78
-
-/* Table for a quadword-at-a-time software crc. */
-static uint32_t crc32c_table[8][256];
-
-/* Construct table for software CRC-32C calculation. */
-static void crc32c_init_sw(void)
-{
- uint32_t n, crc, k;
-
- for (n = 0; n < 256; n++) {
- crc = n;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
- crc32c_table[0][n] = crc;
- }
- for (n = 0; n < 256; n++) {
- crc = crc32c_table[0][n];
- for (k = 1; k < 8; k++) {
- crc = crc32c_table[0][crc & 0xff] ^ (crc >> 8);
- crc32c_table[k][n] = crc;
- }
- }
-}
-
-/* Table-driven software version as a fall-back. This is about 15 times slower
- than using the hardware instructions. This assumes little-endian integers,
- as is the case on Intel processors that the assembler code here is for. */
-static uint32_t crc32c_sw(uint32_t crci, const void *buf, size_t len)
-{
- const unsigned char *next = buf;
- uint64_t crc;
-
- crc = crci ^ 0xffffffff;
- while (len && ((uintptr_t)next & 7) != 0) {
- crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
- len--;
- }
- while (len >= 8) {
- /* Alignment-safe */
- uint64_t ncopy;
- memcpy(&ncopy, next, sizeof(ncopy));
- crc ^= le64toh(ncopy);
- crc = crc32c_table[7][crc & 0xff] ^
- crc32c_table[6][(crc >> 8) & 0xff] ^
- crc32c_table[5][(crc >> 16) & 0xff] ^
- crc32c_table[4][(crc >> 24) & 0xff] ^
- crc32c_table[3][(crc >> 32) & 0xff] ^
- crc32c_table[2][(crc >> 40) & 0xff] ^
- crc32c_table[1][(crc >> 48) & 0xff] ^
- crc32c_table[0][crc >> 56];
- next += 8;
- len -= 8;
- }
- while (len) {
- crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
- len--;
- }
- return (uint32_t)crc ^ 0xffffffff;
-}
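-
-/* Illustrative sanity check (not part of the original source; assumes
- * crc32c_init_sw() has been called first): the standard CRC-32C (Castagnoli)
- * check value for the ASCII bytes "123456789" is 0xe3069283:
- *
- *     crc32c_init_sw();
- *     uint32_t c = crc32c_sw(0, "123456789", 9);
- *     (c == 0xe3069283)
- */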
-
-
-#if WITH_CRC32C_HW
-static int sse42; /* Cached SSE42 support */
-
-/* Multiply a matrix times a vector over the Galois field of two elements,
- GF(2). Each element is a bit in an unsigned integer. mat must have at
- least as many entries as the power of two for most significant one bit in
- vec. */
-static RD_INLINE uint32_t gf2_matrix_times(uint32_t *mat, uint32_t vec)
-{
- uint32_t sum;
-
- sum = 0;
- while (vec) {
- if (vec & 1)
- sum ^= *mat;
- vec >>= 1;
- mat++;
- }
- return sum;
-}
-
-/* Multiply a matrix by itself over GF(2). Both mat and square must have 32
- rows. */
-static RD_INLINE void gf2_matrix_square(uint32_t *square, uint32_t *mat)
-{
- int n;
-
- for (n = 0; n < 32; n++)
- square[n] = gf2_matrix_times(mat, mat[n]);
-}
-
-/* Construct an operator to apply len zeros to a crc. len must be a power of
- two. If len is not a power of two, then the result is the same as for the
- largest power of two less than len. The result for len == 0 is the same as
- for len == 1. A version of this routine could be easily written for any
- len, but that is not needed for this application. */
-static void crc32c_zeros_op(uint32_t *even, size_t len)
-{
- int n;
- uint32_t row;
- uint32_t odd[32]; /* odd-power-of-two zeros operator */
-
- /* put operator for one zero bit in odd */
- odd[0] = POLY; /* CRC-32C polynomial */
- row = 1;
- for (n = 1; n < 32; n++) {
- odd[n] = row;
- row <<= 1;
- }
-
- /* put operator for two zero bits in even */
- gf2_matrix_square(even, odd);
-
- /* put operator for four zero bits in odd */
- gf2_matrix_square(odd, even);
-
- /* first square will put the operator for one zero byte (eight zero bits),
- in even -- next square puts operator for two zero bytes in odd, and so
- on, until len has been rotated down to zero */
- do {
- gf2_matrix_square(even, odd);
- len >>= 1;
- if (len == 0)
- return;
- gf2_matrix_square(odd, even);
- len >>= 1;
- } while (len);
-
- /* answer ended up in odd -- copy to even */
- for (n = 0; n < 32; n++)
- even[n] = odd[n];
-}
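-
-/* Illustrative sketch, not part of the original file: a standalone check
-   that the repeated-squaring construction above really yields the operator
-   for appending zero bytes. demo_apply() and demo_square() are local names
-   mirroring gf2_matrix_times()/gf2_matrix_square(). */
-#include <stdint.h>
-#include <stdio.h>
-
-#define DEMO_POLY 0x82f63b78
-
-static uint32_t demo_apply(const uint32_t *mat, uint32_t vec) {
-        uint32_t sum = 0;
-        for (; vec; vec >>= 1, mat++)
-                if (vec & 1)
-                        sum ^= *mat;
-        return sum;
-}
-
-static void demo_square(uint32_t *dst, const uint32_t *src) {
-        int n;
-        for (n = 0; n < 32; n++)
-                dst[n] = demo_apply(src, src[n]);
-}
-
-int main(void) {
-        uint32_t odd[32], even[32];
-        uint32_t reg = 0xdeadbeef, expect = 0xdeadbeef;
-        int n, i, k;
-        size_t len = 4;                  /* power of two, as required above */
-
-        odd[0] = DEMO_POLY;              /* operator for one zero bit */
-        for (n = 1; n < 32; n++)
-                odd[n] = 1u << (n - 1);
-        demo_square(even, odd);          /* two zero bits */
-        demo_square(odd, even);          /* four zero bits */
-        do {                             /* same doubling loop as above */
-                demo_square(even, odd);
-                len >>= 1;
-                if (len == 0)
-                        break;
-                demo_square(odd, even);
-                len >>= 1;
-        } while (len);
-        /* for len == 4 the doubling ends with the operator in `even` */
-
-        for (i = 0; i < 4; i++)          /* feed 4 zero bytes bit-serially */
-                for (k = 0; k < 8; k++)
-                        expect = expect & 1 ? (expect >> 1) ^ DEMO_POLY
-                                            : expect >> 1;
-
-        printf("%s\n", demo_apply(even, reg) == expect ? "match" : "MISMATCH");
-        return 0;
-}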
-
-/* Take a length and build four lookup tables for applying the zeros operator
- for that length, byte-by-byte on the operand. */
-static void crc32c_zeros(uint32_t zeros[][256], size_t len)
-{
- uint32_t n;
- uint32_t op[32];
-
- crc32c_zeros_op(op, len);
- for (n = 0; n < 256; n++) {
- zeros[0][n] = gf2_matrix_times(op, n);
- zeros[1][n] = gf2_matrix_times(op, n << 8);
- zeros[2][n] = gf2_matrix_times(op, n << 16);
- zeros[3][n] = gf2_matrix_times(op, n << 24);
- }
-}
-
-/* Apply the zeros operator table to crc. */
-static RD_INLINE uint32_t crc32c_shift(uint32_t zeros[][256], uint32_t crc)
-{
- return zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
- zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24];
-}
-
-/* Block sizes for three-way parallel crc computation. LONG and SHORT must
- both be powers of two. The associated string constants must be set
- accordingly, for use in constructing the assembler instructions. */
-#define LONG 8192
-#define LONGx1 "8192"
-#define LONGx2 "16384"
-#define SHORT 256
-#define SHORTx1 "256"
-#define SHORTx2 "512"
-
-/* Tables for hardware crc that shift a crc by LONG and SHORT zeros. */
-static uint32_t crc32c_long[4][256];
-static uint32_t crc32c_short[4][256];
-
-/* Initialize tables for shifting crcs. */
-static void crc32c_init_hw(void)
-{
- crc32c_zeros(crc32c_long, LONG);
- crc32c_zeros(crc32c_short, SHORT);
-}
-
-/* Compute CRC-32C using the Intel hardware instruction. */
-static uint32_t crc32c_hw(uint32_t crc, const void *buf, size_t len)
-{
- const unsigned char *next = buf;
- const unsigned char *end;
- uint64_t crc0, crc1, crc2; /* need to be 64 bits for crc32q */
-
- /* pre-process the crc */
- crc0 = crc ^ 0xffffffff;
-
- /* compute the crc for up to seven leading bytes to bring the data pointer
- to an eight-byte boundary */
- while (len && ((uintptr_t)next & 7) != 0) {
- __asm__("crc32b\t" "(%1), %0"
- : "=r"(crc0)
- : "r"(next), "0"(crc0));
- next++;
- len--;
- }
-
- /* compute the crc on sets of LONG*3 bytes, executing three independent crc
- instructions, each on LONG bytes -- this is optimized for the Nehalem,
- Westmere, Sandy Bridge, and Ivy Bridge architectures, which have a
- throughput of one crc per cycle, but a latency of three cycles */
- while (len >= LONG*3) {
- crc1 = 0;
- crc2 = 0;
- end = next + LONG;
- do {
- __asm__("crc32q\t" "(%3), %0\n\t"
- "crc32q\t" LONGx1 "(%3), %1\n\t"
- "crc32q\t" LONGx2 "(%3), %2"
- : "=r"(crc0), "=r"(crc1), "=r"(crc2)
- : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
- next += 8;
- } while (next < end);
- crc0 = crc32c_shift(crc32c_long, crc0) ^ crc1;
- crc0 = crc32c_shift(crc32c_long, crc0) ^ crc2;
- next += LONG*2;
- len -= LONG*3;
- }
-
- /* do the same thing, but now on SHORT*3 blocks for the remaining data less
- than a LONG*3 block */
- while (len >= SHORT*3) {
- crc1 = 0;
- crc2 = 0;
- end = next + SHORT;
- do {
- __asm__("crc32q\t" "(%3), %0\n\t"
- "crc32q\t" SHORTx1 "(%3), %1\n\t"
- "crc32q\t" SHORTx2 "(%3), %2"
- : "=r"(crc0), "=r"(crc1), "=r"(crc2)
- : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
- next += 8;
- } while (next < end);
- crc0 = crc32c_shift(crc32c_short, crc0) ^ crc1;
- crc0 = crc32c_shift(crc32c_short, crc0) ^ crc2;
- next += SHORT*2;
- len -= SHORT*3;
- }
-
- /* compute the crc on the remaining eight-byte units less than a SHORT*3
- block */
- end = next + (len - (len & 7));
- while (next < end) {
- __asm__("crc32q\t" "(%1), %0"
- : "=r"(crc0)
- : "r"(next), "0"(crc0));
- next += 8;
- }
- len &= 7;
-
- /* compute the crc for up to seven trailing bytes */
- while (len) {
- __asm__("crc32b\t" "(%1), %0"
- : "=r"(crc0)
- : "r"(next), "0"(crc0));
- next++;
- len--;
- }
-
- /* return a post-processed crc */
- return (uint32_t)crc0 ^ 0xffffffff;
-}
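-
-/* Illustrative sketch, not part of the original file: the same byte/quad
-   loops written with the SSE 4.2 intrinsics from <nmmintrin.h> instead of
-   inline asm (compile with -msse4.2). It drops the three-way pipelining,
-   so it is simpler but slower than crc32c_hw() on large buffers. */
-#include <nmmintrin.h>
-#include <stdint.h>
-#include <string.h>
-
-static uint32_t crc32c_intrin(uint32_t crc, const void *buf, size_t len) {
-        const unsigned char *p = buf;
-        uint64_t c = crc ^ 0xffffffff;
-
-        while (len && ((uintptr_t)p & 7) != 0) { /* align to 8 bytes */
-                c = _mm_crc32_u8((uint32_t)c, *p++);
-                len--;
-        }
-        while (len >= 8) {                       /* one crc32q per 8 bytes */
-                uint64_t v;
-                memcpy(&v, p, sizeof(v));
-                c = _mm_crc32_u64(c, v);
-                p += 8;
-                len -= 8;
-        }
-        while (len--)                            /* trailing bytes */
-                c = _mm_crc32_u8((uint32_t)c, *p++);
-        return (uint32_t)c ^ 0xffffffff;
-}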
-
-/* Check for SSE 4.2. SSE 4.2 was first supported in Nehalem processors
- introduced in November, 2008. This does not check for the existence of the
- cpuid instruction itself, which was introduced on the 486SL in 1992, so this
- will fail on earlier x86 processors. cpuid works on all Pentium and later
- processors. */
-#define SSE42(have) \
- do { \
- uint32_t eax, ecx; \
- eax = 1; \
- __asm__("cpuid" \
- : "=c"(ecx) \
- : "a"(eax) \
- : "%ebx", "%edx"); \
- (have) = (ecx >> 20) & 1; \
- } while (0)
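-
-/* Illustrative sketch, not part of the original file: with gcc/clang the
-   same runtime test can be written without hand-rolled cpuid asm. */
-static int have_sse42(void) {
-#if defined(__GNUC__)
-        return __builtin_cpu_supports("sse4.2");
-#else
-        return 0; /* fall back to the SSE42() macro above */
-#endif
-}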
-
-#endif /* WITH_CRC32C_HW */
-
-/* Compute a CRC-32C. If the crc32 instruction is available, use the hardware
- version. Otherwise, use the software version. */
-uint32_t rd_crc32c(uint32_t crc, const void *buf, size_t len)
-{
-#if WITH_CRC32C_HW
- if (sse42)
- return crc32c_hw(crc, buf, len);
- else
-#endif
- return crc32c_sw(crc, buf, len);
-}
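-
-/* Illustrative usage sketch, not part of the original file: the lookup and
-   shift tables must be populated once, via rd_crc32c_global_init() below,
-   before the first rd_crc32c() call. checksum_message() is a made-up
-   caller name for this example. */
-static uint32_t checksum_message(const void *payload, size_t len) {
-        /* assumes rd_crc32c_global_init() already ran at startup */
-        return rd_crc32c(0, payload, len);
-}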
-
-
-
-
-
-
-/**
- * @brief Populate shift tables once
- */
-void rd_crc32c_global_init (void) {
-#if WITH_CRC32C_HW
- SSE42(sse42);
- if (sse42)
- crc32c_init_hw();
- else
-#endif
- crc32c_init_sw();
-}
-
-int unittest_rd_crc32c (void) {
- const char *buf =
-" This software is provided 'as-is', without any express or implied\n"
-" warranty. In no event will the author be held liable for any damages\n"
-" arising from the use of this software.\n"
-"\n"
-" Permission is granted to anyone to use this software for any purpose,\n"
-" including commercial applications, and to alter it and redistribute it\n"
-" freely, subject to the following restrictions:\n"
-"\n"
-" 1. The origin of this software must not be misrepresented; you must not\n"
-" claim that you wrote the original software. If you use this software\n"
-" in a product, an acknowledgment in the product documentation would be\n"
-" appreciated but is not required.\n"
-" 2. Altered source versions must be plainly marked as such, and must not be\n"
-" misrepresented as being the original software.\n"
-" 3. This notice may not be removed or altered from any source distribution.";
- const uint32_t expected_crc = 0x7dcde113;
- uint32_t crc;
- const char *how;
-
-#if WITH_CRC32C_HW
- if (sse42)
- how = "hardware (SSE42)";
- else
- how = "software (SSE42 supported in build but not at runtime)";
-#else
- how = "software";
-#endif
- RD_UT_SAY("Calculate CRC32C using %s", how);
-
- crc = rd_crc32c(0, buf, strlen(buf));
- RD_UT_ASSERT(crc == expected_crc,
- "Calculated CRC (%s) 0x%"PRIx32
- " not matching expected CRC 0x%"PRIx32,
- how, crc, expected_crc);
-
- /* Verify software version too, regardless of which
- * version was used above. */
- crc32c_init_sw();
- RD_UT_SAY("Calculate CRC32C using software");
- crc = crc32c_sw(0, buf, strlen(buf));
- RD_UT_ASSERT(crc == expected_crc,
- "Calculated CRC (software) 0x%"PRIx32
- " not matching expected CRC 0x%"PRIx32,
- crc, expected_crc);
-
- RD_UT_PASS();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h b/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h
deleted file mode 100644
index 21c7badc7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RD_CRC32C_H_
-#define _RD_CRC32C_H_
-
-uint32_t rd_crc32c(uint32_t crc, const void *buf, size_t len);
-
-void rd_crc32c_global_init (void);
-
-int unittest_rd_crc32c (void);
-
-#endif /* _RD_CRC32C_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh b/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh
deleted file mode 100755
index c7023f47a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-#
-# librdkafka - Apache Kafka C library
-#
-# Copyright (c) 2020 Magnus Edenhill
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-
-# Generate ApiKey / protocol request defines and rd_kafka_ApiKey2str() fields.
-# Cut'n'paste as needed to rdkafka_protocol.h and rdkafka_proto.h
-#
-#
-# Usage:
-# src/generate_proto.sh /path/to/apache-kafka-source
-
-set -e
-
-KAFKA_DIR="$1"
-
-if [[ ! -d $KAFKA_DIR ]]; then
- echo "Usage: $0 <path-to-kafka-source-directory>"
- exit 1
-fi
-
-cd "$KAFKA_DIR"
-
-echo "################## Protocol defines (add to rdkafka_protocol.h) ###################"
-grep apiKey clients/src/main/resources/common/message/*Request.json | \
- awk '{print $3, $1 }' | \
- sort -n | \
- sed -E -s 's/ cli.*\///' | \
- sed -E 's/\.json:$//' | \
- awk -F, '{print "#define RD_KAFKAP_" $2 " " $1}'
-echo "!! Don't forget to update RD_KAFKAP__NUM !!"
-echo
-echo
-
-echo "################## Protocol names (add to rdkafka_proto.h) ###################"
-grep apiKey clients/src/main/resources/common/message/*Request.json | \
- awk '{print $3, $1 }' | \
- sort -n | \
- sed -E -s 's/ cli.*\///' | \
- sed -E 's/\.json:$//' | \
- awk -F, '{print "[RD_KAFKAP_" $2 "] = \"" $2 "\","}'
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png b/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png
deleted file mode 100644
index 8df1eda82..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png
+++ /dev/null
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c b/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c
deleted file mode 100644
index c19b11b7f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c
+++ /dev/null
@@ -1,2498 +0,0 @@
-/*
- LZ4 - Fast LZ compression algorithm
- Copyright (C) 2011-2020, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 homepage : http://www.lz4.org
- - LZ4 source repository : https://github.com/lz4/lz4
-*/
-
-/*-************************************
-* Tuning parameters
-**************************************/
-/*
- * LZ4_HEAPMODE :
- * Select how the default compression functions allocate memory for their hash
- * table: on the stack (0: default, fastest) or on the heap (1: requires malloc()).
- */
-#ifndef LZ4_HEAPMODE
-# define LZ4_HEAPMODE 0
-#endif
-
-/*
- * LZ4_ACCELERATION_DEFAULT :
- * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
- */
-#define LZ4_ACCELERATION_DEFAULT 1
-/*
- * LZ4_ACCELERATION_MAX :
- * Any "acceleration" value higher than this threshold
- * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
- */
-#define LZ4_ACCELERATION_MAX 65537
-
-
-/*-************************************
-* CPU Feature Detection
-**************************************/
-/* LZ4_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The switches below allow selecting a different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
- * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violates the C standard.
- * It can generate buggy code on targets whose assembly generation depends on alignment.
- * But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6)
- * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
-# if defined(__GNUC__) && \
- ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
- || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-# define LZ4_FORCE_MEMORY_ACCESS 2
-# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
-# define LZ4_FORCE_MEMORY_ACCESS 1
-# endif
-#endif
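-
-/* Illustrative sketch, not part of the original file: Method 0 in practice.
-   A fixed-size memcpy() into a local variable is defined behaviour for any
-   source alignment, and optimizing compilers typically lower it to a single
-   (unaligned) load on targets that permit one. */
-#include <string.h>
-#include <stdint.h>
-
-static uint32_t read32_any_alignment(const void *p)
-{
-    uint32_t v;
-    memcpy(&v, p, sizeof(v)); /* no alignment requirement on p */
-    return v;
-}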
-
-/*
- * LZ4_FORCE_SW_BITCOUNT
- * Define this parameter if your target system or compiler does not support hardware bit count
- */
-#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
-# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
-# define LZ4_FORCE_SW_BITCOUNT
-#endif
-
-
-
-/*-************************************
-* Dependency
-**************************************/
-/*
- * LZ4_SRC_INCLUDED:
- * Amalgamation flag, whether lz4.c is included
- */
-#ifndef LZ4_SRC_INCLUDED
-# define LZ4_SRC_INCLUDED 1
-#endif
-
-#ifndef LZ4_STATIC_LINKING_ONLY
-#define LZ4_STATIC_LINKING_ONLY
-#endif
-
-#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
-#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
-#endif
-
-#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
-#include "lz4.h"
-/* see also "memory routines" below */
-
-
-/*-************************************
-* Compiler Options
-**************************************/
-#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
-# include <intrin.h> /* only present in VS2005+ */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif /* _MSC_VER */
-
-#ifndef LZ4_FORCE_INLINE
-# ifdef _MSC_VER /* Visual Studio */
-# define LZ4_FORCE_INLINE static __forceinline
-# else
-# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
-# ifdef __GNUC__
-# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
-# else
-# define LZ4_FORCE_INLINE static inline
-# endif
-# else
-# define LZ4_FORCE_INLINE static
-# endif /* __STDC_VERSION__ */
-# endif /* _MSC_VER */
-#endif /* LZ4_FORCE_INLINE */
-
-/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
- * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
- * together with a simple 8-byte copy loop as a fall-back path.
- * However, this optimization hurts the decompression speed by >30%,
- * because the execution does not go to the optimized loop
- * for typical compressible data, and all of the preamble checks
- * before going to the fall-back path become useless overhead.
- * This optimization happens only with the -O3 flag, and -O2 generates
- * a simple 8-byte copy loop.
- * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
- * functions are annotated with __attribute__((optimize("O2"))),
- * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
- * of LZ4_wildCopy8 does not affect the compression speed.
- */
-#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
-# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
-# undef LZ4_FORCE_INLINE
-# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
-#else
-# define LZ4_FORCE_O2
-#endif
-
-#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
-# define expect(expr,value) (__builtin_expect ((expr),(value)) )
-#else
-# define expect(expr,value) (expr)
-#endif
-
-#ifndef likely
-#define likely(expr) expect((expr) != 0, 1)
-#endif
-#ifndef unlikely
-#define unlikely(expr) expect((expr) != 0, 0)
-#endif
-
-/* Should the alignment test prove unreliable, for some reason,
- * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
-#ifndef LZ4_ALIGN_TEST /* can be externally provided */
-# define LZ4_ALIGN_TEST 1
-#endif
-
-
-/*-************************************
-* Memory routines
-**************************************/
-#ifdef LZ4_USER_MEMORY_FUNCTIONS
-/* Memory management functions can be customized by the user project.
- * The functions below must exist somewhere in the project
- * and be available at link time. */
-void* LZ4_malloc(size_t s);
-void* LZ4_calloc(size_t n, size_t s);
-void LZ4_free(void* p);
-# define ALLOC(s) LZ4_malloc(s)
-# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
-# define FREEMEM(p) LZ4_free(p)
-#else
-struct rdkafka_s;
-extern void *rd_kafka_mem_malloc(struct rdkafka_s *rk, size_t s);
-extern void *rd_kafka_mem_calloc(struct rdkafka_s *rk, size_t n, size_t s);
-extern void rd_kafka_mem_free(struct rdkafka_s *rk, void *p);
-# define ALLOC(s) rd_kafka_mem_malloc(NULL, s)
-# define ALLOC_AND_ZERO(s) rd_kafka_mem_calloc(NULL, 1, s)
-# define FREEMEM(p) rd_kafka_mem_free(NULL, p)
-#endif
-
-#include <string.h> /* memset, memcpy */
-#define MEM_INIT(p,v,s) memset((p),(v),(s))
-
-
-/*-************************************
-* Common Constants
-**************************************/
-#define MINMATCH 4
-
-#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
-#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
-#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
-#define FASTLOOP_SAFE_DISTANCE 64
-static const int LZ4_minLength = (MFLIMIT+1);
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
-#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
-# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
-#endif
-
-#define ML_BITS 4
-#define ML_MASK ((1U<<ML_BITS)-1)
-#define RUN_BITS (8-ML_BITS)
-#define RUN_MASK ((1U<<RUN_BITS)-1)
-
-
-/*-************************************
-* Error detection
-**************************************/
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
-# include <assert.h>
-#else
-# ifndef assert
-# define assert(condition) ((void)0)
-# endif
-#endif
-
-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
-
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
-# include <stdio.h>
- static int g_debuglog_enable = 1;
-# define DEBUGLOG(l, ...) { \
- if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, " \n"); \
- } }
-#else
-# define DEBUGLOG(l, ...) {} /* disabled */
-#endif
-
-static int LZ4_isAligned(const void* ptr, size_t alignment)
-{
- return ((size_t)ptr & (alignment -1)) == 0;
-}
-
-
-/*-************************************
-* Types
-**************************************/
-#include <limits.h>
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
- typedef uintptr_t uptrval;
-#else
-# if UINT_MAX != 4294967295UL
-# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
-# endif
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
- typedef signed int S32;
- typedef unsigned long long U64;
- typedef size_t uptrval; /* generally true, except OpenVMS-64 */
-#endif
-
-#if defined(__x86_64__)
- typedef U64 reg_t; /* 64-bits in x32 mode */
-#else
- typedef size_t reg_t; /* 32-bits in x32 mode */
-#endif
-
-typedef enum {
- notLimited = 0,
- limitedOutput = 1,
- fillOutput = 2
-} limitedOutput_directive;
-
-
-/*-************************************
-* Reading and writing into memory
-**************************************/
-
-/**
- * LZ4 relies on memcpy with a constant size being inlined. In freestanding
- * environments, the compiler can't assume the implementation of memcpy() is
- * standard compliant, so it can't apply its specialized memcpy() inlining
- * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
- * memcpy() as if it were standard compliant, so it can inline it in freestanding
- * environments. This is needed when decompressing the Linux Kernel, for example.
- */
-#if defined(__GNUC__) && (__GNUC__ >= 4)
-#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
-#else
-#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
-#endif
-
-static unsigned LZ4_isLittleEndian(void)
-{
- const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
- return one.c[0];
-}
-
-
-#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
-/* lie to the compiler about data alignment; use with caution */
-
-static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
-static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
-
-static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
-
-#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
-
-/* __packed attribute is safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
-
-static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
-
-static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
-
-#else /* safe and portable access using memcpy() */
-
-static U16 LZ4_read16(const void* memPtr)
-{
- U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static U32 LZ4_read32(const void* memPtr)
-{
- U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static reg_t LZ4_read_ARCH(const void* memPtr)
-{
- reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static void LZ4_write16(void* memPtr, U16 value)
-{
- LZ4_memcpy(memPtr, &value, sizeof(value));
-}
-
-static void LZ4_write32(void* memPtr, U32 value)
-{
- LZ4_memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif /* LZ4_FORCE_MEMORY_ACCESS */
-
-
-static U16 LZ4_readLE16(const void* memPtr)
-{
- if (LZ4_isLittleEndian()) {
- return LZ4_read16(memPtr);
- } else {
- const BYTE* p = (const BYTE*)memPtr;
- return (U16)((U16)p[0] + (p[1]<<8));
- }
-}
-
-static void LZ4_writeLE16(void* memPtr, U16 value)
-{
- if (LZ4_isLittleEndian()) {
- LZ4_write16(memPtr, value);
- } else {
- BYTE* p = (BYTE*)memPtr;
- p[0] = (BYTE) value;
- p[1] = (BYTE)(value>>8);
- }
-}
-
-/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
-LZ4_FORCE_INLINE
-void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
-{
- BYTE* d = (BYTE*)dstPtr;
- const BYTE* s = (const BYTE*)srcPtr;
- BYTE* const e = (BYTE*)dstEnd;
-
- do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
-}
-
-static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
-static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
-
-
-#ifndef LZ4_FAST_DEC_LOOP
-# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
-# define LZ4_FAST_DEC_LOOP 1
-# elif defined(__aarch64__) && !defined(__clang__)
- /* On aarch64, we disable this optimization for clang because on certain
- * mobile chipsets, performance is reduced with clang. For information
- * refer to https://github.com/lz4/lz4/pull/707 */
-# define LZ4_FAST_DEC_LOOP 1
-# else
-# define LZ4_FAST_DEC_LOOP 0
-# endif
-#endif
-
-#if LZ4_FAST_DEC_LOOP
-
-LZ4_FORCE_INLINE void
-LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
-{
- assert(srcPtr + offset == dstPtr);
- if (offset < 8) {
- LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
- dstPtr[0] = srcPtr[0];
- dstPtr[1] = srcPtr[1];
- dstPtr[2] = srcPtr[2];
- dstPtr[3] = srcPtr[3];
- srcPtr += inc32table[offset];
- LZ4_memcpy(dstPtr+4, srcPtr, 4);
- srcPtr -= dec64table[offset];
- dstPtr += 8;
- } else {
- LZ4_memcpy(dstPtr, srcPtr, 8);
- dstPtr += 8;
- srcPtr += 8;
- }
-
- LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
-}
-
-/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd.
- * This version copies 16 bytes twice (instead of 32 bytes once)
- * because it must be compatible with offsets >= 16. */
-LZ4_FORCE_INLINE void
-LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
-{
- BYTE* d = (BYTE*)dstPtr;
- const BYTE* s = (const BYTE*)srcPtr;
- BYTE* const e = (BYTE*)dstEnd;
-
- do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
-}
-
-/* LZ4_memcpy_using_offset() presumes :
- * - dstEnd >= dstPtr + MINMATCH
- * - there is at least 8 bytes available to write after dstEnd */
-LZ4_FORCE_INLINE void
-LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
-{
- BYTE v[8];
-
- assert(dstEnd >= dstPtr + MINMATCH);
-
- switch(offset) {
- case 1:
- MEM_INIT(v, *srcPtr, 8);
- break;
- case 2:
- LZ4_memcpy(v, srcPtr, 2);
- LZ4_memcpy(&v[2], srcPtr, 2);
- LZ4_memcpy(&v[4], v, 4);
- break;
- case 4:
- LZ4_memcpy(v, srcPtr, 4);
- LZ4_memcpy(&v[4], srcPtr, 4);
- break;
- default:
- LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
- return;
- }
-
- LZ4_memcpy(dstPtr, v, 8);
- dstPtr += 8;
- while (dstPtr < dstEnd) {
- LZ4_memcpy(dstPtr, v, 8);
- dstPtr += 8;
- }
-}
-#endif
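-
-/* Illustrative sketch, not part of the original file: the reference
-   semantics that the offset tricks above accelerate. An LZ4 match may
-   overlap its own output (offset < match length), so the source must be
-   consumed byte by byte; with offset == 1 this replicates a single byte,
-   RLE-style. */
-static void overlap_copy(BYTE* dst, size_t offset, size_t length)
-{
-    const BYTE* src = dst - offset;  /* source overlaps destination */
-    while (length--) *dst++ = *src++;
-}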
-
-
-/*-************************************
-* Common functions
-**************************************/
-static unsigned LZ4_NbCommonBytes (reg_t val)
-{
- assert(val != 0);
- if (LZ4_isLittleEndian()) {
- if (sizeof(val) == 8) {
-# if defined(_MSC_VER) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
- /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
- return (unsigned)_tzcnt_u64(val) >> 3;
-# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanForward64(&r, (U64)val);
- return (unsigned)r >> 3;
-# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_ctzll((U64)val) >> 3;
-# else
- const U64 m = 0x0101010101010101ULL;
- val ^= val - 1;
- return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
-# endif
- } else /* 32 bits */ {
-# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r;
- _BitScanForward(&r, (U32)val);
- return (unsigned)r >> 3;
-# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_ctz((U32)val) >> 3;
-# else
- const U32 m = 0x01010101;
- return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
-# endif
- }
- } else /* Big Endian CPU */ {
- if (sizeof(val)==8) {
-# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_clzll((U64)val) >> 3;
-# else
-#if 1
- /* this method is probably faster,
-             * but adds a 128-byte lookup table */
- static const unsigned char ctz7_tab[128] = {
- 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- };
- U64 const mask = 0x0101010101010101ULL;
- U64 const t = (((val >> 8) - mask) | val) & mask;
- return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
-#else
- /* this method doesn't consume memory space like the previous one,
-             * but it contains several branches
-             * that may end up slowing execution */
- static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
-               Just to avoid some static analyzer complaining about shift by 32 on 32-bit targets.
-               Note that this code path is never triggered in 32-bit mode. */
- unsigned r;
- if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-#endif
-# endif
- } else /* 32 bits */ {
-# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_clz((U32)val) >> 3;
-# else
- val >>= 8;
- val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
- (val + 0x00FF0000)) >> 24;
- return (unsigned)val ^ 3;
-# endif
- }
- }
-}
-
-
-#define STEPSIZE sizeof(reg_t)
-LZ4_FORCE_INLINE
-unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
-{
- const BYTE* const pStart = pIn;
-
- if (likely(pIn < pInLimit-(STEPSIZE-1))) {
- reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
- if (!diff) {
- pIn+=STEPSIZE; pMatch+=STEPSIZE;
- } else {
- return LZ4_NbCommonBytes(diff);
- } }
-
- while (likely(pIn < pInLimit-(STEPSIZE-1))) {
- reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
- if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
- pIn += LZ4_NbCommonBytes(diff);
- return (unsigned)(pIn - pStart);
- }
-
- if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
- if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
- if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
- return (unsigned)(pIn - pStart);
-}
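-
-/* Illustrative sketch, not part of the original file: the core trick in
-   LZ4_count(). XOR the two words; on a little-endian machine the number of
-   trailing zero bits divided by 8 is the number of leading equal bytes.
-   __builtin_ctzll() is the gcc/clang builtin used above. */
-static unsigned common_bytes_le_demo(U64 a, U64 b)
-{
-    U64 const diff = a ^ b;
-    if (diff == 0) return 8;                    /* all eight bytes equal */
-    return (unsigned)__builtin_ctzll(diff) >> 3;
-}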
-
-
-#ifndef LZ4_COMMONDEFS_ONLY
-/*-************************************
-* Local Constants
-**************************************/
-static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
-static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
-
-
-/*-************************************
-* Local Structures and types
-**************************************/
-typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
-
-/**
- * This enum distinguishes several different modes of accessing previous
- * content in the stream.
- *
- * - noDict : There is no preceding content.
- * - withPrefix64k : Table entries up to ctx->dictSize before the blob
- *                   currently being compressed are valid and refer to the preceding
- * content (of length ctx->dictSize), which is available
- * contiguously preceding in memory the content currently
- * being compressed.
- * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
- * else in memory, starting at ctx->dictionary with length
- * ctx->dictSize.
- * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
- * content is in a separate context, pointed to by
- * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
- * entries in the current context that refer to positions
- * preceding the beginning of the current compression are
- * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
- * ->dictSize describe the location and size of the preceding
- * content, and matches are found by looking in the ctx
- * ->dictCtx->hashTable.
- */
-typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
-typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
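-
-/* Illustrative usage sketch, not part of the original file: the public
-   streaming API that ends up exercising the external-dictionary paths
-   described above. Error handling is omitted; the function name is made
-   up for this example. */
-static int compress_with_dict_demo(const char* dict, int dictSize,
-                                   const char* src, int srcSize,
-                                   char* dst, int dstCapacity)
-{
-    LZ4_stream_t stream;
-    LZ4_initStream(&stream, sizeof(stream));
-    LZ4_loadDict(&stream, dict, dictSize);      /* preceding content */
-    return LZ4_compress_fast_continue(&stream, src, dst,
-                                      srcSize, dstCapacity, 1);
-}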
-
-
-/*-************************************
-* Local Utils
-**************************************/
-int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
-const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
-int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
-
-
-/*-************************************
-* Internal Definitions used in Tests
-**************************************/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
-
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
- int compressedSize, int maxOutputSize,
- const void* dictStart, size_t dictSize);
-
-#if defined (__cplusplus)
-}
-#endif
-
-/*-******************************
-* Compression functions
-********************************/
-LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
-{
- if (tableType == byU16)
- return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
- else
- return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
-}
-
-LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
-{
- const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
- if (LZ4_isLittleEndian()) {
- const U64 prime5bytes = 889523592379ULL;
- return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
- } else {
- const U64 prime8bytes = 11400714785074694791ULL;
- return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
- }
-}
-
-LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
-{
- if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
- return LZ4_hash4(LZ4_read32(p), tableType);
-}
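-
-/* Illustrative sketch, not part of the original file: LZ4_hash4() is plain
-   multiplicative (Fibonacci-style) hashing. The multiply by a large odd
-   constant spreads the 4 input bytes across the upper bits, and the shift
-   keeps only hashLog of them as the table index. */
-static U32 hash4_demo(U32 sequence, U32 hashLog)
-{
-    return (sequence * 2654435761U) >> (32 - hashLog);
-}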
-
-LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
-{
- switch (tableType)
- {
- default: /* fallthrough */
- case clearedTable: { /* illegal! */ assert(0); return; }
- case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
- }
-}
-
-LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
-{
- switch (tableType)
- {
- default: /* fallthrough */
- case clearedTable: /* fallthrough */
- case byPtr: { /* illegal! */ assert(0); return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
- case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
- }
-}
-
-LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
- void* tableBase, tableType_t const tableType,
- const BYTE* srcBase)
-{
- switch (tableType)
- {
- case clearedTable: { /* illegal! */ assert(0); return; }
- case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
- }
-}
-
-LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
- U32 const h = LZ4_hashPosition(p, tableType);
- LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
-}
-
-/* LZ4_getIndexOnHash() :
- * Index of match position registered in hash table.
- * hash position must be calculated by using base+index, or dictBase+index.
- * Assumption 1 : only valid if tableType == byU32 or byU16.
- * Assumption 2 : h is presumed valid (within limits of hash table)
- */
-LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
-{
- LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
- if (tableType == byU32) {
- const U32* const hashTable = (const U32*) tableBase;
- assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
- return hashTable[h];
- }
- if (tableType == byU16) {
- const U16* const hashTable = (const U16*) tableBase;
- assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
- return hashTable[h];
- }
- assert(0); return 0; /* forbidden case */
-}
-
-static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
- if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
- if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
- { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
-}
-
-LZ4_FORCE_INLINE const BYTE*
-LZ4_getPosition(const BYTE* p,
- const void* tableBase, tableType_t tableType,
- const BYTE* srcBase)
-{
- U32 const h = LZ4_hashPosition(p, tableType);
- return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
-}
-
-LZ4_FORCE_INLINE void
-LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
- const int inputSize,
- const tableType_t tableType) {
- /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
- * therefore safe to use no matter what mode we're in. Otherwise, we figure
- * out if it's safe to leave as is or whether it needs to be reset.
- */
- if ((tableType_t)cctx->tableType != clearedTable) {
- assert(inputSize >= 0);
- if ((tableType_t)cctx->tableType != tableType
- || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
- || ((tableType == byU32) && cctx->currentOffset > 1 GB)
- || tableType == byPtr
- || inputSize >= 4 KB)
- {
- DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
- MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
- cctx->currentOffset = 0;
- cctx->tableType = (U32)clearedTable;
- } else {
- DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
- }
- }
-
- /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
- * than compressing without a gap. However, compressing with
- * currentOffset == 0 is faster still, so we preserve that case.
- */
- if (cctx->currentOffset != 0 && tableType == byU32) {
- DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
- cctx->currentOffset += 64 KB;
- }
-
- /* Finally, clear history */
- cctx->dictCtx = NULL;
- cctx->dictionary = NULL;
- cctx->dictSize = 0;
-}
-
-/** LZ4_compress_generic() :
- * inlined, to ensure branches are decided at compilation time.
- * Presumed already validated at this stage:
- * - source != NULL
- * - inputSize > 0
- */
-LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
- LZ4_stream_t_internal* const cctx,
- const char* const source,
- char* const dest,
- const int inputSize,
- int *inputConsumed, /* only written when outputDirective == fillOutput */
- const int maxOutputSize,
- const limitedOutput_directive outputDirective,
- const tableType_t tableType,
- const dict_directive dictDirective,
- const dictIssue_directive dictIssue,
- const int acceleration)
-{
- int result;
- const BYTE* ip = (const BYTE*) source;
-
- U32 const startIndex = cctx->currentOffset;
- const BYTE* base = (const BYTE*) source - startIndex;
- const BYTE* lowLimit;
-
- const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
- const BYTE* const dictionary =
- dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
- const U32 dictSize =
- dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
- const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
-
- int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
- U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
- const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
- const BYTE* anchor = (const BYTE*) source;
- const BYTE* const iend = ip + inputSize;
- const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
- const BYTE* const matchlimit = iend - LASTLITERALS;
-
- /* the dictCtx currentOffset is indexed on the start of the dictionary,
- * while a dictionary in the current context precedes the currentOffset */
- const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
- dictionary + dictSize - dictCtx->currentOffset :
- dictionary + dictSize - startIndex;
-
- BYTE* op = (BYTE*) dest;
- BYTE* const olimit = op + maxOutputSize;
-
- U32 offset = 0;
- U32 forwardH;
-
- DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
- assert(ip != NULL);
- /* If init conditions are not met, we don't have to mark stream
- * as having dirty context, since no action was taken yet */
- if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
- if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
- if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
- assert(acceleration >= 1);
-
- lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
-
- /* Update context state */
- if (dictDirective == usingDictCtx) {
- /* Subsequent linked blocks can't use the dictionary. */
- /* Instead, they use the block we just compressed. */
- cctx->dictCtx = NULL;
- cctx->dictSize = (U32)inputSize;
- } else {
- cctx->dictSize += (U32)inputSize;
- }
- cctx->currentOffset += (U32)inputSize;
- cctx->tableType = (U32)tableType;
-
- if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
-
- /* First Byte */
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
-
- /* Main Loop */
- for ( ; ; ) {
- const BYTE* match;
- BYTE* token;
- const BYTE* filledIp;
-
- /* Find a match */
- if (tableType == byPtr) {
- const BYTE* forwardIp = ip;
- int step = 1;
- int searchMatchNb = acceleration << LZ4_skipTrigger;
- do {
- U32 const h = forwardH;
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_skipTrigger);
-
- if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
- assert(ip < mflimitPlusOne);
-
- match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
- forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
-
- } while ( (match+LZ4_DISTANCE_MAX < ip)
- || (LZ4_read32(match) != LZ4_read32(ip)) );
-
- } else { /* byU32, byU16 */
-
- const BYTE* forwardIp = ip;
- int step = 1;
- int searchMatchNb = acceleration << LZ4_skipTrigger;
- do {
- U32 const h = forwardH;
- U32 const current = (U32)(forwardIp - base);
- U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
- assert(matchIndex <= current);
- assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_skipTrigger);
-
- if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
- assert(ip < mflimitPlusOne);
-
- if (dictDirective == usingDictCtx) {
- if (matchIndex < startIndex) {
- /* there was no match, try the dictionary */
- assert(tableType == byU32);
- matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
- match = dictBase + matchIndex;
- matchIndex += dictDelta; /* make dictCtx index comparable with current context */
- lowLimit = dictionary;
- } else {
- match = base + matchIndex;
- lowLimit = (const BYTE*)source;
- }
- } else if (dictDirective==usingExtDict) {
- if (matchIndex < startIndex) {
- DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
- assert(startIndex - matchIndex >= MINMATCH);
- match = dictBase + matchIndex;
- lowLimit = dictionary;
- } else {
- match = base + matchIndex;
- lowLimit = (const BYTE*)source;
- }
- } else { /* single continuous memory segment */
- match = base + matchIndex;
- }
- forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
-
-                DEBUGLOG(7, "candidate at pos=%u  (offset=%u)\n", matchIndex, current - matchIndex);
- if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
- assert(matchIndex < current);
- if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
- && (matchIndex+LZ4_DISTANCE_MAX < current)) {
- continue;
- } /* too far */
- assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
-
- if (LZ4_read32(match) == LZ4_read32(ip)) {
- if (maybe_extMem) offset = current - matchIndex;
- break; /* match found */
- }
-
- } while(1);
- }
-
- /* Catch up */
- filledIp = ip;
- while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
-
- /* Encode Literals */
- { unsigned const litLength = (unsigned)(ip - anchor);
- token = op++;
- if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
- (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
- return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
- }
- if ((outputDirective == fillOutput) &&
- (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
- op--;
- goto _last_literals;
- }
- if (litLength >= RUN_MASK) {
- int len = (int)(litLength - RUN_MASK);
- *token = (RUN_MASK<<ML_BITS);
- for(; len >= 255 ; len-=255) *op++ = 255;
- *op++ = (BYTE)len;
- }
- else *token = (BYTE)(litLength<<ML_BITS);
-
- /* Copy Literals */
- LZ4_wildCopy8(op, anchor, op+litLength);
- op+=litLength;
- DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
- (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
- }
-
-_next_match:
- /* at this stage, the following variables must be correctly set :
- * - ip : at start of LZ operation
- * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
-         * - offset : if maybe_extMem==1 (constant)
- * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
- * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
- */
-
- if ((outputDirective == fillOutput) &&
- (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
- /* the match was too close to the end, rewind and go to last literals */
- op = token;
- goto _last_literals;
- }
-
- /* Encode Offset */
- if (maybe_extMem) { /* static test */
- DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
- assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
- LZ4_writeLE16(op, (U16)offset); op+=2;
- } else {
- DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
- assert(ip-match <= LZ4_DISTANCE_MAX);
- LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
- }
-
- /* Encode MatchLength */
- { unsigned matchCode;
-
- if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
- && (lowLimit==dictionary) /* match within extDict */ ) {
- const BYTE* limit = ip + (dictEnd-match);
- assert(dictEnd > match);
- if (limit > matchlimit) limit = matchlimit;
- matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
- ip += (size_t)matchCode + MINMATCH;
- if (ip==limit) {
- unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
- matchCode += more;
- ip += more;
- }
- DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
- } else {
- matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
- ip += (size_t)matchCode + MINMATCH;
- DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
- }
-
- if ((outputDirective) && /* Check output buffer overflow */
- (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
- if (outputDirective == fillOutput) {
- /* Match description too long : reduce it */
- U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
- ip -= matchCode - newMatchCode;
- assert(newMatchCode < matchCode);
- matchCode = newMatchCode;
- if (unlikely(ip <= filledIp)) {
- /* We have already filled up to filledIp so if ip ends up less than filledIp
- * we have positions in the hash table beyond the current position. This is
- * a problem if we reuse the hash table. So we have to remove these positions
- * from the hash table.
- */
- const BYTE* ptr;
- DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
- for (ptr = ip; ptr <= filledIp; ++ptr) {
- U32 const h = LZ4_hashPosition(ptr, tableType);
- LZ4_clearHash(h, cctx->hashTable, tableType);
- }
- }
- } else {
- assert(outputDirective == limitedOutput);
- return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
- }
- }
- if (matchCode >= ML_MASK) {
- *token += ML_MASK;
- matchCode -= ML_MASK;
- LZ4_write32(op, 0xFFFFFFFF);
- while (matchCode >= 4*255) {
- op+=4;
- LZ4_write32(op, 0xFFFFFFFF);
- matchCode -= 4*255;
- }
- op += matchCode / 255;
- *op++ = (BYTE)(matchCode % 255);
- } else
- *token += (BYTE)(matchCode);
- }
- /* Ensure we have enough space for the last literals. */
- assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
-
- anchor = ip;
-
- /* Test end of chunk */
- if (ip >= mflimitPlusOne) break;
-
- /* Fill table */
- LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
-
- /* Test next position */
- if (tableType == byPtr) {
-
- match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
- if ( (match+LZ4_DISTANCE_MAX >= ip)
- && (LZ4_read32(match) == LZ4_read32(ip)) )
- { token=op++; *token=0; goto _next_match; }
-
- } else { /* byU32, byU16 */
-
- U32 const h = LZ4_hashPosition(ip, tableType);
- U32 const current = (U32)(ip-base);
- U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
- assert(matchIndex < current);
- if (dictDirective == usingDictCtx) {
- if (matchIndex < startIndex) {
- /* there was no match, try the dictionary */
- matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
- match = dictBase + matchIndex;
- lowLimit = dictionary; /* required for match length counter */
- matchIndex += dictDelta;
- } else {
- match = base + matchIndex;
- lowLimit = (const BYTE*)source; /* required for match length counter */
- }
- } else if (dictDirective==usingExtDict) {
- if (matchIndex < startIndex) {
- match = dictBase + matchIndex;
- lowLimit = dictionary; /* required for match length counter */
- } else {
- match = base + matchIndex;
- lowLimit = (const BYTE*)source; /* required for match length counter */
- }
- } else { /* single memory segment */
- match = base + matchIndex;
- }
- LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
- assert(matchIndex < current);
- if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
- && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
- && (LZ4_read32(match) == LZ4_read32(ip)) ) {
- token=op++;
- *token=0;
- if (maybe_extMem) offset = current - matchIndex;
- DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
- (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
- goto _next_match;
- }
- }
-
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
-
- }
-
-_last_literals:
- /* Encode Last Literals */
- { size_t lastRun = (size_t)(iend - anchor);
- if ( (outputDirective) && /* Check output buffer overflow */
- (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
- if (outputDirective == fillOutput) {
- /* adapt lastRun to fill 'dst' */
- assert(olimit >= op);
- lastRun = (size_t)(olimit-op) - 1/*token*/;
- lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
- } else {
- assert(outputDirective == limitedOutput);
- return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
- }
- }
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
- if (lastRun >= RUN_MASK) {
- size_t accumulator = lastRun - RUN_MASK;
- *op++ = RUN_MASK << ML_BITS;
- for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRun<<ML_BITS);
- }
- LZ4_memcpy(op, anchor, lastRun);
- ip = anchor + lastRun;
- op += lastRun;
- }
-
- if (outputDirective == fillOutput) {
- *inputConsumed = (int) (((const char*)ip)-source);
- }
- result = (int)(((char*)op) - dest);
- assert(result > 0);
- DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
- return result;
-}
-
-/** LZ4_compress_generic() :
- * inlined, to ensure branches are decided at compilation time;
- *  takes care of src == (NULL, 0)
- *  and forwards the rest to LZ4_compress_generic_validated */
-LZ4_FORCE_INLINE int LZ4_compress_generic(
- LZ4_stream_t_internal* const cctx,
- const char* const src,
- char* const dst,
- const int srcSize,
- int *inputConsumed, /* only written when outputDirective == fillOutput */
- const int dstCapacity,
- const limitedOutput_directive outputDirective,
- const tableType_t tableType,
- const dict_directive dictDirective,
- const dictIssue_directive dictIssue,
- const int acceleration)
-{
- DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
- srcSize, dstCapacity);
-
- if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
- if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
- if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
- DEBUGLOG(5, "Generating an empty block");
- assert(outputDirective == notLimited || dstCapacity >= 1);
- assert(dst != NULL);
- dst[0] = 0;
- if (outputDirective == fillOutput) {
- assert (inputConsumed != NULL);
- *inputConsumed = 0;
- }
- return 1;
- }
- assert(src != NULL);
-
- return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
- inputConsumed, /* only written into if outputDirective == fillOutput */
- dstCapacity, outputDirective,
- tableType, dictDirective, dictIssue, acceleration);
-}
-
-
-int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
- LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
- assert(ctx != NULL);
- if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
- if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
- if (maxOutputSize >= LZ4_compressBound(inputSize)) {
- if (inputSize < LZ4_64Klimit) {
- return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
- } else {
- const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
- return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
- }
- } else {
- if (inputSize < LZ4_64Klimit) {
- return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
- } else {
- const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
- return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
- }
- }
-}
-
-/**
- * LZ4_compress_fast_extState_fastReset() :
- * A variant of LZ4_compress_fast_extState().
- *
- * Using this variant avoids an expensive initialization step. It is only safe
- * to call if the state buffer is known to be correctly initialized already
- * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
- * "correctly initialized").
- */
-int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
-{
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
- if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
- if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
-
- if (dstCapacity >= LZ4_compressBound(srcSize)) {
- if (srcSize < LZ4_64Klimit) {
- const tableType_t tableType = byU16;
- LZ4_prepareTable(ctx, srcSize, tableType);
- if (ctx->currentOffset) {
- return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
- } else {
- return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
- }
- } else {
- const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
- LZ4_prepareTable(ctx, srcSize, tableType);
- return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
- }
- } else {
- if (srcSize < LZ4_64Klimit) {
- const tableType_t tableType = byU16;
- LZ4_prepareTable(ctx, srcSize, tableType);
- if (ctx->currentOffset) {
- return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
- } else {
- return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
- }
- } else {
- const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
- LZ4_prepareTable(ctx, srcSize, tableType);
- return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
- }
- }
-}
-
-
-int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
- int result;
-#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
- if (ctxPtr == NULL) return 0;
-#else
- LZ4_stream_t ctx;
- LZ4_stream_t* const ctxPtr = &ctx;
-#endif
- result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
-
-#if (LZ4_HEAPMODE)
- FREEMEM(ctxPtr);
-#endif
- return result;
-}
-
-
-int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
-{
- return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
-}
-
-
-/* Note!: This function leaves the stream in an unclean/broken state!
- * It is not safe to subsequently use the same state with a _fastReset() or
- * _continue() call without resetting it. */
-static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
-{
- void* const s = LZ4_initStream(state, sizeof (*state));
- assert(s != NULL); (void)s;
-
- if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
- return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
- } else {
- if (*srcSizePtr < LZ4_64Klimit) {
- return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
- } else {
- tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
- return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
- } }
-}
-
-
-int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
-{
-#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
- if (ctx == NULL) return 0;
-#else
- LZ4_stream_t ctxBody;
- LZ4_stream_t* ctx = &ctxBody;
-#endif
-
- int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
-
-#if (LZ4_HEAPMODE)
- FREEMEM(ctx);
-#endif
- return result;
-}
-
-
-
-/*-******************************
-* Streaming functions
-********************************/
-
-LZ4_stream_t* LZ4_createStream(void)
-{
- LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
- LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
- DEBUGLOG(4, "LZ4_createStream %p", lz4s);
- if (lz4s == NULL) return NULL;
- LZ4_initStream(lz4s, sizeof(*lz4s));
- return lz4s;
-}
-
-static size_t LZ4_stream_t_alignment(void)
-{
-#if LZ4_ALIGN_TEST
- typedef struct { char c; LZ4_stream_t t; } t_a;
- return sizeof(t_a) - sizeof(LZ4_stream_t);
-#else
- return 1; /* effectively disabled */
-#endif
-}
-
-LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
-{
- DEBUGLOG(5, "LZ4_initStream");
- if (buffer == NULL) { return NULL; }
- if (size < sizeof(LZ4_stream_t)) { return NULL; }
- if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
- MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
- return (LZ4_stream_t*)buffer;
-}
-
-/* resetStream is now deprecated,
- * prefer initStream(), which is more general */
-void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
-{
- DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
- MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
-}
-
-void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
- LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
-}
-
-int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
-{
- if (!LZ4_stream) return 0; /* support free on NULL */
- DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
- FREEMEM(LZ4_stream);
- return (0);
-}
-
-
-#define HASH_UNIT sizeof(reg_t)
-int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
-{
- LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
- const tableType_t tableType = byU32;
- const BYTE* p = (const BYTE*)dictionary;
- const BYTE* const dictEnd = p + dictSize;
- const BYTE* base;
-
- DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
-
- /* It's necessary to reset the context,
- * and not just continue it with prepareTable()
- * to avoid any risk of generating overflowing matchIndex
- * when compressing using this dictionary */
- LZ4_resetStream(LZ4_dict);
-
- /* We always increment the offset by 64 KB, since, if the dict is longer,
- * we truncate it to the last 64k, and if it's shorter, we still want to
- * advance by a whole window length so we can provide the guarantee that
- * there are only valid offsets in the window, which allows an optimization
- * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
- * dictionary isn't a full 64k. */
- dict->currentOffset += 64 KB;
-
- if (dictSize < (int)HASH_UNIT) {
- return 0;
- }
-
- if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
- base = dictEnd - dict->currentOffset;
- dict->dictionary = p;
- dict->dictSize = (U32)(dictEnd - p);
- dict->tableType = (U32)tableType;
-
- while (p <= dictEnd-HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, tableType, base);
- p+=3;
- }
-
- return (int)dict->dictSize;
-}
-
-void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
- const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
- &(dictionaryStream->internal_donotuse);
-
- DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
- workingStream, dictionaryStream,
- dictCtx != NULL ? dictCtx->dictSize : 0);
-
- if (dictCtx != NULL) {
- /* If the current offset is zero, we will never look in the
- * external dictionary context, since there is no value a table
-         * entry can take that indicates a miss. In that case, we need
- * to bump the offset to something non-zero.
- */
- if (workingStream->internal_donotuse.currentOffset == 0) {
- workingStream->internal_donotuse.currentOffset = 64 KB;
- }
-
- /* Don't actually attach an empty dictionary.
- */
- if (dictCtx->dictSize == 0) {
- dictCtx = NULL;
- }
- }
- workingStream->internal_donotuse.dictCtx = dictCtx;
-}
-
-
-static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
-{
- assert(nextSize >= 0);
-    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bit mode) */
- /* rescale hash table */
- U32 const delta = LZ4_dict->currentOffset - 64 KB;
- const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
- int i;
- DEBUGLOG(4, "LZ4_renormDictT");
- for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
- if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
- else LZ4_dict->hashTable[i] -= delta;
- }
- LZ4_dict->currentOffset = 64 KB;
- if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
- LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
- }
-}
-
-
-int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
- const char* source, char* dest,
- int inputSize, int maxOutputSize,
- int acceleration)
-{
- const tableType_t tableType = byU32;
- LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
- const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
-
- DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
-
- LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
- if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
- if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
-
- /* invalidate tiny dictionaries */
- if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
- && (dictEnd != (const BYTE*)source) ) {
- DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
- streamPtr->dictSize = 0;
- streamPtr->dictionary = (const BYTE*)source;
- dictEnd = (const BYTE*)source;
- }
-
- /* Check overlapping input/dictionary space */
- { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
- if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
- streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
- if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
- if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
- streamPtr->dictionary = dictEnd - streamPtr->dictSize;
- }
- }
-
- /* prefix mode : source data follows dictionary */
- if (dictEnd == (const BYTE*)source) {
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
- return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
- else
- return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
- }
-
- /* external dictionary mode */
- { int result;
- if (streamPtr->dictCtx) {
- /* We depend here on the fact that dictCtx'es (produced by
- * LZ4_loadDict) guarantee that their tables contain no references
- * to offsets between dictCtx->currentOffset - 64 KB and
- * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
- * to use noDictIssue even when the dict isn't a full 64 KB.
- */
- if (inputSize > 4 KB) {
- /* For compressing large blobs, it is faster to pay the setup
- * cost to copy the dictionary's tables into the active context,
- * so that the compression loop is only looking into one table.
- */
- LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
- } else {
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
- }
- } else {
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
- } else {
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
- }
- }
- streamPtr->dictionary = (const BYTE*)source;
- streamPtr->dictSize = (U32)inputSize;
- return result;
- }
-}
-
-
-/* Hidden debug function, to force-test external dictionary mode */
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
-{
- LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
- int result;
-
- LZ4_renormDictT(streamPtr, srcSize);
-
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
- result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
- } else {
- result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
- }
-
- streamPtr->dictionary = (const BYTE*)source;
- streamPtr->dictSize = (U32)srcSize;
-
- return result;
-}
-
-
-/*! LZ4_saveDict() :
- *  If the previously compressed data block is not guaranteed to remain available at its memory location,
- *  save it into a safer place (char* safeBuffer).
- *  Note : you don't need to call LZ4_loadDict() afterwards; the dictionary
- *  is immediately usable, so you can call LZ4_compress_fast_continue() right away.
- * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
- */
-int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
-{
- LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
- const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
-
- if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
- if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
-
- if (safeBuffer == NULL) assert(dictSize == 0);
- if (dictSize > 0)
- memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
-
- dict->dictionary = (const BYTE*)safeBuffer;
- dict->dictSize = (U32)dictSize;
-
- return dictSize;
-}
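-
-/* Editor's illustrative sketch (not upstream code): a typical streaming round
- * using LZ4_loadDict(), LZ4_compress_fast_continue() and LZ4_saveDict().
- * Function and buffer names are hypothetical; in a real application the
- * stream and `safeDict` would persist across calls (and likely live on the
- * heap), but they are shown in one function for brevity. */
-static int example_stream_compress(const char* dict, int dictSize,
-                                   const char* block, int blockSize,
-                                   char* dst, int dstCapacity)
-{
-    char safeDict[64 * 1024];            /* would survive between blocks */
-    LZ4_stream_t* const s = LZ4_createStream();
-    int cSize;
-    if (s == NULL) return 0;
-    LZ4_loadDict(s, dict, dictSize);     /* prime the stream with history */
-    cSize = LZ4_compress_fast_continue(s, block, dst, blockSize, dstCapacity, 1);
-    /* if `block` may move or be overwritten before the next call, preserve
-     * the window; the dictionary is immediately usable afterwards */
-    LZ4_saveDict(s, safeDict, (int)sizeof(safeDict));
-    LZ4_freeStream(s);
-    return cSize;
-}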
-
-
-
-/*-*******************************
- * Decompression functions
- ********************************/
-
-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
-
-#undef MIN
-#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
-
-/* Read the variable-length literal or match length.
- *
- * ip - pointer to use as input.
- * lencheck - end ip. Return an error if ip advances >= lencheck.
- * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
- * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
- * error (output) - error code. Should be set to 0 before call.
- */
-typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
-LZ4_FORCE_INLINE unsigned
-read_variable_length(const BYTE**ip, const BYTE* lencheck,
- int loop_check, int initial_check,
- variable_length_error* error)
-{
- U32 length = 0;
- U32 s;
- if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
- *error = initial_error;
- return length;
- }
- do {
- s = **ip;
- (*ip)++;
- length += s;
- if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
- *error = loop_error;
- return length;
- }
- } while (s==255);
-
- return length;
-}
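-
-/* Editor's worked example of this encoding: for an extended literal run of
- * total length 300, the token nibble holds RUN_MASK (15) and this function
- * then accumulates the bytes 255 and 30 (15 + 255 + 30 = 300); the first
- * byte below 255 terminates the run. */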
-
-/*! LZ4_decompress_generic() :
- * This generic decompression function covers all use cases.
- * It shall be instantiated several times, using different sets of directives.
- * Note that it is important for performance that this function really get inlined,
- * in order to remove useless branches during compilation optimization.
- */
-LZ4_FORCE_INLINE int
-LZ4_decompress_generic(
- const char* const src,
- char* const dst,
- int srcSize,
- int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
-
- endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
- earlyEnd_directive partialDecoding, /* full, partial */
- dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
- const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
- const BYTE* const dictStart, /* only if dict==usingExtDict */
- const size_t dictSize /* note : = 0 if noDict */
- )
-{
- if ((src == NULL) || (outputSize < 0)) { return -1; }
-
- { const BYTE* ip = (const BYTE*) src;
- const BYTE* const iend = ip + srcSize;
-
- BYTE* op = (BYTE*) dst;
- BYTE* const oend = op + outputSize;
- BYTE* cpy;
-
- const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
-
- const int safeDecode = (endOnInput==endOnInputSize);
- const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
-
-
- /* Set up the "end" pointers for the shortcut. */
- const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
- const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
-
- const BYTE* match;
- size_t offset;
- unsigned token;
- size_t length;
-
-
- DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
-
- /* Special cases */
- assert(lowPrefix <= op);
- if ((endOnInput) && (unlikely(outputSize==0))) {
- /* Empty output buffer */
- if (partialDecoding) return 0;
- return ((srcSize==1) && (*ip==0)) ? 0 : -1;
- }
- if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
- if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
-
-        /* Currently the fast loop shows a regression on Qualcomm ARM chips. */
-#if LZ4_FAST_DEC_LOOP
- if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
- DEBUGLOG(6, "skip fast decode loop");
- goto safe_decode;
- }
-
- /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
- while (1) {
- /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
- assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
- if (endOnInput) { assert(ip < iend); }
- token = *ip++;
- length = token >> ML_BITS; /* literal length */
-
- assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
-
- /* decode literal length */
- if (length == RUN_MASK) {
- variable_length_error error = ok;
- length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
- if (error == initial_error) { goto _output_error; }
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
- if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
-
- /* copy literals */
- cpy = op+length;
- LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
- if (endOnInput) { /* LZ4_decompress_safe() */
- if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
- LZ4_wildCopy32(op, ip, cpy);
- } else { /* LZ4_decompress_fast() */
- if (cpy>oend-8) { goto safe_literal_copy; }
- LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
- * it doesn't know input length, and only relies on end-of-block properties */
- }
- ip += length; op = cpy;
- } else {
- cpy = op+length;
- if (endOnInput) { /* LZ4_decompress_safe() */
- DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
- /* We don't need to check oend, since we check it once for each loop below */
- if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
-                    /* Literals can only be <= 14, but hope compilers optimize if we copy by a register size */
- LZ4_memcpy(op, ip, 16);
- } else { /* LZ4_decompress_fast() */
- /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
- * it doesn't know input length, and relies on end-of-block properties */
- LZ4_memcpy(op, ip, 8);
- if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
- }
- ip += length; op = cpy;
- }
-
- /* get offset */
- offset = LZ4_readLE16(ip); ip+=2;
- match = op - offset;
- assert(match <= op);
-
- /* get matchlength */
- length = token & ML_MASK;
-
- if (length == ML_MASK) {
- variable_length_error error = ok;
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
- length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
- if (error != ok) { goto _output_error; }
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
- length += MINMATCH;
- if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
- goto safe_match_copy;
- }
- } else {
- length += MINMATCH;
- if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
- goto safe_match_copy;
- }
-
- /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
- if ((dict == withPrefix64k) || (match >= lowPrefix)) {
- if (offset >= 8) {
- assert(match >= lowPrefix);
- assert(match <= op);
- assert(op + 18 <= oend);
-
- LZ4_memcpy(op, match, 8);
- LZ4_memcpy(op+8, match+8, 8);
- LZ4_memcpy(op+16, match+16, 2);
- op += length;
- continue;
- } } }
-
- if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
- /* match starting within external dictionary */
- if ((dict==usingExtDict) && (match < lowPrefix)) {
- if (unlikely(op+length > oend-LASTLITERALS)) {
- if (partialDecoding) {
- DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
- length = MIN(length, (size_t)(oend-op));
- } else {
- goto _output_error; /* end-of-block condition violated */
- } }
-
- if (length <= (size_t)(lowPrefix-match)) {
- /* match fits entirely within external dictionary : just copy */
- memmove(op, dictEnd - (lowPrefix-match), length);
- op += length;
- } else {
- /* match stretches into both external dictionary and current block */
- size_t const copySize = (size_t)(lowPrefix - match);
- size_t const restSize = length - copySize;
- LZ4_memcpy(op, dictEnd - copySize, copySize);
- op += copySize;
- if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
- BYTE* const endOfMatch = op + restSize;
- const BYTE* copyFrom = lowPrefix;
- while (op < endOfMatch) { *op++ = *copyFrom++; }
- } else {
- LZ4_memcpy(op, lowPrefix, restSize);
- op += restSize;
- } }
- continue;
- }
-
- /* copy match within block */
- cpy = op + length;
-
- assert((op <= oend) && (oend-op >= 32));
- if (unlikely(offset<16)) {
- LZ4_memcpy_using_offset(op, match, cpy, offset);
- } else {
- LZ4_wildCopy32(op, match, cpy);
- }
-
- op = cpy; /* wildcopy correction */
- }
- safe_decode:
-#endif
-
- /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
- while (1) {
- token = *ip++;
- length = token >> ML_BITS; /* literal length */
-
- assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
-
- /* A two-stage shortcut for the most common case:
- * 1) If the literal length is 0..14, and there is enough space,
- * enter the shortcut and copy 16 bytes on behalf of the literals
- * (in the fast mode, only 8 bytes can be safely copied this way).
- * 2) Further if the match length is 4..18, copy 18 bytes in a similar
- * manner; but we ensure that there's enough space in the output for
- * those 18 bytes earlier, upon entering the shortcut (in other words,
- * there is a combined check for both stages).
- */
- if ( (endOnInput ? length != RUN_MASK : length <= 8)
- /* strictly "less than" on input, to re-enter the loop with at least one byte */
- && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
- /* Copy the literals */
- LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
- op += length; ip += length;
-
- /* The second stage: prepare for match copying, decode full info.
- * If it doesn't work out, the info won't be wasted. */
- length = token & ML_MASK; /* match length */
- offset = LZ4_readLE16(ip); ip += 2;
- match = op - offset;
- assert(match <= op); /* check overflow */
-
- /* Do not deal with overlapping matches. */
- if ( (length != ML_MASK)
- && (offset >= 8)
- && (dict==withPrefix64k || match >= lowPrefix) ) {
- /* Copy the match. */
- LZ4_memcpy(op + 0, match + 0, 8);
- LZ4_memcpy(op + 8, match + 8, 8);
- LZ4_memcpy(op +16, match +16, 2);
- op += length + MINMATCH;
- /* Both stages worked, load the next token. */
- continue;
- }
-
- /* The second stage didn't work out, but the info is ready.
- * Propel it right to the point of match copying. */
- goto _copy_match;
- }
-
- /* decode literal length */
- if (length == RUN_MASK) {
- variable_length_error error = ok;
- length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
- if (error == initial_error) { goto _output_error; }
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
- if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
- }
-
- /* copy literals */
- cpy = op+length;
-#if LZ4_FAST_DEC_LOOP
- safe_literal_copy:
-#endif
- LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
- if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
- || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
- {
- /* We've either hit the input parsing restriction or the output parsing restriction.
- * In the normal scenario, decoding a full block, it must be the last sequence,
- * otherwise it's an error (invalid input or dimensions).
- * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
- */
- if (partialDecoding) {
- /* Since we are partial decoding we may be in this block because of the output parsing
- * restriction, which is not valid since the output buffer is allowed to be undersized.
- */
- assert(endOnInput);
- DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
- DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
- DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
- DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
- /* Finishing in the middle of a literals segment,
- * due to lack of input.
- */
- if (ip+length > iend) {
- length = (size_t)(iend-ip);
- cpy = op + length;
- }
- /* Finishing in the middle of a literals segment,
- * due to lack of output space.
- */
- if (cpy > oend) {
- cpy = oend;
- assert(op<=oend);
- length = (size_t)(oend-op);
- }
- } else {
- /* We must be on the last sequence because of the parsing limitations so check
- * that we exactly regenerate the original size (must be exact when !endOnInput).
- */
- if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
- /* We must be on the last sequence (or invalid) because of the parsing limitations
- * so check that we exactly consume the input and don't overrun the output buffer.
- */
- if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
- DEBUGLOG(6, "should have been last run of literals")
- DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
- DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
- goto _output_error;
- }
- }
- memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
- ip += length;
- op += length;
- /* Necessarily EOF when !partialDecoding.
- * When partialDecoding, it is EOF if we've either
- * filled the output buffer or
- * can't proceed with reading an offset for following match.
- */
- if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
- break;
- }
- } else {
- LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
- ip += length; op = cpy;
- }
-
- /* get offset */
- offset = LZ4_readLE16(ip); ip+=2;
- match = op - offset;
-
- /* get matchlength */
- length = token & ML_MASK;
-
- _copy_match:
- if (length == ML_MASK) {
- variable_length_error error = ok;
- length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
- if (error != ok) goto _output_error;
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
- }
- length += MINMATCH;
-
-#if LZ4_FAST_DEC_LOOP
- safe_match_copy:
-#endif
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
- /* match starting within external dictionary */
- if ((dict==usingExtDict) && (match < lowPrefix)) {
- if (unlikely(op+length > oend-LASTLITERALS)) {
- if (partialDecoding) length = MIN(length, (size_t)(oend-op));
- else goto _output_error; /* doesn't respect parsing restriction */
- }
-
- if (length <= (size_t)(lowPrefix-match)) {
- /* match fits entirely within external dictionary : just copy */
- memmove(op, dictEnd - (lowPrefix-match), length);
- op += length;
- } else {
- /* match stretches into both external dictionary and current block */
- size_t const copySize = (size_t)(lowPrefix - match);
- size_t const restSize = length - copySize;
- LZ4_memcpy(op, dictEnd - copySize, copySize);
- op += copySize;
- if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
- BYTE* const endOfMatch = op + restSize;
- const BYTE* copyFrom = lowPrefix;
- while (op < endOfMatch) *op++ = *copyFrom++;
- } else {
- LZ4_memcpy(op, lowPrefix, restSize);
- op += restSize;
- } }
- continue;
- }
- assert(match >= lowPrefix);
-
- /* copy match within block */
- cpy = op + length;
-
- /* partialDecoding : may end anywhere within the block */
- assert(op<=oend);
- if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
- size_t const mlen = MIN(length, (size_t)(oend-op));
- const BYTE* const matchEnd = match + mlen;
- BYTE* const copyEnd = op + mlen;
- if (matchEnd > op) { /* overlap copy */
- while (op < copyEnd) { *op++ = *match++; }
- } else {
- LZ4_memcpy(op, match, mlen);
- }
- op = copyEnd;
- if (op == oend) { break; }
- continue;
- }
-
- if (unlikely(offset<8)) {
- LZ4_write32(op, 0); /* silence msan warning when offset==0 */
- op[0] = match[0];
- op[1] = match[1];
- op[2] = match[2];
- op[3] = match[3];
- match += inc32table[offset];
- LZ4_memcpy(op+4, match, 4);
- match -= dec64table[offset];
- } else {
- LZ4_memcpy(op, match, 8);
- match += 8;
- }
- op += 8;
-
- if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
- BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
- if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
- if (op < oCopyLimit) {
- LZ4_wildCopy8(op, match, oCopyLimit);
- match += oCopyLimit - op;
- op = oCopyLimit;
- }
- while (op < cpy) { *op++ = *match++; }
- } else {
- LZ4_memcpy(op, match, 8);
- if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
- }
- op = cpy; /* wildcopy correction */
- }
-
- /* end of decoding */
- if (endOnInput) {
- DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
- return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
- } else {
- return (int) (((const char*)ip)-src); /* Nb of input bytes read */
- }
-
- /* Overflow error detected */
- _output_error:
- return (int) (-(((const char*)ip)-src))-1;
- }
-}
-
-
-/*===== Instantiate the API decoding functions. =====*/
-
-LZ4_FORCE_O2
-int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
- endOnInputSize, decode_full_block, noDict,
- (BYTE*)dest, NULL, 0);
-}
-
-LZ4_FORCE_O2
-int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
-{
- dstCapacity = MIN(targetOutputSize, dstCapacity);
- return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
- endOnInputSize, partial_decode,
- noDict, (BYTE*)dst, NULL, 0);
-}
-
-LZ4_FORCE_O2
-int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
-{
- return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, decode_full_block, withPrefix64k,
- (BYTE*)dest - 64 KB, NULL, 0);
-}
-
-/*===== Instantiate a few more decoding cases, used more than once. =====*/
-
-LZ4_FORCE_O2 /* Exported, an obsolete API function. */
-int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block, withPrefix64k,
- (BYTE*)dest - 64 KB, NULL, 0);
-}
-
-/* Another obsolete API function, paired with the previous one. */
-int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
-{
- /* LZ4_decompress_fast doesn't validate match offsets,
- * and thus serves well with any prefixed dictionary. */
- return LZ4_decompress_fast(source, dest, originalSize);
-}
-
-LZ4_FORCE_O2
-static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
- size_t prefixSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block, noDict,
- (BYTE*)dest-prefixSize, NULL, 0);
-}
-
-LZ4_FORCE_O2
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
- int compressedSize, int maxOutputSize,
- const void* dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block, usingExtDict,
- (BYTE*)dest, (const BYTE*)dictStart, dictSize);
-}
-
-LZ4_FORCE_O2
-static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
- const void* dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, decode_full_block, usingExtDict,
- (BYTE*)dest, (const BYTE*)dictStart, dictSize);
-}
-
-/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
- * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
- * These routines are used only once, in LZ4_decompress_*_continue().
- */
-LZ4_FORCE_INLINE
-int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
- size_t prefixSize, const void* dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
- endOnInputSize, decode_full_block, usingExtDict,
- (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
-}
-
-LZ4_FORCE_INLINE
-int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
- size_t prefixSize, const void* dictStart, size_t dictSize)
-{
- return LZ4_decompress_generic(source, dest, 0, originalSize,
- endOnOutputSize, decode_full_block, usingExtDict,
- (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
-}
-
-/*===== streaming decompression functions =====*/
-
-LZ4_streamDecode_t* LZ4_createStreamDecode(void)
-{
- LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
- LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
- return lz4s;
-}
-
-int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
-{
- if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
- FREEMEM(LZ4_stream);
- return 0;
-}
-
-/*! LZ4_setStreamDecode() :
- *  Use this function to tell the decoder where to find the dictionary.
- * This function is not necessary if previous data is still available where it was decoded.
- * Loading a size of 0 is allowed (same effect as no dictionary).
- * @return : 1 if OK, 0 if error
- */
-int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
-{
- LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
- lz4sd->prefixSize = (size_t) dictSize;
- lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
- lz4sd->externalDict = NULL;
- lz4sd->extDictSize = 0;
- return 1;
-}
-
-/*! LZ4_decoderRingBufferSize() :
- * when setting a ring buffer for streaming decompression (optional scenario),
- * provides the minimum size of this ring buffer
- *  to be compatible with any source respecting the maxBlockSize condition.
- * Note : in a ring buffer scenario,
- * blocks are presumed decompressed next to each other.
- * When not enough space remains for next block (remainingSize < maxBlockSize),
- * decoding resumes from beginning of ring buffer.
- * @return : minimum ring buffer size,
- * or 0 if there is an error (invalid maxBlockSize).
- */
-int LZ4_decoderRingBufferSize(int maxBlockSize)
-{
- if (maxBlockSize < 0) return 0;
- if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
- if (maxBlockSize < 16) maxBlockSize = 16;
- return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
-}
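-
-/* Editor's illustrative sketch (not upstream code): decoding a stream of
- * blocks into a ring buffer sized with the helper above. `readBlock` is a
- * hypothetical callback returning the next compressed block size, or 0 at
- * end of stream; maxBlockSize is assumed <= 64 KB here so a stack buffer of
- * LZ4_COMPRESSBOUND(65536) bytes can hold any input block. */
-static void example_ring_decode(int maxBlockSize,
-                                int (*readBlock)(char* cmpBuf, int cmpCapacity))
-{
-    int const ringSize = LZ4_decoderRingBufferSize(maxBlockSize);
-    char* const ring = (char*)ALLOC((size_t)ringSize);
-    char cmp[LZ4_COMPRESSBOUND(65536)];
-    LZ4_streamDecode_t* const d = LZ4_createStreamDecode();
-    int offset = 0;
-    int cmpSize;
-    if ((ring != NULL) && (d != NULL)) {
-        while ((cmpSize = readBlock(cmp, (int)sizeof(cmp))) > 0) {
-            int const dSize = LZ4_decompress_safe_continue(d, cmp, ring + offset,
-                                                           cmpSize, maxBlockSize);
-            if (dSize <= 0) break;                            /* malformed input */
-            offset += dSize;
-            if (offset + maxBlockSize > ringSize) offset = 0; /* wrap around */
-        }
-    }
-    FREEMEM(ring);
-    LZ4_freeStreamDecode(d);
-}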
-
-/*
-*_continue() :
- These decoding functions allow decompression of multiple blocks in "streaming" mode.
- Previously decoded blocks must still be available at the memory position where they were decoded.
-  If that's not possible, save the relevant part of the decoded data into a safe buffer,
-  and indicate where it stands using LZ4_setStreamDecode().
-*/
-LZ4_FORCE_O2
-int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
- LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
- int result;
-
- if (lz4sd->prefixSize == 0) {
- /* The first call, no dictionary yet. */
- assert(lz4sd->extDictSize == 0);
- result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
- if (result <= 0) return result;
- lz4sd->prefixSize = (size_t)result;
- lz4sd->prefixEnd = (BYTE*)dest + result;
- } else if (lz4sd->prefixEnd == (BYTE*)dest) {
- /* They're rolling the current segment. */
- if (lz4sd->prefixSize >= 64 KB - 1)
- result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
- else if (lz4sd->extDictSize == 0)
- result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
- lz4sd->prefixSize);
- else
- result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
- lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0) return result;
- lz4sd->prefixSize += (size_t)result;
- lz4sd->prefixEnd += result;
- } else {
- /* The buffer wraps around, or they're switching to another buffer. */
- lz4sd->extDictSize = lz4sd->prefixSize;
- lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
- lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0) return result;
- lz4sd->prefixSize = (size_t)result;
- lz4sd->prefixEnd = (BYTE*)dest + result;
- }
-
- return result;
-}
-
-LZ4_FORCE_O2
-int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
-{
- LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
- int result;
- assert(originalSize >= 0);
-
- if (lz4sd->prefixSize == 0) {
- assert(lz4sd->extDictSize == 0);
- result = LZ4_decompress_fast(source, dest, originalSize);
- if (result <= 0) return result;
- lz4sd->prefixSize = (size_t)originalSize;
- lz4sd->prefixEnd = (BYTE*)dest + originalSize;
- } else if (lz4sd->prefixEnd == (BYTE*)dest) {
- if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
- result = LZ4_decompress_fast(source, dest, originalSize);
- else
- result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
- lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0) return result;
- lz4sd->prefixSize += (size_t)originalSize;
- lz4sd->prefixEnd += originalSize;
- } else {
- lz4sd->extDictSize = lz4sd->prefixSize;
- lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
- result = LZ4_decompress_fast_extDict(source, dest, originalSize,
- lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0) return result;
- lz4sd->prefixSize = (size_t)originalSize;
- lz4sd->prefixEnd = (BYTE*)dest + originalSize;
- }
-
- return result;
-}
-
-
-/*
-Advanced decoding functions :
-*_usingDict() :
-    These decoding functions work the same as the "_continue" ones,
-    except that the dictionary must be provided explicitly within the parameters.
-*/
-
-int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
- if (dictSize==0)
- return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
- if (dictStart+dictSize == dest) {
- if (dictSize >= 64 KB - 1) {
- return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
- }
- assert(dictSize >= 0);
- return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
- }
- assert(dictSize >= 0);
- return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
-}
-
-int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
-{
- if (dictSize==0 || dictStart+dictSize == dest)
- return LZ4_decompress_fast(source, dest, originalSize);
- assert(dictSize >= 0);
- return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
-}
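-
-/* Editor's illustrative sketch (not upstream code): one-shot decompression
- * against an external dictionary. `dict` must hold the same bytes that were
- * loaded with LZ4_loadDict() on the compression side; a negative return
- * value signals malformed input. */
-static int example_decompress_with_dict(const char* cmp, int cmpSize,
-                                        char* dst, int dstCapacity,
-                                        const char* dict, int dictSize)
-{
-    return LZ4_decompress_safe_usingDict(cmp, dst, cmpSize, dstCapacity,
-                                         dict, dictSize);
-}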
-
-
-/*=*************************************************
-* Obsolete Functions
-***************************************************/
-/* obsolete compression functions */
-int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
-{
- return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
-}
-int LZ4_compress(const char* src, char* dest, int srcSize)
-{
- return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
-}
-int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
-{
- return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
-}
-int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
-{
- return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
-}
-int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
-{
- return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
-}
-int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
-{
- return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
-}
-
-/*
-These decompression functions are deprecated and should no longer be used.
-They are only provided here for compatibility with older user programs.
-- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
-- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
-*/
-int LZ4_uncompress (const char* source, char* dest, int outputSize)
-{
- return LZ4_decompress_fast(source, dest, outputSize);
-}
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
-{
- return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
-}
-
-/* Obsolete Streaming functions */
-
-int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
-
-int LZ4_resetStreamState(void* state, char* inputBuffer)
-{
- (void)inputBuffer;
- LZ4_resetStream((LZ4_stream_t*)state);
- return 0;
-}
-
-void* LZ4_create (char* inputBuffer)
-{
- (void)inputBuffer;
- return LZ4_createStream();
-}
-
-char* LZ4_slideInputBuffer (void* state)
-{
- /* avoid const char * -> char * conversion warning */
- return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
-}
-
-#endif /* LZ4_COMMONDEFS_ONLY */
\ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h
deleted file mode 100644
index 7ab1e483a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * LZ4 - Fast LZ compression algorithm
- * Header File
- * Copyright (C) 2011-present, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 homepage : http://www.lz4.org
- - LZ4 source repository : https://github.com/lz4/lz4
-*/
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#ifndef LZ4_H_2983827168210
-#define LZ4_H_2983827168210
-
-/* --- Dependency --- */
-#include <stddef.h> /* size_t */
-
-
-/**
- Introduction
-
-  LZ4 is a lossless compression algorithm, providing compression speed >500 MB/s per core,
- scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
- multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
-
- The LZ4 compression library provides in-memory compression and decompression functions.
-  It gives full buffer control to the user.
- Compression can be done in:
- - a single step (described as Simple Functions)
- - a single step, reusing a context (described in Advanced Functions)
- - unbounded multiple steps (described as Streaming compression)
-
- lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
- Decompressing such a compressed block requires additional metadata.
-  The exact metadata depends on the exact decompression function.
-  For the typical case of LZ4_decompress_safe(),
-  metadata includes the block's compressed size, and a maximum bound of the decompressed size.
- Each application is free to encode and pass such metadata in whichever way it wants.
-
-  lz4.h only handles blocks; it cannot generate Frames.
-
- Blocks are different from Frames (doc/lz4_Frame_format.md).
- Frames bundle both blocks and metadata in a specified manner.
- Embedding metadata is required for compressed data to be self-contained and portable.
- Frame format is delivered through a companion API, declared in lz4frame.h.
- The `lz4` CLI can only manage frames.
-*/
-
-/*^***************************************************************
-* Export parameters
-*****************************************************************/
-/*
-* LZ4_DLL_EXPORT :
-* Enable exporting of functions when building a Windows DLL
-* LZ4LIB_VISIBILITY :
-* Control library symbols visibility.
-*/
-#ifndef LZ4LIB_VISIBILITY
-# if defined(__GNUC__) && (__GNUC__ >= 4)
-# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
-# else
-# define LZ4LIB_VISIBILITY
-# endif
-#endif
-#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
-#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
-# define LZ4LIB_API LZ4LIB_VISIBILITY
-#endif
-
-/*------ Version ------*/
-#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
-#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
-
-#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
-
-#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
-#define LZ4_QUOTE(str) #str
-#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
-#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
-
-LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */
-LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */
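-
-/* Editor's note: by the formula above, v1.9.3 maps to
- * 1*100*100 + 9*100 + 3 = 10903. An illustrative runtime guard against older
- * DLLs (e.g. to ensure the LZ4_compress_destSize() fix of v1.9.2, see below)
- * could read:
- *     if (LZ4_versionNumber() < 10902) { handle_too_old_library(); }
- * where handle_too_old_library() is a hypothetical application hook. */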
-
-
-/*-************************************
-* Tuning parameter
-**************************************/
-/*!
- * LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio.
- * Reduced memory usage may improve speed, thanks to better cache locality.
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
-#ifndef LZ4_MEMORY_USAGE
-# define LZ4_MEMORY_USAGE 14
-#endif
-
-
-/*-************************************
-* Simple Functions
-**************************************/
-/*! LZ4_compress_default() :
- * Compresses 'srcSize' bytes from buffer 'src'
- * into already allocated 'dst' buffer of size 'dstCapacity'.
- * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
- * It also runs faster, so it's a recommended setting.
- * If the function cannot compress 'src' into a more limited 'dst' budget,
- * compression stops *immediately*, and the function result is zero.
- *  In that case, 'dst' content is undefined (invalid).
- * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
- * dstCapacity : size of buffer 'dst' (which must be already allocated)
- * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
- * or 0 if compression fails
- *  Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor reads outside 'source' buffer).
- */
-LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
-
-/*! LZ4_decompress_safe() :
- * compressedSize : is the exact complete size of the compressed block.
- * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
- * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
- * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
- * If the source stream is detected malformed, the function will stop decoding and return a negative result.
- * Note 1 : This function is protected against malicious data packets :
- *          it will never write outside the 'dst' buffer, nor read outside the 'source' buffer,
- *          even if the compressed block is maliciously modified to order the decoder to do these actions.
- *          In such a case, the decoder stops immediately, and considers the compressed block malformed.
- * Note 2 : compressedSize and dstCapacity must be provided to the function; the compressed block does not contain them.
- * The implementation is free to send / store / derive this information in whichever way is most beneficial.
- * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
- */
-LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
-
-
-/*-************************************
-* Advanced Functions
-**************************************/
-#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
-#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
-
-/*! LZ4_compressBound() :
- Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
- This function is primarily useful for memory allocation purposes (destination buffer size).
- Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
- Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
- inputSize : max supported value is LZ4_MAX_INPUT_SIZE
- return : maximum output size in a "worst case" scenario
- or 0, if input size is incorrect (too large or negative)
-*/
-LZ4LIB_API int LZ4_compressBound(int inputSize);
-
-/*! LZ4_compress_fast() :
- Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
-    The larger the acceleration value, the faster the algorithm, but also the lower the compression ratio.
-    It's a trade-off that can be fine-tuned, with each successive value adding roughly +3% to speed.
-    An acceleration value of "1" is the same as regular LZ4_compress_default().
- Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
- Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
-*/
-LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-
-
-/*! LZ4_compress_fast_extState() :
- * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
- * Use LZ4_sizeofState() to know how much memory must be allocated,
- *  and allocate it on an 8-byte boundary (using `malloc()` typically).
- * Then, provide this buffer as `void* state` to compression function.
- */
-LZ4LIB_API int LZ4_sizeofState(void);
-LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
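-
-/* Editor's sketch (not upstream code) : compression with an externally
- * allocated state. malloc() satisfies the 8-byte alignment requirement.
- * Guarded out; illustration only. */
-#if 0
-#include <stdlib.h>
-static int example_extState(const char* src, char* dst, int srcSize, int dstCapacity)
-{
-    void* const state = malloc((size_t)LZ4_sizeofState());
-    int cSize = 0;
-    if (state != NULL) {
-        cSize = LZ4_compress_fast_extState(state, src, dst,
-                                           srcSize, dstCapacity,
-                                           1 /* acceleration */);
-        free(state);
-    }
-    return cSize;   /* 0 => failure (allocation failed, or dst too small) */
-}
-#endif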
-
-
-/*! LZ4_compress_destSize() :
- * Reverse the logic : compresses as much data as possible from 'src' buffer
- * into already allocated buffer 'dst', of size >= 'targetDestSize'.
- * This function either compresses the entire 'src' content into 'dst' if it's large enough,
- *  or fills 'dst' buffer completely with as much data as possible from 'src'.
- * note: acceleration parameter is fixed to "default".
- *
- *  *srcSizePtr : will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
- * New value is necessarily <= input value.
- * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
- * or 0 if compression fails.
- *
- *  Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
- * the produced compressed content could, in specific circumstances,
- * require to be decompressed into a destination buffer larger
- * by at least 1 byte than the content to decompress.
- * If an application uses `LZ4_compress_destSize()`,
- * it's highly recommended to update liblz4 to v1.9.2 or better.
- * If this can't be done or ensured,
- * the receiving decompression function should provide
- * a dstCapacity which is > decompressedSize, by at least 1 byte.
- * See https://github.com/lz4/lz4/issues/859 for details
- */
-LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
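-
-/* Editor's sketch (not upstream code) : filling a fixed-size output slot
- * with as much of 'src' as fits; '*consumed' reports the bytes read.
- * Guarded out; illustration only. */
-#if 0
-static int example_destSize(const char* src, int srcSize,
-                            char* slot, int slotSize, int* consumed)
-{
-    *consumed = srcSize;   /* in : bytes available ; out : bytes actually read */
-    return LZ4_compress_destSize(src, slot, consumed, slotSize);
-    /* result <= slotSize ; *consumed <= srcSize ; 0 => failure */
-}
-#endif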
-
-
-/*! LZ4_decompress_safe_partial() :
- * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
- * into destination buffer 'dst' of size 'dstCapacity'.
- * Up to 'targetOutputSize' bytes will be decoded.
- * The function stops decoding on reaching this objective.
- * This can be useful to boost performance
- * whenever only the beginning of a block is required.
- *
- * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
- * If source stream is detected malformed, function returns a negative result.
- *
- * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
- *
- * Note 2 : targetOutputSize must be <= dstCapacity
- *
- * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
- * so dstCapacity is kind of redundant.
- * This is because in older versions of this function,
- * decoding operation would still write complete sequences.
- * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
- * it could write more bytes, though only up to dstCapacity.
- * Some "margin" used to be required for this operation to work properly.
- * Thankfully, this is no longer necessary.
- * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
- *
- * Note 4 : If srcSize is the exact size of the block,
- * then targetOutputSize can be any value,
- * including larger than the block's decompressed size.
- * The function will, at most, generate block's decompressed size.
- *
- * Note 5 : If srcSize is _larger_ than block's compressed size,
- * then targetOutputSize **MUST** be <= block's decompressed size.
- * Otherwise, *silent corruption will occur*.
- */
-LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
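-
-/* Editor's sketch (not upstream code) : decoding only the first bytes of a
- * block, e.g. to peek at a record header without a full decompression.
- * Guarded out; illustration only. */
-#if 0
-static int example_peek(const char* cBuf, int cSize, char* head, int headSize)
-{
-    /* cSize is the exact block size here, so targetOutputSize may be smaller
-     * than the block's decompressed size (see Note 4 above). */
-    return LZ4_decompress_safe_partial(cBuf, head, cSize, headSize, headSize);
-    /* result <= headSize ; negative => malformed input */
-}
-#endif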
-
-
-/*-*********************************************
-* Streaming Compression Functions
-***********************************************/
-typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
-
-LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
-LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
-
-/*! LZ4_resetStream_fast() : v1.9.0+
- * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
- * (e.g., LZ4_compress_fast_continue()).
- *
- * An LZ4_stream_t must be initialized once before usage.
- * This is automatically done when created by LZ4_createStream().
- * However, should the LZ4_stream_t be simply declared on stack (for example),
- * it's necessary to initialize it first, using LZ4_initStream().
- *
- * After init, start any new stream with LZ4_resetStream_fast().
- *  The same LZ4_stream_t can be re-used multiple times consecutively
- * and compress multiple streams,
- * provided that it starts each new stream with LZ4_resetStream_fast().
- *
- * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
- * but is not compatible with memory regions containing garbage data.
- *
- * Note: it's only useful to call LZ4_resetStream_fast()
- * in the context of streaming compression.
- * The *extState* functions perform their own resets.
- *        Invoking LZ4_resetStream_fast() beforehand is redundant, and even counterproductive.
- */
-LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
-
-/*! LZ4_loadDict() :
- * Use this function to reference a static dictionary into LZ4_stream_t.
- * The dictionary must remain available during compression.
- * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
- * The same dictionary will have to be loaded on decompression side for successful decoding.
- *  Dictionaries are useful for better compression of small data (KB range).
- *  While LZ4 accepts any input as a dictionary,
- * results are generally better when using Zstandard's Dictionary Builder.
- * Loading a size of 0 is allowed, and is the same as reset.
- * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
- */
-LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
-
-/*! LZ4_compress_fast_continue() :
- * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
- * 'dst' buffer must be already allocated.
- * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
- *
- * @return : size of compressed block
- * or 0 if there is an error (typically, cannot fit into 'dst').
- *
- * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
- * Each block has precise boundaries.
- * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
- * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
- *
- * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory !
- *
- * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
- *           Make sure that buffers are separated by at least one byte.
- *           This construction ensures that each block only depends on the previous block.
- *
- * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
- *
- * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
- */
-LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
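-
-/* Editor's sketch (not upstream code) : chained-block compression with a
- * double buffer, so the previous chunk stays valid as required by Note 2.
- * read_chunk() and emit_block() are hypothetical I/O helpers.
- * Guarded out; illustration only. */
-#if 0
-#define CHUNK (64 * 1024)
-static void example_streamCompress(void)
-{
-    LZ4_stream_t* const ls = LZ4_createStream();
-    static char inBuf[2][CHUNK];                 /* double buffer */
-    static char outBuf[LZ4_COMPRESSBOUND(CHUNK)];
-    int idx = 0, n;
-    if (ls == NULL) return;
-    while ((n = read_chunk(inBuf[idx], CHUNK)) > 0) {        /* hypothetical source */
-        int const cSize = LZ4_compress_fast_continue(ls, inBuf[idx], outBuf,
-                                                     n, (int)sizeof(outBuf), 1);
-        if (cSize <= 0) break;                   /* error : reset or free the stream */
-        emit_block(outBuf, cSize);               /* hypothetical sink : size + payload */
-        idx ^= 1;                                /* previous buffer stays untouched */
-    }
-    LZ4_freeStream(ls);
-}
-#endif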
-
-/*! LZ4_saveDict() :
- *  If the last 64KB of data cannot be guaranteed to remain available at its current memory location,
- * save it into a safer place (char* safeBuffer).
- * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
- * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
- * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
- */
-LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
-
-
-/*-**********************************************
-* Streaming Decompression Functions
-* Bufferless synchronous API
-************************************************/
-typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
-
-/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
- * creation / destruction of streaming decompression tracking context.
- * A tracking context can be re-used multiple times.
- */
-LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
-LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
-
-/*! LZ4_setStreamDecode() :
- * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
- * Use this function to start decompression of a new stream of blocks.
- *  A dictionary can optionally be set. Use NULL or size 0 to reset the dictionary.
- * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
- * @return : 1 if OK, 0 if error
- */
-LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
-
-/*! LZ4_decoderRingBufferSize() : v1.8.2+
- * Note : in a ring buffer scenario (optional),
- * blocks are presumed decompressed next to each other
- *  up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize),
- *  at which stage decoding resumes from the beginning of the ring buffer.
- *  When setting up such a ring buffer for streaming decompression,
- *  this function provides the minimum size of the ring buffer
- *  required to be compatible with any source respecting the maxBlockSize condition.
- * @return : minimum ring buffer size,
- * or 0 if there is an error (invalid maxBlockSize).
- */
-LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
-#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
-
-/*! LZ4_decompress_*_continue() :
- * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
- *  A block is an unsplittable entity; it must be presented entirely to a decompression function.
- *  Decompression functions accept only one block at a time.
- * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
- * If less than 64KB of data has been decoded, all the data must be present.
- *
- * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
- * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
- * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
- * In which case, encoding and decoding buffers do not need to be synchronized.
- * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
- * - Synchronized mode :
- * Decompression buffer size is _exactly_ the same as compression buffer size,
- * and follows exactly same update rule (block boundaries at same positions),
- * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream),
- * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
- * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
- * In which case, encoding and decoding buffers do not need to be synchronized,
- * and encoding ring buffer can have any size, including small ones ( < 64 KB).
- *
- * Whenever these conditions are not possible,
- * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
- * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing the next block.
-*/
-LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);
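-
-/* Editor's sketch (not upstream code) : the decoding counterpart of the
- * double-buffer compression loop sketched earlier; next_block() and
- * consume() are hypothetical I/O helpers. Guarded out; illustration only. */
-#if 0
-#define CHUNK (64 * 1024)
-static void example_streamDecompress(void)
-{
-    LZ4_streamDecode_t* const lsd = LZ4_createStreamDecode();
-    static char cBuf[LZ4_COMPRESSBOUND(CHUNK)];
-    static char dBuf[2][CHUNK];                  /* previous output must stay valid */
-    int idx = 0, cSize;
-    if (lsd == NULL) return;
-    (void)LZ4_setStreamDecode(lsd, NULL, 0);     /* new stream, no dictionary */
-    while ((cSize = next_block(cBuf, (int)sizeof(cBuf))) > 0) {  /* hypothetical */
-        int const dSize = LZ4_decompress_safe_continue(lsd, cBuf, dBuf[idx],
-                                                       cSize, CHUNK);
-        if (dSize < 0) break;                    /* malformed block */
-        consume(dBuf[idx], dSize);               /* hypothetical sink */
-        idx ^= 1;
-    }
-    LZ4_freeStreamDecode(lsd);
-}
-#endif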
-
-
-/*! LZ4_decompress_*_usingDict() :
- * These decoding functions work the same as
- *  a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue().
- * They are stand-alone, and don't need an LZ4_streamDecode_t structure.
- * Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
- * Performance tip : Decompression speed can be substantially increased
- * when dst == dictStart + dictSize.
- */
-LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapacity, const char* dictStart, int dictSize);
-
-#endif /* LZ4_H_2983827168210 */
-
-
-/*^*************************************
- * !!!!!! STATIC LINKING ONLY !!!!!!
- ***************************************/
-
-/*-****************************************************************************
- * Experimental section
- *
- * Symbols declared in this section must be considered unstable. Their
- * signatures or semantics may change, or they may be removed altogether in the
- * future. They are therefore only safe to depend on when the caller is
- * statically linked against the library.
- *
- * To protect against unsafe usage, not only are the declarations guarded,
- * the definitions are hidden by default
- * when building LZ4 as a shared/dynamic library.
- *
- * In order to access these declarations,
- * define LZ4_STATIC_LINKING_ONLY in your application
- * before including LZ4's headers.
- *
- * In order to make their implementations accessible dynamically, you must
- * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
- ******************************************************************************/
-
-#ifdef LZ4_STATIC_LINKING_ONLY
-
-#ifndef LZ4_STATIC_3504398509
-#define LZ4_STATIC_3504398509
-
-#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
-#define LZ4LIB_STATIC_API LZ4LIB_API
-#else
-#define LZ4LIB_STATIC_API
-#endif
-
-
-/*! LZ4_compress_fast_extState_fastReset() :
- * A variant of LZ4_compress_fast_extState().
- *
- * Using this variant avoids an expensive initialization step.
- * It is only safe to call if the state buffer is known to be correctly initialized already
- * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
- * From a high level, the difference is that
- * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
- * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
- */
-LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-
-/*! LZ4_attach_dictionary() :
- * This is an experimental API that allows
- * efficient use of a static dictionary many times.
- *
- * Rather than re-loading the dictionary buffer into a working context before
- * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
- * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
- * in which the working stream references the dictionary stream in-place.
- *
- * Several assumptions are made about the state of the dictionary stream.
- * Currently, only streams which have been prepared by LZ4_loadDict() should
- * be expected to work.
- *
- * Alternatively, the provided dictionaryStream may be NULL,
- * in which case any existing dictionary stream is unset.
- *
- * If a dictionary is provided, it replaces any pre-existing stream history.
- * The dictionary contents are the only history that can be referenced and
- * logically immediately precede the data compressed in the first subsequent
- * compression call.
- *
- * The dictionary will only remain attached to the working stream through the
- * first compression call, at the end of which it is cleared. The dictionary
- * stream (and source buffer) must remain in-place / accessible / unchanged
- * through the completion of the first compression call on the stream.
- */
-LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
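-
-/* Editor's sketch (not upstream code) : loading a dictionary once into a
- * dedicated stream, then attaching it to a working stream before each
- * message, avoiding a full LZ4_loadDict() per compression.
- * Guarded out; illustration only. */
-#if 0
-static int example_attach(const LZ4_stream_t* dictStream,  /* prepared with LZ4_loadDict() */
-                          LZ4_stream_t* work,
-                          const char* msg, int msgSize, char* out, int outCap)
-{
-    LZ4_resetStream_fast(work);
-    LZ4_attach_dictionary(work, dictStream);     /* detached after this compression */
-    return LZ4_compress_fast_continue(work, msg, out, msgSize, outCap, 1);
-}
-#endif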
-
-
-/*! In-place compression and decompression
- *
- * It's possible to have input and output share the same buffer,
- * for highly constrained memory environments.
- * In both cases, it requires the input to lie at the end of the buffer,
- * and decompression to start at the beginning of the buffer.
- * Buffer size must feature some margin, hence be larger than the final size.
- *
- * |<------------------------buffer--------------------------------->|
- * |<-----------compressed data--------->|
- * |<-----------decompressed size------------------>|
- * |<----margin---->|
- *
- * This technique is more useful for decompression,
- * since decompressed size is typically larger,
- * and margin is short.
- *
- * In-place decompression will work inside any buffer
- * whose size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
- * This presumes that decompressedSize > compressedSize.
- * Otherwise, it means compression actually expanded data,
- * and it would be more efficient to store such data with a flag indicating it's not compressed.
- * This can happen when data is not compressible (already compressed, or encrypted).
- *
- * For in-place compression, margin is larger, as it must be able to cope with both
- * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
- * and data expansion, which can happen when input is not compressible.
- * As a consequence, buffer size requirements are much higher,
- * and memory savings offered by in-place compression are more limited.
- *
- * There are ways to limit this cost for compression :
- * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
- * Note that it is a compile-time constant, so all compressions will apply this limit.
- * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
- * so it's a reasonable trick when inputs are known to be small.
- * - Require the compressor to deliver a "maximum compressed size".
- * This is the `dstCapacity` parameter in `LZ4_compress*()`.
- * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
- * in which case, the return code will be 0 (zero).
- * The caller must be ready for these cases to happen,
- * and typically design a backup scheme to send data uncompressed.
- * The combination of both techniques can significantly reduce
- * the amount of margin required for in-place compression.
- *
- * In-place compression can work in any buffer
- * whose size is >= (maxCompressedSize)
- * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
- * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
- * so it's possible to reduce memory requirements by playing with them.
- */
-
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
-#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
-
-#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
-# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
-#endif
-
-#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
-#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
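-
-/* Editor's sketch (not upstream code) : in-place decompression. The
- * compressed payload is copied to the end of a single buffer sized with the
- * macro above, then decompressed to the beginning of the same buffer.
- * Requires cSize < dSize. Guarded out; illustration only. */
-#if 0
-#include <stdlib.h>
-#include <string.h>
-static char* example_inPlaceDecompress(const char* cData, size_t cSize, size_t dSize)
-{
-    size_t const bufSize = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(dSize);
-    char* const buf = (char*)malloc(bufSize);
-    if (buf == NULL) return NULL;
-    memcpy(buf + bufSize - cSize, cData, cSize);             /* input at the end */
-    if (LZ4_decompress_safe(buf + bufSize - cSize, buf,
-                            (int)cSize, (int)dSize) != (int)dSize) {
-        free(buf);
-        return NULL;
-    }
-    return buf;   /* decompressed data now occupies buf[0 .. dSize) */
-}
-#endif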
-
-#endif /* LZ4_STATIC_3504398509 */
-#endif /* LZ4_STATIC_LINKING_ONLY */
-
-
-
-#ifndef LZ4_H_98237428734687
-#define LZ4_H_98237428734687
-
-/*-************************************************************
- * Private Definitions
- **************************************************************
- * Do not use these definitions directly.
- * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- * Accessing members will expose user code to API and/or ABI break in future versions of the library.
- **************************************************************/
-#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
-#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
-#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
-
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
- typedef int8_t LZ4_i8;
- typedef uint8_t LZ4_byte;
- typedef uint16_t LZ4_u16;
- typedef uint32_t LZ4_u32;
-#else
- typedef signed char LZ4_i8;
- typedef unsigned char LZ4_byte;
- typedef unsigned short LZ4_u16;
- typedef unsigned int LZ4_u32;
-#endif
-
-typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
-struct LZ4_stream_t_internal {
- LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
- LZ4_u32 currentOffset;
- LZ4_u32 tableType;
- const LZ4_byte* dictionary;
- const LZ4_stream_t_internal* dictCtx;
- LZ4_u32 dictSize;
-};
-
-typedef struct {
- const LZ4_byte* externalDict;
- size_t extDictSize;
- const LZ4_byte* prefixEnd;
- size_t prefixSize;
-} LZ4_streamDecode_t_internal;
-
-
-/*! LZ4_stream_t :
- * Do not use below internal definitions directly !
- * Declare or allocate an LZ4_stream_t instead.
- * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
- * The structure definition can be convenient for static allocation
- * (on stack, or as part of larger structure).
- * Init this structure with LZ4_initStream() before first use.
- * note : only use this definition in association with static linking !
- * this definition is not API/ABI safe, and may change in future versions.
- */
-#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
-#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
-union LZ4_stream_u {
- void* table[LZ4_STREAMSIZE_VOIDP];
- LZ4_stream_t_internal internal_donotuse;
-}; /* previously typedef'd to LZ4_stream_t */
-
-
-/*! LZ4_initStream() : v1.9.0+
- * An LZ4_stream_t structure must be initialized at least once.
- * This is automatically done when invoking LZ4_createStream(),
- * but it's not when the structure is simply declared on stack (for example).
- *
- * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
- * It can also initialize any arbitrary buffer of sufficient size,
- * and will @return a pointer of proper type upon initialization.
- *
- * Note : initialization fails if size and alignment conditions are not respected.
- * In which case, the function will @return NULL.
- * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
- * Note3: Before v1.9.0, use LZ4_resetStream() instead
- */
-LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
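-
-/* Editor's sketch (not upstream code) : stack allocation of a stream,
- * initialized once with LZ4_initStream(), then recycled across independent
- * streams with LZ4_resetStream_fast(). Guarded out; illustration only. */
-#if 0
-static void example_stackStream(void)
-{
-    LZ4_stream_t ctx;                            /* no heap allocation */
-    LZ4_stream_t* const ls = LZ4_initStream(&ctx, sizeof(ctx));
-    if (ls == NULL) return;                      /* size/alignment conditions not met */
-    /* ... LZ4_compress_fast_continue(ls, ...) for the first stream ... */
-    LZ4_resetStream_fast(ls);                    /* begin a new independent stream */
-}
-#endif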
-
-
-/*! LZ4_streamDecode_t :
- * information structure to track an LZ4 stream during decompression.
- * init this structure using LZ4_setStreamDecode() before first use.
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
- */
-#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
-#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
-union LZ4_streamDecode_u {
- unsigned long long table[LZ4_STREAMDECODESIZE_U64];
- LZ4_streamDecode_t_internal internal_donotuse;
-} ; /* previously typedef'd to LZ4_streamDecode_t */
-
-
-
-/*-************************************
-* Obsolete Functions
-**************************************/
-
-/*! Deprecation warnings
- *
- * Deprecated functions make the compiler generate a warning when invoked.
- * This is meant to invite users to update their source code.
- * Should deprecation warnings be a problem, it is generally possible to disable them,
- * typically with -Wno-deprecated-declarations for gcc
- * or _CRT_SECURE_NO_WARNINGS in Visual Studio.
- *
- * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
- * before including the header file.
- */
-#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
-# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
-#else
-# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-# define LZ4_DEPRECATED(message) [[deprecated(message)]]
-# elif defined(_MSC_VER)
-# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
-# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
-# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
-# define LZ4_DEPRECATED(message) __attribute__((deprecated))
-# else
-# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
-# define LZ4_DEPRECATED(message) /* disabled */
-# endif
-#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
-
-/*! Obsolete compression functions (since v1.7.3) */
-LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
-LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
-/*! Obsolete decompression functions (since v1.8.0) */
-LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
-LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
-
-/* Obsolete streaming functions (since v1.7.0)
- * degraded functionality; do not use!
- *
- * In order to perform streaming compression, these functions depended on data
- * that is no longer tracked in the state. They have been preserved as well as
- * possible: using them will still produce a correct output. However, they don't
- * actually retain any history between compression calls. The compression ratio
- * achieved will therefore be no better than compressing each chunk
- * independently.
- */
-LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer);
-LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void);
-LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
-LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
-
-/*! Obsolete streaming decoding functions (since v1.7.0) */
-LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
-LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
-
-/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
- * These functions used to be faster than LZ4_decompress_safe(),
- * but this is no longer the case. They are now slower.
- * This is because LZ4_decompress_fast() doesn't know the input size,
- *  and therefore must progress more cautiously through the input buffer so as not to read beyond the end of the block.
- * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
- * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
- *
- * The last remaining LZ4_decompress_fast() specificity is that
- * it can decompress a block without knowing its compressed size.
- * Such functionality can be achieved in a more secure manner
- * by employing LZ4_decompress_safe_partial().
- *
- * Parameters:
- * originalSize : is the uncompressed size to regenerate.
- * `dst` must be already allocated, its size must be >= 'originalSize' bytes.
- * @return : number of bytes read from source buffer (== compressed size).
- * The function expects to finish at block's end exactly.
- * If the source stream is detected malformed, the function stops decoding and returns a negative result.
- * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
- * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
- * Also, since match offsets are not validated, match reads from 'src' may underflow too.
- * These issues never happen if input (compressed) data is correct.
- * But they may happen if input data is invalid (error or intentional tampering).
- * As a consequence, use these functions in trusted environments with trusted data **only**.
- */
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
-LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
-LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
-LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
-
-/*! LZ4_resetStream() :
- * An LZ4_stream_t structure must be initialized at least once.
- * This is done with LZ4_initStream(), or LZ4_resetStream().
- *  Consider switching to LZ4_initStream();
- *  invoking LZ4_resetStream() will trigger deprecation warnings in the future.
- */
-LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
-
-
-#endif /* LZ4_H_98237428734687 */
-
-
-#if defined (__cplusplus)
-}
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c
deleted file mode 100644
index 945f9f7a3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c
+++ /dev/null
@@ -1,1899 +0,0 @@
-/*
- * LZ4 auto-framing library
- * Copyright (C) 2011-2016, Yann Collet.
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You can contact the author at :
- * - LZ4 homepage : http://www.lz4.org
- * - LZ4 source repository : https://github.com/lz4/lz4
- */
-
-/* LZ4F is a stand-alone API to create LZ4-compressed Frames
- * in full conformance with specification v1.6.1.
- * This library relies upon memory management capabilities (malloc, free)
- * provided either by <stdlib.h>,
- * or redirected towards another library of user's choice
- * (see Memory Routines below).
- */
-
-
-/*-************************************
-* Compiler Options
-**************************************/
-#ifdef _MSC_VER /* Visual Studio */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/*-************************************
-* Tuning parameters
-**************************************/
-/*
- * LZ4F_HEAPMODE :
- * Select how the default compression functions will allocate memory for their hash table:
- * on the stack (0: default, fastest), or on the heap (1: requires malloc()).
- */
-#ifndef LZ4F_HEAPMODE
-# define LZ4F_HEAPMODE 0
-#endif
-
-
-/*-************************************
-* Memory routines
-**************************************/
-/*
- * User may redirect invocations of
- * malloc(), calloc() and free()
- * towards another library or solution of their choice
- * by modifying the section below.
- */
-#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
-#include "rd.h" /* rd_malloc, rd_calloc, rd_free */
-# define ALLOC(s) rd_malloc(s)
-# define ALLOC_AND_ZERO(s) rd_calloc(1,(s))
-# define FREEMEM(p) rd_free(p)
-#endif
-
-#include <string.h> /* memset, memcpy, memmove */
-#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
-# define MEM_INIT(p,v,s) memset((p),(v),(s))
-#endif
-
-
-/*-************************************
-* Library declarations
-**************************************/
-#define LZ4F_STATIC_LINKING_ONLY
-#include "lz4frame.h"
-#define LZ4_STATIC_LINKING_ONLY
-#include "lz4.h"
-#define LZ4_HC_STATIC_LINKING_ONLY
-#include "lz4hc.h"
-#define XXH_STATIC_LINKING_ONLY
-#include "rdxxhash.h"
-
-
-/*-************************************
-* Debug
-**************************************/
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
-# include <assert.h>
-#else
-# ifndef assert
-# define assert(condition) ((void)0)
-# endif
-#endif
-
-#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
-
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
-# include <stdio.h>
-static int g_debuglog_enable = 1;
-# define DEBUGLOG(l, ...) { \
- if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, " \n"); \
- } }
-#else
-# define DEBUGLOG(l, ...) {} /* disabled */
-#endif
-
-
-/*-************************************
-* Basic Types
-**************************************/
-#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
-#else
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
- typedef signed int S32;
- typedef unsigned long long U64;
-#endif
-
-
-/* unoptimized version; solves endianness & alignment issues */
-static U32 LZ4F_readLE32 (const void* src)
-{
- const BYTE* const srcPtr = (const BYTE*)src;
- U32 value32 = srcPtr[0];
- value32 += ((U32)srcPtr[1])<< 8;
- value32 += ((U32)srcPtr[2])<<16;
- value32 += ((U32)srcPtr[3])<<24;
- return value32;
-}
-
-static void LZ4F_writeLE32 (void* dst, U32 value32)
-{
- BYTE* const dstPtr = (BYTE*)dst;
- dstPtr[0] = (BYTE)value32;
- dstPtr[1] = (BYTE)(value32 >> 8);
- dstPtr[2] = (BYTE)(value32 >> 16);
- dstPtr[3] = (BYTE)(value32 >> 24);
-}
-
-static U64 LZ4F_readLE64 (const void* src)
-{
- const BYTE* const srcPtr = (const BYTE*)src;
- U64 value64 = srcPtr[0];
- value64 += ((U64)srcPtr[1]<<8);
- value64 += ((U64)srcPtr[2]<<16);
- value64 += ((U64)srcPtr[3]<<24);
- value64 += ((U64)srcPtr[4]<<32);
- value64 += ((U64)srcPtr[5]<<40);
- value64 += ((U64)srcPtr[6]<<48);
- value64 += ((U64)srcPtr[7]<<56);
- return value64;
-}
-
-static void LZ4F_writeLE64 (void* dst, U64 value64)
-{
- BYTE* const dstPtr = (BYTE*)dst;
- dstPtr[0] = (BYTE)value64;
- dstPtr[1] = (BYTE)(value64 >> 8);
- dstPtr[2] = (BYTE)(value64 >> 16);
- dstPtr[3] = (BYTE)(value64 >> 24);
- dstPtr[4] = (BYTE)(value64 >> 32);
- dstPtr[5] = (BYTE)(value64 >> 40);
- dstPtr[6] = (BYTE)(value64 >> 48);
- dstPtr[7] = (BYTE)(value64 >> 56);
-}
-
-
-/*-************************************
-* Constants
-**************************************/
-#ifndef LZ4_SRC_INCLUDED /* avoid double definition */
-# define KB *(1<<10)
-# define MB *(1<<20)
-# define GB *(1<<30)
-#endif
-
-#define _1BIT 0x01
-#define _2BITS 0x03
-#define _3BITS 0x07
-#define _4BITS 0x0F
-#define _8BITS 0xFF
-
-#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
-#define LZ4F_MAGICNUMBER 0x184D2204U
-#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
-#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
-
-static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
-static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
-static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
-static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
-
-
-/*-************************************
-* Structures and local types
-**************************************/
-typedef struct LZ4F_cctx_s
-{
- LZ4F_preferences_t prefs;
- U32 version;
- U32 cStage;
- const LZ4F_CDict* cdict;
- size_t maxBlockSize;
- size_t maxBufferSize;
- BYTE* tmpBuff;
- BYTE* tmpIn;
- size_t tmpInSize;
- U64 totalInSize;
- XXH32_state_t xxh;
- void* lz4CtxPtr;
- U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
- U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
-} LZ4F_cctx_t;
-
-
-/*-************************************
-* Error management
-**************************************/
-#define LZ4F_GENERATE_STRING(STRING) #STRING,
-static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
-
-
-unsigned LZ4F_isError(LZ4F_errorCode_t code)
-{
- return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
-}
-
-const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
-{
- static const char* codeError = "Unspecified error code";
- if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
- return codeError;
-}
-
-LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
-{
- if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
- return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
-}
-
-static LZ4F_errorCode_t err0r(LZ4F_errorCodes code)
-{
- /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
- LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
- return (LZ4F_errorCode_t)-(ptrdiff_t)code;
-}
-
-unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
-
-int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
-
-size_t LZ4F_getBlockSize(unsigned blockSizeID)
-{
- static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
-
- if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
- if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
- return err0r(LZ4F_ERROR_maxBlockSize_invalid);
- blockSizeID -= LZ4F_max64KB;
- return blockSizes[blockSizeID];
-}
-
-/*-************************************
-* Private functions
-**************************************/
-#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
-
-static BYTE LZ4F_headerChecksum (const void* header, size_t length)
-{
- U32 const xxh = XXH32(header, length, 0);
- return (BYTE)(xxh >> 8);
-}
-
-
-/*-************************************
-* Simple-pass compression functions
-**************************************/
-static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
- const size_t srcSize)
-{
- LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
- size_t maxBlockSize = 64 KB;
- while (requestedBSID > proposedBSID) {
- if (srcSize <= maxBlockSize)
- return proposedBSID;
- proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
- maxBlockSize <<= 2;
- }
- return requestedBSID;
-}
-
-/*! LZ4F_compressBound_internal() :
- * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
- * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
- * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
- * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
- */
-static size_t LZ4F_compressBound_internal(size_t srcSize,
- const LZ4F_preferences_t* preferencesPtr,
- size_t alreadyBuffered)
-{
- LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
- prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
- prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
- { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
- U32 const flush = prefsPtr->autoFlush | (srcSize==0);
- LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
- size_t const blockSize = LZ4F_getBlockSize(blockID);
- size_t const maxBuffered = blockSize - 1;
- size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
- size_t const maxSrcSize = srcSize + bufferedSize;
- unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
- size_t const partialBlockSize = maxSrcSize & (blockSize-1);
- size_t const lastBlockSize = flush ? partialBlockSize : 0;
- unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
-
- size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
- size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
-
- return ((BHSize + blockCRCSize) * nbBlocks) +
- (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
- }
-}
-
-size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
- LZ4F_preferences_t prefs;
- size_t const headerSize = maxFHSize; /* max header size, including optional fields */
-
- if (preferencesPtr!=NULL) prefs = *preferencesPtr;
- else MEM_INIT(&prefs, 0, sizeof(prefs));
- prefs.autoFlush = 1;
-
-    return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
-}
-
-
-/*! LZ4F_compressFrame_usingCDict() :
- * Compress srcBuffer using a dictionary, in a single step.
- * cdict can be NULL, in which case, no dictionary is used.
- * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- *  The LZ4F_preferences_t structure is optional : NULL may be provided as argument,
- *  however, since this structure is the only way to provide a dictID, omitting it is not recommended.
- * @return : number of bytes written into dstBuffer,
- * or an error code if it fails (can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const void* srcBuffer, size_t srcSize,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* preferencesPtr)
-{
- LZ4F_preferences_t prefs;
- LZ4F_compressOptions_t options;
- BYTE* const dstStart = (BYTE*) dstBuffer;
- BYTE* dstPtr = dstStart;
- BYTE* const dstEnd = dstStart + dstCapacity;
-
- if (preferencesPtr!=NULL)
- prefs = *preferencesPtr;
- else
- MEM_INIT(&prefs, 0, sizeof(prefs));
- if (prefs.frameInfo.contentSize != 0)
- prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
-
- prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
- prefs.autoFlush = 1;
- if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
- prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */
-
- MEM_INIT(&options, 0, sizeof(options));
- options.stableSrc = 1;
-
- if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) /* condition to guarantee success */
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-
- { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */
- if (LZ4F_isError(headerSize)) return headerSize;
- dstPtr += headerSize; /* header size */ }
-
- assert(dstEnd >= dstPtr);
- { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
- if (LZ4F_isError(cSize)) return cSize;
- dstPtr += cSize; }
-
- assert(dstEnd >= dstPtr);
- { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */
- if (LZ4F_isError(tailSize)) return tailSize;
- dstPtr += tailSize; }
-
- assert(dstEnd >= dstStart);
- return (size_t)(dstPtr - dstStart);
-}
-
-
-/*! LZ4F_compressFrame() :
- * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
- * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
- const void* srcBuffer, size_t srcSize,
- const LZ4F_preferences_t* preferencesPtr)
-{
- size_t result;
-#if (LZ4F_HEAPMODE)
- LZ4F_cctx_t *cctxPtr;
- result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
- if (LZ4F_isError(result)) return result;
-#else
- LZ4F_cctx_t cctx;
- LZ4_stream_t lz4ctx;
- LZ4F_cctx_t *cctxPtr = &cctx;
-
- DEBUGLOG(4, "LZ4F_compressFrame");
- MEM_INIT(&cctx, 0, sizeof(cctx));
- cctx.version = LZ4F_VERSION;
- cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
- if (preferencesPtr == NULL ||
- preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN)
- {
- LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
- cctxPtr->lz4CtxPtr = &lz4ctx;
- cctxPtr->lz4CtxAlloc = 1;
- cctxPtr->lz4CtxState = 1;
- }
-#endif
-
- result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
- srcBuffer, srcSize,
- NULL, preferencesPtr);
-
-#if (LZ4F_HEAPMODE)
- LZ4F_freeCompressionContext(cctxPtr);
-#else
- if (preferencesPtr != NULL &&
- preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN)
- {
- FREEMEM(cctxPtr->lz4CtxPtr);
- }
-#endif
- return result;
-}
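-
-/* Editor's sketch (not upstream code) : a typical one-shot caller, sizing
- * the destination with LZ4F_compressFrameBound() so the call cannot fail
- * for lack of capacity. Guarded out; illustration only. */
-#if 0
-#include <stdio.h>
-#include <stdlib.h>
-static size_t example_compressFrame(const void* src, size_t srcSize)
-{
-    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);
-    void* const dst = malloc(bound);
-    size_t r = err0r(LZ4F_ERROR_allocation_failed);
-    if (dst != NULL) {
-        r = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
-        if (LZ4F_isError(r)) fprintf(stderr, "%s\n", LZ4F_getErrorName(r));
-        /* on success : dst[0..r) holds a complete, self-describing LZ4 frame */
-        free(dst);
-    }
-    return r;
-}
-#endif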
-
-
-/*-***************************************************
-* Dictionary compression
-*****************************************************/
-
-struct LZ4F_CDict_s {
- void* dictContent;
- LZ4_stream_t* fastCtx;
- LZ4_streamHC_t* HCCtx;
-}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */
-
-/*! LZ4F_createCDict() :
- * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
- * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
- * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- *  `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within the CDict.
- * @return : digested dictionary for compression, or NULL if failed */
-LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
-{
- const char* dictStart = (const char*)dictBuffer;
- LZ4F_CDict* cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict));
- DEBUGLOG(4, "LZ4F_createCDict");
- if (!cdict) return NULL;
- if (dictSize > 64 KB) {
- dictStart += dictSize - 64 KB;
- dictSize = 64 KB;
- }
- cdict->dictContent = ALLOC(dictSize);
- cdict->fastCtx = LZ4_createStream();
- cdict->HCCtx = LZ4_createStreamHC();
- if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
- LZ4F_freeCDict(cdict);
- return NULL;
- }
- memcpy(cdict->dictContent, dictStart, dictSize);
- LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
- LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
- LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
- return cdict;
-}
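-
-/* Editor's sketch (not upstream code) : create the CDict once, reuse it
- * across many frames, release it at the end. Guarded out; illustration only. */
-#if 0
-static void example_cdict(LZ4F_cctx* cctx, const void* dict, size_t dictSize,
-                          void* dst, size_t dstCapacity,
-                          const void* src, size_t srcSize)
-{
-    LZ4F_CDict* const cdict = LZ4F_createCDict(dict, dictSize);
-    if (cdict != NULL) {
-        /* repeatable for many messages sharing the same dictionary : */
-        size_t const r = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
-                                                       src, srcSize, cdict, NULL);
-        (void)r;   /* check with LZ4F_isError(r) */
-        LZ4F_freeCDict(cdict);
-    }
-}
-#endif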
-
-void LZ4F_freeCDict(LZ4F_CDict* cdict)
-{
- if (cdict==NULL) return; /* support free on NULL */
- FREEMEM(cdict->dictContent);
- LZ4_freeStream(cdict->fastCtx);
- LZ4_freeStreamHC(cdict->HCCtx);
- FREEMEM(cdict);
-}
-
-
-/*-*********************************
-* Advanced compression functions
-***********************************/
-
-/*! LZ4F_createCompressionContext() :
- * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
- * This is achieved using LZ4F_createCompressionContext(), which takes a version number as argument.
- * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
- * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
- * If the resulting LZ4F_errorCode_t is not LZ4F_OK_NoError, there was an error during context creation.
- * The object releases its memory using LZ4F_freeCompressionContext().
- */
-LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
-{
- LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t));
- if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
-
- cctxPtr->version = version;
- cctxPtr->cStage = 0; /* Next stage : init stream */
-
- *LZ4F_compressionContextPtr = cctxPtr;
-
- return LZ4F_OK_NoError;
-}
-
-
-LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
-{
- if (cctxPtr != NULL) { /* support free on NULL */
- FREEMEM(cctxPtr->lz4CtxPtr); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
- FREEMEM(cctxPtr->tmpBuff);
- FREEMEM(cctxPtr);
- }
-
- return LZ4F_OK_NoError;
-}
-
-
-/**
- * This function prepares the internal LZ4(HC) stream for a new compression,
- * resetting the context and attaching the dictionary, if there is one.
- *
- * It needs to be called at the beginning of each independent compression
- * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
- * beginning of each block in blockIndependent mode).
- */
-static void LZ4F_initStream(void* ctx,
- const LZ4F_CDict* cdict,
- int level,
- LZ4F_blockMode_t blockMode) {
- if (level < LZ4HC_CLEVEL_MIN) {
- if (cdict != NULL || blockMode == LZ4F_blockLinked) {
- /* In these cases, we will call LZ4_compress_fast_continue(),
- * which needs an already reset context. Otherwise, we'll call a
- * one-shot API. The non-continued APIs internally perform their own
- * resets at the beginning of their calls, where they know what
- * tableType they need the context to be in. So in that case this
- * would be misguided / wasted work. */
- LZ4_resetStream_fast((LZ4_stream_t*)ctx);
- }
- LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
- } else {
- LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
- LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
- }
-}
-
-
-/*! LZ4F_compressBegin_usingCDict() :
- *  Inits streaming compression and writes the frame header into dstBuffer.
- * dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* preferencesPtr)
-{
- LZ4F_preferences_t prefNull;
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
- BYTE* headerStart;
-
- if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- MEM_INIT(&prefNull, 0, sizeof(prefNull));
- if (preferencesPtr == NULL) preferencesPtr = &prefNull;
- cctxPtr->prefs = *preferencesPtr;
-
- /* Ctx Management */
- { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
- if (cctxPtr->lz4CtxAlloc < ctxTypeID) {
- FREEMEM(cctxPtr->lz4CtxPtr);
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
- cctxPtr->lz4CtxPtr = LZ4_createStream();
- } else {
- cctxPtr->lz4CtxPtr = LZ4_createStreamHC();
- }
- if (cctxPtr->lz4CtxPtr == NULL)
- return err0r(LZ4F_ERROR_allocation_failed);
- cctxPtr->lz4CtxAlloc = ctxTypeID;
- cctxPtr->lz4CtxState = ctxTypeID;
- } else if (cctxPtr->lz4CtxState != ctxTypeID) {
- /* otherwise, a sufficient buffer is allocated, but we need to
- * reset it to the correct context type */
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
- LZ4_initStream((LZ4_stream_t *) cctxPtr->lz4CtxPtr, sizeof (LZ4_stream_t));
- } else {
- LZ4_initStreamHC((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
- LZ4_setCompressionLevel((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
- }
- cctxPtr->lz4CtxState = ctxTypeID;
- }
- }
-
- /* Buffer Management */
- if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
- cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
- cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
-
- { size_t const requiredBuffSize = preferencesPtr->autoFlush ?
- ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
- cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
-
- if (cctxPtr->maxBufferSize < requiredBuffSize) {
- cctxPtr->maxBufferSize = 0;
- FREEMEM(cctxPtr->tmpBuff);
- cctxPtr->tmpBuff = (BYTE*)ALLOC_AND_ZERO(requiredBuffSize);
- if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed);
- cctxPtr->maxBufferSize = requiredBuffSize;
- } }
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
- cctxPtr->tmpInSize = 0;
- (void)XXH32_reset(&(cctxPtr->xxh), 0);
-
- /* context init */
- cctxPtr->cdict = cdict;
- if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
- /* frame init only for blockLinked : blockIndependent will be init at each block */
- LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
- }
- if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
- LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
- }
-
- /* Magic Number */
- LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
- dstPtr += 4;
- headerStart = dstPtr;
-
- /* FLG Byte */
- *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
- + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
- + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
- + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
- + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
- + (cctxPtr->prefs.frameInfo.dictID > 0) );
- /* BD Byte */
- *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
- /* Optional Frame content size field */
- if (cctxPtr->prefs.frameInfo.contentSize) {
- LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
- dstPtr += 8;
- cctxPtr->totalInSize = 0;
- }
- /* Optional dictionary ID field */
- if (cctxPtr->prefs.frameInfo.dictID) {
- LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
- dstPtr += 4;
- }
- /* Header CRC Byte */
- *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
- dstPtr++;
-
- cctxPtr->cStage = 1; /* header written, now request input data block */
- return (size_t)(dstPtr - dstStart);
-}
-
-
-/*! LZ4F_compressBegin() :
- * inits streaming compression and writes the frame header into dstBuffer.
- * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * preferencesPtr can be NULL, in which case default parameters are selected.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_preferences_t* preferencesPtr)
-{
- return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
- NULL, preferencesPtr);
-}
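-
-/* Illustrative usage sketch (not part of the original source) :
- * starting a frame with explicit, non-default preferences.
- * Names are hypothetical; errors are simply propagated. */
-static size_t example_beginFrame(LZ4F_cctx* cctx, void* dst, size_t dstCapacity)
-{
-    LZ4F_preferences_t prefs;
-    MEM_INIT(&prefs, 0, sizeof(prefs));   /* all-zero == all defaults */
-    prefs.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;
-    if (dstCapacity < LZ4F_HEADER_SIZE_MAX)
-        return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-    return LZ4F_compressBegin(cctx, dst, dstCapacity, &prefs);   /* nb of header bytes, or an error code */
-}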
-
-
-/* LZ4F_compressBound() :
- * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
- * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
- * This function cannot fail.
- */
-size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
- if (preferencesPtr && preferencesPtr->autoFlush) {
- return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
- }
- return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
-}
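-
-/* Illustrative sketch (not part of the original source) :
- * when one large output buffer is filled gradually instead of being emptied
- * and reused from its start, the remaining capacity must be re-checked
- * against LZ4F_compressBound() before each LZ4F_compressUpdate() call. */
-static int example_hasRoomForNextUpdate(size_t remainingDstCapacity,
-                                        size_t nextSrcSize,
-                                        const LZ4F_preferences_t* prefsPtr)
-{
-    return remainingDstCapacity >= LZ4F_compressBound(nextSrcSize, prefsPtr);
-}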
-
-
-typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
-
-
-/*! LZ4F_makeBlock():
- * compress a single block, add header and optional checksum.
- * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
- */
-static size_t LZ4F_makeBlock(void* dst,
- const void* src, size_t srcSize,
- compressFunc_t compress, void* lz4ctx, int level,
- const LZ4F_CDict* cdict,
- LZ4F_blockChecksum_t crcFlag)
-{
- BYTE* const cSizePtr = (BYTE*)dst;
- U32 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
- (int)(srcSize), (int)(srcSize-1),
- level, cdict);
- if (cSize == 0) { /* compression failed */
- DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
- cSize = (U32)srcSize;
- LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
- memcpy(cSizePtr+BHSize, src, srcSize);
- } else {
- LZ4F_writeLE32(cSizePtr, cSize);
- }
- if (crcFlag) {
- U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */
- LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
- }
- return BHSize + cSize + ((U32)crcFlag)*BFSize;
-}
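-
-/* Block layout produced by LZ4F_makeBlock() (illustrative recap) :
- *   [ 4-byte little-endian size | block data | optional 4-byte XXH32 of block data ]
- * The highest bit of the size field (LZ4F_BLOCKUNCOMPRESSED_FLAG) marks a raw,
- * uncompressed block. A hypothetical reader of such a header could look like : */
-static void example_readBlockHeader(const void* blockStart,
-                                    size_t* cSizePtr, int* isUncompressedPtr)
-{
-    U32 const blockHeader = LZ4F_readLE32(blockStart);
-    *cSizePtr = blockHeader & 0x7FFFFFFFU;   /* 31-bit payload size */
-    *isUncompressedPtr = (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) != 0;
-}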
-
-
-static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
- int const acceleration = (level < 0) ? -level + 1 : 1;
- LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
- if (cdict) {
- return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
- } else {
- return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
- }
-}
-
-static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
- int const acceleration = (level < 0) ? -level + 1 : 1;
- (void)cdict; /* init once at beginning of frame */
- return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
-}
-
-static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
- LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
- if (cdict) {
- return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
- }
- return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
-}
-
-static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
- (void)level; (void)cdict; /* init once at beginning of frame */
- return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
-}
-
-static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level)
-{
- if (level < LZ4HC_CLEVEL_MIN) {
- if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
- return LZ4F_compressBlock_continue;
- }
- if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
- return LZ4F_compressBlockHC_continue;
-}
-
-static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
-{
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
- return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
- return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
-}
-
-typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
-
-/*! LZ4F_compressUpdate() :
- * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
- * dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
- * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
- * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
- * or an error code if it fails (which can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
- void* dstBuffer, size_t dstCapacity,
- const void* srcBuffer, size_t srcSize,
- const LZ4F_compressOptions_t* compressOptionsPtr)
-{
- LZ4F_compressOptions_t cOptionsNull;
- size_t const blockSize = cctxPtr->maxBlockSize;
- const BYTE* srcPtr = (const BYTE*)srcBuffer;
- const BYTE* const srcEnd = srcPtr + srcSize;
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
- LZ4F_lastBlockStatus lastBlockCompressed = notDone;
- compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
- DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
-
- if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
- if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- MEM_INIT(&cOptionsNull, 0, sizeof(cOptionsNull));
- if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull;
-
- /* complete tmp buffer */
- if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
- size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
- if (sizeToCopy > srcSize) {
- /* add src to tmpIn buffer */
- memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
- srcPtr = srcEnd;
- cctxPtr->tmpInSize += srcSize;
-            /* buffered data still gets hashed into the content checksum, from srcBuffer, below */
- } else {
- /* complete tmpIn block and then compress it */
- lastBlockCompressed = fromTmpBuffer;
- memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
- srcPtr += sizeToCopy;
-
- dstPtr += LZ4F_makeBlock(dstPtr,
- cctxPtr->tmpIn, blockSize,
- compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
- cctxPtr->cdict,
- cctxPtr->prefs.frameInfo.blockChecksumFlag);
-
- if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
- cctxPtr->tmpInSize = 0;
- }
- }
-
- while ((size_t)(srcEnd - srcPtr) >= blockSize) {
- /* compress full blocks */
- lastBlockCompressed = fromSrcBuffer;
- dstPtr += LZ4F_makeBlock(dstPtr,
- srcPtr, blockSize,
- compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
- cctxPtr->cdict,
- cctxPtr->prefs.frameInfo.blockChecksumFlag);
- srcPtr += blockSize;
- }
-
- if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
- /* compress remaining input < blockSize */
- lastBlockCompressed = fromSrcBuffer;
- dstPtr += LZ4F_makeBlock(dstPtr,
- srcPtr, (size_t)(srcEnd - srcPtr),
- compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
- cctxPtr->cdict,
- cctxPtr->prefs.frameInfo.blockChecksumFlag);
- srcPtr = srcEnd;
- }
-
- /* preserve dictionary if necessary */
- if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
- if (compressOptionsPtr->stableSrc) {
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
- } else {
- int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
- cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
- }
- }
-
- /* keep tmpIn within limits */
- if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
- && !(cctxPtr->prefs.autoFlush))
- {
- int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
- }
-
- /* some input data left, necessarily < blockSize */
- if (srcPtr < srcEnd) {
- /* fill tmp buffer */
- size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
- memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
- cctxPtr->tmpInSize = sizeToCopy;
- }
-
- if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
- (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
-
- cctxPtr->totalInSize += srcSize;
- return (size_t)(dstPtr - dstStart);
-}
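-
-/* Illustrative streaming sketch (not part of the original source) :
- * the canonical begin / update / end sequence. `writeOut` is a hypothetical
- * caller-supplied sink; dstCapacity is validated with LZ4F_compressBound()
- * before every update, and error handling is reduced to early returns. */
-static size_t example_compressChunks(LZ4F_cctx* cctx,
-                                     void* dst, size_t dstCapacity,
-                                     const void* const* chunks, const size_t* chunkSizes, size_t nbChunks,
-                                     void (*writeOut)(const void*, size_t))
-{
-    size_t n;
-    {   size_t const hSize = LZ4F_compressBegin(cctx, dst, dstCapacity, NULL);
-        if (LZ4F_isError(hSize)) return hSize;
-        writeOut(dst, hSize);                    /* frame header */
-    }
-    for (n = 0; n < nbChunks; n++) {
-        size_t cSize;
-        if (dstCapacity < LZ4F_compressBound(chunkSizes[n], NULL))
-            return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-        cSize = LZ4F_compressUpdate(cctx, dst, dstCapacity, chunks[n], chunkSizes[n], NULL);
-        if (LZ4F_isError(cSize)) return cSize;
-        writeOut(dst, cSize);                    /* cSize can be 0 : input was only buffered */
-    }
-    /* a dstCapacity >= LZ4F_compressBound(0, NULL) guarantees compressEnd success */
-    {   size_t const endSize = LZ4F_compressEnd(cctx, dst, dstCapacity, NULL);
-        if (LZ4F_isError(endSize)) return endSize;
-        writeOut(dst, endSize);                  /* endMark + optional content checksum */
-    }
-    return 0;
-}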
-
-
-/*! LZ4F_flush() :
- * When compressed data must be sent immediately, without waiting for a block to be filled,
- * invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
- * The result of the function is the number of bytes written into dstBuffer.
- * It can be zero, which means there was no data left within LZ4F_cctx.
- * The function outputs an error code if it fails (can be tested using LZ4F_isError())
- * LZ4F_compressOptions_t* is optional. NULL is a valid argument.
- */
-size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_compressOptions_t* compressOptionsPtr)
-{
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
- compressFunc_t compress;
-
- if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
- if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
- if (dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize))
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- (void)compressOptionsPtr; /* not yet useful */
-
- /* select compression function */
- compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
- /* compress tmp buffer */
- dstPtr += LZ4F_makeBlock(dstPtr,
- cctxPtr->tmpIn, cctxPtr->tmpInSize,
- compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
- cctxPtr->cdict,
- cctxPtr->prefs.frameInfo.blockChecksumFlag);
- assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));
-
- if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
- cctxPtr->tmpIn += cctxPtr->tmpInSize;
- cctxPtr->tmpInSize = 0;
-
- /* keep tmpIn within limits */
- if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
- int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
- }
-
- return (size_t)(dstPtr - dstStart);
-}
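-
-/* Illustrative sketch (not part of the original source) :
- * forcing buffered data out at a message boundary. Frequent flushes emit
- * smaller blocks and therefore cost some compression ratio;
- * dstCapacity >= LZ4F_compressBound(0, prefsPtr) guarantees success. */
-static size_t example_flushMessage(LZ4F_cctx* cctx, void* dst, size_t dstCapacity)
-{
-    size_t const flushed = LZ4F_flush(cctx, dst, dstCapacity, NULL);
-    /* flushed == 0 simply means no data was buffered within cctx */
-    return flushed;   /* nb of bytes written, or an error code */
-}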
-
-
-/*! LZ4F_compressEnd() :
- * When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
- * It will flush whatever data remained within compressionContext (like LZ4F_flush())
- * but also properly finalize the frame, with an endMark and an (optional) checksum.
- * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
- * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
- * or an error code if it fails (can be tested using LZ4F_isError())
- * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
- */
-size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_compressOptions_t* compressOptionsPtr)
-{
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
-
- size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
- DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
- if (LZ4F_isError(flushSize)) return flushSize;
- dstPtr += flushSize;
-
- assert(flushSize <= dstCapacity);
- dstCapacity -= flushSize;
-
- if (dstCapacity < 4) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- LZ4F_writeLE32(dstPtr, 0);
- dstPtr += 4; /* endMark */
-
- if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
- U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
- if (dstCapacity < 8) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- DEBUGLOG(5,"Writing 32-bit content checksum");
- LZ4F_writeLE32(dstPtr, xxh);
- dstPtr+=4; /* content Checksum */
- }
-
- cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
- cctxPtr->maxBufferSize = 0; /* reuse HC context */
-
- if (cctxPtr->prefs.frameInfo.contentSize) {
- if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
- return err0r(LZ4F_ERROR_frameSize_wrong);
- }
-
- return (size_t)(dstPtr - dstStart);
-}
-
-
-/*-***************************************************
-* Frame Decompression
-*****************************************************/
-
-typedef enum {
- dstage_getFrameHeader=0, dstage_storeFrameHeader,
- dstage_init,
- dstage_getBlockHeader, dstage_storeBlockHeader,
- dstage_copyDirect, dstage_getBlockChecksum,
- dstage_getCBlock, dstage_storeCBlock,
- dstage_flushOut,
- dstage_getSuffix, dstage_storeSuffix,
- dstage_getSFrameSize, dstage_storeSFrameSize,
- dstage_skipSkippable
-} dStage_t;
-
-struct LZ4F_dctx_s {
- LZ4F_frameInfo_t frameInfo;
- U32 version;
- dStage_t dStage;
- U64 frameRemainingSize;
- size_t maxBlockSize;
- size_t maxBufferSize;
- BYTE* tmpIn;
- size_t tmpInSize;
- size_t tmpInTarget;
- BYTE* tmpOutBuffer;
- const BYTE* dict;
- size_t dictSize;
- BYTE* tmpOut;
- size_t tmpOutSize;
- size_t tmpOutStart;
- XXH32_state_t xxh;
- XXH32_state_t blockChecksum;
- BYTE header[LZ4F_HEADER_SIZE_MAX];
-}; /* typedef'd to LZ4F_dctx in lz4frame.h */
-
-
-/*! LZ4F_createDecompressionContext() :
- * Create a decompressionContext object, which will track all decompression operations.
- * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
- * Object can later be released using LZ4F_freeDecompressionContext().
- * @return : if != 0, there was an error during context creation.
- */
-LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
-{
- LZ4F_dctx* const dctx = (LZ4F_dctx*)ALLOC_AND_ZERO(sizeof(LZ4F_dctx));
- if (dctx == NULL) { /* failed allocation */
- *LZ4F_decompressionContextPtr = NULL;
- return err0r(LZ4F_ERROR_allocation_failed);
- }
-
- dctx->version = versionNumber;
- *LZ4F_decompressionContextPtr = dctx;
- return LZ4F_OK_NoError;
-}
-
-LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
-{
- LZ4F_errorCode_t result = LZ4F_OK_NoError;
- if (dctx != NULL) { /* can accept NULL input, like free() */
- result = (LZ4F_errorCode_t)dctx->dStage;
- FREEMEM(dctx->tmpIn);
- FREEMEM(dctx->tmpOutBuffer);
- FREEMEM(dctx);
- }
- return result;
-}
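-
-/* Illustrative lifecycle sketch (not part of the original source) :
- * create, use, then release a decompression context. Note that
- * LZ4F_freeDecompressionContext() returns the last dStage : a non-zero
- * result means the context was released while a frame was still being decoded. */
-static LZ4F_errorCode_t example_dctxLifecycle(void)
-{
-    LZ4F_dctx* dctx = NULL;
-    LZ4F_errorCode_t const r = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
-    if (LZ4F_isError(r)) return r;
-    /* ... LZ4F_decompress() calls would go here ... */
-    return LZ4F_freeDecompressionContext(dctx);   /* accepts NULL, like free() */
-}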
-
-
-/*==--- Streaming Decompression operations ---==*/
-
-void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
-{
- dctx->dStage = dstage_getFrameHeader;
- dctx->dict = NULL;
- dctx->dictSize = 0;
-}
-
-
-/*! LZ4F_decodeHeader() :
- * input : `src` points at the **beginning of the frame**
- * output : set internal values of dctx, such as
- * dctx->frameInfo and dctx->dStage.
- * Also allocates internal buffers.
- * @return : nb Bytes read from src (necessarily <= srcSize)
- * or an error code (testable with LZ4F_isError())
- */
-static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
-{
- unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
- size_t frameHeaderSize;
- const BYTE* srcPtr = (const BYTE*)src;
-
- DEBUGLOG(5, "LZ4F_decodeHeader");
- /* need to decode header to get frameInfo */
- if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
- MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
-
- /* special case : skippable frames */
- if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
- dctx->frameInfo.frameType = LZ4F_skippableFrame;
- if (src == (void*)(dctx->header)) {
- dctx->tmpInSize = srcSize;
- dctx->tmpInTarget = 8;
- dctx->dStage = dstage_storeSFrameSize;
- return srcSize;
- } else {
- dctx->dStage = dstage_getSFrameSize;
- return 4;
- }
- }
-
- /* control magic number */
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
- DEBUGLOG(4, "frame header error : unknown magic number");
- return err0r(LZ4F_ERROR_frameType_unknown);
- }
-#endif
- dctx->frameInfo.frameType = LZ4F_frame;
-
- /* Flags */
- { U32 const FLG = srcPtr[4];
- U32 const version = (FLG>>6) & _2BITS;
- blockChecksumFlag = (FLG>>4) & _1BIT;
- blockMode = (FLG>>5) & _1BIT;
- contentSizeFlag = (FLG>>3) & _1BIT;
- contentChecksumFlag = (FLG>>2) & _1BIT;
- dictIDFlag = FLG & _1BIT;
- /* validate */
- if (((FLG>>1)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
- if (version != 1) return err0r(LZ4F_ERROR_headerVersion_wrong); /* Version Number, only supported value */
- }
-
- /* Frame Header Size */
- frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
-
- if (srcSize < frameHeaderSize) {
- /* not enough input to fully decode frame header */
- if (srcPtr != dctx->header)
- memcpy(dctx->header, srcPtr, srcSize);
- dctx->tmpInSize = srcSize;
- dctx->tmpInTarget = frameHeaderSize;
- dctx->dStage = dstage_storeFrameHeader;
- return srcSize;
- }
-
- { U32 const BD = srcPtr[5];
- blockSizeID = (BD>>4) & _3BITS;
- /* validate */
- if (((BD>>7)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
- if (blockSizeID < 4) return err0r(LZ4F_ERROR_maxBlockSize_invalid); /* 4-7 only supported values for the time being */
- if (((BD>>0)&_4BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */
- }
-
- /* check header */
- assert(frameHeaderSize > 5);
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
- if (HC != srcPtr[frameHeaderSize-1])
- return err0r(LZ4F_ERROR_headerChecksum_invalid);
- }
-#endif
-
- /* save */
- dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
- dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
- dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
- dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
- dctx->maxBlockSize = LZ4F_getBlockSize(blockSizeID);
- if (contentSizeFlag)
- dctx->frameRemainingSize =
- dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
- if (dictIDFlag)
- dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
-
- dctx->dStage = dstage_init;
-
- return frameHeaderSize;
-}
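-
-/* Worked example (illustrative) : the smallest possible frame header is 7 bytes.
- *   04 22 4D 18    magic number (0x184D2204, little-endian)
- *   FLG = 0x60     version '01', blockMode = independent, no checksums,
- *                  no content size, no dictID
- *   BD  = 0x40     blockSizeID = 4 (max 64 KB)
- *   HC             header checksum : second byte of the XXH32 of the FLG+BD
- *                  descriptor, as computed by LZ4F_headerChecksum()
- */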
-
-
-/*! LZ4F_headerSize() :
- * @return : size of frame header
- * or an error code, which can be tested using LZ4F_isError()
- */
-size_t LZ4F_headerSize(const void* src, size_t srcSize)
-{
- if (src == NULL) return err0r(LZ4F_ERROR_srcPtr_wrong);
-
- /* minimal srcSize to determine header size */
- if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
- return err0r(LZ4F_ERROR_frameHeader_incomplete);
-
- /* special case : skippable frames */
- if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
- return 8;
-
- /* control magic number */
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
- return err0r(LZ4F_ERROR_frameType_unknown);
-#endif
-
- /* Frame Header Size */
- { BYTE const FLG = ((const BYTE*)src)[4];
- U32 const contentSizeFlag = (FLG>>3) & _1BIT;
- U32 const dictIDFlag = FLG & _1BIT;
- return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
- }
-}
-
-/*! LZ4F_getFrameInfo() :
- * This function extracts frame parameters (max blockSize, frame checksum, etc.).
- * Usage is optional. Objective is to provide relevant information for allocation purposes.
- * This function works in 2 situations :
- * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
- * Amount of input data provided must be large enough to successfully decode the frame header.
- *   The header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
- * - After decoding has started, in which case no input is read; frame parameters are extracted from dctx.
- * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
- * Decompression must resume from (srcBuffer + *srcSizePtr).
- * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for the next call,
- * or an error code which can be tested using LZ4F_isError()
- * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
- * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
- */
-LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
- LZ4F_frameInfo_t* frameInfoPtr,
- const void* srcBuffer, size_t* srcSizePtr)
-{
- LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
- if (dctx->dStage > dstage_storeFrameHeader) {
- /* frameInfo already decoded */
- size_t o=0, i=0;
- *srcSizePtr = 0;
- *frameInfoPtr = dctx->frameInfo;
- /* returns : recommended nb of bytes for LZ4F_decompress() */
- return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
- } else {
- if (dctx->dStage == dstage_storeFrameHeader) {
- /* frame decoding already started, in the middle of header => automatic fail */
- *srcSizePtr = 0;
- return err0r(LZ4F_ERROR_frameDecoding_alreadyStarted);
- } else {
- size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
- if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
- if (*srcSizePtr < hSize) {
- *srcSizePtr=0;
- return err0r(LZ4F_ERROR_frameHeader_incomplete);
- }
-
- { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
- if (LZ4F_isError(decodeResult)) {
- *srcSizePtr = 0;
- } else {
- *srcSizePtr = decodeResult;
- decodeResult = BHSize; /* block header size */
- }
- *frameInfoPtr = dctx->frameInfo;
- return decodeResult;
- } } }
-}
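-
-/* Illustrative sketch (not part of the original source) :
- * peeking at frame parameters to size an output buffer before decoding.
- * `src` is assumed to hold at least the complete frame header. */
-static size_t example_peekBlockSize(LZ4F_dctx* dctx, const void* src, size_t srcSize)
-{
-    LZ4F_frameInfo_t info;
-    size_t consumed = srcSize;
-    size_t const hint = LZ4F_getFrameInfo(dctx, &info, src, &consumed);
-    if (LZ4F_isError(hint)) return hint;
-    /* `consumed` bytes of src are now decoded : resume from src + consumed.
-     * A dst buffer of LZ4F_getBlockSize(info.blockSizeID) bytes can always
-     * hold one fully decoded block. */
-    return LZ4F_getBlockSize(info.blockSizeID);
-}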
-
-
-/* LZ4F_updateDict() :
- * only used for LZ4F_blockLinked mode
- * Condition : dstPtr != NULL
- */
-static void LZ4F_updateDict(LZ4F_dctx* dctx,
- const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
- unsigned withinTmp)
-{
- assert(dstPtr != NULL);
- if (dctx->dictSize==0) {
- dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
- }
- assert(dctx->dict != NULL);
-
- if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
- dctx->dictSize += dstSize;
- return;
- }
-
- assert(dstPtr >= dstBufferStart);
- if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */
- dctx->dict = (const BYTE*)dstBufferStart;
- dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
- return;
- }
-
- assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
-
- /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
- assert(dctx->tmpOutBuffer != NULL);
-
- if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
- /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
- assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
- dctx->dictSize += dstSize;
- return;
- }
-
- if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
- size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
- size_t copySize = 64 KB - dctx->tmpOutSize;
- const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
- if (dctx->tmpOutSize > 64 KB) copySize = 0;
- if (copySize > preserveSize) copySize = preserveSize;
-
- memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
-
- dctx->dict = dctx->tmpOutBuffer;
- dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
- return;
- }
-
- if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */
- if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */
- size_t const preserveSize = 64 KB - dstSize;
- memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
- dctx->dictSize = preserveSize;
- }
- memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
- dctx->dictSize += dstSize;
- return;
- }
-
- /* join dict & dest into tmp */
- { size_t preserveSize = 64 KB - dstSize;
- if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
- memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
- memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
- dctx->dict = dctx->tmpOutBuffer;
- dctx->dictSize = preserveSize + dstSize;
- }
-}
-
-
-
-/*! LZ4F_decompress() :
- * Call this function repetitively to regenerate decompressed data from the compressed data in srcBuffer.
- * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
- * into dstBuffer of capacity *dstSizePtr.
- *
- * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
- *
- * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
- * If the number of bytes read is < the number of bytes provided, then the decompression operation is not complete.
- * Remaining data will have to be presented again in a subsequent invocation.
- *
- * The function result is a hint of the preferred srcSize to use for the next call to LZ4F_decompress.
- * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
- * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
- * Note that this is just a hint; it's always possible to provide any srcSize value.
- * When a frame is fully decoded, @return will be 0.
- * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
- */
-size_t LZ4F_decompress(LZ4F_dctx* dctx,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const LZ4F_decompressOptions_t* decompressOptionsPtr)
-{
- LZ4F_decompressOptions_t optionsNull;
- const BYTE* const srcStart = (const BYTE*)srcBuffer;
- const BYTE* const srcEnd = srcStart + *srcSizePtr;
- const BYTE* srcPtr = srcStart;
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
- BYTE* dstPtr = dstStart;
- const BYTE* selectedIn = NULL;
- unsigned doAnotherStage = 1;
- size_t nextSrcSizeHint = 1;
-
-
- DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
- srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
- if (dstBuffer == NULL) assert(*dstSizePtr == 0);
- MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
- if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
- *srcSizePtr = 0;
- *dstSizePtr = 0;
- assert(dctx != NULL);
-
- /* behaves as a state machine */
-
- while (doAnotherStage) {
-
- switch(dctx->dStage)
- {
-
- case dstage_getFrameHeader:
- DEBUGLOG(6, "dstage_getFrameHeader");
- if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
- size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
- if (LZ4F_isError(hSize)) return hSize;
- srcPtr += hSize;
- break;
- }
- dctx->tmpInSize = 0;
- if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */
- dctx->tmpInTarget = minFHSize; /* minimum size to decode header */
- dctx->dStage = dstage_storeFrameHeader;
- /* fall-through */
-
- case dstage_storeFrameHeader:
- DEBUGLOG(6, "dstage_storeFrameHeader");
- { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
- memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
- dctx->tmpInSize += sizeToCopy;
- srcPtr += sizeToCopy;
- }
- if (dctx->tmpInSize < dctx->tmpInTarget) {
- nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */
- doAnotherStage = 0; /* not enough src data, ask for some more */
- break;
- }
- { size_t const hSize = LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget); /* will update dStage appropriately */
- if (LZ4F_isError(hSize)) return hSize;
- }
- break;
-
- case dstage_init:
- DEBUGLOG(6, "dstage_init");
- if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
- /* internal buffers allocation */
- { size_t const bufferNeeded = dctx->maxBlockSize
- + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
- if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */
- dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/
- FREEMEM(dctx->tmpIn);
- dctx->tmpIn = (BYTE*)ALLOC(dctx->maxBlockSize + BFSize /* block checksum */);
- if (dctx->tmpIn == NULL)
- return err0r(LZ4F_ERROR_allocation_failed);
- FREEMEM(dctx->tmpOutBuffer);
-                dctx->tmpOutBuffer = (BYTE*)ALLOC(bufferNeeded);
-                if (dctx->tmpOutBuffer == NULL)
- return err0r(LZ4F_ERROR_allocation_failed);
- dctx->maxBufferSize = bufferNeeded;
- } }
- dctx->tmpInSize = 0;
- dctx->tmpInTarget = 0;
- dctx->tmpOut = dctx->tmpOutBuffer;
- dctx->tmpOutStart = 0;
- dctx->tmpOutSize = 0;
-
- dctx->dStage = dstage_getBlockHeader;
- /* fall-through */
-
- case dstage_getBlockHeader:
- if ((size_t)(srcEnd - srcPtr) >= BHSize) {
- selectedIn = srcPtr;
- srcPtr += BHSize;
- } else {
- /* not enough input to read cBlockSize field */
- dctx->tmpInSize = 0;
- dctx->dStage = dstage_storeBlockHeader;
- }
-
- if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */
- case dstage_storeBlockHeader:
- { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
- size_t const wantedData = BHSize - dctx->tmpInSize;
- size_t const sizeToCopy = MIN(wantedData, remainingInput);
- memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
- srcPtr += sizeToCopy;
- dctx->tmpInSize += sizeToCopy;
-
- if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */
- nextSrcSizeHint = BHSize - dctx->tmpInSize;
- doAnotherStage = 0;
- break;
- }
- selectedIn = dctx->tmpIn;
- } /* if (dctx->dStage == dstage_storeBlockHeader) */
-
- /* decode block header */
- { U32 const blockHeader = LZ4F_readLE32(selectedIn);
- size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
- size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
-            if (blockHeader==0) { /* frameEnd signal, no more blocks */
- DEBUGLOG(5, "end of frame");
- dctx->dStage = dstage_getSuffix;
- break;
- }
- if (nextCBlockSize > dctx->maxBlockSize) {
- return err0r(LZ4F_ERROR_maxBlockSize_invalid);
- }
- if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
- /* next block is uncompressed */
- dctx->tmpInTarget = nextCBlockSize;
- DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
- if (dctx->frameInfo.blockChecksumFlag) {
- (void)XXH32_reset(&dctx->blockChecksum, 0);
- }
- dctx->dStage = dstage_copyDirect;
- break;
- }
- /* next block is a compressed block */
- dctx->tmpInTarget = nextCBlockSize + crcSize;
- dctx->dStage = dstage_getCBlock;
- if (dstPtr==dstEnd || srcPtr==srcEnd) {
- nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
- doAnotherStage = 0;
- }
- break;
- }
-
- case dstage_copyDirect: /* uncompressed block */
- DEBUGLOG(6, "dstage_copyDirect");
- { size_t sizeToCopy;
- if (dstPtr == NULL) {
- sizeToCopy = 0;
- } else {
- size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
- sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
- memcpy(dstPtr, srcPtr, sizeToCopy);
- if (dctx->frameInfo.blockChecksumFlag) {
- (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
- }
- if (dctx->frameInfo.contentChecksumFlag)
- (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
- if (dctx->frameInfo.contentSize)
- dctx->frameRemainingSize -= sizeToCopy;
-
- /* history management (linked blocks only)*/
- if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
- LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
- } }
-
- srcPtr += sizeToCopy;
- dstPtr += sizeToCopy;
- if (sizeToCopy == dctx->tmpInTarget) { /* all done */
- if (dctx->frameInfo.blockChecksumFlag) {
- dctx->tmpInSize = 0;
- dctx->dStage = dstage_getBlockChecksum;
- } else
- dctx->dStage = dstage_getBlockHeader; /* new block */
- break;
- }
- dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
- }
-            nextSrcSizeHint = dctx->tmpInTarget
-                            + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
-                            + BHSize /* next header size */;
- doAnotherStage = 0;
- break;
-
- /* check block checksum for recently transferred uncompressed block */
- case dstage_getBlockChecksum:
- DEBUGLOG(6, "dstage_getBlockChecksum");
- { const void* crcSrc;
- if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
- crcSrc = srcPtr;
- srcPtr += 4;
- } else {
- size_t const stillToCopy = 4 - dctx->tmpInSize;
- size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
- memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
- dctx->tmpInSize += sizeToCopy;
- srcPtr += sizeToCopy;
- if (dctx->tmpInSize < 4) { /* all input consumed */
- doAnotherStage = 0;
- break;
- }
- crcSrc = dctx->header;
- }
- { U32 const readCRC = LZ4F_readLE32(crcSrc);
- U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- DEBUGLOG(6, "compare block checksum");
- if (readCRC != calcCRC) {
- DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
- readCRC, calcCRC);
- return err0r(LZ4F_ERROR_blockChecksum_invalid);
- }
-#else
- (void)readCRC;
- (void)calcCRC;
-#endif
- } }
- dctx->dStage = dstage_getBlockHeader; /* new block */
- break;
-
- case dstage_getCBlock:
- DEBUGLOG(6, "dstage_getCBlock");
- if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
- dctx->tmpInSize = 0;
- dctx->dStage = dstage_storeCBlock;
- break;
- }
- /* input large enough to read full block directly */
- selectedIn = srcPtr;
- srcPtr += dctx->tmpInTarget;
-
- if (0) /* always jump over next block */
- case dstage_storeCBlock:
- { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
- size_t const inputLeft = (size_t)(srcEnd-srcPtr);
- size_t const sizeToCopy = MIN(wantedData, inputLeft);
- memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
- dctx->tmpInSize += sizeToCopy;
- srcPtr += sizeToCopy;
- if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
- nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
- + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
- + BHSize /* next header size */;
- doAnotherStage = 0;
- break;
- }
- selectedIn = dctx->tmpIn;
- }
-
- /* At this stage, input is large enough to decode a block */
- if (dctx->frameInfo.blockChecksumFlag) {
- dctx->tmpInTarget -= 4;
- assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
- { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
- U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (readBlockCrc != calcBlockCrc)
- return err0r(LZ4F_ERROR_blockChecksum_invalid);
-#else
- (void)readBlockCrc;
- (void)calcBlockCrc;
-#endif
- } }
-
- if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) {
- const char* dict = (const char*)dctx->dict;
- size_t dictSize = dctx->dictSize;
- int decodedSize;
- assert(dstPtr != NULL);
- if (dict && dictSize > 1 GB) {
- /* the dictSize param is an int, avoid truncation / sign issues */
- dict += dictSize - 64 KB;
- dictSize = 64 KB;
- }
- /* enough capacity in `dst` to decompress directly there */
- decodedSize = LZ4_decompress_safe_usingDict(
- (const char*)selectedIn, (char*)dstPtr,
- (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
- dict, (int)dictSize);
- if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */
- if (dctx->frameInfo.contentChecksumFlag)
- XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
- if (dctx->frameInfo.contentSize)
- dctx->frameRemainingSize -= (size_t)decodedSize;
-
- /* dictionary management */
- if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
- LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
- }
-
- dstPtr += decodedSize;
- dctx->dStage = dstage_getBlockHeader;
- break;
- }
-
- /* not enough place into dst : decode into tmpOut */
- /* ensure enough place for tmpOut */
- if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
- if (dctx->dict == dctx->tmpOutBuffer) {
- if (dctx->dictSize > 128 KB) {
- memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
- dctx->dictSize = 64 KB;
- }
- dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
- } else { /* dict not within tmp */
- size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
- dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
- } }
-
- /* Decode block */
- { const char* dict = (const char*)dctx->dict;
- size_t dictSize = dctx->dictSize;
- int decodedSize;
- if (dict && dictSize > 1 GB) {
- /* the dictSize param is an int, avoid truncation / sign issues */
- dict += dictSize - 64 KB;
- dictSize = 64 KB;
- }
- decodedSize = LZ4_decompress_safe_usingDict(
- (const char*)selectedIn, (char*)dctx->tmpOut,
- (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
- dict, (int)dictSize);
- if (decodedSize < 0) /* decompression failed */
- return err0r(LZ4F_ERROR_decompressionFailed);
- if (dctx->frameInfo.contentChecksumFlag)
- XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
- if (dctx->frameInfo.contentSize)
- dctx->frameRemainingSize -= (size_t)decodedSize;
- dctx->tmpOutSize = (size_t)decodedSize;
- dctx->tmpOutStart = 0;
- dctx->dStage = dstage_flushOut;
- }
- /* fall-through */
-
- case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
- DEBUGLOG(6, "dstage_flushOut");
- if (dstPtr != NULL) {
- size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
- memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
-
- /* dictionary management */
- if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
- LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);
-
- dctx->tmpOutStart += sizeToCopy;
- dstPtr += sizeToCopy;
- }
- if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
- dctx->dStage = dstage_getBlockHeader; /* get next block */
- break;
- }
- /* could not flush everything : stop there, just request a block header */
- doAnotherStage = 0;
- nextSrcSizeHint = BHSize;
- break;
-
- case dstage_getSuffix:
- if (dctx->frameRemainingSize)
- return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */
- if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */
- nextSrcSizeHint = 0;
- LZ4F_resetDecompressionContext(dctx);
- doAnotherStage = 0;
- break;
- }
- if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */
- dctx->tmpInSize = 0;
- dctx->dStage = dstage_storeSuffix;
- } else {
- selectedIn = srcPtr;
- srcPtr += 4;
- }
-
- if (dctx->dStage == dstage_storeSuffix) /* can be skipped */
- case dstage_storeSuffix:
- { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
- size_t const wantedData = 4 - dctx->tmpInSize;
- size_t const sizeToCopy = MIN(wantedData, remainingInput);
- memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
- srcPtr += sizeToCopy;
- dctx->tmpInSize += sizeToCopy;
- if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
- nextSrcSizeHint = 4 - dctx->tmpInSize;
- doAnotherStage=0;
- break;
- }
- selectedIn = dctx->tmpIn;
- } /* if (dctx->dStage == dstage_storeSuffix) */
-
- /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
- { U32 const readCRC = LZ4F_readLE32(selectedIn);
- U32 const resultCRC = XXH32_digest(&(dctx->xxh));
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (readCRC != resultCRC)
- return err0r(LZ4F_ERROR_contentChecksum_invalid);
-#else
- (void)readCRC;
- (void)resultCRC;
-#endif
- nextSrcSizeHint = 0;
- LZ4F_resetDecompressionContext(dctx);
- doAnotherStage = 0;
- break;
- }
-
- case dstage_getSFrameSize:
- if ((srcEnd - srcPtr) >= 4) {
- selectedIn = srcPtr;
- srcPtr += 4;
- } else {
-                /* not enough input to read the sFrameSize field */
- dctx->tmpInSize = 4;
- dctx->tmpInTarget = 8;
- dctx->dStage = dstage_storeSFrameSize;
- }
-
- if (dctx->dStage == dstage_storeSFrameSize)
- case dstage_storeSFrameSize:
- { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
- (size_t)(srcEnd - srcPtr) );
- memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
- srcPtr += sizeToCopy;
- dctx->tmpInSize += sizeToCopy;
- if (dctx->tmpInSize < dctx->tmpInTarget) {
- /* not enough input to get full sBlockSize; wait for more */
- nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
- doAnotherStage = 0;
- break;
- }
- selectedIn = dctx->header + 4;
- } /* if (dctx->dStage == dstage_storeSFrameSize) */
-
- /* case dstage_decodeSFrameSize: */ /* no direct entry */
- { size_t const SFrameSize = LZ4F_readLE32(selectedIn);
- dctx->frameInfo.contentSize = SFrameSize;
- dctx->tmpInTarget = SFrameSize;
- dctx->dStage = dstage_skipSkippable;
- break;
- }
-
- case dstage_skipSkippable:
- { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
- srcPtr += skipSize;
- dctx->tmpInTarget -= skipSize;
- doAnotherStage = 0;
- nextSrcSizeHint = dctx->tmpInTarget;
- if (nextSrcSizeHint) break; /* still more to skip */
- /* frame fully skipped : prepare context for a new frame */
- LZ4F_resetDecompressionContext(dctx);
- break;
- }
- } /* switch (dctx->dStage) */
- } /* while (doAnotherStage) */
-
- /* preserve history within tmp whenever necessary */
- LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
- if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
- && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
- && (dctx->dict != NULL) /* dictionary exists */
- && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
- && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
- {
- if (dctx->dStage == dstage_flushOut) {
- size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
- size_t copySize = 64 KB - dctx->tmpOutSize;
- const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
- if (dctx->tmpOutSize > 64 KB) copySize = 0;
- if (copySize > preserveSize) copySize = preserveSize;
- assert(dctx->tmpOutBuffer != NULL);
-
- memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
-
- dctx->dict = dctx->tmpOutBuffer;
- dctx->dictSize = preserveSize + dctx->tmpOutStart;
- } else {
- const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
- size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
-
- memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
-
- dctx->dict = dctx->tmpOutBuffer;
- dctx->dictSize = newDictSize;
- dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
- }
- }
-
- *srcSizePtr = (size_t)(srcPtr - srcStart);
- *dstSizePtr = (size_t)(dstPtr - dstStart);
- return nextSrcSizeHint;
-}
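-
-/* Illustrative streaming sketch (not part of the original source) :
- * draining one complete frame from a single input buffer. The return value
- * of LZ4F_decompress() drops to 0 once the frame is fully decoded.
- * A real consumer would use the `dstSize` bytes produced on each iteration
- * before they are overwritten. */
-static size_t example_decompressFrame(LZ4F_dctx* dctx,
-                                      void* dst, size_t dstCapacity,
-                                      const void* src, size_t srcSize)
-{
-    const BYTE* srcPtr = (const BYTE*)src;
-    const BYTE* const srcEnd = srcPtr + srcSize;
-    size_t hint = 1;
-    while (hint != 0 && srcPtr < srcEnd) {
-        size_t dstSize = dstCapacity;               /* in : capacity ; out : nb bytes produced */
-        size_t srcRead = (size_t)(srcEnd - srcPtr); /* in : available ; out : nb bytes consumed */
-        hint = LZ4F_decompress(dctx, dst, &dstSize, srcPtr, &srcRead, NULL);
-        if (LZ4F_isError(hint)) return hint;
-        srcPtr += srcRead;
-    }
-    return hint;   /* 0 : frame complete ; >0 : more input still expected */
-}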
-
-/*! LZ4F_decompress_usingDict() :
- * Same as LZ4F_decompress(), using a predefined dictionary.
- * Dictionary is used "in place", without any preprocessing.
- * It must remain accessible throughout the entire frame decoding.
- */
-size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const void* dict, size_t dictSize,
- const LZ4F_decompressOptions_t* decompressOptionsPtr)
-{
- if (dctx->dStage <= dstage_init) {
- dctx->dict = (const BYTE*)dict;
- dctx->dictSize = dictSize;
- }
- return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
- srcBuffer, srcSizePtr,
- decompressOptionsPtr);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h
deleted file mode 100644
index 4573317ef..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- LZ4 auto-framing library
- Header File
- Copyright (C) 2011-2017, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-/* LZ4F is a stand-alone API able to create and decode LZ4 frames
- * conformant with specification v1.6.1 in doc/lz4_Frame_format.md .
- * Generated frames are compatible with `lz4` CLI.
- *
- * LZ4F also offers streaming capabilities.
- *
- * lz4.h is not required when using lz4frame.h,
- *  except to extract common constants such as LZ4_VERSION_NUMBER.
- * */
-
-#ifndef LZ4F_H_09782039843
-#define LZ4F_H_09782039843
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* --- Dependency --- */
-#include <stddef.h> /* size_t */
-
-
-/**
- Introduction
-
- lz4frame.h implements LZ4 frame specification (doc/lz4_Frame_format.md).
- lz4frame.h provides frame compression functions that take care
- of encoding standard metadata alongside LZ4-compressed blocks.
-*/
-
-/*-***************************************************************
- * Compiler specifics
- *****************************************************************/
-/* LZ4_DLL_EXPORT :
- * Enable exporting of functions when building a Windows DLL
- * LZ4FLIB_VISIBILITY :
- * Control library symbols visibility.
- */
-#ifndef LZ4FLIB_VISIBILITY
-# if defined(__GNUC__) && (__GNUC__ >= 4)
-# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
-# else
-# define LZ4FLIB_VISIBILITY
-# endif
-#endif
-#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
-#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
-#else
-# define LZ4FLIB_API LZ4FLIB_VISIBILITY
-#endif
-
-#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
-# define LZ4F_DEPRECATE(x) x
-#else
-# if defined(_MSC_VER)
-# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */
-# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6))
-# define LZ4F_DEPRECATE(x) x __attribute__((deprecated))
-# else
-# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */
-# endif
-#endif
-
-
-/*-************************************
- * Error management
- **************************************/
-typedef size_t LZ4F_errorCode_t;
-
-LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */
-LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */
-
-
-/*-************************************
- * Frame compression types
- ************************************* */
-/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */
-#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
-# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
-#else
-# define LZ4F_OBSOLETE_ENUM(x)
-#endif
-
-/* The larger the block size, the (slightly) better the compression ratio,
- * though there are diminishing returns.
- * Larger blocks also increase memory usage on both compression and decompression sides.
- */
-typedef enum {
- LZ4F_default=0,
- LZ4F_max64KB=4,
- LZ4F_max256KB=5,
- LZ4F_max1MB=6,
- LZ4F_max4MB=7
- LZ4F_OBSOLETE_ENUM(max64KB)
- LZ4F_OBSOLETE_ENUM(max256KB)
- LZ4F_OBSOLETE_ENUM(max1MB)
- LZ4F_OBSOLETE_ENUM(max4MB)
-} LZ4F_blockSizeID_t;
-
-/* Linked blocks sharply reduce inefficiencies when using small blocks;
- * they compress better.
- * However, some LZ4 decoders are only compatible with independent blocks. */
-typedef enum {
- LZ4F_blockLinked=0,
- LZ4F_blockIndependent
- LZ4F_OBSOLETE_ENUM(blockLinked)
- LZ4F_OBSOLETE_ENUM(blockIndependent)
-} LZ4F_blockMode_t;
-
-typedef enum {
- LZ4F_noContentChecksum=0,
- LZ4F_contentChecksumEnabled
- LZ4F_OBSOLETE_ENUM(noContentChecksum)
- LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
-} LZ4F_contentChecksum_t;
-
-typedef enum {
- LZ4F_noBlockChecksum=0,
- LZ4F_blockChecksumEnabled
-} LZ4F_blockChecksum_t;
-
-typedef enum {
- LZ4F_frame=0,
- LZ4F_skippableFrame
- LZ4F_OBSOLETE_ENUM(skippableFrame)
-} LZ4F_frameType_t;
-
-#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
-typedef LZ4F_blockSizeID_t blockSizeID_t;
-typedef LZ4F_blockMode_t blockMode_t;
-typedef LZ4F_frameType_t frameType_t;
-typedef LZ4F_contentChecksum_t contentChecksum_t;
-#endif
-
-/*! LZ4F_frameInfo_t :
- * makes it possible to set or read frame parameters.
- * Structure must first be initialized to 0, using memset() or LZ4F_INIT_FRAMEINFO,
- * which sets all parameters to default.
- * It's then possible to selectively update some parameters */
-typedef struct {
- LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */
- LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
- LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
- LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
- unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */
- unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
- LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
-} LZ4F_frameInfo_t;
-
-#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
-
-/*! LZ4F_preferences_t :
- * makes it possible to supply advanced compression instructions to streaming interface.
- * Structure must first be initialized to 0, using memset() or LZ4F_INIT_PREFERENCES,
- * which sets all parameters to default.
- * All reserved fields must be set to zero. */
-typedef struct {
- LZ4F_frameInfo_t frameInfo;
- int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
- unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */
- unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */
- unsigned reserved[3]; /* must be zero for forward compatibility */
-} LZ4F_preferences_t;
-
-#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */
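-
-/* Illustrative initialization sketch (not part of the original header) :
- *     LZ4F_preferences_t prefs = LZ4F_INIT_PREFERENCES;  // all defaults
- *     prefs.frameInfo.blockSizeID = LZ4F_max256KB;       // larger blocks
- *     prefs.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;
- *     prefs.compressionLevel = 9;                        // LZ4HC territory
- * The initializer leaves reserved fields at zero, as required. */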
-
-
-/*-*********************************
-* Simple compression function
-***********************************/
-
-LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
-
-/*! LZ4F_compressFrameBound() :
- * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
- * `preferencesPtr` is optional. It can be replaced by NULL, in which case the function will assume default preferences.
- * Note : this result is only usable with LZ4F_compressFrame().
- * It may also be used with LZ4F_compressUpdate() _if no flush() operation_ is performed.
- */
-LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
-
-/*! LZ4F_compressFrame() :
- * Compress an entire srcBuffer into a valid LZ4 frame.
- * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
- const void* srcBuffer, size_t srcSize,
- const LZ4F_preferences_t* preferencesPtr);
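-
-/* Illustrative one-shot sketch (not part of the original header) :
- *     size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);
- *     void* const dst = malloc(bound);                   // hypothetical allocation
- *     size_t const cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
- *     if (LZ4F_isError(cSize)) { handle error }          // cSize is an error code
- *     // otherwise dst now holds a complete, self-contained frame of cSize bytes
- */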
-
-
-/*-***********************************
-* Advanced compression functions
-*************************************/
-typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */
-typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */
-
-typedef struct {
- unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
- unsigned reserved[3];
-} LZ4F_compressOptions_t;
-
-/*--- Resource Management ---*/
-
-#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */
-LZ4FLIB_API unsigned LZ4F_getVersion(void);
-
-/*! LZ4F_createCompressionContext() :
- * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
- * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version.
- * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatches, notably when using a DLL.
- * The function will provide a pointer to a fully allocated LZ4F_cctx object.
- * If @return != zero, there was an error during context creation.
- * The object's memory can later be released using LZ4F_freeCompressionContext();
- */
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
-
-
-/*---- Compression ----*/
-
-#define LZ4F_HEADER_SIZE_MIN  7   /* LZ4 Frame header size can vary, depending on selected parameters */
-#define LZ4F_HEADER_SIZE_MAX 19
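-
-/* Worked example (illustrative) : the 19-byte worst case of LZ4F_HEADER_SIZE_MAX
- * decomposes as 4 (magic) + 1 (FLG) + 1 (BD) + 8 (content size) + 4 (dictID) + 1 (HC),
- * while the 7-byte minimum omits the two optional fields : 4 + 1 + 1 + 1 = 7. */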
-
-/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
-#define LZ4F_BLOCK_HEADER_SIZE 4
-
-/* Size in bytes of a block checksum footer in little-endian format. */
-#define LZ4F_BLOCK_CHECKSUM_SIZE 4
-
-/* Size in bytes of the content checksum. */
-#define LZ4F_CONTENT_CHECKSUM_SIZE 4
-
-/*! LZ4F_compressBegin() :
- * will write the frame header into dstBuffer.
- * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (which can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_preferences_t* prefsPtr);
-
-/*! LZ4F_compressBound() :
- * Provides minimum dstCapacity required to guarantee success of
- * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario.
- * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead.
- * Note that the result is only valid for a single invocation of LZ4F_compressUpdate().
- * When invoking LZ4F_compressUpdate() multiple times,
- * if the output buffer is gradually filled up instead of emptied and re-used from its start,
- * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound().
- * @return is always the same for a given srcSize and prefsPtr pair.
- * prefsPtr is optional : when NULL is provided, preferences will be set to cover the worst case scenario.
- * technical details :
- * @return, if automatic flushing is not enabled, includes the possibility that the internal buffer is already filled by up to (blockSize-1) bytes.
- * It also includes the frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
- * @return doesn't include the frame header, as it was already generated by LZ4F_compressBegin().
- */
-LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
-
-/*! LZ4F_compressUpdate() :
- * LZ4F_compressUpdate() can be called repeatedly to compress as much data as necessary.
- * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
- * This value is provided by LZ4F_compressBound().
- * If this condition is not respected, LZ4F_compressUpdate() will fail (result is an errorCode).
- * LZ4F_compressUpdate() doesn't guarantee error recovery.
- * When an error occurs, compression context must be freed or resized.
- * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
- * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
- * or an error code if it fails (which can be tested using LZ4F_isError())
- */
-LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const void* srcBuffer, size_t srcSize,
- const LZ4F_compressOptions_t* cOptPtr);
-
-/*! LZ4F_flush() :
- * When data must be generated and sent immediately, without waiting for a block to be completely filled,
- * it's possible to call LZ4F_flush(). It will immediately compress any data buffered within cctx.
- * `dstCapacity` must be large enough to ensure the operation will be successful.
- * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
- * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx)
- * or an error code if it fails (which can be tested using LZ4F_isError())
- * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
- */
-LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_compressOptions_t* cOptPtr);
-
-/*! LZ4F_compressEnd() :
- * To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
- * It will flush whatever data remained within `cctx` (like LZ4F_flush())
- * and properly finalize the frame, with an endMark and a checksum.
- * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
- * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark),
- * or an error code if it fails (which can be tested using LZ4F_isError())
- * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
- * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
- */
-LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_compressOptions_t* cOptPtr);
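-/* Streaming sketch (illustrative only) : compress input in fixed-size chunks.
- * `reader`/`writer` are hypothetical caller-provided callbacks; every
- * dstCapacity derives from LZ4F_compressBound(), so each call below is
- * guaranteed enough room. */
-#if 0
-#include <stdlib.h>
-#define CHUNK_SIZE (64 * 1024)
-static int compressStream(LZ4F_cctx* cctx,
-                          size_t (*reader)(void* buf, size_t cap),
-                          void   (*writer)(const void* buf, size_t len))
-{
-    size_t const outCap = LZ4F_compressBound(CHUNK_SIZE, NULL);  /* also covers LZ4F_compressEnd() */
-    char* const out = (char*)malloc(outCap);  /* outCap >= LZ4F_HEADER_SIZE_MAX here */
-    char in[CHUNK_SIZE];
-    size_t n, srcLen;
-    if (out == NULL) return -1;
-    n = LZ4F_compressBegin(cctx, out, outCap, NULL);      /* frame header */
-    if (LZ4F_isError(n)) { free(out); return -1; }
-    writer(out, n);
-    while ((srcLen = reader(in, CHUNK_SIZE)) > 0) {
-        n = LZ4F_compressUpdate(cctx, out, outCap, in, srcLen, NULL);
-        if (LZ4F_isError(n)) { free(out); return -1; }
-        writer(out, n);                                   /* n may be 0 : data just buffered */
-    }
-    n = LZ4F_compressEnd(cctx, out, outCap, NULL);        /* endMark + optional checksum */
-    if (LZ4F_isError(n)) { free(out); return -1; }
-    writer(out, n);
-    free(out);
-    return 0;
-}
-#endif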
-
-
-/*-*********************************
-* Decompression functions
-***********************************/
-typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
-typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
-
-typedef struct {
- unsigned stableDst; /* pledges that the last 64KB of decompressed data will remain available, unmodified, between invocations. This optimization skips storage operations in tmp buffers. */
- unsigned reserved[3]; /* must be set to zero for forward compatibility */
-} LZ4F_decompressOptions_t;
-
-
-/* Resource management */
-
-/*! LZ4F_createDecompressionContext() :
- * Create an LZ4F_dctx object, to track all decompression operations.
- * The version provided MUST be LZ4F_VERSION.
- * The function provides a pointer to an allocated and initialized LZ4F_dctx object.
- * The result is an errorCode, which can be tested using LZ4F_isError().
- * dctx memory can be released using LZ4F_freeDecompressionContext();
- * The result of LZ4F_freeDecompressionContext() indicates the state of the decompressionContext at the moment it is released.
- * That is, it should be == 0 if decompression has been completed fully and correctly.
- */
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
-
-
-/*-***********************************
-* Streaming decompression functions
-*************************************/
-
-#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5
-
-/*! LZ4F_headerSize() : v1.9.0+
- * Provide the header size of a frame starting at `src`.
- * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH,
- * which is enough to decode the header length.
- * @return : size of frame header
- * or an error code, which can be tested using LZ4F_isError()
- * note : Frame header size is variable, but is guaranteed to be
- * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
- */
-LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
-
-/*! LZ4F_getFrameInfo() :
- * This function extracts frame parameters (max blockSize, dictID, etc.).
- * Its usage is optional: user can call LZ4F_decompress() directly.
- *
- * Extracted information will fill an existing LZ4F_frameInfo_t structure.
- * This can be useful for allocation and dictionary identification purposes.
- *
- * LZ4F_getFrameInfo() can work in the following situations :
- *
- * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress().
- * It will decode header from `srcBuffer`,
- * consuming the header and starting the decoding process.
- *
- * Input size must be large enough to contain the full frame header.
- * Frame header size can be known beforehand by LZ4F_headerSize().
- * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes,
- * and <= LZ4F_HEADER_SIZE_MAX bytes.
- * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work.
- * It's allowed to provide more input data than the header size,
- * LZ4F_getFrameInfo() will only consume the header.
- *
- * If the input size is not large enough,
- * i.e. if it is smaller than the header size,
- * the function will fail and return an error code.
- *
- * 2) After decoding has been started,
- * it's possible to invoke LZ4F_getFrameInfo() anytime
- * to extract already decoded frame parameters stored within dctx.
- *
- * Note that, if decoding has barely started,
- * and has not yet read enough information to decode the header,
- * LZ4F_getFrameInfo() will fail.
- *
- * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value).
- * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started,
- * and when decoding the header has been successful.
- * Decompression must then resume from (srcBuffer + *srcSizePtr).
- *
- * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
- * or an error code which can be tested using LZ4F_isError().
- * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
- * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
- */
-LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
- LZ4F_frameInfo_t* frameInfoPtr,
- const void* srcBuffer, size_t* srcSizePtr);
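-/* Usage sketch (illustrative only) : read frame parameters first, e.g. to
- * size the destination from contentSize when the producer stored it. */
-#if 0
-static unsigned long long peekContentSize(LZ4F_dctx* dctx,
-                                          const void* src, size_t srcLen)
-{
-    LZ4F_frameInfo_t info;
-    size_t consumed = srcLen;           /* in : bytes available ; out : bytes consumed */
-    size_t const hint = LZ4F_getFrameInfo(dctx, &info, src, &consumed);
-    if (LZ4F_isError(hint)) return 0;   /* header incomplete or invalid */
-    /* decompression must resume at (const char*)src + consumed */
-    return info.contentSize;            /* 0 when not stored in the header */
-}
-#endif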
-
-/*! LZ4F_decompress() :
- * Call this function repeatedly to regenerate the data compressed in `srcBuffer`.
- *
- * The function requires a valid dctx state.
- * It will read up to *srcSizePtr bytes from srcBuffer,
- * and decompress data into dstBuffer, of capacity *dstSizePtr.
- *
- * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
- * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value).
- *
- * The function does not necessarily read all input bytes, so always check value in *srcSizePtr.
- * Unconsumed source data must be presented again in subsequent invocations.
- *
- * `dstBuffer` can freely change between each consecutive function invocation.
- * `dstBuffer` content will be overwritten.
- *
- * @return : a hint of how many `srcSize` bytes LZ4F_decompress() expects for the next call.
- * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
- * Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
- * This is just a hint though, it's always possible to provide any srcSize.
- *
- * When a frame is fully decoded, @return will be 0 (no more data expected).
- * When provided with more bytes than necessary to decode a frame,
- * LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0.
- *
- * If decompression failed, @return is an error code, which can be tested using LZ4F_isError().
- * After a decompression error, the `dctx` context is not resumable.
- * Use LZ4F_resetDecompressionContext() to return to clean state.
- *
- * After a frame is fully decoded, dctx can be used again to decompress another frame.
- */
-LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const LZ4F_decompressOptions_t* dOptPtr);
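-/* Streaming sketch (illustrative only) : regenerate a frame chunk by chunk,
- * honouring partial consumption of src. `reader`/`writer` are hypothetical
- * callbacks; buffer sizes are arbitrary. */
-#if 0
-#define IN_CHUNK  (64 * 1024)
-#define OUT_CHUNK (256 * 1024)
-static int decompressStream(LZ4F_dctx* dctx,
-                            size_t (*reader)(void* buf, size_t cap),
-                            void   (*writer)(const void* buf, size_t len))
-{
-    static char in[IN_CHUNK];
-    static char out[OUT_CHUNK];
-    size_t ret = 1, srcLen;
-    while (ret != 0 && (srcLen = reader(in, IN_CHUNK)) > 0) {
-        size_t pos = 0;
-        while (ret != 0 && pos < srcLen) {
-            size_t dstSize = OUT_CHUNK;
-            size_t srcSize = srcLen - pos;      /* may be only partially consumed */
-            ret = LZ4F_decompress(dctx, out, &dstSize, in + pos, &srcSize, NULL);
-            if (LZ4F_isError(ret)) { LZ4F_resetDecompressionContext(dctx); return -1; }
-            pos += srcSize;                     /* bytes actually read */
-            writer(out, dstSize);               /* bytes actually produced */
-        }
-    }
-    return (ret == 0) ? 0 : -1;                 /* 0 : frame fully decoded */
-}
-#endif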
-
-
-/*! LZ4F_resetDecompressionContext() : added in v1.8.0
- * In case of an error, the context is left in an "undefined" state.
- * It must then be reset before being re-used.
- * This method can also be used to abruptly stop any unfinished decompression,
- * and start a new one using same context resources. */
-LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */
-
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* LZ4F_H_09782039843 */
-
-#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
-#define LZ4F_H_STATIC_09782039843
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* These declarations are not stable and may change in the future.
- * They are therefore only safe to depend on
- * when the caller is statically linked against the library.
- * To access their declarations, define LZ4F_STATIC_LINKING_ONLY.
- *
- * By default, these symbols aren't published into shared/dynamic libraries.
- * You can override this behavior and force them to be published
- * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS.
- * Use at your own risk.
- */
-#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
-# define LZ4FLIB_STATIC_API LZ4FLIB_API
-#else
-# define LZ4FLIB_STATIC_API
-#endif
-
-
-/* --- Error List --- */
-#define LZ4F_LIST_ERRORS(ITEM) \
- ITEM(OK_NoError) \
- ITEM(ERROR_GENERIC) \
- ITEM(ERROR_maxBlockSize_invalid) \
- ITEM(ERROR_blockMode_invalid) \
- ITEM(ERROR_contentChecksumFlag_invalid) \
- ITEM(ERROR_compressionLevel_invalid) \
- ITEM(ERROR_headerVersion_wrong) \
- ITEM(ERROR_blockChecksum_invalid) \
- ITEM(ERROR_reservedFlag_set) \
- ITEM(ERROR_allocation_failed) \
- ITEM(ERROR_srcSize_tooLarge) \
- ITEM(ERROR_dstMaxSize_tooSmall) \
- ITEM(ERROR_frameHeader_incomplete) \
- ITEM(ERROR_frameType_unknown) \
- ITEM(ERROR_frameSize_wrong) \
- ITEM(ERROR_srcPtr_wrong) \
- ITEM(ERROR_decompressionFailed) \
- ITEM(ERROR_headerChecksum_invalid) \
- ITEM(ERROR_contentChecksum_invalid) \
- ITEM(ERROR_frameDecoding_alreadyStarted) \
- ITEM(ERROR_maxCode)
-
-#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
-
-/* enum list is exposed, to handle specific errors */
-typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
- _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes;
-
-LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
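-/* Usage sketch (illustrative only) : branch on a specific error code. */
-#if 0
-static int isRetryableError(size_t result)
-{
-    if (!LZ4F_isError(result)) return 0;
-    switch (LZ4F_getErrorCode(result)) {
-    case LZ4F_ERROR_dstMaxSize_tooSmall: return 1;  /* grow dst and retry */
-    default: return 0;   /* LZ4F_getErrorName(result) gives a printable message */
-    }
-}
-#endif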
-
-LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(unsigned);
-
-/**********************************
- * Bulk processing dictionary API
- *********************************/
-
-/* A Dictionary is useful for the compression of small messages (KB range).
- * It dramatically improves compression efficiency.
- *
- * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
- * Best results are generally achieved by using Zstandard's Dictionary Builder
- * to generate a high-quality dictionary from a set of samples.
- *
- * Loading a dictionary has a cost, since it involves construction of tables.
- * The Bulk processing dictionary API makes it possible to share this cost
- * over an arbitrary number of compression jobs, even concurrently,
- * markedly improving compression latency for these cases.
- *
- * The same dictionary will have to be used on the decompression side
- * for decoding to be successful.
- * To help identify the correct dictionary at decoding stage,
- * the frame header allows optional embedding of a dictID field.
- */
-typedef struct LZ4F_CDict_s LZ4F_CDict;
-
-/*! LZ4F_createCDict() :
- * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
- * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
- * An LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within CDict */
-LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
-LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
-
-
-/*! LZ4F_compressFrame_usingCDict() :
- * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
- * cctx must point to a context created by LZ4F_createCompressionContext().
- * If cdict==NULL, compress without a dictionary.
- * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- * If this condition is not respected, function will fail (@return an errorCode).
- * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
- * though it's not recommended, since the preferences structure is the only way to provide a dictID in the frame header.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError()) */
-LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict(
- LZ4F_cctx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* preferencesPtr);
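-/* Usage sketch (illustrative only) : digest the dictionary once, then share
- * the read-only CDict across many messages (or threads). All parameter
- * names are hypothetical. */
-#if 0
-static size_t compressWithDict(LZ4F_cctx* cctx,
-                               const void* dictBuf, size_t dictLen,
-                               const void* msg, size_t msgLen,
-                               void* dst, size_t dstCap)
-{
-    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuf, dictLen);
-    size_t r = 0;
-    if (cdict != NULL)
-        r = LZ4F_compressFrame_usingCDict(cctx, dst, dstCap, msg, msgLen,
-                                          cdict, NULL /* prefs : no dictID */);
-    LZ4F_freeCDict(cdict);
-    return r;   /* test with LZ4F_isError(r) */
-}
-#endif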
-
-
-/*! LZ4F_compressBegin_usingCDict() :
- * Initializes streaming dictionary compression, and writes the frame header into dstBuffer.
- * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you may provide NULL as argument;
- * note, however, that the preferences structure is the only way to provide a dictID in the frame header.
- * @return : number of bytes written into dstBuffer for the header,
- * or an error code (which can be tested using LZ4F_isError()) */
-LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict(
- LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* prefsPtr);
-
-
-/*! LZ4F_decompress_usingDict() :
- * Same as LZ4F_decompress(), using a predefined dictionary.
- * Dictionary is used "in place", without any preprocessing.
- * It must remain accessible throughout the entire frame decoding. */
-LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict(
- LZ4F_dctx* dctxPtr,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const void* dict, size_t dictSize,
- const LZ4F_decompressOptions_t* decompressOptionsPtr);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h
deleted file mode 100644
index 925a2c5c3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- LZ4 auto-framing library
- Header File for static linking only
- Copyright (C) 2011-2016, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
-#ifndef LZ4FRAME_STATIC_H_0398209384
-#define LZ4FRAME_STATIC_H_0398209384
-
-/* The declarations that formerly were made here have been merged into
- * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward,
- * it is recommended to simply include that header directly.
- */
-
-#define LZ4F_STATIC_LINKING_ONLY
-#include "lz4frame.h"
-
-#endif /* LZ4FRAME_STATIC_H_0398209384 */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c b/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c
deleted file mode 100644
index 77c9f4305..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c
+++ /dev/null
@@ -1,1615 +0,0 @@
-/*
- LZ4 HC - High Compression Mode of LZ4
- Copyright (C) 2011-2017, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
-
-
-/* *************************************
-* Tuning Parameter
-***************************************/
-
-/*! HEAPMODE :
- * Select how the default compression function will allocate its workspace memory,
- * on the stack (0:fastest), or on the heap (1:requires malloc()).
- * Since the workspace is rather large, heap mode is recommended.
- */
-#ifndef LZ4HC_HEAPMODE
-# define LZ4HC_HEAPMODE 1
-#endif
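-/* For stack-constrained targets, the default can be overridden at build time,
- * e.g. with a (hypothetical) invocation `cc -DLZ4HC_HEAPMODE=0 ...` : the
- * workspace (~256 KB in this version) is then placed on the stack. */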
-
-
-/*=== Dependency ===*/
-#define LZ4_HC_STATIC_LINKING_ONLY
-#include "lz4hc.h"
-
-
-/*=== Common definitions ===*/
-#if defined(__GNUC__)
-# pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-#if defined (__clang__)
-# pragma clang diagnostic ignored "-Wunused-function"
-#endif
-
-#define LZ4_COMMONDEFS_ONLY
-#ifndef LZ4_SRC_INCLUDED
-#include "lz4.c" /* LZ4_count, constants, mem */
-#endif
-
-
-/*=== Enums ===*/
-typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
-
-
-/*=== Constants ===*/
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
-#define LZ4_OPT_NUM (1<<12)
-
-
-/*=== Macros ===*/
-#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
-#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
-#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
-#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
-#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
-/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
-#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
-
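-/* LZ4HC_hashPtr() reads the 4 bytes at `ptr` and applies Knuth-style
- * multiplicative hashing : multiplying by the odd constant 2654435761U
- * scrambles the input, and the right shift keeps the top LZ4HC_HASH_LOG
- * bits as the hash table index. */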
-static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
-
-
-/**************************************
-* HC Compression
-**************************************/
-static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
-{
- MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
- MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
-}
-
-static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
-{
- uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
- if (startingOffset > 1 GB) {
- LZ4HC_clearTables(hc4);
- startingOffset = 0;
- }
- startingOffset += 64 KB;
- hc4->nextToUpdate = (U32) startingOffset;
- hc4->base = start - startingOffset;
- hc4->end = start;
- hc4->dictBase = start - startingOffset;
- hc4->dictLimit = (U32) startingOffset;
- hc4->lowLimit = (U32) startingOffset;
-}
-
-
-/* Update chains up to ip (excluded) */
-LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
-{
- U16* const chainTable = hc4->chainTable;
- U32* const hashTable = hc4->hashTable;
- const BYTE* const base = hc4->base;
- U32 const target = (U32)(ip - base);
- U32 idx = hc4->nextToUpdate;
-
- while (idx < target) {
- U32 const h = LZ4HC_hashPtr(base+idx);
- size_t delta = idx - hashTable[h];
- if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
- DELTANEXTU16(chainTable, idx) = (U16)delta;
- hashTable[h] = idx;
- idx++;
- }
-
- hc4->nextToUpdate = target;
-}
-
-/** LZ4HC_countBack() :
- * @return : a negative value, the number of common bytes just before ip/match */
-LZ4_FORCE_INLINE
-int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
- const BYTE* const iMin, const BYTE* const mMin)
-{
- int back = 0;
- int const min = (int)MAX(iMin - ip, mMin - match);
- assert(min <= 0);
- assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
- assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
- while ( (back > min)
- && (ip[back-1] == match[back-1]) )
- back--;
- return back;
-}
-
-#if defined(_MSC_VER)
-# define LZ4HC_rotl32(x,r) _rotl(x,r)
-#else
-# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-#endif
-
-
-static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
-{
- size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
- if (bitsToRotate == 0) return pattern;
- return LZ4HC_rotl32(pattern, (int)bitsToRotate);
-}
-
-/* LZ4HC_countPattern() :
- * pattern32 must be a sample of a repetitive pattern of length 1, 2 or 4 (but not 3!) */
-static unsigned
-LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
-{
- const BYTE* const iStart = ip;
- reg_t const pattern = (sizeof(pattern)==8) ?
- (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
-
- while (likely(ip < iEnd-(sizeof(pattern)-1))) {
- reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
- if (!diff) { ip+=sizeof(pattern); continue; }
- ip += LZ4_NbCommonBytes(diff);
- return (unsigned)(ip - iStart);
- }
-
- if (LZ4_isLittleEndian()) {
- reg_t patternByte = pattern;
- while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
- ip++; patternByte >>= 8;
- }
- } else { /* big endian */
- U32 bitOffset = (sizeof(pattern)*8) - 8;
- while (ip < iEnd) {
- BYTE const byte = (BYTE)(pattern >> bitOffset);
- if (*ip != byte) break;
- ip ++; bitOffset -= 8;
- }
- }
-
- return (unsigned)(ip - iStart);
-}
-
-/* LZ4HC_reverseCountPattern() :
- * pattern must be a sample of a repetitive pattern of length 1, 2 or 4 (but not 3!),
- * read using the platform's natural endianness */
-static unsigned
-LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
-{
- const BYTE* const iStart = ip;
-
- while (likely(ip >= iLow+4)) {
- if (LZ4_read32(ip-4) != pattern) break;
- ip -= 4;
- }
- { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
- while (likely(ip>iLow)) {
- if (ip[-1] != *bytePtr) break;
- ip--; bytePtr--;
- } }
- return (unsigned)(iStart - ip);
-}
-
-/* LZ4HC_protectDictEnd() :
- * Checks whether the match lies within the last 3 bytes of the dictionary,
- * where reading the 4-byte MINMATCH would overflow.
- * @returns true if the match index is safe to read.
- */
-static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
-{
- return ((U32)((dictLimit - 1) - matchIndex) >= 3);
-}
-
-typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
-typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
-
-LZ4_FORCE_INLINE int
-LZ4HC_InsertAndGetWiderMatch (
- LZ4HC_CCtx_internal* hc4,
- const BYTE* const ip,
- const BYTE* const iLowLimit,
- const BYTE* const iHighLimit,
- int longest,
- const BYTE** matchpos,
- const BYTE** startpos,
- const int maxNbAttempts,
- const int patternAnalysis,
- const int chainSwap,
- const dictCtx_directive dict,
- const HCfavor_e favorDecSpeed)
-{
- U16* const chainTable = hc4->chainTable;
- U32* const HashTable = hc4->hashTable;
- const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
- const BYTE* const base = hc4->base;
- const U32 dictLimit = hc4->dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
- const U32 ipIndex = (U32)(ip - base);
- const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
- const BYTE* const dictBase = hc4->dictBase;
- int const lookBackLength = (int)(ip-iLowLimit);
- int nbAttempts = maxNbAttempts;
- U32 matchChainPos = 0;
- U32 const pattern = LZ4_read32(ip);
- U32 matchIndex;
- repeat_state_e repeat = rep_untested;
- size_t srcPatternLength = 0;
-
- DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
- /* First Match */
- LZ4HC_Insert(hc4, ip);
- matchIndex = HashTable[LZ4HC_hashPtr(ip)];
- DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
- matchIndex, lowestMatchIndex);
-
- while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
- int matchLength=0;
- nbAttempts--;
- assert(matchIndex < ipIndex);
- if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
- /* do nothing */
- } else if (matchIndex >= dictLimit) { /* within current Prefix */
- const BYTE* const matchPtr = base + matchIndex;
- assert(matchPtr >= lowPrefixPtr);
- assert(matchPtr < ip);
- assert(longest >= 1);
- if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
- if (LZ4_read32(matchPtr) == pattern) {
- int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
- matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
- matchLength -= back;
- if (matchLength > longest) {
- longest = matchLength;
- *matchpos = matchPtr + back;
- *startpos = ip + back;
- } } }
- } else { /* lowestMatchIndex <= matchIndex < dictLimit */
- const BYTE* const matchPtr = dictBase + matchIndex;
- if (LZ4_read32(matchPtr) == pattern) {
- const BYTE* const dictStart = dictBase + hc4->lowLimit;
- int back = 0;
- const BYTE* vLimit = ip + (dictLimit - matchIndex);
- if (vLimit > iHighLimit) vLimit = iHighLimit;
- matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
- if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
- matchLength += LZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
- back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
- matchLength -= back;
- if (matchLength > longest) {
- longest = matchLength;
- *matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
- *startpos = ip + back;
- } } }
-
- if (chainSwap && matchLength==longest) { /* better match => select a better chain */
- assert(lookBackLength==0); /* search forward only */
- if (matchIndex + (U32)longest <= ipIndex) {
- int const kTrigger = 4;
- U32 distanceToNextMatch = 1;
- int const end = longest - MINMATCH + 1;
- int step = 1;
- int accel = 1 << kTrigger;
- int pos;
- for (pos = 0; pos < end; pos += step) {
- U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
- step = (accel++ >> kTrigger);
- if (candidateDist > distanceToNextMatch) {
- distanceToNextMatch = candidateDist;
- matchChainPos = (U32)pos;
- accel = 1 << kTrigger;
- }
- }
- if (distanceToNextMatch > 1) {
- if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
- matchIndex -= distanceToNextMatch;
- continue;
- } } }
-
- { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
- if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
- U32 const matchCandidateIdx = matchIndex-1;
- /* may be a repeated pattern */
- if (repeat == rep_untested) {
- if ( ((pattern & 0xFFFF) == (pattern >> 16))
- & ((pattern & 0xFF) == (pattern >> 24)) ) {
- repeat = rep_confirmed;
- srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
- } else {
- repeat = rep_not;
- } }
- if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
- && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
- const int extDict = matchCandidateIdx < dictLimit;
- const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
- if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
- const BYTE* const dictStart = dictBase + hc4->lowLimit;
- const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
- size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
- if (extDict && matchPtr + forwardPatternLength == iLimit) {
- U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
- forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
- }
- { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
- size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
- size_t currentSegmentLength;
- if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
- U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
- backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
- }
- /* Limit backLength so it does not go further back than lowestMatchIndex */
- backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
- assert(matchCandidateIdx - backLength >= lowestMatchIndex);
- currentSegmentLength = backLength + forwardPatternLength;
- /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
- if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
- && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
- U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
- if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
- matchIndex = newMatchIndex;
- else {
- /* Can only happen if started in the prefix */
- assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
- matchIndex = dictLimit;
- }
- } else {
- U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
- if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
- assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
- matchIndex = dictLimit;
- } else {
- matchIndex = newMatchIndex;
- if (lookBackLength==0) { /* no back possible */
- size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
- if ((size_t)longest < maxML) {
- assert(base + matchIndex != ip);
- if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
- assert(maxML < 2 GB);
- longest = (int)maxML;
- *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
- *startpos = ip;
- }
- { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
- if (distToNextPattern > matchIndex) break; /* avoid overflow */
- matchIndex -= distToNextPattern;
- } } } } }
- continue;
- } }
- } } /* PA optimization */
-
- /* follow current chain */
- matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);
-
- } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
-
- if ( dict == usingDictCtxHc
- && nbAttempts > 0
- && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
- size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
- U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
- assert(dictEndOffset <= 1 GB);
- matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
- while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
- const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;
-
- if (LZ4_read32(matchPtr) == pattern) {
- int mlt;
- int back = 0;
- const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
- if (vLimit > iHighLimit) vLimit = iHighLimit;
- mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
- back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
- mlt -= back;
- if (mlt > longest) {
- longest = mlt;
- *matchpos = base + matchIndex + back;
- *startpos = ip + back;
- } }
-
- { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
- dictMatchIndex -= nextOffset;
- matchIndex -= nextOffset;
- } } }
-
- return longest;
-}
-
-LZ4_FORCE_INLINE
-int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
- const BYTE* const ip, const BYTE* const iLimit,
- const BYTE** matchpos,
- const int maxNbAttempts,
- const int patternAnalysis,
- const dictCtx_directive dict)
-{
- const BYTE* uselessPtr = ip;
- /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
- * but this won't be the case here, as we define iLowLimit==ip,
- * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
- return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
-}
-
-/* LZ4HC_encodeSequence() :
- * @return : 0 if ok,
- * 1 if buffer issue detected */
-LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
- const BYTE** _ip,
- BYTE** _op,
- const BYTE** _anchor,
- int matchLength,
- const BYTE* const match,
- limitedOutput_directive limit,
- BYTE* oend)
-{
-#define ip (*_ip)
-#define op (*_op)
-#define anchor (*_anchor)
-
- size_t length;
- BYTE* const token = op++;
-
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
- static const BYTE* start = NULL;
- static U32 totalCost = 0;
- U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
- U32 const ll = (U32)(ip - anchor);
- U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
- U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
- U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
- if (start==NULL) start = anchor; /* only works for single segment */
- /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
- DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
- pos,
- (U32)(ip - anchor), matchLength, (U32)(ip-match),
- cost, totalCost);
- totalCost += cost;
-#endif
-
- /* Encode Literal length */
- length = (size_t)(ip - anchor);
- LZ4_STATIC_ASSERT(notLimited == 0);
- /* Check output limit */
- if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
- DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
- (int)length, (int)(oend - op));
- return 1;
- }
- if (length >= RUN_MASK) {
- size_t len = length - RUN_MASK;
- *token = (RUN_MASK << ML_BITS);
- for(; len >= 255 ; len -= 255) *op++ = 255;
- *op++ = (BYTE)len;
- } else {
- *token = (BYTE)(length << ML_BITS);
- }
-
- /* Copy Literals */
- LZ4_wildCopy8(op, anchor, op + length);
- op += length;
-
- /* Encode Offset */
- assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
- LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
-
- /* Encode MatchLength */
- assert(matchLength >= MINMATCH);
- length = (size_t)matchLength - MINMATCH;
- if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
- DEBUGLOG(6, "Not enough room to write match length");
- return 1; /* Check output limit */
- }
- if (length >= ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
- for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
- if (length >= 255) { length -= 255; *op++ = 255; }
- *op++ = (BYTE)length;
- } else {
- *token += (BYTE)(length);
- }
-
- /* Prepare next loop */
- ip += matchLength;
- anchor = ip;
-
- return 0;
-}
-#undef ip
-#undef op
-#undef anchor
-
-LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
- LZ4HC_CCtx_internal* const ctx,
- const char* const source,
- char* const dest,
- int* srcSizePtr,
- int const maxOutputSize,
- int maxNbAttempts,
- const limitedOutput_directive limit,
- const dictCtx_directive dict
- )
-{
- const int inputSize = *srcSizePtr;
- const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
-
- const BYTE* ip = (const BYTE*) source;
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + inputSize;
- const BYTE* const mflimit = iend - MFLIMIT;
- const BYTE* const matchlimit = (iend - LASTLITERALS);
-
- BYTE* optr = (BYTE*) dest;
- BYTE* op = (BYTE*) dest;
- BYTE* oend = op + maxOutputSize;
-
- int ml0, ml, ml2, ml3;
- const BYTE* start0;
- const BYTE* ref0;
- const BYTE* ref = NULL;
- const BYTE* start2 = NULL;
- const BYTE* ref2 = NULL;
- const BYTE* start3 = NULL;
- const BYTE* ref3 = NULL;
-
- /* init */
- *srcSizePtr = 0;
- if (limit == fillOutput) oend -= LASTLITERALS; /* Hack to support the LZ4 format restriction */
- if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
-
- /* Main Loop */
- while (ip <= mflimit) {
- ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
- if (ml<MINMATCH) { ip++; continue; }
-
- /* saved, in case we skip too much */
- start0 = ip; ref0 = ref; ml0 = ml;
-
-_Search2:
- if (ip+ml <= mflimit) {
- ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
- ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
- maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
- } else {
- ml2 = ml;
- }
-
- if (ml2 == ml) { /* No better match => encode ML1 */
- optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
- continue;
- }
-
- if (start0 < ip) { /* first match was skipped at least once */
- if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
- ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
- } }
-
- /* Here, start0==ip */
- if ((start2 - ip) < 3) { /* First Match too small : removed */
- ml = ml2;
- ip = start2;
- ref = ref2;
- goto _Search2;
- }
-
-_Search3:
- /* At this stage, we have :
- * ml2 > ml1, and
- * ip1+3 <= ip2 (usually < ip1+ml1) */
- if ((start2 - ip) < OPTIMAL_ML) {
- int correction;
- int new_ml = ml;
- if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
- if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
- correction = new_ml - (int)(start2 - ip);
- if (correction > 0) {
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- }
- }
- /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
-
- if (start2 + ml2 <= mflimit) {
- ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
- start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
- maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
- } else {
- ml3 = ml2;
- }
-
- if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
- /* ip & ref are known; Now for ml */
- if (start2 < ip+ml) ml = (int)(start2 - ip);
- /* Now, encode 2 sequences */
- optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
- ip = start2;
- optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
- ml = ml2;
- ref = ref2;
- goto _dest_overflow;
- }
- continue;
- }
-
- if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
- if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
- if (start2 < ip+ml) {
- int correction = (int)(ip+ml - start2);
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- if (ml2 < MINMATCH) {
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
- }
- }
-
- optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
- ip = start3;
- ref = ref3;
- ml = ml3;
-
- start0 = start2;
- ref0 = ref2;
- ml0 = ml2;
- goto _Search2;
- }
-
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
- goto _Search3;
- }
-
- /*
- * OK, now we have 3 ascending matches;
- * let's write the first one ML1.
- * ip & ref are known; Now decide ml.
- */
- if (start2 < ip+ml) {
- if ((start2 - ip) < OPTIMAL_ML) {
- int correction;
- if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
- if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
- correction = ml - (int)(start2 - ip);
- if (correction > 0) {
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- }
- } else {
- ml = (int)(start2 - ip);
- }
- }
- optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
-
- /* ML2 becomes ML1 */
- ip = start2; ref = ref2; ml = ml2;
-
- /* ML3 becomes ML2 */
- start2 = start3; ref2 = ref3; ml2 = ml3;
-
- /* let's find a new ML3 */
- goto _Search3;
- }
-
-_last_literals:
- /* Encode Last Literals */
- { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
- size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
- size_t const totalSize = 1 + llAdd + lastRunSize;
- if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
- if (limit && (op + totalSize > oend)) {
- if (limit == limitedOutput) return 0;
- /* adapt lastRunSize to fill 'dest' */
- lastRunSize = (size_t)(oend - op) - 1 /*token*/;
- llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
- lastRunSize -= llAdd;
- }
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
- ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
- *op++ = (RUN_MASK << ML_BITS);
- for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize << ML_BITS);
- }
- memcpy(op, anchor, lastRunSize);
- op += lastRunSize;
- }
-
- /* End */
- *srcSizePtr = (int) (((const char*)ip) - source);
- return (int) (((char*)op)-dest);
-
-_dest_overflow:
- if (limit == fillOutput) {
- /* Assumption : ip, anchor, ml and ref must be set correctly */
- size_t const ll = (size_t)(ip - anchor);
- size_t const ll_addbytes = (ll + 240) / 255;
- size_t const ll_totalCost = 1 + ll_addbytes + ll;
- BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
- DEBUGLOG(6, "Last sequence overflowing");
- op = optr; /* restore correct out pointer */
- if (op + ll_totalCost <= maxLitPos) {
- /* ll validated; now adjust match length */
- size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
- size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
- assert(maxMlSize < INT_MAX); assert(ml >= 0);
- if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
- if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
- LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
- } }
- goto _last_literals;
- }
- /* compression failed */
- return 0;
-}
-
-
-static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
- const char* const source, char* dst,
- int* srcSizePtr, int dstCapacity,
- int const nbSearches, size_t sufficient_len,
- const limitedOutput_directive limit, int const fullUpdate,
- const dictCtx_directive dict,
- const HCfavor_e favorDecSpeed);
-
-
-LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
- LZ4HC_CCtx_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- int const dstCapacity,
- int cLevel,
- const limitedOutput_directive limit,
- const dictCtx_directive dict
- )
-{
- typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
- typedef struct {
- lz4hc_strat_e strat;
- int nbSearches;
- U32 targetLength;
- } cParams_t;
- static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
- { lz4hc, 2, 16 }, /* 0, unused */
- { lz4hc, 2, 16 }, /* 1, unused */
- { lz4hc, 2, 16 }, /* 2, unused */
- { lz4hc, 4, 16 }, /* 3 */
- { lz4hc, 8, 16 }, /* 4 */
- { lz4hc, 16, 16 }, /* 5 */
- { lz4hc, 32, 16 }, /* 6 */
- { lz4hc, 64, 16 }, /* 7 */
- { lz4hc, 128, 16 }, /* 8 */
- { lz4hc, 256, 16 }, /* 9 */
- { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
- { lz4opt, 512,128 }, /*11 */
- { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
- };
-
- DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
- ctx, src, *srcSizePtr, limit);
-
- if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
-
- ctx->end += *srcSizePtr;
- if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */
- cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
- { cParams_t const cParam = clTable[cLevel];
- HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
- int result;
-
- if (cParam.strat == lz4hc) {
- result = LZ4HC_compress_hashChain(ctx,
- src, dst, srcSizePtr, dstCapacity,
- cParam.nbSearches, limit, dict);
- } else {
- assert(cParam.strat == lz4opt);
- result = LZ4HC_compress_optimal(ctx,
- src, dst, srcSizePtr, dstCapacity,
- cParam.nbSearches, cParam.targetLength, limit,
- cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
- dict, favor);
- }
- if (result <= 0) ctx->dirty = 1;
- return result;
- }
-}
-
-static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
-
-static int
-LZ4HC_compress_generic_noDictCtx (
- LZ4HC_CCtx_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- int const dstCapacity,
- int cLevel,
- limitedOutput_directive limit
- )
-{
- assert(ctx->dictCtx == NULL);
- return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
-}
-
-static int
-LZ4HC_compress_generic_dictCtx (
- LZ4HC_CCtx_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- int const dstCapacity,
- int cLevel,
- limitedOutput_directive limit
- )
-{
- const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
- assert(ctx->dictCtx != NULL);
- if (position >= 64 KB) {
- ctx->dictCtx = NULL;
- return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
- } else if (position == 0 && *srcSizePtr > 4 KB) {
- memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
- LZ4HC_setExternalDict(ctx, (const BYTE *)src);
- ctx->compressionLevel = (short)cLevel;
- return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
- } else {
- return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
- }
-}
-
-static int
-LZ4HC_compress_generic (
- LZ4HC_CCtx_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- int const dstCapacity,
- int cLevel,
- limitedOutput_directive limit
- )
-{
- if (ctx->dictCtx == NULL) {
- return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
- } else {
- return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
- }
-}
-
-
-int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
-
-static size_t LZ4_streamHC_t_alignment(void)
-{
-#if LZ4_ALIGN_TEST
- typedef struct { char c; LZ4_streamHC_t t; } t_a;
- return sizeof(t_a) - sizeof(LZ4_streamHC_t);
-#else
- return 1; /* effectively disabled */
-#endif
-}
-
-/* state is presumed correctly initialized,
- * in which case its size and alignment have already been validated */
-int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
-{
- LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
- if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
- LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
- LZ4HC_init_internal (ctx, (const BYTE*)src);
- if (dstCapacity < LZ4_compressBound(srcSize))
- return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
- else
- return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
-}
-
-int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
-{
- LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
- if (ctx==NULL) return 0; /* init failure */
- return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
-}
-
-int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
-{
-#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
- LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
-#else
- LZ4_streamHC_t state;
- LZ4_streamHC_t* const statePtr = &state;
-#endif
- int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
-#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
- FREEMEM(statePtr);
-#endif
- return cSize;
-}
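-/* Usage sketch (illustrative only) : one-shot HC compression into a
- * worst-case-sized buffer, at maximum compression level. */
-#if 0
-#include <stdlib.h>
-static char* compressHCOnce(const char* src, int srcSize, int* cSizePtr)
-{
-    int const dstCap = LZ4_compressBound(srcSize);
-    char* const dst = (char*)malloc((size_t)dstCap);
-    if (dst == NULL) return NULL;
-    *cSizePtr = LZ4_compress_HC(src, dst, srcSize, dstCap, LZ4HC_CLEVEL_MAX);
-    if (*cSizePtr <= 0) { free(dst); return NULL; }   /* 0 means compression failed */
-    return dst;   /* caller frees */
-}
-#endif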
-
-/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
-int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
-{
- LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
- if (ctx==NULL) return 0; /* init failure */
- LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
- LZ4_setCompressionLevel(ctx, cLevel);
- return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
-}
-
-
-
-/**************************************
-* Streaming Functions
-**************************************/
-/* allocation */
-LZ4_streamHC_t* LZ4_createStreamHC(void)
-{
- LZ4_streamHC_t* const state =
- (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
- if (state == NULL) return NULL;
- LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
- return state;
-}
-
-int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
-{
- DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
- if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
- FREEMEM(LZ4_streamHCPtr);
- return 0;
-}
-
-
-LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
-{
- LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
- /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
- LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
- DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
- /* check conditions */
- if (buffer == NULL) return NULL;
- if (size < sizeof(LZ4_streamHC_t)) return NULL;
- if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
- /* init */
- { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
- MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
- LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
- return LZ4_streamHCPtr;
-}
-
-/* just a stub */
-void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
-{
- LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
- LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
-}
-
-void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
-{
- DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
- if (LZ4_streamHCPtr->internal_donotuse.dirty) {
- LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
- } else {
- /* preserve end - base : can trigger clearTable's threshold */
- LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
- LZ4_streamHCPtr->internal_donotuse.base = NULL;
- LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
- }
- LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
-}
-
-void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
-{
- DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
- if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
- if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
- LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
-}
-
-void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
-{
- LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
-}
-
-/* LZ4_loadDictHC() :
- * LZ4_streamHCPtr is presumed properly initialized */
-int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
- const char* dictionary, int dictSize)
-{
- LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
- assert(LZ4_streamHCPtr != NULL);
- if (dictSize > 64 KB) {
- dictionary += (size_t)dictSize - 64 KB;
- dictSize = 64 KB;
- }
- /* need a full initialization, there are bad side-effects when using resetFast() */
- { int const cLevel = ctxPtr->compressionLevel;
- LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
- LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
- }
- LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
- ctxPtr->end = (const BYTE*)dictionary + dictSize;
- if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
- return dictSize;
-}
-
-void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
- working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
-}
-
-/* compression */
-
-static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
-{
- DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
- if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
- LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
-
- /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
- ctxPtr->lowLimit = ctxPtr->dictLimit;
- ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
- ctxPtr->dictBase = ctxPtr->base;
- ctxPtr->base = newBlock - ctxPtr->dictLimit;
- ctxPtr->end = newBlock;
- ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
-
- /* cannot reference an extDict and a dictCtx at the same time */
- ctxPtr->dictCtx = NULL;
-}
-
-static int
-LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
- const char* src, char* dst,
- int* srcSizePtr, int dstCapacity,
- limitedOutput_directive limit)
-{
- LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
- LZ4_streamHCPtr, src, *srcSizePtr, limit);
- assert(ctxPtr != NULL);
- /* auto-init if forgotten */
- if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
-
- /* Check overflow */
- if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
- size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
- if (dictSize > 64 KB) dictSize = 64 KB;
- LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
- }
-
- /* Check if blocks follow each other */
- if ((const BYTE*)src != ctxPtr->end)
- LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
-
- /* Check overlapping input/dictionary space */
- { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
- const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
- const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
- if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
- if (sourceEnd > dictEnd) sourceEnd = dictEnd;
- ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
- if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
- } }
-
- return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
-}
-
-int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
-{
- if (dstCapacity < LZ4_compressBound(srcSize))
- return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
- else
- return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
-}
-
-int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
-{
- return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
-}
-
-
-
-/* LZ4_saveDictHC :
- * save history content into a user-provided buffer,
- * which is then used to continue compression
- */
-int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
-{
- LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
- int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
- DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
- assert(prefixSize >= 0);
- if (dictSize > 64 KB) dictSize = 64 KB;
- if (dictSize < 4) dictSize = 0;
- if (dictSize > prefixSize) dictSize = prefixSize;
- if (safeBuffer == NULL) assert(dictSize == 0);
- if (dictSize > 0)
- memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
- { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
- streamPtr->end = (const BYTE*)safeBuffer + dictSize;
- streamPtr->base = streamPtr->end - endIndex;
- streamPtr->dictLimit = endIndex - (U32)dictSize;
- streamPtr->lowLimit = endIndex - (U32)dictSize;
- if (streamPtr->nextToUpdate < streamPtr->dictLimit)
- streamPtr->nextToUpdate = streamPtr->dictLimit;
- }
- return dictSize;
-}
-
-
-/***************************************************
-* Deprecated Functions
-***************************************************/
-
-/* These functions currently generate deprecation warnings */
-
-/* Wrappers for deprecated compression functions */
-int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
-int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
-
-
-/* Deprecated streaming functions */
-int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
-
-/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
- * @return : 0 on success, !=0 if error */
-int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
-{
- LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
- if (hc4 == NULL) return 1; /* init failed */
- LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- return 0;
-}
-
-void* LZ4_createHC (const char* inputBuffer)
-{
- LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
- if (hc4 == NULL) return NULL; /* not enough memory */
- LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- return hc4;
-}
-
-int LZ4_freeHC (void* LZ4HC_Data)
-{
- if (!LZ4HC_Data) return 0; /* support free on NULL */
- FREEMEM(LZ4HC_Data);
- return 0;
-}
-
-int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
-{
- return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
-}
-
-int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
-{
- return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
-}
-
-char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
-{
- LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
- const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
- LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
- /* avoid const char * -> char * conversion warning :( */
- return (char *)(uptrval)bufferStart;
-}
-
-
-/* ================================================
- * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
- * ===============================================*/
-typedef struct {
- int price;
- int off;
- int mlen;
- int litlen;
-} LZ4HC_optimal_t;
-
-/* price in bytes */
-LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
-{
- int price = litlen;
- assert(litlen >= 0);
- if (litlen >= (int)RUN_MASK)
- price += 1 + ((litlen-(int)RUN_MASK) / 255);
- return price;
-}
-
-
-/* requires mlen >= MINMATCH */
-LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
-{
- int price = 1 + 2 ; /* token + 16-bit offset */
- assert(litlen >= 0);
- assert(mlen >= MINMATCH);
-
- price += LZ4HC_literalsPrice(litlen);
-
- if (mlen >= (int)(ML_MASK+MINMATCH))
- price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
-
- return price;
-}
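
For illustration: with the LZ4 block-format constants RUN_MASK = 15, ML_MASK = 15 and MINMATCH = 4, a sequence with litlen = 20 and mlen = 25 is priced at 1 (token) + 2 (offset) + 21 (20 literal bytes plus one extra length byte, since 20 >= RUN_MASK) + 1 (one extra match-length byte, since 25 >= ML_MASK + MINMATCH = 19), i.e. 25 bytes in total.
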
-
-
-typedef struct {
- int off;
- int len;
-} LZ4HC_match_t;
-
-LZ4_FORCE_INLINE LZ4HC_match_t
-LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
- const BYTE* ip, const BYTE* const iHighLimit,
- int minLen, int nbSearches,
- const dictCtx_directive dict,
- const HCfavor_e favorDecSpeed)
-{
- LZ4HC_match_t match = { 0 , 0 };
- const BYTE* matchPtr = NULL;
- /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
- * but this won't be the case here, as we define iLowLimit==ip,
- * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
- int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
- if (matchLength <= minLen) return match;
- if (favorDecSpeed) {
- if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */
- }
- match.len = matchLength;
- match.off = (int)(ip-matchPtr);
- return match;
-}
-
-
-static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
- const char* const source,
- char* dst,
- int* srcSizePtr,
- int dstCapacity,
- int const nbSearches,
- size_t sufficient_len,
- const limitedOutput_directive limit,
- int const fullUpdate,
- const dictCtx_directive dict,
- const HCfavor_e favorDecSpeed)
-{
- int retval = 0;
-#define TRAILING_LITERALS 3
-#ifdef LZ4HC_HEAPMODE
- LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
-#else
- LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
-#endif
-
- const BYTE* ip = (const BYTE*) source;
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + *srcSizePtr;
- const BYTE* const mflimit = iend - MFLIMIT;
- const BYTE* const matchlimit = iend - LASTLITERALS;
- BYTE* op = (BYTE*) dst;
- BYTE* opSaved = (BYTE*) dst;
- BYTE* oend = op + dstCapacity;
- int ovml = MINMATCH; /* overflow - last sequence */
- const BYTE* ovref = NULL;
-
- /* init */
-#ifdef LZ4HC_HEAPMODE
- if (opt == NULL) goto _return_label;
-#endif
- DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
- *srcSizePtr = 0;
- if (limit == fillOutput) oend -= LASTLITERALS; /* Hack to support LZ4 format restriction */
- if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
-
- /* Main Loop */
- while (ip <= mflimit) {
- int const llen = (int)(ip - anchor);
- int best_mlen, best_off;
- int cur, last_match_pos = 0;
-
- LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
- if (firstMatch.len==0) { ip++; continue; }
-
- if ((size_t)firstMatch.len > sufficient_len) {
- /* good enough solution : immediate encoding */
- int const firstML = firstMatch.len;
- const BYTE* const matchPos = ip - firstMatch.off;
- opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
- ovml = firstML;
- ovref = matchPos;
- goto _dest_overflow;
- }
- continue;
- }
-
- /* set prices for first positions (literals) */
- { int rPos;
- for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
- int const cost = LZ4HC_literalsPrice(llen + rPos);
- opt[rPos].mlen = 1;
- opt[rPos].off = 0;
- opt[rPos].litlen = llen + rPos;
- opt[rPos].price = cost;
- DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
- rPos, cost, opt[rPos].litlen);
- } }
- /* set prices using initial match */
- { int mlen = MINMATCH;
- int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
- int const offset = firstMatch.off;
- assert(matchML < LZ4_OPT_NUM);
- for ( ; mlen <= matchML ; mlen++) {
- int const cost = LZ4HC_sequencePrice(llen, mlen);
- opt[mlen].mlen = mlen;
- opt[mlen].off = offset;
- opt[mlen].litlen = llen;
- opt[mlen].price = cost;
- DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
- mlen, cost, mlen);
- } }
- last_match_pos = firstMatch.len;
- { int addLit;
- for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
- opt[last_match_pos+addLit].mlen = 1; /* literal */
- opt[last_match_pos+addLit].off = 0;
- opt[last_match_pos+addLit].litlen = addLit;
- opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
- DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
- last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
- } }
-
- /* check further positions */
- for (cur = 1; cur < last_match_pos; cur++) {
- const BYTE* const curPtr = ip + cur;
- LZ4HC_match_t newMatch;
-
- if (curPtr > mflimit) break;
- DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
- cur, opt[cur].price, opt[cur+1].price, cur+1);
- if (fullUpdate) {
- /* not useful to search here if next position has same (or lower) cost */
- if ( (opt[cur+1].price <= opt[cur].price)
- /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
- && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
- continue;
- } else {
- /* not useful to search here if next position has same (or lower) cost */
- if (opt[cur+1].price <= opt[cur].price) continue;
- }
-
- DEBUGLOG(7, "search at rPos:%u", cur);
- if (fullUpdate)
- newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
- else
- /* only test matches of minimum length; slightly faster, but misses a few bytes */
- newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
- if (!newMatch.len) continue;
-
- if ( ((size_t)newMatch.len > sufficient_len)
- || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
- /* immediate encoding */
- best_mlen = newMatch.len;
- best_off = newMatch.off;
- last_match_pos = cur + 1;
- goto encode;
- }
-
- /* before match : set price with literals at beginning */
- { int const baseLitlen = opt[cur].litlen;
- int litlen;
- for (litlen = 1; litlen < MINMATCH; litlen++) {
- int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
- int const pos = cur + litlen;
- if (price < opt[pos].price) {
- opt[pos].mlen = 1; /* literal */
- opt[pos].off = 0;
- opt[pos].litlen = baseLitlen+litlen;
- opt[pos].price = price;
- DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
- pos, price, opt[pos].litlen);
- } } }
-
- /* set prices using match at position = cur */
- { int const matchML = newMatch.len;
- int ml = MINMATCH;
-
- assert(cur + newMatch.len < LZ4_OPT_NUM);
- for ( ; ml <= matchML ; ml++) {
- int const pos = cur + ml;
- int const offset = newMatch.off;
- int price;
- int ll;
- DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
- pos, last_match_pos);
- if (opt[cur].mlen == 1) {
- ll = opt[cur].litlen;
- price = ((cur > ll) ? opt[cur - ll].price : 0)
- + LZ4HC_sequencePrice(ll, ml);
- } else {
- ll = 0;
- price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
- }
-
- assert((U32)favorDecSpeed <= 1);
- if (pos > last_match_pos+TRAILING_LITERALS
- || price <= opt[pos].price - (int)favorDecSpeed) {
- DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
- pos, price, ml);
- assert(pos < LZ4_OPT_NUM);
- if ( (ml == matchML) /* last pos of last match */
- && (last_match_pos < pos) )
- last_match_pos = pos;
- opt[pos].mlen = ml;
- opt[pos].off = offset;
- opt[pos].litlen = ll;
- opt[pos].price = price;
- } } }
- /* complete following positions with literals */
- { int addLit;
- for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
- opt[last_match_pos+addLit].mlen = 1; /* literal */
- opt[last_match_pos+addLit].off = 0;
- opt[last_match_pos+addLit].litlen = addLit;
- opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
- DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
- } }
- } /* for (cur = 1; cur < last_match_pos; cur++) */
-
- assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
- best_mlen = opt[last_match_pos].mlen;
- best_off = opt[last_match_pos].off;
- cur = last_match_pos - best_mlen;
-
-encode: /* cur, last_match_pos, best_mlen, best_off must be set */
- assert(cur < LZ4_OPT_NUM);
- assert(last_match_pos >= 1); /* == 1 when only one candidate */
- DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
- { int candidate_pos = cur;
- int selected_matchLength = best_mlen;
- int selected_offset = best_off;
- while (1) { /* from end to beginning */
- int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */
- int const next_offset = opt[candidate_pos].off;
- DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
- opt[candidate_pos].mlen = selected_matchLength;
- opt[candidate_pos].off = selected_offset;
- selected_matchLength = next_matchLength;
- selected_offset = next_offset;
- if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
- assert(next_matchLength > 0); /* can be 1, means literal */
- candidate_pos -= next_matchLength;
- } }
-
- /* encode all recorded sequences in order */
- { int rPos = 0; /* relative position (to ip) */
- while (rPos < last_match_pos) {
- int const ml = opt[rPos].mlen;
- int const offset = opt[rPos].off;
- if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */
- rPos += ml;
- assert(ml >= MINMATCH);
- assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
- opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
- ovml = ml;
- ovref = ip - offset;
- goto _dest_overflow;
- } } }
- } /* while (ip <= mflimit) */
-
-_last_literals:
- /* Encode Last Literals */
- { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
- size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
- size_t const totalSize = 1 + llAdd + lastRunSize;
- if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
- if (limit && (op + totalSize > oend)) {
- if (limit == limitedOutput) { /* Check output limit */
- retval = 0;
- goto _return_label;
- }
- /* adapt lastRunSize to fill 'dst' */
- lastRunSize = (size_t)(oend - op) - 1 /*token*/;
- llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
- lastRunSize -= llAdd;
- }
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
- ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
-
- if (lastRunSize >= RUN_MASK) {
- size_t accumulator = lastRunSize - RUN_MASK;
- *op++ = (RUN_MASK << ML_BITS);
- for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRunSize << ML_BITS);
- }
- memcpy(op, anchor, lastRunSize);
- op += lastRunSize;
- }
-
- /* End */
- *srcSizePtr = (int) (((const char*)ip) - source);
- retval = (int) ((char*)op-dst);
- goto _return_label;
-
-_dest_overflow:
-    if (limit == fillOutput) {
- /* Assumption : ip, anchor, ovml and ovref must be set correctly */
- size_t const ll = (size_t)(ip - anchor);
- size_t const ll_addbytes = (ll + 240) / 255;
- size_t const ll_totalCost = 1 + ll_addbytes + ll;
- BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
- DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
- op = opSaved; /* restore correct out pointer */
- if (op + ll_totalCost <= maxLitPos) {
- /* ll validated; now adjust match length */
- size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
- size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
- assert(maxMlSize < INT_MAX); assert(ovml >= 0);
- if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
- if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
- DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
- DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
- LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
- DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
- } }
- goto _last_literals;
-    }
-_return_label:
-#ifdef LZ4HC_HEAPMODE
- FREEMEM(opt);
-#endif
- return retval;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h
deleted file mode 100644
index 3d441fb6f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- LZ4 HC - High Compression Mode of LZ4
- Header File
- Copyright (C) 2011-2017, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-#ifndef LZ4_HC_H_19834876238432
-#define LZ4_HC_H_19834876238432
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* --- Dependency --- */
-/* note : lz4hc requires lz4.h/lz4.c for compilation */
-#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
-
-
-/* --- Useful constants --- */
-#define LZ4HC_CLEVEL_MIN 3
-#define LZ4HC_CLEVEL_DEFAULT 9
-#define LZ4HC_CLEVEL_OPT_MIN 10
-#define LZ4HC_CLEVEL_MAX 12
-
-
-/*-************************************
- * Block Compression
- **************************************/
-/*! LZ4_compress_HC() :
- * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
- * `dst` must be already allocated.
- * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
- * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
- * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
- * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
- * @return : the number of bytes written into 'dst'
- * or 0 if compression fails.
- */
-LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
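
A minimal one-shot usage sketch (illustrative, not part of the original header; the function name and buffer contents are made up). Sizing `dst` with LZ4_compressBound() guarantees compression cannot fail for lack of output space:

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4hc.h"

    int example_compress_hc(void)
    {
        const char src[] = "payload payload payload payload payload";
        int const srcSize = (int)sizeof(src);
        int const dstCapacity = LZ4_compressBound(srcSize);   /* worst-case size */
        char* const dst = (char*)malloc((size_t)dstCapacity);
        int cSize;
        if (dst == NULL) return -1;
        cSize = LZ4_compress_HC(src, dst, srcSize, dstCapacity, LZ4HC_CLEVEL_DEFAULT);
        if (cSize == 0) { free(dst); return -1; }   /* 0 means compression failed */
        printf("compressed %d -> %d bytes\n", srcSize, cSize);
        free(dst);
        return 0;
    }
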
-
-
-/* Note :
- * Decompression functions are provided within "lz4.h" (BSD license)
- */
-
-
-/*! LZ4_compress_HC_extStateHC() :
- * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
- * `state` size is provided by LZ4_sizeofStateHC().
- * Memory segment must be aligned on 8-byte boundaries (which a normal malloc() should do properly).
- */
-LZ4LIB_API int LZ4_sizeofStateHC(void);
-LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
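
A sketch of the external-state variant (illustrative; the helper name is hypothetical). malloc() satisfies the 8-byte alignment requirement:

    #include <stdlib.h>
    #include "lz4hc.h"

    int compress_with_ext_state(const char* src, int srcSize,
                                char* dst, int dstCapacity, int level)
    {
        void* const state = malloc((size_t)LZ4_sizeofStateHC());
        int cSize;
        if (state == NULL) return 0;
        cSize = LZ4_compress_HC_extStateHC(state, src, dst, srcSize, dstCapacity, level);
        free(state);
        return cSize;   /* 0 on failure, as with LZ4_compress_HC() */
    }
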
-
-
-/*! LZ4_compress_HC_destSize() : v1.9.0+
- * Will compress as much data as possible from `src`
- * to fit into `targetDstSize` budget.
- * Result is provided in 2 parts :
- * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
- * or 0 if compression fails.
- * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how many bytes were read from `src`
- */
-LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
- const char* src, char* dst,
- int* srcSizePtr, int targetDstSize,
- int compressionLevel);
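
A sketch of the destSize contract (illustrative; the helper name and the 512-byte budget are made up). The function fills a fixed output budget and reports back how much input it consumed:

    #include <stdlib.h>
    #include "lz4hc.h"

    int fill_fixed_budget(const char* src, int srcSize, char* dst512)
    {
        void* const state = malloc((size_t)LZ4_sizeofStateHC());
        int consumed = srcSize;   /* in: bytes available; out: bytes actually read */
        int written = 0;
        if (state != NULL) {
            written = LZ4_compress_HC_destSize(state, src, dst512,
                                               &consumed, 512, LZ4HC_CLEVEL_DEFAULT);
            free(state);
        }
        return written;   /* <= 512; `consumed` holds the amount of src encoded */
    }
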
-
-
-/*-************************************
- * Streaming Compression
- * Bufferless synchronous API
- **************************************/
- typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
-
-/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
- * These functions create and release memory for LZ4 HC streaming state.
- * Newly created states are automatically initialized.
- * The same state can be used multiple times consecutively,
- * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
- */
-LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
-LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
-
-/*
- These functions compress data in successive blocks of any size,
- using previous blocks as dictionary, to improve compression ratio.
- One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
- There is an exception for ring buffers, which can be smaller than 64 KB.
- Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
-
- Before starting compression, state must be allocated and properly initialized.
- LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
-
- Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
- or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
- LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
- which is automatically the case when state is created using LZ4_createStreamHC().
-
- After reset, a first "fictional block" can be designated as initial dictionary,
- using LZ4_loadDictHC() (Optional).
-
- Invoke LZ4_compress_HC_continue() to compress each successive block.
- The number of blocks is unlimited.
- Previous input blocks, including initial dictionary when present,
- must remain accessible and unmodified during compression.
-
- It's allowed to update compression level anytime between blocks,
- using LZ4_setCompressionLevel() (experimental).
-
- 'dst' buffer should be sized to handle worst case scenarios
- (see LZ4_compressBound(), it ensures compression success).
- In case of failure, the API does not guarantee recovery,
- so the state _must_ be reset.
- To ensure compression success
- whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
- consider using LZ4_compress_HC_continue_destSize().
-
- Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
- it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
- Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB)
-
- After completing a streaming compression,
- it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
- just by resetting it, using LZ4_resetStreamHC_fast().
-*/
-
-LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */
-LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
-
-LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
- const char* src, char* dst,
- int srcSize, int maxDstSize);
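
A sketch of the block-by-block flow described above (illustrative; function and buffer names are made up). blockA/blockB must stay readable and unmodified while the stream is in use:

    #include <stdlib.h>
    #include "lz4hc.h"

    int stream_two_blocks(const char* blockA, int sizeA,
                          const char* blockB, int sizeB,
                          char* dst, int dstCapacity)
    {
        LZ4_streamHC_t* const s = LZ4_createStreamHC();   /* allocated + initialized */
        int c1, c2, total = -1;
        if (s == NULL) return -1;
        LZ4_resetStreamHC_fast(s, LZ4HC_CLEVEL_DEFAULT);  /* start a new stream */
        c1 = LZ4_compress_HC_continue(s, blockA, dst, sizeA, dstCapacity);
        if (c1 > 0) {
            /* the second block references the first as dictionary */
            c2 = LZ4_compress_HC_continue(s, blockB, dst + c1, sizeB, dstCapacity - c1);
            if (c2 > 0) total = c1 + c2;
        }
        LZ4_freeStreamHC(s);
        return total;
    }
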
-
-/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
- * Similar to LZ4_compress_HC_continue(),
- * but will read as much data as possible from `src`
- * to fit into `targetDstSize` budget.
- * Result is provided in 2 parts :
- * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
- * or 0 if compression fails.
- * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how many bytes were read from `src`.
- * Note that this function may not consume the entire input.
- */
-LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
- const char* src, char* dst,
- int* srcSizePtr, int targetDstSize);
-
-LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
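
A sketch of the save-dictionary pattern (illustrative; the 64 KB scratch buffer and function name are made up), for when the previous input blocks are about to be overwritten in place:

    #include "lz4hc.h"

    static char history[64 * 1024];   /* stable scratch area for recent input */

    void preserve_window(LZ4_streamHC_t* s)
    {
        int const saved = LZ4_saveDictHC(s, history, (int)sizeof(history));
        /* up to 64 KB of recent input now lives in `history`; subsequent
         * LZ4_compress_HC_continue() calls keep referencing it from there */
        (void)saved;
    }
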
-
-
-
-/*^**********************************************
- * !!!!!! STATIC LINKING ONLY !!!!!!
- ***********************************************/
-
-/*-******************************************************************
- * PRIVATE DEFINITIONS :
- * Do not use these definitions directly.
- * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
- * Declare an `LZ4_streamHC_t` directly, rather than any type below.
- * Even then, only do so in the context of static linking, as definitions may change between versions.
- ********************************************************************/
-
-#define LZ4HC_DICTIONARY_LOGSIZE 16
-#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
-#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
-
-#define LZ4HC_HASH_LOG 15
-#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
-#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
-
-
-typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
-struct LZ4HC_CCtx_internal
-{
- LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
- LZ4_u16 chainTable[LZ4HC_MAXD];
- const LZ4_byte* end; /* next block here to continue on current prefix */
- const LZ4_byte* base; /* All index relative to this position */
- const LZ4_byte* dictBase; /* alternate base for extDict */
- LZ4_u32 dictLimit; /* below that point, need extDict */
- LZ4_u32 lowLimit; /* below that point, no more dict */
- LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
- short compressionLevel;
- LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
- otherwise, favor compression ratio */
- LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
- const LZ4HC_CCtx_internal* dictCtx;
-};
-
-
-/* Do not use these definitions directly !
- * Declare or allocate an LZ4_streamHC_t instead.
- */
-#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
-#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
-union LZ4_streamHC_u {
- void* table[LZ4_STREAMHCSIZE_VOIDP];
- LZ4HC_CCtx_internal internal_donotuse;
-}; /* previously typedef'd to LZ4_streamHC_t */
-
-/* LZ4_streamHC_t :
- * This structure allows static allocation of LZ4 HC streaming state.
- * This can be used to allocate statically, on the stack, or as part of a larger structure.
- *
- * Such state **must** be initialized using LZ4_initStreamHC() before first use.
- *
- * Note that invoking LZ4_initStreamHC() is not required when
- * the state was created using LZ4_createStreamHC() (which is recommended).
- * Using the normal builder, a newly created state is automatically initialized.
- *
- * Static allocation shall only be used in combination with static linking.
- */
-
-/* LZ4_initStreamHC() : v1.9.0+
- * Required before first use of a statically allocated LZ4_streamHC_t.
- * Before v1.9.0 : use LZ4_resetStreamHC() instead
- */
-LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size);
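
A sketch of static allocation (illustrative; the global state name is made up). A statically allocated state holds arbitrary garbage at startup, so it must pass through LZ4_initStreamHC() once before first use:

    #include "lz4hc.h"

    static LZ4_streamHC_t g_hcState;

    LZ4_streamHC_t* get_initialized_state(void)
    {
        /* returns NULL if `size` is too small or the buffer is unsuitable */
        return LZ4_initStreamHC(&g_hcState, sizeof(g_hcState));
    }
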
-
-
-/*-************************************
-* Deprecated Functions
-**************************************/
-/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
-
-/* deprecated compression functions */
-LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
-LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
-LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
-/* Obsolete streaming functions; degraded functionality; do not use!
- *
- * In order to perform streaming compression, these functions depended on data
- * that is no longer tracked in the state. They have been preserved as well as
- * possible: using them will still produce a correct output. However, use of
- * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
- * than preserve a window-sized chunk of history.
- */
-LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
-LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
-LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
-LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
-LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
-LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
-LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);
-
-
-/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
- * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
- * which is now the recommended function to start a new stream of blocks,
- * but cannot be used to initialize a memory segment containing arbitrary garbage data.
- *
- * It is recommended to switch to LZ4_initStreamHC().
- * LZ4_resetStreamHC() will generate deprecation warnings in a future version.
- */
-LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* LZ4_HC_H_19834876238432 */
-
-
-/*-**************************************************
- * !!!!! STATIC LINKING ONLY !!!!!
- * Following definitions are considered experimental.
- * They should not be linked from DLL,
- * as there is no guarantee of API stability yet.
- * Prototypes will be promoted to "stable" status
- * after successful usage in real-life scenarios.
- ***************************************************/
-#ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */
-#ifndef LZ4_HC_SLO_098092834
-#define LZ4_HC_SLO_098092834
-
-#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
-#include "lz4.h"
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
- * It's possible to change compression level
- * between successive invocations of LZ4_compress_HC_continue*()
- * for dynamic adaptation.
- */
-LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
- LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
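
A sketch of mid-stream level adaptation (illustrative; the backlog threshold is made up, and LZ4_HC_STATIC_LINKING_ONLY must be defined to reach this API):

    #define LZ4_HC_STATIC_LINKING_ONLY
    #include "lz4hc.h"

    void adapt_level(LZ4_streamHC_t* s, int backlogBytes)
    {
        /* drop to the cheapest level when the producer falls behind */
        int const level = (backlogBytes > (1 << 20)) ? LZ4HC_CLEVEL_MIN
                                                     : LZ4HC_CLEVEL_DEFAULT;
        LZ4_setCompressionLevel(s, level);   /* applies to subsequent blocks */
    }
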
-
-/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
- * Opt. Parser will favor decompression speed over compression ratio.
- * Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
- */
-LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
- LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
-
-/*! LZ4_resetStreamHC_fast() : v1.9.0+
- * When an LZ4_streamHC_t is known to be in an internally coherent state,
- * it can often be prepared for a new compression with almost no work, only
- * sometimes falling back to the full, expensive reset that is always required
- * when the stream is in an indeterminate state (i.e., the reset performed by
- * LZ4_resetStreamHC()).
- *
- * LZ4_streamHCs are guaranteed to be in a valid state when:
- * - returned from LZ4_createStreamHC()
- * - reset by LZ4_resetStreamHC()
- * - memset(stream, 0, sizeof(LZ4_streamHC_t))
- * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
- * - the stream was in a valid state and was then used in any compression call
- * that returned success
- * - the stream was in an indeterminate state and was used in a compression
- * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
- * returned success
- *
- * Note:
- * A stream that was last used in a compression call that returned an error
- * may be passed to this function. However, it will be fully reset, which will
- * clear any existing history and settings from the context.
- */
-LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
- LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
-
-/*! LZ4_compress_HC_extStateHC_fastReset() :
- * A variant of LZ4_compress_HC_extStateHC().
- *
- * Using this variant avoids an expensive initialization step. It is only safe
- * to call if the state buffer is known to be correctly initialized already
- * (see above comment on LZ4_resetStreamHC_fast() for a definition of
- * "correctly initialized"). From a high level, the difference is that this
- * function initializes the provided state with a call to
- * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
- * call to LZ4_resetStreamHC().
- */
-LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
- void* state,
- const char* src, char* dst,
- int srcSize, int dstCapacity,
- int compressionLevel);
-
-/*! LZ4_attach_HC_dictionary() :
- * This is an experimental API that allows for the efficient use of a
- * static dictionary many times.
- *
- * Rather than re-loading the dictionary buffer into a working context before
- * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
- * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
- * in which the working stream references the dictionary stream in-place.
- *
- * Several assumptions are made about the state of the dictionary stream.
- * Currently, only streams which have been prepared by LZ4_loadDictHC() should
- * be expected to work.
- *
- * Alternatively, the provided dictionary stream pointer may be NULL, in which
- * case any existing dictionary stream is unset.
- *
- * A dictionary should only be attached to a stream without any history (i.e.,
- * a stream that has just been reset).
- *
- * The dictionary will remain attached to the working stream only for the
- * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
- * dictionary context association from the working stream. The dictionary
- * stream (and source buffer) must remain in-place / accessible / unchanged
- * through the lifetime of the stream session.
- */
-LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
- LZ4_streamHC_t *working_stream,
- const LZ4_streamHC_t *dictionary_stream);
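
A sketch of the no-copy dictionary workflow (illustrative; names are made up): load the dictionary once, then attach it to each fresh working stream:

    #define LZ4_HC_STATIC_LINKING_ONLY
    #include "lz4hc.h"

    int compress_with_shared_dict(const char* dict, int dictSize,
                                  const char* src, int srcSize,
                                  char* dst, int dstCapacity)
    {
        LZ4_streamHC_t* const dictStream = LZ4_createStreamHC();
        LZ4_streamHC_t* const work = LZ4_createStreamHC();
        int cSize = 0;
        if (dictStream != NULL && work != NULL) {
            LZ4_loadDictHC(dictStream, dict, dictSize);          /* prepare once */
            LZ4_resetStreamHC_fast(work, LZ4HC_CLEVEL_DEFAULT);  /* history-free stream */
            LZ4_attach_HC_dictionary(work, dictStream);          /* in-place reference */
            cSize = LZ4_compress_HC_continue(work, src, dst, srcSize, dstCapacity);
        }
        LZ4_freeStreamHC(dictStream);   /* NULL-safe */
        LZ4_freeStreamHC(work);
        return cSize;
    }
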
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* LZ4_HC_SLO_098092834 */
-#endif /* LZ4_HC_STATIC_LINKING_ONLY */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/queue.h b/fluent-bit/lib/librdkafka-2.1.0/src/queue.h
deleted file mode 100644
index d1ba14833..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/queue.h
+++ /dev/null
@@ -1,850 +0,0 @@
-/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */
-
-/*
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)queue.h 8.5 (Berkeley) 8/20/94
- */
-
-#ifndef _SYS_QUEUE_H_
-#define _SYS_QUEUE_H_
-
-/*
- * This file defines five types of data structures: singly-linked lists,
- * lists, simple queues, tail queues, and circular queues.
- *
- * A singly-linked list is headed by a single forward pointer. The
- * elements are singly linked for minimum space and pointer manipulation
- * overhead at the expense of O(n) removal for arbitrary elements. New
- * elements can be added to the list after an existing element or at the
- * head of the list. Elements being removed from the head of the list
- * should use the explicit macro for this purpose for optimum
- * efficiency. A singly-linked list may only be traversed in the forward
- * direction. Singly-linked lists are ideal for applications with large
- * datasets and few or no removals or for implementing a LIFO queue.
- *
- * A list is headed by a single forward pointer (or an array of forward
- * pointers for a hash table header). The elements are doubly linked
- * so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before
- * or after an existing element or at the head of the list. A list
- * may only be traversed in the forward direction.
- *
- * A simple queue is headed by a pair of pointers, one the head of the
- * list and the other to the tail of the list. The elements are singly
- * linked to save space, so elements can only be removed from the
- * head of the list. New elements can be added to the list after
- * an existing element, at the head of the list, or at the end of the
- * list. A simple queue may only be traversed in the forward direction.
- *
- * A tail queue is headed by a pair of pointers, one to the head of the
- * list and the other to the tail of the list. The elements are doubly
- * linked so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before or
- * after an existing element, at the head of the list, or at the end of
- * the list. A tail queue may be traversed in either direction.
- *
- * A circle queue is headed by a pair of pointers, one to the head of the
- * list and the other to the tail of the list. The elements are doubly
- * linked so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before or after
- * an existing element, at the head of the list, or at the end of the list.
- * A circle queue may be traversed in either direction, but has a more
- * complex end of list detection.
- *
- * For details on the use of these macros, see the queue(3) manual page.
- */
-
-/*
- * Include the definition of NULL only on NetBSD because sys/null.h
- * is not available elsewhere. This conditional makes the header
- * portable and it can simply be dropped verbatim into any system.
- * The caveat is that on other systems some other header
- * must provide NULL before the macros can be used.
- */
-#ifdef __NetBSD__
-#include <sys/null.h>
-#endif
-
-#if defined(QUEUEDEBUG)
-# if defined(_KERNEL)
-# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
-# else
-# include <err.h>
-# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
-# endif
-#endif
-
-/*
- * Singly-linked List definitions.
- */
-#define SLIST_HEAD(name, type) \
-struct name { \
- struct type *slh_first; /* first element */ \
-}
-
-#define SLIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define SLIST_ENTRY(type) \
-struct { \
- struct type *sle_next; /* next element */ \
-}
-
-/*
- * Singly-linked List access methods.
- */
-#define SLIST_FIRST(head) ((head)->slh_first)
-#define SLIST_END(head) NULL
-#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
-#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
-
-#define SLIST_FOREACH(var, head, field) \
- for((var) = (head)->slh_first; \
- (var) != SLIST_END(head); \
- (var) = (var)->field.sle_next)
-
-#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = SLIST_FIRST((head)); \
- (var) != SLIST_END(head) && \
- ((tvar) = SLIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-/*
- * Singly-linked List functions.
- */
-#define SLIST_INIT(head) do { \
- (head)->slh_first = SLIST_END(head); \
-} while (/*CONSTCOND*/0)
-
-#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
- (elm)->field.sle_next = (slistelm)->field.sle_next; \
- (slistelm)->field.sle_next = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define SLIST_INSERT_HEAD(head, elm, field) do { \
- (elm)->field.sle_next = (head)->slh_first; \
- (head)->slh_first = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define SLIST_REMOVE_AFTER(slistelm, field) do { \
- (slistelm)->field.sle_next = \
- SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
-} while (/*CONSTCOND*/0)
-
-#define SLIST_REMOVE_HEAD(head, field) do { \
- (head)->slh_first = (head)->slh_first->field.sle_next; \
-} while (/*CONSTCOND*/0)
-
-#define SLIST_REMOVE(head, elm, type, field) do { \
- if ((head)->slh_first == (elm)) { \
- SLIST_REMOVE_HEAD((head), field); \
- } \
- else { \
- struct type *curelm = (head)->slh_first; \
- while(curelm->field.sle_next != (elm)) \
- curelm = curelm->field.sle_next; \
- curelm->field.sle_next = \
- curelm->field.sle_next->field.sle_next; \
- } \
-} while (/*CONSTCOND*/0)
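
A minimal SLIST sketch (illustrative; type and field names are made up): a LIFO of integers built from the macros above:

    #include <stdio.h>
    #include "queue.h"

    struct node {
        int value;
        SLIST_ENTRY(node) link;
    };
    SLIST_HEAD(node_list, node);

    void slist_demo(void)
    {
        struct node_list head = SLIST_HEAD_INITIALIZER(head);
        struct node a = { 1, { NULL } }, b = { 2, { NULL } };
        struct node *n;
        SLIST_INSERT_HEAD(&head, &a, link);
        SLIST_INSERT_HEAD(&head, &b, link);   /* b now precedes a */
        SLIST_FOREACH(n, &head, link)
            printf("%d\n", n->value);          /* prints 2, then 1 */
        SLIST_REMOVE_HEAD(&head, link);        /* O(1) removal at the head */
    }
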
-
-
-/*
- * List definitions.
- */
-#define LIST_HEAD(name, type) \
-struct name { \
- struct type *lh_first; /* first element */ \
-}
-
-#define LIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define LIST_ENTRY(type) \
-struct { \
- struct type *le_next; /* next element */ \
- struct type **le_prev; /* address of previous next element */ \
-}
-
-/*
- * List access methods.
- */
-#define LIST_FIRST(head) ((head)->lh_first)
-#define LIST_END(head) NULL
-#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
-#define LIST_NEXT(elm, field) ((elm)->field.le_next)
-
-#define LIST_FOREACH(var, head, field) \
- for ((var) = ((head)->lh_first); \
- (var) != LIST_END(head); \
- (var) = ((var)->field.le_next))
-
-#define LIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = LIST_FIRST((head)); \
- (var) != LIST_END(head) && \
- ((tvar) = LIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define LIST_MOVE(head1, head2) do { \
- LIST_INIT((head2)); \
- if (!LIST_EMPTY((head1))) { \
- (head2)->lh_first = (head1)->lh_first; \
- LIST_INIT((head1)); \
- } \
-} while (/*CONSTCOND*/0)
-
-/*
- * List functions.
- */
-#if defined(QUEUEDEBUG)
-#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
- if ((head)->lh_first && \
- (head)->lh_first->field.le_prev != &(head)->lh_first) \
- QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
- __FILE__, __LINE__);
-#define QUEUEDEBUG_LIST_OP(elm, field) \
- if ((elm)->field.le_next && \
- (elm)->field.le_next->field.le_prev != \
- &(elm)->field.le_next) \
- QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
- __FILE__, __LINE__); \
- if (*(elm)->field.le_prev != (elm)) \
- QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
- __FILE__, __LINE__);
-#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
- (elm)->field.le_next = (void *)1L; \
- (elm)->field.le_prev = (void *)1L;
-#else
-#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
-#define QUEUEDEBUG_LIST_OP(elm, field)
-#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
-#endif
-
-#define LIST_INIT(head) do { \
- (head)->lh_first = LIST_END(head); \
-} while (/*CONSTCOND*/0)
-
-#define LIST_INSERT_AFTER(listelm, elm, field) do { \
- QUEUEDEBUG_LIST_OP((listelm), field) \
- if (((elm)->field.le_next = (listelm)->field.le_next) != \
- LIST_END(head)) \
- (listelm)->field.le_next->field.le_prev = \
- &(elm)->field.le_next; \
- (listelm)->field.le_next = (elm); \
- (elm)->field.le_prev = &(listelm)->field.le_next; \
-} while (/*CONSTCOND*/0)
-
-#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
- QUEUEDEBUG_LIST_OP((listelm), field) \
- (elm)->field.le_prev = (listelm)->field.le_prev; \
- (elm)->field.le_next = (listelm); \
- *(listelm)->field.le_prev = (elm); \
- (listelm)->field.le_prev = &(elm)->field.le_next; \
-} while (/*CONSTCOND*/0)
-
-#define LIST_INSERT_HEAD(head, elm, field) do { \
- QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
- if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
- (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
- (head)->lh_first = (elm); \
- (elm)->field.le_prev = &(head)->lh_first; \
-} while (/*CONSTCOND*/0)
-
-#define LIST_REMOVE(elm, field) do { \
- QUEUEDEBUG_LIST_OP((elm), field) \
- if ((elm)->field.le_next != NULL) \
- (elm)->field.le_next->field.le_prev = \
- (elm)->field.le_prev; \
- *(elm)->field.le_prev = (elm)->field.le_next; \
- QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
-} while (/*CONSTCOND*/0)
-
-#define LIST_REPLACE(elm, elm2, field) do { \
- if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
- (elm2)->field.le_next->field.le_prev = \
- &(elm2)->field.le_next; \
- (elm2)->field.le_prev = (elm)->field.le_prev; \
- *(elm2)->field.le_prev = (elm2); \
- QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
-} while (/*CONSTCOND*/0)
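
A minimal LIST sketch (illustrative; names are made up), showing the O(1) arbitrary removal the doubly-linked design enables:

    #include <stddef.h>
    #include "queue.h"

    struct item {
        int id;
        LIST_ENTRY(item) entries;
    };
    LIST_HEAD(item_list, item);

    void list_demo(struct item *x, struct item *y)
    {
        struct item_list head;
        LIST_INIT(&head);
        LIST_INSERT_HEAD(&head, x, entries);
        LIST_INSERT_AFTER(x, y, entries);
        LIST_REMOVE(x, entries);   /* no traversal from the head required */
    }
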
-
-/*
- * Simple queue definitions.
- */
-#define SIMPLEQ_HEAD(name, type) \
-struct name { \
- struct type *sqh_first; /* first element */ \
- struct type **sqh_last; /* addr of last next element */ \
-}
-
-#define SIMPLEQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).sqh_first }
-
-#define SIMPLEQ_ENTRY(type) \
-struct { \
- struct type *sqe_next; /* next element */ \
-}
-
-/*
- * Simple queue access methods.
- */
-#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
-#define SIMPLEQ_END(head) NULL
-#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
-#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
-
-#define SIMPLEQ_FOREACH(var, head, field) \
- for ((var) = ((head)->sqh_first); \
- (var) != SIMPLEQ_END(head); \
- (var) = ((var)->field.sqe_next))
-
-#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
- for ((var) = ((head)->sqh_first); \
- (var) != SIMPLEQ_END(head) && \
- ((next = ((var)->field.sqe_next)), 1); \
- (var) = (next))
-
-/*
- * Simple queue functions.
- */
-#define SIMPLEQ_INIT(head) do { \
- (head)->sqh_first = NULL; \
- (head)->sqh_last = &(head)->sqh_first; \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
- if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
- (head)->sqh_last = &(elm)->field.sqe_next; \
- (head)->sqh_first = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
- (elm)->field.sqe_next = NULL; \
- *(head)->sqh_last = (elm); \
- (head)->sqh_last = &(elm)->field.sqe_next; \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
- if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
- (head)->sqh_last = &(elm)->field.sqe_next; \
- (listelm)->field.sqe_next = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
- if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
- (head)->sqh_last = &(head)->sqh_first; \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
- if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
- == NULL) \
- (head)->sqh_last = &(elm)->field.sqe_next; \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
- if ((head)->sqh_first == (elm)) { \
- SIMPLEQ_REMOVE_HEAD((head), field); \
- } else { \
- struct type *curelm = (head)->sqh_first; \
- while (curelm->field.sqe_next != (elm)) \
- curelm = curelm->field.sqe_next; \
- if ((curelm->field.sqe_next = \
- curelm->field.sqe_next->field.sqe_next) == NULL) \
- (head)->sqh_last = &(curelm)->field.sqe_next; \
- } \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_CONCAT(head1, head2) do { \
- if (!SIMPLEQ_EMPTY((head2))) { \
- *(head1)->sqh_last = (head2)->sqh_first; \
- (head1)->sqh_last = (head2)->sqh_last; \
- SIMPLEQ_INIT((head2)); \
- } \
-} while (/*CONSTCOND*/0)
-
-#define SIMPLEQ_LAST(head, type, field) \
- (SIMPLEQ_EMPTY((head)) ? \
- NULL : \
- ((struct type *)(void *) \
- ((char *)((head)->sqh_last) - offsetof(struct type, field))))
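
A minimal SIMPLEQ sketch (illustrative; names are made up): a FIFO where elements leave only from the head:

    #include <stddef.h>
    #include "queue.h"

    struct job {
        int ticket;
        SIMPLEQ_ENTRY(job) next;
    };
    SIMPLEQ_HEAD(job_queue, job);

    void simpleq_demo(struct job *j1, struct job *j2)
    {
        struct job_queue q = SIMPLEQ_HEAD_INITIALIZER(q);
        SIMPLEQ_INSERT_TAIL(&q, j1, next);
        SIMPLEQ_INSERT_TAIL(&q, j2, next);
        while (!SIMPLEQ_EMPTY(&q)) {
            struct job *j = SIMPLEQ_FIRST(&q);
            SIMPLEQ_REMOVE_HEAD(&q, next);   /* dequeue in insertion order */
            (void)j;
        }
    }
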
-
-/*
- * Tail queue definitions.
- */
-#define _TAILQ_HEAD(name, type, qual) \
-struct name { \
- qual type *tqh_first; /* first element */ \
- qual type *qual *tqh_last; /* addr of last next element */ \
-}
-#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
-
-#define TAILQ_HEAD_INITIALIZER(head) \
- { TAILQ_END(head), &(head).tqh_first }
-
-#define _TAILQ_ENTRY(type, qual) \
-struct { \
- qual type *tqe_next; /* next element */ \
- qual type *qual *tqe_prev; /* address of previous next element */\
-}
-#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
-
-/*
- * Tail queue access methods.
- */
-#define TAILQ_FIRST(head) ((head)->tqh_first)
-#define TAILQ_END(head) (NULL)
-#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
-#define TAILQ_LAST(head, headname) \
- (*(((struct headname *)((head)->tqh_last))->tqh_last))
-#define TAILQ_PREV(elm, headname, field) \
- (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
-#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
-
-
-#define TAILQ_FOREACH(var, head, field) \
- for ((var) = ((head)->tqh_first); \
- (var) != TAILQ_END(head); \
- (var) = ((var)->field.tqe_next))
-
-#define TAILQ_FOREACH_SAFE(var, head, field, next) \
- for ((var) = ((head)->tqh_first); \
- (var) != TAILQ_END(head) && \
- ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
-
-#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
- for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
- (var) != TAILQ_END(head); \
- (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
-
-#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
- for ((var) = TAILQ_LAST((head), headname); \
- (var) != TAILQ_END(head) && \
- ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
-
-/*
- * Tail queue functions.
- */
-#if defined(QUEUEDEBUG)
-#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
- if ((head)->tqh_first && \
- (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
- QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
- __FILE__, __LINE__);
-#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
- if (*(head)->tqh_last != NULL) \
- QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
- __FILE__, __LINE__);
-#define QUEUEDEBUG_TAILQ_OP(elm, field) \
- if ((elm)->field.tqe_next && \
- (elm)->field.tqe_next->field.tqe_prev != \
- &(elm)->field.tqe_next) \
- QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
- __FILE__, __LINE__); \
- if (*(elm)->field.tqe_prev != (elm)) \
- QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
- __FILE__, __LINE__);
-#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
- if ((elm)->field.tqe_next == NULL && \
- (head)->tqh_last != &(elm)->field.tqe_next) \
- QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
- (head), (elm), __FILE__, __LINE__);
-#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
- (elm)->field.tqe_next = (void *)1L; \
- (elm)->field.tqe_prev = (void *)1L;
-#else
-#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
-#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
-#define QUEUEDEBUG_TAILQ_OP(elm, field)
-#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
-#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
-#endif
-
-#define TAILQ_INIT(head) do { \
- (head)->tqh_first = TAILQ_END(head); \
- (head)->tqh_last = &(head)->tqh_first; \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_INSERT_HEAD(head, elm, field) do { \
- QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
- if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
- (head)->tqh_first->field.tqe_prev = \
- &(elm)->field.tqe_next; \
- else \
- (head)->tqh_last = &(elm)->field.tqe_next; \
- (head)->tqh_first = (elm); \
- (elm)->field.tqe_prev = &(head)->tqh_first; \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_INSERT_TAIL(head, elm, field) do { \
- QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
- (elm)->field.tqe_next = TAILQ_END(head); \
- (elm)->field.tqe_prev = (head)->tqh_last; \
- *(head)->tqh_last = (elm); \
- (head)->tqh_last = &(elm)->field.tqe_next; \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
- QUEUEDEBUG_TAILQ_OP((listelm), field) \
- if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
- TAILQ_END(head)) \
- (elm)->field.tqe_next->field.tqe_prev = \
- &(elm)->field.tqe_next; \
- else \
- (head)->tqh_last = &(elm)->field.tqe_next; \
- (listelm)->field.tqe_next = (elm); \
- (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
- QUEUEDEBUG_TAILQ_OP((listelm), field) \
- (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
- (elm)->field.tqe_next = (listelm); \
- *(listelm)->field.tqe_prev = (elm); \
- (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_REMOVE(head, elm, field) do { \
- QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
- QUEUEDEBUG_TAILQ_OP((elm), field) \
- if (((elm)->field.tqe_next) != TAILQ_END(head)) \
- (elm)->field.tqe_next->field.tqe_prev = \
- (elm)->field.tqe_prev; \
- else \
- (head)->tqh_last = (elm)->field.tqe_prev; \
- *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
- QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_REPLACE(head, elm, elm2, field) do { \
- if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
- TAILQ_END(head)) \
- (elm2)->field.tqe_next->field.tqe_prev = \
- &(elm2)->field.tqe_next; \
- else \
- (head)->tqh_last = &(elm2)->field.tqe_next; \
- (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
- *(elm2)->field.tqe_prev = (elm2); \
- QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
-} while (/*CONSTCOND*/0)
-
-#define TAILQ_CONCAT(head1, head2, field) do { \
- if (!TAILQ_EMPTY(head2)) { \
- *(head1)->tqh_last = (head2)->tqh_first; \
- (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
- (head1)->tqh_last = (head2)->tqh_last; \
- TAILQ_INIT((head2)); \
- } \
-} while (/*CONSTCOND*/0)
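-
-/*
- * Illustrative usage sketch (editorial addition, hypothetical caller code;
- * assumes <stdlib.h> for calloc()/free(), allocation failures ignored for
- * brevity): elements embed a TAILQ_ENTRY, are appended in O(1), and are
- * drained with the _SAFE iterator, which permits freeing the current
- * element.
- */
-struct myelem {
-        int value;
-        TAILQ_ENTRY(myelem) link;       /* embedded linkage */
-};
-TAILQ_HEAD(myelem_head, myelem);
-
-static void myelem_example(void) {
-        struct myelem_head head = TAILQ_HEAD_INITIALIZER(head);
-        struct myelem *e, *tmp;
-        int i;
-
-        for (i = 0; i < 3; i++) {
-                e = calloc(1, sizeof(*e));
-                e->value = i;
-                TAILQ_INSERT_TAIL(&head, e, link);
-        }
-
-        TAILQ_FOREACH_SAFE(e, &head, link, tmp) {
-                TAILQ_REMOVE(&head, e, link);
-                free(e);
-        }
-}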
-
-/*
- * Singly-linked Tail queue declarations.
- */
-#define STAILQ_HEAD(name, type) \
-struct name { \
- struct type *stqh_first; /* first element */ \
- struct type **stqh_last; /* addr of last next element */ \
-}
-
-#define STAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).stqh_first }
-
-#define STAILQ_ENTRY(type) \
-struct { \
- struct type *stqe_next; /* next element */ \
-}
-
-/*
- * Singly-linked Tail queue access methods.
- */
-#define STAILQ_FIRST(head) ((head)->stqh_first)
-#define STAILQ_END(head) NULL
-#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
-#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
-
-/*
- * Singly-linked Tail queue functions.
- */
-#define STAILQ_INIT(head) do { \
- (head)->stqh_first = NULL; \
- (head)->stqh_last = &(head)->stqh_first; \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_INSERT_HEAD(head, elm, field) do { \
- if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
- (head)->stqh_last = &(elm)->field.stqe_next; \
- (head)->stqh_first = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_INSERT_TAIL(head, elm, field) do { \
- (elm)->field.stqe_next = NULL; \
- *(head)->stqh_last = (elm); \
- (head)->stqh_last = &(elm)->field.stqe_next; \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
- if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
- (head)->stqh_last = &(elm)->field.stqe_next; \
- (listelm)->field.stqe_next = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_REMOVE_HEAD(head, field) do { \
- if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
- (head)->stqh_last = &(head)->stqh_first; \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_REMOVE(head, elm, type, field) do { \
- if ((head)->stqh_first == (elm)) { \
- STAILQ_REMOVE_HEAD((head), field); \
- } else { \
- struct type *curelm = (head)->stqh_first; \
- while (curelm->field.stqe_next != (elm)) \
- curelm = curelm->field.stqe_next; \
- if ((curelm->field.stqe_next = \
- curelm->field.stqe_next->field.stqe_next) == NULL) \
- (head)->stqh_last = &(curelm)->field.stqe_next; \
- } \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_FOREACH(var, head, field) \
- for ((var) = ((head)->stqh_first); \
- (var); \
- (var) = ((var)->field.stqe_next))
-
-#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = STAILQ_FIRST((head)); \
- (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define STAILQ_CONCAT(head1, head2) do { \
- if (!STAILQ_EMPTY((head2))) { \
- *(head1)->stqh_last = (head2)->stqh_first; \
- (head1)->stqh_last = (head2)->stqh_last; \
- STAILQ_INIT((head2)); \
- } \
-} while (/*CONSTCOND*/0)
-
-#define STAILQ_LAST(head, type, field) \
- (STAILQ_EMPTY((head)) ? \
- NULL : \
- ((struct type *)(void *) \
- ((char *)((head)->stqh_last) - offsetof(struct type, field))))
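-
-/*
- * Illustrative sketch (editorial addition, hypothetical 'job' type):
- * STAILQ gives O(1) tail insertion with a single forward link per element,
- * which suits FIFO work queues.
- */
-struct job {
-        int id;
-        STAILQ_ENTRY(job) link;
-};
-STAILQ_HEAD(job_queue, job);
-
-static void job_queue_example(struct job_queue *q, struct job *j) {
-        STAILQ_INSERT_TAIL(q, j, link);      /* enqueue at tail */
-
-        while (!STAILQ_EMPTY(q)) {
-                struct job *next = STAILQ_FIRST(q);
-                STAILQ_REMOVE_HEAD(q, link); /* dequeue from head */
-                (void)next;                  /* process 'next' here */
-        }
-}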
-
-
-#ifndef _KERNEL
-/*
- * Circular queue definitions. Do not use. We still keep the macros
- * for compatibility but because of pointer aliasing issues their use
- * is discouraged!
- */
-
-/*
- * __launder_type(): We use this ugly hack to work around the compiler
- * noticing that two types may not alias each other and elide tests in code.
- * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
- * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
- * 4.8) declare these comparisons as always false, causing the code to
- * not run as designed.
- *
- * This hack is only to be used for comparisons and thus can be fully const.
- * Do not use for assignment.
- *
- * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
- * this by changing the head/tail sentinel values, but see the note above
- * this one.
- */
-#ifdef _MSC_VER
-#define __launder_type(x) ((const void *)(x))
-#else
-static inline const void * __launder_type(const void *);
-static inline const void *
-__launder_type(const void *__x)
-{
- __asm __volatile("" : "+r" (__x));
- return __x;
-}
-#endif
-
-#if defined(QUEUEDEBUG)
-#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
- if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
- (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
- QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
- __FILE__, __LINE__); \
- if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
- (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
- QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
- __FILE__, __LINE__);
-#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
- if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
- if ((head)->cqh_last != (elm)) \
- QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
- (elm), __FILE__, __LINE__); \
- } else { \
- if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
- QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
- (elm), __FILE__, __LINE__); \
- } \
- if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
- if ((head)->cqh_first != (elm)) \
- QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
- (elm), __FILE__, __LINE__); \
- } else { \
- if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
- QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
- (elm), __FILE__, __LINE__); \
- }
-#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
- (elm)->field.cqe_next = (void *)1L; \
- (elm)->field.cqe_prev = (void *)1L;
-#else
-#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
-#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
-#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
-#endif
-
-#define CIRCLEQ_HEAD(name, type) \
-struct name { \
- struct type *cqh_first; /* first element */ \
- struct type *cqh_last; /* last element */ \
-}
-
-#define CIRCLEQ_HEAD_INITIALIZER(head) \
- { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
-
-#define CIRCLEQ_ENTRY(type) \
-struct { \
- struct type *cqe_next; /* next element */ \
- struct type *cqe_prev; /* previous element */ \
-}
-
-/*
- * Circular queue functions.
- */
-#define CIRCLEQ_INIT(head) do { \
- (head)->cqh_first = CIRCLEQ_END(head); \
- (head)->cqh_last = CIRCLEQ_END(head); \
-} while (/*CONSTCOND*/0)
-
-#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
- QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
- QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
- (elm)->field.cqe_next = (listelm)->field.cqe_next; \
- (elm)->field.cqe_prev = (listelm); \
- if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
- (head)->cqh_last = (elm); \
- else \
- (listelm)->field.cqe_next->field.cqe_prev = (elm); \
- (listelm)->field.cqe_next = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
- QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
- QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
- (elm)->field.cqe_next = (listelm); \
- (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
- if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
- (head)->cqh_first = (elm); \
- else \
- (listelm)->field.cqe_prev->field.cqe_next = (elm); \
- (listelm)->field.cqe_prev = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
- QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
- (elm)->field.cqe_next = (head)->cqh_first; \
- (elm)->field.cqe_prev = CIRCLEQ_END(head); \
- if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
- (head)->cqh_last = (elm); \
- else \
- (head)->cqh_first->field.cqe_prev = (elm); \
- (head)->cqh_first = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
- QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
- (elm)->field.cqe_next = CIRCLEQ_END(head); \
- (elm)->field.cqe_prev = (head)->cqh_last; \
- if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
- (head)->cqh_first = (elm); \
- else \
- (head)->cqh_last->field.cqe_next = (elm); \
- (head)->cqh_last = (elm); \
-} while (/*CONSTCOND*/0)
-
-#define CIRCLEQ_REMOVE(head, elm, field) do { \
- QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
- QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
- if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
- (head)->cqh_last = (elm)->field.cqe_prev; \
- else \
- (elm)->field.cqe_next->field.cqe_prev = \
- (elm)->field.cqe_prev; \
- if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
- (head)->cqh_first = (elm)->field.cqe_next; \
- else \
- (elm)->field.cqe_prev->field.cqe_next = \
- (elm)->field.cqe_next; \
- QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
-} while (/*CONSTCOND*/0)
-
-#define CIRCLEQ_FOREACH(var, head, field) \
- for ((var) = ((head)->cqh_first); \
- (var) != CIRCLEQ_ENDC(head); \
- (var) = ((var)->field.cqe_next))
-
-#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
- for ((var) = ((head)->cqh_last); \
- (var) != CIRCLEQ_ENDC(head); \
- (var) = ((var)->field.cqe_prev))
-
-/*
- * Circular queue access methods.
- */
-#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
-#define CIRCLEQ_LAST(head) ((head)->cqh_last)
-/* For comparisons */
-#define CIRCLEQ_ENDC(head) (__launder_type(head))
-/* For assignments */
-#define CIRCLEQ_END(head) ((void *)(head))
-#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
-#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
-#define CIRCLEQ_EMPTY(head) \
- (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
-
-#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
- (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
- ? ((head)->cqh_first) \
- : (elm->field.cqe_next))
-#define CIRCLEQ_LOOP_PREV(head, elm, field) \
- (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
- ? ((head)->cqh_last) \
- : (elm->field.cqe_prev))
-#endif /* !_KERNEL */
-
-#endif /* !_SYS_QUEUE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rd.h b/fluent-bit/lib/librdkafka-2.1.0/src/rd.h
deleted file mode 100644
index 670605de4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rd.h
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RD_H_
-#define _RD_H_
-
-#ifndef _WIN32
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE /* for strndup() */
-#endif
-
-#if defined(__APPLE__) && !defined(_DARWIN_C_SOURCE)
-#define _DARWIN_C_SOURCE /* for strlcpy, pthread_setname_np, etc */
-#endif
-
-#define __need_IOV_MAX
-#ifndef _POSIX_C_SOURCE
-#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */
-#endif
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <time.h>
-#include <assert.h>
-#include <limits.h>
-
-#include "tinycthread.h"
-#include "rdsysqueue.h"
-
-#ifdef _WIN32
-/* Visual Studio */
-#include "win32_config.h"
-#else
-/* POSIX / UNIX based systems */
-#include "../config.h" /* mklove output */
-#endif
-
-#ifdef _WIN32
-/* Win32/Visual Studio */
-#include "rdwin32.h"
-
-#else
-/* POSIX / UNIX based systems */
-#include "rdposix.h"
-#endif
-
-#include "rdtypes.h"
-
-#if WITH_SYSLOG
-#include <syslog.h>
-#else
-#define LOG_EMERG 0
-#define LOG_ALERT 1
-#define LOG_CRIT 2
-#define LOG_ERR 3
-#define LOG_WARNING 4
-#define LOG_NOTICE 5
-#define LOG_INFO 6
-#define LOG_DEBUG 7
-#endif
-
-
-/* Debug assert, only enabled with --enable-devel */
-#if ENABLE_DEVEL == 1
-#define rd_dassert(cond) rd_assert(cond)
-#else
-#define rd_dassert(cond) \
- do { \
- } while (0)
-#endif
-
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-/** Function attribute to indicate that a sentinel NULL is required at the
- * end of the va-arg input list. */
-#define RD_SENTINEL __attribute__((__sentinel__))
-#else
-#define RD_SENTINEL
-#endif
-
-
-/** Assert if reached */
-#define RD_NOTREACHED() rd_assert(!*"/* NOTREACHED */ violated")
-
-/** Log an internal error and assert */
-#define RD_BUG(...) \
- do { \
- fprintf(stderr, \
- "INTERNAL ERROR: librdkafka %s:%d: ", __FUNCTION__, \
- __LINE__); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\n"); \
- rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \
- } while (0)
-
-
-
-/**
- * Allocator wrappers.
- * We operate under the premise that if a (small) memory
- * allocation fails, all hope is lost and the application
- * will fail anyway, so there is no need to handle it gracefully.
- */
-static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) {
- void *p = calloc(num, sz);
- rd_assert(p);
- return p;
-}
-
-static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) {
- void *p = malloc(sz);
- rd_assert(p);
- return p;
-}
-
-static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) {
- void *p = realloc(ptr, sz);
- rd_assert(p);
- return p;
-}
-
-static RD_INLINE RD_UNUSED void rd_free(void *ptr) {
- free(ptr);
-}
-
-static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) {
-#ifndef _WIN32
- char *n = strdup(s);
-#else
- char *n = _strdup(s);
-#endif
- rd_assert(n);
- return n;
-}
-
-static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
-#if HAVE_STRNDUP
- char *n = strndup(s, len);
- rd_assert(n);
-#else
- char *n = (char *)rd_malloc(len + 1);
- rd_assert(n);
- memcpy(n, s, len);
- n[len] = '\0';
-#endif
- return n;
-}
-
-
-
-/*
- * Portability
- */
-
-#ifdef strndupa
-#define rd_strndupa(DESTPTR, PTR, LEN) (*(DESTPTR) = strndupa(PTR, LEN))
-#else
-#define rd_strndupa(DESTPTR, PTR, LEN) \
- do { \
- const char *_src = (PTR); \
- size_t _srclen = (LEN); \
- char *_dst = rd_alloca(_srclen + 1); \
- memcpy(_dst, _src, _srclen); \
- _dst[_srclen] = '\0'; \
- *(DESTPTR) = _dst; \
- } while (0)
-#endif
-
-#ifdef strdupa
-#define rd_strdupa(DESTPTR, PTR) (*(DESTPTR) = strdupa(PTR))
-#else
-#define rd_strdupa(DESTPTR, PTR) \
- do { \
- const char *_src1 = (PTR); \
- size_t _srclen1 = strlen(_src1); \
- rd_strndupa(DESTPTR, _src1, _srclen1); \
- } while (0)
-#endif
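-
-/*
- * Illustrative sketch (editorial addition): rd_strdupa() duplicates onto
- * the stack (via strdupa() or rd_alloca()), so the copy must not escape
- * the calling scope.
- */
-static RD_INLINE RD_UNUSED void rd_strdupa_example(const char *name) {
-        char *tmp;
-        rd_strdupa(&tmp, name); /* stack-allocated duplicate */
-        /* ... use/mutate 'tmp' locally; do NOT return or store it ... */
-}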
-
-#ifndef IOV_MAX
-#ifdef __APPLE__
-/* Some versions of Mac OS X don't define IOV_MAX */
-#define IOV_MAX 1024
-#elif defined(_WIN32) || defined(__GNU__)
-/* IOV_MAX is not defined on MSVC or GNU/Hurd, but it is used internally in librdkafka */
-#define IOV_MAX 1024
-#else
-#error "IOV_MAX not defined"
-#endif
-#endif
-
-
-/* Round/align X upwards to STRIDE, which must be power of 2. */
-#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE)-1)) & ~(STRIDE - 1))
-
-#define RD_ARRAY_SIZE(A) (sizeof((A)) / sizeof(*(A)))
-#define RD_ARRAYSIZE(A) RD_ARRAY_SIZE(A)
-#define RD_SIZEOF(TYPE, MEMBER) sizeof(((TYPE *)NULL)->MEMBER)
-#define RD_OFFSETOF(TYPE, MEMBER) ((size_t) & (((TYPE *)NULL)->MEMBER))
-
-/**
- * Returns the 'I'th array element from static sized array 'A'
- * or NULL if 'I' is out of range.
- * var-args is an optional prefix to provide the correct return type.
- */
-#define RD_ARRAY_ELEM(A, I, ...) \
- ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__(A)[(I)] : NULL)
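-
-/*
- * Illustrative sketch (editorial addition): the var-arg prefix is a cast
- * that gives the ternary a pointer type compatible with the NULL branch,
- * e.g., given some index i:
- *
- *   static const char *names[] = {"a", "b", "c"};
- *   const char *n = RD_ARRAY_ELEM(names, i, (const char *));
- *   // n is NULL when i >= 3
- */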
-
-
-#define RD_STRINGIFY(X) #X
-
-
-
-#define RD_MIN(a, b) ((a) < (b) ? (a) : (b))
-#define RD_MAX(a, b) ((a) > (b) ? (a) : (b))
-
-
-/**
- * Cap an integer (of any type) to reside within the defined limit.
- */
-#define RD_INT_CAP(val, low, hi) \
- ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val)))
-
-
-
-/**
- * Allocate 'size' bytes, copy 'src', return pointer to new memory.
- *
- * Use rd_free() to free the returned pointer.
- */
-static RD_INLINE RD_UNUSED void *rd_memdup(const void *src, size_t size) {
- void *dst = rd_malloc(size);
- memcpy(dst, src, size);
- return dst;
-}
-
-/**
- * @brief Memset &OBJ to 0; sizeof(OBJ) is applied automatically.
- */
-#define RD_MEMZERO(OBJ) memset(&(OBJ), 0, sizeof(OBJ))
-
-
-/**
- * Generic refcnt interface
- */
-
-#if !HAVE_ATOMICS_32
-#define RD_REFCNT_USE_LOCKS 1
-#endif
-
-#ifdef RD_REFCNT_USE_LOCKS
-typedef struct rd_refcnt_t {
- mtx_t lock;
- int v;
-} rd_refcnt_t;
-#else
-typedef rd_atomic32_t rd_refcnt_t;
-#endif
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_init(rd_refcnt_t *R, int v) {
- int r;
- mtx_init(&R->lock, mtx_plain);
- mtx_lock(&R->lock);
- r = R->v = v;
- mtx_unlock(&R->lock);
- return r;
-}
-#else
-#define rd_refcnt_init(R, v) rd_atomic32_init(R, v)
-#endif
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED void rd_refcnt_destroy(rd_refcnt_t *R) {
- mtx_lock(&R->lock);
- rd_assert(R->v == 0);
- mtx_unlock(&R->lock);
-
- mtx_destroy(&R->lock);
-}
-#else
-#define rd_refcnt_destroy(R) \
- do { \
- } while (0)
-#endif
-
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_set(rd_refcnt_t *R, int v) {
- int r;
- mtx_lock(&R->lock);
- r = R->v = v;
- mtx_unlock(&R->lock);
- return r;
-}
-#else
-#define rd_refcnt_set(R, v) rd_atomic32_set(R, v)
-#endif
-
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_add0(rd_refcnt_t *R) {
- int r;
- mtx_lock(&R->lock);
- r = ++(R->v);
- mtx_unlock(&R->lock);
- return r;
-}
-#else
-#define rd_refcnt_add0(R) rd_atomic32_add(R, 1)
-#endif
-
-static RD_INLINE RD_UNUSED int rd_refcnt_sub0(rd_refcnt_t *R) {
- int r;
-#ifdef RD_REFCNT_USE_LOCKS
- mtx_lock(&R->lock);
- r = --(R->v);
- mtx_unlock(&R->lock);
-#else
- r = rd_atomic32_sub(R, 1);
-#endif
- if (r < 0)
- rd_assert(!*"refcnt sub-zero");
- return r;
-}
-
-#ifdef RD_REFCNT_USE_LOCKS
-static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) {
- int r;
- mtx_lock(&R->lock);
- r = R->v;
- mtx_unlock(&R->lock);
- return r;
-}
-#else
-#define rd_refcnt_get(R) rd_atomic32_get(R)
-#endif
-
-/**
- * A wrapper for decreasing refcount and calling a destroy function
- * when refcnt reaches 0.
- */
-#define rd_refcnt_destroywrapper(REFCNT, DESTROY_CALL) \
- do { \
- if (rd_refcnt_sub(REFCNT) > 0) \
- break; \
- DESTROY_CALL; \
- } while (0)
-
-
-#define rd_refcnt_destroywrapper2(REFCNT, WHAT, DESTROY_CALL) \
- do { \
- if (rd_refcnt_sub2(REFCNT, WHAT) > 0) \
- break; \
- DESTROY_CALL; \
- } while (0)
-
-#if ENABLE_REFCNT_DEBUG
-#define rd_refcnt_add_fl(FUNC, LINE, R) \
- (fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", #R, \
- rd_refcnt_get(R), (R), (FUNC), (LINE)), \
- rd_refcnt_add0(R))
-
-#define rd_refcnt_add(R) rd_refcnt_add_fl(__FUNCTION__, __LINE__, (R))
-
-#define rd_refcnt_add2(R, WHAT) \
- do { \
- fprintf(stderr, \
- "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", #R, \
- rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \
- rd_refcnt_add0(R); \
- } while (0)
-
-#define rd_refcnt_sub2(R, WHAT) \
- (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", #R, \
- rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \
- rd_refcnt_sub0(R))
-
-#define rd_refcnt_sub(R) \
- (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", #R, \
- rd_refcnt_get(R), (R), __FUNCTION__, __LINE__), \
- rd_refcnt_sub0(R))
-
-#else
-#define rd_refcnt_add_fl(FUNC, LINE, R) rd_refcnt_add0(R)
-#define rd_refcnt_add(R) rd_refcnt_add0(R)
-#define rd_refcnt_sub(R) rd_refcnt_sub0(R)
-#endif
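-
-/*
- * Illustrative usage sketch (editorial addition; the 'myobj' type is
- * hypothetical): an object created with refcount 1 and destroyed when the
- * last reference is dropped.
- */
-typedef struct myobj_s {
-        rd_refcnt_t refcnt;
-        /* ... payload ... */
-} myobj_t;
-
-static RD_INLINE RD_UNUSED void myobj_destroy(myobj_t *obj) {
-        rd_refcnt_destroy(&obj->refcnt);
-        rd_free(obj);
-}
-
-static RD_INLINE RD_UNUSED myobj_t *myobj_new(void) {
-        myobj_t *obj = rd_calloc(1, sizeof(*obj));
-        rd_refcnt_init(&obj->refcnt, 1);
-        return obj;
-}
-
-static RD_INLINE RD_UNUSED void myobj_unref(myobj_t *obj) {
-        rd_refcnt_destroywrapper(&obj->refcnt, myobj_destroy(obj));
-}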
-
-
-
-#define RD_IF_FREE(PTR, FUNC) \
- do { \
- if ((PTR)) \
- FUNC(PTR); \
- } while (0)
-
-
-/**
- * @brief Utility types to hold memory,size tuple.
- */
-
-typedef struct rd_chariov_s {
- char *ptr;
- size_t size;
-} rd_chariov_t;
-
-#endif /* _RD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c
deleted file mode 100644
index 092406233..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-
-#include "rd.h"
-#include "rdaddr.h"
-#include "rdrand.h"
-
-#ifdef _WIN32
-#include <ws2tcpip.h>
-#endif
-
-const char *rd_sockaddr2str(const void *addr, int flags) {
- const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr;
- static RD_TLS char ret[32][256];
- static RD_TLS int reti = 0;
- char portstr[32];
- int of = 0;
- int niflags = NI_NUMERICSERV;
- int r;
-
- reti = (reti + 1) % 32;
-
- switch (a->sinx_family) {
- case AF_INET:
- case AF_INET6:
- if (flags & RD_SOCKADDR2STR_F_FAMILY)
- of += rd_snprintf(&ret[reti][of],
- sizeof(ret[reti]) - of, "ipv%i#",
- a->sinx_family == AF_INET ? 4 : 6);
-
- if ((flags & RD_SOCKADDR2STR_F_PORT) &&
- a->sinx_family == AF_INET6)
- ret[reti][of++] = '[';
-
- if (!(flags & RD_SOCKADDR2STR_F_RESOLVE))
- niflags |= NI_NUMERICHOST;
-
- retry:
- if ((r = getnameinfo(
- (const struct sockaddr *)a, RD_SOCKADDR_INX_LEN(a),
-
- ret[reti] + of, sizeof(ret[reti]) - of,
-
- (flags & RD_SOCKADDR2STR_F_PORT) ? portstr : NULL,
-
- (flags & RD_SOCKADDR2STR_F_PORT) ? sizeof(portstr) : 0,
-
- niflags))) {
-
- if (r == EAI_AGAIN && !(niflags & NI_NUMERICHOST)) {
- /* If unable to resolve name, retry without
- * name resolution. */
- niflags |= NI_NUMERICHOST;
- goto retry;
- }
- break;
- }
-
-
- if (flags & RD_SOCKADDR2STR_F_PORT) {
- size_t len = strlen(ret[reti]);
- rd_snprintf(
- ret[reti] + len, sizeof(ret[reti]) - len, "%s:%s",
- a->sinx_family == AF_INET6 ? "]" : "", portstr);
- }
-
- return ret[reti];
- }
-
-
- /* Error-case */
- rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>",
- rd_family2str(a->sinx_family));
-
- return ret[reti];
-}
-
-
-const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc) {
- static RD_TLS char snode[256];
- static RD_TLS char ssvc[64];
- const char *t;
- const char *svct = NULL;
- size_t nodelen = 0;
-
- *snode = '\0';
- *ssvc = '\0';
-
- if (*nodesvc == '[') {
- /* "[host]".. (enveloped node name) */
- if (!(t = strchr(nodesvc, ']')))
- return "Missing close-']'";
- nodesvc++;
- nodelen = t - nodesvc;
- svct = t + 1;
-
- } else if (*nodesvc == ':' && *(nodesvc + 1) != ':') {
- /* ":".. (port only) */
- nodelen = 0;
- svct = nodesvc;
- }
-
- if ((svct = strrchr(svct ? svct : nodesvc, ':')) &&
- (*(svct - 1) != ':') && *(++svct)) {
- /* Optional ":service" definition. */
- if (strlen(svct) >= sizeof(ssvc))
- return "Service name too long";
- strcpy(ssvc, svct);
- if (!nodelen)
- nodelen = svct - nodesvc - 1;
-
- } else if (!nodelen)
- nodelen = strlen(nodesvc);
-
- if (nodelen) {
- /* Truncate nodename if necessary. */
- nodelen = RD_MIN(nodelen, sizeof(snode) - 1);
- memcpy(snode, nodesvc, nodelen);
- snode[nodelen] = '\0';
- }
-
- *node = snode;
- *svc = ssvc;
-
- return NULL;
-}
-
-
-
-rd_sockaddr_list_t *
-rd_getaddrinfo(const char *nodesvc,
- const char *defsvc,
- int flags,
- int family,
- int socktype,
- int protocol,
- int (*resolve_cb)(const char *node,
- const char *service,
- const struct addrinfo *hints,
- struct addrinfo **res,
- void *opaque),
- void *opaque,
- const char **errstr) {
- struct addrinfo hints;
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = family;
- hints.ai_socktype = socktype;
- hints.ai_protocol = protocol;
- hints.ai_flags = flags;
-
- struct addrinfo *ais, *ai;
- char *node, *svc;
- int r;
- int cnt = 0;
- rd_sockaddr_list_t *rsal;
-
- if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) {
- errno = EINVAL;
- return NULL;
- }
-
- if (*svc)
- defsvc = svc;
-
- if (resolve_cb) {
- r = resolve_cb(node, defsvc, &hints, &ais, opaque);
- } else {
- r = getaddrinfo(node, defsvc, &hints, &ais);
- }
-
- if (r) {
-#ifdef EAI_SYSTEM
- if (r == EAI_SYSTEM)
-#else
- if (0)
-#endif
- *errstr = rd_strerror(errno);
- else {
-#ifdef _WIN32
- *errstr = gai_strerrorA(r);
-#else
- *errstr = gai_strerror(r);
-#endif
- errno = EFAULT;
- }
- return NULL;
- }
-
- /* Count number of addresses */
- for (ai = ais; ai != NULL; ai = ai->ai_next)
- cnt++;
-
- if (cnt == 0) {
- /* unlikely? */
- if (resolve_cb)
- resolve_cb(NULL, NULL, NULL, &ais, opaque);
- else
- freeaddrinfo(ais);
- errno = ENOENT;
- *errstr = "No addresses";
- return NULL;
- }
-
-
- rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt));
-
- for (ai = ais; ai != NULL; ai = ai->ai_next)
- memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], ai->ai_addr,
- ai->ai_addrlen);
-
- if (resolve_cb)
- resolve_cb(NULL, NULL, NULL, &ais, opaque);
- else
- freeaddrinfo(ais);
-
- /* Shuffle address list for proper round-robin */
- if (!(flags & RD_AI_NOSHUFFLE))
- rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt,
- sizeof(*rsal->rsal_addr));
-
- return rsal;
-}
-
-
-
-void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal) {
- rd_free(rsal);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h
deleted file mode 100644
index c8574d019..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDADDR_H_
-#define _RDADDR_H_
-
-#ifndef _WIN32
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <netdb.h>
-#else
-#define WIN32_MEAN_AND_LEAN
-#include <winsock2.h>
-#include <ws2ipdef.h>
-#endif
-
-#if defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__)
-#include <sys/socket.h>
-#endif
-
-/**
- * rd_sockaddr_inx_t is a union for either IPv4 or IPv6 sockaddrs.
- * It provides a convenient abstraction for AF_INET*-agnostic operations.
- */
-typedef union {
- struct sockaddr_in in;
- struct sockaddr_in6 in6;
-} rd_sockaddr_inx_t;
-#define sinx_family in.sin_family
-#define sinx_addr in.sin_addr
-#define RD_SOCKADDR_INX_LEN(sinx) \
- ((sinx)->sinx_family == AF_INET \
- ? sizeof(struct sockaddr_in) \
- : (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6) \
- : sizeof(rd_sockaddr_inx_t))
-#define RD_SOCKADDR_INX_PORT(sinx) \
- ((sinx)->sinx_family == AF_INET \
- ? (sinx)->in.sin_port \
- : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)
-
-#define RD_SOCKADDR_INX_PORT_SET(sinx, port) \
- do { \
- if ((sinx)->sinx_family == AF_INET) \
- (sinx)->in.sin_port = port; \
- else if ((sinx)->sinx_family == AF_INET6) \
- (sinx)->in6.sin6_port = port; \
- } while (0)
-
-
-
-/**
- * Returns a thread-local temporary string (may be called up to 32 times
- * without buffer wrapping) containing the human string representation
- * of the sockaddr (which should be AF_INET or AF_INET6 at this point).
- * If RD_SOCKADDR2STR_F_PORT is set, the port number will be
- * appended to the string.
- * IPv6 address enveloping ("[addr]:port") will also be performed
- * if .._F_PORT is set.
- */
-#define RD_SOCKADDR2STR_F_PORT 0x1 /* Append the port. */
-#define RD_SOCKADDR2STR_F_RESOLVE 0x2 /* Try to resolve address to hostname. */
-#define RD_SOCKADDR2STR_F_FAMILY 0x4 /* Prepend address family. */
-#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \
- (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE)
-const char *rd_sockaddr2str(const void *addr, int flags);
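-
-/*
- * Illustrative sketch (editorial addition; assumes <stdio.h> is available
- * via rd.h): format a peer address with port and family prefix. The
- * returned pointer is thread-local and short-lived, so use it immediately.
- */
-static RD_INLINE RD_UNUSED void
-rd_sockaddr2str_example(const rd_sockaddr_inx_t *sinx) {
-        printf("connected to %s\n",
-               rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_PORT |
-                                         RD_SOCKADDR2STR_F_FAMILY));
-}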
-
-
-/**
- * Splits a node:service definition up into their node and svc counterparts
- * suitable for passing to getaddrinfo().
- * Returns NULL on success (with temporarily valid pointers in '*node'
- * and '*svc') or an error string on failure.
- *
- * Thread-safe but returned buffers in '*node' and '*svc' are only
- * usable until the next call to rd_addrinfo_prepare() in the same thread.
- */
-const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc);
-
-
-
-typedef struct rd_sockaddr_list_s {
- int rsal_cnt;
- int rsal_curr;
- rd_sockaddr_inx_t rsal_addr[];
-} rd_sockaddr_list_t;
-
-
-/**
- * Returns the next address from a sockaddr list and updates
- * the current-index to point to it.
- *
- * Typical usage is for round-robin connection attempts or similar:
- * while (1) {
- * rd_sockaddr_inx_t *sinx = rd_sockaddr_list_next(my_server_list);
- * if (do_connect((struct sockaddr *)sinx) == -1) {
- * sleep(1);
- * continue;
- * }
- * ...
- * }
- *
- */
-
-static RD_INLINE rd_sockaddr_inx_t *
-rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) RD_UNUSED;
-static RD_INLINE rd_sockaddr_inx_t *
-rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) {
- rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt;
- return &rsal->rsal_addr[rsal->rsal_curr];
-}
-
-
-#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \
- for ((sinx) = &(rsal)->rsal_addr[0]; \
- (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_cnt]; (sinx)++)
-
-/**
- * Wrapper for getaddrinfo(3) that performs these additional tasks:
- * - Input is a combined "<node>[:<svc>]" string, with support for
- * IPv6 enveloping ("[addr]:port").
- * - Returns a rd_sockaddr_list_t which must be freed with
- * rd_sockaddr_list_destroy() when done with it.
- * - Automatically shuffles the returned address list to provide
- * round-robin (unless RD_AI_NOSHUFFLE is provided in 'flags').
- *
- * Thread-safe.
- */
-#define RD_AI_NOSHUFFLE \
- 0x10000000 /* Don't shuffle the returned address list. \
- * FIXME: Guessing non-used bits like this \
- * is a bad idea. */
-
-struct addrinfo;
-
-rd_sockaddr_list_t *
-rd_getaddrinfo(const char *nodesvc,
- const char *defsvc,
- int flags,
- int family,
- int socktype,
- int protocol,
- int (*resolve_cb)(const char *node,
- const char *service,
- const struct addrinfo *hints,
- struct addrinfo **res,
- void *opaque),
- void *opaque,
- const char **errstr);
-
-
-
-/**
- * Frees a sockaddr list.
- *
- * Thread-safe.
- */
-void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal);
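-
-/*
- * Illustrative usage sketch (editorial addition; 'broker.example.com' is a
- * placeholder, error handling is abbreviated, and <stdio.h> plus the
- * getaddrinfo constants from the headers above are assumed): resolve
- * "host:port" with a default service, pick the next address round-robin,
- * then free the list.
- */
-static RD_INLINE RD_UNUSED void rd_getaddrinfo_example(void) {
-        const char *errstr;
-        rd_sockaddr_list_t *rsal;
-        rd_sockaddr_inx_t *sinx;
-
-        rsal = rd_getaddrinfo("broker.example.com:9092", "9092",
-                              AI_ADDRCONFIG, AF_UNSPEC, SOCK_STREAM,
-                              IPPROTO_TCP, NULL /* no resolve_cb */,
-                              NULL /* opaque */, &errstr);
-        if (!rsal) {
-                fprintf(stderr, "resolve failed: %s\n", errstr);
-                return;
-        }
-
-        /* Each call returns the next address for round-robin connects. */
-        sinx = rd_sockaddr_list_next(rsal);
-        (void)sinx;
-
-        rd_sockaddr_list_destroy(rsal);
-}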
-
-
-
-/**
- * Returns the human readable name of a socket family.
- */
-static const char *rd_family2str(int af) RD_UNUSED;
-static const char *rd_family2str(int af) {
- switch (af) {
- case AF_INET:
- return "inet";
- case AF_INET6:
- return "inet6";
- default:
- return "af?";
- };
-}
-
-#endif /* _RDADDR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h
deleted file mode 100644
index aa7d3d770..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2014-2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDATOMIC_H_
-#define _RDATOMIC_H_
-
-#include "tinycthread.h"
-
-typedef struct {
- int32_t val;
-#if !defined(_WIN32) && !HAVE_ATOMICS_32
- mtx_t lock;
-#endif
-} rd_atomic32_t;
-
-typedef struct {
- int64_t val;
-#if !defined(_WIN32) && !HAVE_ATOMICS_64
- mtx_t lock;
-#endif
-} rd_atomic64_t;
-
-
-static RD_INLINE RD_UNUSED void rd_atomic32_init(rd_atomic32_t *ra, int32_t v) {
- ra->val = v;
-#if !defined(_WIN32) && !HAVE_ATOMICS_32
- mtx_init(&ra->lock, mtx_plain);
-#endif
-}
-
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_add(rd_atomic32_t *ra,
- int32_t v) {
-#ifdef __SUNPRO_C
- return atomic_add_32_nv(&ra->val, v);
-#elif defined(_WIN32)
- return InterlockedAdd((LONG *)&ra->val, v);
-#elif !HAVE_ATOMICS_32
- int32_t r;
- mtx_lock(&ra->lock);
- ra->val += v;
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#else
- return ATOMIC_OP32(add, fetch, &ra->val, v);
-#endif
-}
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra,
- int32_t v) {
-#ifdef __SUNPRO_C
- return atomic_add_32_nv(&ra->val, -v);
-#elif defined(_WIN32)
- return InterlockedAdd((LONG *)&ra->val, -v);
-#elif !HAVE_ATOMICS_32
- int32_t r;
- mtx_lock(&ra->lock);
- ra->val -= v;
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#else
- return ATOMIC_OP32(sub, fetch, &ra->val, v);
-#endif
-}
-
-/**
- * @warning The returned value is the nominal value and will be outdated
- * by the time the application reads it.
- * It should not be used for exact arithmetic; any correlation
- * with other data is unsynchronized, meaning that two atomics,
- * or one atomic and a mutex-protected piece of data, share no
- * common synchronization and cannot be relied on together.
- */
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) {
-#if defined(_WIN32) || defined(__SUNPRO_C)
- return ra->val;
-#elif !HAVE_ATOMICS_32
- int32_t r;
- mtx_lock(&ra->lock);
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#else
- return ATOMIC_OP32(fetch, add, &ra->val, 0);
-#endif
-}
-
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra,
- int32_t v) {
-#ifdef _WIN32
- return InterlockedExchange((LONG *)&ra->val, v);
-#elif !HAVE_ATOMICS_32
- int32_t r;
- mtx_lock(&ra->lock);
- r = ra->val = v;
- mtx_unlock(&ra->lock);
- return r;
-#elif HAVE_ATOMICS_32_ATOMIC
- __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
- return v;
-#elif HAVE_ATOMICS_32_SYNC
- (void)__sync_lock_test_and_set(&ra->val, v);
- return v;
-#else
- return ra->val = v; // FIXME
-#endif
-}
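-
-/*
- * Illustrative sketch (editorial addition): a shared counter. The value
- * returned by add/sub is exact for that operation, while rd_atomic32_get()
- * is only a momentary snapshot (see the warning above).
- */
-static RD_INLINE int32_t RD_UNUSED rd_atomic32_example(rd_atomic32_t *cnt) {
-        rd_atomic32_add(cnt, 1);     /* returns the post-add value */
-        return rd_atomic32_get(cnt); /* nominal snapshot only */
-}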
-
-
-
-static RD_INLINE RD_UNUSED void rd_atomic64_init(rd_atomic64_t *ra, int64_t v) {
- ra->val = v;
-#if !defined(_WIN32) && !HAVE_ATOMICS_64
- mtx_init(&ra->lock, mtx_plain);
-#endif
-}
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_add(rd_atomic64_t *ra,
- int64_t v) {
-#ifdef __SUNPRO_C
- return atomic_add_64_nv(&ra->val, v);
-#elif defined(_WIN32)
- return InterlockedAdd64(&ra->val, v);
-#elif !HAVE_ATOMICS_64
- int64_t r;
- mtx_lock(&ra->lock);
- ra->val += v;
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#else
- return ATOMIC_OP64(add, fetch, &ra->val, v);
-#endif
-}
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra,
- int64_t v) {
-#ifdef __SUNPRO_C
- return atomic_add_64_nv(&ra->val, -v);
-#elif defined(_WIN32)
- return InterlockedAdd64(&ra->val, -v);
-#elif !HAVE_ATOMICS_64
- int64_t r;
- mtx_lock(&ra->lock);
- ra->val -= v;
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#else
- return ATOMIC_OP64(sub, fetch, &ra->val, v);
-#endif
-}
-
-/**
- * @warning The returned value is the nominal value and will be outdated
- * by the time the application reads it.
- * It should not be used for exact arithmetic; any correlation
- * with other data is unsynchronized, meaning that two atomics,
- * or one atomic and a mutex-protected piece of data, share no
- * common synchronization and cannot be relied on together.
- * Use with care.
- */
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) {
-#if defined(_WIN32) || defined(__SUNPRO_C)
- return InterlockedCompareExchange64(&ra->val, 0, 0);
-#elif !HAVE_ATOMICS_64
- int64_t r;
- mtx_lock(&ra->lock);
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#else
- return ATOMIC_OP64(fetch, add, &ra->val, 0);
-#endif
-}
-
-
-static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra,
- int64_t v) {
-#ifdef _WIN32
- return InterlockedExchange64(&ra->val, v);
-#elif !HAVE_ATOMICS_64
- int64_t r;
- mtx_lock(&ra->lock);
- ra->val = v;
- r = ra->val;
- mtx_unlock(&ra->lock);
- return r;
-#elif HAVE_ATOMICS_64_ATOMIC
- __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
- return v;
-#elif HAVE_ATOMICS_64_SYNC
- (void)__sync_lock_test_and_set(&ra->val, v);
- return v;
-#else
- return ra->val = v; // FIXME
-#endif
-}
-
-#endif /* _RDATOMIC_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h
deleted file mode 100644
index a170e8da5..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDAVG_H_
-#define _RDAVG_H_
-
-
-#if WITH_HDRHISTOGRAM
-#include "rdhdrhistogram.h"
-#endif
-
-typedef struct rd_avg_s {
- struct {
- int64_t maxv;
- int64_t minv;
- int64_t avg;
- int64_t sum;
- int cnt;
- rd_ts_t start;
- } ra_v;
- mtx_t ra_lock;
- int ra_enabled;
- enum { RD_AVG_GAUGE,
- RD_AVG_COUNTER,
- } ra_type;
-#if WITH_HDRHISTOGRAM
- rd_hdr_histogram_t *ra_hdr;
-#endif
- /* Histogram results, calculated for dst in rollover().
- * Will be all zeroes if histograms are not supported. */
- struct {
- /* Quantiles */
- int64_t p50;
- int64_t p75;
- int64_t p90;
- int64_t p95;
- int64_t p99;
- int64_t p99_99;
-
- int64_t oor; /**< Values out of range */
- int32_t hdrsize; /**< hdr.allocatedSize */
- double stddev;
- double mean;
- } ra_hist;
-} rd_avg_t;
-
-
-/**
- * @brief Add value \p v to averager \p ra.
- */
-static RD_UNUSED void rd_avg_add(rd_avg_t *ra, int64_t v) {
- mtx_lock(&ra->ra_lock);
- if (!ra->ra_enabled) {
- mtx_unlock(&ra->ra_lock);
- return;
- }
- if (v > ra->ra_v.maxv)
- ra->ra_v.maxv = v;
- if (ra->ra_v.minv == 0 || v < ra->ra_v.minv)
- ra->ra_v.minv = v;
- ra->ra_v.sum += v;
- ra->ra_v.cnt++;
-#if WITH_HDRHISTOGRAM
- rd_hdr_histogram_record(ra->ra_hdr, v);
-#endif
- mtx_unlock(&ra->ra_lock);
-}
-
-
-/**
- * @brief Calculate the average
- */
-static RD_UNUSED void rd_avg_calc(rd_avg_t *ra, rd_ts_t now) {
- if (ra->ra_type == RD_AVG_GAUGE) {
- if (ra->ra_v.cnt)
- ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt;
- else
- ra->ra_v.avg = 0;
- } else {
- rd_ts_t elapsed = now - ra->ra_v.start;
-
- if (elapsed)
- ra->ra_v.avg = (ra->ra_v.sum * 1000000llu) / elapsed;
- else
- ra->ra_v.avg = 0;
-
- ra->ra_v.start = elapsed;
- }
-}
-
-
-/**
- * @returns the quantile \p q for \p ra, or 0 if histograms are not supported
- * in this build.
- *
- * @remark \p ra will not be locked by this function.
- */
-static RD_UNUSED int64_t rd_avg_quantile(const rd_avg_t *ra, double q) {
-#if WITH_HDRHISTOGRAM
- return rd_hdr_histogram_quantile(ra->ra_hdr, q);
-#else
- return 0;
-#endif
-}
-
-/**
- * @brief Rolls over statistics in \p src and stores the average in \p dst.
- * \p src is cleared and ready to be reused.
- *
- * Caller must free avg internal members by calling rd_avg_destroy()
- * on the \p dst.
- */
-static RD_UNUSED void rd_avg_rollover(rd_avg_t *dst, rd_avg_t *src) {
- rd_ts_t now;
-
- mtx_lock(&src->ra_lock);
- if (!src->ra_enabled) {
- memset(dst, 0, sizeof(*dst));
- dst->ra_type = src->ra_type;
- mtx_unlock(&src->ra_lock);
- return;
- }
-
- mtx_init(&dst->ra_lock, mtx_plain);
- dst->ra_type = src->ra_type;
- dst->ra_v = src->ra_v;
-#if WITH_HDRHISTOGRAM
- dst->ra_hdr = NULL;
-
- dst->ra_hist.stddev = rd_hdr_histogram_stddev(src->ra_hdr);
- dst->ra_hist.mean = rd_hdr_histogram_mean(src->ra_hdr);
- dst->ra_hist.oor = src->ra_hdr->outOfRangeCount;
- dst->ra_hist.hdrsize = src->ra_hdr->allocatedSize;
- dst->ra_hist.p50 = rd_hdr_histogram_quantile(src->ra_hdr, 50.0);
- dst->ra_hist.p75 = rd_hdr_histogram_quantile(src->ra_hdr, 75.0);
- dst->ra_hist.p90 = rd_hdr_histogram_quantile(src->ra_hdr, 90.0);
- dst->ra_hist.p95 = rd_hdr_histogram_quantile(src->ra_hdr, 95.0);
- dst->ra_hist.p99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.0);
- dst->ra_hist.p99_99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.99);
-#else
- memset(&dst->ra_hist, 0, sizeof(dst->ra_hist));
-#endif
- memset(&src->ra_v, 0, sizeof(src->ra_v));
-
- now = rd_clock();
- src->ra_v.start = now;
-
-#if WITH_HDRHISTOGRAM
- /* Adapt histogram span to fit future out of range entries
- * from this period. */
- if (src->ra_hdr->totalCount > 0) {
- int64_t vmin = src->ra_hdr->lowestTrackableValue;
- int64_t vmax = src->ra_hdr->highestTrackableValue;
- int64_t mindiff, maxdiff;
-
- mindiff = src->ra_hdr->lowestTrackableValue -
- src->ra_hdr->lowestOutOfRange;
-
- if (mindiff > 0) {
- /* There were low out of range values, grow lower
- * span to fit lowest out of range value + 20%. */
- vmin = src->ra_hdr->lowestOutOfRange +
- (int64_t)((double)mindiff * 0.2);
- }
-
- maxdiff = src->ra_hdr->highestOutOfRange -
- src->ra_hdr->highestTrackableValue;
-
- if (maxdiff > 0) {
- /* There were high out of range values, grow higher
- * span to fit highest out of range value + 20%. */
- vmax = src->ra_hdr->highestOutOfRange +
- (int64_t)((double)maxdiff * 0.2);
- }
-
- if (vmin == src->ra_hdr->lowestTrackableValue &&
- vmax == src->ra_hdr->highestTrackableValue) {
- /* No change in min,max, use existing hdr */
- rd_hdr_histogram_reset(src->ra_hdr);
-
- } else {
- int sigfigs = (int)src->ra_hdr->significantFigures;
- /* Create new hdr for adapted range */
- rd_hdr_histogram_destroy(src->ra_hdr);
- src->ra_hdr = rd_hdr_histogram_new(vmin, vmax, sigfigs);
- }
-
- } else {
- /* No records, no need to reset. */
- }
-#endif
-
- mtx_unlock(&src->ra_lock);
-
- rd_avg_calc(dst, now);
-}
-
-
-/**
- * Initialize an averager
- */
-static RD_UNUSED void rd_avg_init(rd_avg_t *ra,
- int type,
- int64_t exp_min,
- int64_t exp_max,
- int sigfigs,
- int enable) {
- memset(ra, 0, sizeof(*ra));
- mtx_init(&ra->ra_lock, 0);
- ra->ra_enabled = enable;
- if (!enable)
- return;
- ra->ra_type = type;
- ra->ra_v.start = rd_clock();
-#if WITH_HDRHISTOGRAM
- /* Start off the histogram with expected min,max span,
- * we'll adapt the size on each rollover. */
- ra->ra_hdr = rd_hdr_histogram_new(exp_min, exp_max, sigfigs);
-#endif
-}
-
-
-/**
- * Destroy averager
- */
-static RD_UNUSED void rd_avg_destroy(rd_avg_t *ra) {
-#if WITH_HDRHISTOGRAM
- if (ra->ra_hdr)
- rd_hdr_histogram_destroy(ra->ra_hdr);
-#endif
- mtx_destroy(&ra->ra_lock);
-}
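-
-/*
- * Illustrative usage sketch (editorial addition): a gauge averager sampled
- * during a window and rolled over into a snapshot for stats emission; the
- * min/max/sigfig arguments only size the optional HDR histogram.
- */
-static RD_UNUSED void rd_avg_example(void) {
-        rd_avg_t live, snap;
-
-        rd_avg_init(&live, RD_AVG_GAUGE, 0 /* exp_min */,
-                    1000000 /* exp_max */, 2 /* sigfigs */, 1 /* enable */);
-
-        rd_avg_add(&live, 1234); /* e.g. an observed latency in usecs */
-        rd_avg_add(&live, 5678);
-
-        /* 'snap' now holds avg/min/max (and percentiles if HDR histograms
-         * are built in); 'live' is reset and keeps accumulating. */
-        rd_avg_rollover(&snap, &live);
-
-        rd_avg_destroy(&snap);
-        rd_avg_destroy(&live);
-}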
-
-#endif /* _RDAVG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c
deleted file mode 100644
index f25251de8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdavl.h"
-
-/*
- * AVL tree.
- * Inspired by Ian Piumarta's tree.h implementation.
- */
-
-#define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0)
-
-#define RD_AVL_NODE_DELTA(ran) \
- (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \
- RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT]))
-
-#define RD_DELTA_MAX 1
-
-
-static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran);
-
-static rd_avl_node_t *rd_avl_rotate(rd_avl_node_t *ran, rd_avl_dir_t dir) {
- rd_avl_node_t *n;
- static const rd_avl_dir_t odirmap[] = {/* opposite direction map */
- [RD_AVL_RIGHT] = RD_AVL_LEFT,
- [RD_AVL_LEFT] = RD_AVL_RIGHT};
- const int odir = odirmap[dir];
-
- n = ran->ran_p[odir];
- ran->ran_p[odir] = n->ran_p[dir];
- n->ran_p[dir] = rd_avl_balance_node(ran);
-
- return rd_avl_balance_node(n);
-}
-
-static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran) {
- const int d = RD_AVL_NODE_DELTA(ran);
- int h;
-
- if (d < -RD_DELTA_MAX) {
- if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0)
- ran->ran_p[RD_AVL_RIGHT] = rd_avl_rotate(
- ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT);
- return rd_avl_rotate(ran, RD_AVL_LEFT);
-
- } else if (d > RD_DELTA_MAX) {
- if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0)
- ran->ran_p[RD_AVL_LEFT] =
- rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], RD_AVL_LEFT);
-
- return rd_avl_rotate(ran, RD_AVL_RIGHT);
- }
-
- ran->ran_height = 0;
-
- if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height)
- ran->ran_height = h;
-
- if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >
- ran->ran_height)
- ran->ran_height = h;
-
- ran->ran_height++;
-
- return ran;
-}
-
-rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl,
- rd_avl_node_t *parent,
- rd_avl_node_t *ran,
- rd_avl_node_t **existing) {
- rd_avl_dir_t dir;
- int r;
-
- if (!parent)
- return ran;
-
- if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) {
- /* Replace existing node with new one. */
- ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT];
- ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT];
- ran->ran_height = parent->ran_height;
- *existing = parent;
- return ran;
- }
-
- if (r < 0)
- dir = RD_AVL_LEFT;
- else
- dir = RD_AVL_RIGHT;
-
- parent->ran_p[dir] =
- rd_avl_insert_node(ravl, parent->ran_p[dir], ran, existing);
- return rd_avl_balance_node(parent);
-}
-
-
-static rd_avl_node_t *
-rd_avl_move(rd_avl_node_t *dst, rd_avl_node_t *src, rd_avl_dir_t dir) {
-
- if (!dst)
- return src;
-
- dst->ran_p[dir] = rd_avl_move(dst->ran_p[dir], src, dir);
-
- return rd_avl_balance_node(dst);
-}
-
-static rd_avl_node_t *rd_avl_remove_node0(rd_avl_node_t *ran) {
- rd_avl_node_t *tmp;
-
- tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], ran->ran_p[RD_AVL_RIGHT],
- RD_AVL_RIGHT);
-
- ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL;
- return tmp;
-}
-
-
-rd_avl_node_t *
-rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm) {
- rd_avl_dir_t dir;
- int r;
-
- if (!parent)
- return NULL;
-
-
- if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0)
- return rd_avl_remove_node0(parent);
- else if (r < 0)
- dir = RD_AVL_LEFT;
- else /* > 0 */
- dir = RD_AVL_RIGHT;
-
- parent->ran_p[dir] = rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm);
-
- return rd_avl_balance_node(parent);
-}
-
-
-
-rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl,
- const rd_avl_node_t *begin,
- const void *elm) {
- int r;
-
- if (!begin)
- return NULL;
- else if (!(r = ravl->ravl_cmp(elm, begin->ran_elm)))
- return (rd_avl_node_t *)begin;
- else if (r < 0)
- return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_LEFT], elm);
- else /* r > 0 */
- return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_RIGHT], elm);
-}
-
-
-
-void rd_avl_destroy(rd_avl_t *ravl) {
- if (ravl->ravl_flags & RD_AVL_F_LOCKS)
- rwlock_destroy(&ravl->ravl_rwlock);
-
- if (ravl->ravl_flags & RD_AVL_F_OWNER)
- rd_free(ravl);
-}
-
-rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {
-
- if (!ravl) {
- ravl = rd_calloc(1, sizeof(*ravl));
- flags |= RD_AVL_F_OWNER;
- } else {
- memset(ravl, 0, sizeof(*ravl));
- }
-
- ravl->ravl_flags = flags;
- ravl->ravl_cmp = cmp;
-
- if (flags & RD_AVL_F_LOCKS)
- rwlock_init(&ravl->ravl_rwlock);
-
- return ravl;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h
deleted file mode 100644
index f3e539242..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/*
- * AVL tree.
- * Inspired by Ian Piumarta's tree.h implementation.
- */
-
-#ifndef _RDAVL_H_
-#define _RDAVL_H_
-
-#include "tinycthread.h"
-
-
-typedef enum {
- RD_AVL_LEFT,
- RD_AVL_RIGHT,
-} rd_avl_dir_t;
-
-/**
- * AVL tree node.
- * Add an 'rd_avl_node_t ..' field to your element's struct and
- * provide it as the 'field' argument in the API below.
- */
-typedef struct rd_avl_node_s {
- struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */
- int ran_height; /* Sub-tree height */
- void *ran_elm; /* Backpointer to the containing
- * element. This could be considered
- * costly but is convenient for the
- * caller: RAM is cheap,
- * development time isn't. */
-} rd_avl_node_t;
-
-
-
-/**
- * Per-AVL application-provided element comparator.
- */
-typedef int (*rd_avl_cmp_t)(const void *, const void *);
-
-
-/**
- * AVL tree
- */
-typedef struct rd_avl_s {
- rd_avl_node_t *ravl_root; /* Root node */
- rd_avl_cmp_t ravl_cmp; /* Comparator */
- int ravl_flags; /* Flags */
-#define RD_AVL_F_LOCKS 0x1 /* Enable thread safety */
-#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */
- rwlock_t ravl_rwlock; /* rwlock when .._F_LOCKS is set. */
-} rd_avl_t;
-
-
-
-/**
- *
- *
- * Public API
- *
- *
- */
-
-/**
- * Insert 'elm' into AVL tree.
- * In case of collision the previous entry is overwritten by the
- * new one and the previous element is returned, else NULL.
- */
-#define RD_AVL_INSERT(ravl, elm, field) rd_avl_insert(ravl, elm, &(elm)->field)
-
-
-/**
- * Remove element by matching value 'elm' using compare function.
- */
-#define RD_AVL_REMOVE_ELM(ravl, elm) rd_avl_remove_elm(ravl, elm)
-
-/**
- * Search (by value using compare function) for and return matching elm.
- */
-#define RD_AVL_FIND(ravl, elm) rd_avl_find(ravl, elm, 1)
-
-
-/**
- * Search (by value using compare function) for and return matching elm.
- * Same as RD_AVL_FIND() but assumes 'ravl' is already locked
- * by 'rd_avl_*lock()'.
- *
- * NOTE: rd_avl_*lock() must be held.
- */
-#define RD_AVL_FIND_NL(ravl, elm) \
- rd_avl_find_node(ravl, (ravl)->ravl_root, elm)
-
-
-/**
- * Search (by value using compare function) for elm and return the
- * matching elm without taking the lock.
- *
- * NOTE: rd_avl_*lock() must be held.
- */
-#define RD_AVL_FIND_NODE_NL(ravl, elm) rd_avl_find(ravl, elm, 0)
-
-
-/**
- * Changes the element pointer for an existing AVL node in the tree.
- * The new element must be identical (according to the comparator)
- * to the previous element.
- *
- * NOTE: rd_avl_wrlock() must be held.
- */
-#define RD_AVL_ELM_SET_NL(ran, elm) ((ran)->ran_elm = (elm))
-
-/**
- * Returns the current element pointer for an existing AVL node in the tree
- *
- * NOTE: rd_avl_*lock() must be held.
- */
-#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm)
-
-
-
-/**
- * Destroy previously initialized (by rd_avl_init()) AVL tree.
- */
-void rd_avl_destroy(rd_avl_t *ravl);
-
-/**
- * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree.
- * 'cmp' is the comparison function that takes two const pointers
- * pointing to the elements being compared (rather than the avl_nodes).
- * 'flags' is zero or more of the RD_AVL_F_.. flags.
- *
- * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'.
- */
-rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);
-
-
-/**
- * 'ravl' locking functions.
- * Locking is performed automatically for all methods except for
- * those with the "_NL"/"_nl" suffix ("not locked"), which expect
- * either the read or write lock to be held.
- *
- * rdavl utilizes rwlocks to allow multiple concurrent read threads.
- */
-static RD_INLINE RD_UNUSED void rd_avl_rdlock(rd_avl_t *ravl) {
- if (ravl->ravl_flags & RD_AVL_F_LOCKS)
- rwlock_rdlock(&ravl->ravl_rwlock);
-}
-
-static RD_INLINE RD_UNUSED void rd_avl_wrlock(rd_avl_t *ravl) {
- if (ravl->ravl_flags & RD_AVL_F_LOCKS)
- rwlock_wrlock(&ravl->ravl_rwlock);
-}
-
-static RD_INLINE RD_UNUSED void rd_avl_rdunlock(rd_avl_t *ravl) {
- if (ravl->ravl_flags & RD_AVL_F_LOCKS)
- rwlock_rdunlock(&ravl->ravl_rwlock);
-}
-
-static RD_INLINE RD_UNUSED void rd_avl_wrunlock(rd_avl_t *ravl) {
- if (ravl->ravl_flags & RD_AVL_F_LOCKS)
- rwlock_wrunlock(&ravl->ravl_rwlock);
-}
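-
-/**
- * Usage sketch (illustrative, not part of the original header): the
- * 'myelem' struct and comparator below are hypothetical and shown
- * only to demonstrate the init/insert/find/destroy cycle.
- *
- * struct myelem {
- * int key;
- * rd_avl_node_t avlnode; // the embedded 'field'
- * };
- *
- * static int myelem_cmp(const void *a, const void *b) {
- * const struct myelem *ea = a, *eb = b;
- * return (ea->key > eb->key) - (ea->key < eb->key);
- * }
- *
- * rd_avl_t avl;
- * struct myelem e = {.key = 7}, skel = {.key = 7};
- *
- * rd_avl_init(&avl, myelem_cmp, RD_AVL_F_LOCKS); // thread-safe tree
- * RD_AVL_INSERT(&avl, &e, avlnode); // returns previous elm or NULL
- * struct myelem *found = RD_AVL_FIND(&avl, &skel); // == &e
- * rd_avl_destroy(&avl);
- */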
-
-
-
-/**
- * Private API, don't use directly.
- */
-
-rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl,
- rd_avl_node_t *parent,
- rd_avl_node_t *ran,
- rd_avl_node_t **existing);
-
-static RD_UNUSED void *
-rd_avl_insert(rd_avl_t *ravl, void *elm, rd_avl_node_t *ran) {
- rd_avl_node_t *existing = NULL;
-
- memset(ran, 0, sizeof(*ran));
- ran->ran_elm = elm;
-
- rd_avl_wrlock(ravl);
- ravl->ravl_root =
- rd_avl_insert_node(ravl, ravl->ravl_root, ran, &existing);
- rd_avl_wrunlock(ravl);
-
- return existing ? existing->ran_elm : NULL;
-}
-
-rd_avl_node_t *
-rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm);
-
-static RD_INLINE RD_UNUSED void rd_avl_remove_elm(rd_avl_t *ravl,
- const void *elm) {
- rd_avl_wrlock(ravl);
- ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm);
- rd_avl_wrunlock(ravl);
-}
-
-
-rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl,
- const rd_avl_node_t *begin,
- const void *elm);
-
-
-static RD_INLINE RD_UNUSED void *
-rd_avl_find(rd_avl_t *ravl, const void *elm, int dolock) {
- const rd_avl_node_t *ran;
- void *ret;
-
- if (dolock)
- rd_avl_rdlock(ravl);
-
- ran = rd_avl_find_node(ravl, ravl->ravl_root, elm);
- ret = ran ? ran->ran_elm : NULL;
-
- if (dolock)
- rd_avl_rdunlock(ravl);
-
- return ret;
-}
-
-#endif /* _RDAVL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c
deleted file mode 100644
index 1392cf7b1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c
+++ /dev/null
@@ -1,1880 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdbuf.h"
-#include "rdunittest.h"
-#include "rdlog.h"
-#include "rdcrc32.h"
-#include "crc32c.h"
-
-
-static size_t
-rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p);
-
-
-/**
- * @brief Destroy the segment and free its payload.
- *
- * @remark Will NOT unlink from buffer.
- */
-static void rd_segment_destroy(rd_segment_t *seg) {
- /* Free payload */
- if (seg->seg_free && seg->seg_p)
- seg->seg_free(seg->seg_p);
-
- if (seg->seg_flags & RD_SEGMENT_F_FREE)
- rd_free(seg);
-}
-
-/**
- * @brief Initialize segment with backing memory pointer and
- * backing memory size (the absolute offset is set at append time).
- * @remark The segment is NOT linked.
- */
-static void rd_segment_init(rd_segment_t *seg, void *mem, size_t size) {
- memset(seg, 0, sizeof(*seg));
- seg->seg_p = mem;
- seg->seg_size = size;
-}
-
-
-/**
- * @brief Append segment to buffer
- *
- * @remark Will set the buffer position to the new \p seg if no existing wpos.
- * @remark Will set the segment seg_absof to the current length of the buffer.
- */
-static rd_segment_t *rd_buf_append_segment(rd_buf_t *rbuf, rd_segment_t *seg) {
- TAILQ_INSERT_TAIL(&rbuf->rbuf_segments, seg, seg_link);
- rbuf->rbuf_segment_cnt++;
- seg->seg_absof = rbuf->rbuf_len;
- rbuf->rbuf_len += seg->seg_of;
- rbuf->rbuf_size += seg->seg_size;
-
- /* Update writable position */
- if (!rbuf->rbuf_wpos)
- rbuf->rbuf_wpos = seg;
- else
- rd_buf_get_writable0(rbuf, NULL, NULL);
-
- return seg;
-}
-
-
-
-/**
- * @brief Attempt to allocate \p size bytes from the buffer's
- * pre-allocated extra memory.
- * @returns the allocated pointer which MUST NOT be freed, or NULL if
- * not enough memory.
- * @remark the returned pointer is aligned to 8 bytes.
- */
-static void *extra_alloc(rd_buf_t *rbuf, size_t size) {
- size_t of = RD_ROUNDUP(rbuf->rbuf_extra_len, 8); /* FIXME: 32-bit */
- void *p;
-
- if (of + size > rbuf->rbuf_extra_size)
- return NULL;
-
- p = rbuf->rbuf_extra + of; /* Aligned pointer */
-
- rbuf->rbuf_extra_len = of + size;
-
- return p;
-}
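-
-/*
- * Alignment example (illustrative, values hypothetical): with
- * rbuf_extra_len == 13 the bump offset becomes RD_ROUNDUP(13, 8) == 16,
- * so the returned pointer is 8-byte aligned and rbuf_extra_len
- * advances to 16 + size.
- */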
-
-
-
-/**
- * @brief Get a pre-allocated segment if available, or allocate a new
- * segment with the extra amount of \p size bytes allocated for payload.
- *
- * Will not append the segment to the buffer.
- */
-static rd_segment_t *rd_buf_alloc_segment0(rd_buf_t *rbuf, size_t size) {
- rd_segment_t *seg;
-
- /* See if there is enough room in the extra buffer for
- * allocating the segment header and the buffer,
- * or just the segment header, else fall back to malloc. */
- if ((seg = extra_alloc(rbuf, sizeof(*seg) + size))) {
- rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size);
-
- } else if ((seg = extra_alloc(rbuf, sizeof(*seg)))) {
- rd_segment_init(seg, size > 0 ? rd_malloc(size) : NULL, size);
- if (size > 0)
- seg->seg_free = rd_free;
-
- } else if ((seg = rd_malloc(sizeof(*seg) + size))) {
- rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size);
- seg->seg_flags |= RD_SEGMENT_F_FREE;
-
- } else
- rd_assert(!*"segment allocation failure");
-
- return seg;
-}
-
-/**
- * @brief Allocate between \p min_size .. \p max_size of backing memory
- * and add it as a new segment to the buffer.
- *
- * The buffer position is updated to point to the new segment.
- *
- * The segment will be over-allocated if permitted by max_size
- * (max_size == 0 or max_size > min_size).
- */
-static rd_segment_t *
-rd_buf_alloc_segment(rd_buf_t *rbuf, size_t min_size, size_t max_size) {
- rd_segment_t *seg;
-
- /* Over-allocate if allowed. */
- if (min_size != max_size || max_size == 0)
- max_size = RD_MAX(sizeof(*seg) * 4,
- RD_MAX(min_size * 2, rbuf->rbuf_size / 2));
-
- seg = rd_buf_alloc_segment0(rbuf, max_size);
-
- rd_buf_append_segment(rbuf, seg);
-
- return seg;
-}
-
-
-/**
- * @brief Ensures that \p size bytes will be available
- * for writing and the position will be updated to point to the
- * start of this contiguous block.
- */
-void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size) {
- rd_segment_t *seg = rbuf->rbuf_wpos;
-
- if (seg) {
- void *p;
- size_t remains = rd_segment_write_remains(seg, &p);
-
- if (remains >= size)
- return; /* Existing segment has enough space. */
-
- /* Future optimization:
- * If existing segment has enough remaining space to warrant
- * a split, do it before allocating a new one. */
- }
-
- /* Allocate new segment */
- rbuf->rbuf_wpos = rd_buf_alloc_segment(rbuf, size, size);
-}
-
-/**
- * @brief Ensures that at least \p size bytes will be available for
- * a future write.
- *
- * Typically used prior to a call to rd_buf_get_write_iov()
- */
-void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size) {
- size_t remains;
- while ((remains = rd_buf_write_remains(rbuf)) < min_size)
- rd_buf_alloc_segment(rbuf, min_size - remains,
- max_size ? max_size - remains : 0);
-}
-
-
-/**
- * @returns the segment at absolute offset \p absof, or NULL if out of range.
- *
- * @remark \p hint is an optional segment where to start looking, such as
- * the current write or read position.
- */
-rd_segment_t *rd_buf_get_segment_at_offset(const rd_buf_t *rbuf,
- const rd_segment_t *hint,
- size_t absof) {
- const rd_segment_t *seg = hint;
-
- if (unlikely(absof >= rbuf->rbuf_len))
- return NULL;
-
- /* Only use current write position if possible and if it helps */
- if (!seg || absof < seg->seg_absof)
- seg = TAILQ_FIRST(&rbuf->rbuf_segments);
-
- do {
- if (absof >= seg->seg_absof &&
- absof < seg->seg_absof + seg->seg_of) {
- rd_dassert(seg->seg_absof <= rd_buf_len(rbuf));
- return (rd_segment_t *)seg;
- }
- } while ((seg = TAILQ_NEXT(seg, seg_link)));
-
- return NULL;
-}
-
-
-/**
- * @brief Split segment \p seg at absolute offset \p absof, appending
- * a new segment after \p seg with its memory pointing to the
- * memory starting at \p absof.
- * \p seg's memory will be shortened to end at \p absof.
- *
- * The new segment is NOT appended to the buffer.
- *
- * @warning MUST ONLY be used on the LAST segment
- *
- * @warning if a segment is inserted between these two split parts
- * it is imperative that the latter segment's absof is corrected.
- *
- * @remark The seg_free callback is retained on the original \p seg
- * and is not copied to the new segment, but flags are copied.
- */
-static rd_segment_t *
-rd_segment_split(rd_buf_t *rbuf, rd_segment_t *seg, size_t absof) {
- rd_segment_t *newseg;
- size_t relof;
-
- rd_assert(seg == rbuf->rbuf_wpos);
- rd_assert(absof >= seg->seg_absof &&
- absof <= seg->seg_absof + seg->seg_of);
-
- relof = absof - seg->seg_absof;
-
- newseg = rd_buf_alloc_segment0(rbuf, 0);
-
- /* Add later part of split bytes to new segment */
- newseg->seg_p = seg->seg_p + relof;
- newseg->seg_of = seg->seg_of - relof;
- newseg->seg_size = seg->seg_size - relof;
- newseg->seg_absof = SIZE_MAX; /* Invalid */
- newseg->seg_flags |= seg->seg_flags;
-
- /* Remove earlier part of split bytes from previous segment */
- seg->seg_of = relof;
- seg->seg_size = relof;
-
- /* newseg's length will be added to rbuf_len in append_segment(),
- * so shave it off here from seg's perspective. */
- rbuf->rbuf_len -= newseg->seg_of;
- rbuf->rbuf_size -= newseg->seg_size;
-
- return newseg;
-}
-
-
-
-/**
- * @brief Unlink and destroy a segment, updating the \p rbuf
- * with the decrease in length and capacity.
- */
-static void rd_buf_destroy_segment(rd_buf_t *rbuf, rd_segment_t *seg) {
- rd_assert(rbuf->rbuf_segment_cnt > 0 && rbuf->rbuf_len >= seg->seg_of &&
- rbuf->rbuf_size >= seg->seg_size);
-
- TAILQ_REMOVE(&rbuf->rbuf_segments, seg, seg_link);
- rbuf->rbuf_segment_cnt--;
- rbuf->rbuf_len -= seg->seg_of;
- rbuf->rbuf_size -= seg->seg_size;
- if (rbuf->rbuf_wpos == seg)
- rbuf->rbuf_wpos = NULL;
-
- rd_segment_destroy(seg);
-}
-
-
-/**
- * @brief Free memory associated with the \p rbuf, but not the rbuf itself.
- * Segments will be destroyed.
- */
-void rd_buf_destroy(rd_buf_t *rbuf) {
- rd_segment_t *seg, *tmp;
-
-#if ENABLE_DEVEL
- /* FIXME */
- if (rbuf->rbuf_len > 0 && 0) {
- size_t overalloc = rbuf->rbuf_size - rbuf->rbuf_len;
- float fill_grade =
- (float)rbuf->rbuf_len / (float)rbuf->rbuf_size;
-
- printf("fill grade: %.2f%% (%" PRIusz
- " bytes over-allocated)\n",
- fill_grade * 100.0f, overalloc);
- }
-#endif
-
-
- TAILQ_FOREACH_SAFE(seg, &rbuf->rbuf_segments, seg_link, tmp) {
- rd_segment_destroy(seg);
- }
-
- if (rbuf->rbuf_extra)
- rd_free(rbuf->rbuf_extra);
-}
-
-
-/**
- * @brief Same as rd_buf_destroy() but also frees the \p rbuf itself.
- */
-void rd_buf_destroy_free(rd_buf_t *rbuf) {
- rd_buf_destroy(rbuf);
- rd_free(rbuf);
-}
-
-/**
- * @brief Initialize buffer, pre-allocating \p fixed_seg_cnt segments
- * where the first segment will have a \p buf_size of backing memory.
- *
- * The caller may rearrange the backing memory as it sees fit.
- */
-void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) {
- size_t totalloc = 0;
-
- memset(rbuf, 0, sizeof(*rbuf));
- TAILQ_INIT(&rbuf->rbuf_segments);
-
- if (!fixed_seg_cnt) {
- assert(!buf_size);
- return;
- }
-
- /* Pre-allocate memory for a fixed set of segments that are known
- * beforehand, to minimize the number of extra allocations
- * needed for well-known layouts (such as headers, etc.) */
- totalloc += RD_ROUNDUP(sizeof(rd_segment_t), 8) * fixed_seg_cnt;
-
- /* Pre-allocate extra space for the backing buffer. */
- totalloc += buf_size;
-
- rbuf->rbuf_extra_size = totalloc;
- rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size);
-}
-
-
-/**
- * @brief Allocates a buffer object and initializes it.
- * @sa rd_buf_init()
- */
-rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size) {
- rd_buf_t *rbuf = rd_malloc(sizeof(*rbuf));
- rd_buf_init(rbuf, fixed_seg_cnt, buf_size);
- return rbuf;
-}
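-
-/**
- * Usage sketch (illustrative, not part of the original source): a
- * minimal write path; the segment count and size are hypothetical
- * tuning values.
- *
- * rd_buf_t *b = rd_buf_new(2, 256); // 2 segment headers, 256 bytes
- * rd_buf_write(b, "hello", 5);
- * rd_buf_destroy_free(b); // frees segments and 'b' itself
- */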
-
-
-/**
- * @brief Convenience writer iterator interface.
- *
- * After writing to \p p the caller must update the written length
- * by calling rd_buf_write(rbuf, NULL, written_length)
- *
- * @returns the number of contiguous writable bytes in segment
- * and sets \p *p to point to the start of the memory region.
- */
-static size_t
-rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p) {
- rd_segment_t *seg;
-
- for (seg = rbuf->rbuf_wpos; seg; seg = TAILQ_NEXT(seg, seg_link)) {
- size_t len = rd_segment_write_remains(seg, p);
-
- /* Even though the write offset hasn't changed we
- * avoid future segment scans by adjusting the
- * wpos here to the first writable segment. */
- rbuf->rbuf_wpos = seg;
- if (segp)
- *segp = seg;
-
- if (unlikely(len == 0))
- continue;
-
- /* Also adjust absof if the segment was allocated
- * before the previous segment's memory was exhausted
- * and thus now might have a lower absolute offset
- * than the previous segment's now higher relative offset. */
- if (seg->seg_of == 0 && seg->seg_absof < rbuf->rbuf_len)
- seg->seg_absof = rbuf->rbuf_len;
-
- return len;
- }
-
- return 0;
-}
-
-size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p) {
- rd_segment_t *seg;
- return rd_buf_get_writable0(rbuf, &seg, p);
-}
-
-
-
-/**
- * @brief Write \p payload of \p size bytes to current position
- * in buffer. A new segment will be allocated and appended
- * if needed.
- *
- * @returns the write position where payload was written (pre-write).
- * Returning the pre-write position allows write_update() to later
- * update the same location, effectively making write()s
- * also a place-holder mechanism.
- *
- * @remark If \p payload is NULL only the write position is updated;
- * in this mode it is required for the buffer to have enough
- * memory for the NULL write (as it would otherwise cause
- * uninitialized memory in any new segments allocated from this
- * function).
- */
-size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size) {
- size_t remains = size;
- size_t initial_absof;
- const char *psrc = (const char *)payload;
-
- initial_absof = rbuf->rbuf_len;
-
- /* Ensure enough space by pre-allocating segments. */
- rd_buf_write_ensure(rbuf, size, 0);
-
- while (remains > 0) {
- void *p = NULL;
- rd_segment_t *seg = NULL;
- size_t segremains = rd_buf_get_writable0(rbuf, &seg, &p);
- size_t wlen = RD_MIN(remains, segremains);
-
- rd_dassert(seg == rbuf->rbuf_wpos);
- rd_dassert(wlen > 0);
- rd_dassert(seg->seg_p + seg->seg_of <= (char *)p &&
- (char *)p < seg->seg_p + seg->seg_size);
-
- if (payload) {
- memcpy(p, psrc, wlen);
- psrc += wlen;
- }
-
- seg->seg_of += wlen;
- rbuf->rbuf_len += wlen;
- remains -= wlen;
- }
-
- rd_assert(remains == 0);
-
- return initial_absof;
-}
-
-
-
-/**
- * @brief Write \p slice to \p rbuf
- *
- * @remark The slice position will be updated.
- *
- * @returns the number of bytes written (always the slice length)
- */
-size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice) {
- const void *p;
- size_t rlen;
- size_t sum = 0;
-
- while ((rlen = rd_slice_reader(slice, &p))) {
- rd_buf_write(rbuf, p, rlen);
- sum += rlen;
- }
-
- return sum;
-}
-
-
-
-/**
- * @brief Write \p payload of \p size at absolute offset \p absof
- * WITHOUT updating the total buffer length.
- *
- * This is used to update a previously written region, such
- * as updating the header length.
- *
- * @returns the number of bytes written, which may be less than \p size
- * if the update spans multiple segments.
- */
-static size_t rd_segment_write_update(rd_segment_t *seg,
- size_t absof,
- const void *payload,
- size_t size) {
- size_t relof;
- size_t wlen;
-
- rd_dassert(absof >= seg->seg_absof);
- relof = absof - seg->seg_absof;
- rd_assert(relof <= seg->seg_of);
- wlen = RD_MIN(size, seg->seg_of - relof);
- rd_dassert(relof + wlen <= seg->seg_of);
-
- memcpy(seg->seg_p + relof, payload, wlen);
-
- return wlen;
-}
-
-
-
-/**
- * @brief Write \p payload of \p size at absolute offset \p absof
- * WITHOUT updating the total buffer length.
- *
- * This is used to update a previously written region, such
- * as updating the header length.
- */
-size_t rd_buf_write_update(rd_buf_t *rbuf,
- size_t absof,
- const void *payload,
- size_t size) {
- rd_segment_t *seg;
- const char *psrc = (const char *)payload;
- size_t of;
-
- /* Find segment for offset */
- seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof);
- rd_assert(seg && *"invalid absolute offset");
-
- for (of = 0; of < size; seg = TAILQ_NEXT(seg, seg_link)) {
- rd_assert(seg->seg_absof <= rd_buf_len(rbuf));
- size_t wlen = rd_segment_write_update(seg, absof + of,
- psrc + of, size - of);
- of += wlen;
- }
-
- rd_dassert(of == size);
-
- return of;
-}
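-
-/**
- * Placeholder pattern sketch (illustrative, not in the original
- * source): reserve a length field, write the payload, then patch the
- * length in place. The raw int32 encoding is hypothetical; real
- * callers use their protocol's own serialization.
- *
- * rd_buf_t b;
- * int32_t len = 0;
- * size_t len_of;
- *
- * rd_buf_init(&b, 0, 0);
- * len_of = rd_buf_write(&b, &len, sizeof(len)); // pre-write position
- * rd_buf_write(&b, "payload", 7);
- * len = 7;
- * rd_buf_write_update(&b, len_of, &len, sizeof(len));
- * rd_buf_destroy(&b);
- */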
-
-
-
-/**
- * @brief Push reference memory segment to current write position.
- */
-void rd_buf_push0(rd_buf_t *rbuf,
- const void *payload,
- size_t size,
- void (*free_cb)(void *),
- rd_bool_t writable) {
- rd_segment_t *prevseg, *seg, *tailseg = NULL;
-
- if ((prevseg = rbuf->rbuf_wpos) &&
- rd_segment_write_remains(prevseg, NULL) > 0) {
- /* If the current segment still has room in it split it
- * and insert the pushed segment in the middle (below). */
- tailseg = rd_segment_split(
- rbuf, prevseg, prevseg->seg_absof + prevseg->seg_of);
- }
-
- seg = rd_buf_alloc_segment0(rbuf, 0);
- seg->seg_p = (char *)payload;
- seg->seg_size = size;
- seg->seg_of = size;
- seg->seg_free = free_cb;
- if (!writable)
- seg->seg_flags |= RD_SEGMENT_F_RDONLY;
-
- rd_buf_append_segment(rbuf, seg);
-
- if (tailseg)
- rd_buf_append_segment(rbuf, tailseg);
-}
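-
-/**
- * Usage sketch (illustrative): pushing foreign memory avoids a copy.
- * rd_buf_push_writable() (as used by the unit tests below) hands a
- * malloced region, its size, and a free callback to the buffer:
- *
- * rd_buf_push_writable(&b, rd_strdup("hello"), 5, rd_free);
- * // rd_free() is invoked on the region when 'b' is destroyed
- */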
-
-
-
-/**
- * @brief Erase \p size bytes at \p absof from buffer.
- *
- * @returns the number of bytes erased.
- *
- * @remark This is costly since it forces a memory move.
- */
-size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size) {
- rd_segment_t *seg, *next = NULL;
- size_t of;
-
- /* Find segment for offset */
- seg = rd_buf_get_segment_at_offset(rbuf, NULL, absof);
-
- /* Adjust segments until size is exhausted, then continue scanning to
- * update the absolute offset. */
- for (of = 0; seg && of < size; seg = next) {
- /* Example:
- * seg_absof = 10
- * seg_of = 7
- * absof = 12
- * of = 1
- * size = 4
- *
- * rof = 3 relative segment offset where to erase
- * eraseremains = 3 remaining bytes to erase
- * toerase = 3 available bytes to erase in segment
- * segremains = 1 remaining bytes in the segment to
- * the right of the erased part, i.e.,
- * the memory that needs to be moved to the
- * left.
- */
- /** Relative offset in segment for the absolute offset */
- size_t rof = (absof + of) - seg->seg_absof;
- /** How much remains to be erased */
- size_t eraseremains = size - of;
- /** How much can be erased from this segment */
- size_t toerase = RD_MIN(seg->seg_of - rof, eraseremains);
- /** How much remains in the segment after the erased part */
- size_t segremains = seg->seg_of - (rof + toerase);
-
- next = TAILQ_NEXT(seg, seg_link);
-
- seg->seg_absof -= of;
-
- if (unlikely(toerase == 0))
- continue;
-
- if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY)))
- RD_BUG("rd_buf_erase() called on read-only segment");
-
- if (likely(segremains > 0))
- memmove(seg->seg_p + rof, seg->seg_p + rof + toerase,
- segremains);
-
- seg->seg_of -= toerase;
- rbuf->rbuf_len -= toerase;
-
- of += toerase;
-
- /* If segment is now empty, remove it */
- if (seg->seg_of == 0)
- rd_buf_destroy_segment(rbuf, seg);
- }
-
- /* Update absolute offset of remaining segments */
- for (seg = next; seg; seg = TAILQ_NEXT(seg, seg_link)) {
- rd_assert(seg->seg_absof >= of);
- seg->seg_absof -= of;
- }
-
- rbuf->rbuf_erased += of;
-
- return of;
-}
-
-
-
-/**
- * @brief Do a write-seek, updating the write position to the given
- * absolute \p absof.
- *
- * @warning Any subsequent segments will be destroyed.
- *
- * @returns -1 if the offset is out of bounds, else 0.
- */
-int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof) {
- rd_segment_t *seg, *next;
- size_t relof;
-
- seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof);
- if (unlikely(!seg))
- return -1;
-
- relof = absof - seg->seg_absof;
- if (unlikely(relof > seg->seg_of))
- return -1;
-
- /* Destroy subsequent segments in reverse order so that
- * destroy_segment() length checks are correct.
- * Will decrement rbuf_len etc. */
- for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head);
- next != seg;) {
- rd_segment_t *this = next;
- next = TAILQ_PREV(this, rd_segment_head, seg_link);
- rd_buf_destroy_segment(rbuf, this);
- }
-
- /* Update relative write offset */
- seg->seg_of = relof;
- rbuf->rbuf_wpos = seg;
- rbuf->rbuf_len = seg->seg_absof + seg->seg_of;
-
- rd_assert(rbuf->rbuf_len == absof);
-
- return 0;
-}
-
-
-/**
- * @brief Set up the iovecs in \p iovs (of size \p iov_max) with the writable
- * segments from the buffer's current write position.
- *
- * @param iovcntp will be set to the number of populated \p iovs[]
- * @param size_max limits the total number of bytes made available.
- * Note: this value may be overshot with the size of one
- * segment.
- *
- * @returns the total number of bytes in the represented segments.
- *
- * @remark the write position will NOT be updated.
- */
-size_t rd_buf_get_write_iov(const rd_buf_t *rbuf,
- struct iovec *iovs,
- size_t *iovcntp,
- size_t iov_max,
- size_t size_max) {
- const rd_segment_t *seg;
- size_t iovcnt = 0;
- size_t sum = 0;
-
- for (seg = rbuf->rbuf_wpos; seg && iovcnt < iov_max && sum < size_max;
- seg = TAILQ_NEXT(seg, seg_link)) {
- size_t len;
- void *p;
-
- len = rd_segment_write_remains(seg, &p);
- if (unlikely(len == 0))
- continue;
-
- iovs[iovcnt].iov_base = p;
- iovs[iovcnt++].iov_len = len;
-
- sum += len;
- }
-
- *iovcntp = iovcnt;
-
- return sum;
-}
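-
-/**
- * Usage sketch (illustrative, POSIX-specific, not in the original
- * source): filling the buffer from a socket with readv(2); 'fd' is an
- * assumed open descriptor and the sizes are hypothetical.
- *
- * struct iovec iov[8];
- * size_t iovcnt;
- * ssize_t nread;
- *
- * rd_buf_write_ensure(&b, 1024, 0);
- * rd_buf_get_write_iov(&b, iov, &iovcnt, 8, 1024);
- * if ((nread = readv(fd, iov, (int)iovcnt)) > 0)
- * rd_buf_write(&b, NULL, (size_t)nread); // advance write position
- */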
-
-
-
-/**
- * @name Slice reader interface
- *
- * @{
- */
-
-/**
- * @brief Initialize a new slice of \p size bytes starting at \p seg with
- * relative offset \p rof.
- *
- * @returns 0 on success or -1 if there are not at least \p size bytes available
- * in the buffer.
- */
-int rd_slice_init_seg(rd_slice_t *slice,
- const rd_buf_t *rbuf,
- const rd_segment_t *seg,
- size_t rof,
- size_t size) {
- /* Verify that \p size bytes are indeed available in the buffer. */
- if (unlikely(rbuf->rbuf_len < (seg->seg_absof + rof + size)))
- return -1;
-
- slice->buf = rbuf;
- slice->seg = seg;
- slice->rof = rof;
- slice->start = seg->seg_absof + rof;
- slice->end = slice->start + size;
-
- rd_assert(seg->seg_absof + rof >= slice->start &&
- seg->seg_absof + rof <= slice->end);
-
- rd_assert(slice->end <= rd_buf_len(rbuf));
-
- return 0;
-}
-
-/**
- * @brief Initialize new slice of \p size bytes starting at offset \p absof
- *
- * @returns 0 on success or -1 if there are not at least \p size bytes available
- * in the buffer.
- */
-int rd_slice_init(rd_slice_t *slice,
- const rd_buf_t *rbuf,
- size_t absof,
- size_t size) {
- const rd_segment_t *seg =
- rd_buf_get_segment_at_offset(rbuf, NULL, absof);
- if (unlikely(!seg))
- return -1;
-
- return rd_slice_init_seg(slice, rbuf, seg, absof - seg->seg_absof,
- size);
-}
-
-/**
- * @brief Initialize new slice covering the full buffer \p rbuf
- */
-void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf) {
- int r = rd_slice_init(slice, rbuf, 0, rd_buf_len(rbuf));
- rd_assert(r == 0);
-}
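-
-/**
- * Usage sketch (illustrative): the typical read path over a fully
- * written buffer 'b'; the 64-byte read size is hypothetical.
- *
- * rd_slice_t slice;
- * char out[64];
- *
- * rd_slice_init_full(&slice, &b);
- * if (rd_slice_read(&slice, out, sizeof(out)) == sizeof(out))
- * ; // 64 bytes copied, read position advanced
- */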
-
-
-
-/**
- * @sa rd_slice_reader() rd_slice_peeker()
- */
-size_t rd_slice_reader0(rd_slice_t *slice, const void **p, int update_pos) {
- size_t rof = slice->rof;
- size_t rlen;
- const rd_segment_t *seg;
-
- /* Find segment with non-zero payload */
- for (seg = slice->seg;
- seg && seg->seg_absof + rof < slice->end && seg->seg_of == rof;
- seg = TAILQ_NEXT(seg, seg_link))
- rof = 0;
-
- if (unlikely(!seg || seg->seg_absof + rof >= slice->end))
- return 0;
-
- *p = (const void *)(seg->seg_p + rof);
- rlen = RD_MIN(seg->seg_of - rof, rd_slice_remains(slice));
-
- if (update_pos) {
- if (slice->seg != seg) {
- rd_assert(seg->seg_absof + rof >= slice->start &&
- seg->seg_absof + rof + rlen <= slice->end);
- slice->seg = seg;
- slice->rof = rlen;
- } else {
- slice->rof += rlen;
- }
- }
-
- return rlen;
-}
-
-
-/**
- * @brief Convenience reader iterator interface.
- *
- * Call repeatedly from while loop until it returns 0.
- *
- * @param slice slice to read from, position will be updated.
- * @param p will be set to the start of the returned number of
- * contiguous bytes of memory
- *
- * @returns the number of bytes read, or 0 if slice is empty.
- */
-size_t rd_slice_reader(rd_slice_t *slice, const void **p) {
- return rd_slice_reader0(slice, p, 1 /*update_pos*/);
-}
-
-/**
- * @brief Identical to rd_slice_reader() but does NOT update the read position
- */
-size_t rd_slice_peeker(const rd_slice_t *slice, const void **p) {
- return rd_slice_reader0((rd_slice_t *)slice, p, 0 /*dont update_pos*/);
-}
-
-
-
-/**
- * @brief Read \p size bytes from current read position,
- * advancing the read offset by the number of bytes copied to \p dst.
- *
- * If there are less than \p size remaining in the buffer
- * then 0 is returned and no bytes are copied.
- *
- * @returns \p size, or 0 if \p size bytes are not available in buffer.
- *
- * @remark This performs a complete read, no partial reads.
- *
- * @remark If \p dst is NULL only the read position is updated.
- */
-size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size) {
- size_t remains = size;
- char *d = (char *)dst; /* Possibly NULL */
- size_t rlen;
- const void *p;
- size_t orig_end = slice->end;
-
- if (unlikely(rd_slice_remains(slice) < size))
- return 0;
-
- /* Temporarily shrink slice to offset + \p size */
- slice->end = rd_slice_abs_offset(slice) + size;
-
- while ((rlen = rd_slice_reader(slice, &p))) {
- rd_dassert(remains >= rlen);
- if (dst) {
- memcpy(d, p, rlen);
- d += rlen;
- }
- remains -= rlen;
- }
-
- rd_dassert(remains == 0);
-
- /* Restore original size */
- slice->end = orig_end;
-
- return size;
-}
-
-
-/**
- * @brief Read \p size bytes from absolute slice offset \p offset
- * and store in \p dst, without updating the slice read position.
- *
- * @returns \p size if the offset and size were within the slice, else 0.
- */
-size_t
-rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size) {
- rd_slice_t sub = *slice;
-
- if (unlikely(rd_slice_seek(&sub, offset) == -1))
- return 0;
-
- return rd_slice_read(&sub, dst, size);
-}
-
-
-/**
- * @brief Read a varint-encoded unsigned integer from \p slice,
- * storing the decoded number in \p nump on success (return value > 0).
- *
- * @returns the number of bytes read on success or 0 in case of
- * buffer underflow.
- */
-size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump) {
- uint64_t num = 0;
- int shift = 0;
- size_t rof = slice->rof;
- const rd_segment_t *seg;
-
- /* Traverse segments, byte for byte, until varint is decoded
- * or no more segments available (underflow). */
- for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) {
- for (; rof < seg->seg_of; rof++) {
- unsigned char oct;
-
- if (unlikely(seg->seg_absof + rof >= slice->end))
- return 0; /* Underflow */
-
- oct = *(const unsigned char *)(seg->seg_p + rof);
-
- num |= (uint64_t)(oct & 0x7f) << shift;
- shift += 7;
-
- if (!(oct & 0x80)) {
- /* Done: no more bytes expected */
- *nump = num;
-
- /* Update slice's read pointer and offset */
- if (slice->seg != seg)
- slice->seg = seg;
- slice->rof = rof + 1; /* including the +1 byte
- * that was just read */
-
- return shift / 7;
- }
- }
-
- rof = 0;
- }
-
- return 0; /* Underflow */
-}
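-
-/*
- * Worked example (illustrative): the byte sequence 0xac 0x02 decodes
- * to (0xac & 0x7f) | (0x02 << 7) = 44 + 256 = 300; the 0x80
- * continuation bit is set on the first byte and clear on the second,
- * so the function returns 2 (bytes read).
- */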
-
-
-/**
- * @returns a pointer to \p size contiguous bytes at the current read offset.
- * If there isn't \p size contiguous bytes available NULL will
- * be returned.
- *
- * @remark The read position is updated to point past \p size.
- */
-const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size) {
- void *p;
-
- if (unlikely(rd_slice_remains(slice) < size ||
- slice->rof + size > slice->seg->seg_of))
- return NULL;
-
- p = slice->seg->seg_p + slice->rof;
-
- rd_slice_read(slice, NULL, size);
-
- return p;
-}
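-
-/**
- * Usage sketch (illustrative): zero-copy access to a fixed-size
- * header when it is contiguous, falling back to a copying read
- * otherwise; the 8-byte header size is hypothetical.
- *
- * const void *hdr;
- * char tmp[8];
- *
- * if (!(hdr = rd_slice_ensure_contig(&slice, sizeof(tmp))) &&
- * rd_slice_read(&slice, tmp, sizeof(tmp)) == sizeof(tmp))
- * hdr = tmp;
- */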
-
-
-
-/**
- * @brief Sets the slice's read position. The offset is the slice offset,
- * not buffer offset.
- *
- * @returns 0 if offset was within range, else -1 in which case the position
- * is not changed.
- */
-int rd_slice_seek(rd_slice_t *slice, size_t offset) {
- const rd_segment_t *seg;
- size_t absof = slice->start + offset;
-
- if (unlikely(absof >= slice->end))
- return -1;
-
- seg = rd_buf_get_segment_at_offset(slice->buf, slice->seg, absof);
- rd_assert(seg);
-
- slice->seg = seg;
- slice->rof = absof - seg->seg_absof;
- rd_assert(seg->seg_absof + slice->rof >= slice->start &&
- seg->seg_absof + slice->rof <= slice->end);
-
- return 0;
-}
-
-
-/**
- * @brief Narrow the current slice to \p size, saving
- * the original slice state into \p save_slice.
- *
- * Use rd_slice_widen() to restore the saved slice
- * with the read count updated from the narrowed slice.
- *
- * This is useful for reading a sub-slice of a larger slice
- * without having to pass the lesser length around.
- *
- * @returns 1 if enough underlying slice buffer memory is available, else 0.
- */
-int rd_slice_narrow(rd_slice_t *slice, rd_slice_t *save_slice, size_t size) {
- if (unlikely(slice->start + size > slice->end))
- return 0;
- *save_slice = *slice;
- slice->end = slice->start + size;
- rd_assert(rd_slice_abs_offset(slice) <= slice->end);
- return 1;
-}
-
-/**
- * @brief Same as rd_slice_narrow() but using a relative size \p relsize
- * from the current read position.
- */
-int rd_slice_narrow_relative(rd_slice_t *slice,
- rd_slice_t *save_slice,
- size_t relsize) {
- return rd_slice_narrow(slice, save_slice,
- rd_slice_offset(slice) + relsize);
-}
-
-
-/**
- * @brief Restore the original \p save_slice size from a previous call to
- * rd_slice_narrow(), while keeping the updated read pointer from
- * \p slice.
- */
-void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice) {
- slice->end = save_slice->end;
-}
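-
-/**
- * Usage sketch (illustrative): reading a length-prefixed sub-payload
- * without threading the length through every helper. 'sublen' is
- * assumed to have been decoded from the wire, and parse_subpayload()
- * is a hypothetical parser.
- *
- * rd_slice_t saved;
- *
- * if (rd_slice_narrow_relative(&slice, &saved, (size_t)sublen)) {
- * parse_subpayload(&slice); // sees only 'sublen' bytes
- * rd_slice_widen(&slice, &saved); // restore, keep read position
- * }
- */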
-
-
-/**
- * @brief Copy the original slice \p orig to \p new_slice and adjust
- * the new slice length to \p size.
- *
- * This is a side-effect free form of rd_slice_narrow() which is not to
- * be used with rd_slice_widen().
- *
- * @returns 1 if enough underlying slice buffer memory is available, else 0.
- */
-int rd_slice_narrow_copy(const rd_slice_t *orig,
- rd_slice_t *new_slice,
- size_t size) {
- if (unlikely(orig->start + size > orig->end))
- return 0;
- *new_slice = *orig;
- new_slice->end = orig->start + size;
- rd_assert(rd_slice_abs_offset(new_slice) <= new_slice->end);
- return 1;
-}
-
-/**
- * @brief Same as rd_slice_narrow_copy() but with a relative size from
- * the current read position.
- */
-int rd_slice_narrow_copy_relative(const rd_slice_t *orig,
- rd_slice_t *new_slice,
- size_t relsize) {
- return rd_slice_narrow_copy(orig, new_slice,
- rd_slice_offset(orig) + relsize);
-}
-
-
-
-/**
- * @brief Set up the iovec \p iovs (of size \p iov_max) with the readable
- * segments from the slice's current read position.
- *
- * @param iovcntp will be set to the number of populated \p iovs[]
- * @param size_max limits the total number of bytes made available.
- * Note: this value may be overshot with the size of one
- * segment.
- *
- * @returns the total number of bytes in the represented segments.
- *
- * @remark will NOT update the read position.
- */
-size_t rd_slice_get_iov(const rd_slice_t *slice,
- struct iovec *iovs,
- size_t *iovcntp,
- size_t iov_max,
- size_t size_max) {
- const void *p;
- size_t rlen;
- size_t iovcnt = 0;
- size_t sum = 0;
- rd_slice_t copy = *slice; /* Use a copy of the slice so we don't
- * update the position for the caller. */
-
- while (sum < size_max && iovcnt < iov_max &&
- (rlen = rd_slice_reader(&copy, &p))) {
- iovs[iovcnt].iov_base = (void *)p;
- iovs[iovcnt++].iov_len = rlen;
-
- sum += rlen;
- }
-
- *iovcntp = iovcnt;
-
- return sum;
-}
-
-
-
-/**
- * @brief CRC32 calculation of slice.
- *
- * @returns the calculated CRC
- *
- * @remark the slice's position is updated.
- */
-uint32_t rd_slice_crc32(rd_slice_t *slice) {
- rd_crc32_t crc;
- const void *p;
- size_t rlen;
-
- crc = rd_crc32_init();
-
- while ((rlen = rd_slice_reader(slice, &p)))
- crc = rd_crc32_update(crc, p, rlen);
-
- return (uint32_t)rd_crc32_finalize(crc);
-}
-
-/**
- * @brief Compute CRC-32C of the slice's remaining contents, also
- * supporting the case where the read position is not at the
- * start of the first segment.
- *
- * @remark the slice's position is updated.
- */
-uint32_t rd_slice_crc32c(rd_slice_t *slice) {
- const void *p;
- size_t rlen;
- uint32_t crc = 0;
-
- while ((rlen = rd_slice_reader(slice, &p)))
- crc = rd_crc32c(crc, (const char *)p, rlen);
-
- return crc;
-}
-
-
-
-/**
- * @name Debugging dumpers
- *
- *
- */
-
-static void rd_segment_dump(const rd_segment_t *seg,
- const char *ind,
- size_t relof,
- int do_hexdump) {
- fprintf(stderr,
- "%s((rd_segment_t *)%p): "
- "p %p, of %" PRIusz
- ", "
- "absof %" PRIusz ", size %" PRIusz ", free %p, flags 0x%x\n",
- ind, seg, seg->seg_p, seg->seg_of, seg->seg_absof,
- seg->seg_size, seg->seg_free, seg->seg_flags);
- rd_assert(relof <= seg->seg_of);
- if (do_hexdump)
- rd_hexdump(stderr, "segment", seg->seg_p + relof,
- seg->seg_of - relof);
-}
-
-void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump) {
- const rd_segment_t *seg;
-
- fprintf(stderr,
- "((rd_buf_t *)%p):\n"
- " len %" PRIusz " size %" PRIusz ", %" PRIusz "/%" PRIusz
- " extra memory used\n",
- rbuf, rbuf->rbuf_len, rbuf->rbuf_size, rbuf->rbuf_extra_len,
- rbuf->rbuf_extra_size);
-
- if (rbuf->rbuf_wpos) {
- fprintf(stderr, " wpos:\n");
- rd_segment_dump(rbuf->rbuf_wpos, " ", 0, 0);
- }
-
- if (rbuf->rbuf_segment_cnt > 0) {
- size_t segcnt = 0;
-
- fprintf(stderr, " %" PRIusz " linked segments:\n",
- rbuf->rbuf_segment_cnt);
- TAILQ_FOREACH(seg, &rbuf->rbuf_segments, seg_link) {
- rd_segment_dump(seg, " ", 0, do_hexdump);
- segcnt++;
- rd_assert(segcnt <= rbuf->rbuf_segment_cnt);
- }
- }
-}
-
-void rd_slice_dump(const rd_slice_t *slice, int do_hexdump) {
- const rd_segment_t *seg;
- size_t relof;
-
- fprintf(stderr,
- "((rd_slice_t *)%p):\n"
- " buf %p (len %" PRIusz "), seg %p (absof %" PRIusz
- "), "
- "rof %" PRIusz ", start %" PRIusz ", end %" PRIusz
- ", size %" PRIusz ", offset %" PRIusz "\n",
- slice, slice->buf, rd_buf_len(slice->buf), slice->seg,
- slice->seg ? slice->seg->seg_absof : 0, slice->rof,
- slice->start, slice->end, rd_slice_size(slice),
- rd_slice_offset(slice));
- relof = slice->rof;
-
- for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) {
- rd_segment_dump(seg, " ", relof, do_hexdump);
- relof = 0;
- }
-}
-
-
-/**
- * @name Unit-tests
- *
- *
- *
- */
-
-
-/**
- * @brief Basic write+read test
- */
-static int do_unittest_write_read(void) {
- rd_buf_t b;
- char ones[1024];
- char twos[1024];
- char threes[1024];
- char fiftyfives[100]; /* 0x55 indicates "untouched" memory */
- char buf[1024 * 3];
- rd_slice_t slice;
- size_t r, pos;
-
- memset(ones, 0x1, sizeof(ones));
- memset(twos, 0x2, sizeof(twos));
- memset(threes, 0x3, sizeof(threes));
- memset(fiftyfives, 0x55, sizeof(fiftyfives));
- memset(buf, 0x55, sizeof(buf));
-
- rd_buf_init(&b, 2, 1000);
-
- /*
- * Verify write
- */
- r = rd_buf_write(&b, ones, 200);
- RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r);
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos);
-
- r = rd_buf_write(&b, twos, 800);
- RD_UT_ASSERT(r == 200, "write() returned position %" PRIusz, r);
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200 + 800, "pos() returned position %" PRIusz, pos);
-
- /* Buffer grows here */
- r = rd_buf_write(&b, threes, 1);
- RD_UT_ASSERT(r == 200 + 800, "write() returned position %" PRIusz, r);
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200 + 800 + 1, "pos() returned position %" PRIusz,
- pos);
-
- /*
- * Verify read
- */
- /* Get full slice. */
- rd_slice_init_full(&slice, &b);
-
- r = rd_slice_read(&slice, buf, 200 + 800 + 2);
- RD_UT_ASSERT(r == 0,
- "read() > remaining should have failed, gave %" PRIusz, r);
- r = rd_slice_read(&slice, buf, 200 + 800 + 1);
- RD_UT_ASSERT(r == 200 + 800 + 1,
- "read() returned %" PRIusz " (%" PRIusz " remains)", r,
- rd_slice_remains(&slice));
-
- RD_UT_ASSERT(!memcmp(buf, ones, 200), "verify ones");
- RD_UT_ASSERT(!memcmp(buf + 200, twos, 800), "verify twos");
- RD_UT_ASSERT(!memcmp(buf + 200 + 800, threes, 1), "verify threes");
- RD_UT_ASSERT(!memcmp(buf + 200 + 800 + 1, fiftyfives, 100),
- "verify 55s");
-
- rd_buf_destroy(&b);
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Helper read verifier, not a unit-test itself.
- */
-#define do_unittest_read_verify(b, absof, len, verify) \
- do { \
- int __fail = do_unittest_read_verify0(b, absof, len, verify); \
- RD_UT_ASSERT(!__fail, \
- "read_verify(absof=%" PRIusz ",len=%" PRIusz \
- ") " \
- "failed", \
- (size_t)absof, (size_t)len); \
- } while (0)
-
-static int do_unittest_read_verify0(const rd_buf_t *b,
- size_t absof,
- size_t len,
- const char *verify) {
- rd_slice_t slice, sub;
- char buf[1024];
- size_t half;
- size_t r;
- int i;
-
- rd_assert(sizeof(buf) >= len);
-
- /* Get reader slice */
- i = rd_slice_init(&slice, b, absof, len);
- RD_UT_ASSERT(i == 0, "slice_init() failed: %d", i);
-
- r = rd_slice_read(&slice, buf, len);
- RD_UT_ASSERT(r == len,
- "read() returned %" PRIusz " expected %" PRIusz
- " (%" PRIusz " remains)",
- r, len, rd_slice_remains(&slice));
-
- RD_UT_ASSERT(!memcmp(buf, verify, len), "verify");
-
- r = rd_slice_offset(&slice);
- RD_UT_ASSERT(r == len, "offset() returned %" PRIusz ", not %" PRIusz, r,
- len);
-
- half = len / 2;
- i = rd_slice_seek(&slice, half);
- RD_UT_ASSERT(i == 0, "seek(%" PRIusz ") returned %d", half, i);
- r = rd_slice_offset(&slice);
- RD_UT_ASSERT(r == half, "offset() returned %" PRIusz ", not %" PRIusz,
- r, half);
-
- /* Get a sub-slice covering the later half. */
- sub = rd_slice_pos(&slice);
- r = rd_slice_offset(&sub);
- RD_UT_ASSERT(r == 0, "sub: offset() returned %" PRIusz ", not %" PRIusz,
- r, (size_t)0);
- r = rd_slice_size(&sub);
- RD_UT_ASSERT(r == half,
- "sub: size() returned %" PRIusz ", not %" PRIusz, r, half);
- r = rd_slice_remains(&sub);
- RD_UT_ASSERT(r == half,
- "sub: remains() returned %" PRIusz ", not %" PRIusz, r,
- half);
-
- /* Read half */
- r = rd_slice_read(&sub, buf, half);
- RD_UT_ASSERT(r == half,
- "sub read() returned %" PRIusz " expected %" PRIusz
- " (%" PRIusz " remains)",
- r, half, rd_slice_remains(&sub));
-
- RD_UT_ASSERT(!memcmp(buf, verify + half, half), "verify");
-
- r = rd_slice_offset(&sub);
- RD_UT_ASSERT(r == rd_slice_size(&sub),
- "sub offset() returned %" PRIusz ", not %" PRIusz, r,
- rd_slice_size(&sub));
- r = rd_slice_remains(&sub);
- RD_UT_ASSERT(r == 0,
- "sub: remains() returned %" PRIusz ", not %" PRIusz, r,
- (size_t)0);
-
- return 0;
-}
-
-
-/**
- * @brief write_seek() and split() test
- */
-static int do_unittest_write_split_seek(void) {
- rd_buf_t b;
- char ones[1024];
- char twos[1024];
- char threes[1024];
- char fiftyfives[100]; /* 0x55 indicates "untouched" memory */
- char buf[1024 * 3];
- size_t r, pos;
- rd_segment_t *seg, *newseg;
-
- memset(ones, 0x1, sizeof(ones));
- memset(twos, 0x2, sizeof(twos));
- memset(threes, 0x3, sizeof(threes));
- memset(fiftyfives, 0x55, sizeof(fiftyfives));
- memset(buf, 0x55, sizeof(buf));
-
- rd_buf_init(&b, 0, 0);
-
- /*
- * Verify write
- */
- r = rd_buf_write(&b, ones, 400);
- RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r);
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 400, "pos() returned position %" PRIusz, pos);
-
- do_unittest_read_verify(&b, 0, 400, ones);
-
- /*
- * Seek and re-write
- */
- r = rd_buf_write_seek(&b, 200);
- RD_UT_ASSERT(r == 0, "seek() failed");
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos);
-
- r = rd_buf_write(&b, twos, 100);
- RD_UT_ASSERT(r == 200, "write() returned position %" PRIusz, r);
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos);
-
- do_unittest_read_verify(&b, 0, 200, ones);
- do_unittest_read_verify(&b, 200, 100, twos);
-
- /* Make sure read() did not modify the write position. */
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos);
-
- /* Split buffer, write position is now at split where writes
- * are not allowed (mid buffer). */
- seg = rd_buf_get_segment_at_offset(&b, NULL, 50);
- RD_UT_ASSERT(seg->seg_of != 0, "assumed mid-segment");
- newseg = rd_segment_split(&b, seg, 50);
- rd_buf_append_segment(&b, newseg);
- seg = rd_buf_get_segment_at_offset(&b, NULL, 50);
- RD_UT_ASSERT(seg != NULL, "seg");
- RD_UT_ASSERT(seg == newseg, "newseg %p, seg %p", newseg, seg);
- RD_UT_ASSERT(seg->seg_of > 0,
- "assumed beginning of segment, got %" PRIusz, seg->seg_of);
-
- pos = rd_buf_write_pos(&b);
- RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos);
-
- /* Re-verify that nothing changed */
- do_unittest_read_verify(&b, 0, 200, ones);
- do_unittest_read_verify(&b, 200, 100, twos);
-
- /* Do a write seek at buffer boundary, subsequent buffers should
- * be destroyed. */
- r = rd_buf_write_seek(&b, 50);
- RD_UT_ASSERT(r == 0, "seek() failed");
- do_unittest_read_verify(&b, 0, 50, ones);
-
- rd_buf_destroy(&b);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief Unittest to verify payload is correctly written and read.
- * Each written u32 word is the running CRC of the word count.
- */
-static int do_unittest_write_read_payload_correctness(void) {
- uint32_t crc;
- uint32_t write_crc, read_crc;
- const int seed = 12345;
- rd_buf_t b;
- const size_t max_cnt = 20000;
- rd_slice_t slice;
- size_t r;
- size_t i;
- int pass;
-
- crc = rd_crc32_init();
- crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed));
-
- rd_buf_init(&b, 0, 0);
- for (i = 0; i < max_cnt; i++) {
- crc = rd_crc32_update(crc, (void *)&i, sizeof(i));
- rd_buf_write(&b, &crc, sizeof(crc));
- }
-
- write_crc = rd_crc32_finalize(crc);
-
- r = rd_buf_len(&b);
- RD_UT_ASSERT(r == max_cnt * sizeof(crc),
- "expected length %" PRIusz ", not %" PRIusz, r,
- max_cnt * sizeof(crc));
-
- /*
- * Now verify the contents with a reader.
- */
- rd_slice_init_full(&slice, &b);
-
- r = rd_slice_remains(&slice);
- RD_UT_ASSERT(r == rd_buf_len(&b),
- "slice remains %" PRIusz ", should be %" PRIusz, r,
- rd_buf_len(&b));
-
- for (pass = 0; pass < 2; pass++) {
- /* Two passes:
- * - pass 1: using peek()
- * - pass 2: using read()
- */
- const char *pass_str = pass == 0 ? "peek" : "read";
-
- crc = rd_crc32_init();
- crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed));
-
- for (i = 0; i < max_cnt; i++) {
- uint32_t buf_crc;
-
- crc = rd_crc32_update(crc, (void *)&i, sizeof(i));
-
- if (pass == 0)
- r = rd_slice_peek(&slice, i * sizeof(buf_crc),
- &buf_crc, sizeof(buf_crc));
- else
- r = rd_slice_read(&slice, &buf_crc,
- sizeof(buf_crc));
- RD_UT_ASSERT(r == sizeof(buf_crc),
- "%s() at #%" PRIusz
- " failed: "
- "r is %" PRIusz " not %" PRIusz,
- pass_str, i, r, sizeof(buf_crc));
- RD_UT_ASSERT(buf_crc == crc,
- "%s: invalid crc at #%" PRIusz
- ": expected %" PRIu32 ", read %" PRIu32,
- pass_str, i, crc, buf_crc);
- }
-
- read_crc = rd_crc32_finalize(crc);
-
- RD_UT_ASSERT(read_crc == write_crc,
- "%s: finalized read crc %" PRIu32
- " != write crc %" PRIu32,
- pass_str, read_crc, write_crc);
- }
-
- r = rd_slice_remains(&slice);
- RD_UT_ASSERT(r == 0, "slice remains %" PRIusz ", should be %" PRIusz, r,
- (size_t)0);
-
- rd_buf_destroy(&b);
-
- RD_UT_PASS();
-}
-
-#define do_unittest_iov_verify(...) \
- do { \
- int __fail = do_unittest_iov_verify0(__VA_ARGS__); \
- RD_UT_ASSERT(!__fail, "iov_verify() failed"); \
- } while (0)
-static int
-do_unittest_iov_verify0(rd_buf_t *b, size_t exp_iovcnt, size_t exp_totsize) {
-#define MY_IOV_MAX 16
- struct iovec iov[MY_IOV_MAX];
- size_t iovcnt;
- size_t i;
- size_t totsize, sum;
-
- rd_assert(exp_iovcnt <= MY_IOV_MAX);
-
- totsize =
- rd_buf_get_write_iov(b, iov, &iovcnt, MY_IOV_MAX, exp_totsize);
- RD_UT_ASSERT(totsize >= exp_totsize,
- "iov total size %" PRIusz " expected >= %" PRIusz, totsize,
- exp_totsize);
- RD_UT_ASSERT(iovcnt >= exp_iovcnt && iovcnt <= MY_IOV_MAX,
- "iovcnt %" PRIusz ", expected %" PRIusz
- " < x <= MY_IOV_MAX",
- iovcnt, exp_iovcnt);
-
- sum = 0;
- for (i = 0; i < iovcnt; i++) {
- RD_UT_ASSERT(iov[i].iov_base,
- "iov #%" PRIusz " iov_base not set", i);
- RD_UT_ASSERT(iov[i].iov_len,
- "iov #%" PRIusz " iov_len %" PRIusz
- " out of range",
- i, iov[i].iov_len);
- sum += iov[i].iov_len;
- RD_UT_ASSERT(sum <= totsize,
- "sum %" PRIusz " > totsize %" PRIusz, sum,
- totsize);
- }
-
- RD_UT_ASSERT(sum == totsize, "sum %" PRIusz " != totsize %" PRIusz, sum,
- totsize);
-
- return 0;
-}
-
-
-/**
- * @brief Verify that buffer to iovec conversion works.
- */
-static int do_unittest_write_iov(void) {
- rd_buf_t b;
-
- rd_buf_init(&b, 0, 0);
- rd_buf_write_ensure(&b, 100, 100);
-
- do_unittest_iov_verify(&b, 1, 100);
-
- /* Add a secondary buffer */
- rd_buf_write_ensure(&b, 30000, 0);
-
- do_unittest_iov_verify(&b, 2, 100 + 30000);
-
-
- rd_buf_destroy(&b);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief Verify that erasing parts of the buffer works.
- */
-static int do_unittest_erase(void) {
- static const struct {
- const char *segs[4];
- const char *writes[4];
- struct {
- size_t of;
- size_t size;
- size_t retsize;
- } erasures[4];
-
- const char *expect;
- } in[] = {/* 12|3|45
- * x x xx */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{1, 4, 4}},
- .expect = "1",
- },
- /* 12|3|45
- * xx */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{0, 2, 2}},
- .expect = "345",
- },
- /* 12|3|45
- * xx */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{3, 2, 2}},
- .expect = "123",
- },
- /* 12|3|45
- * x
- * 1 |3|45
- * x
- * 1 | 45
- * x */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{1, 1, 1}, {1, 1, 1}, {2, 1, 1}},
- .expect = "14",
- },
- /* 12|3|45
- * xxxxxxx */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{0, 5, 5}},
- .expect = "",
- },
- /* 12|3|45
- * x */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{0, 1, 1}},
- .expect = "2345",
- },
- /* 12|3|45
- * x */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{4, 1, 1}},
- .expect = "1234",
- },
- /* 12|3|45
- * x */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{5, 10, 0}},
- .expect = "12345",
- },
- /* 12|3|45
- * xxx */
- {
- .segs = {"12", "3", "45"},
- .erasures = {{4, 3, 1}, {4, 3, 0}, {4, 3, 0}},
- .expect = "1234",
- },
- /* 1
- * xxx */
- {
- .segs = {"1"},
- .erasures = {{0, 3, 1}},
- .expect = "",
- },
- /* 123456
- * xxxxxx */
- {
- .segs = {"123456"},
- .erasures = {{0, 6, 6}},
- .expect = "",
- },
- /* 123456789a
- * xxx */
- {
- .segs = {"123456789a"},
- .erasures = {{4, 3, 3}},
- .expect = "123489a",
- },
- /* 1234|5678
- * x xx */
- {.segs = {"1234", "5678"},
- .erasures = {{3, 3, 3}},
- .writes = {"9abc"},
- .expect = "123789abc"},
-
- {.expect = NULL}};
- int i;
-
- for (i = 0; in[i].expect; i++) {
- rd_buf_t b;
- rd_slice_t s;
- size_t expsz = strlen(in[i].expect);
- char *out;
- int j;
- size_t r;
- int r2;
-
- rd_buf_init(&b, 0, 0);
-
- /* Write segments to buffer */
- for (j = 0; in[i].segs[j]; j++)
- rd_buf_push_writable(&b, rd_strdup(in[i].segs[j]),
- strlen(in[i].segs[j]), rd_free);
-
- /* Perform erasures */
- for (j = 0; in[i].erasures[j].retsize; j++) {
- r = rd_buf_erase(&b, in[i].erasures[j].of,
- in[i].erasures[j].size);
- RD_UT_ASSERT(r == in[i].erasures[j].retsize,
- "expected retsize %" PRIusz
- " for i=%d,j=%d"
- ", not %" PRIusz,
- in[i].erasures[j].retsize, i, j, r);
- }
-
- /* Perform writes */
- for (j = 0; in[i].writes[j]; j++)
- rd_buf_write(&b, in[i].writes[j],
- strlen(in[i].writes[j]));
-
- RD_UT_ASSERT(expsz == rd_buf_len(&b),
- "expected buffer to be %" PRIusz
- " bytes, not "
- "%" PRIusz " for i=%d",
- expsz, rd_buf_len(&b), i);
-
- /* Read back and verify */
- r2 = rd_slice_init(&s, &b, 0, rd_buf_len(&b));
- RD_UT_ASSERT((r2 == -1 && rd_buf_len(&b) == 0) ||
- (r2 == 0 && rd_buf_len(&b) > 0),
- "slice_init(%" PRIusz ") returned %d for i=%d",
- rd_buf_len(&b), r2, i);
- if (r2 == -1)
- continue; /* Empty buffer */
-
- RD_UT_ASSERT(expsz == rd_slice_size(&s),
- "expected slice to be %" PRIusz
- " bytes, not %" PRIusz " for i=%d",
- expsz, rd_slice_size(&s), i);
-
- out = rd_malloc(expsz);
-
- r = rd_slice_read(&s, out, expsz);
- RD_UT_ASSERT(r == expsz,
- "expected to read %" PRIusz " bytes, not %" PRIusz
- " for i=%d",
- expsz, r, i);
-
- RD_UT_ASSERT(!memcmp(out, in[i].expect, expsz),
- "Expected \"%.*s\", not \"%.*s\" for i=%d",
- (int)expsz, in[i].expect, (int)r, out, i);
-
- rd_free(out);
-
- RD_UT_ASSERT(rd_slice_remains(&s) == 0,
- "expected no remaining bytes in slice, but got "
- "%" PRIusz " for i=%d",
- rd_slice_remains(&s), i);
-
- rd_buf_destroy(&b);
- }
-
-
- RD_UT_PASS();
-}
-
-
-int unittest_rdbuf(void) {
- int fails = 0;
-
- fails += do_unittest_write_read();
- fails += do_unittest_write_split_seek();
- fails += do_unittest_write_read_payload_correctness();
- fails += do_unittest_write_iov();
- fails += do_unittest_erase();
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h
deleted file mode 100644
index 1ef30e4a9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDBUF_H
-#define _RDBUF_H
-
-#ifndef _WIN32
-/* for struct iovec */
-#include <sys/socket.h>
-#include <sys/types.h>
-#endif
-
-#include "rdsysqueue.h"
-
-
-/**
- * @name Generic byte buffers
- *
- * @{
- *
- * A buffer is a list of segments, each segment having a memory pointer,
- * write offset, and capacity.
- *
- * The main buffer and segment structure is tailored for append-writing
- * or append-pushing foreign memory.
- *
- * Updates of previously written memory regions are possible through the
- * use of write_update() that takes an absolute offset.
- *
- * The write position is part of the buffer and segment structures, while
- * read is a separate object (rd_slice_t) that does not affect the buffer.
- */
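-
-/* A minimal usage sketch (illustrative only; all functions used here are
- * declared further down in this header): write bytes into a buffer, then
- * read them back through a slice. */
-#if 0 /* Example */
-static void example_buf_roundtrip(void) {
-        rd_buf_t b;
-        rd_slice_t slice;
-        char out[5];
-
-        rd_buf_init(&b, 0 /*no fixed segments*/, 0 /*no pre-allocation*/);
-        rd_buf_write(&b, "hello", 5);
-
-        /* Reading never modifies the buffer: it goes through a slice. */
-        rd_slice_init_full(&slice, &b);
-        rd_slice_read(&slice, out, sizeof(out));
-
-        rd_buf_destroy(&b);
-}
-#endif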
-
-
-/**
- * @brief Buffer segment
- */
-typedef struct rd_segment_s {
-        TAILQ_ENTRY(rd_segment_s) seg_link; /**< rbuf_segments Link */
- char *seg_p; /**< Backing-store memory */
- size_t seg_of; /**< Current relative write-position
- * (length of payload in this segment) */
- size_t seg_size; /**< Allocated size of seg_p */
- size_t seg_absof; /**< Absolute offset of this segment's
- * beginning in the grand rd_buf_t */
- void (*seg_free)(void *p); /**< Optional free function for seg_p */
- int seg_flags; /**< Segment flags */
-#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */
-#define RD_SEGMENT_F_FREE \
- 0x2 /**< Free segment on destroy, \
-             * e.g., not a fixed segment. */
-} rd_segment_t;
-
-
-
-TAILQ_HEAD(rd_segment_head, rd_segment_s);
-
-/**
- * @brief Buffer, containing a list of segments.
- */
-typedef struct rd_buf_s {
- struct rd_segment_head rbuf_segments; /**< TAILQ list of segments */
- size_t rbuf_segment_cnt; /**< Number of segments */
-
- rd_segment_t *rbuf_wpos; /**< Current write position seg */
- size_t rbuf_len; /**< Current (written) length */
- size_t rbuf_erased; /**< Total number of bytes
- * erased from segments.
- * This amount is taken into
- * account when checking for
- * writable space which is
- * always at the end of the
- * buffer and thus can't make
- * use of the erased parts. */
- size_t rbuf_size; /**< Total allocated size of
- * all segments. */
-
- char *rbuf_extra; /* Extra memory allocated for
- * use by segment structs,
- * buffer memory, etc. */
- size_t rbuf_extra_len; /* Current extra memory used */
- size_t rbuf_extra_size; /* Total size of extra memory */
-} rd_buf_t;
-
-
-
-/**
- * @brief A read-only slice of a buffer.
- */
-typedef struct rd_slice_s {
- const rd_buf_t *buf; /**< Pointer to buffer */
- const rd_segment_t *seg; /**< Current read position segment.
-                                  *   Will be NULL when the end of the
-                                  *   slice is reached. */
- size_t rof; /**< Relative read offset in segment */
- size_t start; /**< Slice start offset in buffer */
-        size_t end;          /**< Slice end offset in buffer,
-                              *   exclusive (one past the last byte) */
-} rd_slice_t;
-
-
-
-/**
- * @returns the current write position (absolute offset)
- */
-static RD_INLINE RD_UNUSED size_t rd_buf_write_pos(const rd_buf_t *rbuf) {
- const rd_segment_t *seg = rbuf->rbuf_wpos;
-
- if (unlikely(!seg)) {
-#if ENABLE_DEVEL
- rd_assert(rbuf->rbuf_len == 0);
-#endif
- return 0;
- }
-#if ENABLE_DEVEL
- rd_assert(seg->seg_absof + seg->seg_of == rbuf->rbuf_len);
-#endif
- return seg->seg_absof + seg->seg_of;
-}
-
-
-/**
- * @returns the number of bytes available for writing (before growing).
- */
-static RD_INLINE RD_UNUSED size_t rd_buf_write_remains(const rd_buf_t *rbuf) {
- return rbuf->rbuf_size - (rbuf->rbuf_len + rbuf->rbuf_erased);
-}
-
-
-
-/**
- * @returns the number of bytes remaining to write to the given segment,
- * and sets the \p *p pointer (unless NULL) to the start of
- * the contiguous memory.
- */
-static RD_INLINE RD_UNUSED size_t
-rd_segment_write_remains(const rd_segment_t *seg, void **p) {
- if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY)))
- return 0;
- if (p)
- *p = (void *)(seg->seg_p + seg->seg_of);
- return seg->seg_size - seg->seg_of;
-}
-
-
-
-/**
- * @returns the last segment for the buffer.
- */
-static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last(const rd_buf_t *rbuf) {
- return TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head);
-}
-
-
-/**
- * @returns the total written buffer length
- */
-static RD_INLINE RD_UNUSED size_t rd_buf_len(const rd_buf_t *rbuf) {
- return rbuf->rbuf_len;
-}
-
-
-int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof);
-
-
-size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size);
-size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice);
-size_t rd_buf_write_update(rd_buf_t *rbuf,
- size_t absof,
- const void *payload,
- size_t size);
-void rd_buf_push0(rd_buf_t *rbuf,
- const void *payload,
- size_t size,
- void (*free_cb)(void *),
- rd_bool_t writable);
-#define rd_buf_push(rbuf, payload, size, free_cb) \
- rd_buf_push0(rbuf, payload, size, free_cb, rd_false /*not-writable*/)
-#define rd_buf_push_writable(rbuf, payload, size, free_cb) \
- rd_buf_push0(rbuf, payload, size, free_cb, rd_true /*writable*/)
-
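-/* Sketch (illustrative): pushing caller-owned ("foreign") memory appends
- * it without copying. Pass a free_cb when transferring ownership, or
- * NULL for memory that outlives the buffer (e.g. a static string). */
-#if 0 /* Example */
-static const char greeting[] = "hello";
-/* ...given an initialized rd_buf_t b... */
-rd_buf_push(&b, greeting, sizeof(greeting) - 1, NULL /*no free_cb*/);
-#endif
-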
-size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size);
-
-size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p);
-
-void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size);
-
-void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size);
-
-size_t rd_buf_get_write_iov(const rd_buf_t *rbuf,
- struct iovec *iovs,
- size_t *iovcntp,
- size_t iov_max,
- size_t size_max);
-
-void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size);
-rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size);
-
-void rd_buf_destroy(rd_buf_t *rbuf);
-void rd_buf_destroy_free(rd_buf_t *rbuf);
-
-void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump);
-
-int unittest_rdbuf(void);
-
-
-/**@}*/
-
-
-
-/**
- * @name Buffer reads operate on slices of an rd_buf_t and do not
- * modify the underlying rd_buf_t itself.
- *
- * @warning A slice will not be valid/safe after the buffer or
- * segments have been modified by a buf write operation
- * (write, update, write_seek, etc).
- * @{
- */
-
-
-/**
- * @returns the remaining length in the slice
- */
-#define rd_slice_remains(slice) ((slice)->end - rd_slice_abs_offset(slice))
-
-/**
- * @returns the total size of the slice, regardless of current position.
- */
-#define rd_slice_size(slice) ((slice)->end - (slice)->start)
-
-/**
- * @returns the read position in the slice as a new slice.
- */
-static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos(const rd_slice_t *slice) {
- rd_slice_t newslice = *slice;
-
- if (!slice->seg)
- return newslice;
-
- newslice.start = slice->seg->seg_absof + slice->rof;
-
- return newslice;
-}
-
-/**
- * @returns the read position as an absolute buffer byte offset.
- * @remark this is the buffer offset, not the slice's local offset.
- */
-static RD_INLINE RD_UNUSED size_t rd_slice_abs_offset(const rd_slice_t *slice) {
- if (unlikely(!slice->seg)) /* reader has reached the end */
- return slice->end;
-
- return slice->seg->seg_absof + slice->rof;
-}
-
-/**
- * @returns the read position as a byte offset.
- * @remark this is the slice-local offset, not the backing buffer's offset.
- */
-static RD_INLINE RD_UNUSED size_t rd_slice_offset(const rd_slice_t *slice) {
- if (unlikely(!slice->seg)) /* reader has reached the end */
- return rd_slice_size(slice);
-
- return (slice->seg->seg_absof + slice->rof) - slice->start;
-}
-
-
-
-int rd_slice_init_seg(rd_slice_t *slice,
- const rd_buf_t *rbuf,
- const rd_segment_t *seg,
- size_t rof,
- size_t size);
-int rd_slice_init(rd_slice_t *slice,
- const rd_buf_t *rbuf,
- size_t absof,
- size_t size);
-void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf);
-
-size_t rd_slice_reader(rd_slice_t *slice, const void **p);
-size_t rd_slice_peeker(const rd_slice_t *slice, const void **p);
-
-size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size);
-size_t
-rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size);
-
-size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump);
-
-/**
- * @brief Read a zig-zag varint-encoded signed integer from \p slice,
- * storing the decoded number in \p nump on success (return value > 0).
- *
- * @returns the number of bytes read on success or 0 in case of
- * buffer underflow.
- */
-static RD_UNUSED RD_INLINE size_t rd_slice_read_varint(rd_slice_t *slice,
- int64_t *nump) {
- size_t r;
- uint64_t unum;
-
- r = rd_slice_read_uvarint(slice, &unum);
- if (likely(r > 0)) {
- /* Zig-zag decoding */
- *nump = (int64_t)((unum >> 1) ^ -(int64_t)(unum & 1));
- }
-
- return r;
-}
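-
-/* For reference, a sketch of the inverse operation (zig-zag encoding,
- * not provided by this header). It maps 0->0, -1->1, 1->2, -2->3, ...,
- * which is exactly what the decoder above undoes. */
-#if 0 /* Example */
-static uint64_t example_zigzag_encode(int64_t num) {
-        /* Assumes arithmetic right shift for signed values
-         * (the conventional protobuf/Kafka varint idiom). */
-        return ((uint64_t)num << 1) ^ (uint64_t)(num >> 63);
-}
-#endif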
-
-
-
-const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size);
-
-int rd_slice_seek(rd_slice_t *slice, size_t offset);
-
-size_t rd_slice_get_iov(const rd_slice_t *slice,
- struct iovec *iovs,
- size_t *iovcntp,
- size_t iov_max,
- size_t size_max);
-
-
-uint32_t rd_slice_crc32(rd_slice_t *slice);
-uint32_t rd_slice_crc32c(rd_slice_t *slice);
-
-
-int rd_slice_narrow(rd_slice_t *slice,
- rd_slice_t *save_slice,
- size_t size) RD_WARN_UNUSED_RESULT;
-int rd_slice_narrow_relative(rd_slice_t *slice,
- rd_slice_t *save_slice,
- size_t relsize) RD_WARN_UNUSED_RESULT;
-void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice);
-int rd_slice_narrow_copy(const rd_slice_t *orig,
- rd_slice_t *new_slice,
- size_t size) RD_WARN_UNUSED_RESULT;
-int rd_slice_narrow_copy_relative(const rd_slice_t *orig,
- rd_slice_t *new_slice,
- size_t relsize) RD_WARN_UNUSED_RESULT;
-
-void rd_slice_dump(const rd_slice_t *slice, int do_hexdump);
-
-
-/**@}*/
-
-
-
-#endif /* _RDBUF_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c
deleted file mode 100644
index 2a6e126c1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/**
- * \file rdcrc32.c
- * Functions and types for CRC checks.
- *
- * Generated on Tue May 8 17:37:04 2012,
- * by pycrc v0.7.10, http://www.tty1.net/pycrc/
- * using the configuration:
- * Width = 32
- * Poly = 0x04c11db7
- * XorIn = 0xffffffff
- * ReflectIn = True
- * XorOut = 0xffffffff
- * ReflectOut = True
- * Algorithm = table-driven
- *****************************************************************************/
-#include "rdcrc32.h" /* include the header file generated with pycrc */
-#include <stdlib.h>
-#include <stdint.h>
-
-/**
- * Static table used for the table_driven implementation.
- *****************************************************************************/
-const rd_crc32_t crc_table[256] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
- 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
- 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
- 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
- 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
- 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
- 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
- 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
- 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
- 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
- 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
- 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
- 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
- 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
- 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
- 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
- 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
- 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
- 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
- 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
- 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
- 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d};
-
-/**
- * Reflect all bits of a \a data word of \a data_len bytes.
- *
- * \param data The data word to be reflected.
- * \param data_len The width of \a data expressed in number of bits.
- * \return The reflected data.
- *****************************************************************************/
-rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) {
- unsigned int i;
- rd_crc32_t ret;
-
- ret = data & 0x01;
- for (i = 1; i < data_len; i++) {
- data >>= 1;
- ret = (ret << 1) | (data & 0x01);
- }
- return ret;
-}
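-
-/* For illustration: rd_crc32_reflect(0xD, 4) == 0xB, i.e. the low
- * 4 bits 1101 are returned in reverse order as 1011. */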
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h
deleted file mode 100644
index c3195fca6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/**
- * \file rdcrc32.h
- * Functions and types for CRC checks.
- *
- * Generated on Tue May 8 17:36:59 2012,
- * by pycrc v0.7.10, http://www.tty1.net/pycrc/
- *
- * NOTE: Contains librd modifications:
- * - rd_crc32() helper.
- * - __RDCRC32___H__ define (was missing the '32' part).
- *
- * using the configuration:
- * Width = 32
- * Poly = 0x04c11db7
- * XorIn = 0xffffffff
- * ReflectIn = True
- * XorOut = 0xffffffff
- * ReflectOut = True
- * Algorithm = table-driven
- *****************************************************************************/
-#ifndef __RDCRC32___H__
-#define __RDCRC32___H__
-
-#include "rd.h"
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#if WITH_ZLIB
-#include <zlib.h>
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- * The definition of the used algorithm.
- *****************************************************************************/
-#define CRC_ALGO_TABLE_DRIVEN 1
-
-
-/**
- * The type of the CRC values.
- *
- * This type must be big enough to contain at least 32 bits.
- *****************************************************************************/
-typedef uint32_t rd_crc32_t;
-
-#if !WITH_ZLIB
-extern const rd_crc32_t crc_table[256];
-#endif
-
-
-/**
- * Reflect all bits of a \a data word of \a data_len bytes.
- *
- * \param data The data word to be reflected.
- * \param data_len The width of \a data expressed in number of bits.
- * \return The reflected data.
- *****************************************************************************/
-rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len);
-
-
-/**
- * Calculate the initial crc value.
- *
- * \return The initial crc value.
- *****************************************************************************/
-static RD_INLINE rd_crc32_t rd_crc32_init(void) {
-#if WITH_ZLIB
- return crc32(0, NULL, 0);
-#else
- return 0xffffffff;
-#endif
-}
-
-
-/**
- * Update the crc value with new data.
- *
- * \param crc The current crc value.
- * \param data Pointer to a buffer of \a data_len bytes.
- * \param data_len Number of bytes in the \a data buffer.
- * \return The updated crc value.
- *****************************************************************************/
-static RD_INLINE RD_UNUSED rd_crc32_t rd_crc32_update(rd_crc32_t crc,
- const unsigned char *data,
- size_t data_len) {
-#if WITH_ZLIB
- rd_assert(data_len <= UINT_MAX);
- return crc32(crc, data, (uInt)data_len);
-#else
- unsigned int tbl_idx;
-
- while (data_len--) {
- tbl_idx = (crc ^ *data) & 0xff;
- crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff;
-
- data++;
- }
- return crc & 0xffffffff;
-#endif
-}
-
-
-/**
- * Calculate the final crc value.
- *
- * \param crc The current crc value.
- * \return The final crc value.
- *****************************************************************************/
-static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) {
-#if WITH_ZLIB
- return crc;
-#else
- return crc ^ 0xffffffff;
-#endif
-}
-
-
-/**
- * Wrapper for performing CRC32 on the provided buffer.
- */
-static RD_INLINE rd_crc32_t rd_crc32(const char *data, size_t data_len) {
- return rd_crc32_finalize(rd_crc32_update(
- rd_crc32_init(), (const unsigned char *)data, data_len));
-}
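-
-/* Usage sketch (illustrative): one-shot and streaming use give the same
- * result. "123456789" is the standard CRC-32 check input, whose check
- * value for this polynomial/reflection configuration is 0xcbf43926. */
-#if 0 /* Example */
-static void example_crc32(void) {
-        const char *msg = "123456789";
-        rd_crc32_t crc;
-
-        /* One-shot */
-        crc = rd_crc32(msg, 9); /* 0xcbf43926 */
-
-        /* Streaming over two chunks */
-        crc = rd_crc32_init();
-        crc = rd_crc32_update(crc, (const unsigned char *)msg, 4);
-        crc = rd_crc32_update(crc, (const unsigned char *)(msg + 4), 5);
-        crc = rd_crc32_finalize(crc); /* same: 0xcbf43926 */
-}
-#endif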
-
-#ifdef __cplusplus
-} /* closing brace for extern "C" */
-#endif
-
-#endif /* __RDCRC32___H__ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c
deleted file mode 100644
index 785e28c48..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rddl.h"
-
-#if WITH_LIBDL
-#include <dlfcn.h>
-
-#elif defined(_WIN32)
-
-#else
-#error "Dynamic library loading not supported on this platform"
-#endif
-
-
-
-/**
- * @brief Latest thread-local dl error, normalized to suit our logging.
- * @returns a newly allocated string that must be freed
- */
-static char *rd_dl_error(void) {
-#if WITH_LIBDL
- char *errstr;
- char *s;
- errstr = dlerror();
- if (!errstr)
- return rd_strdup("No error returned from dlerror()");
-
- errstr = rd_strdup(errstr);
- /* Change newlines to separators. */
- while ((s = strchr(errstr, '\n')))
- *s = '.';
-
- return errstr;
-
-#elif defined(_WIN32)
- char buf[1024];
- rd_strerror_w32(GetLastError(), buf, sizeof(buf));
- return rd_strdup(buf);
-#endif
-}
-
-/**
- * @brief Attempt to load library \p path.
- * @returns the library handle (platform dependent, thus opaque) on success,
- * else NULL.
- */
-static rd_dl_hnd_t *
-rd_dl_open0(const char *path, char *errstr, size_t errstr_size) {
- void *handle;
- const char *loadfunc;
-#if WITH_LIBDL
- loadfunc = "dlopen()";
- handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
-#elif defined(_WIN32)
- loadfunc = "LoadLibrary()";
- handle = (void *)LoadLibraryA(path);
-#endif
- if (!handle) {
- char *dlerrstr = rd_dl_error();
- rd_snprintf(errstr, errstr_size, "%s failed: %s", loadfunc,
- dlerrstr);
- rd_free(dlerrstr);
- }
- return (rd_dl_hnd_t *)handle;
-}
-
-
-/**
- * @brief Attempt to load library \p path, possibly with a filename extension
- * which will be automatically resolved depending on platform.
- * @returns the library handle (platform dependent, thus opaque) on success,
- * else NULL.
- */
-rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size) {
- rd_dl_hnd_t *handle;
- char *extpath;
- size_t pathlen;
- const char *td, *fname;
- const char *solib_ext = SOLIB_EXT;
-
- /* Try original path first. */
- handle = rd_dl_open0(path, errstr, errstr_size);
- if (handle)
- return handle;
-
- /* Original path not found, see if we can append the solib_ext
- * filename extension. */
-
- /* Get filename and filename extension.
- * We can't rely on basename(3) since it is not portable */
- fname = strrchr(path, '/');
-#ifdef _WIN32
- td = strrchr(path, '\\');
- if (td > fname)
- fname = td;
-#endif
- if (!fname)
- fname = path;
-
- td = strrchr(fname, '.');
-
- /* If there is a filename extension ('.' within the last characters)
- * then bail out, we will not append an extension in this case. */
- if (td && td >= fname + strlen(fname) - strlen(SOLIB_EXT))
- return NULL;
-
- /* Append platform-specific library extension. */
- pathlen = strlen(path);
- extpath = rd_alloca(pathlen + strlen(solib_ext) + 1);
- memcpy(extpath, path, pathlen);
- memcpy(extpath + pathlen, solib_ext, strlen(solib_ext) + 1);
-
- /* Try again with extension */
- return rd_dl_open0(extpath, errstr, errstr_size);
-}
-
-
-/**
- * @brief Close handle previously returned by rd_dl_open()
- * @remark errors are ignored (what can we do anyway?)
- */
-void rd_dl_close(rd_dl_hnd_t *handle) {
-#if WITH_LIBDL
- dlclose((void *)handle);
-#elif defined(_WIN32)
- FreeLibrary((HMODULE)handle);
-#endif
-}
-
-/**
- * @brief Look up the address of \p symbol in library handle \p handle.
- * @returns the function pointer on success or NULL on error.
- */
-void *rd_dl_sym(rd_dl_hnd_t *handle,
- const char *symbol,
- char *errstr,
- size_t errstr_size) {
- void *func;
-#if WITH_LIBDL
- func = dlsym((void *)handle, symbol);
-#elif defined(_WIN32)
- func = GetProcAddress((HMODULE)handle, symbol);
-#endif
- if (!func) {
- char *dlerrstr = rd_dl_error();
- rd_snprintf(errstr, errstr_size,
- "Failed to load symbol \"%s\": %s", symbol,
- dlerrstr);
- rd_free(dlerrstr);
- }
- return func;
-}
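-
-/* Usage sketch (illustrative; the plugin path and symbol name below are
- * hypothetical):
- *
- *   char errstr[256];
- *   rd_dl_hnd_t *hnd = rd_dl_open("./myplugin", errstr, sizeof(errstr));
- *   if (hnd) {
- *           void (*init_fn)(void) = (void (*)(void))rd_dl_sym(
- *               hnd, "plugin_init", errstr, sizeof(errstr));
- *           if (init_fn)
- *                   init_fn();
- *           rd_dl_close(hnd);
- *   }
- */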
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h
deleted file mode 100644
index eaf6eb6d5..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDDL_H
-#define _RDDL_H
-
-#include <sys/types.h>
-
-typedef void rd_dl_hnd_t;
-
-rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size);
-void rd_dl_close(rd_dl_hnd_t *handle);
-void *rd_dl_sym(rd_dl_hnd_t *handle,
- const char *symbol,
- char *errstr,
- size_t errstr_size);
-
-#endif /* _RDDL_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h
deleted file mode 100644
index 613d44bfa..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDENDIAN_H_
-#define _RDENDIAN_H_
-
-/**
- * Provides portable endian-swapping macros/functions.
- *
- * be64toh()
- * htobe64()
- * be32toh()
- * htobe32()
- * be16toh()
- * htobe16()
- * le64toh()
- */
-
-#ifdef __FreeBSD__
-#include <sys/endian.h>
-#elif defined __GLIBC__
-#include <endian.h>
-#ifndef be64toh
-/* Support older glibc (<2.9) which lack be64toh */
-#include <byteswap.h>
-#if __BYTE_ORDER == __BIG_ENDIAN
-#define be16toh(x) (x)
-#define be32toh(x) (x)
-#define be64toh(x) (x)
-#define le64toh(x) __bswap_64(x)
-#define le32toh(x) __bswap_32(x)
-#else
-#define be16toh(x) __bswap_16(x)
-#define be32toh(x) __bswap_32(x)
-#define be64toh(x) __bswap_64(x)
-#define le64toh(x) (x)
-#define le32toh(x) (x)
-#endif
-#endif
-
-#elif defined __CYGWIN__
-#include <endian.h>
-#elif defined __BSD__
-#include <sys/endian.h>
-#elif defined __sun
-#include <sys/byteorder.h>
-#include <sys/isa_defs.h>
-#define __LITTLE_ENDIAN 1234
-#define __BIG_ENDIAN 4321
-#ifdef _BIG_ENDIAN
-#define __BYTE_ORDER __BIG_ENDIAN
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-#define le16toh(x) ((uint16_t)BSWAP_16(x))
-#define le32toh(x) BSWAP_32(x)
-#define le64toh(x) BSWAP_64(x)
-#else
-#define __BYTE_ORDER __LITTLE_ENDIAN
-#define be64toh(x) BSWAP_64(x)
-#define be32toh(x) ntohl(x)
-#define be16toh(x) ntohs(x)
-#define le16toh(x) (x)
-#define le32toh(x) (x)
-#define le64toh(x) (x)
-#define htole16(x) (x)
-#define htole64(x) (x)
-#endif /* __sun */
-
-#elif defined __APPLE__
-#include <machine/endian.h>
-#include <libkern/OSByteOrder.h>
-#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-#define le16toh(x) OSSwapInt16(x)
-#define le32toh(x) OSSwapInt32(x)
-#define le64toh(x) OSSwapInt64(x)
-#else
-#define be64toh(x) OSSwapInt64(x)
-#define be32toh(x) OSSwapInt32(x)
-#define be16toh(x) OSSwapInt16(x)
-#define le16toh(x) (x)
-#define le32toh(x) (x)
-#define le64toh(x) (x)
-#endif
-
-#elif defined(_WIN32)
-#include <intrin.h>
-
-#define be64toh(x) _byteswap_uint64(x)
-#define be32toh(x) _byteswap_ulong(x)
-#define be16toh(x) _byteswap_ushort(x)
-#define le16toh(x) (x)
-#define le32toh(x) (x)
-#define le64toh(x) (x)
-
-#elif defined _AIX /* AIX is always big endian */
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-#define le32toh(x) \
- ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \
- (((x)&0xff000000) >> 24))
-#define le64toh(x) \
- ((((x)&0x00000000000000ffL) << 56) | \
- (((x)&0x000000000000ff00L) << 40) | \
- (((x)&0x0000000000ff0000L) << 24) | \
- (((x)&0x00000000ff000000L) << 8) | (((x)&0x000000ff00000000L) >> 8) | \
- (((x)&0x0000ff0000000000L) >> 24) | \
- (((x)&0x00ff000000000000L) >> 40) | \
- (((x)&0xff00000000000000L) >> 56))
-#else
-#include <endian.h>
-#endif
-
-
-
-/*
- * On Solaris, be64toh is a function, not a macro, so there's no need to error
- * if it's not defined.
- */
-#if !defined(__sun) && !defined(be64toh)
-#error Missing definition for be64toh
-#endif
-
-#ifndef be32toh
-#define be32toh(x) ntohl(x)
-#endif
-
-#ifndef be16toh
-#define be16toh(x) ntohs(x)
-#endif
-
-#ifndef htobe64
-#define htobe64(x) be64toh(x)
-#endif
-#ifndef htobe32
-#define htobe32(x) be32toh(x)
-#endif
-#ifndef htobe16
-#define htobe16(x) be16toh(x)
-#endif
-
-#ifndef htole32
-#define htole32(x) le32toh(x)
-#endif
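-
-/* Usage sketch (illustrative): decode a 32-bit big-endian (network byte
- * order) integer from a wire buffer; memcpy avoids unaligned access. */
-#if 0 /* Example */
-static uint32_t example_read_be32(const void *wire) {
-        uint32_t v;
-        memcpy(&v, wire, sizeof(v)); /* requires <string.h> */
-        return be32toh(v);
-}
-#endif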
-
-#endif /* _RDENDIAN_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h
deleted file mode 100644
index 310045f0e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include <math.h>
-
-/**
- * rd_dbl_eq0(a,b,prec)
- * Check two doubles for equality with the specified precision.
- * Use this instead of != and == for all floats/doubles.
- * More info:
- * http://docs.sun.com/source/806-3568/ncg_goldberg.html
- */
-static RD_INLINE RD_UNUSED int rd_dbl_eq0(double a, double b, double prec) {
- return fabs(a - b) < prec;
-}
-
-/* A default 'good' double-equality precision value.
- * This rather timid epsilon value is useful for tenths, hundredths,
- * and thousandths, but not anything more precise than that.
- * If a higher precision is needed, use rd_dbl_eq0() directly and
- * specify your own precision. */
-#define RD_DBL_EPSILON 0.00001
-
-/**
- * rd_dbl_eq(a,b)
- * Same as rd_dbl_eq0() above but with a predefined 'good' precision.
- */
-#define rd_dbl_eq(a, b) rd_dbl_eq0(a, b, RD_DBL_EPSILON)
-
-/**
- * rd_dbl_ne(a,b)
- * Same as rd_dbl_eq() above but with reversed logic: not-equal.
- */
-#define rd_dbl_ne(a, b) (!rd_dbl_eq0(a, b, RD_DBL_EPSILON))
-
-/**
- * rd_dbl_zero(a)
- * Checks if the double `a' is zero (or close enough).
- */
-#define rd_dbl_zero(a) rd_dbl_eq0(a, 0.0, RD_DBL_EPSILON)
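-
-/* Sketch of why an epsilon is needed (illustrative): binary rounding
- * makes exact comparison fail where rd_dbl_eq() succeeds. */
-#if 0 /* Example */
-double a = 0.1 + 0.2;          /* 0.30000000000000004 in IEEE-754 */
-int exact = (a == 0.3);        /* 0: exact comparison fails */
-int fuzzy = rd_dbl_eq(a, 0.3); /* 1: within RD_DBL_EPSILON */
-#endif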
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c
deleted file mode 100644
index e951ec59f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdunittest.h"
-#include "rdfnv1a.h"
-
-
-/* FNV-1a by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo
- *
- * Based on http://www.isthe.com/chongo/src/fnv/hash_32a.c
- * with librdkafka modifications to match the Sarama default Producer
- * implementation, as seen here:
- * https://github.com/Shopify/sarama/blob/master/partitioner.go#L203
- * Note that this implementation is only compatible with Sarama's
- * default NewHashPartitioner and not NewReferenceHashPartitioner.
- */
-uint32_t rd_fnv1a(const void *key, size_t len) {
- const uint32_t prime = 0x01000193; // 16777619
- const uint32_t offset = 0x811C9DC5; // 2166136261
- size_t i;
- int32_t h = offset;
-
- const unsigned char *data = (const unsigned char *)key;
-
- for (i = 0; i < len; i++) {
- h ^= data[i];
- h *= prime;
- }
-
- /* Take absolute value to match the Sarama NewHashPartitioner
- * implementation */
- if (h < 0) {
- h = -h;
- }
-
- return (uint32_t)h;
-}
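-
-/* Usage sketch (illustrative): a Sarama-style hash partitioner maps a
- * message key onto one of N partitions; the key and partition count
- * below are hypothetical. */
-#if 0 /* Example */
-const char *key = "my-key";
-int32_t partition_cnt = 12;
-int32_t partition = (int32_t)(rd_fnv1a(key, strlen(key)) % partition_cnt);
-#endif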
-
-
-/**
- * @brief Unittest for rd_fnv1a()
- */
-int unittest_fnv1a(void) {
- const char *short_unaligned = "1234";
- const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs";
- const char *keysToTest[] = {
- "kafka",
- "giberish123456789",
- short_unaligned,
- short_unaligned + 1,
- short_unaligned + 2,
- short_unaligned + 3,
- unaligned,
- unaligned + 1,
- unaligned + 2,
- unaligned + 3,
- "",
- NULL,
- };
-
- // Acquired via https://play.golang.org/p/vWIhw3zJINA
- const int32_t golang_hashfnv_results[] = {
- 0xd33c4e1, // kafka
- 0x77a58295, // giberish123456789
- 0x23bdd03, // short_unaligned
- 0x2dea3cd2, // short_unaligned+1
- 0x740fa83e, // short_unaligned+2
- 0x310ca263, // short_unaligned+3
- 0x65cbd69c, // unaligned
- 0x6e49c79a, // unaligned+1
- 0x69eed356, // unaligned+2
- 0x6abcc023, // unaligned+3
- 0x7ee3623b, // ""
- 0x7ee3623b, // NULL
- };
-
- size_t i;
- for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) {
- uint32_t h = rd_fnv1a(
- keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0);
- RD_UT_ASSERT((int32_t)h == golang_hashfnv_results[i],
- "Calculated FNV-1a hash 0x%x for \"%s\", "
- "expected 0x%x",
- h, keysToTest[i], golang_hashfnv_results[i]);
- }
- RD_UT_PASS();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h
deleted file mode 100644
index 8df66b0d6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __RDFNV1A___H__
-#define __RDFNV1A___H__
-
-uint32_t rd_fnv1a(const void *key, size_t len);
-int unittest_fnv1a(void);
-
-#endif // __RDFNV1A___H__
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c
deleted file mode 100644
index 794bd9cc1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdgz.h"
-
-#include <zlib.h>
-
-
-#define RD_GZ_CHUNK 262144
-
-void *rd_gz_decompress(const void *compressed,
- int compressed_len,
- uint64_t *decompressed_lenp) {
- int pass = 1;
- char *decompressed = NULL;
-
- /* First pass (1): calculate decompressed size.
- * (pass-1 is skipped if *decompressed_lenp is
- * non-zero).
- * Second pass (2): perform actual decompression.
- */
-
- if (*decompressed_lenp != 0LLU)
- pass++;
-
- for (; pass <= 2; pass++) {
- z_stream strm = RD_ZERO_INIT;
- char buf[512];
- char *p;
- int len;
- int r;
-
- if ((r = inflateInit2(&strm, 15 + 32)) != Z_OK)
- goto fail;
-
- strm.next_in = (void *)compressed;
- strm.avail_in = compressed_len;
-
- if (pass == 1) {
- /* Use dummy output buffer */
- p = buf;
- len = sizeof(buf);
- } else {
- /* Use real output buffer */
- p = decompressed;
- len = (int)*decompressed_lenp;
- }
-
- do {
- strm.next_out = (unsigned char *)p;
- strm.avail_out = len;
-
- r = inflate(&strm, Z_NO_FLUSH);
- switch (r) {
- case Z_STREAM_ERROR:
- case Z_NEED_DICT:
- case Z_DATA_ERROR:
- case Z_MEM_ERROR:
- inflateEnd(&strm);
- goto fail;
- }
-
- if (pass == 2) {
- /* Advance output pointer (in pass 2). */
- p += len - strm.avail_out;
- len -= len - strm.avail_out;
- }
-
- } while (strm.avail_out == 0 && r != Z_STREAM_END);
-
-
- if (pass == 1) {
- *decompressed_lenp = strm.total_out;
- if (!(decompressed = rd_malloc(
- (size_t)(*decompressed_lenp) + 1))) {
- inflateEnd(&strm);
- return NULL;
- }
- /* For convenience of the caller we nul-terminate
- * the buffer. If it happens to be a string there
- * is no need for extra copies. */
- decompressed[*decompressed_lenp] = '\0';
- }
-
- inflateEnd(&strm);
- }
-
- return decompressed;
-
-fail:
- if (decompressed)
- rd_free(decompressed);
- return NULL;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h
deleted file mode 100644
index 10d661cb3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDGZ_H_
-#define _RDGZ_H_
-
-/**
- * Simple gzip decompression returning the inflated data
- * in a malloced buffer.
- * '*decompressed_lenp' must be 0 if the length of the uncompressed data
- * is not known in which case it will be calculated.
- * The returned buffer is nul-terminated (the actual allocated length
- * is '*decompressed_lenp'+1).
- *
- * The decompressed length is returned in '*decompressed_lenp'.
- */
-void *rd_gz_decompress(const void *compressed,
- int compressed_len,
- uint64_t *decompressed_lenp);
-
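-/* Usage sketch (illustrative; 'gzdata' and 'gzdata_len' are hypothetical
- * inputs): pass 0 to have the decompressed length calculated in a
- * first pass. */
-#if 0 /* Example */
-uint64_t len = 0; /* 0 = unknown: triggers the size-calculation pass */
-void *plain = rd_gz_decompress(gzdata, gzdata_len, &len);
-if (plain) {
-        /* 'plain' holds 'len' bytes and is nul-terminated. */
-        rd_free(plain);
-}
-#endif
-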
-#endif /* _RDGZ_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c
deleted file mode 100644
index 3f2b6758b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/*
- * This license covers this C port of
- * Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
- * at revision 3a0bb77429bd3a61596f5e8a3172445844342120
- *
- * ----------------------------------------------------------------------------
- *
- * The MIT License (MIT)
- *
- * Copyright (c) 2014 Coda Hale
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Minimal C Hdr_Histogram based on Coda Hale's Golang implementation.
- * https://github.com/codahale/hdrhistogram
- *
- * A Histogram is a lossy data structure used to record the distribution of
- * non-normally distributed data (like latency) with a high degree of accuracy
- * and a bounded degree of precision.
- */
-
-#include "rd.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-
-#include "rdhdrhistogram.h"
-#include "rdunittest.h"
-#include "rdfloat.h"
-
-void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr) {
- rd_free(hdr);
-}
-
-rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue,
- int64_t maxValue,
- int significantFigures) {
- rd_hdr_histogram_t *hdr;
- int64_t largestValueWithSingleUnitResolution;
- int32_t subBucketCountMagnitude;
- int32_t subBucketHalfCountMagnitude;
- int32_t unitMagnitude;
- int32_t subBucketCount;
- int32_t subBucketHalfCount;
- int64_t subBucketMask;
- int64_t smallestUntrackableValue;
- int32_t bucketsNeeded = 1;
- int32_t bucketCount;
- int32_t countsLen;
-
- if (significantFigures < 1 || significantFigures > 5)
- return NULL;
-
- largestValueWithSingleUnitResolution =
- (int64_t)(2.0 * pow(10.0, (double)significantFigures));
-
- subBucketCountMagnitude =
- (int32_t)ceil(log2((double)largestValueWithSingleUnitResolution));
-
- subBucketHalfCountMagnitude = RD_MAX(subBucketCountMagnitude, 1) - 1;
-
- unitMagnitude = (int32_t)RD_MAX(floor(log2((double)minValue)), 0);
-
- subBucketCount =
- (int32_t)pow(2, (double)subBucketHalfCountMagnitude + 1.0);
-
- subBucketHalfCount = subBucketCount / 2;
-
- subBucketMask = (int64_t)(subBucketCount - 1) << unitMagnitude;
-
- /* Determine exponent range needed to support the trackable
- * value with no overflow: */
- smallestUntrackableValue = (int64_t)subBucketCount << unitMagnitude;
- while (smallestUntrackableValue < maxValue) {
- smallestUntrackableValue <<= 1;
- bucketsNeeded++;
- }
-
- bucketCount = bucketsNeeded;
- countsLen = (bucketCount + 1) * (subBucketCount / 2);
- hdr = rd_calloc(1, sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen));
- hdr->counts = (int64_t *)(hdr + 1);
- hdr->allocatedSize = sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen);
-
- hdr->lowestTrackableValue = minValue;
- hdr->highestTrackableValue = maxValue;
- hdr->unitMagnitude = unitMagnitude;
- hdr->significantFigures = significantFigures;
- hdr->subBucketHalfCountMagnitude = subBucketHalfCountMagnitude;
- hdr->subBucketHalfCount = subBucketHalfCount;
- hdr->subBucketMask = subBucketMask;
- hdr->subBucketCount = subBucketCount;
- hdr->bucketCount = bucketCount;
- hdr->countsLen = countsLen;
- hdr->totalCount = 0;
- hdr->lowestOutOfRange = minValue;
- hdr->highestOutOfRange = maxValue;
-
- return hdr;
-}
-
-/**
- * @brief Deletes all recorded values and resets histogram.
- */
-void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr) {
- int32_t i;
- hdr->totalCount = 0;
- for (i = 0; i < hdr->countsLen; i++)
- hdr->counts[i] = 0;
-}
-
-
-
-static RD_INLINE int32_t rd_hdr_countsIndex(const rd_hdr_histogram_t *hdr,
- int32_t bucketIdx,
- int32_t subBucketIdx) {
- int32_t bucketBaseIdx = (bucketIdx + 1)
- << hdr->subBucketHalfCountMagnitude;
- int32_t offsetInBucket = subBucketIdx - hdr->subBucketHalfCount;
- return bucketBaseIdx + offsetInBucket;
-}
-
-static RD_INLINE int64_t rd_hdr_getCountAtIndex(const rd_hdr_histogram_t *hdr,
- int32_t bucketIdx,
- int32_t subBucketIdx) {
- return hdr->counts[rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx)];
-}
-
-
-static RD_INLINE int64_t bitLen(int64_t x) {
- int64_t n = 0;
- for (; x >= 0x8000; x >>= 16)
- n += 16;
- if (x >= 0x80) {
- x >>= 8;
- n += 8;
- }
- if (x >= 0x8) {
- x >>= 4;
- n += 4;
- }
- if (x >= 0x2) {
- x >>= 2;
- n += 2;
- }
- if (x >= 0x1)
- n++;
- return n;
-}
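-
-/* For illustration: bitLen() returns the number of bits needed to
- * represent \p x, e.g. bitLen(16) == 5 since 16 == 0b10000. */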
-
-
-static RD_INLINE int32_t rd_hdr_getBucketIndex(const rd_hdr_histogram_t *hdr,
- int64_t v) {
- int64_t pow2Ceiling = bitLen(v | hdr->subBucketMask);
- return (int32_t)(pow2Ceiling - (int64_t)hdr->unitMagnitude -
- (int64_t)(hdr->subBucketHalfCountMagnitude + 1));
-}
-
-static RD_INLINE int32_t rd_hdr_getSubBucketIdx(const rd_hdr_histogram_t *hdr,
- int64_t v,
- int32_t idx) {
- return (int32_t)(v >> ((int64_t)idx + (int64_t)hdr->unitMagnitude));
-}
-
-static RD_INLINE int64_t rd_hdr_valueFromIndex(const rd_hdr_histogram_t *hdr,
- int32_t bucketIdx,
- int32_t subBucketIdx) {
- return (int64_t)subBucketIdx
- << ((int64_t)bucketIdx + hdr->unitMagnitude);
-}
-
-static RD_INLINE int64_t
-rd_hdr_sizeOfEquivalentValueRange(const rd_hdr_histogram_t *hdr, int64_t v) {
- int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
- int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
- int32_t adjustedBucket = bucketIdx;
- if (unlikely(subBucketIdx >= hdr->subBucketCount))
- adjustedBucket++;
- return (int64_t)1 << (hdr->unitMagnitude + (int64_t)adjustedBucket);
-}
-
-static RD_INLINE int64_t
-rd_hdr_lowestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
- int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
- int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
- return rd_hdr_valueFromIndex(hdr, bucketIdx, subBucketIdx);
-}
-
-
-static RD_INLINE int64_t
-rd_hdr_nextNonEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
- return rd_hdr_lowestEquivalentValue(hdr, v) +
- rd_hdr_sizeOfEquivalentValueRange(hdr, v);
-}
-
-
-static RD_INLINE int64_t
-rd_hdr_highestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
- return rd_hdr_nextNonEquivalentValue(hdr, v) - 1;
-}
-
-static RD_INLINE int64_t
-rd_hdr_medianEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
- return rd_hdr_lowestEquivalentValue(hdr, v) +
- (rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1);
-}
-
-
-static RD_INLINE int32_t rd_hdr_countsIndexFor(const rd_hdr_histogram_t *hdr,
- int64_t v) {
- int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
- int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
- return rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx);
-}
-
-
-
-typedef struct rd_hdr_iter_s {
- const rd_hdr_histogram_t *hdr;
- int bucketIdx;
- int subBucketIdx;
- int64_t countAtIdx;
- int64_t countToIdx;
- int64_t valueFromIdx;
- int64_t highestEquivalentValue;
-} rd_hdr_iter_t;
-
-#define RD_HDR_ITER_INIT(hdr) \
- { .hdr = hdr, .subBucketIdx = -1 }
-
-static int rd_hdr_iter_next(rd_hdr_iter_t *it) {
- const rd_hdr_histogram_t *hdr = it->hdr;
-
- if (unlikely(it->countToIdx >= hdr->totalCount))
- return 0;
-
- it->subBucketIdx++;
- if (unlikely(it->subBucketIdx >= hdr->subBucketCount)) {
- it->subBucketIdx = hdr->subBucketHalfCount;
- it->bucketIdx++;
- }
-
- if (unlikely(it->bucketIdx >= hdr->bucketCount))
- return 0;
-
- it->countAtIdx =
- rd_hdr_getCountAtIndex(hdr, it->bucketIdx, it->subBucketIdx);
- it->countToIdx += it->countAtIdx;
- it->valueFromIdx =
- rd_hdr_valueFromIndex(hdr, it->bucketIdx, it->subBucketIdx);
- it->highestEquivalentValue =
- rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx);
-
- return 1;
-}
-
-
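-/**
- * @returns the approximate standard deviation of the recorded values.
- */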
-double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr) {
- double mean;
- double geometricDevTotal = 0.0;
- rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
-
- if (hdr->totalCount == 0)
- return 0;
-
- mean = rd_hdr_histogram_mean(hdr);
-
-
- while (rd_hdr_iter_next(&it)) {
- double dev;
-
- if (it.countAtIdx == 0)
- continue;
-
- dev =
- (double)rd_hdr_medianEquivalentValue(hdr, it.valueFromIdx) -
- mean;
- geometricDevTotal += (dev * dev) * (double)it.countAtIdx;
- }
-
- return sqrt(geometricDevTotal / (double)hdr->totalCount);
-}
-
-
-/**
- * @returns the approximate maximum recorded value.
- */
-int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr) {
- int64_t vmax = 0;
- rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
-
- while (rd_hdr_iter_next(&it)) {
- if (it.countAtIdx != 0)
- vmax = it.highestEquivalentValue;
- }
- return rd_hdr_highestEquivalentValue(hdr, vmax);
-}
-
-/**
- * @returns the approximate minimum recorded value.
- */
-int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr) {
- int64_t vmin = 0;
- rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
-
- while (rd_hdr_iter_next(&it)) {
- if (it.countAtIdx != 0 && vmin == 0) {
- vmin = it.highestEquivalentValue;
- break;
- }
- }
- return rd_hdr_lowestEquivalentValue(hdr, vmin);
-}
-
-/**
- * @returns the approximate arithmetic mean of the recorded values.
- */
-double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr) {
- int64_t total = 0;
- rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
-
- if (hdr->totalCount == 0)
- return 0.0;
-
- while (rd_hdr_iter_next(&it)) {
- if (it.countAtIdx != 0)
- total += it.countAtIdx * rd_hdr_medianEquivalentValue(
- hdr, it.valueFromIdx);
- }
- return (double)total / (double)hdr->totalCount;
-}
-
-
-
-/**
- * @brief Records the given value.
- *
- * @returns 1 if value was recorded or 0 if value is out of range.
- */
-
-int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v) {
- int32_t idx = rd_hdr_countsIndexFor(hdr, v);
-
- if (idx < 0 || hdr->countsLen <= idx) {
- hdr->outOfRangeCount++;
- if (v > hdr->highestOutOfRange)
- hdr->highestOutOfRange = v;
- if (v < hdr->lowestOutOfRange)
- hdr->lowestOutOfRange = v;
- return 0;
- }
-
- hdr->counts[idx]++;
- hdr->totalCount++;
-
- return 1;
-}
-
-
-/**
- * @returns the recorded value at the given quantile (0..100).
- */
-int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q) {
- int64_t total = 0;
- int64_t countAtPercentile;
- rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
-
- if (q > 100.0)
- q = 100.0;
-
- countAtPercentile =
- (int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5);
-
- while (rd_hdr_iter_next(&it)) {
- total += it.countAtIdx;
- if (total >= countAtPercentile)
- return rd_hdr_highestEquivalentValue(hdr,
- it.valueFromIdx);
- }
-
- return 0;
-}
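-
-/* A minimal usage sketch of the public histogram API above (illustrative
- * only; the bounds, recorded value and quantile are arbitrary):
- *
- *   rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 1000000, 3);
- *   rd_hdr_histogram_record(hdr, 1234); // e.g. a latency in microseconds
- *   int64_t p99 = rd_hdr_histogram_quantile(hdr, 99.0);
- *   rd_hdr_histogram_destroy(hdr);
- */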
-
-
-
-/**
- * @name Unit tests
- * @{
- *
- *
- *
- */
-
-/**
- * @returns 0 on success or 1 on failure.
- */
-static int ut_high_sigfig(void) {
- rd_hdr_histogram_t *hdr;
- const int64_t input[] = {
- 459876, 669187, 711612, 816326, 931423,
- 1033197, 1131895, 2477317, 3964974, 12718782,
- };
- size_t i;
- int64_t v;
- const int64_t exp = 1048575;
-
- hdr = rd_hdr_histogram_new(459876, 12718782, 5);
- for (i = 0; i < RD_ARRAYSIZE(input); i++) {
- /* Ignore errors (some should fail) */
- rd_hdr_histogram_record(hdr, input[i]);
- }
-
- v = rd_hdr_histogram_quantile(hdr, 50);
- RD_UT_ASSERT(v == exp, "Median is %" PRId64 ", expected %" PRId64, v,
- exp);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-static int ut_quantile(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- size_t i;
- const struct {
- double q;
- int64_t v;
- } exp[] = {
- {50, 500223}, {75, 750079}, {90, 900095}, {95, 950271},
- {99, 990207}, {99.9, 999423}, {99.99, 999935},
- };
-
- for (i = 0; i < 1000000; i++) {
- int r = rd_hdr_histogram_record(hdr, (int64_t)i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
- }
-
- for (i = 0; i < RD_ARRAYSIZE(exp); i++) {
- int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q);
- RD_UT_ASSERT(v == exp[i].v,
- "P%.2f is %" PRId64 ", expected %" PRId64,
- exp[i].q, v, exp[i].v);
- }
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-static int ut_mean(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- size_t i;
- const double exp = 500000.013312;
- double v;
-
- for (i = 0; i < 1000000; i++) {
- int r = rd_hdr_histogram_record(hdr, (int64_t)i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
- }
-
- v = rd_hdr_histogram_mean(hdr);
- RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), "Mean is %f, expected %f",
- v, exp);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-
-static int ut_stddev(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- size_t i;
- const double exp = 288675.140368;
- const double epsilon = 0.000001;
- double v;
-
- for (i = 0; i < 1000000; i++) {
- int r = rd_hdr_histogram_record(hdr, (int64_t)i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
- }
-
- v = rd_hdr_histogram_stddev(hdr);
- RD_UT_ASSERT(rd_dbl_eq0(v, exp, epsilon),
- "StdDev is %.6f, expected %.6f: diff %.6f vs epsilon %.6f",
- v, exp, fabs(v - exp), epsilon);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-static int ut_totalcount(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- int64_t i;
-
- for (i = 0; i < 1000000; i++) {
- int64_t v;
- int r = rd_hdr_histogram_record(hdr, i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
-
- v = hdr->totalCount;
- RD_UT_ASSERT(v == i + 1,
- "total_count is %" PRId64 ", expected %" PRId64, v,
- i + 1);
- }
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-
-static int ut_max(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- int64_t i, v;
- const int64_t exp = 1000447;
-
- for (i = 0; i < 1000000; i++) {
- int r = rd_hdr_histogram_record(hdr, i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
- }
-
- v = rd_hdr_histogram_max(hdr);
- RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-static int ut_min(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- int64_t i, v;
- const int64_t exp = 0;
-
- for (i = 0; i < 1000000; i++) {
- int r = rd_hdr_histogram_record(hdr, i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
- }
-
- v = rd_hdr_histogram_min(hdr);
- RD_UT_ASSERT(v == exp, "Min is %" PRId64 ", expected %" PRId64, v, exp);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-static int ut_reset(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
- int64_t i, v;
- const int64_t exp = 0;
-
- for (i = 0; i < 1000000; i++) {
- int r = rd_hdr_histogram_record(hdr, i);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
- }
-
- rd_hdr_histogram_reset(hdr);
-
- v = rd_hdr_histogram_max(hdr);
- RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-
-static int ut_nan(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 100000, 3);
- double v;
-
- v = rd_hdr_histogram_mean(hdr);
-        RD_UT_ASSERT(!isnan(v), "Mean is %f, expected a non-NaN value", v);
-        v = rd_hdr_histogram_stddev(hdr);
-        RD_UT_ASSERT(!isnan(v), "StdDev is %f, expected a non-NaN value", v);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-
-static int ut_sigfigs(void) {
- int sigfigs;
-
- for (sigfigs = 1; sigfigs <= 5; sigfigs++) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10, sigfigs);
- RD_UT_ASSERT(hdr->significantFigures == sigfigs,
- "Significant figures is %" PRId64 ", expected %d",
- hdr->significantFigures, sigfigs);
- rd_hdr_histogram_destroy(hdr);
- }
-
- RD_UT_PASS();
-}
-
-static int ut_minmax_trackable(void) {
- const int64_t minval = 2;
- const int64_t maxval = 11;
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(minval, maxval, 3);
-
- RD_UT_ASSERT(hdr->lowestTrackableValue == minval,
- "lowestTrackableValue is %" PRId64 ", expected %" PRId64,
- hdr->lowestTrackableValue, minval);
- RD_UT_ASSERT(hdr->highestTrackableValue == maxval,
- "highestTrackableValue is %" PRId64 ", expected %" PRId64,
- hdr->highestTrackableValue, maxval);
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-
-static int ut_unitmagnitude_overflow(void) {
- rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(0, 200, 4);
- int r = rd_hdr_histogram_record(hdr, 11);
- RD_UT_ASSERT(r, "record(11) failed\n");
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-static int ut_subbucketmask_overflow(void) {
- rd_hdr_histogram_t *hdr;
- const int64_t input[] = {(int64_t)1e8, (int64_t)2e7, (int64_t)3e7};
- const struct {
- double q;
- int64_t v;
- } exp[] = {
- {50, 33554431},
- {83.33, 33554431},
- {83.34, 100663295},
- {99, 100663295},
- };
- size_t i;
-
- hdr = rd_hdr_histogram_new((int64_t)2e7, (int64_t)1e8, 5);
-
- for (i = 0; i < RD_ARRAYSIZE(input); i++) {
-                /* All inputs are within range and must be recorded
-                 * successfully. */
- int r = rd_hdr_histogram_record(hdr, input[i]);
- RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", input[i]);
- }
-
- for (i = 0; i < RD_ARRAYSIZE(exp); i++) {
- int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q);
- RD_UT_ASSERT(v == exp[i].v,
- "P%.2f is %" PRId64 ", expected %" PRId64,
- exp[i].q, v, exp[i].v);
- }
-
- rd_hdr_histogram_destroy(hdr);
- RD_UT_PASS();
-}
-
-
-int unittest_rdhdrhistogram(void) {
- int fails = 0;
-
- fails += ut_high_sigfig();
- fails += ut_quantile();
- fails += ut_mean();
- fails += ut_stddev();
- fails += ut_totalcount();
- fails += ut_max();
- fails += ut_min();
- fails += ut_reset();
- fails += ut_nan();
- fails += ut_sigfigs();
- fails += ut_minmax_trackable();
- fails += ut_unitmagnitude_overflow();
- fails += ut_subbucketmask_overflow();
-
- return fails;
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h
deleted file mode 100644
index 868614b7b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDHDR_HISTOGRAM_H_
-#define _RDHDR_HISTOGRAM_H_
-
-#include <inttypes.h>
-
-
-typedef struct rd_hdr_histogram_s {
- int64_t lowestTrackableValue;
- int64_t highestTrackableValue;
- int64_t unitMagnitude;
- int64_t significantFigures;
- int32_t subBucketHalfCountMagnitude;
- int32_t subBucketHalfCount;
- int64_t subBucketMask;
- int32_t subBucketCount;
- int32_t bucketCount;
- int32_t countsLen;
- int64_t totalCount;
- int64_t *counts;
- int64_t outOfRangeCount; /**< Number of rejected records due to
- * value being out of range. */
- int64_t lowestOutOfRange; /**< Lowest value that was out of range.
- * Initialized to lowestTrackableValue */
- int64_t highestOutOfRange; /**< Highest value that was out of range.
- * Initialized to highestTrackableValue */
- int32_t allocatedSize; /**< Allocated size of histogram, for
- * sigfigs tuning. */
-} rd_hdr_histogram_t;
-
-
-
-void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr);
-
-/**
- * @brief Create a new Hdr_Histogram.
- *
- * @param significant_figures must be between 1..5
- *
- * @returns a newly allocated histogram, or NULL on error.
- *
- * @sa rd_hdr_histogram_destroy()
- */
-rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue,
- int64_t maxValue,
- int significantFigures);
-
-void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr);
-
-int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v);
-
-double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr);
-double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr);
-int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr);
-int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr);
-int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q);
-
-
-int unittest_rdhdrhistogram(void);
-
-#endif /* !_RDHDR_HISTOGRAM_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c
deleted file mode 100644
index 7457a7fbe..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c
+++ /dev/null
@@ -1,511 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2021 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name HTTP client
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdunittest.h"
-
-#include <stdarg.h>
-
-#include <curl/curl.h>
-#include "rdhttp.h"
-
-/** Maximum response size, increase as necessary. */
-#define RD_HTTP_RESPONSE_SIZE_MAX (1024 * 1024 * 500) /* 500MB */
-
-
-void rd_http_error_destroy(rd_http_error_t *herr) {
- rd_free(herr);
-}
-
-static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...)
- RD_FORMAT(printf, 2, 3);
-static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) {
- size_t len = 0;
- rd_http_error_t *herr;
- va_list ap;
-
- va_start(ap, fmt);
-
- if (fmt && *fmt) {
- va_list ap2;
- va_copy(ap2, ap);
- len = rd_vsnprintf(NULL, 0, fmt, ap2);
- va_end(ap2);
- }
-
- /* Use single allocation for both herr and the error string */
- herr = rd_malloc(sizeof(*herr) + len + 1);
- herr->code = code;
- herr->errstr = herr->data;
-
- if (len > 0)
- rd_vsnprintf(herr->errstr, len + 1, fmt, ap);
- else
- herr->errstr[0] = '\0';
-
- va_end(ap);
-
- return herr;
-}
-
-/**
- * @brief Same as rd_http_error_new() but reads the error string from the
- * provided buffer.
- */
-static rd_http_error_t *rd_http_error_new_from_buf(int code,
- const rd_buf_t *rbuf) {
- rd_http_error_t *herr;
- rd_slice_t slice;
- size_t len = rd_buf_len(rbuf);
-
- if (len == 0)
- return rd_http_error_new(
- code, "Server did not provide an error string");
-
-
- /* Use single allocation for both herr and the error string */
- herr = rd_malloc(sizeof(*herr) + len + 1);
- herr->code = code;
- herr->errstr = herr->data;
- rd_slice_init_full(&slice, rbuf);
- rd_slice_read(&slice, herr->errstr, len);
- herr->errstr[len] = '\0';
-
- return herr;
-}
-
-void rd_http_req_destroy(rd_http_req_t *hreq) {
- RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup);
- RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy_free);
-}
-
-
-/**
- * @brief Curl writefunction. Writes the bytes passed from curl
- * to the hreq's buffer.
- */
-static size_t
-rd_http_req_write_cb(char *ptr, size_t size, size_t nmemb, void *userdata) {
- rd_http_req_t *hreq = (rd_http_req_t *)userdata;
-
- if (unlikely(rd_buf_len(hreq->hreq_buf) + nmemb >
- RD_HTTP_RESPONSE_SIZE_MAX))
- return 0; /* FIXME: Set some overflow flag or rely on curl? */
-
- rd_buf_write(hreq->hreq_buf, ptr, nmemb);
-
- return nmemb;
-}
-
-rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) {
-
- memset(hreq, 0, sizeof(*hreq));
-
- hreq->hreq_curl = curl_easy_init();
- if (!hreq->hreq_curl)
- return rd_http_error_new(-1, "Failed to create curl handle");
-
- hreq->hreq_buf = rd_buf_new(1, 1024);
-
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_URL, url);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_PROTOCOLS,
- CURLPROTO_HTTP | CURLPROTO_HTTPS);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_MAXREDIRS, 16);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_TIMEOUT, 30);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_ERRORBUFFER,
- hreq->hreq_curl_errstr);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_NOSIGNAL, 1);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEFUNCTION,
- rd_http_req_write_cb);
- curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEDATA, (void *)hreq);
-
- return NULL;
-}
-
-/**
- * @brief Synchronously (blockingly) perform the HTTP operation.
- */
-rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq) {
- CURLcode res;
- long code = 0;
-
- res = curl_easy_perform(hreq->hreq_curl);
- if (unlikely(res != CURLE_OK))
- return rd_http_error_new(-1, "%s", hreq->hreq_curl_errstr);
-
- curl_easy_getinfo(hreq->hreq_curl, CURLINFO_RESPONSE_CODE, &code);
- hreq->hreq_code = (int)code;
- if (hreq->hreq_code >= 400)
- return rd_http_error_new_from_buf(hreq->hreq_code,
- hreq->hreq_buf);
-
- return NULL;
-}
-
-
-int rd_http_req_get_code(const rd_http_req_t *hreq) {
- return hreq->hreq_code;
-}
-
-const char *rd_http_req_get_content_type(rd_http_req_t *hreq) {
- const char *content_type = NULL;
-
- if (curl_easy_getinfo(hreq->hreq_curl, CURLINFO_CONTENT_TYPE,
- &content_type))
- return NULL;
-
- return content_type;
-}
-
-
-/**
- * @brief Perform a blocking HTTP(S) request to \p url.
- *
- * On success (HTTP response code < 400) NULL is returned and the
- * response body is returned in \p *rbufp.
- *
- * On transport or HTTP error an error object is returned which must be
- * destroyed by calling rd_http_error_destroy(); in the HTTP error case
- * the error response body is used as the error string and \p *rbufp is
- * left set to NULL.
- */
-rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) {
- rd_http_req_t hreq;
- rd_http_error_t *herr;
-
- *rbufp = NULL;
-
- herr = rd_http_req_init(&hreq, url);
- if (unlikely(herr != NULL))
- return herr;
-
- herr = rd_http_req_perform_sync(&hreq);
- if (herr) {
- rd_http_req_destroy(&hreq);
- return herr;
- }
-
- *rbufp = hreq.hreq_buf;
- hreq.hreq_buf = NULL;
-
- return NULL;
-}
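-
-/* Usage sketch (illustrative only; the URL is a placeholder):
- *
- *   rd_buf_t *rbuf = NULL;
- *   rd_http_error_t *herr = rd_http_get("https://example.com", &rbuf);
- *   if (herr) {
- *           // Transport or HTTP error: rbuf is left as NULL.
- *           rd_http_error_destroy(herr);
- *   } else {
- *           // Consume the response body in rbuf, then free it.
- *           rd_buf_destroy_free(rbuf);
- *   }
- */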
-
-
-/**
- * @brief Extract the JSON object from \p hreq and return it in \p *jsonp.
- *
- * @returns NULL on success, or a JSON parsing error - this
- * error object must be destroyed by calling rd_http_error_destroy().
- */
-rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp) {
- size_t len;
- char *raw_json;
- const char *end = NULL;
- rd_slice_t slice;
- rd_http_error_t *herr = NULL;
-
- /* cJSON requires the entire input to parse in contiguous memory. */
- rd_slice_init_full(&slice, hreq->hreq_buf);
- len = rd_buf_len(hreq->hreq_buf);
-
- raw_json = rd_malloc(len + 1);
- rd_slice_read(&slice, raw_json, len);
- raw_json[len] = '\0';
-
- /* Parse JSON */
- *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0);
-
- if (!*jsonp)
- herr = rd_http_error_new(hreq->hreq_code,
- "Failed to parse JSON response "
- "at %" PRIusz "/%" PRIusz,
- (size_t)(end - raw_json), len);
- rd_free(raw_json);
- return herr;
-}
-
-
-/**
- * @brief Check if the error returned from HTTP(S) is temporary or not.
- *
- * @returns If the \p error_code is temporary, return rd_true,
- * otherwise return rd_false.
- *
- * @locality Any thread.
- */
-static rd_bool_t rd_http_is_failure_temporary(int error_code) {
- switch (error_code) {
- case 408: /**< Request timeout */
- case 425: /**< Too early */
- case 500: /**< Internal server error */
- case 502: /**< Bad gateway */
- case 503: /**< Service unavailable */
- case 504: /**< Gateway timeout */
- return rd_true;
-
- default:
- return rd_false;
- }
-}
-
-
-/**
- * @brief Perform a blocking HTTP(S) POST request to \p url with the
- *        given HTTP(S) headers and POST data, using \p timeout_s as the
- *        request timeout. If the request fails with a temporary error it
- *        is retried up to \p retries more times, with a linearly
- *        increasing backoff based on \p retry_ms.
- *
- * @returns NULL on success (HTTP response code < 400), with the parsed
- *          JSON response returned in \p *jsonp, or an error object on
- *          transport, HTTP or JSON parsing errors - this error object
- *          must be destroyed by calling rd_http_error_destroy().
- *
- * @locality Any thread.
- */
-rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
- const char *url,
- const struct curl_slist *headers,
- const char *post_fields,
- size_t post_fields_size,
- int timeout_s,
- int retries,
- int retry_ms,
- cJSON **jsonp) {
- rd_http_error_t *herr;
- rd_http_req_t hreq;
- int i;
- size_t len;
- const char *content_type;
-
- herr = rd_http_req_init(&hreq, url);
- if (unlikely(herr != NULL))
- return herr;
-
- curl_easy_setopt(hreq.hreq_curl, CURLOPT_HTTPHEADER, headers);
- curl_easy_setopt(hreq.hreq_curl, CURLOPT_TIMEOUT, timeout_s);
-
- curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDSIZE,
- post_fields_size);
- curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDS, post_fields);
-
- for (i = 0; i <= retries; i++) {
- if (rd_kafka_terminating(rk)) {
- rd_http_req_destroy(&hreq);
- return rd_http_error_new(-1, "Terminating");
- }
-
- herr = rd_http_req_perform_sync(&hreq);
- len = rd_buf_len(hreq.hreq_buf);
-
- if (!herr) {
- if (len > 0)
- break; /* Success */
- /* Empty response */
- rd_http_req_destroy(&hreq);
- return NULL;
- }
- /* Retry if HTTP(S) request returns temporary error and there
- * are remaining retries, else fail. */
- if (i == retries || !rd_http_is_failure_temporary(herr->code)) {
- rd_http_req_destroy(&hreq);
- return herr;
- }
-
- /* Retry */
- rd_http_error_destroy(herr);
- rd_usleep(retry_ms * 1000 * (i + 1), &rk->rk_terminate);
- }
-
- content_type = rd_http_req_get_content_type(&hreq);
-
- if (!content_type || rd_strncasecmp(content_type, "application/json",
- strlen("application/json"))) {
- if (!herr)
- herr = rd_http_error_new(
- hreq.hreq_code, "Response is not JSON encoded: %s",
- content_type ? content_type : "(n/a)");
- rd_http_req_destroy(&hreq);
- return herr;
- }
-
- herr = rd_http_parse_json(&hreq, jsonp);
-
- rd_http_req_destroy(&hreq);
-
- return herr;
-}
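-
-/* The retry backoff above is linearly multiplying: failed attempt i
- * (starting at 0) sleeps retry_ms * (i + 1) milliseconds, e.g. with
- * retry_ms=500 the delays are 500 ms, 1000 ms, 1500 ms, ... */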
-
-
-/**
- * @brief Same as rd_http_get() but requires a JSON response.
- * The response is parsed and a JSON object is returned in \p *jsonp.
- *
- * Same error semantics as rd_http_get().
- */
-rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) {
- rd_http_req_t hreq;
- rd_http_error_t *herr;
- rd_slice_t slice;
- size_t len;
- const char *content_type;
- char *raw_json;
- const char *end;
-
- *jsonp = NULL;
-
- herr = rd_http_req_init(&hreq, url);
- if (unlikely(herr != NULL))
- return herr;
-
- // FIXME: send Accept: json.. header?
-
- herr = rd_http_req_perform_sync(&hreq);
- len = rd_buf_len(hreq.hreq_buf);
- if (herr && len == 0) {
- rd_http_req_destroy(&hreq);
- return herr;
- }
-
- if (len == 0) {
- /* Empty response: create empty JSON object */
- *jsonp = cJSON_CreateObject();
- rd_http_req_destroy(&hreq);
- return NULL;
- }
-
- content_type = rd_http_req_get_content_type(&hreq);
-
- if (!content_type || rd_strncasecmp(content_type, "application/json",
- strlen("application/json"))) {
- if (!herr)
- herr = rd_http_error_new(
- hreq.hreq_code, "Response is not JSON encoded: %s",
- content_type ? content_type : "(n/a)");
- rd_http_req_destroy(&hreq);
- return herr;
- }
-
- /* cJSON requires the entire input to parse in contiguous memory. */
- rd_slice_init_full(&slice, hreq.hreq_buf);
- raw_json = rd_malloc(len + 1);
- rd_slice_read(&slice, raw_json, len);
- raw_json[len] = '\0';
-
- /* Parse JSON */
- end = NULL;
- *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0);
- if (!*jsonp && !herr)
- herr = rd_http_error_new(hreq.hreq_code,
- "Failed to parse JSON response "
- "at %" PRIusz "/%" PRIusz,
- (size_t)(end - raw_json), len);
-
- rd_free(raw_json);
- rd_http_req_destroy(&hreq);
-
- return herr;
-}
-
-
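-/**
- * @brief Process-wide initialization of the HTTP client.
- *
- * Wraps curl_global_init(), which historically is not thread-safe and
- * should therefore be called before any other threads are created.
- */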
-void rd_http_global_init(void) {
- curl_global_init(CURL_GLOBAL_DEFAULT);
-}
-
-
-/**
- * @brief Unittest. Requires a (local) webserver whose base URL is set
- *        with the env var RD_UT_HTTP_URL=http://localhost:1234/some-path
- *
- * This server must return a JSON object or array containing at least one
- * object at the base URL with a 2xx response code, and a 4xx response
- * at $RD_UT_HTTP_URL/error (with any type of body).
- */
-
-int unittest_http(void) {
- const char *base_url = rd_getenv("RD_UT_HTTP_URL", NULL);
- char *error_url;
- size_t error_url_size;
- cJSON *json, *jval;
- rd_http_error_t *herr;
- rd_bool_t empty;
-
- if (!base_url || !*base_url)
- RD_UT_SKIP("RD_UT_HTTP_URL environment variable not set");
-
- RD_UT_BEGIN();
-
- error_url_size = strlen(base_url) + strlen("/error") + 1;
- error_url = rd_alloca(error_url_size);
- rd_snprintf(error_url, error_url_size, "%s/error", base_url);
-
- /* Try the base url first, parse its JSON and extract a key-value. */
- json = NULL;
- herr = rd_http_get_json(base_url, &json);
- RD_UT_ASSERT(!herr, "Expected get_json(%s) to succeed, got: %s",
- base_url, herr->errstr);
-
- empty = rd_true;
- cJSON_ArrayForEach(jval, json) {
- empty = rd_false;
- break;
- }
- RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s",
- base_url);
- RD_UT_SAY(
- "URL %s returned no error and a non-empty "
- "JSON object/array as expected",
- base_url);
- cJSON_Delete(json);
-
-
- /* Try the error URL, verify error code. */
- json = NULL;
- herr = rd_http_get_json(error_url, &json);
- RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url);
- RD_UT_ASSERT(herr->code >= 400,
- "Expected get_json(%s) error code >= "
- "400, got %d",
- error_url, herr->code);
- RD_UT_SAY(
- "Error URL %s returned code %d, errstr \"%s\" "
- "and %s JSON object as expected",
- error_url, herr->code, herr->errstr, json ? "a" : "no");
- /* Check if there's a JSON document returned */
- if (json)
- cJSON_Delete(json);
- rd_http_error_destroy(herr);
-
- RD_UT_PASS();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h
deleted file mode 100644
index 80512e5ac..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2021 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDHTTP_H_
-#define _RDHTTP_H_
-
-#define CJSON_HIDE_SYMBOLS
-#include "cJSON.h"
-
-
-typedef struct rd_http_error_s {
- int code;
- char *errstr;
- char data[1]; /**< This is where the error string begins. */
-} rd_http_error_t;
-
-void rd_http_error_destroy(rd_http_error_t *herr);
-
-rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp);
-rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp);
-
-void rd_http_global_init(void);
-
-
-
-#ifdef LIBCURL_VERSION
-/* Advanced API that exposes the underlying CURL handle.
- * Requires caller to have included curl.h prior to this file. */
-
-
-typedef struct rd_http_req_s {
- CURL *hreq_curl; /**< CURL handle */
- rd_buf_t *hreq_buf; /**< Response buffer */
- int hreq_code; /**< HTTP response code */
- char hreq_curl_errstr[CURL_ERROR_SIZE]; /**< Error string for curl to
- * write to. */
-} rd_http_req_t;
-
-rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url);
-rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq);
-rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp);
-rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
- const char *url,
- const struct curl_slist *headers,
-                                           const char *post_fields,
-                                           size_t post_fields_size,
-                                           int timeout_s,
-                                           int retries,
- int retry_ms,
- cJSON **jsonp);
-void rd_http_req_destroy(rd_http_req_t *hreq);
-
-#endif
-
-
-
-#endif /* _RDHTTP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h
deleted file mode 100644
index 428337646..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDINTERVAL_H_
-#define _RDINTERVAL_H_
-
-#include "rd.h"
-
-typedef struct rd_interval_s {
- rd_ts_t ri_ts_last; /* last interval timestamp */
- rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */
- int ri_backoff; /* back off the next interval by this much */
-} rd_interval_t;
-
-
-static RD_INLINE RD_UNUSED void rd_interval_init(rd_interval_t *ri) {
- memset(ri, 0, sizeof(*ri));
-}
-
-
-
-/**
- * Returns the number of microseconds the interval has been over-shot.
- * If the return value is >0 (i.e., it is time for the next interval to
- * trigger) then the interval timestamp is updated to the current time.
- *
- * The current time can be provided in 'now', or if this is set to 0 the time
- * will be gathered automatically.
- *
- * If 'interval_us' is set to 0 the fixed interval will be used, see
- * 'rd_interval_fixed()'.
- *
- * If this is the first time rd_interval() is called after an _init() or
- * _reset() or the \p immediate parameter is true, then a positive value
- * will be returned immediately even though the initial interval has not
- * passed.
- */
-#define rd_interval(ri, interval_us, now) rd_interval0(ri, interval_us, now, 0)
-#define rd_interval_immediate(ri, interval_us, now) \
- rd_interval0(ri, interval_us, now, 1)
-static RD_INLINE RD_UNUSED rd_ts_t rd_interval0(rd_interval_t *ri,
- rd_ts_t interval_us,
- rd_ts_t now,
- int immediate) {
- rd_ts_t diff;
-
- if (!now)
- now = rd_clock();
- if (!interval_us)
- interval_us = ri->ri_fixed;
-
- if (ri->ri_ts_last || !immediate) {
- diff = now - (ri->ri_ts_last + interval_us + ri->ri_backoff);
- } else
- diff = 1;
- if (unlikely(diff > 0)) {
- ri->ri_ts_last = now;
- ri->ri_backoff = 0;
- }
-
- return diff;
-}
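-
-/* Usage sketch (illustrative only; `running` and do_periodic_work() are
- * hypothetical):
- *
- *   rd_interval_t ri;
- *   rd_interval_init(&ri);
- *   while (running) {
- *           if (rd_interval(&ri, 1000000, 0) > 0) // at most once a second
- *                   do_periodic_work();
- *   }
- */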
-
-
-/**
- * Reset the interval to zero, i.e., the next call to rd_interval()
- * will be immediate.
- */
-static RD_INLINE RD_UNUSED void rd_interval_reset(rd_interval_t *ri) {
- ri->ri_ts_last = 0;
- ri->ri_backoff = 0;
-}
-
-/**
- * Reset the interval to 'now'. If now is 0, the time will be gathered
- * automatically.
- */
-static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri,
- rd_ts_t now) {
- if (!now)
- now = rd_clock();
-
- ri->ri_ts_last = now;
- ri->ri_backoff = 0;
-}
-
-/**
- * Back off the next interval by `backoff_us` microseconds.
- */
-static RD_INLINE RD_UNUSED void rd_interval_backoff(rd_interval_t *ri,
- int backoff_us) {
- ri->ri_backoff = backoff_us;
-}
-
-/**
- * Expedite (speed up) the next interval by `expedite_us` microseconds.
- * If `expedite_us` is 0 the interval will be set to trigger
- * immediately on the next rd_interval() call.
- */
-static RD_INLINE RD_UNUSED void rd_interval_expedite(rd_interval_t *ri,
- int expedite_us) {
- if (!expedite_us)
- ri->ri_ts_last = 0;
- else
- ri->ri_backoff = -expedite_us;
-}
-
-/**
- * Specifies a fixed interval to use if rd_interval() is called with
- * `interval_us` set to 0.
- */
-static RD_INLINE RD_UNUSED void rd_interval_fixed(rd_interval_t *ri,
- rd_ts_t fixed_us) {
- ri->ri_fixed = fixed_us;
-}
-
-/**
- * Disables the interval (until rd_interval_init()/reset() is called).
- * A disabled interval will never return a positive value from
- * rd_interval().
- */
-static RD_INLINE RD_UNUSED void rd_interval_disable(rd_interval_t *ri) {
- /* Set last beat to a large value a long time in the future. */
- ri->ri_ts_last = 6000000000000000000LL; /* in about 190000 years */
-}
-
-/**
- * Returns true if the interval is disabled.
- */
-static RD_INLINE RD_UNUSED int rd_interval_disabled(const rd_interval_t *ri) {
- return ri->ri_ts_last == 6000000000000000000LL;
-}
-
-#endif /* _RDINTERVAL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c
deleted file mode 100644
index b254748eb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c
+++ /dev/null
@@ -1,5026 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#define _GNU_SOURCE
-#include <errno.h>
-#include <string.h>
-#include <stdarg.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-#if !_WIN32
-#include <sys/types.h>
-#include <dirent.h>
-#endif
-
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_request.h"
-#include "rdkafka_event.h"
-#include "rdkafka_error.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_interceptor.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_sasl_oauthbearer.h"
-#if WITH_OAUTHBEARER_OIDC
-#include "rdkafka_sasl_oauthbearer_oidc.h"
-#endif
-#if WITH_SSL
-#include "rdkafka_ssl.h"
-#endif
-
-#include "rdtime.h"
-#include "crc32c.h"
-#include "rdunittest.h"
-
-#ifdef _WIN32
-#include <sys/types.h>
-#include <sys/timeb.h>
-#endif
-
-#define CJSON_HIDE_SYMBOLS
-#include "cJSON.h"
-
-#if WITH_CURL
-#include "rdhttp.h"
-#endif
-
-
-static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT;
-static once_flag rd_kafka_global_srand_once = ONCE_FLAG_INIT;
-
-/**
- * @brief Global counter+lock for all active librdkafka instances
- */
-mtx_t rd_kafka_global_lock;
-int rd_kafka_global_cnt;
-
-
-/**
- * Last API error code, per thread.
- * Shared among all rd_kafka_t instances.
- */
-rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code;
-
-
-/**
- * Current number of threads created by rdkafka.
- * This is used in regression tests.
- */
-rd_atomic32_t rd_kafka_thread_cnt_curr;
-int rd_kafka_thread_cnt(void) {
- return rd_atomic32_get(&rd_kafka_thread_cnt_curr);
-}
-
-/**
- * Current thread's log name (TLS)
- */
-char RD_TLS rd_kafka_thread_name[64] = "app";
-
-void rd_kafka_set_thread_name(const char *fmt, ...) {
- va_list ap;
-
- va_start(ap, fmt);
- rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), fmt,
- ap);
- va_end(ap);
-}
-
-/**
- * @brief Current thread's system name (TLS)
- *
- * Note the name must be 15 characters or less, because it is passed to
- * pthread_setname_np on Linux which imposes this limit.
- */
-static char RD_TLS rd_kafka_thread_sysname[16] = "app";
-
-void rd_kafka_set_thread_sysname(const char *fmt, ...) {
- va_list ap;
-
- va_start(ap, fmt);
- rd_vsnprintf(rd_kafka_thread_sysname, sizeof(rd_kafka_thread_sysname),
- fmt, ap);
- va_end(ap);
-
- thrd_setname(rd_kafka_thread_sysname);
-}
-
-static void rd_kafka_global_init0(void) {
- cJSON_Hooks json_hooks = {.malloc_fn = rd_malloc, .free_fn = rd_free};
-
- mtx_init(&rd_kafka_global_lock, mtx_plain);
-#if ENABLE_DEVEL
- rd_atomic32_init(&rd_kafka_op_cnt, 0);
-#endif
- rd_crc32c_global_init();
-#if WITH_SSL
- /* The configuration interface might need to use
- * OpenSSL to parse keys, prior to any rd_kafka_t
- * object has been created. */
- rd_kafka_ssl_init();
-#endif
-
- cJSON_InitHooks(&json_hooks);
-
-#if WITH_CURL
- rd_http_global_init();
-#endif
-}
-
-/**
- * @brief Initialize once per process
- */
-void rd_kafka_global_init(void) {
- call_once(&rd_kafka_global_init_once, rd_kafka_global_init0);
-}
-
-
-/**
- * @brief Seed the PRNG with the current time's millisecond fraction.
- */
-static void rd_kafka_global_srand(void) {
- struct timeval tv;
-
- rd_gettimeofday(&tv, NULL);
-
- srand((unsigned int)(tv.tv_usec / 1000));
-}
-
-
-/**
- * @returns the current number of active librdkafka instances
- */
-static int rd_kafka_global_cnt_get(void) {
- int r;
- mtx_lock(&rd_kafka_global_lock);
- r = rd_kafka_global_cnt;
- mtx_unlock(&rd_kafka_global_lock);
- return r;
-}
-
-
-/**
- * @brief Increase counter for active librdkafka instances.
- * If this is the first instance the global constructors will be called, if any.
- */
-static void rd_kafka_global_cnt_incr(void) {
- mtx_lock(&rd_kafka_global_lock);
- rd_kafka_global_cnt++;
- if (rd_kafka_global_cnt == 1) {
- rd_kafka_transport_init();
-#if WITH_SSL
- rd_kafka_ssl_init();
-#endif
- rd_kafka_sasl_global_init();
- }
- mtx_unlock(&rd_kafka_global_lock);
-}
-
-/**
- * @brief Decrease counter for active librdkafka instances.
- * If this counter reaches 0 the global destructors will be called, if any.
- */
-static void rd_kafka_global_cnt_decr(void) {
- mtx_lock(&rd_kafka_global_lock);
- rd_kafka_assert(NULL, rd_kafka_global_cnt > 0);
- rd_kafka_global_cnt--;
- if (rd_kafka_global_cnt == 0) {
- rd_kafka_sasl_global_term();
-#if WITH_SSL
- rd_kafka_ssl_term();
-#endif
- }
- mtx_unlock(&rd_kafka_global_lock);
-}
-
-
-/**
- * Wait for all rd_kafka_t objects to be destroyed.
- * Returns 0 if all kafka objects are now destroyed, or -1 if the
- * timeout was reached.
- */
-int rd_kafka_wait_destroyed(int timeout_ms) {
- rd_ts_t timeout = rd_clock() + (timeout_ms * 1000);
-
- while (rd_kafka_thread_cnt() > 0 || rd_kafka_global_cnt_get() > 0) {
- if (rd_clock() >= timeout) {
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
- ETIMEDOUT);
- return -1;
- }
- rd_usleep(25000, NULL); /* 25ms */
- }
-
- return 0;
-}
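-
-/* Typical shutdown sequence (illustrative sketch):
- *
- *   rd_kafka_destroy(rk);
- *   if (rd_kafka_wait_destroyed(5000) == -1)
- *           fprintf(stderr, "rdkafka threads still alive after 5s\n");
- */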
-
-static void rd_kafka_log_buf(const rd_kafka_conf_t *conf,
- const rd_kafka_t *rk,
- int level,
- int ctx,
- const char *fac,
- const char *buf) {
- if (level > conf->log_level)
- return;
- else if (rk && conf->log_queue) {
- rd_kafka_op_t *rko;
-
- if (!rk->rk_logq)
- return; /* Terminating */
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_LOG);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_MEDIUM);
- rko->rko_u.log.level = level;
- rd_strlcpy(rko->rko_u.log.fac, fac, sizeof(rko->rko_u.log.fac));
- rko->rko_u.log.str = rd_strdup(buf);
- rko->rko_u.log.ctx = ctx;
- rd_kafka_q_enq(rk->rk_logq, rko);
-
- } else if (conf->log_cb) {
- conf->log_cb(rk, level, fac, buf);
- }
-}
-
-/**
- * @brief Logger
- *
- * @remark conf must be set, but rk may be NULL
- */
-void rd_kafka_log0(const rd_kafka_conf_t *conf,
- const rd_kafka_t *rk,
- const char *extra,
- int level,
- int ctx,
- const char *fac,
- const char *fmt,
- ...) {
- char buf[2048];
- va_list ap;
- unsigned int elen = 0;
- unsigned int of = 0;
-
- if (level > conf->log_level)
- return;
-
- if (conf->log_thread_name) {
- elen = rd_snprintf(buf, sizeof(buf),
- "[thrd:%s]: ", rd_kafka_thread_name);
- if (unlikely(elen >= sizeof(buf)))
- elen = sizeof(buf);
- of = elen;
- }
-
- if (extra) {
- elen = rd_snprintf(buf + of, sizeof(buf) - of, "%s: ", extra);
- if (unlikely(elen >= sizeof(buf) - of))
- elen = sizeof(buf) - of;
- of += elen;
- }
-
- va_start(ap, fmt);
- rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap);
- va_end(ap);
-
- rd_kafka_log_buf(conf, rk, level, ctx, fac, buf);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
- const char *token_value,
- int64_t md_lifetime_ms,
- const char *md_principal_name,
- const char **extensions,
- size_t extension_size,
- char *errstr,
- size_t errstr_size) {
-#if WITH_SASL_OAUTHBEARER
- return rd_kafka_oauthbearer_set_token0(
- rk, token_value, md_lifetime_ms, md_principal_name, extensions,
- extension_size, errstr, errstr_size);
-#else
- rd_snprintf(errstr, errstr_size,
- "librdkafka not built with SASL OAUTHBEARER support");
- return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-#endif
-}
-
-rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
- const char *errstr) {
-#if WITH_SASL_OAUTHBEARER
- return rd_kafka_oauthbearer_set_token_failure0(rk, errstr);
-#else
- return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-#endif
-}
-
-void rd_kafka_log_print(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf) {
- int secs, msecs;
- struct timeval tv;
- rd_gettimeofday(&tv, NULL);
- secs = (int)tv.tv_sec;
- msecs = (int)(tv.tv_usec / 1000);
- fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", level, secs, msecs, fac,
- rk ? rk->rk_name : "", buf);
-}
-
-void rd_kafka_log_syslog(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf) {
-#if WITH_SYSLOG
- static int initialized = 0;
-
- if (!initialized)
- openlog("rdkafka", LOG_PID | LOG_CONS, LOG_USER);
-
- syslog(level, "%s: %s: %s", fac, rk ? rk->rk_name : "", buf);
-#else
- rd_assert(!*"syslog support not enabled in this build");
-#endif
-}
-
-void rd_kafka_set_logger(rd_kafka_t *rk,
- void (*func)(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf)) {
-#if !WITH_SYSLOG
- if (func == rd_kafka_log_syslog)
- rd_assert(!*"syslog support not enabled in this build");
-#endif
- rk->rk_conf.log_cb = func;
-}
-
-void rd_kafka_set_log_level(rd_kafka_t *rk, int level) {
- rk->rk_conf.log_level = level;
-}
-
-
-
-static const char *rd_kafka_type2str(rd_kafka_type_t type) {
- static const char *types[] = {
- [RD_KAFKA_PRODUCER] = "producer",
- [RD_KAFKA_CONSUMER] = "consumer",
- };
- return types[type];
-}
-
-#define _ERR_DESC(ENUM, DESC) \
- [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = {ENUM, &(#ENUM)[18] /*pfx*/, DESC}
-
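-/* In _ERR_DESC above, &(#ENUM)[18] stringifies the enum constant and
- * skips its first 18 characters, i.e. the "RD_KAFKA_RESP_ERR_" prefix,
- * so e.g. RD_KAFKA_RESP_ERR__BAD_MSG is described under the name
- * "_BAD_MSG". */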
-static const struct rd_kafka_err_desc rd_kafka_err_descs[] = {
- _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL),
- _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG, "Local: Bad message format"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION,
- "Local: Invalid compressed data"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY, "Local: Broker handle destroyed"),
- _ERR_DESC(
- RD_KAFKA_RESP_ERR__FAIL,
- "Local: Communication failure with broker"), // FIXME: too specific
- _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT, "Local: Broker transport failure"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
- "Local: Critical system resource failure"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE, "Local: Host resolution failure"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, "Local: Message timed out"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF, "Broker: No more messages"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, "Local: Unknown partition"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__FS, "Local: File or filesystem error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC, "Local: Unknown topic"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
- "Local: All broker connections are down"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Local: Invalid argument or configuration"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT, "Local: Timed out"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL, "Local: Queue full"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF, "Local: ISR count insufficient"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE, "Local: Broker node update"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__SSL, "Local: SSL error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD, "Local: Waiting for coordinator"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, "Local: Unknown group"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS, "Local: Operation in progress"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
- "Local: Previous operation in progress"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION,
- "Local: Existing subscription"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, "Local: Assign partitions"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, "Local: Revoke partitions"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT, "Local: Conflicting use"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__STATE, "Local: Erroneous state"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL, "Local: Unknown protocol"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, "Local: Not implemented"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION,
- "Local: Authentication failure"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET, "Local: No offset stored"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED, "Local: Outdated"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, "Local: Timed out in queue"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "Local: Required feature not supported by broker"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE, "Local: Awaiting cache update"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__INTR, "Local: Operation interrupted"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION,
- "Local: Key serialization error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION,
- "Local: Value serialization error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION,
- "Local: Key deserialization error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION,
- "Local: Value deserialization error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL, "Local: Partial response"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY, "Local: Read-only object"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT, "Local: No such entry"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW, "Local: Read underflow"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE, "Local: Invalid type"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY, "Local: Retry operation"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE, "Local: Purged in queue"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, "Local: Purged in flight"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL, "Local: Fatal error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT, "Local: Inconsistent state"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE,
- "Local: Gap-less ordering would not be guaranteed "
- "if proceeding"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED,
- "Local: Maximum application poll interval "
- "(max.poll.interval.ms) exceeded"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_BROKER, "Local: Unknown broker"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
- "Local: Functionality not configured"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__FENCED,
- "Local: This instance has been fenced by a newer instance"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__APPLICATION,
- "Local: Application generated error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST,
- "Local: Group partition assignment lost"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP, "Local: No operation performed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET,
- "Local: No offset to automatically reset to"),
- _ERR_DESC(RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
- "Local: Partition log truncation detected"),
-
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, "Unknown broker error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, "Success"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE,
- "Broker: Offset out of range"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG, "Broker: Invalid message"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
- "Broker: Unknown topic or partition"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
- "Broker: Invalid message size"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE,
- "Broker: Leader not available"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
- "Broker: Not leader for partition"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, "Broker: Request timed out"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE,
- "Broker: Broker not available"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE,
- "Broker: Replica not available"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
- "Broker: Message size too large"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH,
- "Broker: StaleControllerEpochCode"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
- "Broker: Offset metadata string too large"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION,
- "Broker: Broker disconnected before response received"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
- "Broker: Coordinator load in progress"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- "Broker: Coordinator not available"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR, "Broker: Not coordinator"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION, "Broker: Invalid topic"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE,
- "Broker: Message batch larger than configured server "
- "segment size"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS,
- "Broker: Not enough in-sync replicas"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND,
- "Broker: Message(s) written to insufficient number of "
- "in-sync replicas"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS,
- "Broker: Invalid required acks value"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- "Broker: Specified group generation id is not valid"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL,
- "Broker: Inconsistent group protocol"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, "Broker: Invalid group.id"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, "Broker: Unknown member"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT,
- "Broker: Invalid session timeout"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- "Broker: Group rebalance in progress"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
- "Broker: Commit offset data size is not valid"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
- "Broker: Topic authorization failed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
- "Broker: Group authorization failed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED,
- "Broker: Cluster authorization failed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP, "Broker: Invalid timestamp"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM,
- "Broker: Unsupported SASL mechanism"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE,
- "Broker: Request not valid in current SASL state"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION,
- "Broker: API version not supported"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS,
- "Broker: Topic already exists"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS,
- "Broker: Invalid number of partitions"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR,
- "Broker: Invalid replication factor"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT,
- "Broker: Invalid replica assignment"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG,
- "Broker: Configuration is invalid"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER,
- "Broker: Not controller for cluster"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST, "Broker: Invalid request"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT,
- "Broker: Message format on broker does not support request"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION, "Broker: Policy violation"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
- "Broker: Broker received an out of order sequence number"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER,
- "Broker: Broker received a duplicate sequence number"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH,
- "Broker: Producer attempted an operation with an old epoch"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
- "Broker: Producer attempted a transactional operation in "
- "an invalid state"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
- "Broker: Producer attempted to use a producer id which is "
- "not currently assigned to its transactional id"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
- "Broker: Transaction timeout is larger than the maximum "
- "value allowed by the broker's max.transaction.timeout.ms"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- "Broker: Producer attempted to update a transaction while "
- "another concurrent operation on the same transaction was "
- "ongoing"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED,
- "Broker: Indicates that the transaction coordinator sending "
- "a WriteTxnMarker is no longer the current coordinator for "
- "a given producer"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
- "Broker: Transactional Id authorization failed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED,
- "Broker: Security features are disabled"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED,
- "Broker: Operation not attempted"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
- "Broker: Disk error when trying to access log file on disk"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND,
- "Broker: The user-specified log directory is not found "
- "in the broker config"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED,
- "Broker: SASL Authentication failed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
- "Broker: Unknown Producer Id"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS,
- "Broker: Partition reassignment is in progress"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED,
- "Broker: Delegation Token feature is not enabled"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND,
- "Broker: Delegation Token is not found on server"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH,
- "Broker: Specified Principal is not valid Owner/Renewer"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED,
- "Broker: Delegation Token requests are not allowed on "
- "this connection"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED,
- "Broker: Delegation Token authorization failed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED,
- "Broker: Delegation Token is expired"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE,
- "Broker: Supplied principalType is not supported"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP,
- "Broker: The group is not empty"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND,
- "Broker: The group id does not exist"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND,
- "Broker: The fetch session ID was not found"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH,
- "Broker: The fetch session epoch is invalid"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND,
- "Broker: No matching listener"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED,
- "Broker: Topic deletion is disabled"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
- "Broker: Leader epoch is older than broker epoch"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
- "Broker: Leader epoch is newer than broker epoch"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE,
- "Broker: Unsupported compression type"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH,
- "Broker: Broker epoch has changed"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
- "Broker: Leader high watermark is not caught up"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED,
- "Broker: Group member needs a valid member ID"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE,
- "Broker: Preferred leader was not available"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED,
- "Broker: Consumer group has reached maximum size"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
- "Broker: Static consumer fenced by other consumer with same "
- "group.instance.id"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE,
- "Broker: Eligible partition leaders are not available"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED,
- "Broker: Leader election not needed for topic partition"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS,
- "Broker: No partition reassignment is in progress"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC,
- "Broker: Deleting offsets of a topic while the consumer "
- "group is subscribed to it"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_RECORD,
- "Broker: Broker failed to validate record"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- "Broker: There are unstable offsets that need to be cleared"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED,
- "Broker: Throttling quota has been exceeded"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_PRODUCER_FENCED,
- "Broker: There is a newer producer with the same "
- "transactionalId which fences the current one"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND,
- "Broker: Request illegally referred to resource that "
- "does not exist"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE,
- "Broker: Request illegally referred to the same resource "
- "twice"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL,
- "Broker: Requested credential would not meet criteria for "
- "acceptability"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET,
- "Broker: Indicates that the either the sender or recipient "
- "of a voter-only request is not one of the expected voters"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION,
- "Broker: Invalid update version"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED,
- "Broker: Unable to update finalized features due to "
- "server error"),
- _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE,
- "Broker: Request principal deserialization failed during "
- "forwarding"),
-
- _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)};
-
-
-void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs,
- size_t *cntp) {
- *errdescs = rd_kafka_err_descs;
- *cntp = RD_ARRAYSIZE(rd_kafka_err_descs);
-}
-
-
-const char *rd_kafka_err2str(rd_kafka_resp_err_t err) {
- static RD_TLS char ret[32];
- int idx = err - RD_KAFKA_RESP_ERR__BEGIN;
-
- if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
- err >= RD_KAFKA_RESP_ERR_END_ALL ||
- !rd_kafka_err_descs[idx].desc)) {
- rd_snprintf(ret, sizeof(ret), "Err-%i?", err);
- return ret;
- }
-
- return rd_kafka_err_descs[idx].desc;
-}
-
-
-const char *rd_kafka_err2name(rd_kafka_resp_err_t err) {
- static RD_TLS char ret[32];
- int idx = err - RD_KAFKA_RESP_ERR__BEGIN;
-
- if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
- err >= RD_KAFKA_RESP_ERR_END_ALL ||
- !rd_kafka_err_descs[idx].desc)) {
- rd_snprintf(ret, sizeof(ret), "ERR_%i?", err);
- return ret;
- }
-
- return rd_kafka_err_descs[idx].name;
-}
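-
-/* Illustrative sketch (not part of librdkafka): typical application
- * usage of the two lookup functions above, assuming <stdio.h> is
- * available. Both functions are safe to call from any thread since
- * the fallback buffer for out-of-range codes is thread-local. */
-#if 0
-static void example_log_err(rd_kafka_resp_err_t err) {
- fprintf(stderr, "Kafka error %s: %s\n", rd_kafka_err2name(err),
- rd_kafka_err2str(err));
-}
-#endif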
-
-
-rd_kafka_resp_err_t rd_kafka_last_error(void) {
- return rd_kafka_last_error_code;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_errno2err(int errnox) {
- switch (errnox) {
- case EINVAL:
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- case EBUSY:
- return RD_KAFKA_RESP_ERR__CONFLICT;
-
- case ENOENT:
- return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
-
- case ESRCH:
- return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- case ETIMEDOUT:
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- case EMSGSIZE:
- return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
-
- case ENOBUFS:
- return RD_KAFKA_RESP_ERR__QUEUE_FULL;
-
- case ECANCELED:
- return RD_KAFKA_RESP_ERR__FATAL;
-
- default:
- return RD_KAFKA_RESP_ERR__FAIL;
- }
-}
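-
-/* Illustrative sketch (not part of librdkafka): legacy code paths that
- * only have an errno at hand can map it with the function above; any
- * errno not listed deliberately collapses into the catch-all
- * RD_KAFKA_RESP_ERR__FAIL. */
-#if 0
-static void example_map_errno(void) {
- rd_kafka_resp_err_t err = rd_kafka_errno2err(ETIMEDOUT);
- /* err == RD_KAFKA_RESP_ERR__TIMED_OUT */
- (void)err;
-}
-#endif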
-
-
-rd_kafka_resp_err_t
-rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
- rd_kafka_resp_err_t err;
-
- if (unlikely((err = rd_atomic32_get(&rk->rk_fatal.err)))) {
- rd_kafka_rdlock(rk);
- rd_snprintf(errstr, errstr_size, "%s", rk->rk_fatal.errstr);
- rd_kafka_rdunlock(rk);
- }
-
- return err;
-}
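-
-/* Illustrative sketch (not part of librdkafka): the intended
- * application-side pattern. When the standard error callback is
- * invoked with RD_KAFKA_RESP_ERR__FATAL, the application calls
- * rd_kafka_fatal_error() to retrieve the underlying fatal error
- * code and reason string (assumes <stdio.h>). */
-#if 0
-static void example_error_cb(rd_kafka_t *rk, int err, const char *reason,
- void *opaque) {
- if (err == RD_KAFKA_RESP_ERR__FATAL) {
- char fatalstr[512];
- rd_kafka_resp_err_t ferr =
- rd_kafka_fatal_error(rk, fatalstr, sizeof(fatalstr));
- fprintf(stderr, "FATAL %s: %s\n", rd_kafka_err2name(ferr),
- fatalstr);
- /* Typical recovery: flush, destroy and re-create the
- * instance; the old one can no longer produce. */
- }
-}
-#endif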
-
-
-/**
- * @brief Sets the fatal error for this instance.
- *
- * @param do_lock RD_DO_LOCK: rd_kafka_wrlock() will be acquired and released,
- * RD_DONT_LOCK: caller must hold rd_kafka_wrlock().
- *
- * @returns 1 if the error was set, or 0 if a previous fatal error
- * has already been set on this instance.
- *
- * @locality any
- * @locks none
- */
-int rd_kafka_set_fatal_error0(rd_kafka_t *rk,
- rd_dolock_t do_lock,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- va_list ap;
- char buf[512];
-
- if (do_lock)
- rd_kafka_wrlock(rk);
- rk->rk_fatal.cnt++;
- if (rd_atomic32_get(&rk->rk_fatal.err)) {
- if (do_lock)
- rd_kafka_wrunlock(rk);
- rd_kafka_dbg(rk, GENERIC, "FATAL",
- "Suppressing subsequent fatal error: %s",
- rd_kafka_err2name(err));
- return 0;
- }
-
- rd_atomic32_set(&rk->rk_fatal.err, err);
-
- va_start(ap, fmt);
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
- rk->rk_fatal.errstr = rd_strdup(buf);
-
- if (do_lock)
- rd_kafka_wrunlock(rk);
-
- /* If there is an error callback or event handler we
- * also log the fatal error as it happens.
- * If there is no error callback the error event
- * will be automatically logged, and this check
- * prevents duplicate log entries. */
- if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)
- rd_kafka_log(rk, LOG_EMERG, "FATAL", "Fatal error: %s: %s",
- rd_kafka_err2str(err), rk->rk_fatal.errstr);
- else
- rd_kafka_dbg(rk, ALL, "FATAL", "Fatal error: %s: %s",
- rd_kafka_err2str(err), rk->rk_fatal.errstr);
-
- /* Indicate to the application that a fatal error was raised,
- * the app should use rd_kafka_fatal_error() to extract the
- * fatal error code itself.
- * For the high-level consumer we propagate the error as a
- * consumer error so it is returned from consumer_poll(),
- * while for all other client types (the producer) we propagate to
- * the standard error handler (typically error_cb). */
- if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp)
- rd_kafka_consumer_err(
- rk->rk_cgrp->rkcg_q, RD_KAFKA_NODEID_UA,
- RD_KAFKA_RESP_ERR__FATAL, 0, NULL, NULL,
- RD_KAFKA_OFFSET_INVALID, "Fatal error: %s: %s",
- rd_kafka_err2str(err), rk->rk_fatal.errstr);
- else
- rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__FATAL,
- "Fatal error: %s: %s", rd_kafka_err2str(err),
- rk->rk_fatal.errstr);
-
-
- /* Tell rdkafka main thread to purge producer queues, but not
- * in-flight since we'll want proper delivery status for transmitted
- * requests.
- * Need NON_BLOCKING to avoid dead-lock if user is
- * calling purge() at the same time, which could be
- * waiting for this broker thread to handle its
- * OP_PURGE request. */
- if (rk->rk_type == RD_KAFKA_PRODUCER) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE);
- rko->rko_u.purge.flags =
- RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_NON_BLOCKING;
- rd_kafka_q_enq(rk->rk_ops, rko);
- }
-
- return 1;
-}
-
-
-/**
- * @returns a copy of the current fatal error, if any, else NULL.
- *
- * @locks_acquired rd_kafka_rdlock(rk)
- */
-rd_kafka_error_t *rd_kafka_get_fatal_error(rd_kafka_t *rk) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
-
- if (!(err = rd_atomic32_get(&rk->rk_fatal.err)))
- return NULL; /* No fatal error raised */
-
- rd_kafka_rdlock(rk);
- error = rd_kafka_error_new_fatal(err, "%s", rk->rk_fatal.errstr);
- rd_kafka_rdunlock(rk);
-
- return error;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason) {
- if (!rd_kafka_set_fatal_error(rk, err, "test_fatal_error: %s", reason))
- return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
- else
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief Final destructor for rd_kafka_t, must only be called with refcnt 0.
- *
- * @locality application thread
- */
-void rd_kafka_destroy_final(rd_kafka_t *rk) {
-
- rd_kafka_assert(rk, rd_kafka_terminating(rk));
-
- /* Synchronize state */
- rd_kafka_wrlock(rk);
- rd_kafka_wrunlock(rk);
-
- /* Terminate SASL provider */
- if (rk->rk_conf.sasl.provider)
- rd_kafka_sasl_term(rk);
-
- rd_kafka_timers_destroy(&rk->rk_timers);
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying op queues");
-
- /* Destroy cgrp */
- if (rk->rk_cgrp) {
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying cgrp");
- /* Reset queue forwarding (rep -> cgrp) */
- rd_kafka_q_fwd_set(rk->rk_rep, NULL);
- rd_kafka_cgrp_destroy_final(rk->rk_cgrp);
- }
-
- rd_kafka_assignors_term(rk);
-
- if (rk->rk_type == RD_KAFKA_CONSUMER) {
- rd_kafka_assignment_destroy(rk);
- if (rk->rk_consumer.q)
- rd_kafka_q_destroy(rk->rk_consumer.q);
- }
-
- /* Purge op-queues */
- rd_kafka_q_destroy_owner(rk->rk_rep);
- rd_kafka_q_destroy_owner(rk->rk_ops);
-
-#if WITH_SSL
- if (rk->rk_conf.ssl.ctx) {
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying SSL CTX");
- rd_kafka_ssl_ctx_term(rk);
- }
- rd_list_destroy(&rk->rk_conf.ssl.loaded_providers);
-#endif
-
- /* Last point where logging is safe: the log queue is destroyed below. */
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Termination done: freeing resources");
-
- if (rk->rk_logq) {
- rd_kafka_q_destroy_owner(rk->rk_logq);
- rk->rk_logq = NULL;
- }
-
- if (rk->rk_type == RD_KAFKA_PRODUCER) {
- cnd_destroy(&rk->rk_curr_msgs.cnd);
- mtx_destroy(&rk->rk_curr_msgs.lock);
- }
-
- if (rk->rk_fatal.errstr) {
- rd_free(rk->rk_fatal.errstr);
- rk->rk_fatal.errstr = NULL;
- }
-
- cnd_destroy(&rk->rk_broker_state_change_cnd);
- mtx_destroy(&rk->rk_broker_state_change_lock);
-
- mtx_destroy(&rk->rk_suppress.sparse_connect_lock);
-
- cnd_destroy(&rk->rk_init_cnd);
- mtx_destroy(&rk->rk_init_lock);
-
- if (rk->rk_full_metadata)
- rd_kafka_metadata_destroy(rk->rk_full_metadata);
- rd_kafkap_str_destroy(rk->rk_client_id);
- rd_kafkap_str_destroy(rk->rk_group_id);
- rd_kafkap_str_destroy(rk->rk_eos.transactional_id);
- rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf);
- rd_list_destroy(&rk->rk_broker_by_id);
-
- mtx_destroy(&rk->rk_conf.sasl.lock);
- rwlock_destroy(&rk->rk_lock);
-
- rd_free(rk);
- rd_kafka_global_cnt_decr();
-}
-
-
-static void rd_kafka_destroy_app(rd_kafka_t *rk, int flags) {
- thrd_t thrd;
-#ifndef _WIN32
- int term_sig = rk->rk_conf.term_sig;
-#endif
- int res;
- char flags_str[256];
- static const char *rd_kafka_destroy_flags_names[] = {
- "Terminate", "DestroyCalled", "Immediate", "NoConsumerClose", NULL};
-
- /* Fatal errors and _F_IMMEDIATE also set .._NO_CONSUMER_CLOSE */
- if (flags & RD_KAFKA_DESTROY_F_IMMEDIATE ||
- rd_kafka_fatal_error_code(rk))
- flags |= RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE;
-
- rd_flags2str(flags_str, sizeof(flags_str), rd_kafka_destroy_flags_names,
- flags);
- rd_kafka_dbg(rk, ALL, "DESTROY",
- "Terminating instance "
- "(destroy flags %s (0x%x))",
- flags ? flags_str : "none", flags);
-
- /* If the producer still has messages in queue the application
- * is terminating it without first calling flush() or purge(),
- * which is a common mistake for new users, so hint the user
- * about proper shutdown semantics. */
- if (rk->rk_type == RD_KAFKA_PRODUCER) {
- unsigned int tot_cnt;
- size_t tot_size;
-
- rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
-
- if (tot_cnt > 0)
- rd_kafka_log(rk, LOG_WARNING, "TERMINATE",
- "Producer terminating with %u message%s "
- "(%" PRIusz
- " byte%s) still in "
- "queue or transit: "
- "use flush() to wait for "
- "outstanding message delivery",
- tot_cnt, tot_cnt > 1 ? "s" : "", tot_size,
- tot_size > 1 ? "s" : "");
- }
-
- /* Make sure destroy is not called from a librdkafka thread
- * since this will most likely cause a deadlock.
- * FIXME: include broker threads (for log_cb) */
- if (thrd_is_current(rk->rk_thread) ||
- thrd_is_current(rk->rk_background.thread)) {
- rd_kafka_log(rk, LOG_EMERG, "BGQUEUE",
- "Application bug: "
- "rd_kafka_destroy() called from "
- "librdkafka owned thread");
- rd_kafka_assert(NULL,
- !*"Application bug: "
- "calling rd_kafka_destroy() from "
- "librdkafka owned thread is prohibited");
- }
-
- /* Before signaling for general termination, set the destroy
- * flags to hint cgrp how to shut down. */
- rd_atomic32_set(&rk->rk_terminate,
- flags | RD_KAFKA_DESTROY_F_DESTROY_CALLED);
-
- /* The legacy/simple consumer lacks an API to close down the consumer. */
- if (rk->rk_cgrp) {
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Terminating consumer group handler");
- rd_kafka_consumer_close(rk);
- }
-
- /* With the consumer closed, terminate the rest of librdkafka. */
- rd_atomic32_set(&rk->rk_terminate,
- flags | RD_KAFKA_DESTROY_F_TERMINATE);
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Interrupting timers");
- rd_kafka_wrlock(rk);
- thrd = rk->rk_thread;
- rd_kafka_timers_interrupt(&rk->rk_timers);
- rd_kafka_wrunlock(rk);
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Sending TERMINATE to internal main thread");
- /* Send op to trigger queue/io wake-up.
- * The op itself is (likely) ignored by the receiver. */
- rd_kafka_q_enq(rk->rk_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
-#ifndef _WIN32
- /* Interrupt main kafka thread to speed up termination. */
- if (term_sig) {
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Sending thread kill signal %d", term_sig);
- pthread_kill(thrd, term_sig);
- }
-#endif
-
- if (rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_IMMEDIATE))
- return; /* FIXME: thread resource leak */
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Joining internal main thread");
-
- if (thrd_join(thrd, &res) != thrd_success)
- rd_kafka_log(rk, LOG_ERR, "DESTROY",
- "Failed to join internal main thread: %s "
- "(was process forked?)",
- rd_strerror(errno));
-
- rd_kafka_destroy_final(rk);
-}
-
-
-/* NOTE: Must only be called by application.
- * librdkafka itself must use rd_kafka_destroy0(). */
-void rd_kafka_destroy(rd_kafka_t *rk) {
- rd_kafka_destroy_app(rk, 0);
-}
-
-void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags) {
- rd_kafka_destroy_app(rk, flags);
-}
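-
-/* Illustrative sketch (not part of librdkafka): the shutdown sequence
- * hinted at by the warning in rd_kafka_destroy_app() above. Producers
- * should flush() before destroying so queued messages get delivered;
- * consumers that want to skip the final consumer close can pass
- * RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to rd_kafka_destroy_flags(). */
-#if 0
-static void example_producer_shutdown(rd_kafka_t *rk) {
- rd_kafka_flush(rk, 10 * 1000 /* wait up to 10s */);
- rd_kafka_destroy(rk);
-}
-#endif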
-
-
-/**
- * Main destructor for rd_kafka_t
- *
- * Locality: rdkafka main thread or application thread during rd_kafka_new()
- */
-static void rd_kafka_destroy_internal(rd_kafka_t *rk) {
- rd_kafka_topic_t *rkt, *rkt_tmp;
- rd_kafka_broker_t *rkb, *rkb_tmp;
- rd_list_t wait_thrds;
- thrd_t *thrd;
- int i;
-
- rd_kafka_dbg(rk, ALL, "DESTROY", "Destroy internal");
-
- /* Trigger any state-change waiters (which should check the
- * terminate flag whenever they wake up). */
- rd_kafka_brokers_broadcast_state_change(rk);
-
- if (rk->rk_background.thread) {
- int res;
- /* Send op to trigger queue/io wake-up.
- * The op itself is (likely) ignored by the receiver. */
- rd_kafka_q_enq(rk->rk_background.q,
- rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
- rd_kafka_dbg(rk, ALL, "DESTROY",
- "Waiting for background queue thread "
- "to terminate");
- thrd_join(rk->rk_background.thread, &res);
- rd_kafka_q_destroy_owner(rk->rk_background.q);
- }
-
- /* Call on_destroy() interceptors */
- rd_kafka_interceptors_on_destroy(rk);
-
- /* Brokers pick up on rk_terminate automatically. */
-
- /* List of (broker) threads to join to synchronize termination */
- rd_list_init(&wait_thrds, rd_atomic32_get(&rk->rk_broker_cnt), NULL);
-
- rd_kafka_wrlock(rk);
-
- rd_kafka_dbg(rk, ALL, "DESTROY", "Removing all topics");
- /* Decommission all topics */
- TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) {
- rd_kafka_wrunlock(rk);
- rd_kafka_topic_partitions_remove(rkt);
- rd_kafka_wrlock(rk);
- }
-
- /* Decommission brokers.
- * Each broker thread holds a refcount and decommissions itself
- * once the broker refcount reaches 1. */
- TAILQ_FOREACH_SAFE(rkb, &rk->rk_brokers, rkb_link, rkb_tmp) {
- /* Add broker's thread to wait_thrds list for later joining */
- thrd = rd_malloc(sizeof(*thrd));
- *thrd = rkb->rkb_thread;
- rd_list_add(&wait_thrds, thrd);
- rd_kafka_wrunlock(rk);
-
- rd_kafka_dbg(rk, BROKER, "DESTROY", "Sending TERMINATE to %s",
- rd_kafka_broker_name(rkb));
- /* Send op to trigger queue/io wake-up.
- * The op itself is (likely) ignored by the broker thread. */
- rd_kafka_q_enq(rkb->rkb_ops,
- rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
-#ifndef _WIN32
- /* Interrupt IO threads to speed up termination. */
- if (rk->rk_conf.term_sig)
- pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig);
-#endif
-
- rd_kafka_broker_destroy(rkb);
-
- rd_kafka_wrlock(rk);
- }
-
- if (rk->rk_clusterid) {
- rd_free(rk->rk_clusterid);
- rk->rk_clusterid = NULL;
- }
-
- /* Destroy coord requests */
- rd_kafka_coord_reqs_term(rk);
-
- /* Destroy the coordinator cache */
- rd_kafka_coord_cache_destroy(&rk->rk_coord_cache);
-
- /* Purge metadata cache.
- * #3279:
- * We mustn't call cache_destroy() here since there might be outstanding
- * broker rkos that hold references to the metadata cache lock,
- * and these brokers are destroyed below. So to avoid a circular
- * dependency refcnt deadlock we first purge the cache here
- * and destroy it after the brokers are destroyed. */
- rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/);
-
- rd_kafka_wrunlock(rk);
-
- mtx_lock(&rk->rk_broker_state_change_lock);
- /* Purge broker state change waiters */
- rd_list_destroy(&rk->rk_broker_state_change_waiters);
- mtx_unlock(&rk->rk_broker_state_change_lock);
-
- if (rk->rk_type == RD_KAFKA_CONSUMER) {
- if (rk->rk_consumer.q)
- rd_kafka_q_disable(rk->rk_consumer.q);
- }
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Purging reply queue");
-
- /* Purge op-queue */
- rd_kafka_q_disable(rk->rk_rep);
- rd_kafka_q_purge(rk->rk_rep);
-
- /* Lose our special reference to the internal broker. */
- mtx_lock(&rk->rk_internal_rkb_lock);
- if ((rkb = rk->rk_internal_rkb)) {
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Decommissioning internal broker");
-
- /* Send op to trigger queue wake-up. */
- rd_kafka_q_enq(rkb->rkb_ops,
- rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
- rk->rk_internal_rkb = NULL;
- thrd = rd_malloc(sizeof(*thrd));
- *thrd = rkb->rkb_thread;
- rd_list_add(&wait_thrds, thrd);
- }
- mtx_unlock(&rk->rk_internal_rkb_lock);
- if (rkb)
- rd_kafka_broker_destroy(rkb);
-
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Join %d broker thread(s)",
- rd_list_cnt(&wait_thrds));
-
- /* Join broker threads */
- RD_LIST_FOREACH(thrd, &wait_thrds, i) {
- int res;
- if (thrd_join(*thrd, &res) != thrd_success)
- ; /* Ignore join failure: nothing useful to do here */
- rd_free(thrd);
- }
-
- rd_list_destroy(&wait_thrds);
-
- /* Destroy mock cluster */
- if (rk->rk_mock.cluster)
- rd_kafka_mock_cluster_destroy(rk->rk_mock.cluster);
-
- if (rd_atomic32_get(&rk->rk_mock.cluster_cnt) > 0) {
- rd_kafka_log(rk, LOG_EMERG, "MOCK",
- "%d mock cluster(s) still active: "
- "must be explicitly destroyed with "
- "rd_kafka_mock_cluster_destroy() prior to "
- "terminating the rd_kafka_t instance",
- (int)rd_atomic32_get(&rk->rk_mock.cluster_cnt));
- rd_assert(!*"All mock clusters must be destroyed prior to "
- "rd_kafka_t destroy");
- }
-
- /* Destroy metadata cache */
- rd_kafka_wrlock(rk);
- rd_kafka_metadata_cache_destroy(rk);
- rd_kafka_wrunlock(rk);
-}
-
-/**
- * @brief Buffer state for stats emitter
- */
-struct _stats_emit {
- char *buf; /* Pointer to allocated buffer */
- size_t size; /* Current allocated size of buf */
- size_t of; /* Current write-offset in buf */
-};
-
-
-/* Stats buffer printf. Requires a (struct _stats_emit *)st variable in the
- * current scope. */
-#define _st_printf(...) \
- do { \
- ssize_t _r; \
- ssize_t _rem = st->size - st->of; \
- _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \
- if (_r >= _rem) { \
- st->size *= 2; \
- _rem = st->size - st->of; \
- st->buf = rd_realloc(st->buf, st->size); \
- _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \
- } \
- st->of += _r; \
- } while (0)
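-
-/* Illustrative sketch (not part of librdkafka) of the same
- * grow-and-retry idiom as a plain function, assuming <stdarg.h>,
- * <stdio.h> and <stdlib.h>. Like the macro above it assumes a single
- * doubling is enough to fit one emitted chunk. */
-#if 0
-static void example_appendf(struct _stats_emit *st, const char *fmt, ...) {
- va_list ap, ap2;
- size_t rem = st->size - st->of;
- int r;
-
- va_start(ap, fmt);
- va_copy(ap2, ap); /* vsnprintf() consumes ap: keep a copy for retry */
- r = vsnprintf(st->buf + st->of, rem, fmt, ap);
- if ((size_t)r >= rem) {
- st->size *= 2;
- rem = st->size - st->of;
- st->buf = realloc(st->buf, st->size);
- r = vsnprintf(st->buf + st->of, rem, fmt, ap2);
- }
- va_end(ap2);
- va_end(ap);
- st->of += r;
-}
-#endif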
-
-struct _stats_total {
- int64_t tx; /**< broker.tx */
- int64_t tx_bytes; /**< broker.tx_bytes */
- int64_t rx; /**< broker.rx */
- int64_t rx_bytes; /**< broker.rx_bytes */
- int64_t txmsgs; /**< partition.txmsgs */
- int64_t txmsg_bytes; /**< partition.txbytes */
- int64_t rxmsgs; /**< partition.rxmsgs */
- int64_t rxmsg_bytes; /**< partition.rxbytes */
-};
-
-
-
-/**
- * @brief Rollover and emit an average window.
- */
-static RD_INLINE void rd_kafka_stats_emit_avg(struct _stats_emit *st,
- const char *name,
- rd_avg_t *src_avg) {
- rd_avg_t avg;
-
- rd_avg_rollover(&avg, src_avg);
- _st_printf(
- "\"%s\": {"
- " \"min\":%" PRId64
- ","
- " \"max\":%" PRId64
- ","
- " \"avg\":%" PRId64
- ","
- " \"sum\":%" PRId64
- ","
- " \"stddev\": %" PRId64
- ","
- " \"p50\": %" PRId64
- ","
- " \"p75\": %" PRId64
- ","
- " \"p90\": %" PRId64
- ","
- " \"p95\": %" PRId64
- ","
- " \"p99\": %" PRId64
- ","
- " \"p99_99\": %" PRId64
- ","
- " \"outofrange\": %" PRId64
- ","
- " \"hdrsize\": %" PRId32
- ","
- " \"cnt\":%i "
- "}, ",
- name, avg.ra_v.minv, avg.ra_v.maxv, avg.ra_v.avg, avg.ra_v.sum,
- (int64_t)avg.ra_hist.stddev, avg.ra_hist.p50, avg.ra_hist.p75,
- avg.ra_hist.p90, avg.ra_hist.p95, avg.ra_hist.p99,
- avg.ra_hist.p99_99, avg.ra_hist.oor, avg.ra_hist.hdrsize,
- avg.ra_v.cnt);
- rd_avg_destroy(&avg);
-}
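-
-/* For reference, a single emitted window as produced by the format
- * string above looks like (values illustrative):
- * "rtt": { "min":1500, "max":9200, "avg":3100, "sum":310000,
- * "stddev": 1200, "p50": 2900, "p75": 3500, "p90": 4100,
- * "p95": 5200, "p99": 8000, "p99_99": 9100, "outofrange": 0,
- * "hdrsize": 11376, "cnt":100 }, */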
-
-/**
- * Emit stats for toppar
- */
-static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st,
- struct _stats_total *total,
- rd_kafka_toppar_t *rktp,
- int first) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- int64_t end_offset;
- int64_t consumer_lag = -1;
- int64_t consumer_lag_stored = -1;
- struct offset_stats offs;
- int32_t broker_id = -1;
-
- rd_kafka_toppar_lock(rktp);
-
- if (rktp->rktp_broker) {
- rd_kafka_broker_lock(rktp->rktp_broker);
- broker_id = rktp->rktp_broker->rkb_nodeid;
- rd_kafka_broker_unlock(rktp->rktp_broker);
- }
-
- /* Grab a copy of the latest finalized offset stats */
- offs = rktp->rktp_offsets_fin;
-
- end_offset = (rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED)
- ? rktp->rktp_ls_offset
- : rktp->rktp_hi_offset;
-
- /* Calculate two consumer lag variants:
- * consumer_lag_stored, against the stored offset (the last message
- * passed to the application + 1, or, if
- * enable.auto.offset.store=false, the last offset manually stored),
- * and consumer_lag, against the committed offset (the last offset
- * committed by this or another consumer).
- * The stored variant keeps consumer_lag up to date even if
- * offsets have not (yet) been committed.
- */
- if (end_offset != RD_KAFKA_OFFSET_INVALID) {
- if (rktp->rktp_stored_pos.offset >= 0 &&
- rktp->rktp_stored_pos.offset <= end_offset)
- consumer_lag_stored =
- end_offset - rktp->rktp_stored_pos.offset;
- if (rktp->rktp_committed_pos.offset >= 0 &&
- rktp->rktp_committed_pos.offset <= end_offset)
- consumer_lag =
- end_offset - rktp->rktp_committed_pos.offset;
- }
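-
- /* Example (values illustrative): with end_offset = 1000, a stored
- * offset of 998 and a committed offset of 950 this yields
- * consumer_lag_stored = 2 and consumer_lag = 50: the stored figure
- * tracks consumption progress, the committed figure durability. */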
-
- _st_printf(
- "%s\"%" PRId32
- "\": { "
- "\"partition\":%" PRId32
- ", "
- "\"broker\":%" PRId32
- ", "
- "\"leader\":%" PRId32
- ", "
- "\"desired\":%s, "
- "\"unknown\":%s, "
- "\"msgq_cnt\":%i, "
- "\"msgq_bytes\":%" PRIusz
- ", "
- "\"xmit_msgq_cnt\":%i, "
- "\"xmit_msgq_bytes\":%" PRIusz
- ", "
- "\"fetchq_cnt\":%i, "
- "\"fetchq_size\":%" PRIu64
- ", "
- "\"fetch_state\":\"%s\", "
- "\"query_offset\":%" PRId64
- ", "
- "\"next_offset\":%" PRId64
- ", "
- "\"app_offset\":%" PRId64
- ", "
- "\"stored_offset\":%" PRId64
- ", "
- "\"stored_leader_epoch\":%" PRId32
- ", "
- "\"commited_offset\":%" PRId64
- ", " /*FIXME: issue #80 */
- "\"committed_offset\":%" PRId64
- ", "
- "\"committed_leader_epoch\":%" PRId32
- ", "
- "\"eof_offset\":%" PRId64
- ", "
- "\"lo_offset\":%" PRId64
- ", "
- "\"hi_offset\":%" PRId64
- ", "
- "\"ls_offset\":%" PRId64
- ", "
- "\"consumer_lag\":%" PRId64
- ", "
- "\"consumer_lag_stored\":%" PRId64
- ", "
- "\"leader_epoch\":%" PRId32
- ", "
- "\"txmsgs\":%" PRIu64
- ", "
- "\"txbytes\":%" PRIu64
- ", "
- "\"rxmsgs\":%" PRIu64
- ", "
- "\"rxbytes\":%" PRIu64
- ", "
- "\"msgs\": %" PRIu64
- ", "
- "\"rx_ver_drops\": %" PRIu64
- ", "
- "\"msgs_inflight\": %" PRId32
- ", "
- "\"next_ack_seq\": %" PRId32
- ", "
- "\"next_err_seq\": %" PRId32
- ", "
- "\"acked_msgid\": %" PRIu64 "} ",
- first ? "" : ", ", rktp->rktp_partition, rktp->rktp_partition,
- broker_id, rktp->rktp_leader_id,
- (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) ? "true" : "false",
- (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) ? "true" : "false",
- rd_kafka_msgq_len(&rktp->rktp_msgq),
- rd_kafka_msgq_size(&rktp->rktp_msgq),
- /* FIXME: xmit_msgq is local to the broker thread. */
- 0, (size_t)0, rd_kafka_q_len(rktp->rktp_fetchq),
- rd_kafka_q_size(rktp->rktp_fetchq),
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- rktp->rktp_query_pos.offset, offs.fetch_pos.offset,
- rktp->rktp_app_pos.offset, rktp->rktp_stored_pos.offset,
- rktp->rktp_stored_pos.leader_epoch,
- rktp->rktp_committed_pos.offset, /* misspelled commited_offset:
- * issue #80 */
- rktp->rktp_committed_pos.offset,
- rktp->rktp_committed_pos.leader_epoch, offs.eof_offset,
- rktp->rktp_lo_offset, rktp->rktp_hi_offset, rktp->rktp_ls_offset,
- consumer_lag, consumer_lag_stored, rktp->rktp_leader_epoch,
- rd_atomic64_get(&rktp->rktp_c.tx_msgs),
- rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes),
- rd_atomic64_get(&rktp->rktp_c.rx_msgs),
- rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes),
- rk->rk_type == RD_KAFKA_PRODUCER
- ? rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs)
- : rd_atomic64_get(
- &rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */
- rd_atomic64_get(&rktp->rktp_c.rx_ver_drops),
- rd_atomic32_get(&rktp->rktp_msgs_inflight),
- rktp->rktp_eos.next_ack_seq, rktp->rktp_eos.next_err_seq,
- rktp->rktp_eos.acked_msgid);
-
- if (total) {
- total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs);
- total->txmsg_bytes +=
- rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes);
- total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs);
- total->rxmsg_bytes +=
- rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes);
- }
-
- rd_kafka_toppar_unlock(rktp);
-}
-
-/**
- * @brief Emit broker request type stats
- */
-static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st,
- rd_kafka_broker_t *rkb) {
- /* Filter out request types that will never be sent by the client. */
- static const rd_bool_t filter[4][RD_KAFKAP__NUM] = {
- [RD_KAFKA_PRODUCER] = {[RD_KAFKAP_Fetch] = rd_true,
- [RD_KAFKAP_OffsetCommit] = rd_true,
- [RD_KAFKAP_OffsetFetch] = rd_true,
- [RD_KAFKAP_JoinGroup] = rd_true,
- [RD_KAFKAP_Heartbeat] = rd_true,
- [RD_KAFKAP_LeaveGroup] = rd_true,
- [RD_KAFKAP_SyncGroup] = rd_true},
- [RD_KAFKA_CONSUMER] =
- {
- [RD_KAFKAP_Produce] = rd_true,
- [RD_KAFKAP_InitProducerId] = rd_true,
- /* Transactional producer */
- [RD_KAFKAP_AddPartitionsToTxn] = rd_true,
- [RD_KAFKAP_AddOffsetsToTxn] = rd_true,
- [RD_KAFKAP_EndTxn] = rd_true,
- [RD_KAFKAP_TxnOffsetCommit] = rd_true,
- },
- [2 /*any client type*/] =
- {
- [RD_KAFKAP_UpdateMetadata] = rd_true,
- [RD_KAFKAP_ControlledShutdown] = rd_true,
- [RD_KAFKAP_LeaderAndIsr] = rd_true,
- [RD_KAFKAP_StopReplica] = rd_true,
- [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true,
-
- [RD_KAFKAP_WriteTxnMarkers] = rd_true,
-
- [RD_KAFKAP_AlterReplicaLogDirs] = rd_true,
- [RD_KAFKAP_DescribeLogDirs] = rd_true,
-
- [RD_KAFKAP_CreateDelegationToken] = rd_true,
- [RD_KAFKAP_RenewDelegationToken] = rd_true,
- [RD_KAFKAP_ExpireDelegationToken] = rd_true,
- [RD_KAFKAP_DescribeDelegationToken] = rd_true,
- [RD_KAFKAP_IncrementalAlterConfigs] = rd_true,
- [RD_KAFKAP_ElectLeaders] = rd_true,
- [RD_KAFKAP_AlterPartitionReassignments] = rd_true,
- [RD_KAFKAP_ListPartitionReassignments] = rd_true,
- [RD_KAFKAP_AlterUserScramCredentials] = rd_true,
- [RD_KAFKAP_Vote] = rd_true,
- [RD_KAFKAP_BeginQuorumEpoch] = rd_true,
- [RD_KAFKAP_EndQuorumEpoch] = rd_true,
- [RD_KAFKAP_DescribeQuorum] = rd_true,
- [RD_KAFKAP_AlterIsr] = rd_true,
- [RD_KAFKAP_UpdateFeatures] = rd_true,
- [RD_KAFKAP_Envelope] = rd_true,
- [RD_KAFKAP_FetchSnapshot] = rd_true,
- [RD_KAFKAP_BrokerHeartbeat] = rd_true,
- [RD_KAFKAP_UnregisterBroker] = rd_true,
- [RD_KAFKAP_AllocateProducerIds] = rd_true,
- },
- [3 /*hide-unless-non-zero*/] = {
- /* Hide Admin requests unless they've been used */
- [RD_KAFKAP_CreateTopics] = rd_true,
- [RD_KAFKAP_DeleteTopics] = rd_true,
- [RD_KAFKAP_DeleteRecords] = rd_true,
- [RD_KAFKAP_CreatePartitions] = rd_true,
- [RD_KAFKAP_DescribeAcls] = rd_true,
- [RD_KAFKAP_CreateAcls] = rd_true,
- [RD_KAFKAP_DeleteAcls] = rd_true,
- [RD_KAFKAP_DescribeConfigs] = rd_true,
- [RD_KAFKAP_AlterConfigs] = rd_true,
- [RD_KAFKAP_DeleteGroups] = rd_true,
- [RD_KAFKAP_ListGroups] = rd_true,
- [RD_KAFKAP_DescribeGroups] = rd_true,
- [RD_KAFKAP_DescribeLogDirs] = rd_true,
- [RD_KAFKAP_IncrementalAlterConfigs] = rd_true,
- [RD_KAFKAP_AlterPartitionReassignments] = rd_true,
- [RD_KAFKAP_ListPartitionReassignments] = rd_true,
- [RD_KAFKAP_OffsetDelete] = rd_true,
- [RD_KAFKAP_DescribeClientQuotas] = rd_true,
- [RD_KAFKAP_AlterClientQuotas] = rd_true,
- [RD_KAFKAP_DescribeUserScramCredentials] = rd_true,
- [RD_KAFKAP_AlterUserScramCredentials] = rd_true,
- }};
- int i;
- int cnt = 0;
-
- _st_printf("\"req\": { ");
- for (i = 0; i < RD_KAFKAP__NUM; i++) {
- int64_t v;
-
- if (filter[rkb->rkb_rk->rk_type][i] || filter[2][i])
- continue;
-
- v = rd_atomic64_get(&rkb->rkb_c.reqtype[i]);
- if (!v && filter[3][i])
- continue; /* Filter out zero values */
-
- _st_printf("%s\"%s\": %" PRId64, cnt > 0 ? ", " : "",
- rd_kafka_ApiKey2str(i), v);
-
- cnt++;
- }
- _st_printf(" }, ");
-}
-
-
-/**
- * Emit all statistics
- */
-static void rd_kafka_stats_emit_all(rd_kafka_t *rk) {
- rd_kafka_broker_t *rkb;
- rd_kafka_topic_t *rkt;
- rd_ts_t now;
- rd_kafka_op_t *rko;
- unsigned int tot_cnt;
- size_t tot_size;
- rd_kafka_resp_err_t err;
- struct _stats_emit stx = {.size = 1024 * 10};
- struct _stats_emit *st = &stx;
- struct _stats_total total = {0};
-
- st->buf = rd_malloc(st->size);
-
-
- rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
- rd_kafka_rdlock(rk);
-
- now = rd_clock();
- _st_printf(
- "{ "
- "\"name\": \"%s\", "
- "\"client_id\": \"%s\", "
- "\"type\": \"%s\", "
- "\"ts\":%" PRId64
- ", "
- "\"time\":%lli, "
- "\"age\":%" PRId64
- ", "
- "\"replyq\":%i, "
- "\"msg_cnt\":%u, "
- "\"msg_size\":%" PRIusz
- ", "
- "\"msg_max\":%u, "
- "\"msg_size_max\":%" PRIusz
- ", "
- "\"simple_cnt\":%i, "
- "\"metadata_cache_cnt\":%i, "
- "\"brokers\":{ " /*open brokers*/,
- rk->rk_name, rk->rk_conf.client_id_str,
- rd_kafka_type2str(rk->rk_type), now, (signed long long)time(NULL),
- now - rk->rk_ts_created, rd_kafka_q_len(rk->rk_rep), tot_cnt,
- tot_size, rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size,
- rd_atomic32_get(&rk->rk_simple_cnt),
- rk->rk_metadata_cache.rkmc_cnt);
-
-
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- rd_kafka_toppar_t *rktp;
- rd_ts_t txidle = -1, rxidle = -1;
-
- rd_kafka_broker_lock(rkb);
-
- if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) {
- /* Calculate tx and rx idle time in usecs */
- txidle = rd_atomic64_get(&rkb->rkb_c.ts_send);
- rxidle = rd_atomic64_get(&rkb->rkb_c.ts_recv);
-
- if (txidle)
- txidle = RD_MAX(now - txidle, 0);
- else
- txidle = -1;
-
- if (rxidle)
- rxidle = RD_MAX(now - rxidle, 0);
- else
- rxidle = -1;
- }
-
- _st_printf(
- "%s\"%s\": { " /*open broker*/
- "\"name\":\"%s\", "
- "\"nodeid\":%" PRId32
- ", "
- "\"nodename\":\"%s\", "
- "\"source\":\"%s\", "
- "\"state\":\"%s\", "
- "\"stateage\":%" PRId64
- ", "
- "\"outbuf_cnt\":%i, "
- "\"outbuf_msg_cnt\":%i, "
- "\"waitresp_cnt\":%i, "
- "\"waitresp_msg_cnt\":%i, "
- "\"tx\":%" PRIu64
- ", "
- "\"txbytes\":%" PRIu64
- ", "
- "\"txerrs\":%" PRIu64
- ", "
- "\"txretries\":%" PRIu64
- ", "
- "\"txidle\":%" PRId64
- ", "
- "\"req_timeouts\":%" PRIu64
- ", "
- "\"rx\":%" PRIu64
- ", "
- "\"rxbytes\":%" PRIu64
- ", "
- "\"rxerrs\":%" PRIu64
- ", "
- "\"rxcorriderrs\":%" PRIu64
- ", "
- "\"rxpartial\":%" PRIu64
- ", "
- "\"rxidle\":%" PRId64
- ", "
- "\"zbuf_grow\":%" PRIu64
- ", "
- "\"buf_grow\":%" PRIu64
- ", "
- "\"wakeups\":%" PRIu64
- ", "
- "\"connects\":%" PRId32
- ", "
- "\"disconnects\":%" PRId32 ", ",
- rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ",
- rkb->rkb_name, rkb->rkb_name, rkb->rkb_nodeid,
- rkb->rkb_nodename, rd_kafka_confsource2str(rkb->rkb_source),
- rd_kafka_broker_state_names[rkb->rkb_state],
- rkb->rkb_ts_state ? now - rkb->rkb_ts_state : 0,
- rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt),
- rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt),
- rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt),
- rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt),
- rd_atomic64_get(&rkb->rkb_c.tx),
- rd_atomic64_get(&rkb->rkb_c.tx_bytes),
- rd_atomic64_get(&rkb->rkb_c.tx_err),
- rd_atomic64_get(&rkb->rkb_c.tx_retries), txidle,
- rd_atomic64_get(&rkb->rkb_c.req_timeouts),
- rd_atomic64_get(&rkb->rkb_c.rx),
- rd_atomic64_get(&rkb->rkb_c.rx_bytes),
- rd_atomic64_get(&rkb->rkb_c.rx_err),
- rd_atomic64_get(&rkb->rkb_c.rx_corrid_err),
- rd_atomic64_get(&rkb->rkb_c.rx_partial), rxidle,
- rd_atomic64_get(&rkb->rkb_c.zbuf_grow),
- rd_atomic64_get(&rkb->rkb_c.buf_grow),
- rd_atomic64_get(&rkb->rkb_c.wakeups),
- rd_atomic32_get(&rkb->rkb_c.connects),
- rd_atomic32_get(&rkb->rkb_c.disconnects));
-
- total.tx += rd_atomic64_get(&rkb->rkb_c.tx);
- total.tx_bytes += rd_atomic64_get(&rkb->rkb_c.tx_bytes);
- total.rx += rd_atomic64_get(&rkb->rkb_c.rx);
- total.rx_bytes += rd_atomic64_get(&rkb->rkb_c.rx_bytes);
-
- rd_kafka_stats_emit_avg(st, "int_latency",
- &rkb->rkb_avg_int_latency);
- rd_kafka_stats_emit_avg(st, "outbuf_latency",
- &rkb->rkb_avg_outbuf_latency);
- rd_kafka_stats_emit_avg(st, "rtt", &rkb->rkb_avg_rtt);
- rd_kafka_stats_emit_avg(st, "throttle", &rkb->rkb_avg_throttle);
-
- rd_kafka_stats_emit_broker_reqs(st, rkb);
-
- _st_printf("\"toppars\":{ " /*open toppars*/);
-
- TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
- _st_printf(
- "%s\"%.*s-%" PRId32
- "\": { "
- "\"topic\":\"%.*s\", "
- "\"partition\":%" PRId32 "} ",
- rktp == TAILQ_FIRST(&rkb->rkb_toppars) ? "" : ", ",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
- }
-
- rd_kafka_broker_unlock(rkb);
-
- _st_printf(
- "} " /*close toppars*/
- "} " /*close broker*/);
- }
-
-
- _st_printf(
- "}, " /* close "brokers" array */
- "\"topics\":{ ");
-
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- rd_kafka_toppar_t *rktp;
- int i, j;
-
- rd_kafka_topic_rdlock(rkt);
- _st_printf(
- "%s\"%.*s\": { "
- "\"topic\":\"%.*s\", "
- "\"age\":%" PRId64
- ", "
- "\"metadata_age\":%" PRId64 ", ",
- rkt == TAILQ_FIRST(&rk->rk_topics) ? "" : ", ",
- RD_KAFKAP_STR_PR(rkt->rkt_topic),
- RD_KAFKAP_STR_PR(rkt->rkt_topic),
- (now - rkt->rkt_ts_create) / 1000,
- rkt->rkt_ts_metadata ? (now - rkt->rkt_ts_metadata) / 1000
- : 0);
-
- rd_kafka_stats_emit_avg(st, "batchsize",
- &rkt->rkt_avg_batchsize);
- rd_kafka_stats_emit_avg(st, "batchcnt", &rkt->rkt_avg_batchcnt);
-
- _st_printf("\"partitions\":{ " /*open partitions*/);
-
- for (i = 0; i < rkt->rkt_partition_cnt; i++)
- rd_kafka_stats_emit_toppar(st, &total, rkt->rkt_p[i],
- i == 0);
-
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, j)
- rd_kafka_stats_emit_toppar(st, &total, rktp, i + j == 0);
-
- i += j;
-
- if (rkt->rkt_ua)
- rd_kafka_stats_emit_toppar(st, NULL, rkt->rkt_ua,
- i++ == 0);
-
- rd_kafka_topic_rdunlock(rkt);
-
- _st_printf(
- "} " /*close partitions*/
- "} " /*close topic*/);
- }
- _st_printf("} " /*close topics*/);
-
- if (rk->rk_cgrp) {
- rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
- _st_printf(
- ", \"cgrp\": { "
- "\"state\": \"%s\", "
- "\"stateage\": %" PRId64
- ", "
- "\"join_state\": \"%s\", "
- "\"rebalance_age\": %" PRId64
- ", "
- "\"rebalance_cnt\": %d, "
- "\"rebalance_reason\": \"%s\", "
- "\"assignment_size\": %d }",
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rkcg->rkcg_ts_statechange
- ? (now - rkcg->rkcg_ts_statechange) / 1000
- : 0,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- rkcg->rkcg_c.ts_rebalance
- ? (now - rkcg->rkcg_c.ts_rebalance) / 1000
- : 0,
- rkcg->rkcg_c.rebalance_cnt, rkcg->rkcg_c.rebalance_reason,
- rkcg->rkcg_c.assignment_size);
- }
-
- if (rd_kafka_is_idempotent(rk)) {
- _st_printf(
- ", \"eos\": { "
- "\"idemp_state\": \"%s\", "
- "\"idemp_stateage\": %" PRId64
- ", "
- "\"txn_state\": \"%s\", "
- "\"txn_stateage\": %" PRId64
- ", "
- "\"txn_may_enq\": %s, "
- "\"producer_id\": %" PRId64
- ", "
- "\"producer_epoch\": %hd, "
- "\"epoch_cnt\": %d "
- "}",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
- (now - rk->rk_eos.ts_idemp_state) / 1000,
- rd_kafka_txn_state2str(rk->rk_eos.txn_state),
- (now - rk->rk_eos.ts_txn_state) / 1000,
- rd_atomic32_get(&rk->rk_eos.txn_may_enq) ? "true" : "false",
- rk->rk_eos.pid.id, rk->rk_eos.pid.epoch,
- rk->rk_eos.epoch_cnt);
- }
-
- if ((err = rd_atomic32_get(&rk->rk_fatal.err)))
- _st_printf(
- ", \"fatal\": { "
- "\"error\": \"%s\", "
- "\"reason\": \"%s\", "
- "\"cnt\": %d "
- "}",
- rd_kafka_err2str(err), rk->rk_fatal.errstr,
- rk->rk_fatal.cnt);
-
- rd_kafka_rdunlock(rk);
-
- /* Total counters */
- _st_printf(
- ", "
- "\"tx\":%" PRId64
- ", "
- "\"tx_bytes\":%" PRId64
- ", "
- "\"rx\":%" PRId64
- ", "
- "\"rx_bytes\":%" PRId64
- ", "
- "\"txmsgs\":%" PRId64
- ", "
- "\"txmsg_bytes\":%" PRId64
- ", "
- "\"rxmsgs\":%" PRId64
- ", "
- "\"rxmsg_bytes\":%" PRId64,
- total.tx, total.tx_bytes, total.rx, total.rx_bytes, total.txmsgs,
- total.txmsg_bytes, total.rxmsgs, total.rxmsg_bytes);
-
- _st_printf("}" /*close object*/);
-
-
- /* Enqueue op for application */
- rko = rd_kafka_op_new(RD_KAFKA_OP_STATS);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
- rko->rko_u.stats.json = st->buf;
- rko->rko_u.stats.json_len = st->of;
- rd_kafka_q_enq(rk->rk_rep, rko);
-}
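-
-/* Illustrative sketch (not part of librdkafka): how an application
- * receives the JSON document assembled above. Emission is enabled by
- * setting statistics.interval.ms and registering a stats callback on
- * the configuration object (assumes <stdio.h>). */
-#if 0
-static int example_stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
- void *opaque) {
- fwrite(json, 1, json_len, stderr);
- return 0; /* returning 0 lets librdkafka free the json buffer */
-}
-/* rd_kafka_conf_set(conf, "statistics.interval.ms", "5000", NULL, 0);
- * rd_kafka_conf_set_stats_cb(conf, example_stats_cb); */
-#endif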
-
-
-/**
- * @brief 1 second generic timer.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_1s_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_t *rk = rkts->rkts_rk;
-
- /* Scan topic state, message timeouts, etc. */
- rd_kafka_topic_scan_all(rk, rd_clock());
-
- /* Sparse connections:
- * try to maintain at least one connection to the cluster. */
- if (rk->rk_conf.sparse_connections &&
- rd_atomic32_get(&rk->rk_broker_up_cnt) == 0)
- rd_kafka_connect_any(rk, "no cluster connection");
-
- rd_kafka_coord_cache_expire(&rk->rk_coord_cache);
-}
-
-static void rd_kafka_stats_emit_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_t *rk = rkts->rkts_rk;
- rd_kafka_stats_emit_all(rk);
-}
-
-
-/**
- * @brief Periodic metadata refresh callback
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_metadata_refresh_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_t *rk = rkts->rkts_rk;
- rd_kafka_resp_err_t err;
-
- /* High-level consumer:
- * We need to query both locally known topics and subscribed topics
- * so that we can detect locally known topics changing partition
- * count or disappearing, as well as detect previously non-existent
- * subscribed topics now being available in the cluster. */
- if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp)
- err = rd_kafka_metadata_refresh_consumer_topics(
- rk, NULL, "periodic topic and broker list refresh");
- else
- err = rd_kafka_metadata_refresh_known_topics(
- rk, NULL, rd_true /*force*/,
- "periodic topic and broker list refresh");
-
-
- if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC &&
- rd_interval(&rk->rk_suppress.broker_metadata_refresh,
- 10 * 1000 * 1000 /*10s*/, 0) > 0) {
- /* If there are no (locally referenced) topics
- * to query, refresh the broker list.
- * This avoids getting idle-disconnected for clients
- * that have not yet referenced a topic and makes
- * sure such a client has an up-to-date broker list. */
- rd_kafka_metadata_refresh_brokers(
- rk, NULL, "periodic broker list refresh");
- }
-}
-
-
-
-/**
- * @brief Wait for background threads to initialize.
- *
- * @returns the number of background threads still not initialized.
- *
- * @locality app thread calling rd_kafka_new()
- * @locks none
- */
-static int rd_kafka_init_wait(rd_kafka_t *rk, int timeout_ms) {
- struct timespec tspec;
- int ret;
-
- rd_timeout_init_timespec(&tspec, timeout_ms);
-
- mtx_lock(&rk->rk_init_lock);
- while (rk->rk_init_wait_cnt > 0 &&
- cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock, &tspec) ==
- thrd_success)
- ;
- ret = rk->rk_init_wait_cnt;
- mtx_unlock(&rk->rk_init_lock);
-
- return ret;
-}
-
-
-/**
- * Main loop for Kafka handler thread.
- */
-static int rd_kafka_thread_main(void *arg) {
- rd_kafka_t *rk = arg;
- rd_kafka_timer_t tmr_1s = RD_ZERO_INIT;
- rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT;
- rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT;
-
- rd_kafka_set_thread_name("main");
- rd_kafka_set_thread_sysname("rdk:main");
-
- rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_MAIN);
-
- (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
-
- /* Acquire lock (which was held by thread creator during creation)
- * to synchronise state. */
- rd_kafka_wrlock(rk);
- rd_kafka_wrunlock(rk);
-
- /* 1 second timer for topic scan and connection checking. */
- rd_kafka_timer_start(&rk->rk_timers, &tmr_1s, 1000000,
- rd_kafka_1s_tmr_cb, NULL);
- if (rk->rk_conf.stats_interval_ms)
- rd_kafka_timer_start(&rk->rk_timers, &tmr_stats_emit,
- rk->rk_conf.stats_interval_ms * 1000ll,
- rd_kafka_stats_emit_tmr_cb, NULL);
- if (rk->rk_conf.metadata_refresh_interval_ms > 0)
- rd_kafka_timer_start(&rk->rk_timers, &tmr_metadata_refresh,
- rk->rk_conf.metadata_refresh_interval_ms *
- 1000ll,
- rd_kafka_metadata_refresh_cb, NULL);
-
- if (rk->rk_cgrp)
- rd_kafka_q_fwd_set(rk->rk_cgrp->rkcg_ops, rk->rk_ops);
-
- if (rd_kafka_is_idempotent(rk))
- rd_kafka_idemp_init(rk);
-
- mtx_lock(&rk->rk_init_lock);
- rk->rk_init_wait_cnt--;
- cnd_broadcast(&rk->rk_init_cnd);
- mtx_unlock(&rk->rk_init_lock);
-
- while (likely(!rd_kafka_terminating(rk) || rd_kafka_q_len(rk->rk_ops) ||
- (rk->rk_cgrp && (rk->rk_cgrp->rkcg_state !=
- RD_KAFKA_CGRP_STATE_TERM)))) {
- rd_ts_t sleeptime = rd_kafka_timers_next(
- &rk->rk_timers, 1000 * 1000 /*1s*/, 1 /*lock*/);
- rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0,
- RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
- if (rk->rk_cgrp) /* FIXME: move to timer-triggered */
- rd_kafka_cgrp_serve(rk->rk_cgrp);
- rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT);
- }
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Internal main thread terminating");
-
- if (rd_kafka_is_idempotent(rk))
- rd_kafka_idemp_term(rk);
-
- rd_kafka_q_disable(rk->rk_ops);
- rd_kafka_q_purge(rk->rk_ops);
-
- rd_kafka_timer_stop(&rk->rk_timers, &tmr_1s, 1);
- if (rk->rk_conf.stats_interval_ms)
- rd_kafka_timer_stop(&rk->rk_timers, &tmr_stats_emit, 1);
- rd_kafka_timer_stop(&rk->rk_timers, &tmr_metadata_refresh, 1);
-
- /* Synchronise state */
- rd_kafka_wrlock(rk);
- rd_kafka_wrunlock(rk);
-
- rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_MAIN);
-
- rd_kafka_destroy_internal(rk);
-
- rd_kafka_dbg(rk, GENERIC, "TERMINATE",
- "Internal main thread termination done");
-
- rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
-
- return 0;
-}
-
-
-void rd_kafka_term_sig_handler(int sig) {
- /* nop */
-}
-
-
-rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
- rd_kafka_conf_t *app_conf,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_t *rk;
- static rd_atomic32_t rkid;
- rd_kafka_conf_t *conf;
- rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int ret_errno = 0;
- const char *conf_err;
-#ifndef _WIN32
- sigset_t newset, oldset;
-#endif
- char builtin_features[128];
- size_t bflen;
-
- rd_kafka_global_init();
-
- /* rd_kafka_new() takes ownership of the provided \p app_conf
- * object if rd_kafka_new() succeeds.
- * Since \p app_conf is optional we allocate a default configuration
- * object here if \p app_conf is NULL.
- * The configuration object itself is struct-copied later
- * leaving the default *conf pointer to be ready for freeing.
- * In case new() fails and app_conf was specified we will clear out
- * rk_conf to avoid double-freeing from destroy_internal() and the
- * user's eventual call to rd_kafka_conf_destroy().
- * This is all a bit tricky but that's the nature of
- * legacy interfaces. */
- if (!app_conf)
- conf = rd_kafka_conf_new();
- else
- conf = app_conf;
-
- /* Verify and finalize configuration */
- if ((conf_err = rd_kafka_conf_finalize(type, conf))) {
- /* Incompatible configuration settings */
- rd_snprintf(errstr, errstr_size, "%s", conf_err);
- if (!app_conf)
- rd_kafka_conf_destroy(conf);
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
- return NULL;
- }
-
-
- rd_kafka_global_cnt_incr();
-
- /*
- * Set up the handle.
- */
- rk = rd_calloc(1, sizeof(*rk));
-
- rk->rk_type = type;
- rk->rk_ts_created = rd_clock();
-
- /* Struct-copy the config object. */
- rk->rk_conf = *conf;
- if (!app_conf)
- rd_free(conf); /* Free the base config struct only,
- * not its fields since they were copied to
- * rk_conf just above. Those fields are
- * freed from rd_kafka_destroy_internal()
- * as the rk itself is destroyed. */
-
- /* Seed the PRNG; don't bother about HAVE_RAND_R since seeding
- * is cheap. */
- if (rk->rk_conf.enable_random_seed)
- call_once(&rd_kafka_global_srand_once, rd_kafka_global_srand);
-
- /* Call on_new() interceptors */
- rd_kafka_interceptors_on_new(rk, &rk->rk_conf);
-
- rwlock_init(&rk->rk_lock);
- mtx_init(&rk->rk_conf.sasl.lock, mtx_plain);
- mtx_init(&rk->rk_internal_rkb_lock, mtx_plain);
-
- cnd_init(&rk->rk_broker_state_change_cnd);
- mtx_init(&rk->rk_broker_state_change_lock, mtx_plain);
- rd_list_init(&rk->rk_broker_state_change_waiters, 8,
- rd_kafka_enq_once_trigger_destroy);
-
- cnd_init(&rk->rk_init_cnd);
- mtx_init(&rk->rk_init_lock, mtx_plain);
-
- rd_interval_init(&rk->rk_suppress.no_idemp_brokers);
- rd_interval_init(&rk->rk_suppress.broker_metadata_refresh);
- rd_interval_init(&rk->rk_suppress.sparse_connect_random);
- mtx_init(&rk->rk_suppress.sparse_connect_lock, mtx_plain);
-
- rd_atomic64_init(&rk->rk_ts_last_poll, rk->rk_ts_created);
- rd_atomic32_init(&rk->rk_flushing, 0);
-
- rk->rk_rep = rd_kafka_q_new(rk);
- rk->rk_ops = rd_kafka_q_new(rk);
- rk->rk_ops->rkq_serve = rd_kafka_poll_cb;
- rk->rk_ops->rkq_opaque = rk;
-
- if (rk->rk_conf.log_queue) {
- rk->rk_logq = rd_kafka_q_new(rk);
- rk->rk_logq->rkq_serve = rd_kafka_poll_cb;
- rk->rk_logq->rkq_opaque = rk;
- }
-
- TAILQ_INIT(&rk->rk_brokers);
- TAILQ_INIT(&rk->rk_topics);
- rd_kafka_timers_init(&rk->rk_timers, rk, rk->rk_ops);
- rd_kafka_metadata_cache_init(rk);
- rd_kafka_coord_cache_init(&rk->rk_coord_cache,
- rk->rk_conf.metadata_max_age_ms);
- rd_kafka_coord_reqs_init(rk);
-
- if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb)
- rk->rk_drmode = RD_KAFKA_DR_MODE_CB;
- else if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR)
- rk->rk_drmode = RD_KAFKA_DR_MODE_EVENT;
- else
- rk->rk_drmode = RD_KAFKA_DR_MODE_NONE;
- if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE)
- rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR;
-
- if (rk->rk_conf.rebalance_cb)
- rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE;
- if (rk->rk_conf.offset_commit_cb)
- rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT;
- if (rk->rk_conf.error_cb)
- rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_ERROR;
-#if WITH_SASL_OAUTHBEARER
- if (rk->rk_conf.sasl.enable_oauthbearer_unsecure_jwt &&
- !rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
- rd_kafka_conf_set_oauthbearer_token_refresh_cb(
- &rk->rk_conf, rd_kafka_oauthbearer_unsecured_token);
-
- if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb &&
- rk->rk_conf.sasl.oauthbearer.method !=
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC)
- rk->rk_conf.enabled_events |=
- RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH;
-#endif
-
-#if WITH_OAUTHBEARER_OIDC
- if (rk->rk_conf.sasl.oauthbearer.method ==
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
- !rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
- rd_kafka_conf_set_oauthbearer_token_refresh_cb(
- &rk->rk_conf, rd_kafka_oidc_token_refresh_cb);
-#endif
-
- rk->rk_controllerid = -1;
-
- /* Admin client defaults */
- rk->rk_conf.admin.request_timeout_ms = rk->rk_conf.socket_timeout_ms;
-
- if (rk->rk_conf.debug)
- rk->rk_conf.log_level = LOG_DEBUG;
-
- rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i",
- rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type),
- rd_atomic32_add(&rkid, 1));
-
- /* Construct clientid kafka string */
- rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str, -1);
-
- /* Convert group.id to kafka string (may be NULL) */
- rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str, -1);
-
- /* Config fixups */
- rk->rk_conf.queued_max_msg_bytes =
- (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll;
-
- /* Enable api.version.request=true if broker.version.fallback
- * indicates a supporting broker. */
- if (rd_kafka_ApiVersion_is_queryable(
- rk->rk_conf.broker_version_fallback))
- rk->rk_conf.api_version_request = 1;
-
- if (rk->rk_type == RD_KAFKA_PRODUCER) {
- mtx_init(&rk->rk_curr_msgs.lock, mtx_plain);
- cnd_init(&rk->rk_curr_msgs.cnd);
- rk->rk_curr_msgs.max_cnt = rk->rk_conf.queue_buffering_max_msgs;
- if ((unsigned long long)rk->rk_conf.queue_buffering_max_kbytes *
- 1024 >
- (unsigned long long)SIZE_MAX) {
- rk->rk_curr_msgs.max_size = SIZE_MAX;
- rd_kafka_log(rk, LOG_WARNING, "QUEUESIZE",
- "queue.buffering.max.kbytes adjusted "
- "to system SIZE_MAX limit %" PRIusz
- " bytes",
- rk->rk_curr_msgs.max_size);
- } else {
- rk->rk_curr_msgs.max_size =
- (size_t)rk->rk_conf.queue_buffering_max_kbytes *
- 1024;
- }
- }
-
- if (rd_kafka_assignors_init(rk, errstr, errstr_size) == -1) {
- ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
- ret_errno = EINVAL;
- goto fail;
- }
-
- /* Create Mock cluster */
- rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0);
- if (rk->rk_conf.mock.broker_cnt > 0) {
- const char *mock_bootstraps;
- rk->rk_mock.cluster =
- rd_kafka_mock_cluster_new(rk, rk->rk_conf.mock.broker_cnt);
-
- if (!rk->rk_mock.cluster) {
- rd_snprintf(errstr, errstr_size,
- "Failed to create mock cluster, see logs");
- ret_err = RD_KAFKA_RESP_ERR__FAIL;
- ret_errno = EINVAL;
- goto fail;
- }
-
- mock_bootstraps =
- rd_kafka_mock_cluster_bootstraps(rk->rk_mock.cluster);
- rd_kafka_log(rk, LOG_NOTICE, "MOCK",
- "Mock cluster enabled: "
- "original bootstrap.servers and security.protocol "
- "ignored and replaced with %s",
- mock_bootstraps);
-
- /* Overwrite bootstrap.servers and connection settings */
- if (rd_kafka_conf_set(&rk->rk_conf, "bootstrap.servers",
- mock_bootstraps, NULL,
- 0) != RD_KAFKA_CONF_OK)
- rd_assert(!"failed to replace mock bootstrap.servers");
-
- if (rd_kafka_conf_set(&rk->rk_conf, "security.protocol",
- "plaintext", NULL, 0) != RD_KAFKA_CONF_OK)
- rd_assert(!"failed to reset mock security.protocol");
-
- rk->rk_conf.security_protocol = RD_KAFKA_PROTO_PLAINTEXT;
-
- /* Apply default RTT to brokers */
- if (rk->rk_conf.mock.broker_rtt)
- rd_kafka_mock_broker_set_rtt(
- rk->rk_mock.cluster, -1 /*all brokers*/,
- rk->rk_conf.mock.broker_rtt);
- }
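-
- /* Example (illustrative): applications enable this path simply by
- * setting the test.mock.num.brokers configuration property, e.g.
- * rd_kafka_conf_set(conf, "test.mock.num.brokers", "3", NULL, 0);
- * after which all traffic is served by the in-process mock cluster
- * rather than a real one. */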
-
- if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL ||
- rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) {
- /* Select SASL provider */
- if (rd_kafka_sasl_select_provider(rk, errstr, errstr_size) ==
- -1) {
- ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
- ret_errno = EINVAL;
- goto fail;
- }
-
- /* Initialize SASL provider */
- if (rd_kafka_sasl_init(rk, errstr, errstr_size) == -1) {
- rk->rk_conf.sasl.provider = NULL;
- ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
- ret_errno = EINVAL;
- goto fail;
- }
- }
-
-#if WITH_SSL
- if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SSL ||
- rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) {
- /* Create SSL context */
- if (rd_kafka_ssl_ctx_init(rk, errstr, errstr_size) == -1) {
- ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
- ret_errno = EINVAL;
- goto fail;
- }
- }
-#endif
-
- if (type == RD_KAFKA_CONSUMER) {
- rd_kafka_assignment_init(rk);
-
- if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) {
- /* Create consumer group handle */
- rk->rk_cgrp = rd_kafka_cgrp_new(rk, rk->rk_group_id,
- rk->rk_client_id);
- rk->rk_consumer.q =
- rd_kafka_q_keep(rk->rk_cgrp->rkcg_q);
- } else {
- /* Legacy consumer */
- rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_rep);
- }
-
- } else if (type == RD_KAFKA_PRODUCER) {
- rk->rk_eos.transactional_id =
- rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1);
- }
-
-#ifndef _WIN32
- /* Block all signals in newly created threads.
- * To avoid a race condition we block all signals in the calling
- * thread, from which the new thread will inherit its sigmask,
- * and then restore the original sigmask of the calling thread
- * when we're done creating the thread. */
- sigemptyset(&oldset);
- sigfillset(&newset);
- if (rk->rk_conf.term_sig) {
- struct sigaction sa_term = {.sa_handler =
- rd_kafka_term_sig_handler};
- sigaction(rk->rk_conf.term_sig, &sa_term, NULL);
- }
- pthread_sigmask(SIG_SETMASK, &newset, &oldset);
-#endif
-
- /* Create background thread and queue if background_event_cb()
- * RD_KAFKA_EVENT_BACKGROUND has been enabled.
- * Do this before creating the main thread since after
- * the main thread is created it is no longer trivial to error
- * out from rd_kafka_new(). */
- if (rk->rk_conf.background_event_cb ||
- (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_BACKGROUND)) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_wrlock(rk);
- if (!rk->rk_background.q)
- err = rd_kafka_background_thread_create(rk, errstr,
- errstr_size);
- rd_kafka_wrunlock(rk);
- if (err)
- goto fail;
- }
-
- /* Lock handle here to synchronise state, i.e., hold off
- * the thread until we've finalized the handle. */
- rd_kafka_wrlock(rk);
-
- /* Create handler thread */
- mtx_lock(&rk->rk_init_lock);
- rk->rk_init_wait_cnt++;
- if ((thrd_create(&rk->rk_thread, rd_kafka_thread_main, rk)) !=
- thrd_success) {
- rk->rk_init_wait_cnt--;
- ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- ret_errno = errno;
- if (errstr)
- rd_snprintf(errstr, errstr_size,
- "Failed to create thread: %s (%i)",
- rd_strerror(errno), errno);
- mtx_unlock(&rk->rk_init_lock);
- rd_kafka_wrunlock(rk);
-#ifndef _WIN32
- /* Restore sigmask of caller */
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
- goto fail;
- }
-
- mtx_unlock(&rk->rk_init_lock);
- rd_kafka_wrunlock(rk);
-
- /*
- * @warning `goto fail` is prohibited past this point
- */
-
- mtx_lock(&rk->rk_internal_rkb_lock);
- rk->rk_internal_rkb =
- rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT,
- "", 0, RD_KAFKA_NODEID_UA);
- mtx_unlock(&rk->rk_internal_rkb_lock);
-
- /* Add initial list of brokers from configuration */
- if (rk->rk_conf.brokerlist) {
- if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0)
- rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
- "No brokers configured");
- }
-
-#ifndef _WIN32
- /* Restore sigmask of caller */
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
-
- /* Wait for background threads to fully initialize so that
- * the client instance is fully functional at the time it is
- * returned from the constructor. */
- if (rd_kafka_init_wait(rk, 60 * 1000) != 0) {
- /* This should never happen unless there is a bug
- * or the OS is not scheduling the background threads.
-                 * In either case there is no point in handling this gracefully
- * in the current state since the thread joins are likely
- * to hang as well. */
- mtx_lock(&rk->rk_init_lock);
- rd_kafka_log(rk, LOG_CRIT, "INIT",
- "Failed to initialize %s: "
- "%d background thread(s) did not initialize "
- "within 60 seconds",
- rk->rk_name, rk->rk_init_wait_cnt);
- if (errstr)
- rd_snprintf(errstr, errstr_size,
- "Timed out waiting for "
- "%d background thread(s) to initialize",
- rk->rk_init_wait_cnt);
- mtx_unlock(&rk->rk_init_lock);
-
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
- EDEADLK);
- return NULL;
- }
-
- rk->rk_initialized = 1;
-
- bflen = sizeof(builtin_features);
- if (rd_kafka_conf_get(&rk->rk_conf, "builtin.features",
- builtin_features, &bflen) != RD_KAFKA_CONF_OK)
- rd_snprintf(builtin_features, sizeof(builtin_features), "?");
- rd_kafka_dbg(rk, ALL, "INIT",
- "librdkafka v%s (0x%x) %s initialized "
- "(builtin.features %s, %s, debug 0x%x)",
- rd_kafka_version_str(), rd_kafka_version(), rk->rk_name,
- builtin_features, BUILT_WITH, rk->rk_conf.debug);
-
- /* Log warnings for deprecated configuration */
- rd_kafka_conf_warn(rk);
-
- /* Debug dump configuration */
- if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) {
- rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, &rk->rk_conf,
- "Client configuration");
- if (rk->rk_conf.topic_conf)
- rd_kafka_anyconf_dump_dbg(
- rk, _RK_TOPIC, rk->rk_conf.topic_conf,
- "Default topic configuration");
- }
-
- /* Free user supplied conf's base pointer on success,
- * but not the actual allocated fields since the struct
- * will have been copied in its entirety above. */
- if (app_conf)
- rd_free(app_conf);
- rd_kafka_set_last_error(0, 0);
-
- return rk;
-
-fail:
- /*
- * Error out and clean up
- */
-
- /*
- * Tell background thread to terminate and wait for it to return.
- */
- rd_atomic32_set(&rk->rk_terminate, RD_KAFKA_DESTROY_F_TERMINATE);
-
- /* Terminate SASL provider */
- if (rk->rk_conf.sasl.provider)
- rd_kafka_sasl_term(rk);
-
- if (rk->rk_background.thread) {
- int res;
- thrd_join(rk->rk_background.thread, &res);
- rd_kafka_q_destroy_owner(rk->rk_background.q);
- }
-
- /* If on_new() interceptors have been called we also need
- * to allow interceptor clean-up by calling on_destroy() */
- rd_kafka_interceptors_on_destroy(rk);
-
-        /* If rk_conf is a struct-copy of the application configuration
-         * we need to prevent the rk_conf fields from being freed by
-         * rd_kafka_destroy_internal() since they belong to app_conf.
-         * However, there are some internal fields, such as interceptors,
-         * that belong to rk_conf and thus need to be cleaned up.
-         * Legacy APIs, sigh.. */
- if (app_conf) {
- rd_kafka_assignors_term(rk);
- rd_kafka_interceptors_destroy(&rk->rk_conf);
- memset(&rk->rk_conf, 0, sizeof(rk->rk_conf));
- }
-
- rd_kafka_destroy_internal(rk);
- rd_kafka_destroy_final(rk);
-
- rd_kafka_set_last_error(ret_err, ret_errno);
-
- return NULL;
-}
-
-
-
-/**
- * Counts usage of the legacy/simple consumer (rd_kafka_consume_start() and
- * friends). Since that API has no way of stopping the cgrp we need to
- * stop it automatically in the background when all consumption
- * has stopped.
- *
- * Returns 0 if a high-level consumer is already instantiated, in which
- * case a simple consumer cannot co-operate with it, else 1.
- *
- * A rd_kafka_t handle can never migrate from simple to high-level, or
- * vice versa, so we don't need a ..consumer_del().
- */
-int rd_kafka_simple_consumer_add(rd_kafka_t *rk) {
- if (rd_atomic32_get(&rk->rk_simple_cnt) < 0)
- return 0;
-
- return (int)rd_atomic32_add(&rk->rk_simple_cnt, 1);
-}
-
-
-
-/**
- * rktp fetch is split up in these parts:
- *  * application side: issues fetch start/stop/seek ops and consumes
- *    messages from rktp_fetchq.
- *  * broker side (handled by current leader broker thread for rktp):
- *       - the fetch state, initial offset, etc.
- *       - fetching messages, updating fetched offset, etc.
- *       - offset commits
- *
- * Communication between the two is:
- *    app side -> rdkafka main side: rktp_ops
- *    broker thread -> app side: rktp_fetchq
- *
- * There is no shared state between these threads; instead
- * state is communicated through the two op queues, and state
- * synchronization is performed by version barriers (a minimal sketch of
- * the versioning idea follows below).
- *
- */
-
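-/* Illustrative sketch (not part of librdkafka): the version-barrier idea
- * in miniature. Ops are stamped with the version that was current when
- * they were enqueued; a seek/stop bumps the version, and the serving side
- * discards any op stamped with an older version. All names below are
- * hypothetical. */
-#include <stdatomic.h>
-
-struct example_op {
-        int version; /* barrier version at enqueue time */
-        /* ... op payload ... */
-};
-
-static atomic_int example_barrier_version = 1;
-
-/* App side: invalidate all in-flight ops (e.g., on seek() or stop()). */
-static int example_barrier_bump(void) {
-        return atomic_fetch_add(&example_barrier_version, 1) + 1;
-}
-
-/* Serving side: true if the op was enqueued before the last barrier
- * and must be dropped rather than applied. */
-static int example_op_outdated(const struct example_op *op) {
-        return op->version < atomic_load(&example_barrier_version);
-}
-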
-static RD_UNUSED int rd_kafka_consume_start0(rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t offset,
- rd_kafka_q_t *rkq) {
- rd_kafka_toppar_t *rktp;
-
- if (partition < 0) {
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- ESRCH);
- return -1;
- }
-
- if (!rd_kafka_simple_consumer_add(rkt->rkt_rk)) {
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
- return -1;
- }
-
- rd_kafka_topic_wrlock(rkt);
- rktp = rd_kafka_toppar_desired_add(rkt, partition);
- rd_kafka_topic_wrunlock(rkt);
-
- /* Verify offset */
- if (offset == RD_KAFKA_OFFSET_BEGINNING ||
- offset == RD_KAFKA_OFFSET_END ||
- offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
- /* logical offsets */
-
- } else if (offset == RD_KAFKA_OFFSET_STORED) {
- /* offset manager */
-
- if (rkt->rkt_conf.offset_store_method ==
- RD_KAFKA_OFFSET_METHOD_BROKER &&
- RD_KAFKAP_STR_IS_NULL(rkt->rkt_rk->rk_group_id)) {
- /* Broker based offsets require a group id. */
- rd_kafka_toppar_destroy(rktp);
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
- EINVAL);
- return -1;
- }
-
- } else if (offset < 0) {
- rd_kafka_toppar_destroy(rktp);
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
- return -1;
- }
-
- rd_kafka_toppar_op_fetch_start(rktp, RD_KAFKA_FETCH_POS(offset, -1),
- rkq, RD_KAFKA_NO_REPLYQ);
-
- rd_kafka_toppar_destroy(rktp);
-
- rd_kafka_set_last_error(0, 0);
- return 0;
-}
-
-
-
-int rd_kafka_consume_start(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int64_t offset) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "START",
- "Start consuming partition %" PRId32, partition);
- return rd_kafka_consume_start0(rkt, partition, offset, NULL);
-}
-
-int rd_kafka_consume_start_queue(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int64_t offset,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
-
- return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q);
-}
-
-
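-/* Illustrative usage sketch (hypothetical application code, not part of
- * librdkafka): driving the legacy/simple consumer API implemented above.
- * Assumes `rk` is a running RD_KAFKA_CONSUMER instance. */
-#include <inttypes.h>
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_simple_consume(rd_kafka_t *rk, const char *topic) {
-        rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic, NULL);
-        int i;
-
-        if (rd_kafka_consume_start(rkt, 0 /*partition*/,
-                                   RD_KAFKA_OFFSET_BEGINNING) == -1) {
-                rd_kafka_topic_destroy(rkt);
-                return;
-        }
-
-        for (i = 0; i < 100; i++) {
-                rd_kafka_message_t *rkm = rd_kafka_consume(rkt, 0, 1000);
-                if (!rkm)
-                        continue; /* timed out */
-                if (!rkm->err)
-                        printf("offset %" PRId64 ": %.*s\n", rkm->offset,
-                               (int)rkm->len, (const char *)rkm->payload);
-                rd_kafka_message_destroy(rkm);
-        }
-
-        rd_kafka_consume_stop(rkt, 0);
-        rd_kafka_topic_destroy(rkt);
-}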
-
-static RD_UNUSED int rd_kafka_consume_stop0(rd_kafka_toppar_t *rktp) {
- rd_kafka_q_t *tmpq = NULL;
- rd_kafka_resp_err_t err;
-
- rd_kafka_topic_wrlock(rktp->rktp_rkt);
- rd_kafka_toppar_lock(rktp);
- rd_kafka_toppar_desired_del(rktp);
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_topic_wrunlock(rktp->rktp_rkt);
-
- tmpq = rd_kafka_q_new(rktp->rktp_rkt->rkt_rk);
-
- rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_REPLYQ(tmpq, 0));
-
- /* Synchronisation: Wait for stop reply from broker thread */
- err = rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
- rd_kafka_q_destroy_owner(tmpq);
-
- rd_kafka_set_last_error(err, err ? EINVAL : 0);
-
- return err ? -1 : 0;
-}
-
-
-int rd_kafka_consume_stop(rd_kafka_topic_t *app_rkt, int32_t partition) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp;
- int r;
-
- if (partition == RD_KAFKA_PARTITION_UA) {
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
- return -1;
- }
-
- rd_kafka_topic_wrlock(rkt);
- if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
- !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
- rd_kafka_topic_wrunlock(rkt);
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- ESRCH);
- return -1;
- }
- rd_kafka_topic_wrunlock(rkt);
-
- r = rd_kafka_consume_stop0(rktp);
- /* set_last_error() called by stop0() */
-
- rd_kafka_toppar_destroy(rktp);
-
- return r;
-}
-
-
-
-rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int64_t offset,
- int timeout_ms) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp;
- rd_kafka_q_t *tmpq = NULL;
- rd_kafka_resp_err_t err;
- rd_kafka_replyq_t replyq = RD_KAFKA_NO_REPLYQ;
-
- /* FIXME: simple consumer check */
-
- if (partition == RD_KAFKA_PARTITION_UA)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- rd_kafka_topic_rdlock(rkt);
- if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
- !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
- rd_kafka_topic_rdunlock(rkt);
- return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- }
- rd_kafka_topic_rdunlock(rkt);
-
- if (timeout_ms) {
- tmpq = rd_kafka_q_new(rkt->rkt_rk);
- replyq = RD_KAFKA_REPLYQ(tmpq, 0);
- }
-
- if ((err = rd_kafka_toppar_op_seek(rktp, RD_KAFKA_FETCH_POS(offset, -1),
- replyq))) {
- if (tmpq)
- rd_kafka_q_destroy_owner(tmpq);
- rd_kafka_toppar_destroy(rktp);
- return err;
- }
-
- rd_kafka_toppar_destroy(rktp);
-
- if (tmpq) {
- err = rd_kafka_q_wait_result(tmpq, timeout_ms);
- rd_kafka_q_destroy_owner(tmpq);
- return err;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-rd_kafka_error_t *
-rd_kafka_seek_partitions(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions,
- int timeout_ms) {
- rd_kafka_q_t *tmpq = NULL;
- rd_kafka_topic_partition_t *rktpar;
- rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
- int cnt = 0;
-
- if (rk->rk_type != RD_KAFKA_CONSUMER)
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Must only be used on consumer instance");
-
- if (!partitions || partitions->cnt == 0)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "partitions must be specified");
-
- if (timeout_ms)
- tmpq = rd_kafka_q_new(rk);
-
- RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_resp_err_t err;
-
- rktp = rd_kafka_toppar_get2(
- rk, rktpar->topic, rktpar->partition,
- rd_false /*no-ua-on-miss*/, rd_false /*no-create-on-miss*/);
- if (!rktp) {
- rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- continue;
- }
-
- err = rd_kafka_toppar_op_seek(
- rktp, rd_kafka_topic_partition_get_fetch_pos(rktpar),
- RD_KAFKA_REPLYQ(tmpq, 0));
- if (err) {
- rktpar->err = err;
- } else {
- rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
- cnt++;
- }
-
- rd_kafka_toppar_destroy(rktp); /* refcnt from toppar_get2() */
- }
-
- if (!timeout_ms)
- return NULL;
-
-
- while (cnt > 0) {
- rd_kafka_op_t *rko;
-
- rko =
- rd_kafka_q_pop(tmpq, rd_timeout_remains_us(abs_timeout), 0);
- if (!rko) {
- rd_kafka_q_destroy_owner(tmpq);
-
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Timed out waiting for %d remaining partition "
- "seek(s) to finish",
- cnt);
- }
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) {
- rd_kafka_q_destroy_owner(tmpq);
- rd_kafka_op_destroy(rko);
-
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY,
- "Instance is terminating");
- }
-
- rd_assert(rko->rko_rktp);
-
- rktpar = rd_kafka_topic_partition_list_find(
- partitions, rko->rko_rktp->rktp_rkt->rkt_topic->str,
- rko->rko_rktp->rktp_partition);
- rd_assert(rktpar);
-
- rktpar->err = rko->rko_err;
-
- rd_kafka_op_destroy(rko);
-
- cnt--;
- }
-
- rd_kafka_q_destroy_owner(tmpq);
-
- return NULL;
-}
-
-
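-/* Illustrative usage sketch (hypothetical application code): seeking a set
- * of partitions in a single call with rd_kafka_seek_partitions() above.
- * Per-partition results are reported in each element's .err field. */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_seek_all(rd_kafka_t *rk, const char *topic) {
-        rd_kafka_topic_partition_list_t *parts =
-            rd_kafka_topic_partition_list_new(2);
-        rd_kafka_error_t *error;
-
-        rd_kafka_topic_partition_list_add(parts, topic, 0)->offset =
-            RD_KAFKA_OFFSET_BEGINNING;
-        rd_kafka_topic_partition_list_add(parts, topic, 1)->offset = 1234;
-
-        error = rd_kafka_seek_partitions(rk, parts, 5000 /*timeout ms*/);
-        if (error) {
-                fprintf(stderr, "seek failed: %s\n",
-                        rd_kafka_error_string(error));
-                rd_kafka_error_destroy(error);
-        }
-
-        rd_kafka_topic_partition_list_destroy(parts);
-}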
-
-static ssize_t rd_kafka_consume_batch0(rd_kafka_q_t *rkq,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size) {
- /* Populate application's rkmessages array. */
- return rd_kafka_q_serve_rkmessages(rkq, timeout_ms, rkmessages,
- rkmessages_size);
-}
-
-
-ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp;
- ssize_t cnt;
-
- /* Get toppar */
- rd_kafka_topic_rdlock(rkt);
- rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/);
- if (unlikely(!rktp))
- rktp = rd_kafka_toppar_desired_get(rkt, partition);
- rd_kafka_topic_rdunlock(rkt);
-
- if (unlikely(!rktp)) {
- /* No such toppar known */
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- ESRCH);
- return -1;
- }
-
- /* Populate application's rkmessages array. */
- cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms,
- rkmessages, rkmessages_size);
-
- rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */
-
- rd_kafka_set_last_error(0, 0);
-
- return cnt;
-}
-
-ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size) {
- /* Populate application's rkmessages array. */
- return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms, rkmessages,
- rkmessages_size);
-}
-
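-/* Illustrative usage sketch (hypothetical application code): fetching up
- * to 64 messages in one call; each returned message must be destroyed by
- * the application. */
-#include <librdkafka/rdkafka.h>
-
-static void example_consume_batch(rd_kafka_topic_t *rkt) {
-        rd_kafka_message_t *msgs[64];
-        ssize_t i, n;
-
-        n = rd_kafka_consume_batch(rkt, 0 /*partition*/, 1000 /*timeout ms*/,
-                                   msgs, 64);
-        for (i = 0; i < n; i++) {
-                /* ... process msgs[i] ... */
-                rd_kafka_message_destroy(msgs[i]);
-        }
-}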
-
-struct consume_ctx {
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque);
- void *opaque;
-};
-
-
-/**
- * Trampoline for application's consume_cb()
- */
-static rd_kafka_op_res_t rd_kafka_consume_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
- struct consume_ctx *ctx = opaque;
- rd_kafka_message_t *rkmessage;
-
- if (unlikely(rd_kafka_op_version_outdated(rko, 0)) ||
- rko->rko_type == RD_KAFKA_OP_BARRIER) {
- rd_kafka_op_destroy(rko);
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- rkmessage = rd_kafka_message_get(rko);
-
- rd_kafka_fetch_op_app_prepare(rk, rko);
-
- ctx->consume_cb(rkmessage, ctx->opaque);
-
- rd_kafka_op_destroy(rko);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-static rd_kafka_op_res_t rd_kafka_consume_callback0(
- rd_kafka_q_t *rkq,
- int timeout_ms,
- int max_cnt,
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque),
- void *opaque) {
- struct consume_ctx ctx = {.consume_cb = consume_cb, .opaque = opaque};
- rd_kafka_op_res_t res;
-
- if (timeout_ms)
- rd_kafka_app_poll_blocking(rkq->rkq_rk);
-
- res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt, RD_KAFKA_Q_CB_RETURN,
- rd_kafka_consume_cb, &ctx);
-
- rd_kafka_app_polled(rkq->rkq_rk);
-
- return res;
-}
-
-
-int rd_kafka_consume_callback(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int timeout_ms,
- void (*consume_cb)(rd_kafka_message_t *rkmessage,
- void *opaque),
- void *opaque) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp;
- int r;
-
- /* Get toppar */
- rd_kafka_topic_rdlock(rkt);
- rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/);
- if (unlikely(!rktp))
- rktp = rd_kafka_toppar_desired_get(rkt, partition);
- rd_kafka_topic_rdunlock(rkt);
-
- if (unlikely(!rktp)) {
- /* No such toppar known */
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- ESRCH);
- return -1;
- }
-
- r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms,
- rkt->rkt_conf.consume_callback_max_msgs,
- consume_cb, opaque);
-
- rd_kafka_toppar_destroy(rktp);
-
- rd_kafka_set_last_error(0, 0);
-
- return r;
-}
-
-
-
-int rd_kafka_consume_callback_queue(
- rd_kafka_queue_t *rkqu,
- int timeout_ms,
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque),
- void *opaque) {
- return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0,
- consume_cb, opaque);
-}
-
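-/* Illustrative usage sketch (hypothetical application code): push-style
- * consumption where librdkafka invokes the callback for each message
- * while the call is being served. */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_msg_cb(rd_kafka_message_t *rkmessage, void *opaque) {
-        int *seen = opaque;
-        if (!rkmessage->err)
-                (*seen)++;
-        /* The message is owned and destroyed by librdkafka after this
-         * callback returns (see rd_kafka_consume_cb() above). */
-}
-
-static void example_consume_callback(rd_kafka_topic_t *rkt) {
-        int seen = 0;
-        rd_kafka_consume_callback(rkt, 0 /*partition*/, 1000 /*timeout ms*/,
-                                  example_msg_cb, &seen);
-        printf("saw %d message(s)\n", seen);
-}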
-
-/**
- * Serve queue 'rkq' and return one message.
- * Serving the queue will also fire any callbacks registered for matching
- * events, including consume_cb(), in which case no message will be
- * returned.
- */
-static rd_kafka_message_t *
-rd_kafka_consume0(rd_kafka_t *rk, rd_kafka_q_t *rkq, int timeout_ms) {
- rd_kafka_op_t *rko;
- rd_kafka_message_t *rkmessage = NULL;
- rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
- if (timeout_ms)
- rd_kafka_app_poll_blocking(rk);
-
- rd_kafka_yield_thread = 0;
- while ((
- rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0))) {
- rd_kafka_op_res_t res;
-
- res =
- rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL);
-
- if (res == RD_KAFKA_OP_RES_PASS)
- break;
-
- if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
- rd_kafka_yield_thread)) {
- /* Callback called rd_kafka_yield(), we must
- * stop dispatching the queue and return. */
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR, EINTR);
- rd_kafka_app_polled(rk);
- return NULL;
- }
-
- /* Message was handled by callback. */
- continue;
- }
-
- if (!rko) {
- /* Timeout reached with no op returned. */
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
- ETIMEDOUT);
- rd_kafka_app_polled(rk);
- return NULL;
- }
-
- rd_kafka_assert(rk, rko->rko_type == RD_KAFKA_OP_FETCH ||
- rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR);
-
- /* Get rkmessage from rko */
- rkmessage = rd_kafka_message_get(rko);
-
- /* Store offset, etc */
- rd_kafka_fetch_op_app_prepare(rk, rko);
-
- rd_kafka_set_last_error(0, 0);
-
- rd_kafka_app_polled(rk);
-
- return rkmessage;
-}
-
-rd_kafka_message_t *
-rd_kafka_consume(rd_kafka_topic_t *app_rkt, int32_t partition, int timeout_ms) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp;
- rd_kafka_message_t *rkmessage;
-
- rd_kafka_topic_rdlock(rkt);
- rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/);
- if (unlikely(!rktp))
- rktp = rd_kafka_toppar_desired_get(rkt, partition);
- rd_kafka_topic_rdunlock(rkt);
-
- if (unlikely(!rktp)) {
- /* No such toppar known */
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- ESRCH);
- return NULL;
- }
-
- rkmessage =
- rd_kafka_consume0(rkt->rkt_rk, rktp->rktp_fetchq, timeout_ms);
-
- rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */
-
- return rkmessage;
-}
-
-
-rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu,
- int timeout_ms) {
- return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms);
-}
-
-
-
-rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk) {
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- rd_kafka_q_fwd_set(rk->rk_rep, rkcg->rkcg_q);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
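-/* Illustrative usage sketch (hypothetical application code): the canonical
- * high-level consumer loop built on rd_kafka_poll_set_consumer() and
- * rd_kafka_consumer_poll(). `run` is assumed to be cleared elsewhere
- * (e.g., from a signal handler). */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_consumer_loop(rd_kafka_t *rk, volatile int *run) {
-        rd_kafka_poll_set_consumer(rk); /* redirect rk_rep to cgrp queue */
-
-        while (*run) {
-                rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
-                if (!rkm)
-                        continue; /* timed out */
-                if (rkm->err)
-                        fprintf(stderr, "consume error: %s\n",
-                                rd_kafka_message_errstr(rkm));
-                else {
-                        /* ... process rkm->payload (rkm->len bytes) ... */
-                }
-                rd_kafka_message_destroy(rkm);
-        }
-
-        rd_kafka_consumer_close(rk);
-}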
-
-rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms) {
- rd_kafka_cgrp_t *rkcg;
-
- if (unlikely(!(rkcg = rd_kafka_cgrp_get(rk)))) {
- rd_kafka_message_t *rkmessage = rd_kafka_message_new();
- rkmessage->err = RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
- return rkmessage;
- }
-
- return rd_kafka_consume0(rk, rkcg->rkcg_q, timeout_ms);
-}
-
-
-/**
- * @brief Consumer close.
- *
- * @param rkq The consumer group queue will be forwarded to this queue,
- *        which must be served (rebalance events) by the application/caller
- * until rd_kafka_consumer_closed() returns true.
- * If the consumer is not in a joined state, no rebalance events
- * will be emitted.
- */
-static rd_kafka_error_t *rd_kafka_consumer_close_q(rd_kafka_t *rk,
- rd_kafka_q_t *rkq) {
- rd_kafka_cgrp_t *rkcg;
- rd_kafka_error_t *error = NULL;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP,
- "Consume close called on non-group "
- "consumer");
-
- if (rd_atomic32_get(&rkcg->rkcg_terminated))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY,
- "Consumer already closed");
-
- /* If a fatal error has been raised and this is an
- * explicit consumer_close() from the application we return
- * a fatal error. Otherwise let the "silent" no_consumer_close
- * logic be performed to clean up properly. */
- if (!rd_kafka_destroy_flags_no_consumer_close(rk) &&
- (error = rd_kafka_get_fatal_error(rk)))
- return error;
-
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE",
- "Closing consumer");
-
- /* Redirect cgrp queue to the rebalance queue to make sure all posted
- * ops (e.g., rebalance callbacks) are served by
- * the application/caller. */
- rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq);
-
- /* Tell cgrp subsystem to terminate. A TERMINATE op will be posted
- * on the rkq when done. */
- rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */
-
- return error;
-}
-
-rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu) {
- if (!rkqu)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Queue must be specified");
- return rd_kafka_consumer_close_q(rk, rkqu->rkqu_q);
-}
-
-rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- rd_kafka_q_t *rkq;
-
- /* Create a temporary reply queue to handle the TERMINATE reply op. */
- rkq = rd_kafka_q_new(rk);
-
- /* Initiate the close (async) */
- error = rd_kafka_consumer_close_q(rk, rkq);
- if (error) {
- err = rd_kafka_error_is_fatal(error)
- ? RD_KAFKA_RESP_ERR__FATAL
- : rd_kafka_error_code(error);
- rd_kafka_error_destroy(error);
- rd_kafka_q_destroy_owner(rkq);
- return err;
- }
-
-        /* Disable the queue if termination is immediate or the user
-         * does not want the blocking consumer_close() behaviour; this will
-         * cause any ops posted for this queue (such as rebalance) to
-         * be destroyed.
-         */
- if (rd_kafka_destroy_flags_no_consumer_close(rk)) {
- rd_kafka_dbg(rk, CONSUMER, "CLOSE",
- "Disabling and purging temporary queue to quench "
- "close events");
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_q_disable(rkq);
- /* Purge ops already enqueued */
- rd_kafka_q_purge(rkq);
- } else {
- rd_kafka_op_t *rko;
- rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Waiting for close events");
- while ((rko = rd_kafka_q_pop(rkq, RD_POLL_INFINITE, 0))) {
- rd_kafka_op_res_t res;
- if ((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) ==
- RD_KAFKA_OP_TERMINATE) {
- err = rko->rko_err;
- rd_kafka_op_destroy(rko);
- break;
- }
- /* Handle callbacks */
- res = rd_kafka_poll_cb(rk, rkq, rko,
- RD_KAFKA_Q_CB_RETURN, NULL);
- if (res == RD_KAFKA_OP_RES_PASS)
- rd_kafka_op_destroy(rko);
- /* Ignore YIELD, we need to finish */
- }
- }
-
- rd_kafka_q_destroy_owner(rkq);
-
- if (err)
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE",
- "Consumer closed with error: %s",
- rd_kafka_err2str(err));
- else
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE",
- "Consumer closed");
-
- return err;
-}
-
-
-int rd_kafka_consumer_closed(rd_kafka_t *rk) {
- if (unlikely(!rk->rk_cgrp))
- return 0;
-
- return rd_atomic32_get(&rk->rk_cgrp->rkcg_terminated);
-}
-
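-/* Illustrative usage sketch (hypothetical application code): the
- * non-blocking close path. The temporary queue receives rebalance events
- * which we serve ourselves until rd_kafka_consumer_closed() flips; the
- * rd_kafka_assign(rk, NULL) fallback mirrors the forced unassign in
- * rd_kafka_poll_cb() below and ignores cooperative-assignor subtleties. */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_close_via_queue(rd_kafka_t *rk) {
-        rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
-        rd_kafka_error_t *error = rd_kafka_consumer_close_queue(rk, rkqu);
-
-        if (error) {
-                fprintf(stderr, "close failed: %s\n",
-                        rd_kafka_error_string(error));
-                rd_kafka_error_destroy(error);
-        } else {
-                while (!rd_kafka_consumer_closed(rk)) {
-                        rd_kafka_event_t *ev = rd_kafka_queue_poll(rkqu, 100);
-                        if (!ev)
-                                continue;
-                        if (rd_kafka_event_type(ev) ==
-                            RD_KAFKA_EVENT_REBALANCE)
-                                rd_kafka_assign(rk, NULL); /* revoke all */
-                        rd_kafka_event_destroy(ev);
-                }
-        }
-
-        rd_kafka_queue_destroy(rkqu);
-}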
-
-rd_kafka_resp_err_t
-rd_kafka_committed(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions,
- int timeout_ms) {
- rd_kafka_q_t *rkq;
- rd_kafka_resp_err_t err;
- rd_kafka_cgrp_t *rkcg;
- rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
- if (!partitions)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- /* Set default offsets. */
- rd_kafka_topic_partition_list_reset_offsets(partitions,
- RD_KAFKA_OFFSET_INVALID);
-
- rkq = rd_kafka_q_new(rk);
-
- do {
- rd_kafka_op_t *rko;
- int state_version = rd_kafka_brokers_get_state_version(rk);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
- rd_kafka_op_set_replyq(rko, rkq, NULL);
-
- /* Issue #827
- * Copy partition list to avoid use-after-free if we time out
- * here, the app frees the list, and then cgrp starts
- * processing the op. */
- rko->rko_u.offset_fetch.partitions =
- rd_kafka_topic_partition_list_copy(partitions);
- rko->rko_u.offset_fetch.require_stable_offsets =
- rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED;
- rko->rko_u.offset_fetch.do_free = 1;
-
- if (!rd_kafka_q_enq(rkcg->rkcg_ops, rko)) {
- err = RD_KAFKA_RESP_ERR__DESTROY;
- break;
- }
-
- rko =
- rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0);
- if (rko) {
- if (!(err = rko->rko_err))
- rd_kafka_topic_partition_list_update(
- partitions,
- rko->rko_u.offset_fetch.partitions);
- else if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
- err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
- !rd_kafka_brokers_wait_state_change(
- rk, state_version,
- rd_timeout_remains(abs_timeout)))
- err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- rd_kafka_op_destroy(rko);
- } else
- err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- } while (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__WAIT_COORD);
-
- rd_kafka_q_destroy_owner(rkq);
-
- return err;
-}
-
-
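-/* Illustrative usage sketch (hypothetical application code): fetching the
- * committed offsets for two partitions of a topic. */
-#include <inttypes.h>
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_committed(rd_kafka_t *rk, const char *topic) {
-        rd_kafka_topic_partition_list_t *parts =
-            rd_kafka_topic_partition_list_new(2);
-        int i;
-
-        rd_kafka_topic_partition_list_add(parts, topic, 0);
-        rd_kafka_topic_partition_list_add(parts, topic, 1);
-
-        if (!rd_kafka_committed(rk, parts, 5000 /*timeout ms*/))
-                for (i = 0; i < parts->cnt; i++)
-                        printf("%s [%" PRId32 "]: committed offset %" PRId64
-                               "\n",
-                               parts->elems[i].topic,
-                               parts->elems[i].partition,
-                               parts->elems[i].offset);
-
-        rd_kafka_topic_partition_list_destroy(parts);
-}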
-
-rd_kafka_resp_err_t
-rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions) {
- int i;
-
- for (i = 0; i < partitions->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
- rd_kafka_toppar_t *rktp;
-
- if (!(rktp = rd_kafka_toppar_get2(rk, rktpar->topic,
- rktpar->partition, 0, 1))) {
- rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- rktpar->offset = RD_KAFKA_OFFSET_INVALID;
- continue;
- }
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_topic_partition_set_from_fetch_pos(rktpar,
- rktp->rktp_app_pos);
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_destroy(rktp);
-
- rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-struct _query_wmark_offsets_state {
- rd_kafka_resp_err_t err;
- const char *topic;
- int32_t partition;
- int64_t offsets[2];
- int offidx; /* next offset to set from response */
- rd_ts_t ts_end;
- int state_version; /* Broker state version */
-};
-
-static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- struct _query_wmark_offsets_state *state;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_t *rktpar;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* 'state' has gone out of scope when query_watermark..()
- * timed out and returned to the caller. */
- return;
- }
-
- state = opaque;
-
- offsets = rd_kafka_topic_partition_list_new(1);
- err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets,
- NULL);
- if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
- rd_kafka_topic_partition_list_destroy(offsets);
- return; /* Retrying */
- }
-
- /* Retry if no broker connection is available yet. */
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb &&
- rd_kafka_brokers_wait_state_change(
- rkb->rkb_rk, state->state_version,
- rd_timeout_remains(state->ts_end))) {
- /* Retry */
- state->state_version = rd_kafka_brokers_get_state_version(rk);
- request->rkbuf_retries = 0;
- if (rd_kafka_buf_retry(rkb, request)) {
- rd_kafka_topic_partition_list_destroy(offsets);
- return; /* Retry in progress */
- }
- /* FALLTHRU */
- }
-
- /* Partition not seen in response. */
- if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, state->topic,
- state->partition)))
- err = RD_KAFKA_RESP_ERR__BAD_MSG;
- else if (rktpar->err)
- err = rktpar->err;
- else
- state->offsets[state->offidx] = rktpar->offset;
-
- state->offidx++;
-
- if (err || state->offidx == 2) /* Error or Done */
- state->err = err;
-
- rd_kafka_topic_partition_list_destroy(offsets);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t *low,
- int64_t *high,
- int timeout_ms) {
- rd_kafka_q_t *rkq;
- struct _query_wmark_offsets_state state;
- rd_ts_t ts_end = rd_timeout_init(timeout_ms);
- rd_kafka_topic_partition_list_t *partitions;
- rd_kafka_topic_partition_t *rktpar;
- struct rd_kafka_partition_leader *leader;
- rd_list_t leaders;
- rd_kafka_resp_err_t err;
-
- partitions = rd_kafka_topic_partition_list_new(1);
- rktpar =
- rd_kafka_topic_partition_list_add(partitions, topic, partition);
-
- rd_list_init(&leaders, partitions->cnt,
- (void *)rd_kafka_partition_leader_destroy);
-
- err = rd_kafka_topic_partition_list_query_leaders(rk, partitions,
- &leaders, timeout_ms);
- if (err) {
- rd_list_destroy(&leaders);
- rd_kafka_topic_partition_list_destroy(partitions);
- return err;
- }
-
- leader = rd_list_elem(&leaders, 0);
-
- rkq = rd_kafka_q_new(rk);
-
- /* Due to KAFKA-1588 we need to send a request for each wanted offset,
- * in this case one for the low watermark and one for the high. */
- state.topic = topic;
- state.partition = partition;
- state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING;
- state.offsets[1] = RD_KAFKA_OFFSET_END;
- state.offidx = 0;
- state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
- state.ts_end = ts_end;
- state.state_version = rd_kafka_brokers_get_state_version(rk);
-
-
- rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
- rd_kafka_ListOffsetsRequest(
- leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0),
- rd_kafka_query_wmark_offsets_resp_cb, &state);
-
- rktpar->offset = RD_KAFKA_OFFSET_END;
- rd_kafka_ListOffsetsRequest(
- leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0),
- rd_kafka_query_wmark_offsets_resp_cb, &state);
-
- rd_kafka_topic_partition_list_destroy(partitions);
- rd_list_destroy(&leaders);
-
- /* Wait for reply (or timeout) */
- while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS &&
- rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK,
- rd_kafka_poll_cb,
- NULL) != RD_KAFKA_OP_RES_YIELD)
- ;
-
- rd_kafka_q_destroy_owner(rkq);
-
- if (state.err)
- return state.err;
- else if (state.offidx != 2)
- return RD_KAFKA_RESP_ERR__FAIL;
-
- /* We are not certain about the returned order. */
- if (state.offsets[0] < state.offsets[1]) {
- *low = state.offsets[0];
- *high = state.offsets[1];
- } else {
- *low = state.offsets[1];
- *high = state.offsets[0];
- }
-
- /* If partition is empty only one offset (the last) will be returned. */
- if (*low < 0 && *high >= 0)
- *low = *high;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
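-/* Illustrative usage sketch (hypothetical application code): querying the
- * broker for a partition's watermarks. rd_kafka_get_watermark_offsets()
- * below is the cached, non-blocking variant of the same information. */
-#include <inttypes.h>
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_watermarks(rd_kafka_t *rk, const char *topic) {
-        int64_t lo = 0, hi = 0;
-        rd_kafka_resp_err_t err = rd_kafka_query_watermark_offsets(
-            rk, topic, 0 /*partition*/, &lo, &hi, 5000 /*timeout ms*/);
-
-        if (err)
-                fprintf(stderr, "query failed: %s\n", rd_kafka_err2str(err));
-        else
-                printf("low %" PRId64 ", high %" PRId64 "\n", lo, hi);
-}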
-
-rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t *low,
- int64_t *high) {
- rd_kafka_toppar_t *rktp;
-
- rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1);
- if (!rktp)
- return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- rd_kafka_toppar_lock(rktp);
- *low = rktp->rktp_lo_offset;
- *high = rktp->rktp_hi_offset;
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief get_offsets_for_times() state
- */
-struct _get_offsets_for_times {
- rd_kafka_topic_partition_list_t *results;
- rd_kafka_resp_err_t err;
- int wait_reply;
- int state_version;
- rd_ts_t ts_end;
-};
-
-/**
- * @brief Handle ListOffsets responses
- */
-static void rd_kafka_get_offsets_for_times_resp_cb(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- struct _get_offsets_for_times *state;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* 'state' has gone out of scope when offsets_for_times()
- * timed out and returned to the caller. */
- return;
- }
-
- state = opaque;
-
- err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request,
- state->results, NULL);
- if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
- return; /* Retrying */
-
- /* Retry if no broker connection is available yet. */
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb &&
- rd_kafka_brokers_wait_state_change(
- rkb->rkb_rk, state->state_version,
- rd_timeout_remains(state->ts_end))) {
- /* Retry */
- state->state_version = rd_kafka_brokers_get_state_version(rk);
- request->rkbuf_retries = 0;
- if (rd_kafka_buf_retry(rkb, request))
- return; /* Retry in progress */
- /* FALLTHRU */
- }
-
- if (err && !state->err)
- state->err = err;
-
- state->wait_reply--;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_offsets_for_times(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets,
- int timeout_ms) {
- rd_kafka_q_t *rkq;
- struct _get_offsets_for_times state = RD_ZERO_INIT;
- rd_ts_t ts_end = rd_timeout_init(timeout_ms);
- rd_list_t leaders;
- int i;
- rd_kafka_resp_err_t err;
- struct rd_kafka_partition_leader *leader;
- int tmout;
-
- if (offsets->cnt == 0)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- rd_list_init(&leaders, offsets->cnt,
- (void *)rd_kafka_partition_leader_destroy);
-
- err = rd_kafka_topic_partition_list_query_leaders(rk, offsets, &leaders,
- timeout_ms);
- if (err) {
- rd_list_destroy(&leaders);
- return err;
- }
-
-
- rkq = rd_kafka_q_new(rk);
-
- state.wait_reply = 0;
- state.results = rd_kafka_topic_partition_list_new(offsets->cnt);
-
- /* For each leader send a request for its partitions */
- RD_LIST_FOREACH(leader, &leaders, i) {
- state.wait_reply++;
- rd_kafka_ListOffsetsRequest(
- leader->rkb, leader->partitions, RD_KAFKA_REPLYQ(rkq, 0),
- rd_kafka_get_offsets_for_times_resp_cb, &state);
- }
-
- rd_list_destroy(&leaders);
-
- /* Wait for reply (or timeout) */
- while (state.wait_reply > 0 &&
- !rd_timeout_expired((tmout = rd_timeout_remains(ts_end))))
- rd_kafka_q_serve(rkq, tmout, 0, RD_KAFKA_Q_CB_CALLBACK,
- rd_kafka_poll_cb, NULL);
-
- rd_kafka_q_destroy_owner(rkq);
-
- if (state.wait_reply > 0 && !state.err)
- state.err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- /* Then update the queried partitions. */
- if (!state.err)
- rd_kafka_topic_partition_list_update(offsets, state.results);
-
- rd_kafka_topic_partition_list_destroy(state.results);
-
- return state.err;
-}
-
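-/* Illustrative usage sketch (hypothetical application code): resolving the
- * earliest offset at or after a wall-clock timestamp. On input each
- * element's .offset carries the timestamp in milliseconds; on output it
- * holds the resolved offset (check each element's .err as well). */
-#include <inttypes.h>
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_offsets_for_times(rd_kafka_t *rk, const char *topic,
-                                      int64_t ts_ms) {
-        rd_kafka_topic_partition_list_t *offsets =
-            rd_kafka_topic_partition_list_new(1);
-
-        rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset = ts_ms;
-
-        if (!rd_kafka_offsets_for_times(rk, offsets, 5000 /*timeout ms*/))
-                printf("offset at %" PRId64 " ms: %" PRId64 "\n", ts_ms,
-                       offsets->elems[0].offset);
-
-        rd_kafka_topic_partition_list_destroy(offsets);
-}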
-
-/**
- * @brief rd_kafka_poll() (and similar) op callback handler.
- *        Will either call the registered callback (depending on cb_type and
- *        op type) or return the op to the application, if applicable
- *        (e.g., fetch message).
- *
- * @returns RD_KAFKA_OP_RES_HANDLED if op was handled, else one of the
- * other res types (such as OP_RES_PASS).
- *
- * @locality any thread that serves op queues
- */
-rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
- rd_kafka_msg_t *rkm;
- rd_kafka_op_res_t res = RD_KAFKA_OP_RES_HANDLED;
-
- /* Special handling for events based on cb_type */
- if (cb_type == RD_KAFKA_Q_CB_EVENT && rd_kafka_event_setup(rk, rko)) {
- /* Return-as-event requested. */
- return RD_KAFKA_OP_RES_PASS; /* Return as event */
- }
-
- switch ((int)rko->rko_type) {
- case RD_KAFKA_OP_FETCH:
- if (!rk->rk_conf.consume_cb ||
- cb_type == RD_KAFKA_Q_CB_RETURN ||
- cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
- return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
- else {
- struct consume_ctx ctx = {.consume_cb =
- rk->rk_conf.consume_cb,
- .opaque = rk->rk_conf.opaque};
-
- return rd_kafka_consume_cb(rk, rkq, rko, cb_type, &ctx);
- }
- break;
-
- case RD_KAFKA_OP_REBALANCE:
- if (rk->rk_conf.rebalance_cb)
- rk->rk_conf.rebalance_cb(
- rk, rko->rko_err, rko->rko_u.rebalance.partitions,
- rk->rk_conf.opaque);
- else {
-                        /* If EVENT_REBALANCE is enabled but rebalance_cb
- * isn't, we need to perform a dummy assign for the
- * application. This might happen during termination
- * with consumer_close() */
- rd_kafka_dbg(rk, CGRP, "UNASSIGN",
- "Forcing unassign of %d partition(s)",
- rko->rko_u.rebalance.partitions
- ? rko->rko_u.rebalance.partitions->cnt
- : 0);
- rd_kafka_assign(rk, NULL);
- }
- break;
-
- case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
- if (!rko->rko_u.offset_commit.cb)
- return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
- rko->rko_u.offset_commit.cb(rk, rko->rko_err,
- rko->rko_u.offset_commit.partitions,
- rko->rko_u.offset_commit.opaque);
- break;
-
- case RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY:
- /* Reply from toppar FETCH_STOP */
- rd_kafka_assignment_partition_stopped(rk, rko->rko_rktp);
- break;
-
- case RD_KAFKA_OP_CONSUMER_ERR:
- /* rd_kafka_consumer_poll() (_Q_CB_CONSUMER):
- * Consumer errors are returned to the application
- * as rkmessages, not error callbacks.
- *
- * rd_kafka_poll() (_Q_CB_GLOBAL):
- * convert to ERR op (fallthru)
- */
- if (cb_type == RD_KAFKA_Q_CB_RETURN ||
- cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) {
- /* return as message_t to application */
- return RD_KAFKA_OP_RES_PASS;
- }
- /* FALLTHRU */
-
- case RD_KAFKA_OP_ERR:
- if (rk->rk_conf.error_cb)
- rk->rk_conf.error_cb(rk, rko->rko_err,
- rko->rko_u.err.errstr,
- rk->rk_conf.opaque);
- else
- rd_kafka_log(rk, LOG_ERR, "ERROR", "%s: %s",
- rk->rk_name, rko->rko_u.err.errstr);
- break;
-
- case RD_KAFKA_OP_DR:
- /* Delivery report:
- * call application DR callback for each message. */
- while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) {
- rd_kafka_message_t *rkmessage;
-
- TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, rkm,
- rkm_link);
-
- rkmessage = rd_kafka_message_get_from_rkm(rko, rkm);
-
- if (likely(rk->rk_conf.dr_msg_cb != NULL)) {
- rk->rk_conf.dr_msg_cb(rk, rkmessage,
- rk->rk_conf.opaque);
-
- } else if (rk->rk_conf.dr_cb) {
- rk->rk_conf.dr_cb(
- rk, rkmessage->payload, rkmessage->len,
- rkmessage->err, rk->rk_conf.opaque,
- rkmessage->_private);
- } else if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) {
- rd_kafka_log(
- rk, LOG_WARNING, "DRDROP",
- "Dropped delivery report for "
- "message to "
- "%s [%" PRId32
- "] (%s) with "
- "opaque %p: flush() or poll() "
- "should not be called when "
- "EVENT_DR is enabled",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition,
- rd_kafka_err2name(rkmessage->err),
- rkmessage->_private);
- } else {
- rd_assert(!*"BUG: neither a delivery report "
-                                          "callback nor EVENT_DR flag set");
- }
-
- rd_kafka_msg_destroy(rk, rkm);
-
- if (unlikely(rd_kafka_yield_thread)) {
- /* Callback called yield(),
- * re-enqueue the op (if there are any
- * remaining messages). */
- if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq.rkmq_msgs))
- rd_kafka_q_reenq(rkq, rko);
- else
- rd_kafka_op_destroy(rko);
- return RD_KAFKA_OP_RES_YIELD;
- }
- }
-
- rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
-
- break;
-
- case RD_KAFKA_OP_THROTTLE:
- if (rk->rk_conf.throttle_cb)
- rk->rk_conf.throttle_cb(
- rk, rko->rko_u.throttle.nodename,
- rko->rko_u.throttle.nodeid,
- rko->rko_u.throttle.throttle_time,
- rk->rk_conf.opaque);
- break;
-
- case RD_KAFKA_OP_STATS:
- /* Statistics */
- if (rk->rk_conf.stats_cb &&
- rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json,
- rko->rko_u.stats.json_len,
- rk->rk_conf.opaque) == 1)
- rko->rko_u.stats.json =
- NULL; /* Application wanted json ptr */
- break;
-
- case RD_KAFKA_OP_LOG:
- if (likely(rk->rk_conf.log_cb &&
- rk->rk_conf.log_level >= rko->rko_u.log.level))
- rk->rk_conf.log_cb(rk, rko->rko_u.log.level,
- rko->rko_u.log.fac,
- rko->rko_u.log.str);
- break;
-
- case RD_KAFKA_OP_TERMINATE:
- /* nop: just a wake-up */
- res = RD_KAFKA_OP_RES_YIELD;
- rd_kafka_op_destroy(rko);
- break;
-
- case RD_KAFKA_OP_CREATETOPICS:
- case RD_KAFKA_OP_DELETETOPICS:
- case RD_KAFKA_OP_CREATEPARTITIONS:
- case RD_KAFKA_OP_ALTERCONFIGS:
- case RD_KAFKA_OP_DESCRIBECONFIGS:
- case RD_KAFKA_OP_DELETERECORDS:
- case RD_KAFKA_OP_DELETEGROUPS:
- case RD_KAFKA_OP_ADMIN_FANOUT:
- case RD_KAFKA_OP_CREATEACLS:
- case RD_KAFKA_OP_DESCRIBEACLS:
- case RD_KAFKA_OP_DELETEACLS:
- /* Calls op_destroy() from worker callback,
- * when the time comes. */
- res = rd_kafka_op_call(rk, rkq, rko);
- break;
-
- case RD_KAFKA_OP_ADMIN_RESULT:
- if (cb_type == RD_KAFKA_Q_CB_RETURN ||
- cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
- return RD_KAFKA_OP_RES_PASS; /* Don't handle here */
-
- /* Op is silently destroyed below */
- break;
-
- case RD_KAFKA_OP_TXN:
- /* Must only be handled by rdkafka main thread */
- rd_assert(thrd_is_current(rk->rk_thread));
- res = rd_kafka_op_call(rk, rkq, rko);
- break;
-
- case RD_KAFKA_OP_BARRIER:
- break;
-
- case RD_KAFKA_OP_PURGE:
- rd_kafka_purge(rk, rko->rko_u.purge.flags);
- break;
-
- default:
- /* If op has a callback set (e.g., OAUTHBEARER_REFRESH),
- * call it. */
- if (rko->rko_type & RD_KAFKA_OP_CB) {
- res = rd_kafka_op_call(rk, rkq, rko);
- break;
- }
-
- RD_BUG("Can't handle op type %s (0x%x)",
- rd_kafka_op2str(rko->rko_type), rko->rko_type);
- break;
- }
-
- if (res == RD_KAFKA_OP_RES_HANDLED)
- rd_kafka_op_destroy(rko);
-
- return res;
-}
-
-int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) {
- int r;
-
- r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK,
- rd_kafka_poll_cb, NULL);
-
- return r;
-}
-
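-/* Illustrative usage sketch (hypothetical application code): producing a
- * message and serving the reply queue; delivery reports, error, log and
- * statistics callbacks are all dispatched from rd_kafka_poll() via
- * rd_kafka_poll_cb() above. */
-#include <stdio.h>
-#include <string.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_produce_and_serve(rd_kafka_t *rk, const char *topic) {
-        const char *msg = "hello";
-        rd_kafka_resp_err_t err = rd_kafka_producev(
-            rk, RD_KAFKA_V_TOPIC(topic),
-            RD_KAFKA_V_VALUE((void *)msg, strlen(msg)),
-            RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
-
-        if (err)
-                fprintf(stderr, "produce failed: %s\n",
-                        rd_kafka_err2str(err));
-
-        rd_kafka_poll(rk, 100 /*block up to 100 ms*/);
-}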
-
-rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_q_pop_serve(rkqu->rkqu_q, rd_timeout_us(timeout_ms), 0,
- RD_KAFKA_Q_CB_EVENT, rd_kafka_poll_cb, NULL);
-
- if (!rko)
- return NULL;
-
- return rko;
-}
-
-int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms) {
- int r;
-
- r = rd_kafka_q_serve(rkqu->rkqu_q, timeout_ms, 0,
- RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL);
-
- return r;
-}
-
-
-
-static void
-rd_kafka_toppar_dump(FILE *fp, const char *indent, rd_kafka_toppar_t *rktp) {
-
- fprintf(fp,
- "%s%.*s [%" PRId32
- "] broker %s, "
- "leader_id %s\n",
- indent, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rktp->rktp_broker ? rktp->rktp_broker->rkb_name : "none",
- rktp->rktp_leader ? rktp->rktp_leader->rkb_name : "none");
- fprintf(fp,
- "%s refcnt %i\n"
- "%s msgq: %i messages\n"
- "%s xmit_msgq: %i messages\n"
- "%s total: %" PRIu64 " messages, %" PRIu64 " bytes\n",
- indent, rd_refcnt_get(&rktp->rktp_refcnt), indent,
- rktp->rktp_msgq.rkmq_msg_cnt, indent,
- rktp->rktp_xmit_msgq.rkmq_msg_cnt, indent,
- rd_atomic64_get(&rktp->rktp_c.tx_msgs),
- rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes));
-}
-
-static void rd_kafka_broker_dump(FILE *fp, rd_kafka_broker_t *rkb, int locks) {
- rd_kafka_toppar_t *rktp;
-
- if (locks)
- rd_kafka_broker_lock(rkb);
- fprintf(fp,
- " rd_kafka_broker_t %p: %s NodeId %" PRId32
- " in state %s (for %.3fs)\n",
- rkb, rkb->rkb_name, rkb->rkb_nodeid,
- rd_kafka_broker_state_names[rkb->rkb_state],
- rkb->rkb_ts_state
- ? (float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f
- : 0.0f);
- fprintf(fp, " refcnt %i\n", rd_refcnt_get(&rkb->rkb_refcnt));
- fprintf(fp, " outbuf_cnt: %i waitresp_cnt: %i\n",
- rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt),
- rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt));
- fprintf(fp,
- " %" PRIu64 " messages sent, %" PRIu64
- " bytes, "
- "%" PRIu64 " errors, %" PRIu64
- " timeouts\n"
- " %" PRIu64 " messages received, %" PRIu64
- " bytes, "
- "%" PRIu64
- " errors\n"
- " %" PRIu64 " messageset transmissions were retried\n",
- rd_atomic64_get(&rkb->rkb_c.tx),
- rd_atomic64_get(&rkb->rkb_c.tx_bytes),
- rd_atomic64_get(&rkb->rkb_c.tx_err),
- rd_atomic64_get(&rkb->rkb_c.req_timeouts),
- rd_atomic64_get(&rkb->rkb_c.rx),
- rd_atomic64_get(&rkb->rkb_c.rx_bytes),
- rd_atomic64_get(&rkb->rkb_c.rx_err),
- rd_atomic64_get(&rkb->rkb_c.tx_retries));
-
- fprintf(fp, " %i toppars:\n", rkb->rkb_toppar_cnt);
- TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink)
- rd_kafka_toppar_dump(fp, " ", rktp);
- if (locks) {
- rd_kafka_broker_unlock(rkb);
- }
-}
-
-
-static void rd_kafka_dump0(FILE *fp, rd_kafka_t *rk, int locks) {
- rd_kafka_broker_t *rkb;
- rd_kafka_topic_t *rkt;
- rd_kafka_toppar_t *rktp;
- int i;
- unsigned int tot_cnt;
- size_t tot_size;
-
- rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
-
- if (locks)
- rd_kafka_rdlock(rk);
-#if ENABLE_DEVEL
- fprintf(fp, "rd_kafka_op_cnt: %d\n", rd_atomic32_get(&rd_kafka_op_cnt));
-#endif
- fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name);
-
- fprintf(fp, " producer.msg_cnt %u (%" PRIusz " bytes)\n", tot_cnt,
- tot_size);
- fprintf(fp, " rk_rep reply queue: %i ops\n",
- rd_kafka_q_len(rk->rk_rep));
-
- fprintf(fp, " brokers:\n");
- if (locks)
- mtx_lock(&rk->rk_internal_rkb_lock);
- if (rk->rk_internal_rkb)
- rd_kafka_broker_dump(fp, rk->rk_internal_rkb, locks);
- if (locks)
- mtx_unlock(&rk->rk_internal_rkb_lock);
-
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- rd_kafka_broker_dump(fp, rkb, locks);
- }
-
- fprintf(fp, " cgrp:\n");
- if (rk->rk_cgrp) {
- rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
- fprintf(fp, " %.*s in state %s, flags 0x%x\n",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rkcg->rkcg_flags);
- fprintf(fp, " coord_id %" PRId32 ", broker %s\n",
- rkcg->rkcg_coord_id,
- rkcg->rkcg_curr_coord
- ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
- : "(none)");
-
- fprintf(fp, " toppars:\n");
- RD_LIST_FOREACH(rktp, &rkcg->rkcg_toppars, i) {
- fprintf(fp, " %.*s [%" PRId32 "] in state %s\n",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state]);
- }
- }
-
- fprintf(fp, " topics:\n");
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- fprintf(fp,
- " %.*s with %" PRId32
- " partitions, state %s, "
- "refcnt %i\n",
- RD_KAFKAP_STR_PR(rkt->rkt_topic),
- rkt->rkt_partition_cnt,
- rd_kafka_topic_state_names[rkt->rkt_state],
- rd_refcnt_get(&rkt->rkt_refcnt));
- if (rkt->rkt_ua)
- rd_kafka_toppar_dump(fp, " ", rkt->rkt_ua);
-                if (!rd_list_empty(&rkt->rkt_desp)) {
- fprintf(fp, " desired partitions:");
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
- fprintf(fp, " %" PRId32, rktp->rktp_partition);
- fprintf(fp, "\n");
- }
- }
-
- fprintf(fp, "\n");
- rd_kafka_metadata_cache_dump(fp, rk);
-
- if (locks)
- rd_kafka_rdunlock(rk);
-}
-
-void rd_kafka_dump(FILE *fp, rd_kafka_t *rk) {
- if (rk)
- rd_kafka_dump0(fp, rk, 1 /*locks*/);
-}
-
-
-
-const char *rd_kafka_name(const rd_kafka_t *rk) {
- return rk->rk_name;
-}
-
-rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) {
- return rk->rk_type;
-}
-
-
-char *rd_kafka_memberid(const rd_kafka_t *rk) {
- rd_kafka_op_t *rko;
- rd_kafka_cgrp_t *rkcg;
- char *memberid;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return NULL;
-
- rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME);
- if (!rko)
- return NULL;
- memberid = rko->rko_u.name.str;
- rko->rko_u.name.str = NULL;
- rd_kafka_op_destroy(rko);
-
- return memberid;
-}
-
-
-char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms) {
- rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
- /* ClusterId is returned in Metadata >=V2 responses and
- * cached on the rk. If no cached value is available
- * it means no metadata has been received yet, or we're
- * using a lower protocol version
- * (e.g., lack of api.version.request=true). */
-
- while (1) {
- int remains_ms;
-
- rd_kafka_rdlock(rk);
-
- if (rk->rk_clusterid) {
- /* Cached clusterid available. */
- char *ret = rd_strdup(rk->rk_clusterid);
- rd_kafka_rdunlock(rk);
- return ret;
- } else if (rk->rk_ts_metadata > 0) {
- /* Metadata received but no clusterid,
- * this probably means the broker is too old
- * or api.version.request=false. */
- rd_kafka_rdunlock(rk);
- return NULL;
- }
-
- rd_kafka_rdunlock(rk);
-
- /* Wait for up to timeout_ms for a metadata refresh,
- * if permitted by application. */
- remains_ms = rd_timeout_remains(abs_timeout);
- if (rd_timeout_expired(remains_ms))
- return NULL;
-
- rd_kafka_metadata_cache_wait_change(rk, remains_ms);
- }
-
- return NULL;
-}
-
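-/* Illustrative usage sketch (hypothetical application code): the returned
- * string is owned by the caller; per the public API docs it should be
- * freed with rd_kafka_mem_free(). */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_print_clusterid(rd_kafka_t *rk) {
-        char *cid = rd_kafka_clusterid(rk, 5000 /*timeout ms*/);
-        if (cid) {
-                printf("cluster id: %s\n", cid);
-                rd_kafka_mem_free(rk, cid);
-        }
-}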
-
-int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms) {
- rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
- /* ControllerId is returned in Metadata >=V1 responses and
- * cached on the rk. If no cached value is available
- * it means no metadata has been received yet, or we're
- * using a lower protocol version
- * (e.g., lack of api.version.request=true). */
-
- while (1) {
- int remains_ms;
- int version;
-
- version = rd_kafka_brokers_get_state_version(rk);
-
- rd_kafka_rdlock(rk);
-
- if (rk->rk_controllerid != -1) {
- /* Cached controllerid available. */
- rd_kafka_rdunlock(rk);
- return rk->rk_controllerid;
- } else if (rk->rk_ts_metadata > 0) {
-                        /* Metadata received but no controllerid,
- * this probably means the broker is too old
- * or api.version.request=false. */
- rd_kafka_rdunlock(rk);
- return -1;
- }
-
- rd_kafka_rdunlock(rk);
-
- /* Wait for up to timeout_ms for a metadata refresh,
- * if permitted by application. */
- remains_ms = rd_timeout_remains(abs_timeout);
- if (rd_timeout_expired(remains_ms))
- return -1;
-
- rd_kafka_brokers_wait_state_change(rk, version, remains_ms);
- }
-
- return -1;
-}
-
-
-void *rd_kafka_opaque(const rd_kafka_t *rk) {
- return rk->rk_conf.opaque;
-}
-
-
-int rd_kafka_outq_len(rd_kafka_t *rk) {
- return rd_kafka_curr_msgs_cnt(rk) + rd_kafka_q_len(rk->rk_rep) +
- (rk->rk_background.q ? rd_kafka_q_len(rk->rk_background.q) : 0);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms) {
- unsigned int msg_cnt = 0;
-
- if (rk->rk_type != RD_KAFKA_PRODUCER)
- return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-
- rd_kafka_yield_thread = 0;
-
- /* Set flushing flag on the producer for the duration of the
- * flush() call. This tells producer_serve() that the linger.ms
- * time should be considered immediate. */
- rd_atomic32_add(&rk->rk_flushing, 1);
-
- /* Wake up all broker threads to trigger the produce_serve() call.
- * If this flush() call finishes before the broker wakes up
- * then no flushing will be performed by that broker thread. */
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_UP, "flushing");
-
- if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) {
- /* Application wants delivery reports as events rather
- * than callbacks, we must thus not serve this queue
- * with rd_kafka_poll() since that would trigger non-existent
- * delivery report callbacks, which would result
- * in the delivery reports being dropped.
- * Instead we rely on the application to serve the event
- * queue in another thread, so all we do here is wait
- * for the current message count to reach zero. */
- rd_kafka_curr_msgs_wait_zero(rk, timeout_ms, &msg_cnt);
-
- } else {
- /* Standard poll interface.
- *
- * First poll call is non-blocking for the case
- * where timeout_ms==RD_POLL_NOWAIT to make sure poll is
- * called at least once. */
- rd_ts_t ts_end = rd_timeout_init(timeout_ms);
- int tmout = RD_POLL_NOWAIT;
- int qlen = 0;
-
- do {
- rd_kafka_poll(rk, tmout);
- qlen = rd_kafka_q_len(rk->rk_rep);
- msg_cnt = rd_kafka_curr_msgs_cnt(rk);
- } while (qlen + msg_cnt > 0 && !rd_kafka_yield_thread &&
- (tmout = rd_timeout_remains_limit(ts_end, 10)) !=
- RD_POLL_NOWAIT);
-
- msg_cnt += qlen;
- }
-
- rd_atomic32_sub(&rk->rk_flushing, 1);
-
- return msg_cnt > 0 ? RD_KAFKA_RESP_ERR__TIMED_OUT
- : RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
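-/* Illustrative usage sketch (hypothetical application code): draining the
- * producer at shutdown and falling back to purge on timeout, so that
- * delivery reports for the abandoned messages are still raised. */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_shutdown(rd_kafka_t *rk) {
-        if (rd_kafka_flush(rk, 10 * 1000) == RD_KAFKA_RESP_ERR__TIMED_OUT) {
-                rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE |
-                                       RD_KAFKA_PURGE_F_INFLIGHT);
-                rd_kafka_poll(rk, 100); /* serve the purge's reports */
-        }
-        fprintf(stderr, "%d message(s)/op(s) remain\n", rd_kafka_outq_len(rk));
-}
-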
-/**
- * @brief Purge the partition message queue (according to \p purge_flags) for
- * all toppars.
- *
- * This is necessary to avoid the race condition where a purge() is scheduled
- * in the short window after an rktp has been created but before it has been
- * joined to a broker handler thread.
- *
- * The rktp_xmit_msgq is handled by the broker-thread purge.
- *
- * @returns the number of messages purged.
- *
- * @locks_required rd_kafka_*lock()
- * @locks_acquired rd_kafka_topic_rdlock()
- */
-static int rd_kafka_purge_toppars(rd_kafka_t *rk, int purge_flags) {
- rd_kafka_topic_t *rkt;
- int cnt = 0;
-
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- rd_kafka_toppar_t *rktp;
- int i;
-
- rd_kafka_topic_rdlock(rkt);
- for (i = 0; i < rkt->rkt_partition_cnt; i++)
- cnt += rd_kafka_toppar_purge_queues(
- rkt->rkt_p[i], purge_flags, rd_false /*!xmit*/);
-
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
- cnt += rd_kafka_toppar_purge_queues(rktp, purge_flags,
- rd_false /*!xmit*/);
-
- if (rkt->rkt_ua)
- cnt += rd_kafka_toppar_purge_queues(
- rkt->rkt_ua, purge_flags, rd_false /*!xmit*/);
- rd_kafka_topic_rdunlock(rkt);
- }
-
- return cnt;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags) {
- rd_kafka_broker_t *rkb;
- rd_kafka_q_t *tmpq = NULL;
- int waitcnt = 0;
-
- if (rk->rk_type != RD_KAFKA_PRODUCER)
- return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-
- /* Check that future flags are not passed */
- if ((purge_flags & ~RD_KAFKA_PURGE_F_MASK) != 0)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- /* Nothing to purge */
- if (!purge_flags)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- /* Set up a reply queue to wait for broker thread signalling
- * completion, unless non-blocking. */
- if (!(purge_flags & RD_KAFKA_PURGE_F_NON_BLOCKING))
- tmpq = rd_kafka_q_new(rk);
-
- rd_kafka_rdlock(rk);
-
- /* Purge msgq for all toppars. */
- rd_kafka_purge_toppars(rk, purge_flags);
-
- /* Send purge request to all broker threads */
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- rd_kafka_broker_purge_queues(rkb, purge_flags,
- RD_KAFKA_REPLYQ(tmpq, 0));
- waitcnt++;
- }
-
- rd_kafka_rdunlock(rk);
-
-
- if (tmpq) {
- /* Wait for responses */
- while (waitcnt-- > 0)
- rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
-
- rd_kafka_q_destroy_owner(tmpq);
- }
-
- /* Purge messages for the UA(-1) partitions (which are not
- * handled by a broker thread) */
- if (purge_flags & RD_KAFKA_PURGE_F_QUEUE)
- rd_kafka_purge_ua_toppar_queues(rk);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @returns a csv string of purge flags in thread-local storage
- */
-const char *rd_kafka_purge_flags2str(int flags) {
- static const char *names[] = {"queue", "inflight", "non-blocking",
- NULL};
- static RD_TLS char ret[64];
-
- return rd_flags2str(ret, sizeof(ret), names, flags);
-}
-
-
-int rd_kafka_version(void) {
- return RD_KAFKA_VERSION;
-}
-
-const char *rd_kafka_version_str(void) {
- static RD_TLS char ret[128];
- size_t of = 0, r;
-
- if (*ret)
- return ret;
-
-#ifdef LIBRDKAFKA_GIT_VERSION
- if (*LIBRDKAFKA_GIT_VERSION) {
- of = rd_snprintf(ret, sizeof(ret), "%s",
- *LIBRDKAFKA_GIT_VERSION == 'v'
- ? &LIBRDKAFKA_GIT_VERSION[1]
- : LIBRDKAFKA_GIT_VERSION);
- if (of > sizeof(ret))
- of = sizeof(ret);
- }
-#endif
-
-#define _my_sprintf(...) \
- do { \
- r = rd_snprintf(ret + of, sizeof(ret) - of, __VA_ARGS__); \
- if (r > sizeof(ret) - of) \
- r = sizeof(ret) - of; \
- of += r; \
- } while (0)
-
- if (of == 0) {
- int ver = rd_kafka_version();
- int prel = (ver & 0xff);
- _my_sprintf("%i.%i.%i", (ver >> 24) & 0xff, (ver >> 16) & 0xff,
- (ver >> 8) & 0xff);
- if (prel != 0xff) {
-                        /* pre-builds numbered up to 200 are plain running
-                         * numbers, above 200 they are RC numbers (offset
-                         * by 200). */
- if (prel <= 200)
- _my_sprintf("-pre%d", prel);
- else
- _my_sprintf("-RC%d", prel - 200);
- }
- }
-
-#if ENABLE_DEVEL
- _my_sprintf("-devel");
-#endif
-
-#if WITHOUT_OPTIMIZATION
- _my_sprintf("-O0");
-#endif
-
- return ret;
-}
-
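-/* Illustrative sketch (hypothetical application code): decoding the
- * 0xMMmmrrpp integer alongside the human-readable string built below. */
-#include <stdio.h>
-#include <librdkafka/rdkafka.h>
-
-static void example_print_version(void) {
-        int v = rd_kafka_version();
-        printf("librdkafka %s (%d.%d.%d)\n", rd_kafka_version_str(),
-               (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
-}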
-
-/**
- * Assert trampoline to print some debugging information on crash.
- */
-void RD_NORETURN rd_kafka_crash(const char *file,
- int line,
- const char *function,
- rd_kafka_t *rk,
- const char *reason) {
- fprintf(stderr, "*** %s:%i:%s: %s ***\n", file, line, function, reason);
- if (rk)
- rd_kafka_dump0(stderr, rk, 0 /*no locks*/);
- abort();
-}
-
-
-
-struct list_groups_state {
- rd_kafka_q_t *q;
- rd_kafka_resp_err_t err;
- int wait_cnt;
- const char *desired_group;
- struct rd_kafka_group_list *grplist;
- int grplist_size;
-};
-
-static const char *rd_kafka_consumer_group_state_names[] = {
- "Unknown", "PreparingRebalance", "CompletingRebalance", "Stable", "Dead",
- "Empty"};
-
-const char *
-rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state) {
- if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT)
- return NULL;
- return rd_kafka_consumer_group_state_names[state];
-}
-
-rd_kafka_consumer_group_state_t
-rd_kafka_consumer_group_state_code(const char *name) {
- size_t i;
- for (i = 0; i < RD_KAFKA_CONSUMER_GROUP_STATE__CNT; i++) {
- if (!rd_strcasecmp(rd_kafka_consumer_group_state_names[i],
- name))
- return i;
- }
- return RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN;
-}
-
-static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque) {
- struct list_groups_state *state;
- const int log_decode_errors = LOG_ERR;
- int cnt;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* 'state' has gone out of scope due to list_groups()
- * timing out and returning. */
- return;
- }
-
- state = opaque;
- state->wait_cnt--;
-
- if (err)
- goto err;
-
- rd_kafka_buf_read_i32(reply, &cnt);
-
- while (cnt-- > 0) {
- int16_t ErrorCode;
- rd_kafkap_str_t Group, GroupState, ProtoType, Proto;
- int MemberCnt;
- struct rd_kafka_group_info *gi;
-
- if (state->grplist->group_cnt == state->grplist_size) {
- /* Grow group array */
- state->grplist_size *= 2;
- state->grplist->groups =
- rd_realloc(state->grplist->groups,
- state->grplist_size *
- sizeof(*state->grplist->groups));
- }
-
- gi = &state->grplist->groups[state->grplist->group_cnt++];
- memset(gi, 0, sizeof(*gi));
-
- rd_kafka_buf_read_i16(reply, &ErrorCode);
- rd_kafka_buf_read_str(reply, &Group);
- rd_kafka_buf_read_str(reply, &GroupState);
- rd_kafka_buf_read_str(reply, &ProtoType);
- rd_kafka_buf_read_str(reply, &Proto);
- rd_kafka_buf_read_i32(reply, &MemberCnt);
-
- if (MemberCnt > 100000) {
- err = RD_KAFKA_RESP_ERR__BAD_MSG;
- goto err;
- }
-
- rd_kafka_broker_lock(rkb);
- gi->broker.id = rkb->rkb_nodeid;
- gi->broker.host = rd_strdup(rkb->rkb_origname);
- gi->broker.port = rkb->rkb_port;
- rd_kafka_broker_unlock(rkb);
-
- gi->err = ErrorCode;
- gi->group = RD_KAFKAP_STR_DUP(&Group);
- gi->state = RD_KAFKAP_STR_DUP(&GroupState);
- gi->protocol_type = RD_KAFKAP_STR_DUP(&ProtoType);
- gi->protocol = RD_KAFKAP_STR_DUP(&Proto);
-
- if (MemberCnt > 0)
- gi->members =
- rd_malloc(MemberCnt * sizeof(*gi->members));
-
- while (MemberCnt-- > 0) {
- rd_kafkap_str_t MemberId, ClientId, ClientHost;
- rd_kafkap_bytes_t Meta, Assignment;
- struct rd_kafka_group_member_info *mi;
-
- mi = &gi->members[gi->member_cnt++];
- memset(mi, 0, sizeof(*mi));
-
- rd_kafka_buf_read_str(reply, &MemberId);
- rd_kafka_buf_read_str(reply, &ClientId);
- rd_kafka_buf_read_str(reply, &ClientHost);
- rd_kafka_buf_read_bytes(reply, &Meta);
- rd_kafka_buf_read_bytes(reply, &Assignment);
-
- mi->member_id = RD_KAFKAP_STR_DUP(&MemberId);
- mi->client_id = RD_KAFKAP_STR_DUP(&ClientId);
- mi->client_host = RD_KAFKAP_STR_DUP(&ClientHost);
-
- if (RD_KAFKAP_BYTES_LEN(&Meta) == 0) {
- mi->member_metadata_size = 0;
- mi->member_metadata = NULL;
- } else {
- mi->member_metadata_size =
- RD_KAFKAP_BYTES_LEN(&Meta);
- mi->member_metadata = rd_memdup(
- Meta.data, mi->member_metadata_size);
- }
-
- if (RD_KAFKAP_BYTES_LEN(&Assignment) == 0) {
- mi->member_assignment_size = 0;
- mi->member_assignment = NULL;
- } else {
- mi->member_assignment_size =
- RD_KAFKAP_BYTES_LEN(&Assignment);
- mi->member_assignment =
- rd_memdup(Assignment.data,
- mi->member_assignment_size);
- }
- }
- }
-
-err:
- state->err = err;
- return;
-
-err_parse:
- state->err = reply->rkbuf_err;
-}
-
-static void rd_kafka_ListGroups_resp_cb(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque) {
- struct list_groups_state *state;
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode;
- char **grps = NULL;
- int cnt, grpcnt, i = 0;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* 'state' is no longer in scope because
- * list_groups() timed out and returned to the caller.
- * We must not touch anything here but simply return. */
- return;
- }
-
- state = opaque;
-
- state->wait_cnt--;
-
- if (err)
- goto err;
-
- rd_kafka_buf_read_i16(reply, &ErrorCode);
- if (ErrorCode) {
- err = ErrorCode;
- goto err;
- }
-
- rd_kafka_buf_read_i32(reply, &cnt);
-
- if (state->desired_group)
- grpcnt = 1;
- else
- grpcnt = cnt;
-
- if (cnt == 0 || grpcnt == 0)
- return;
-
- grps = rd_malloc(sizeof(*grps) * grpcnt);
-
- while (cnt-- > 0) {
- rd_kafkap_str_t grp, proto;
-
- rd_kafka_buf_read_str(reply, &grp);
- rd_kafka_buf_read_str(reply, &proto);
-
- if (state->desired_group &&
- rd_kafkap_str_cmp_str(&grp, state->desired_group))
- continue;
-
- grps[i++] = RD_KAFKAP_STR_DUP(&grp);
-
- if (i == grpcnt)
- break;
- }
-
- if (i > 0) {
- rd_kafka_error_t *error;
-
- state->wait_cnt++;
- error = rd_kafka_DescribeGroupsRequest(
- rkb, 0, grps, i, RD_KAFKA_REPLYQ(state->q, 0),
- rd_kafka_DescribeGroups_resp_cb, state);
- if (error) {
- rd_kafka_DescribeGroups_resp_cb(
- rk, rkb, rd_kafka_error_code(error), reply, request,
- opaque);
- rd_kafka_error_destroy(error);
- }
-
- while (i-- > 0)
- rd_free(grps[i]);
- }
-
-
- rd_free(grps);
-
-err:
- state->err = err;
- return;
-
-err_parse:
- if (grps)
- rd_free(grps);
- state->err = reply->rkbuf_err;
-}
-
-rd_kafka_resp_err_t
-rd_kafka_list_groups(rd_kafka_t *rk,
- const char *group,
- const struct rd_kafka_group_list **grplistp,
- int timeout_ms) {
- rd_kafka_broker_t *rkb;
- int rkb_cnt = 0;
- struct list_groups_state state = RD_ZERO_INIT;
- rd_ts_t ts_end = rd_timeout_init(timeout_ms);
-
- /* Wait until metadata has been fetched from cluster so
- * that we have a full broker list.
- * This state only happens during initial client setup, after that
- * there'll always be a cached metadata copy. */
- while (1) {
- int state_version = rd_kafka_brokers_get_state_version(rk);
- rd_bool_t has_metadata;
-
- rd_kafka_rdlock(rk);
- has_metadata = rk->rk_ts_metadata != 0;
- rd_kafka_rdunlock(rk);
-
- if (has_metadata)
- break;
-
- if (!rd_kafka_brokers_wait_state_change(
- rk, state_version, rd_timeout_remains(ts_end)))
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
- }
-
-
- state.q = rd_kafka_q_new(rk);
- state.desired_group = group;
- state.grplist = rd_calloc(1, sizeof(*state.grplist));
- state.grplist_size = group ? 1 : 32;
-
- state.grplist->groups =
- rd_malloc(state.grplist_size * sizeof(*state.grplist->groups));
-
- /* Query each broker for its list of groups */
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- rd_kafka_error_t *error;
- rd_kafka_broker_lock(rkb);
- if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
- rd_kafka_broker_unlock(rkb);
- continue;
- }
- rd_kafka_broker_unlock(rkb);
-
- state.wait_cnt++;
- rkb_cnt++;
- error = rd_kafka_ListGroupsRequest(
- rkb, 0, NULL, 0, RD_KAFKA_REPLYQ(state.q, 0),
- rd_kafka_ListGroups_resp_cb, &state);
- if (error) {
- rd_kafka_ListGroups_resp_cb(rk, rkb,
- rd_kafka_error_code(error),
- NULL, NULL, &state);
- rd_kafka_error_destroy(error);
- }
- }
- rd_kafka_rdunlock(rk);
-
- if (rkb_cnt == 0) {
- state.err = RD_KAFKA_RESP_ERR__TRANSPORT;
-
- } else {
- int remains;
-
- while (state.wait_cnt > 0 &&
- !rd_timeout_expired(
- (remains = rd_timeout_remains(ts_end)))) {
- rd_kafka_q_serve(state.q, remains, 0,
- RD_KAFKA_Q_CB_CALLBACK,
- rd_kafka_poll_cb, NULL);
- /* Ignore yields */
- }
- }
-
- rd_kafka_q_destroy_owner(state.q);
-
- if (state.wait_cnt > 0 && !state.err) {
- if (state.grplist->group_cnt == 0)
- state.err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- else {
- *grplistp = state.grplist;
- return RD_KAFKA_RESP_ERR__PARTIAL;
- }
- }
-
- if (state.err)
- rd_kafka_group_list_destroy(state.grplist);
- else
- *grplistp = state.grplist;
-
- return state.err;
-}
-
-
-void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist0) {
- struct rd_kafka_group_list *grplist =
- (struct rd_kafka_group_list *)grplist0;
-
- while (grplist->group_cnt-- > 0) {
- struct rd_kafka_group_info *gi;
- gi = &grplist->groups[grplist->group_cnt];
-
- if (gi->broker.host)
- rd_free(gi->broker.host);
- if (gi->group)
- rd_free(gi->group);
- if (gi->state)
- rd_free(gi->state);
- if (gi->protocol_type)
- rd_free(gi->protocol_type);
- if (gi->protocol)
- rd_free(gi->protocol);
-
- while (gi->member_cnt-- > 0) {
- struct rd_kafka_group_member_info *mi;
- mi = &gi->members[gi->member_cnt];
-
- if (mi->member_id)
- rd_free(mi->member_id);
- if (mi->client_id)
- rd_free(mi->client_id);
- if (mi->client_host)
- rd_free(mi->client_host);
- if (mi->member_metadata)
- rd_free(mi->member_metadata);
- if (mi->member_assignment)
- rd_free(mi->member_assignment);
- }
-
- if (gi->members)
- rd_free(gi->members);
- }
-
- if (grplist->groups)
- rd_free(grplist->groups);
-
- rd_free(grplist);
-}
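-
-/**
- * Example usage of rd_kafka_list_groups() (an illustrative sketch;
- * assumes an existing \c rk handle, and the 5000 ms timeout is an
- * arbitrary placeholder):
- *
- *   const struct rd_kafka_group_list *grplist;
- *   rd_kafka_resp_err_t err;
- *   int i;
- *
- *   err = rd_kafka_list_groups(rk, NULL, &grplist, 5000);
- *   if (!err || err == RD_KAFKA_RESP_ERR__PARTIAL) {
- *           for (i = 0; i < grplist->group_cnt; i++)
- *                   printf("%s (%s)\n", grplist->groups[i].group,
- *                          grplist->groups[i].state);
- *           rd_kafka_group_list_destroy(grplist);
- *   }
- */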
-
-
-
-const char *rd_kafka_get_debug_contexts(void) {
- return RD_KAFKA_DEBUG_CONTEXTS;
-}
-
-
-int rd_kafka_path_is_dir(const char *path) {
-#ifdef _WIN32
- struct _stat st;
- return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR);
-#else
- struct stat st;
- return (stat(path, &st) == 0 && S_ISDIR(st.st_mode));
-#endif
-}
-
-
-/**
- * @returns true if directory is empty or can't be accessed, else false.
- */
-rd_bool_t rd_kafka_dir_is_empty(const char *path) {
-#if _WIN32
- /* FIXME: Unsupported */
- return rd_true;
-#else
- DIR *dir;
- struct dirent *d;
-#if defined(__sun)
- struct stat st;
- int ret = 0;
-#endif
-
- dir = opendir(path);
- if (!dir)
- return rd_true;
-
- while ((d = readdir(dir))) {
-
- if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
- continue;
-
-#if defined(__sun)
-		ret = stat(d->d_name, &st);
-		if (ret != 0) {
-			closedir(dir);
-			return rd_true; /* Can't be accessed */
-		}
- if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) ||
- S_ISLNK(st.st_mode)) {
-#else
- if (d->d_type == DT_REG || d->d_type == DT_LNK ||
- d->d_type == DT_DIR) {
-#endif
- closedir(dir);
- return rd_false;
- }
- }
-
- closedir(dir);
- return rd_true;
-#endif
-}
-
-
-void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size) {
- return rd_malloc(size);
-}
-
-void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size) {
- return rd_calloc(num, size);
-}
-
-void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr) {
- rd_free(ptr);
-}
-
-
-int rd_kafka_errno(void) {
- return errno;
-}
-
-int rd_kafka_unittest(void) {
- return rd_unittest();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h
deleted file mode 100644
index e3474e50f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h
+++ /dev/null
@@ -1,9340 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2022 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file rdkafka.h
- * @brief Apache Kafka C/C++ consumer and producer client library.
- *
- * rdkafka.h contains the public API for librdkafka.
- * The API is documented in this file as comments prefixing the function, type,
- * enum, define, etc.
- *
- * @sa For the C++ interface see rdkafkacpp.h
- *
- * @tableofcontents
- */
-
-
-/* @cond NO_DOC */
-#ifndef _RDKAFKA_H_
-#define _RDKAFKA_H_
-
-#include <stdio.h>
-#include <inttypes.h>
-#include <sys/types.h>
-
-#ifdef __cplusplus
-extern "C" {
-#if 0
-} /* Restore indent */
-#endif
-#endif
-
-#ifdef _WIN32
-#include <basetsd.h>
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif
-#include <winsock2.h> /* for sockaddr, .. */
-#ifndef _SSIZE_T_DEFINED
-#define _SSIZE_T_DEFINED
-typedef SSIZE_T ssize_t;
-#endif
-#define RD_UNUSED
-#define RD_INLINE __inline
-#define RD_DEPRECATED __declspec(deprecated)
-#define RD_FORMAT(...)
-#undef RD_EXPORT
-#ifdef LIBRDKAFKA_STATICLIB
-#define RD_EXPORT
-#else
-#ifdef LIBRDKAFKA_EXPORTS
-#define RD_EXPORT __declspec(dllexport)
-#else
-#define RD_EXPORT __declspec(dllimport)
-#endif
-#ifndef LIBRDKAFKA_TYPECHECKS
-#define LIBRDKAFKA_TYPECHECKS 0
-#endif
-#endif
-
-#else
-#include <sys/socket.h> /* for sockaddr, .. */
-
-#define RD_UNUSED __attribute__((unused))
-#define RD_INLINE inline
-#define RD_EXPORT
-#define RD_DEPRECATED __attribute__((deprecated))
-
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
-#define RD_HAS_STATEMENT_EXPRESSIONS
-#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__)))
-#else
-#define RD_FORMAT(...)
-#endif
-
-#ifndef LIBRDKAFKA_TYPECHECKS
-#define LIBRDKAFKA_TYPECHECKS 1
-#endif
-#endif
-
-
-/**
- * @brief Type-checking macros
- * Compile-time checking that \p ARG is of type \p TYPE.
- * @returns \p RET
- */
-#if LIBRDKAFKA_TYPECHECKS
-#define _LRK_TYPECHECK(RET, TYPE, ARG) \
- ({ \
- if (0) { \
- TYPE __t RD_UNUSED = (ARG); \
- } \
- RET; \
- })
-
-#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) \
- ({ \
- if (0) { \
- TYPE __t RD_UNUSED = (ARG); \
- TYPE2 __t2 RD_UNUSED = (ARG2); \
- } \
- RET; \
- })
-
-#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) \
- ({ \
- if (0) { \
- TYPE __t RD_UNUSED = (ARG); \
- TYPE2 __t2 RD_UNUSED = (ARG2); \
- TYPE3 __t3 RD_UNUSED = (ARG3); \
- } \
- RET; \
- })
-#else
-#define _LRK_TYPECHECK(RET, TYPE, ARG) (RET)
-#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) (RET)
-#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET)
-#endif
-
-/* @endcond */
-
-
-/**
- * @name librdkafka version
- * @{
- *
- *
- */
-
-/**
- * @brief librdkafka version
- *
- * Interpreted as hex \c MM.mm.rr.xx:
- * - MM = Major
- * - mm = minor
- * - rr = revision
- * - xx = pre-release id (0xff is the final release)
- *
- * E.g.: \c 0x000801ff = 0.8.1
- *
- * @remark This value should only be used at compile time;
- *         for runtime version checks use rd_kafka_version().
- */
-#define RD_KAFKA_VERSION 0x020100ff
-
-/**
- * @brief Returns the librdkafka version as integer.
- *
- * @returns Version integer.
- *
- * @sa See RD_KAFKA_VERSION for how to parse the integer format.
- * @sa Use rd_kafka_version_str() to retrieve the version as a string.
- */
-RD_EXPORT
-int rd_kafka_version(void);
-
-/**
- * @brief Returns the librdkafka version as string.
- *
- * @returns Version string
- */
-RD_EXPORT
-const char *rd_kafka_version_str(void);
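-
-/**
- * Example: decoding the version integer at runtime (a minimal sketch,
- * mirroring the hex layout documented for RD_KAFKA_VERSION):
-
- * @code
- * int v = rd_kafka_version();
- * printf("librdkafka %d.%d.%d (%s)\n",
- *        (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff,
- *        rd_kafka_version_str());
- * @endcode
- */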
-
-/**@}*/
-
-
-/**
- * @name Constants, errors, types
- * @{
- *
- *
- */
-
-
-/**
- * @enum rd_kafka_type_t
- *
- * @brief rd_kafka_t handle type.
- *
- * @sa rd_kafka_new()
- */
-typedef enum rd_kafka_type_t {
- RD_KAFKA_PRODUCER, /**< Producer client */
- RD_KAFKA_CONSUMER /**< Consumer client */
-} rd_kafka_type_t;
-
-
-/*!
- * Timestamp types
- *
- * @sa rd_kafka_message_timestamp()
- */
-typedef enum rd_kafka_timestamp_type_t {
- RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */
- RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */
- RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */
-} rd_kafka_timestamp_type_t;
-
-
-
-/**
- * @brief Retrieve supported debug contexts for use with the \c \"debug\"
- * configuration property. (runtime)
- *
- * @returns Comma-separated list of available debugging contexts.
- */
-RD_EXPORT
-const char *rd_kafka_get_debug_contexts(void);
-
-/**
- * @brief Supported debug contexts. (compile time)
- *
- * @deprecated This compile time value may be outdated at runtime due to
- * linking another version of the library.
- * Use rd_kafka_get_debug_contexts() instead.
- */
-#define RD_KAFKA_DEBUG_CONTEXTS \
- "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp," \
- "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor," \
- "conf"
-
-
-/* @cond NO_DOC */
-/* Private types to provide ABI compatibility */
-typedef struct rd_kafka_s rd_kafka_t;
-typedef struct rd_kafka_topic_s rd_kafka_topic_t;
-typedef struct rd_kafka_conf_s rd_kafka_conf_t;
-typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
-typedef struct rd_kafka_queue_s rd_kafka_queue_t;
-typedef struct rd_kafka_op_s rd_kafka_event_t;
-typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
-typedef struct rd_kafka_consumer_group_metadata_s
- rd_kafka_consumer_group_metadata_t;
-typedef struct rd_kafka_error_s rd_kafka_error_t;
-typedef struct rd_kafka_headers_s rd_kafka_headers_t;
-typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
-typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
-/* @endcond */
-
-
-/**
- * @enum rd_kafka_resp_err_t
- * @brief Error codes.
- *
- * The negative error codes delimited by two underscores
- * (\c RD_KAFKA_RESP_ERR__..) denote errors internal to librdkafka and are
- * displayed as \c \"Local: \<error string..\>\", while the error codes
- * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker
- * errors and are displayed as \c \"Broker: \<error string..\>\".
- *
- * @sa Use rd_kafka_err2str() to translate an error code to a human readable string
- */
-typedef enum {
- /* Internal errors to rdkafka: */
- /** Begin internal error codes */
- RD_KAFKA_RESP_ERR__BEGIN = -200,
- /** Received message is incorrect */
- RD_KAFKA_RESP_ERR__BAD_MSG = -199,
- /** Bad/unknown compression */
- RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198,
- /** Broker is going away */
- RD_KAFKA_RESP_ERR__DESTROY = -197,
- /** Generic failure */
- RD_KAFKA_RESP_ERR__FAIL = -196,
- /** Broker transport failure */
- RD_KAFKA_RESP_ERR__TRANSPORT = -195,
- /** Critical system resource */
- RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194,
- /** Failed to resolve broker */
- RD_KAFKA_RESP_ERR__RESOLVE = -193,
-        /** Produced message timed out */
- RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192,
- /** Reached the end of the topic+partition queue on
- * the broker. Not really an error.
- * This event is disabled by default,
- * see the `enable.partition.eof` configuration property. */
- RD_KAFKA_RESP_ERR__PARTITION_EOF = -191,
- /** Permanent: Partition does not exist in cluster. */
- RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190,
- /** File or filesystem error */
- RD_KAFKA_RESP_ERR__FS = -189,
- /** Permanent: Topic does not exist in cluster. */
- RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188,
- /** All broker connections are down. */
- RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187,
- /** Invalid argument, or invalid configuration */
- RD_KAFKA_RESP_ERR__INVALID_ARG = -186,
- /** Operation timed out */
- RD_KAFKA_RESP_ERR__TIMED_OUT = -185,
- /** Queue is full */
- RD_KAFKA_RESP_ERR__QUEUE_FULL = -184,
- /** ISR count < required.acks */
- RD_KAFKA_RESP_ERR__ISR_INSUFF = -183,
- /** Broker node update */
- RD_KAFKA_RESP_ERR__NODE_UPDATE = -182,
- /** SSL error */
- RD_KAFKA_RESP_ERR__SSL = -181,
- /** Waiting for coordinator to become available. */
- RD_KAFKA_RESP_ERR__WAIT_COORD = -180,
- /** Unknown client group */
- RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179,
- /** Operation in progress */
- RD_KAFKA_RESP_ERR__IN_PROGRESS = -178,
- /** Previous operation in progress, wait for it to finish. */
- RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177,
- /** This operation would interfere with an existing subscription */
- RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176,
- /** Assigned partitions (rebalance_cb) */
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175,
- /** Revoked partitions (rebalance_cb) */
- RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174,
- /** Conflicting use */
- RD_KAFKA_RESP_ERR__CONFLICT = -173,
- /** Wrong state */
- RD_KAFKA_RESP_ERR__STATE = -172,
- /** Unknown protocol */
- RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171,
- /** Not implemented */
- RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170,
-        /** Authentication failure */
- RD_KAFKA_RESP_ERR__AUTHENTICATION = -169,
- /** No stored offset */
- RD_KAFKA_RESP_ERR__NO_OFFSET = -168,
- /** Outdated */
- RD_KAFKA_RESP_ERR__OUTDATED = -167,
- /** Timed out in queue */
- RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166,
- /** Feature not supported by broker */
- RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165,
- /** Awaiting cache update */
- RD_KAFKA_RESP_ERR__WAIT_CACHE = -164,
-        /** Operation interrupted (e.g., due to yield) */
- RD_KAFKA_RESP_ERR__INTR = -163,
- /** Key serialization error */
- RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162,
- /** Value serialization error */
- RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161,
- /** Key deserialization error */
- RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160,
- /** Value deserialization error */
- RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159,
- /** Partial response */
- RD_KAFKA_RESP_ERR__PARTIAL = -158,
- /** Modification attempted on read-only object */
- RD_KAFKA_RESP_ERR__READ_ONLY = -157,
- /** No such entry / item not found */
- RD_KAFKA_RESP_ERR__NOENT = -156,
- /** Read underflow */
- RD_KAFKA_RESP_ERR__UNDERFLOW = -155,
- /** Invalid type */
- RD_KAFKA_RESP_ERR__INVALID_TYPE = -154,
- /** Retry operation */
- RD_KAFKA_RESP_ERR__RETRY = -153,
- /** Purged in queue */
- RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152,
- /** Purged in flight */
- RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151,
- /** Fatal error: see rd_kafka_fatal_error() */
- RD_KAFKA_RESP_ERR__FATAL = -150,
- /** Inconsistent state */
- RD_KAFKA_RESP_ERR__INCONSISTENT = -149,
- /** Gap-less ordering would not be guaranteed if proceeding */
- RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148,
- /** Maximum poll interval exceeded */
- RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147,
- /** Unknown broker */
- RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146,
- /** Functionality not configured */
- RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145,
- /** Instance has been fenced */
- RD_KAFKA_RESP_ERR__FENCED = -144,
- /** Application generated error */
- RD_KAFKA_RESP_ERR__APPLICATION = -143,
- /** Assignment lost */
- RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142,
- /** No operation performed */
- RD_KAFKA_RESP_ERR__NOOP = -141,
- /** No offset to automatically reset to */
- RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140,
- /** Partition log truncation detected */
- RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139,
-
- /** End internal error codes */
- RD_KAFKA_RESP_ERR__END = -100,
-
- /* Kafka broker errors: */
- /** Unknown broker error */
- RD_KAFKA_RESP_ERR_UNKNOWN = -1,
- /** Success */
- RD_KAFKA_RESP_ERR_NO_ERROR = 0,
- /** Offset out of range */
- RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
- /** Invalid message */
- RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
- /** Unknown topic or partition */
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
- /** Invalid message size */
- RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
- /** Leader not available */
- RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
-/** Not leader for partition */
-#define RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER \
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
- /** Request timed out */
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
- /** Broker not available */
- RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
- /** Replica not available */
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
- /** Message size too large */
- RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
- /** StaleControllerEpochCode */
- RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
- /** Offset metadata string too large */
- RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
- /** Broker disconnected before response received */
- RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
- /** Coordinator load in progress */
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
-/** Group coordinator load in progress */
-#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS
- /** Coordinator not available */
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
-/** Group coordinator not available */
-#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE
- /** Not coordinator */
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
-/** Not coordinator for group */
-#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR
- /** Invalid topic */
- RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
- /** Message batch larger than configured server segment size */
- RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
- /** Not enough in-sync replicas */
- RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
- /** Message(s) written to insufficient number of in-sync replicas */
- RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
- /** Invalid required acks value */
- RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
- /** Specified group generation id is not valid */
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
- /** Inconsistent group protocol */
- RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
- /** Invalid group.id */
- RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
- /** Unknown member */
- RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
- /** Invalid session timeout */
- RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
- /** Group rebalance in progress */
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
- /** Commit offset data size is not valid */
- RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
- /** Topic authorization failed */
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
- /** Group authorization failed */
- RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
- /** Cluster authorization failed */
- RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
- /** Invalid timestamp */
- RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
- /** Unsupported SASL mechanism */
- RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
- /** Illegal SASL state */
- RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
-        /** Unsupported version */
- RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
- /** Topic already exists */
- RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
- /** Invalid number of partitions */
- RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
- /** Invalid replication factor */
- RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
- /** Invalid replica assignment */
- RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
- /** Invalid config */
- RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
- /** Not controller for cluster */
- RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
- /** Invalid request */
- RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
- /** Message format on broker does not support request */
- RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
- /** Policy violation */
- RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
- /** Broker received an out of order sequence number */
- RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
- /** Broker received a duplicate sequence number */
- RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
- /** Producer attempted an operation with an old epoch */
- RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
- /** Producer attempted a transactional operation in an invalid state */
- RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
- /** Producer attempted to use a producer id which is not
- * currently assigned to its transactional id */
- RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
- /** Transaction timeout is larger than the maximum
- * value allowed by the broker's max.transaction.timeout.ms */
- RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
- /** Producer attempted to update a transaction while another
- * concurrent operation on the same transaction was ongoing */
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
- /** Indicates that the transaction coordinator sending a
- * WriteTxnMarker is no longer the current coordinator for a
- * given producer */
- RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
- /** Transactional Id authorization failed */
- RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
- /** Security features are disabled */
- RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
- /** Operation not attempted */
- RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
- /** Disk error when trying to access log file on the disk */
- RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
- /** The user-specified log directory is not found in the broker config
- */
- RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
- /** SASL Authentication failed */
- RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
- /** Unknown Producer Id */
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
- /** Partition reassignment is in progress */
- RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
- /** Delegation Token feature is not enabled */
- RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
- /** Delegation Token is not found on server */
- RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
- /** Specified Principal is not valid Owner/Renewer */
- RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
- /** Delegation Token requests are not allowed on this connection */
- RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
- /** Delegation Token authorization failed */
- RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
- /** Delegation Token is expired */
- RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
- /** Supplied principalType is not supported */
- RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
- /** The group is not empty */
- RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
- /** The group id does not exist */
- RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
- /** The fetch session ID was not found */
- RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
- /** The fetch session epoch is invalid */
- RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
- /** No matching listener */
- RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
- /** Topic deletion is disabled */
- RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
- /** Leader epoch is older than broker epoch */
- RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
- /** Leader epoch is newer than broker epoch */
- RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
- /** Unsupported compression type */
- RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
- /** Broker epoch has changed */
- RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
- /** Leader high watermark is not caught up */
- RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
- /** Group member needs a valid member ID */
- RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
- /** Preferred leader was not available */
- RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
- /** Consumer group has reached maximum size */
- RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
- /** Static consumer fenced by other consumer with same
- * group.instance.id. */
- RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
- /** Eligible partition leaders are not available */
- RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
- /** Leader election not needed for topic partition */
- RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
- /** No partition reassignment is in progress */
- RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
- /** Deleting offsets of a topic while the consumer group is
- * subscribed to it */
- RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
- /** Broker failed to validate record */
- RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
- /** There are unstable offsets that need to be cleared */
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
- /** Throttling quota has been exceeded */
- RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
- /** There is a newer producer with the same transactionalId
- * which fences the current one */
- RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
- /** Request illegally referred to resource that does not exist */
- RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
- /** Request illegally referred to the same resource twice */
- RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
- /** Requested credential would not meet criteria for acceptability */
- RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
-        /** Indicates that either the sender or recipient of a
- * voter-only request is not one of the expected voters */
- RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
- /** Invalid update version */
- RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
- /** Unable to update finalized features due to server error */
- RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
- /** Request principal deserialization failed during forwarding */
- RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
-
- RD_KAFKA_RESP_ERR_END_ALL,
-} rd_kafka_resp_err_t;
-
-
-/**
- * @brief Error code value, name and description.
- * Typically for use with language bindings to automatically expose
- * the full set of librdkafka error codes.
- */
-struct rd_kafka_err_desc {
- rd_kafka_resp_err_t code; /**< Error code */
- const char *name; /**< Error name, same as code enum sans prefix */
- const char *desc; /**< Human readable error description. */
-};
-
-
-/**
- * @brief Returns the full list of error codes.
- */
-RD_EXPORT
-void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs,
- size_t *cntp);
-
-
-
-/**
- * @brief Returns a human readable representation of a kafka error.
- *
- * @param err Error code to translate
- */
-RD_EXPORT
-const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
-
-
-
-/**
- * @brief Returns the error code name (enum name).
- *
- * @param err Error code to translate
- */
-RD_EXPORT
-const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
-
-
-/**
- * @brief Returns the last error code generated by a legacy API call
- * in the current thread.
- *
- * The legacy APIs are the ones using errno to propagate error values, namely:
- * - rd_kafka_topic_new()
- * - rd_kafka_consume_start()
- * - rd_kafka_consume_stop()
- * - rd_kafka_consume()
- * - rd_kafka_consume_batch()
- * - rd_kafka_consume_callback()
- * - rd_kafka_consume_queue()
- * - rd_kafka_produce()
- *
- * The main use for this function is to avoid converting system \p errno
- * values to rd_kafka_resp_err_t codes for legacy APIs.
- *
- * @remark The last error is stored per-thread, if multiple rd_kafka_t handles
- * are used in the same application thread the developer needs to
- * make sure rd_kafka_last_error() is called immediately after
- * a failed API call.
- *
- * @remark errno propagation from librdkafka is not safe on Windows
- * and should not be used, use rd_kafka_last_error() instead.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_last_error(void);
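-
-/**
- * Example: checking the last error after a legacy API call (a sketch;
- * \c rkt, \c payload and \c len are assumed to be application-provided,
- * and RD_KAFKA_PARTITION_UA / RD_KAFKA_MSG_F_COPY are defined later in
- * this header):
-
- * @code
- * if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
- *                      payload, len, NULL, 0, NULL) == -1)
- *         fprintf(stderr, "produce failed: %s\n",
- *                 rd_kafka_err2str(rd_kafka_last_error()));
- * @endcode
- */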
-
-
-/**
- * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t
- * error code upon failure from the following functions:
- * - rd_kafka_topic_new()
- * - rd_kafka_consume_start()
- * - rd_kafka_consume_stop()
- * - rd_kafka_consume()
- * - rd_kafka_consume_batch()
- * - rd_kafka_consume_callback()
- * - rd_kafka_consume_queue()
- * - rd_kafka_produce()
- *
- * @param errnox System errno value to convert
- *
- * @returns Appropriate error code for \p errnox
- *
- * @remark A better alternative is to call rd_kafka_last_error() immediately
- * after any of the above functions return -1 or NULL.
- *
- * @deprecated Use rd_kafka_last_error() to retrieve the last error code
- * set by the legacy librdkafka APIs.
- *
- * @sa rd_kafka_last_error()
- */
-RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
-
-
-/**
- * @brief Returns the thread-local system errno
- *
- * On most platforms this is the same as \p errno but in case of different
- * runtimes between library and application (e.g., Windows static DLLs)
- * this provides a means for exposing the errno librdkafka uses.
- *
- * @remark The value is local to the current calling thread.
- *
- * @deprecated Use rd_kafka_last_error() to retrieve the last error code
- * set by the legacy librdkafka APIs.
- */
-RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void);
-
-
-
-/**
- * @brief Returns the first fatal error set on this client instance,
- * or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred.
- *
- * This function is to be used with the Idempotent Producer and \c error_cb
- * to detect fatal errors.
- *
- * Generally all errors raised by \c error_cb are to be considered
- * informational and temporary, the client will try to recover from all
- * errors in a graceful fashion (by retrying, etc).
- *
- * However, some errors should logically be considered fatal to retain
- * consistency; in particular a set of errors that may occur when using the
- * Idempotent Producer and the in-order or exactly-once producer guarantees
- * can't be satisfied.
- *
- * @param rk Client instance.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written to if there is a fatal error.
- * @param errstr_size Writable size in \p errstr.
- *
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else
- * any other error code.
- */
-RD_EXPORT
-rd_kafka_resp_err_t
-rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
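-
-/**
- * Example: detecting a fatal error from an error callback (a sketch;
- * the callback shape matches rd_kafka_conf_set_error_cb()):
-
- * @code
- * static void my_error_cb(rd_kafka_t *rk, int err,
- *                         const char *reason, void *opaque) {
- *         char errstr[512];
- *
- *         if (err == RD_KAFKA_RESP_ERR__FATAL &&
- *             rd_kafka_fatal_error(rk, errstr, sizeof(errstr)))
- *                 fprintf(stderr, "Fatal error: %s\n", errstr);
- * }
- * @endcode
- */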
-
-
-/**
- * @brief Trigger a fatal error for testing purposes.
- *
- * Since there is no practical way to trigger real fatal errors in the
- * idempotent producer, this method allows an application to trigger
- * fabricated fatal errors in tests to check its error handling code.
- *
- * @param rk Client instance.
- * @param err The underlying error code.
- * @param reason A human readable error reason.
- * Will be prefixed with "test_fatal_error: " to differentiate
- * from real fatal errors.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or
- * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error
- * has already been triggered.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason);
-
-
-/**
- * @returns the error code for \p error or RD_KAFKA_RESP_ERR_NO_ERROR if
- * \p error is NULL.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
-
-/**
- * @returns the error code name for \p error, e.g, "ERR_UNKNOWN_MEMBER_ID",
- * or an empty string if \p error is NULL.
- *
- * @remark The lifetime of the returned pointer is the same as the error object.
- *
- * @sa rd_kafka_err2name()
- */
-RD_EXPORT
-const char *rd_kafka_error_name(const rd_kafka_error_t *error);
-
-/**
- * @returns a human readable error string for \p error,
- * or an empty string if \p error is NULL.
- *
- * @remark The lifetime of the returned pointer is the same as the error object.
- */
-RD_EXPORT
-const char *rd_kafka_error_string(const rd_kafka_error_t *error);
-
-
-/**
- * @returns 1 if the error is a fatal error, indicating that the client
- * instance is no longer usable, else 0 (also if \p error is NULL).
- */
-RD_EXPORT
-int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
-
-
-/**
- * @returns 1 if the operation may be retried,
- * else 0 (also if \p error is NULL).
- */
-RD_EXPORT
-int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
-
-
-/**
- * @returns 1 if the error is an abortable transaction error in which case
- * the application must call rd_kafka_abort_transaction() and
- * start a new transaction with rd_kafka_begin_transaction() if it
- * wishes to proceed with transactions.
- * Else returns 0 (also if \p error is NULL).
- *
- * @remark The return value of this method is only valid for errors returned
- * by the transactional API.
- */
-RD_EXPORT
-int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
-
-/**
- * @brief Free and destroy an error object.
- *
- * @remark As a convenience it is permitted to pass a NULL \p error.
- */
-RD_EXPORT
-void rd_kafka_error_destroy(rd_kafka_error_t *error);
-
-
-/**
- * @brief Create a new error object with error \p code and optional
- * human readable error string in \p fmt.
- *
- * This method is mainly to be used for mocking errors in application test code.
- *
- * The returned object must be destroyed with rd_kafka_error_destroy().
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code,
- const char *fmt,
- ...) RD_FORMAT(printf, 2, 3);
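-
-/**
- * Example: constructing and inspecting an error object in test code
- * (a sketch; the error code and message are placeholders):
-
- * @code
- * rd_kafka_error_t *error =
- *     rd_kafka_error_new(RD_KAFKA_RESP_ERR__TIMED_OUT,
- *                        "operation took longer than %d ms", 100);
- * printf("%s: %s (retriable=%d)\n",
- *        rd_kafka_error_name(error),
- *        rd_kafka_error_string(error),
- *        rd_kafka_error_is_retriable(error));
- * rd_kafka_error_destroy(error);
- * @endcode
- */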
-
-
-/**
- * @brief Topic+Partition place holder
- *
- * Generic place holder for a Topic+Partition and its related information
- * used for multiple purposes:
- * - consumer offset (see rd_kafka_commit(), et.al.)
- * - group rebalancing callback (rd_kafka_conf_set_rebalance_cb())
- * - offset commit result callback (rd_kafka_conf_set_offset_commit_cb())
- */
-
-/**
- * @brief Generic place holder for a specific Topic+Partition.
- *
- * @sa rd_kafka_topic_partition_list_new()
- */
-typedef struct rd_kafka_topic_partition_s {
- char *topic; /**< Topic name */
- int32_t partition; /**< Partition */
- int64_t offset; /**< Offset */
- void *metadata; /**< Metadata */
- size_t metadata_size; /**< Metadata size */
- void *opaque; /**< Opaque value for application use */
- rd_kafka_resp_err_t err; /**< Error code, depending on use. */
- void *_private; /**< INTERNAL USE ONLY,
- * INITIALIZE TO ZERO, DO NOT TOUCH,
- * DO NOT COPY, DO NOT SHARE WITH OTHER
- * rd_kafka_t INSTANCES. */
-} rd_kafka_topic_partition_t;
-
-
-/**
- * @brief Destroy a rd_kafka_topic_partition_t.
- * @remark This must not be called for elements in a topic partition list.
- */
-RD_EXPORT
-void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
-
-
-/**
- * @brief Sets the offset leader epoch (use -1 to clear).
- *
- * @param rktpar Partition object.
- * @param leader_epoch Offset leader epoch, use -1 to reset.
- *
- * @remark See KIP-320 for more information.
- */
-RD_EXPORT
-void rd_kafka_topic_partition_set_leader_epoch(
- rd_kafka_topic_partition_t *rktpar,
- int32_t leader_epoch);
-
-/**
- * @returns the offset leader epoch, if relevant and known,
- * else -1.
- *
- * @param rktpar Partition object.
- *
- * @remark See KIP-320 for more information.
- */
-RD_EXPORT
-int32_t rd_kafka_topic_partition_get_leader_epoch(
- const rd_kafka_topic_partition_t *rktpar);
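-
-/**
- * Example: setting and reading the offset leader epoch on a partition
- * element (a sketch; \c rktpar is assumed to be an element of a
- * partition list, and the epoch value is a placeholder):
-
- * @code
- * rd_kafka_topic_partition_set_leader_epoch(rktpar, 7);
- * printf("leader epoch: %d\n",
- *        (int)rd_kafka_topic_partition_get_leader_epoch(rktpar));
- * @endcode
- */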
-
-/**
- * @brief A growable list of Topic+Partitions.
- *
- */
-typedef struct rd_kafka_topic_partition_list_s {
- int cnt; /**< Current number of elements */
- int size; /**< Current allocated size */
- rd_kafka_topic_partition_t *elems; /**< Element array[] */
-} rd_kafka_topic_partition_list_t;
-
-
-/**
- * @brief Create a new list/vector Topic+Partition container.
- *
- * @param size Initial allocated size used when the expected number of
- * elements is known or can be estimated.
- * Avoids reallocation and possibly relocation of the
- * elems array.
- *
- * @returns A newly allocated Topic+Partition list.
- *
- * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources
- * in use by a list and the list itself.
- * @sa rd_kafka_topic_partition_list_add()
- */
-RD_EXPORT
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
-
-
-/**
- * @brief Free all resources used by the list and the list itself.
- */
-RD_EXPORT
-void rd_kafka_topic_partition_list_destroy(
- rd_kafka_topic_partition_list_t *rkparlist);
-
-/**
- * @brief Add topic+partition to list
- *
- * @param rktparlist List to extend
- * @param topic Topic name (copied)
- * @param partition Partition id
- *
- * @returns The object which can be used to fill in additional fields.
- */
-RD_EXPORT
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition);
-
-
-/**
- * @brief Add range of partitions from \p start to \p stop inclusive.
- *
- * @param rktparlist List to extend
- * @param topic Topic name (copied)
- * @param start Start partition of range
- * @param stop Last partition of range (inclusive)
- */
-RD_EXPORT
-void rd_kafka_topic_partition_list_add_range(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t start,
- int32_t stop);
-
-
-
-/**
- * @brief Delete partition from list.
- *
- * @param rktparlist List to modify
- * @param topic Topic name to match
- * @param partition Partition to match
- *
- * @returns 1 if partition was found (and removed), else 0.
- *
- * @remark Any held indices to elems[] are unusable after this call returns 1.
- */
-RD_EXPORT
-int rd_kafka_topic_partition_list_del(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition);
-
-
-/**
- * @brief Delete partition from list by elems[] index.
- *
- * @returns 1 if partition was found (and removed), else 0.
- *
- * @sa rd_kafka_topic_partition_list_del()
- */
-RD_EXPORT
-int rd_kafka_topic_partition_list_del_by_idx(
- rd_kafka_topic_partition_list_t *rktparlist,
- int idx);
-
-
-/**
- * @brief Make a copy of an existing list.
- *
- * @param src The existing list to copy.
- *
- * @returns A new list fully populated to be identical to \p src
- */
-RD_EXPORT
-rd_kafka_topic_partition_list_t *
-rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
-
-
-
-/**
- * @brief Set offset to \p offset for \p topic and \p partition
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
- * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found
- * in the list.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition,
- int64_t offset);
-
-
-
-/**
- * @brief Find element by \p topic and \p partition.
- *
- * @returns a pointer to the first matching element, or NULL if not found.
- */
-RD_EXPORT
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition);
-
-
-/**
- * @brief Sort list using comparator \p cmp.
- *
- * If \p cmp is NULL the default comparator will be used that
- * sorts by ascending topic name and partition.
- *
- * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp.
- *
- */
-RD_EXPORT void rd_kafka_topic_partition_list_sort(
- rd_kafka_topic_partition_list_t *rktparlist,
- int (*cmp)(const void *a, const void *b, void *cmp_opaque),
- void *cmp_opaque);
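-
-/**
- * Example: building and using a Topic+Partition list (a sketch; the
- * topic name, partitions and offsets are placeholders):
-
- * @code
- * rd_kafka_topic_partition_list_t *parts =
- *     rd_kafka_topic_partition_list_new(3);
- *
- * rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset = 1234;
- * rd_kafka_topic_partition_list_add_range(parts, "mytopic", 1, 2);
- * rd_kafka_topic_partition_list_set_offset(parts, "mytopic", 1, 5678);
- *
- * if (rd_kafka_topic_partition_list_find(parts, "mytopic", 2))
- *         rd_kafka_topic_partition_list_del(parts, "mytopic", 2);
- *
- * rd_kafka_topic_partition_list_destroy(parts);
- * @endcode
- */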
-
-
-/**@}*/
-
-
-
-/**
- * @name Var-arg tag types
- * @{
- *
- */
-
-/**
- * @enum rd_kafka_vtype_t
- *
- * @brief Var-arg tag types
- *
- * @sa rd_kafka_producev()
- */
-typedef enum rd_kafka_vtype_t {
- RD_KAFKA_VTYPE_END, /**< va-arg sentinel */
- RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */
- RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */
- RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */
- RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/
- RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */
- RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque
- * value. This is the same as
- * the _private field in
- * rd_kafka_message_t, also known
- * as the msg_opaque. */
- RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */
- RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */
- RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t)
- * Message Header */
- RD_KAFKA_VTYPE_HEADERS, /**< (rd_kafka_headers_t *) Headers list */
-} rd_kafka_vtype_t;
-
-
-/**
- * @brief VTYPE + argument container for use with rd_kafka_produce_va()
- *
- * See RD_KAFKA_V_..() macros below for which union field corresponds
- * to which RD_KAFKA_VTYPE_...
- */
-typedef struct rd_kafka_vu_s {
- rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */
- /** Value union, see RD_KAFKA_V_.. macros for which field to use. */
- union {
- const char *cstr;
- rd_kafka_topic_t *rkt;
- int i;
- int32_t i32;
- int64_t i64;
- struct {
- void *ptr;
- size_t size;
- } mem;
- struct {
- const char *name;
- const void *val;
- ssize_t size;
- } header;
- rd_kafka_headers_t *headers;
- void *ptr;
- char _pad[64]; /**< Padding size for future-proofness */
- } u;
-} rd_kafka_vu_t;
-
-/**
- * @brief Convenience macros for rd_kafka_vtype_t that takes the
- * correct arguments for each vtype.
- */
-
-/*!
- * va-arg end sentinel used to terminate the variable argument list
- */
-#define RD_KAFKA_V_END RD_KAFKA_VTYPE_END
-
-/*!
- * Topic name (const char *)
- *
- * rd_kafka_vu_t field: u.cstr
- */
-#define RD_KAFKA_V_TOPIC(topic) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \
- (const char *)topic
-/*!
- * Topic object (rd_kafka_topic_t *)
- *
- * rd_kafka_vu_t field: u.rkt
- */
-#define RD_KAFKA_V_RKT(rkt) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \
- (rd_kafka_topic_t *)rkt
-/*!
- * Partition (int32_t)
- *
- * rd_kafka_vu_t field: u.i32
- */
-#define RD_KAFKA_V_PARTITION(partition) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \
- (int32_t)partition
-/*!
- * Message value/payload pointer and length (void *, size_t)
- *
- * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size
- */
-#define RD_KAFKA_V_VALUE(VALUE, LEN) \
- _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \
- (void *)VALUE, (size_t)LEN
-/*!
- * Message key pointer and length (const void *, size_t)
- *
- * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size
- */
-#define RD_KAFKA_V_KEY(KEY, LEN) \
- _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \
- (void *)KEY, (size_t)LEN
-/*!
- * Message opaque pointer (void *)
- * Same as \c msg_opaque, \c produce(.., msg_opaque),
- * and \c rkmessage->_private .
- *
- * rd_kafka_vu_t field: u.ptr
- */
-#define RD_KAFKA_V_OPAQUE(msg_opaque) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \
- (void *)msg_opaque
-/*!
- * Message flags (int)
- * @sa RD_KAFKA_MSG_F_COPY, et.al.
- *
- * rd_kafka_vu_t field: u.i
- */
-#define RD_KAFKA_V_MSGFLAGS(msgflags) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags
-/*!
- * Timestamp in milliseconds since epoch UTC (int64_t).
- * A value of 0 will use the current wall-clock time.
- *
- * rd_kafka_vu_t field: u.i64
- */
-#define RD_KAFKA_V_TIMESTAMP(timestamp) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \
- (int64_t)timestamp
-/*!
- * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN).
- * @sa rd_kafka_header_add()
- * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed
- * in the same call to producev().
- *
- * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size
- */
-#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN) \
- _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \
- const void *, VALUE, ssize_t, LEN), \
- (const char *)NAME, (const void *)VALUE, (ssize_t)LEN
-
-/*!
- * Message Headers list (rd_kafka_headers_t *).
- * The message object will assume ownership of the headers (unless producev()
- * fails).
- * Any existing headers will be replaced.
- * @sa rd_kafka_message_set_headers()
- * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed
- * in the same call to producev().
- *
- * rd_kafka_vu_t fields: u.headers
- */
-#define RD_KAFKA_V_HEADERS(HDRS) \
- _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \
- (rd_kafka_headers_t *)HDRS
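-
-/**
- * Example: producing a message with var-arg tags (a sketch; assumes an
- * existing \c rk producer handle and rd_kafka_producev(), which is
- * declared further down in this header):
-
- * @code
- * rd_kafka_resp_err_t err = rd_kafka_producev(
- *     rk,
- *     RD_KAFKA_V_TOPIC("mytopic"),
- *     RD_KAFKA_V_VALUE("hello", 5),
- *     RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- *     RD_KAFKA_V_HEADER("trace-id", "abc123", -1),
- *     RD_KAFKA_V_END);
- * @endcode
- */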
-
-
-/**@}*/
-
-
-/**
- * @name Message headers
- * @{
- *
- * @brief Message headers consist of a list of (string key, binary value) pairs.
- * Duplicate keys are supported and the order in which keys were
- *        Duplicate keys are supported and the order in which keys were
- *        added is retained.
- * Header values are considered binary and may have three types of
- * value:
- * - proper value with size > 0 and a valid pointer
- * - empty value with size = 0 and any non-NULL pointer
- * - null value with size = 0 and a NULL pointer
- *
- * Headers require Apache Kafka broker version v0.11.0.0 or later.
- *
- * Header operations are O(n).
- */
-
-
-/**
- * @brief Create a new headers list.
- *
- * @param initial_count Preallocate space for this number of headers.
- * Any number of headers may be added, updated and
- * removed regardless of the initial count.
- */
-RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
-
-/**
- * @brief Destroy the headers list. The object and any returned value pointers
- * are not usable after this call.
- */
-RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
-
-/**
- * @brief Make a copy of headers list \p src.
- */
-RD_EXPORT rd_kafka_headers_t *
-rd_kafka_headers_copy(const rd_kafka_headers_t *src);
-
-/**
- * @brief Add header with name \p name and value \p val (copied) of size
- * \p size (not including null-terminator).
- *
- * @param hdrs Headers list.
- * @param name Header name.
- * @param name_size Header name size (not including the null-terminator).
- * If -1 the \p name length is automatically acquired using
- * strlen().
- * @param value Pointer to header value, or NULL (set size to 0 or -1).
- * @param value_size Size of header value. If -1 the \p value is assumed to be a
- * null-terminated string and the length is automatically
- * acquired using strlen().
- *
- * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only,
- * else RD_KAFKA_RESP_ERR_NO_ERROR.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs,
- const char *name,
- ssize_t name_size,
- const void *value,
- ssize_t value_size);
-
-/**
- * @brief Remove all headers for the given key (if any).
- *
- * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only,
- * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found,
- * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs,
- const char *name);
-
-
-/**
- * @brief Find last header in list \p hdrs matching \p name.
- *
- * @param hdrs Headers list.
- * @param name Header to find (last match).
- * @param valuep (out) Set to a (null-terminated) const pointer to the value
- * (may be NULL).
- * @param sizep (out) Set to the value's size (not including null-terminator).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else
- * RD_KAFKA_RESP_ERR__NOENT.
- *
- * @remark The returned pointer in \p valuep includes a trailing null-terminator
- * that is not accounted for in \p sizep.
- * @remark The returned pointer is only valid as long as the headers list and
- * the header item is valid.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs,
- const char *name,
- const void **valuep,
- size_t *sizep);
-
-/**
- * @brief Iterator for headers matching \p name.
- *
- * Same semantics as rd_kafka_header_get_last()
- *
- * @param hdrs Headers to iterate.
- * @param idx Iterator index, start at 0 and increment by one for each call
- * as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned.
- * @param name Header name to match.
- * @param valuep (out) Set to a (null-terminated) const pointer to the value
- * (may be NULL).
- * @param sizep (out) Set to the value's size (not including null-terminator).
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_header_get(const rd_kafka_headers_t *hdrs,
- size_t idx,
- const char *name,
- const void **valuep,
- size_t *sizep);
-
-
-/**
- * @brief Iterator for all headers.
- *
- * Same semantics as rd_kafka_header_get()
- *
- * @sa rd_kafka_header_get()
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs,
- size_t idx,
- const char **namep,
- const void **valuep,
- size_t *sizep);
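-
-/**
- * Example: creating a headers list and reading a value back (a sketch;
- * the header name and value are placeholders):
-
- * @code
- * rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
- * const void *val;
- * size_t size;
- *
- * rd_kafka_header_add(hdrs, "content-type", -1, "text/plain", -1);
- * if (!rd_kafka_header_get_last(hdrs, "content-type", &val, &size))
- *         printf("content-type = %.*s\n", (int)size, (const char *)val);
- *
- * rd_kafka_headers_destroy(hdrs);
- * @endcode
- */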
-
-
-
-/**@}*/
-
-
-
-/**
- * @name Kafka messages
- * @{
- *
- */
-
-
-
-// FIXME: This doesn't show up in docs for some reason
-// "Compound rd_kafka_message_t is not documented."
-
-/**
- * @brief A Kafka message as returned by the \c rd_kafka_consume*() family
- * of functions as well as provided to the Producer \c dr_msg_cb().
- *
- * For the consumer this object has two purposes:
- * - provide the application with a consumed message. (\c err == 0)
- * - report per-topic+partition consumer errors (\c err != 0)
- *
- * The application must check \c err to decide what action to take.
- *
- * When the application is finished with a message it must call
- * rd_kafka_message_destroy() unless otherwise noted.
- */
-typedef struct rd_kafka_message_s {
- rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */
- rd_kafka_topic_t *rkt; /**< Topic */
- int32_t partition; /**< Partition */
- void *payload; /**< Producer: original message payload.
- * Consumer: Depends on the value of \c err :
- * - \c err==0: Message payload.
- * - \c err!=0: Error string */
- size_t len; /**< Depends on the value of \c err :
- * - \c err==0: Message payload length
- * - \c err!=0: Error string length */
- void *key; /**< Depends on the value of \c err :
- * - \c err==0: Optional message key */
- size_t key_len; /**< Depends on the value of \c err :
- * - \c err==0: Optional message key length*/
- int64_t offset; /**< Consumer:
-                            * - Message offset (or, if \c err!=0, the
-                            *   offset associated with the error, when
-                            *   applicable).
- * Producer, dr_msg_cb:
- * Message offset assigned by broker.
- * May be RD_KAFKA_OFFSET_INVALID
- * for retried messages when
- * idempotence is enabled. */
- void *_private; /**< Consumer:
- * - rdkafka private pointer:
- * DO NOT MODIFY, DO NOT COPY.
- * Producer:
- * - dr_msg_cb:
- * msg_opaque from produce() call or
- * RD_KAFKA_V_OPAQUE from producev(). */
-} rd_kafka_message_t;
-
-
-/**
- * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka.
- */
-RD_EXPORT
-void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
-
-
-
-/**
- * @brief Returns the error string for an errored rd_kafka_message_t or NULL if
- * there was no error.
- *
- * @remark This function MUST NOT be used with the producer.
- */
-RD_EXPORT
-const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
-
-
-/**
- * @brief Returns the message timestamp for a consumed message.
- *
- * The timestamp is the number of milliseconds since the epoch (UTC).
- *
- * \p tstype (if not NULL) is updated to indicate the type of timestamp.
- *
- * @returns message timestamp, or -1 if not available.
- *
- * @remark Message timestamps require broker version 0.10.0 or later.
- */
-RD_EXPORT
-int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage,
- rd_kafka_timestamp_type_t *tstype);
-
-
-
-/**
- * @brief Returns the latency for a produced message measured from
- * the produce() call.
- *
- * @returns the latency in microseconds, or -1 if not available.
- */
-RD_EXPORT
-int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
-
-
-/**
- * @brief Returns the broker id of the broker the message was produced to
- * or fetched from.
- *
- * @returns a broker id if known, else -1.
- */
-RD_EXPORT
-int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
-
-
-/**
- * @brief Get the message header list.
- *
- * The returned pointer in \p *hdrsp is associated with the \p rkmessage and
- * must not be used after the message object is destroyed or after the header
- * list is replaced with rd_kafka_message_set_headers().
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned,
- * RD_KAFKA_RESP_ERR__NOENT if the message has no headers,
- * or another error code if the headers could not be parsed.
- *
- * @remark Headers require broker version 0.11.0.0 or later.
- *
- * @remark As an optimization the raw protocol headers are parsed on
- * the first call to this function.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_message_headers(const rd_kafka_message_t *rkmessage,
- rd_kafka_headers_t **hdrsp);
-
-/**
- * @brief Get the message header list and detach the list from the message
- * making the application the owner of the headers.
- * The application must eventually destroy the headers using
- * rd_kafka_headers_destroy().
- * The message's headers will be set to NULL.
- *
- * Otherwise same semantics as rd_kafka_message_headers()
- *
- * @sa rd_kafka_message_headers
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage,
- rd_kafka_headers_t **hdrsp);
-
-
-/**
- * @brief Replace the message's current headers with a new list.
- *
- * @param rkmessage The message to set headers on.
- * @param hdrs New header list. The message object assumes ownership of
- * the list, the list will be destroyed automatically with
- * the message object.
- * The new headers list may be updated until the message object
- * is passed or returned to librdkafka.
- *
- * @remark The existing headers object, if any, will be destroyed.
- */
-RD_EXPORT
-void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage,
- rd_kafka_headers_t *hdrs);
-
-
-/**
- * @brief Returns the number of header key/value pairs.
- *
- * @param hdrs Headers to count
- */
-RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
-
-
-/**
- * @enum rd_kafka_msg_status_t
- * @brief Message persistence status can be used by the application to
- * find out if a produced message was persisted in the topic log.
- */
-typedef enum {
- /** Message was never transmitted to the broker, or failed with
- * an error indicating it was not written to the log.
- * Application retry risks ordering, but not duplication. */
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
-
- /** Message was transmitted to broker, but no acknowledgement was
- * received.
- * Application retry risks ordering and duplication. */
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
-
- /** Message was written to the log and acknowledged by the broker.
- * No reason for application to retry.
- * Note: this value should only be trusted with \c acks=all. */
- RD_KAFKA_MSG_STATUS_PERSISTED = 2
-} rd_kafka_msg_status_t;
-
-
-/**
- * @brief Returns the message's persistence status in the topic log.
- *
- * @remark The message status is not available in on_acknowledgement
- * interceptors.
- */
-RD_EXPORT rd_kafka_msg_status_t
-rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
-
-
-/**
- * @returns the message's partition leader epoch at the time the message was
- *          fetched, if known, else -1.
- *
- * @remark This API must only be used on consumed messages without error.
- * @remark Requires broker version >= 2.1.0 (KIP-320).
- */
-RD_EXPORT int32_t
-rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage);
-
-
-/**@}*/
-
-
-/**
- * @name Configuration interface
- * @{
- *
- * @brief Main/global configuration property interface
- *
- */
-
-/**
- * @enum rd_kafka_conf_res_t
- * @brief Configuration result type
- */
-typedef enum {
- RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */
- RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or
- * property or value not supported in
- * this build. */
- RD_KAFKA_CONF_OK = 0 /**< Configuration okay */
-} rd_kafka_conf_res_t;
-
-
-/**
- * @brief Create configuration object.
- *
- * When providing your own configuration to the \c rd_kafka_*_new_*() calls
- * the rd_kafka_conf_t object needs to be created with this function
- * which will set up the defaults.
- * I.e.:
- * @code
- * rd_kafka_conf_t *myconf;
- * rd_kafka_conf_res_t res;
- * char errstr[512];
- *
- * myconf = rd_kafka_conf_new();
- * res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
- * errstr, sizeof(errstr));
- * if (res != RD_KAFKA_CONF_OK)
- * die("%s\n", errstr);
- *
- * rk = rd_kafka_new(..., myconf);
- * @endcode
- *
- * Please see CONFIGURATION.md for the default settings or use
- * rd_kafka_conf_properties_show() to provide the information at runtime.
- *
- * The properties are identical to the Apache Kafka configuration properties
- * whenever possible.
- *
- * @remark A successful call to rd_kafka_new() will assume ownership of
- * the conf object and rd_kafka_conf_destroy() must not be called.
- *
- * @returns A new rd_kafka_conf_t object with defaults set.
- *
- * @sa rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy()
- */
-RD_EXPORT
-rd_kafka_conf_t *rd_kafka_conf_new(void);
-
-
-/**
- * @brief Destroys a conf object.
- */
-RD_EXPORT
-void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
-
-
-/**
- * @brief Creates a copy/duplicate of configuration object \p conf
- *
- * @remark Interceptors are NOT copied to the new configuration object.
- * @sa rd_kafka_interceptor_f_on_conf_dup
- */
-RD_EXPORT
-rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
-
-
-/**
- * @brief Same as rd_kafka_conf_dup() but with an array of property name
- * prefixes to filter out (ignore) when copying.
- */
-RD_EXPORT
-rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf,
- size_t filter_cnt,
- const char **filter);
-
-
-
-/**
- * @returns the configuration object used by an rd_kafka_t instance.
- * For use with rd_kafka_conf_get(), et.al., to extract configuration
- * properties from a running client.
- *
- * @remark the returned object is read-only and its lifetime is the same
- * as the rd_kafka_t object.
- */
-RD_EXPORT
-const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
-
-
-/**
- * @brief Sets a configuration property.
- *
- * \p conf must have been previously created with rd_kafka_conf_new().
- *
- * Fallthrough:
- * Topic-level configuration properties may be set using this interface
- * in which case they are applied on the \c default_topic_conf.
- * If no \c default_topic_conf has been set one will be created.
- * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will
- * replace the current default topic configuration.
- *
- * @returns \c rd_kafka_conf_res_t to indicate success or failure.
- * In case of failure \p errstr is updated to contain a human readable
- * error string.
- *
- * @remark Setting properties or values that were disabled at build time due to
- * missing dependencies will return RD_KAFKA_CONF_INVALID.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
- const char *name,
- const char *value,
- char *errstr,
- size_t errstr_size);
-
-
-/**
- * @brief Enable event sourcing.
- * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable
- * for consumption by `rd_kafka_queue_poll()`.
- */
-RD_EXPORT
-void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
-
-
-/**
- * @brief Generic event callback to be used with the event API to trigger
- * callbacks for \c rd_kafka_event_t objects from a background
- * thread serving the background queue.
- *
- * How to use:
- * 1. First set the event callback on the configuration object with this
- * function, followed by creating an rd_kafka_t instance
- * with rd_kafka_new().
- * 2. Get the instance's background queue with rd_kafka_queue_get_background()
- * and pass it as the reply/response queue to an API that takes an
- * event queue, such as rd_kafka_CreateTopics().
- * 3. As the response event is ready and enqueued on the background queue the
- * event callback will be triggered from the background thread.
- * 4. Prior to destroying the client instance, lose your reference to the
- * background queue by calling rd_kafka_queue_destroy().
- *
- * The application must destroy the \c rkev passed to \p event_cb using
- * rd_kafka_event_destroy().
- *
- * The \p event_cb \c opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @remark This callback is a specialized alternative to the poll-based
- * event API described in the Event interface section.
- *
- * @remark The \p event_cb will be called spontaneously from a background
- * thread completely managed by librdkafka.
- * Take care to perform proper locking of application objects.
- *
- * @warning The application MUST NOT call rd_kafka_destroy() from the
- * event callback.
- *
- * @sa rd_kafka_queue_get_background
- */
-RD_EXPORT void rd_kafka_conf_set_background_event_cb(
- rd_kafka_conf_t *conf,
- void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
-
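-/**
- * Editor's illustrative sketch (not part of the original header),
- * following the numbered steps above; error handling omitted:
- * @code
- * static void my_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev,
- *                         void *opaque) {
- *         fprintf(stderr, "Event: %s\n", rd_kafka_event_name(rkev));
- *         rd_kafka_event_destroy(rkev);
- * }
- *
- * rd_kafka_conf_set_background_event_cb(conf, my_event_cb); // step 1
- * rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- * rkqu = rd_kafka_queue_get_background(rk);                 // step 2
- * // ... pass rkqu as the reply queue to e.g. rd_kafka_CreateTopics();
- * // the event callback fires from the background thread (step 3) ...
- * rd_kafka_queue_destroy(rkqu);                             // step 4
- * rd_kafka_destroy(rk);
- * @endcode
- */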
-
-/**
- * @deprecated See rd_kafka_conf_set_dr_msg_cb()
- */
-RD_EXPORT
-void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf,
- void (*dr_cb)(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque));
-
-/**
- * @brief \b Producer: Set delivery report callback in provided \p conf object.
- *
- * The delivery report callback will be called once for each message
- * accepted by rd_kafka_produce() (et.al) with \p err set to indicate
- * the result of the produce request.
- *
- * The callback is called when a message is successfully produced or
- * if librdkafka encountered a permanent failure.
- * Delivery errors occur when the retry count is exceeded, when the
- * message.timeout.ms timeout is exceeded, or when there is a permanent
- * error like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART.
- *
- * An application must call rd_kafka_poll() at regular intervals to
- * serve queued delivery report callbacks.
- *
- * The broker-assigned offset can be retrieved with \c rkmessage->offset
- * and the timestamp can be retrieved using rd_kafka_message_timestamp().
- *
- * The \p dr_msg_cb \c opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- * The per-message msg_opaque value is available in
- * \c rd_kafka_message_t._private.
- *
- * @remark The Idempotent Producer may return an invalid timestamp
- *         (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) and an invalid offset
- *         (RD_KAFKA_OFFSET_INVALID) for retried messages that were
- *         previously successfully delivered but not properly
- *         acknowledged.
- */
-RD_EXPORT
-void rd_kafka_conf_set_dr_msg_cb(
- rd_kafka_conf_t *conf,
- void (*dr_msg_cb)(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque));
-
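-/**
- * Editor's illustrative sketch (not part of the original header):
- * a minimal \c dr_msg_cb that also consults the persistence status
- * (see rd_kafka_message_status() above):
- * @code
- * static void dr_msg_cb(rd_kafka_t *rk,
- *                       const rd_kafka_message_t *rkmessage,
- *                       void *opaque) {
- *         if (rkmessage->err)
- *                 fprintf(stderr, "Delivery failed: %s\n",
- *                         rd_kafka_err2str(rkmessage->err));
- *         else if (rd_kafka_message_status(rkmessage) ==
- *                  RD_KAFKA_MSG_STATUS_PERSISTED)
- *                 fprintf(stderr, "Persisted at offset %"PRId64"\n",
- *                         rkmessage->offset);
- * }
- *
- * rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
- * @endcode
- */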
-
-/**
- * @brief \b Consumer: Set consume callback for use with
- * rd_kafka_consumer_poll()
- *
- * The \p consume_cb \c opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- */
-RD_EXPORT
-void rd_kafka_conf_set_consume_cb(
- rd_kafka_conf_t *conf,
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
-
-/**
- * @brief \b Consumer: Set rebalance callback for use with
- * coordinated consumer group balancing.
- *
- * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions'
- * contains the full partition set that was either assigned or revoked.
- *
- * Registering a \p rebalance_cb turns off librdkafka's automatic
- * partition assignment/revocation and instead delegates that responsibility
- * to the application's \p rebalance_cb.
- *
- * The rebalance callback is responsible for updating librdkafka's
- * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle
- * arbitrary rebalancing failures where \p err is neither of those.
- * @remark In this latter case (arbitrary error), the application must
- * call rd_kafka_assign(rk, NULL) to synchronize state.
- *
- * For eager/non-cooperative `partition.assignment.strategy` assignors,
- * such as `range` and `roundrobin`, the application must use
- * rd_kafka_assign() to set or clear the entire assignment.
- * For the cooperative assignors, such as `cooperative-sticky`, the application
- * must use rd_kafka_incremental_assign() for
- * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign()
- * for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS.
- *
- * Without a rebalance callback this is done automatically by librdkafka
- * but registering a rebalance callback gives the application flexibility
- * in performing other operations along with the assigning/revocation,
- * such as fetching offsets from an alternate location (on assign)
- * or manually committing offsets (on revoke).
- *
- * rebalance_cb is always triggered exactly once when a rebalance completes
- * with a new assignment, even if that assignment is empty. If an
- * eager/non-cooperative assignor is configured, there will eventually be
- * exactly one corresponding call to rebalance_cb to revoke these partitions
- * (even if empty), whether this is due to a group rebalance or lost
- * partitions. In the cooperative case, rebalance_cb will never be called if
- * the set of partitions being revoked is empty (whether or not lost).
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @remark The \p partitions list is destroyed by librdkafka on return
- *         from the rebalance_cb and must not be freed or saved by
- *         the application.
- *
- * @remark Be careful when modifying the \p partitions list:
- *         it should only be changed to set the initial offsets for
- *         each partition.
- *         A function like `rd_kafka_position()` may have unexpected
- *         effects, for instance when a consumer is re-assigned a partition
- *         it consumed at an earlier rebalance: the \p partitions list would
- *         then be updated with that partition's old offset.
- *         It is therefore generally better to pass a copy of the list
- *         (see `rd_kafka_topic_partition_list_copy()`), since the result
- *         of `rd_kafka_position()` is typically outdated at
- *         RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS.
- *
- * @sa rd_kafka_assign()
- * @sa rd_kafka_incremental_assign()
- * @sa rd_kafka_incremental_unassign()
- * @sa rd_kafka_assignment_lost()
- * @sa rd_kafka_rebalance_protocol()
- *
- * The following example shows the application's responsibilities:
- * @code
- * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
- * rd_kafka_topic_partition_list_t *partitions,
- * void *opaque) {
- *
- * switch (err)
- * {
- * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- *          // application may load offsets from arbitrary external
- * // storage here and update \p partitions
- * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
- * rd_kafka_incremental_assign(rk, partitions);
- * else // EAGER
- * rd_kafka_assign(rk, partitions);
- * break;
- *
- * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- * if (manual_commits) // Optional explicit manual commit
- * rd_kafka_commit(rk, partitions, 0); // sync commit
- *
- * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
- * rd_kafka_incremental_unassign(rk, partitions);
- * else // EAGER
- * rd_kafka_assign(rk, NULL);
- * break;
- *
- * default:
- * handle_unlikely_error(err);
- * rd_kafka_assign(rk, NULL); // sync state
- * break;
- * }
- * }
- * @endcode
- *
- * @remark The above example lacks error handling for assign calls, see
- * the examples/ directory.
- */
-RD_EXPORT
-void rd_kafka_conf_set_rebalance_cb(
- rd_kafka_conf_t *conf,
- void (*rebalance_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque));
-
-
-
-/**
- * @brief \b Consumer: Set offset commit callback for use with consumer groups.
- *
- * The results of automatic or manual offset commits will be scheduled
- * for this callback and are served by rd_kafka_consumer_poll().
- *
- * If no partitions had valid offsets to commit this callback will be called
- * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered
- * an error.
- *
- * The \p offsets list contains per-partition information:
- * - \c offset: committed offset (attempted)
- * - \c err: commit error
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- */
-RD_EXPORT
-void rd_kafka_conf_set_offset_commit_cb(
- rd_kafka_conf_t *conf,
- void (*offset_commit_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque));
-
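-/**
- * Editor's illustrative sketch (not part of the original header):
- * logging per-partition commit failures:
- * @code
- * static void offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
- *                              rd_kafka_topic_partition_list_t *offsets,
- *                              void *opaque) {
- *         int i;
- *         if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
- *                 return; // nothing was committed: not an error
- *         for (i = 0 ; i < offsets->cnt ; i++) {
- *                 const rd_kafka_topic_partition_t *p =
- *                         &offsets->elems[i];
- *                 if (p->err)
- *                         fprintf(stderr, "Commit of %s [%d] failed: %s\n",
- *                                 p->topic, (int)p->partition,
- *                                 rd_kafka_err2str(p->err));
- *         }
- * }
- * @endcode
- */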
-
-/**
- * @brief Set error callback in provided conf object.
- *
- * The error callback is used by librdkafka to signal warnings and errors
- * back to the application.
- *
- * These errors should generally be considered informational and
- * non-permanent; the client will try to recover automatically from all
- * types of errors.
- * Provided the client and cluster configuration is correct, the
- * application should treat these as temporary errors.
- *
- * \p error_cb will be triggered with \c err set to RD_KAFKA_RESP_ERR__FATAL
- * if a fatal error has been raised; in this case use rd_kafka_fatal_error() to
- * retrieve the fatal error code and error string, and then begin terminating
- * the client instance.
- *
- * If no \p error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set
- * with rd_kafka_conf_set_events, then the errors will be logged instead.
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- */
-RD_EXPORT
-void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,
- void (*error_cb)(rd_kafka_t *rk,
- int err,
- const char *reason,
- void *opaque));
-
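-/**
- * Editor's illustrative sketch (not part of the original header):
- * an \c error_cb that distinguishes fatal from transient errors:
- * @code
- * static void error_cb(rd_kafka_t *rk, int err, const char *reason,
- *                      void *opaque) {
- *         if (err == RD_KAFKA_RESP_ERR__FATAL) {
- *                 char fatal_errstr[512];
- *                 rd_kafka_resp_err_t fatal_err =
- *                         rd_kafka_fatal_error(rk, fatal_errstr,
- *                                              sizeof(fatal_errstr));
- *                 fprintf(stderr, "FATAL %s: %s\n",
- *                         rd_kafka_err2name(fatal_err), fatal_errstr);
- *                 // ... begin terminating the client instance ...
- *         } else
- *                 fprintf(stderr, "Transient error: %s: %s\n",
- *                         rd_kafka_err2str((rd_kafka_resp_err_t)err),
- *                         reason);
- * }
- * @endcode
- */
-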
-/**
- * @brief Set throttle callback.
- *
- * The throttle callback is used to forward broker throttle times to the
- * application for Produce and Fetch (consume) requests.
- *
- * Callbacks are triggered whenever a non-zero throttle time is returned by
- * the broker, or when the throttle time drops back to zero.
- *
- * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at
- * regular intervals to serve queued callbacks.
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @remark Requires broker version 0.9.0 or later.
- */
-RD_EXPORT
-void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf,
- void (*throttle_cb)(rd_kafka_t *rk,
- const char *broker_name,
- int32_t broker_id,
- int throttle_time_ms,
- void *opaque));
-
-
-/**
- * @brief Set logger callback.
- *
- * The default is to print to stderr, but a syslog logger is also available,
- * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives.
- * Alternatively the application may provide its own logger callback.
- * Or pass \p log_cb as NULL to disable logging.
- *
- * This is the configuration alternative to the deprecated
- * rd_kafka_set_logger().
- *
- * @remark The log_cb will be called spontaneously from librdkafka's internal
- * threads unless logs have been forwarded to a poll queue through
- * \c rd_kafka_set_log_queue().
- * An application MUST NOT call any librdkafka APIs or do any prolonged
- * work in a non-forwarded \c log_cb.
- */
-RD_EXPORT
-void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf,
- void (*log_cb)(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf));
-
-
-/**
- * @brief Set statistics callback in provided conf object.
- *
- * The statistics callback is triggered from rd_kafka_poll() every
- * \c statistics.interval.ms (needs to be configured separately).
- * Function arguments:
- * - \p rk - Kafka handle
- * - \p json - String containing the statistics data in JSON format
- * - \p json_len - Length of \p json string.
- * - \p opaque - Application-provided opaque as set by
- * rd_kafka_conf_set_opaque().
- *
- * For more information on the format of \p json, see
- * https://github.com/edenhill/librdkafka/wiki/Statistics
- *
- * If the application wishes to hold on to the \p json pointer and free
- * it at a later time it must return 1 from the \p stats_cb.
- * If the application returns 0 from the \p stats_cb then librdkafka
- * will immediately free the \p json pointer.
- *
- * See STATISTICS.md for a full definition of the JSON object.
- */
-RD_EXPORT
-void rd_kafka_conf_set_stats_cb(
- rd_kafka_conf_t *conf,
- int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
-
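-/**
- * Editor's illustrative sketch (not part of the original header):
- * writing each statistics emission to stdout and letting librdkafka
- * free the JSON buffer:
- * @code
- * static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
- *                     void *opaque) {
- *         fwrite(json, 1, json_len, stdout);
- *         return 0; // librdkafka will free(json)
- * }
- *
- * rd_kafka_conf_set_stats_cb(conf, stats_cb);
- * rd_kafka_conf_set(conf, "statistics.interval.ms", "60000",
- *                   errstr, sizeof(errstr));
- * @endcode
- */
-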
-/**
- * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object.
- *
- * @param conf the configuration to mutate.
- * @param oauthbearer_token_refresh_cb the callback to set; callback function
- * arguments:<br>
- * \p rk - Kafka handle<br>
- * \p oauthbearer_config - Value of configuration property
- *  sasl.oauthbearer.config.<br>
- * \p opaque - Application-provided opaque set via
- * rd_kafka_conf_set_opaque()
- *
- * The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll()
- * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved,
- * typically based on the configuration defined in \c sasl.oauthbearer.config.
- *
- * The callback should invoke rd_kafka_oauthbearer_set_token()
- * or rd_kafka_oauthbearer_set_token_failure() to indicate success
- * or failure, respectively.
- *
- * The refresh operation is eventable and may be received via
- * rd_kafka_queue_poll() with an event type of
- * \c RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH.
- *
- * Note that before any SASL/OAUTHBEARER broker connection can succeed the
- * application must call rd_kafka_oauthbearer_set_token() once -- either
- * directly or, more typically, by invoking either rd_kafka_poll(),
- * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause
- * retrieval of an initial token to occur.
- *
- * Alternatively, the application can enable the SASL queue by calling
- * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to
- * creating the client instance, get the SASL queue with
- * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling
- * rd_kafka_queue_poll(), or redirecting the queue to the background thread to
- * have the queue served automatically. For the latter case the SASL queue
- * must be forwarded to the background queue with rd_kafka_queue_forward().
- * A convenience function is available to automatically forward the SASL queue
- * to librdkafka's background thread, see
- * rd_kafka_sasl_background_callbacks_enable().
- *
- * An unsecured JWT refresh handler is provided by librdkafka for development
- * and testing purposes; it is enabled by setting
- * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is
- * mutually exclusive to using a refresh callback.
- *
- * @sa rd_kafka_sasl_background_callbacks_enable()
- * @sa rd_kafka_queue_get_sasl()
- */
-RD_EXPORT
-void rd_kafka_conf_set_oauthbearer_token_refresh_cb(
- rd_kafka_conf_t *conf,
- void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque));
-
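-/**
- * Editor's illustrative sketch (not part of the original header;
- * acquire_token() and now_ms() are hypothetical application helpers):
- * @code
- * static void refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config,
- *                        void *opaque) {
- *         char errstr[512];
- *         char *token = acquire_token(); // hypothetical helper
- *         if (!token) {
- *                 rd_kafka_oauthbearer_set_token_failure(
- *                         rk, "token acquisition failed");
- *                 return;
- *         }
- *         if (rd_kafka_oauthbearer_set_token(
- *                     rk, token,
- *                     now_ms() + 3600*1000, // token lifetime (wallclock ms)
- *                     "principalname", NULL, 0,
- *                     errstr, sizeof(errstr)))
- *                 rd_kafka_oauthbearer_set_token_failure(rk, errstr);
- *         free(token);
- * }
- * @endcode
- */
-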
-/**
- * @brief Enable/disable creation of a queue specific to SASL events
- * and callbacks.
- *
- * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this
- * configuration API allows an application to get a dedicated
- * queue for the SASL events/callbacks. After enabling the queue with this API
- * the application can retrieve the queue by calling
- * rd_kafka_queue_get_sasl() on the client instance.
- * This queue may then be served directly by the application
- * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as
- * the background queue.
- *
- * A convenience function is available to automatically forward the SASL queue
- * to librdkafka's background thread, see
- * rd_kafka_sasl_background_callbacks_enable().
- *
- * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(),
- * et.al.) is used for SASL callbacks.
- *
- * @remark The SASL queue is currently only used by the SASL OAUTHBEARER
- * mechanism's token_refresh_cb().
- *
- * @sa rd_kafka_queue_get_sasl()
- * @sa rd_kafka_sasl_background_callbacks_enable()
- */
-RD_EXPORT
-void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
-
-
-/**
- * @brief Set socket callback.
- *
- * The socket callback is responsible for opening a socket
- * according to the supplied \p domain, \p type and \p protocol.
- * The socket shall be created with \c CLOEXEC set in a racefree fashion, if
- * possible.
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * Default:
- * - on linux: racefree CLOEXEC
- * - others : non-racefree CLOEXEC
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT
-void rd_kafka_conf_set_socket_cb(
- rd_kafka_conf_t *conf,
- int (*socket_cb)(int domain, int type, int protocol, void *opaque));
-
-
-
-/**
- * @brief Set connect callback.
- *
- * The connect callback is responsible for connecting socket \p sockfd
- * to peer address \p addr.
- * The \p id field contains the broker identifier.
- *
- * \p connect_cb shall return 0 on success (socket connected) or an error
- * number (errno) on error.
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT void
-rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf,
- int (*connect_cb)(int sockfd,
- const struct sockaddr *addr,
- int addrlen,
- const char *id,
- void *opaque));
-
-/**
- * @brief Set close socket callback.
- *
- * Close a socket (optionally opened with socket_cb()).
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT void rd_kafka_conf_set_closesocket_cb(
- rd_kafka_conf_t *conf,
- int (*closesocket_cb)(int sockfd, void *opaque));
-
-
-
-#ifndef _WIN32
-/**
- * @brief Set open callback.
- *
- * The open callback is responsible for opening the file specified by
- * pathname, flags and mode.
- * The file shall be opened with \c CLOEXEC set in a racefree fashion, if
- * possible.
- *
- * Default:
- * - on linux: racefree CLOEXEC
- * - others : non-racefree CLOEXEC
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT
-void rd_kafka_conf_set_open_cb(
- rd_kafka_conf_t *conf,
- int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque));
-#endif
-
-/** Forward declaration to avoid netdb.h or winsock includes */
-struct addrinfo;
-
-/**
- * @brief Set address resolution callback.
- *
- * The callback is responsible for resolving the hostname \p node and the
- * service \p service into a list of socket addresses as \c getaddrinfo(3)
- * would. The \p hints and \p res parameters function as they do for
- * \c getaddrinfo(3). The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * If the callback is invoked with a NULL \p node, \p service, and \p hints, the
- * callback should instead free the addrinfo struct specified in \p res. In this
- * case the callback must succeed; the return value will not be checked by the
- * caller.
- *
- * The callback's return value is interpreted as the return value of
- * \c getaddrinfo(3).
- *
- * @remark The callback will be called from an internal librdkafka thread.
- */
-RD_EXPORT void
-rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf,
- int (*resolve_cb)(const char *node,
- const char *service,
- const struct addrinfo *hints,
- struct addrinfo **res,
- void *opaque));
-
-/**
- * @brief Sets the verification callback of the broker certificate
- *
- * The verification callback is triggered from internal librdkafka threads
- * upon connecting to a broker. On each connection attempt the callback
- * will be called for each certificate in the broker's certificate chain,
- * starting at the root certificate, as long as the application callback
- * returns 1 (valid certificate).
- * \c broker_name and \c broker_id correspond to the broker the connection
- * is being made to.
- * The \c x509_error argument indicates if OpenSSL's verification of
- * the certificate succeeded (0) or failed (an OpenSSL error code).
- * The application may set the SSL context error code by returning 0
- * from the verify callback and providing a non-zero SSL context error code
- * in \c x509_error.
- * If the verify callback sets \c x509_error to 0, returns 1, and the
- * original \c x509_error was non-zero, the error on the SSL context will
- * be cleared.
- * \c x509_error is always a valid pointer to an int.
- *
- * \c depth is the depth of the current certificate in the chain, starting
- * at the root certificate.
- *
- * The certificate itself is passed in binary DER format in \c buf of
- * size \c size.
- *
- * The callback must return 1 if verification succeeds, or
- * 0 if verification fails and then write a human-readable error message
- * to \c errstr (limited to \c errstr_size bytes, including nul-term).
- *
- * The callback's \p opaque argument is the opaque set with
- * rd_kafka_conf_set_opaque().
- *
- * @returns RD_KAFKA_CONF_OK if SSL is supported in this build, else
- * RD_KAFKA_CONF_INVALID.
- *
- * @warning This callback will be called from internal librdkafka threads.
- *
- * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution
- * for a list of \p x509_error codes.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(
- rd_kafka_conf_t *conf,
- int (*ssl_cert_verify_cb)(rd_kafka_t *rk,
- const char *broker_name,
- int32_t broker_id,
- int *x509_error,
- int depth,
- const char *buf,
- size_t size,
- char *errstr,
- size_t errstr_size,
- void *opaque));
-
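-/**
- * Editor's illustrative sketch (not part of the original header):
- * deferring entirely to OpenSSL's verification result:
- * @code
- * static int ssl_cert_verify_cb(rd_kafka_t *rk, const char *broker_name,
- *                               int32_t broker_id, int *x509_error,
- *                               int depth, const char *buf, size_t size,
- *                               char *errstr, size_t errstr_size,
- *                               void *opaque) {
- *         if (*x509_error == 0)
- *                 return 1; // OpenSSL deemed the certificate valid
- *         snprintf(errstr, errstr_size,
- *                  "Rejecting certificate at depth %d: OpenSSL error %d",
- *                  depth, *x509_error);
- *         return 0;
- * }
- * @endcode
- */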
-
-/**
- * @enum rd_kafka_cert_type_t
- *
- * @brief SSL certificate type
- *
- * @sa rd_kafka_conf_set_ssl_cert
- */
-typedef enum rd_kafka_cert_type_t {
- RD_KAFKA_CERT_PUBLIC_KEY, /**< Client's public key */
- RD_KAFKA_CERT_PRIVATE_KEY, /**< Client's private key */
- RD_KAFKA_CERT_CA, /**< CA certificate */
- RD_KAFKA_CERT__CNT,
-} rd_kafka_cert_type_t;
-
-/**
- * @enum rd_kafka_cert_enc_t
- *
- * @brief SSL certificate encoding
- *
- * @sa rd_kafka_conf_set_ssl_cert
- */
-typedef enum rd_kafka_cert_enc_t {
- RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */
- RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */
- RD_KAFKA_CERT_ENC_PEM, /**< PEM */
- RD_KAFKA_CERT_ENC__CNT,
-} rd_kafka_cert_enc_t;
-
-
-/**
- * @brief Set certificate/key \p cert_type from the \p cert_enc encoded
- * memory at \p buffer of \p size bytes.
- *
- * @param conf Configuration object.
- * @param cert_type Certificate or key type to configure.
- * @param cert_enc Encoding type of \p buffer.
- * @param buffer Memory pointer to encoded certificate or key.
- * The memory is not referenced after this function returns.
- * @param size Size of memory at \p buffer.
- * @param errstr Memory where a human-readable error string will be written
- * on failure.
- * @param errstr_size Size of \p errstr, including space for nul-terminator.
- *
- * @returns RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the
- * memory in \p buffer is of incorrect encoding, or if librdkafka
- * was not built with SSL support.
- *
- * @remark Calling this method multiple times with the same \p cert_type
- * will replace the previous value.
- *
- * @remark Calling this method with \p buffer set to NULL will clear the
- * configuration for \p cert_type.
- *
- * @remark The private key may require a password, which must be specified
- * with the `ssl.key.password` configuration property prior to
- * calling this function.
- *
- * @remark Private and public keys in PEM format may also be set with the
- * `ssl.key.pem` and `ssl.certificate.pem` configuration properties.
- *
- * @remark CA certificate in PEM format may also be set with the
- * `ssl.ca.pem` configuration property.
- *
- * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is
- * encoded using an obsolete cipher, it might be necessary to set up
- * an OpenSSL configuration file to load the "legacy" provider and
- * set the OPENSSL_CONF environment variable.
- * See
- * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more
- * information.
- */
-RD_EXPORT rd_kafka_conf_res_t
-rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf,
- rd_kafka_cert_type_t cert_type,
- rd_kafka_cert_enc_t cert_enc,
- const void *buffer,
- size_t size,
- char *errstr,
- size_t errstr_size);
-
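-/**
- * Editor's illustrative sketch (not part of the original header;
- * \c cert_pem and \c cert_len are assumed to hold a PEM-encoded client
- * certificate read by the application):
- * @code
- * if (rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PUBLIC_KEY,
- *                                RD_KAFKA_CERT_ENC_PEM,
- *                                cert_pem, cert_len,
- *                                errstr, sizeof(errstr)) !=
- *     RD_KAFKA_CONF_OK)
- *         fprintf(stderr, "Failed to set certificate: %s\n", errstr);
- * @endcode
- */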
-
-/**
- * @brief Set callback_data for OpenSSL engine.
- *
- * @param conf Configuration object.
- * @param callback_data passed to engine callbacks,
- * e.g. \c ENGINE_load_ssl_client_cert.
- *
- * @remark The \c ssl.engine.location configuration must be set for this
- *         to take effect.
- *
- * @remark The memory pointed to by \p callback_data must remain valid for the
- * lifetime of the configuration object and any Kafka clients that
- * use it.
- */
-RD_EXPORT
-void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf,
- void *callback_data);
-
-
-/**
- * @brief Sets the application's opaque pointer that will be passed to callbacks
- *
- * @sa rd_kafka_opaque()
- */
-RD_EXPORT
-void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
-
-/**
- * @brief Retrieves the opaque pointer previously set
- * with rd_kafka_conf_set_opaque()
- */
-RD_EXPORT
-void *rd_kafka_opaque(const rd_kafka_t *rk);
-
-
-
-/**
- * @brief Sets the default topic configuration to use for automatically
- * subscribed topics (e.g., through pattern-matched topics).
- * The topic config object is not usable after this call.
- *
- * @warning Any topic configuration settings that have been set on the
- * global rd_kafka_conf_t object will be overwritten by this call
- * since the implicitly created default topic config object is
- * replaced by the user-supplied one.
- *
- * @deprecated Set default topic level configuration on the
- * global rd_kafka_conf_t object instead.
- */
-RD_EXPORT
-void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *tconf);
-
-/**
- * @brief Gets the default topic configuration as previously set with
- * rd_kafka_conf_set_default_topic_conf() or that was implicitly created
- * by configuring a topic-level property on the global \p conf object.
- *
- * @returns the \p conf's default topic configuration (if any), or NULL.
- *
- * @warning The returned topic configuration object is owned by the \p conf
- * object. It may be modified but not destroyed and its lifetime is
- * the same as the \p conf object or the next call to
- * rd_kafka_conf_set_default_topic_conf().
- */
-RD_EXPORT rd_kafka_topic_conf_t *
-rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
-
-
-/**
- * @brief Retrieve configuration value for property \p name.
- *
- * If \p dest is non-NULL the value will be written to \p dest with at
- * most \p dest_size.
- *
- * \p *dest_size is updated to the full length of the value, thus if
- * \p *dest_size initially is smaller than the full length the application
- * may reallocate \p dest to fit the returned \p *dest_size and try again.
- *
- * If \p dest is NULL only the full length of the value is returned.
- *
- * Fallthrough:
- * Topic-level configuration properties from the \c default_topic_conf
- * may be retrieved using this interface.
- *
- * @returns \p RD_KAFKA_CONF_OK if the property name matched, else
- * \p RD_KAFKA_CONF_UNKNOWN.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf,
- const char *name,
- char *dest,
- size_t *dest_size);
-
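-/**
- * Editor's illustrative sketch (not part of the original header):
- * the typical two-pass retrieval, first querying the required size
- * (malloc()/free() from <stdlib.h>):
- * @code
- * size_t sz = 0;
- * char *val;
- *
- * if (rd_kafka_conf_get(conf, "client.id", NULL, &sz) ==
- *     RD_KAFKA_CONF_OK) {
- *         val = malloc(sz);
- *         rd_kafka_conf_get(conf, "client.id", val, &sz);
- *         printf("client.id = %s\n", val);
- *         free(val);
- * }
- * @endcode
- */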
-
-/**
- * @brief Retrieve topic configuration value for property \p name.
- *
- * @sa rd_kafka_conf_get()
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf,
- const char *name,
- char *dest,
- size_t *dest_size);
-
-
-/**
- * @brief Dump the configuration properties and values of \p conf to an array
- * with \"key\", \"value\" pairs.
- *
- * The number of entries in the array is returned in \p *cntp.
- *
- * The dump must be freed with `rd_kafka_conf_dump_free()`.
- */
-RD_EXPORT
-const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
-
-
-/**
- * @brief Dump the topic configuration properties and values of \p conf
- * to an array with \"key\", \"value\" pairs.
- *
- * The number of entries in the array is returned in \p *cntp.
- *
- * The dump must be freed with `rd_kafka_conf_dump_free()`.
- */
-RD_EXPORT
-const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf,
- size_t *cntp);
-
-/**
- * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or
- *        `rd_kafka_topic_conf_dump()`.
- */
-RD_EXPORT
-void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
-
-/**
- * @brief Prints a table to \p fp of all supported configuration properties,
- * their default values as well as a description.
- *
- * @remark All properties and values are shown, even those
- * that have been disabled at build time due to missing dependencies.
- */
-RD_EXPORT
-void rd_kafka_conf_properties_show(FILE *fp);
-
-/**@}*/
-
-
-/**
- * @name Topic configuration
- * @brief Topic configuration property interface
- * @{
- *
- */
-
-
-/**
- * @brief Create topic configuration object
- *
- * @sa Same semantics as for rd_kafka_conf_new().
- */
-RD_EXPORT
-rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
-
-
-/**
- * @brief Creates a copy/duplicate of topic configuration object \p conf.
- */
-RD_EXPORT
-rd_kafka_topic_conf_t *
-rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
-
-/**
- * @brief Creates a copy/duplicate of \p rk 's default topic configuration
- * object.
- */
-RD_EXPORT
-rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
-
-
-/**
- * @brief Destroys a topic conf object.
- */
-RD_EXPORT
-void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
-
-
-/**
- * @brief Sets a single rd_kafka_topic_conf_t value by property name.
- *
- * \p topic_conf should have been previously set up
- * with `rd_kafka_topic_conf_new()`.
- *
- * @returns rd_kafka_conf_res_t to indicate success or failure.
- */
-RD_EXPORT
-rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf,
- const char *name,
- const char *value,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @brief Sets the application's opaque pointer that will be passed to all topic
- * callbacks as the \c rkt_opaque argument.
- *
- * @sa rd_kafka_topic_opaque()
- */
-RD_EXPORT
-void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf,
- void *rkt_opaque);
-
-
-/**
- * @brief \b Producer: Set partitioner callback in provided topic conf object.
- *
- * The partitioner may be called in any thread at any time;
- * it may be called multiple times for the same message/key.
- *
- * The callback's \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The callback's \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * Partitioner function constraints:
- * - MUST NOT call any rd_kafka_*() functions except:
- * rd_kafka_topic_partition_available()
- * - MUST NOT block or execute for prolonged periods of time.
- * - MUST return a value between 0 and partition_cnt-1, or the
- * special \c RD_KAFKA_PARTITION_UA value if partitioning
- * could not be performed.
- */
-RD_EXPORT
-void rd_kafka_topic_conf_set_partitioner_cb(
- rd_kafka_topic_conf_t *topic_conf,
- int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
- const void *keydata,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque));
-
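-/**
- * Editor's illustrative sketch (not part of the original header):
- * a partitioner that hashes the key (djb2) and falls back to
- * RD_KAFKA_PARTITION_UA when the chosen partition is unavailable:
- * @code
- * static int32_t my_partitioner(const rd_kafka_topic_t *rkt,
- *                               const void *keydata, size_t keylen,
- *                               int32_t partition_cnt,
- *                               void *rkt_opaque, void *msg_opaque) {
- *         const unsigned char *key = keydata;
- *         uint32_t h = 5381;
- *         size_t i;
- *         int32_t p;
- *
- *         for (i = 0 ; i < keylen ; i++)
- *                 h = ((h << 5) + h) + key[i];
- *         p = (int32_t)(h % (uint32_t)partition_cnt);
- *         if (!rd_kafka_topic_partition_available(rkt, p))
- *                 return RD_KAFKA_PARTITION_UA;
- *         return p;
- * }
- * @endcode
- */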
-
-/**
- * @brief \b Producer: Set message queueing order comparator callback.
- *
- * The callback may be called in any thread at any time;
- * it may be called multiple times for the same message.
- *
- * Ordering comparator function constraints:
- * - MUST be stable sort (same input gives same output).
- * - MUST NOT call any rd_kafka_*() functions.
- * - MUST NOT block or execute for prolonged periods of time.
- *
- * The comparator shall compare the two messages and return:
- * - < 0 if message \p a should be inserted before message \p b.
- * - >=0 if message \p a should be inserted after message \p b.
- *
- * @remark Insert sorting will be used to enqueue the message in the
- * correct queue position, this comes at a cost of O(n).
- *
- * @remark If `queuing.strategy=fifo` new messages are enqueued to the
- * tail of the queue regardless of msg_order_cmp, but retried messages
- * are still affected by msg_order_cmp.
- *
- * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL,
- * DO NOT USE IN PRODUCTION.
- */
-RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp(
- rd_kafka_topic_conf_t *topic_conf,
- int (*msg_order_cmp)(const rd_kafka_message_t *a,
- const rd_kafka_message_t *b));
-
-
-/**
- * @brief Check if partition is available (has a leader broker).
- *
- * @returns 1 if the partition is available, else 0.
- *
- * @warning This function must only be called from inside a partitioner function
- */
-RD_EXPORT
-int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt,
- int32_t partition);
-
-
-/*******************************************************************
- * *
- * Partitioners provided by rdkafka *
- * *
- *******************************************************************/
-
-/**
- * @brief Random partitioner.
- *
- * Will try not to return unavailable partitions.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a random partition between 0 and \p partition_cnt - 1.
- *
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-/**
- * @brief Consistent partitioner.
- *
- * Uses consistent hashing to map identical keys onto identical partitions.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on
- * the CRC value of the key
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-/**
- * @brief Consistent-Random partitioner.
- *
- * This is the default partitioner.
- * Uses consistent hashing to map identical keys onto identical partitions, and
- * messages without keys will be assigned via the random partitioner.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on
- * the CRC value of the key (if provided)
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-
-/**
- * @brief Murmur2 partitioner (Java compatible).
- *
- * Uses consistent hashing to map identical keys onto identical partitions
- * using Java-compatible Murmur2 hashing.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a partition between 0 and \p partition_cnt - 1.
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-/**
- * @brief Consistent-Random Murmur2 partitioner (Java compatible).
- *
- * Uses consistent hashing to map identical keys onto identical partitions
- * using Java-compatible Murmur2 hashing.
- * Messages without keys will be assigned via the random partitioner.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a partition between 0 and \p partition_cnt - 1.
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-
-/**
- * @brief FNV-1a partitioner.
- *
- * Uses consistent hashing to map identical keys onto identical partitions
- * using FNV-1a hashing.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a partition between 0 and \p partition_cnt - 1.
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-
-/**
- * @brief Consistent-Random FNV-1a partitioner.
- *
- * Uses consistent hashing to map identical keys onto identical partitions
- * using FNV-1a hashing.
- * Messages without keys will be assigned via the random partitioner.
- *
- * The \p rkt_opaque argument is the opaque set by
- * rd_kafka_topic_conf_set_opaque().
- * The \p msg_opaque argument is the per-message opaque
- * passed to produce().
- *
- * @returns a partition between 0 and \p partition_cnt - 1.
- */
-RD_EXPORT
-int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
-
-
-/**@}*/
-
-
-
-/**
- * @name Main Kafka and Topic object handles
- * @{
- *
- *
- */
-
-
-
-/**
- * @brief Creates a new Kafka handle and starts its operation according to the
- * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER).
- *
- * \p conf is an optional struct created with `rd_kafka_conf_new()` that will
- * be used instead of the default configuration.
- * The \p conf object is freed by this function on success and must not be used
- * or destroyed by the application subsequently.
- * See `rd_kafka_conf_set()` et.al for more information.
- *
- * \p errstr must be a pointer to memory of at least size \p errstr_size where
- * `rd_kafka_new()` may write a human-readable error message if the
- * creation of a new handle fails, in which case the function returns NULL.
- *
- * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER
- * rd_kafka_t handle is created it may either operate in the
- * legacy simple consumer mode using the rd_kafka_consume_start()
- * interface, or the High-level KafkaConsumer API.
- * @remark An application must only use one of these groups of APIs on a given
- * rd_kafka_t RD_KAFKA_CONSUMER handle.
- *
- * @returns The Kafka handle on success or NULL on error (see \p errstr)
- *
- * @sa To destroy the Kafka handle, use rd_kafka_destroy().
- */
-RD_EXPORT
-rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
- rd_kafka_conf_t *conf,
- char *errstr,
- size_t errstr_size);
-
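-/**
- * Editor's illustrative sketch (not part of the original header):
- * creating a producer instance; note that \p conf remains owned by the
- * application if (and only if) creation fails:
- * @code
- * char errstr[512];
- * rd_kafka_conf_t *conf = rd_kafka_conf_new();
- * rd_kafka_t *rk;
- *
- * rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
- *                   errstr, sizeof(errstr));
- * rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- * if (!rk) {
- *         fprintf(stderr, "rd_kafka_new() failed: %s\n", errstr);
- *         rd_kafka_conf_destroy(conf);
- * }
- * @endcode
- */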
-
-/**
- * @brief Destroy Kafka handle.
- *
- * @remark This is a blocking operation.
- * @remark rd_kafka_consumer_close() will be called from this function
- * if the instance type is RD_KAFKA_CONSUMER, a \c group.id was
- *         configured, and rd_kafka_consumer_close() was not
- * explicitly called by the application. This in turn may
- * trigger consumer callbacks, such as rebalance_cb.
- * Use rd_kafka_destroy_flags() with
- * RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour.
- *
- * @sa rd_kafka_destroy_flags()
- */
-RD_EXPORT
-void rd_kafka_destroy(rd_kafka_t *rk);
-
-
-/**
- * @brief Destroy Kafka handle according to specified destroy flags
- *
- */
-RD_EXPORT
-void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
-
-/**
- * @brief Flags for rd_kafka_destroy_flags()
- */
-
-/*!
- * Don't call consumer_close() to leave group and commit final offsets.
- *
- * This also prevents consumer callbacks, such as rebalance_cb, from being
- * called from rd_kafka_destroy*().
- *
- * The consumer group handler is still closed internally, but from an
- * application perspective none of the functionality from consumer_close()
- * is performed.
- */
-#define RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE 0x8
-
-
-
-/**
- * @brief Returns Kafka handle name.
- */
-RD_EXPORT
-const char *rd_kafka_name(const rd_kafka_t *rk);
-
-
-/**
- * @brief Returns Kafka handle type.
- */
-RD_EXPORT
-rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
-
-
-/**
- * @brief Returns this client's broker-assigned group member id.
- *
- * @remark This currently requires the high-level KafkaConsumer
- *
- * @returns An allocated string containing the current broker-assigned group
- * member id, or NULL if not available.
- * The application must free the string with \p free() or
- * rd_kafka_mem_free()
- */
-RD_EXPORT
-char *rd_kafka_memberid(const rd_kafka_t *rk);
-
-
-
-/**
- * @brief Returns the ClusterId as reported in broker metadata.
- *
- * @param rk Client instance.
- * @param timeout_ms If there is no cached value from metadata retrieval
- * then this specifies the maximum amount of time
- * (in milliseconds) the call will block waiting
- * for metadata to be retrieved.
- * Use 0 for non-blocking calls.
- *
- * @remark Requires broker version >=0.10.0 and api.version.request=true.
- *
- * @remark The application must free the returned pointer
- * using rd_kafka_mem_free().
- *
- * @returns a newly allocated string containing the ClusterId, or NULL
- * if no ClusterId could be retrieved in the allotted timespan.
- */
-RD_EXPORT
-char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
-
-
-/**
- * @brief Returns the current ControllerId as reported in broker metadata.
- *
- * @param rk Client instance.
- * @param timeout_ms If there is no cached value from metadata retrieval
- * then this specifies the maximum amount of time
- * (in milliseconds) the call will block waiting
- * for metadata to be retrieved.
- * Use 0 for non-blocking calls.
- *
- * @remark Requires broker version >=0.10.0 and api.version.request=true.
- *
- * @returns the controller broker id (>= 0), or -1 if no ControllerId could be
- * retrieved in the allotted timespan.
- */
-RD_EXPORT
-int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
-
-
-/**
- * @brief Creates a new topic handle for topic named \p topic.
- *
- * \p conf is an optional configuration for the topic created with
- * `rd_kafka_topic_conf_new()` that will be used instead of the default
- * topic configuration.
- * The \p conf object is freed by this function and must not be used or
- * destroyed by the application subsequently.
- * See `rd_kafka_topic_conf_set()` et.al for more information.
- *
- * Topic handles are refcounted internally and calling rd_kafka_topic_new()
- * again with the same topic name will return the previous topic handle
- * without updating the original handle's configuration.
- * Applications must eventually call rd_kafka_topic_destroy() for each
- * successful call to rd_kafka_topic_new() to clear up resources.
- *
- * @returns the new topic handle or NULL on error (use rd_kafka_errno2err()
- *          to convert system \p errno to an rd_kafka_resp_err_t error code).
- *
- * @sa rd_kafka_topic_destroy()
- */
-RD_EXPORT
-rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk,
- const char *topic,
- rd_kafka_topic_conf_t *conf);
-
-
-
-/**
- * @brief Release the application's topic handle refcount as previously created
- * with `rd_kafka_topic_new()`.
- *
- * @remark Since topic objects are refcounted (both internally and for the app)
- * the topic object might not actually be destroyed by this call,
- * but the application must consider the object destroyed.
- */
-RD_EXPORT
-void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
-
-
-/**
- * @brief Returns the topic name.
- */
-RD_EXPORT
-const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
-
-
-/**
- * @brief Get the \p rkt_opaque pointer that was set in the topic configuration
- * with rd_kafka_topic_conf_set_opaque().
- */
-RD_EXPORT
-void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
-
-
-/**
- * @brief Unassigned partition.
- *
- * The unassigned partition is used by the producer API for messages
- * that should be partitioned using the configured or default partitioner.
- */
-#define RD_KAFKA_PARTITION_UA ((int32_t)-1)
-
-
-/**
- * @brief Polls the provided kafka handle for events.
- *
- * Events will cause application-provided callbacks to be called.
- *
- * The \p timeout_ms argument specifies the maximum amount of time
- * (in milliseconds) that the call will block waiting for events.
- * For non-blocking calls, provide 0 as \p timeout_ms.
- * To wait indefinitely for an event, provide -1.
- *
- * @remark An application should make sure to call poll() at regular
- * intervals to serve any queued callbacks waiting to be called.
- * @remark If your producer doesn't have any callback set (in particular
- * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb)
- * you might choose not to call poll(), though this is not
- * recommended.
- *
- * Events:
- * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer]
- * - error callbacks (rd_kafka_conf_set_error_cb()) [all]
- * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all]
- * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all]
- * - OAUTHBEARER token refresh callbacks
- * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all]
- *
- * @returns the number of events served.
- */
-RD_EXPORT
-int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
-
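-/**
- * Editor's illustrative sketch (not part of the original header):
- * a typical producer serve loop (rd_kafka_flush() is declared elsewhere
- * in this header):
- * @code
- * while (run) {
- *         // ... rd_kafka_produce()/rd_kafka_producev() calls ...
- *         rd_kafka_poll(rk, 100); // serve delivery reports, errors, stats
- * }
- * rd_kafka_flush(rk, 10*1000);    // drain outstanding messages at shutdown
- * @endcode
- */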
-
-/**
- * @brief Cancels the current callback dispatcher (rd_kafka_poll(),
- * rd_kafka_consume_callback(), etc).
- *
- * A callback may use this to force an immediate return to the calling
- * code (caller of e.g. rd_kafka_poll()) without processing any further
- * events.
- *
- * @remark This function MUST ONLY be called from within a librdkafka callback.
- */
-RD_EXPORT
-void rd_kafka_yield(rd_kafka_t *rk);
-
-
-
-/**
- * @brief Pause producing or consumption for the provided list of partitions.
- *
- * Success or error is returned per partition in the \c err field of the
- * \p partitions list.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_pause_partitions(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions);
-
-
-
-/**
- * @brief Resume producing or consumption for the provided list of partitions.
- *
- * Success or error is returned per partition in the \c err field of the
- * \p partitions list.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_resume_partitions(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions);
-
-
-
-/**
- * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets
- * for partition.
- *
- * Offsets are returned in \p *low and \p *high respectively.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_query_watermark_offsets(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t *low,
- int64_t *high,
- int timeout_ms);
-
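-/**
- * Editor's illustrative sketch (not part of the original header):
- * @code
- * int64_t lo, hi;
- * rd_kafka_resp_err_t err =
- *         rd_kafka_query_watermark_offsets(rk, "mytopic", 0,
- *                                          &lo, &hi, 5000);
- * if (!err)
- *         printf("partition 0 spans offsets %"PRId64"..%"PRId64"\n",
- *                lo, hi);
- * @endcode
- */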
-
-/**
- * @brief Get last known low (oldest/beginning) and high (newest/end) offsets
- * for partition.
- *
- * The low offset is updated periodically (if statistics.interval.ms is set)
- * while the high offset is updated on each fetched message set from the broker.
- *
- * If there is no cached offset (either low or high, or both) then
- * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.
- *
- * Offsets are returned in \p *low and \p *high respectively.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
- *
- * @remark Shall only be used with an active consumer instance.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t *low,
- int64_t *high);
-
-
-
-/**
- * @brief Look up the offsets for the given partitions by timestamp.
- *
- * The returned offset for each partition is the earliest offset whose
- * timestamp is greater than or equal to the given timestamp in the
- * corresponding partition.
- *
- * The timestamps to query are represented as \c offset in \p offsets
- * on input, and \c offset will contain the offset on output.
- *
- * The function will block for at most \p timeout_ms milliseconds.
- *
- * @remark Duplicate Topic+Partitions are not supported.
- * @remark Per-partition errors may be returned in \c
- * rd_kafka_topic_partition_t.err
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were queried (do note
- * that per-partition errors might be set),
- * RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched
- * within \p timeout_ms,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p offsets list is empty,
- * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown,
- * RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders
- * for the given partitions.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_offsets_for_times(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets,
- int timeout_ms);
-
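-/*
- * Illustrative sketch (topic name and timestamp are examples): resolve
- * the first offset at or after a wall-clock time. On input \c .offset
- * holds the timestamp (ms since epoch), on output the resolved offset.
- *
- *   rd_kafka_topic_partition_list_t *offs =
- *           rd_kafka_topic_partition_list_new(1);
- *   rd_kafka_topic_partition_list_add(offs, "mytopic", 0)->offset =
- *           1672531200000; // 2023-01-01T00:00:00Z in milliseconds
- *
- *   if (!rd_kafka_offsets_for_times(rk, offs, 5000))
- *           printf("resolved offset: %" PRId64 "\n",
- *                  offs->elems[0].offset); // also check elems[0].err
- *   rd_kafka_topic_partition_list_destroy(offs);
- */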
-
-
-/**
- * @brief Allocate and zero memory using the same allocator librdkafka uses.
- *
- * This is typically an abstraction for the calloc(3) call and makes sure
- * the application can use the same memory allocator as librdkafka for
- * allocating pointers that are used by librdkafka.
- *
- * \p rk can be set to return memory allocated by a specific \c rk instance,
- * otherwise pass NULL for \p rk.
- *
- * @remark Memory allocated by rd_kafka_mem_calloc() must be freed using
- * rd_kafka_mem_free()
- */
-RD_EXPORT
-void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
-
-
-
-/**
- * @brief Allocate memory using the same allocator librdkafka uses.
- *
- * This is typically an abstraction for the malloc(3) call and makes sure
- * the application can use the same memory allocator as librdkafka for
- * allocating pointers that are used by librdkafka.
- *
- * \p rk can be set to return memory allocated by a specific \c rk instance,
- * otherwise pass NULL for \p rk.
- *
- * @remark Memory allocated by rd_kafka_mem_malloc() must be freed using
- * rd_kafka_mem_free()
- */
-RD_EXPORT
-void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
-
-
-
-/**
- * @brief Free pointer returned by librdkafka
- *
- * This is typically an abstraction for the free(3) call and makes sure
- * the application can use the same memory allocator as librdkafka for
- * freeing pointers returned by librdkafka.
- *
- * In standard setups it is usually not necessary to use this interface
- * rather than the free(3) function.
- *
- * \p rk must be set for memory returned by APIs that take an \c rk argument;
- * for other APIs pass NULL for \p rk.
- *
- * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs
- * that explicitly mention using this function for freeing.
- */
-RD_EXPORT
-void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
-
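-/*
- * Illustrative sketch: allocate a buffer with librdkafka's allocator and
- * release it with the matching free function (size is arbitrary).
- *
- *   char *buf = rd_kafka_mem_malloc(rk, 128);
- *   // ... pass buf to an API that expects rdkafka-allocated memory ...
- *   rd_kafka_mem_free(rk, buf);
- */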
-
-/**@}*/
-
-
-
-/**
- * @name Queue API
- * @{
- *
- * Message queues allow the application to re-route consumed messages
- * from multiple topic+partitions into one single queue point.
- * This queue point containing messages from a number of topic+partitions
- * may then be served by a single rd_kafka_consume*_queue() call,
- * rather than one call per topic+partition combination.
- */
-
-
-/**
- * @brief Create a new message queue.
- *
- * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
-
-/**
- * Destroy a queue, purging all of its enqueued messages.
- */
-RD_EXPORT
-void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
-
-
-/**
- * @returns a reference to the main librdkafka event queue.
- * This is the queue served by rd_kafka_poll().
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
-
-
-
-/**
- * @returns a reference to the SASL callback queue, if a SASL mechanism
- * with callbacks is configured (currently only OAUTHBEARER), else
- * returns NULL.
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- *
- * @sa rd_kafka_sasl_background_callbacks_enable()
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
-
-
-/**
- * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka
- * background thread.
- *
- * This serves as an alternative for applications that do not call
- * rd_kafka_poll() (et.al.) at regular intervals (or at all), as a means
- * of automatically triggering the refresh callbacks, which are needed to
- * initiate connections to the brokers when a custom OAUTHBEARER
- * refresh callback is configured.
- *
- * @returns NULL on success or an error object on error.
- *
- * @sa rd_kafka_queue_get_sasl()
- * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb()
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
-
-
-/**
- * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by
- * this Kafka client.
- *
- * This function sets or resets the SASL username and password credentials
- * used by this Kafka client. The new credentials will be used the next time
- * this client needs to authenticate to a broker. This function
- * will not disconnect existing connections that might have been made using
- * the old credentials.
- *
- * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms.
- *
- * @returns NULL on success or an error object on error.
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk,
- const char *username,
- const char *password);
-
-/**
- * @returns a reference to the librdkafka consumer queue.
- * This is the queue served by rd_kafka_consumer_poll().
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- *
- * @remark rd_kafka_queue_destroy() MUST be called on this queue
- * prior to calling rd_kafka_consumer_close().
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
-
-/**
- * @returns a reference to the partition's queue, or NULL if
- * partition is invalid.
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- *
- * @remark rd_kafka_queue_destroy() MUST be called on this queue
- *
- * @remark This function only works on consumers.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk,
- const char *topic,
- int32_t partition);
-
-/**
- * @returns a reference to the background thread queue, or NULL if the
- * background queue is not enabled.
- *
- * The background thread queue provides the application with an automatically
- * polled queue that triggers the event callback in a background thread;
- * this background thread is completely managed by librdkafka.
- *
- * The background thread queue is automatically created if a generic event
- * handler callback is configured with rd_kafka_conf_set_background_event_cb()
- * or if rd_kafka_queue_get_background() is called.
- *
- * The background queue is polled and served by librdkafka and MUST NOT be
- * polled, forwarded, or otherwise managed by the application; it may only
- * be used as the destination queue passed to queue-enabled APIs, such as
- * the Admin API.
- *
- * Use rd_kafka_queue_destroy() to lose the reference.
- *
- * @warning The background queue MUST NOT be read from (polled, consumed, etc),
- * or forwarded from.
- */
-RD_EXPORT
-rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
-
-
-/**
- * @brief Forward/re-route queue \p src to \p dst.
- * If \p dst is \c NULL the forwarding is removed.
- *
- * The internal refcounts for both queues are increased.
- *
- * @remark Regardless of whether \p dst is NULL or not, after calling this
- *         function, \p src will not forward its fetch queue to the consumer
- * queue.
- */
-RD_EXPORT
-void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
-
-/**
- * @brief Forward librdkafka logs (and debug) to the specified queue
- * for serving with one of the ..poll() calls.
- *
- * This allows an application to serve log callbacks (\c log_cb)
- * in its thread of choice.
- *
- * @param rk Client instance.
- * @param rkqu Queue to forward logs to. If the value is NULL the logs
- * are forwarded to the main queue.
- *
- * @remark The configuration property \c log.queue MUST also be set to true.
- *
- * @remark librdkafka maintains its own reference to the provided queue.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error,
- * e.g. RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu);
-
-
-/**
- * @returns the current number of elements in queue.
- */
-RD_EXPORT
-size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
-
-
-/**
- * @brief Enable IO event triggering for queue.
- *
- * To ease integration with IO based polling loops this API
- * allows an application to create a separate file-descriptor
- * that librdkafka will write \p payload (of size \p size) to
- * whenever a new element is enqueued on a previously empty queue.
- *
- * To remove event triggering call with \p fd = -1.
- *
- * librdkafka will maintain a copy of the \p payload.
- *
- * @remark IO and callback event triggering are mutually exclusive.
- * @remark When using forwarded queues the IO event must only be enabled
- * on the final forwarded-to (destination) queue.
- * @remark The file-descriptor/socket must be set to non-blocking.
- */
-RD_EXPORT
-void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu,
- int fd,
- const void *payload,
- size_t size);
-
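-/*
- * Illustrative sketch (POSIX; needs <unistd.h> and <fcntl.h>): wake an
- * application event loop through a pipe whenever \c rkqu goes from empty
- * to non-empty. One byte ("1") is written per wakeup and must be drained
- * by the reader.
- *
- *   int fds[2];
- *   pipe(fds);                          // fds[0] read end, fds[1] write end
- *   fcntl(fds[1], F_SETFL, O_NONBLOCK); // fd must be non-blocking
- *   rd_kafka_queue_io_event_enable(rkqu, fds[1], "1", 1);
- *   // poll()/select() on fds[0], then serve rkqu (e.g. rd_kafka_queue_poll())
- */
-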
-/**
- * @brief Enable callback event triggering for queue.
- *
- * The callback will be called from an internal librdkafka thread
- * when a new element is enqueued on a previously empty queue.
- *
- * To remove event triggering call with \p event_cb = NULL.
- *
- * The \p qev_opaque is passed to the callback's \p qev_opaque argument.
- *
- * @remark IO and callback event triggering are mutually exclusive.
- * @remark Since the callback may be triggered from internal librdkafka
- *         threads, the application must not perform any prolonged work in
- * the callback, or call any librdkafka APIs (for the same rd_kafka_t
- * handle).
- */
-RD_EXPORT
-void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu,
- void (*event_cb)(rd_kafka_t *rk,
- void *qev_opaque),
- void *qev_opaque);
-
-
-/**
- * @brief Cancels the current rd_kafka_queue_poll() on \p rkqu.
- *
- * An application may use this from another thread to force
- * an immediate return to the calling code (caller of rd_kafka_queue_poll()).
- * Must not be used from signal handlers since that may cause deadlocks.
- */
-RD_EXPORT
-void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
-
-
-/**@}*/
-
-/**
- *
- * @name Simple Consumer API (legacy)
- * @{
- *
- */
-
-
-#define RD_KAFKA_OFFSET_BEGINNING \
- -2 /**< Start consuming from beginning of \
- * kafka partition queue: oldest msg */
-#define RD_KAFKA_OFFSET_END \
- -1 /**< Start consuming from end of kafka \
- * partition queue: next msg */
-#define RD_KAFKA_OFFSET_STORED \
- -1000 /**< Start consuming from offset retrieved \
- * from offset store */
-#define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */
-
-
-/** @cond NO_DOC */
-#define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */
-/** @endcond */
-
-/**
- * @brief Start consuming \p CNT messages from topic's current end offset.
- *
- * That is, if current end offset is 12345 and \p CNT is 200, it will start
- * consuming from offset \c 12345-200 = \c 12145. */
-#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT))
-
-/**
- * @brief Start consuming messages for topic \p rkt and \p partition
- * at offset \p offset which may either be an absolute \c (0..N)
- * or one of the logical offsets:
- * - RD_KAFKA_OFFSET_BEGINNING
- * - RD_KAFKA_OFFSET_END
- * - RD_KAFKA_OFFSET_STORED
- * - RD_KAFKA_OFFSET_TAIL
- *
- * rdkafka will attempt to keep \c queued.min.messages (config property)
- * messages in the local queue by repeatedly fetching batches of messages
- * from the broker until the threshold is reached.
- *
- * The application shall use one of the `rd_kafka_consume*()` functions
- * to consume messages from the local queue, each kafka message being
- * represented as a `rd_kafka_message_t *` object.
- *
- * `rd_kafka_consume_start()` must not be called multiple times for the same
- * topic and partition without stopping consumption first with
- * `rd_kafka_consume_stop()`.
- *
- * @returns 0 on success or -1 on error in which case errno is set accordingly:
- * - EBUSY - Conflicts with an existing or previous subscription
- * (RD_KAFKA_RESP_ERR__CONFLICT)
- * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id)
- * (RD_KAFKA_RESP_ERR__INVALID_ARG)
- * - ESRCH - requested \p partition is invalid.
- * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- * - ENOENT - topic is unknown in the Kafka cluster.
- * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- *
- * Use `rd_kafka_errno2err()` to convert system \c errno to `rd_kafka_resp_err_t`.
- */
-RD_EXPORT
-int rd_kafka_consume_start(rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t offset);
-
-/**
- * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to
- * the provided queue \p rkqu (which must have been previously allocated
- *        with `rd_kafka_queue_new()`).
- *
- * The application must use one of the `rd_kafka_consume_*_queue()` functions
- * to receive fetched messages.
- *
- * `rd_kafka_consume_start_queue()` must not be called multiple times for the
- * same topic and partition without stopping consumption first with
- * `rd_kafka_consume_stop()`.
- * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not
- * be combined for the same topic and partition.
- */
-RD_EXPORT
-int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t offset,
- rd_kafka_queue_t *rkqu);
-
-/**
- * @brief Stop consuming messages for topic \p rkt and \p partition, purging
- * all messages currently in the local queue.
- *
- * NOTE: To enforce synchronisation this call will block until the internal
- *       fetcher has terminated and offsets are committed to the configured
- *       storage method.
- *
- * The application needs to stop all consumers before calling
- * `rd_kafka_destroy()` on the main object handle.
- *
- * @returns 0 on success or -1 on error (see `errno`).
- */
-RD_EXPORT
-int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
-
-
-
-/**
- * @brief Seek consumer for topic+partition to \p offset which is either an
- * absolute or logical offset.
- *
- * If \p timeout_ms is specified (not 0) the seek call will wait this long
- * for the consumer to update its fetcher state for the given partition with
- * the new offset. This guarantees that no previously fetched messages for the
- * old offset (or fetch position) will be passed to the application.
- *
- * If the timeout is reached the internal state will be unknown to the caller
- * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
- *
- * If \p timeout_ms is 0 it will initiate the seek but return
- * immediately without any error reporting (e.g., async).
- *
- * This call will purge all pre-fetched messages for the given partition, which
- * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
- * may thus lead to increased network usage as messages are re-fetched from
- * the broker.
- *
- * @remark Seek must only be performed for already assigned/consumed
- *         partitions; use rd_kafka_assign() (et.al) to set the initial
- *         starting offset for a new assignment.
- *
- * @returns `RD_KAFKA_RESP_ERR_NO_ERROR` on success else an error code.
- *
- * @deprecated Use rd_kafka_seek_partitions().
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t offset,
- int timeout_ms);
-
-
-
-/**
- * @brief Seek consumer for partitions in \p partitions to the per-partition
- * offset in the \c .offset field of \p partitions.
- *
- * The offset may be either absolute (>= 0) or a logical offset.
- *
- * If \p timeout_ms is specified (not 0) the seek call will wait this long
- * for the consumer to update its fetcher state for the given partition with
- * the new offset. This guarantees that no previously fetched messages for the
- * old offset (or fetch position) will be passed to the application.
- *
- * If the timeout is reached the internal state will be unknown to the caller
- * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
- *
- * If \p timeout_ms is 0 it will initiate the seek but return
- * immediately without any error reporting (e.g., async).
- *
- * This call will purge all pre-fetched messages for the given partition, which
- * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
- * may thus lead to increased network usage as messages are re-fetched from
- * the broker.
- *
- * Individual partition errors are reported in the per-partition \c .err field
- * of \p partitions.
- *
- * @remark Seek must only be performed for already assigned/consumed
- *         partitions; use rd_kafka_assign() (et.al) to set the initial
- *         starting offset for a new assignment.
- *
- * @returns NULL on success or an error object on failure.
- */
-RD_EXPORT rd_kafka_error_t *
-rd_kafka_seek_partitions(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions,
- int timeout_ms);
-
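-/*
- * Illustrative sketch (topic name is an example): seek two assigned
- * partitions back to the beginning.
- *
- *   rd_kafka_topic_partition_list_t *parts =
- *           rd_kafka_topic_partition_list_new(2);
- *   rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset =
- *           RD_KAFKA_OFFSET_BEGINNING;
- *   rd_kafka_topic_partition_list_add(parts, "mytopic", 1)->offset =
- *           RD_KAFKA_OFFSET_BEGINNING;
- *
- *   rd_kafka_error_t *error = rd_kafka_seek_partitions(rk, parts, 5000);
- *   if (error) {
- *           fprintf(stderr, "seek: %s\n", rd_kafka_error_string(error));
- *           rd_kafka_error_destroy(error);
- *   }
- *   rd_kafka_topic_partition_list_destroy(parts);
- */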
-
-/**
- * @brief Consume a single message from topic \p rkt and \p partition
- *
- * \p timeout_ms is the maximum amount of time to wait for a message to be
- * received. The consumer must have been previously started with
- * `rd_kafka_consume_start()`.
- *
- * @returns a message object on success or \c NULL on error.
- * The message object must be destroyed with `rd_kafka_message_destroy()`
- * when the application is done with it.
- *
- * Errors (when returning NULL):
- * - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched.
- * - ENOENT - \p rkt + \p partition is unknown.
- * (no prior `rd_kafka_consume_start()` call)
- *
- * NOTE: The returned message's \c ..->err must be checked for errors.
- * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the
- * end of the partition has been reached, which should typically not be
- * considered an error. The application should handle this case
- * (e.g., ignore).
- *
- * @remark on_consume() interceptors may be called from this function prior to
- * passing message to application.
- */
-RD_EXPORT
-rd_kafka_message_t *
-rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
-
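-/*
- * Illustrative sketch of the legacy consume flow (assumes \c rkt from
- * rd_kafka_topic_new(); \c run and handle() are hypothetical application
- * names):
- *
- *   if (rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_BEGINNING) == -1)
- *           return; // inspect errno / rd_kafka_errno2err()
- *
- *   while (run) {
- *           rd_kafka_message_t *m = rd_kafka_consume(rkt, 0, 1000);
- *           if (!m)
- *                   continue;      // timeout or error, see errno
- *           if (!m->err)
- *                   handle(m);     // check ..->err before using payload
- *           rd_kafka_message_destroy(m);
- *   }
- *   rd_kafka_consume_stop(rkt, 0);
- */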
-
-
-/**
- * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition
- * putting a pointer to each message in the application provided
- * array \p rkmessages (of size \p rkmessages_size entries).
- *
- * `rd_kafka_consume_batch()` provides higher throughput performance
- * than `rd_kafka_consume()`.
- *
- * \p timeout_ms is the maximum amount of time to wait for all of
- * \p rkmessages_size messages to be put into \p rkmessages.
- * If no messages were available within the timeout period this function
- * returns 0 and \p rkmessages remains untouched.
- * This differs somewhat from `rd_kafka_consume()`.
- *
- * The message objects must be destroyed with `rd_kafka_message_destroy()`
- * when the application is done with them.
- *
- * @returns the number of rkmessages added in \p rkmessages,
- *          or -1 on error (same error codes as for `rd_kafka_consume()`).
- *
- * @sa rd_kafka_consume()
- *
- * @remark on_consume() interceptors may be called from this function prior to
- * passing message to application.
- */
-RD_EXPORT
-ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt,
- int32_t partition,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size);
-
-
-
-/**
- * @brief Consumes messages from topic \p rkt and \p partition, calling
- *        the provided callback for each consumed message.
- *
- * `rd_kafka_consume_callback()` provides higher throughput performance
- * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`.
- *
- * \p timeout_ms is the maximum amount of time to wait for one or more messages
- * to arrive.
- *
- * The provided \p consume_cb function is called for each message,
- * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the
- * provided \p rkmessage.
- *
- * The \p commit_opaque argument is passed to the \p consume_cb
- * as \p commit_opaque.
- *
- * @returns the number of messages processed or -1 on error.
- *
- * @sa rd_kafka_consume()
- *
- * @remark on_consume() interceptors may be called from this function prior to
- * passing message to application.
- *
- * @remark This function will return early if a transaction control message is
- * received, these messages are not exposed to the application but
- * still enqueued on the consumer queue to make sure their
- * offsets are stored.
- *
- * @deprecated This API is deprecated and subject for future removal.
- * There is no new callback-based consume interface, use the
- * poll/queue based alternatives.
- */
-RD_EXPORT
-int rd_kafka_consume_callback(rd_kafka_topic_t *rkt,
- int32_t partition,
- int timeout_ms,
- void (*consume_cb)(rd_kafka_message_t *rkmessage,
- void *commit_opaque),
- void *commit_opaque);
-
-
-/**@}*/
-
-/**
- * @name Simple Consumer API (legacy): Queue consumers
- * @{
- *
- * The following `..._queue()` functions are analogous to the functions above
- * but read messages from the provided queue \p rkqu instead.
- * \p rkqu must have been previously created with `rd_kafka_queue_new()`
- * and the topic consumer must have been started with
- * `rd_kafka_consume_start_queue()` utilising the same queue.
- */
-
-/**
- * @brief Consume from queue
- *
- * @sa rd_kafka_consume()
- */
-RD_EXPORT
-rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu,
- int timeout_ms);
-
-/**
- * @brief Consume batch of messages from queue
- *
- * @sa rd_kafka_consume_batch()
- */
-RD_EXPORT
-ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size);
-
-/**
- * @brief Consume multiple messages from queue with callback
- *
- * @sa rd_kafka_consume_callback()
- *
- * @deprecated This API is deprecated and subject for future removal.
- * There is no new callback-based consume interface, use the
- * poll/queue based alternatives.
- */
-RD_EXPORT
-int rd_kafka_consume_callback_queue(
- rd_kafka_queue_t *rkqu,
- int timeout_ms,
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque),
- void *commit_opaque);
-
-
-/**@}*/
-
-
-
-/**
- * @name Simple Consumer API (legacy): Topic+partition offset store.
- * @{
- *
- * If \c auto.commit.enable is true the offset is stored automatically prior to
- * returning the message(s) in each of the rd_kafka_consume*() functions
- * above.
- */
-
-
-/**
- * @brief Store offset \p offset + 1 for topic \p rkt partition \p partition.
- *
- * The \c offset + 1 will be committed (written) to the broker (or file)
- * according to \c `auto.commit.interval.ms` or manual offset-less commit().
- *
- * @deprecated This API lacks support for partition leader epochs, which makes
- * it at risk for unclean leader election log truncation issues.
- * Use rd_kafka_offsets_store() and rd_kafka_offset_store_message()
- * instead.
- *
- * @warning This method may only be called for partitions that are currently
- * assigned.
- * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
- * Since v1.9.0.
- *
- * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
- * this may later interfere with resuming a paused partition, instead
- * store offsets prior to calling seek.
- *
- * @remark \c `enable.auto.offset.store` must be set to "false" when using
- * this API.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
- */
-RD_EXPORT
-rd_kafka_resp_err_t
-rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
-
-
-/**
- * @brief Store offsets for next auto-commit for one or more partitions.
- *
- * The offset will be committed (written) to the offset store according
- * to \c `auto.commit.interval.ms` or manual offset-less commit().
- *
- * Per-partition success/error status is propagated through each partition's
- * \c .err field for all return values (even NO_ERROR) except INVALID_ARG.
- *
- * @warning This method may only be called for partitions that are currently
- * assigned.
- * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
- * Since v1.9.0.
- *
- * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
- * this may later interfere with resuming a paused partition, instead
- * store offsets prior to calling seek.
- *
- * @remark The \c .offset field is stored as is, it will NOT be + 1.
- *
- * @remark \c `enable.auto.offset.store` must be set to "false" when using
- * this API.
- *
- * @remark The leader epoch, if set, will be used to fence outdated partition
- * leaders. See rd_kafka_topic_partition_set_leader_epoch().
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or
- * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store
- * is true, or
- * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE
- * if none of the offsets could be stored.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_offsets_store(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets);
-
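-/*
- * Illustrative sketch (requires \c enable.auto.offset.store=false; assumes
- * a consumed message \c m): after processing, store the next offset to
- * consume. The +1 is the application's responsibility here, since the
- * \c .offset field is stored as is.
- *
- *   rd_kafka_topic_partition_list_t *offs =
- *           rd_kafka_topic_partition_list_new(1);
- *   rd_kafka_topic_partition_list_add(offs, rd_kafka_topic_name(m->rkt),
- *                                     m->partition)->offset = m->offset + 1;
- *   rd_kafka_offsets_store(rk, offs);
- *   rd_kafka_topic_partition_list_destroy(offs);
- */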
-
-/**
- * @brief Store offset +1 for the consumed message.
- *
- * The message offset + 1 will be committed to the broker according
- * to \c `auto.commit.interval.ms` or manual offset-less commit().
- *
- * @warning This method may only be called for partitions that are currently
- * assigned.
- * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
- * Since v1.9.0.
- *
- * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
- * this may later interfere with resuming a paused partition, instead
- * store offsets prior to calling seek.
- *
- * @remark \c `enable.auto.offset.store` must be set to "false" when using
- * this API.
- *
- * @returns NULL on success or an error object on failure.
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage);
-
-/**@}*/
-
-
-
-/**
- * @name KafkaConsumer (C)
- * @brief High-level KafkaConsumer C API
- * @{
- *
- *
- *
- */
-
-/**
- * @brief Subscribe to topic set using balanced consumer groups.
- *
- * Wildcard (regex) topics are supported:
- * any topic name in the \p topics list that is prefixed with \c \"^\" will
- * be regex-matched to the full list of topics in the cluster and matching
- * topics will be added to the subscription list.
- *
- * The full topic list is retrieved every \c topic.metadata.refresh.interval.ms
- * to pick up new or deleted topics that match the subscription.
- * If there is any change to the matched topics the consumer will
- * immediately rejoin the group with the updated set of subscribed topics.
- *
- * Regex and full topic names can be mixed in \p topics.
- *
- * @remark Only the \c .topic field is used in the supplied \p topics list,
- * all other fields are ignored.
- *
- * @remark subscribe() is an asynchronous method which returns immediately:
- * background threads will (re)join the group, wait for group rebalance,
- * issue any registered rebalance_cb, assign() the assigned partitions,
- * and then start fetching messages. This cycle may take up to
- * \c session.timeout.ms * 2 or more to complete.
- *
- * @remark After this call returns, a consumer error will be raised through
- *         rd_kafka_consumer_poll() (et.al.) for each unavailable topic in
- *         \p topics, with the \c rd_kafka_message_t.err field set to
- *         RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART for non-existent topics
- *         and RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized
- *         topics. The subscribe function itself is asynchronous and will
- *         not return an error on unavailable topics.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
- * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid
- * topics or regexes or duplicate entries,
- * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_subscribe(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *topics);
-
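-/*
- * Illustrative sketch (topic names are examples): subscribe to one
- * literal topic and one regex pattern. Only the \c .topic field is used
- * for subscriptions, so RD_KAFKA_PARTITION_UA is passed by convention.
- *
- *   rd_kafka_topic_partition_list_t *topics =
- *           rd_kafka_topic_partition_list_new(2);
- *   rd_kafka_topic_partition_list_add(topics, "orders",
- *                                     RD_KAFKA_PARTITION_UA);
- *   rd_kafka_topic_partition_list_add(topics, "^metrics\\..*",
- *                                     RD_KAFKA_PARTITION_UA);
- *
- *   rd_kafka_resp_err_t err = rd_kafka_subscribe(rk, topics);
- *   if (err)
- *           fprintf(stderr, "subscribe: %s\n", rd_kafka_err2str(err));
- *   rd_kafka_topic_partition_list_destroy(topics);
- */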
-
-/**
- * @brief Unsubscribe from the current subscription set.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
-
-
-/**
- * @brief Returns the current topic subscription
- *
- * @returns An error code on failure, otherwise \p topics is updated
- * to point to a newly allocated topic list (possibly empty).
- *
- * @remark The application is responsible for calling
- * rd_kafka_topic_partition_list_destroy on the returned list.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
-
-
-
-/**
- * @brief Poll the consumer for messages or events.
- *
- * Will block for at most \p timeout_ms milliseconds.
- *
- * @remark An application should make sure to call consumer_poll() at regular
- * intervals, even if no messages are expected, to serve any
- * queued callbacks waiting to be called. This is especially
- * important when a rebalance_cb has been registered as it needs
- * to be called and handled properly to synchronize internal
- * consumer state.
- *
- * @returns A message object which is a proper message if \p ->err is
- * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other
- * value.
- *
- * @remark on_consume() interceptors may be called from this function prior to
- * passing message to application.
- *
- * @remark When subscribing to topics the application must call poll at
- * least every \c max.poll.interval.ms to remain a member of the
- * consumer group.
- *
- * Noteworthy errors returned in \c ->err:
- * - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call
- * poll within `max.poll.interval.ms`.
- *
- * @sa rd_kafka_message_t
- */
-RD_EXPORT
-rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
-
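-/*
- * Illustrative sketch of a high-level consumer loop (\c run and handle()
- * are hypothetical application names):
- *
- *   while (run) {
- *           rd_kafka_message_t *m = rd_kafka_consumer_poll(rk, 100);
- *           if (!m)
- *                   continue;          // no message within 100 ms
- *           if (m->err)
- *                   fprintf(stderr, "%s\n", rd_kafka_message_errstr(m));
- *           else
- *                   handle(m);
- *           rd_kafka_message_destroy(m);
- *   }
- *   rd_kafka_consumer_close(rk);
- */
-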
-/**
- * @brief Close the consumer.
- *
- * This call will block until the consumer has revoked its assignment,
- * calling the \c rebalance_cb if it is configured, committed offsets
- * to broker, and left the consumer group (if applicable).
- * The maximum blocking time is roughly limited to session.timeout.ms.
- *
- * @returns An error code indicating if the consumer close was successful
- * or not.
- * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
- * a fatal error.
- *
- * @remark The application still needs to call rd_kafka_destroy() after
- * this call finishes to clean up the underlying handle resources.
- *
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
-
-
-/**
- * @brief Asynchronously close the consumer.
- *
- * Performs the same actions as rd_kafka_consumer_close() but in a
- * background thread.
- *
- * Rebalance events/callbacks (etc) will be forwarded to the
- * application-provided \p rkqu. The application must poll/serve this queue
- * until rd_kafka_consumer_closed() returns true.
- *
- * @remark Depending on consumer group join state there may or may not be
- * rebalance events emitted on \p rkqu.
- *
- * @returns an error object if the consumer close failed, else NULL.
- *
- * @sa rd_kafka_consumer_closed()
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu);
-
-
-/**
- * @returns 1 if the consumer is closed, else 0.
- *
- * Should be used in conjunction with rd_kafka_consumer_close_queue() to know
- * when the consumer has been closed.
- *
- * @sa rd_kafka_consumer_close_queue()
- */
-RD_EXPORT
-int rd_kafka_consumer_closed(rd_kafka_t *rk);
-
-
-/**
- * @brief Incrementally add \p partitions to the current assignment.
- *
- * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
- * this method should be used in a rebalance callback to adjust the current
- * assignment appropriately in the case where the rebalance type is
- * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the
- * partition list passed to the callback (or a copy of it), even if the
- * list is empty. \p partitions must not be NULL. This method may also be
- * used outside the context of a rebalance callback.
- *
- * @returns NULL on success, or an error object if the operation was
- * unsuccessful.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT rd_kafka_error_t *
-rd_kafka_incremental_assign(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *partitions);
-
-
-/**
- * @brief Incrementally remove \p partitions from the current assignment.
- *
- * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
- * this method should be used in a rebalance callback to adjust the current
- * assignment appropriately in the case where the rebalance type is
- * RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the
- * partition list passed to the callback (or a copy of it), even if the
- * list is empty. \p partitions must not be NULL. This method may also be
- * used outside the context of a rebalance callback.
- *
- * @returns NULL on success, or an error object if the operation was
- * unsuccessful.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *partitions);
-
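-/*
- * Illustrative sketch (needs <string.h>): a rebalance callback that
- * handles both EAGER and COOPERATIVE protocols, as suggested by the
- * remarks above. Returned error objects are ignored for brevity.
- *
- *   static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
- *                            rd_kafka_topic_partition_list_t *parts,
- *                            void *opaque) {
- *           int coop = !strcmp(rd_kafka_rebalance_protocol(rk),
- *                              "COOPERATIVE");
- *           if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- *                   if (coop)
- *                           rd_kafka_incremental_assign(rk, parts);
- *                   else
- *                           rd_kafka_assign(rk, parts);
- *           } else { // RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS (or lost)
- *                   if (coop)
- *                           rd_kafka_incremental_unassign(rk, parts);
- *                   else
- *                           rd_kafka_assign(rk, NULL);
- *           }
- *   }
- */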
-
-/**
- * @brief The rebalance protocol currently in use. This will be
- * "NONE" if the consumer has not (yet) joined a group, else it will
- * match the rebalance protocol ("EAGER", "COOPERATIVE") of the
- * configured and selected assignor(s). All configured
- * assignors must have the same protocol type, meaning
- * online migration of a consumer group from using one
- *        protocol to another (in particular upgrading from EAGER
- * to COOPERATIVE) without a restart is not currently
- * supported.
- *
- * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success.
- */
-RD_EXPORT
-const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
-
-
-/**
- * @brief Atomic assignment of partitions to consume.
- *
- * The new \p partitions will replace the existing assignment.
- *
- * A zero-length \p partitions will treat the partitions as a valid,
- * albeit empty assignment, and maintain internal state, while a \c NULL
- * value for \p partitions will reset and clear the internal state.
- *
- * When used from a rebalance callback, the application should pass the
- * partition list passed to the callback (or a copy of it) even if the list
- * is empty (i.e. should not pass NULL in this case) so as to maintain
- * internal join state. This is not strictly required - the application
- * may adjust the assignment provided by the group. However, this is rarely
- * useful in practice.
- *
- * @returns An error code indicating if the new assignment was applied or not.
- * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
- * a fatal error.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_assign(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *partitions);
-
-/**
- * @brief Returns the current partition assignment as set by rd_kafka_assign()
- * or rd_kafka_incremental_assign().
- *
- * @returns An error code on failure, otherwise \p partitions is updated
- * to point to a newly allocated partition list (possibly empty).
- *
- * @remark The application is responsible for calling
- * rd_kafka_topic_partition_list_destroy on the returned list.
- *
- * @remark This assignment represents the partitions assigned through the
- * assign functions and not the partitions assigned to this consumer
- * instance by the consumer group leader.
- * They are usually the same following a rebalance but not necessarily
- * since an application is free to assign any partitions.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_assignment(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t **partitions);
-
-
-/**
- * @brief Check whether the consumer considers the current assignment to
- * have been lost involuntarily. This method is only applicable for
- * use with a high level subscribing consumer. Assignments are revoked
- * immediately when determined to have been lost, so this method
- * is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event
- * or from within a rebalance_cb. Partitions that have been lost may
- * already be owned by other members in the group and therefore
- *        committing offsets, for example, may fail.
- *
- * @remark Calling rd_kafka_assign(), rd_kafka_incremental_assign() or
- * rd_kafka_incremental_unassign() resets this flag.
- *
- * @returns Returns 1 if the current partition assignment is considered
- * lost, 0 otherwise.
- */
-RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk);
-
-
-/**
- * @brief Commit offsets on broker for the provided list of partitions.
- *
- * \p offsets should contain \c topic, \c partition, \c offset and possibly
- * \c metadata. The \c offset should be the offset where consumption will
- * resume, i.e., the last processed offset + 1.
- * If \p offsets is NULL the current partition assignment will be used instead.
- *
- * If \p async is false this operation will block until the broker offset commit
- * is done, returning the resulting success or error code.
- *
- * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been
- * configured the callback will be enqueued for a future call to
- * rd_kafka_poll(), rd_kafka_consumer_poll() or similar.
- *
- * @returns An error code indicating if the commit was successful,
- * or successfully scheduled if asynchronous, or failed.
- * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
- * a fatal error.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_commit(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- int async);
-
-
-/**
- * @brief Commit message's offset on broker for the message's partition.
- * The committed offset is the message's offset + 1.
- *
- * @sa rd_kafka_commit
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_commit_message(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- int async);
-
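-/*
- * Illustrative sketch: synchronously commit the offset of a consumed
- * message \c m (the +1 is applied internally).
- *
- *   rd_kafka_resp_err_t err = rd_kafka_commit_message(rk, m, 0);
- *   if (err)
- *           fprintf(stderr, "commit: %s\n", rd_kafka_err2str(err));
- */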
-
-/**
- * @brief Commit offsets on broker for the provided list of partitions.
- *
- * See rd_kafka_commit for \p offsets semantics.
- *
- * The result of the offset commit will be posted on the provided \p rkqu queue.
- *
- * If the application uses one of the poll APIs (rd_kafka_poll(),
- * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue
- * the \p cb callback is required.
- *
- * The \p commit_opaque argument is passed to the callback as \p commit_opaque,
- * or if using the event API the callback is ignored and the offset commit
- * result will be returned as an RD_KAFKA_EVENT_COMMIT event and the
- * \p commit_opaque value will be available with rd_kafka_event_opaque().
- *
- * If \p rkqu is NULL a temporary queue will be created and the callback will
- * be served by this call.
- *
- * @sa rd_kafka_commit()
- * @sa rd_kafka_conf_set_offset_commit_cb()
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_commit_queue(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_queue_t *rkqu,
- void (*cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *commit_opaque),
- void *commit_opaque);
-
-
-/**
- * @brief Retrieve committed offsets for topics+partitions.
- *
- * The \p offset field of each requested partition will either be set to the
- * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored
- * offset for that partition.
- *
- * Committed offsets will be returned according to the `isolation.level`
- * configuration property, if set to `read_committed` (default) then only
- * stable offsets for fully committed transactions will be returned, while
- * `read_uncommitted` may return offsets for not yet committed transactions.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
- * \p offset or \p err field of each \p partitions' element is filled
- * in with the stored offset, or a partition specific error.
- * Else returns an error code.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_committed(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions,
- int timeout_ms);
-
-
-
-/**
- * @brief Retrieve current positions (offsets) for topics+partitions.
- *
- * The \p offset field of each requested partition will be set to the offset
- * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there
- * was no previous message.
- *
- * @remark In this context the last consumed message is the offset consumed
- * by the current librdkafka instance and, in case of rebalancing, not
- * necessarily the last message fetched from the partition.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
- * \p offset or \p err field of each \p partitions' element is filled
- * in with the stored offset, or a partition specific error.
- * Else returns an error code.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
-
-
-
-/**
- * @returns the current consumer group metadata associated with this consumer,
- * or NULL if \p rk is not a consumer configured with a \c group.id.
- * This metadata object should be passed to the transactional
- * producer's rd_kafka_send_offsets_to_transaction() API.
- *
- * @remark The returned pointer must be freed by the application using
- * rd_kafka_consumer_group_metadata_destroy().
- *
- * @sa rd_kafka_send_offsets_to_transaction()
- */
-RD_EXPORT rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
-
-
-/**
- * @brief Create a new consumer group metadata object.
- * This is typically only used for writing tests.
- *
- * @param group_id The group id.
- *
- * @remark The returned pointer must be freed by the application using
- * rd_kafka_consumer_group_metadata_destroy().
- */
-RD_EXPORT rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata_new(const char *group_id);
-
-
-/**
- * @brief Create a new consumer group metadata object.
- * This is typically only used for writing tests.
- *
- * @param group_id The group id.
- * @param generation_id The group generation id.
- * @param member_id The group member id.
- * @param group_instance_id The group instance id (may be NULL).
- *
- * @remark The returned pointer must be freed by the application using
- * rd_kafka_consumer_group_metadata_destroy().
- */
-RD_EXPORT rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id,
- int32_t generation_id,
- const char *member_id,
- const char *group_instance_id);
-
-
-/**
- * @brief Frees the consumer group metadata object as returned by
- * rd_kafka_consumer_group_metadata().
- */
-RD_EXPORT void
-rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
-
-
-/**
- * @brief Serialize the consumer group metadata to a binary format.
- * This is mainly for client binding use and not for application use.
- *
- * @remark The serialized metadata format is private and is not compatible
- * across different versions or even builds of librdkafka.
- * It should only be used in the same process runtime and must only
- * be passed to rd_kafka_consumer_group_metadata_read().
- *
- * @param cgmd Metadata to be serialized.
- * @param bufferp On success this pointer will be updated to point to an
- * allocated buffer containing the serialized metadata.
- * The buffer must be freed with rd_kafka_mem_free().
- * @param sizep The pointed to size will be updated with the size of
- * the serialized buffer.
- *
- * @returns NULL on success or an error object on failure.
- *
- * @sa rd_kafka_consumer_group_metadata_read()
- */
-RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(
- const rd_kafka_consumer_group_metadata_t *cgmd,
- void **bufferp,
- size_t *sizep);
-
-/**
- * @brief Reads serialized consumer group metadata and returns a
- * consumer group metadata object.
- * This is mainly for client binding use and not for application use.
- *
- * @remark The serialized metadata format is private and is not compatible
- * across different versions or even builds of librdkafka.
- *         It should only be used in the same process runtime and the buffer
- *         must have been written by rd_kafka_consumer_group_metadata_write().
- *
- * @param cgmdp On success this pointer will be updated to point to a new
- * consumer group metadata object which must be freed with
- * rd_kafka_consumer_group_metadata_destroy().
- * @param buffer Pointer to the serialized data.
- * @param size Size of the serialized data.
- *
- * @returns NULL on success or an error object on failure.
- *
- * @sa rd_kafka_consumer_group_metadata_write()
- */
-RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(
- rd_kafka_consumer_group_metadata_t **cgmdp,
- const void *buffer,
- size_t size);
-
-/**@}*/
-
-
-
-/**
- * @name Producer API
- * @{
- *
- *
- */
-
-
-/**
- * @brief Producer message flags
- */
-#define RD_KAFKA_MSG_F_FREE \
- 0x1 /**< Delegate freeing of payload to rdkafka. \
- */
-#define RD_KAFKA_MSG_F_COPY \
- 0x2 /**< rdkafka will make a copy of the payload. \
- */
-#define RD_KAFKA_MSG_F_BLOCK \
- 0x4 /**< Block produce*() on message queue full. \
- * WARNING: If a delivery report callback \
- * is used, the application MUST \
- * call rd_kafka_poll() (or equiv.) \
- * to make sure delivered messages \
- * are drained from the internal \
- * delivery report queue. \
- * Failure to do so will result \
- * in indefinitely blocking on \
- * the produce() call when the \
- * message queue is full. */
-#define RD_KAFKA_MSG_F_PARTITION \
- 0x8 /**< produce_batch() will honor \
- * per-message partition. */
-
-
-
-/**
- * @brief Produce and send a single message to broker.
- *
- * \p rkt is the target topic which must have been previously created with
- * `rd_kafka_topic_new()`.
- *
- * `rd_kafka_produce()` is an asynchronous non-blocking API.
- * See `rd_kafka_conf_set_dr_msg_cb` on how to setup a callback to be called
- * once the delivery status (success or failure) is known. The delivery report
- * is triggered by the application calling `rd_kafka_poll()` (at regular
- * intervals) or `rd_kafka_flush()` (at termination).
- *
- * Since producing is asynchronous, you should call `rd_kafka_flush()` before
- * you destroy the producer. Otherwise, any outstanding messages will be
- * silently discarded.
- *
- * When temporary errors occur, librdkafka automatically retries to produce the
- * messages. Retries are triggered after retry.backoff.ms and when the
- * leader broker for the given partition is available. Otherwise, librdkafka
- * falls back to polling the topic metadata to monitor when a new leader is
- * elected (see the topic.metadata.refresh.fast.interval.ms and
- * topic.metadata.refresh.interval.ms configurations) and then performs a
- * retry. A delivery error will occur if the message could not be produced
- * within message.timeout.ms.
- *
- * See the "Message reliability" chapter in INTRODUCTION.md for more
- * information.
- *
- * \p partition is the target partition, either:
- * - RD_KAFKA_PARTITION_UA (unassigned) for
- * automatic partitioning using the topic's partitioner function, or
- * - a fixed partition (0..N)
- *
- * \p msgflags is zero or more of the following flags OR:ed together:
- * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if
- * \p queue.buffering.max.messages or
- * \p queue.buffering.max.kbytes are exceeded.
- * Messages are considered in-queue from the point
- * they are accepted by produce() until their corresponding delivery report
- * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or
- * equiv.) from a separate thread when F_BLOCK is used. See WARNING on \c
- * RD_KAFKA_MSG_F_BLOCK above.
- *
- * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done
- * with it.
- * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the
- * \p payload pointer will not be used by rdkafka
- * after the call returns.
- * RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message
- * partition, either set manually or by the
- * configured partitioner.
- *
- * .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are
- * set, the caller must ensure that the memory backing \p payload remains
- * valid and is not modified or reused until the delivery callback is
- * invoked. Other buffers passed to `rd_kafka_produce()` don't have this
- * restriction on reuse, i.e. the memory backing the key or the topic name
- * may be reused as soon as `rd_kafka_produce()` returns.
- *
- * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
- * the memory associated with the payload is still the caller's
- * responsibility.
- *
- * \p payload is the message payload of size \p len bytes.
- *
- * \p key is an optional message key of size \p keylen bytes, if non-NULL it
- * will be passed to the topic partitioner as well as be sent with the
- * message to the broker and passed on to the consumer.
- *
- * \p msg_opaque is an optional application-provided per-message opaque
- * pointer that will be provided in the message's delivery report callback
- * (\c dr_msg_cb or \c dr_cb) and the \c rd_kafka_message_t \c _private field.
- *
- * @remark on_send() and on_acknowledgement() interceptors may be called
- * from this function. on_acknowledgement() will only be called if the
- * message fails partitioning.
- *
- * @remark If the producer is transactional (\c transactional.id is configured)
- * producing is only allowed during an on-going transaction, namely
- * after rd_kafka_begin_transaction() has been called.
- *
- * @returns 0 on success or -1 on error in which case errno is set accordingly:
- * - ENOBUFS - maximum number of outstanding messages has been reached:
- * "queue.buffering.max.messages"
- * (RD_KAFKA_RESP_ERR__QUEUE_FULL)
- * - EMSGSIZE - message is larger than configured max size:
- * "messages.max.bytes".
- * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
- * - ESRCH - requested \p partition is unknown in the Kafka cluster.
- * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- * - ENOENT - topic is unknown in the Kafka cluster.
- * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- * - ECANCELED - fatal error has been raised on producer, see
- * rd_kafka_fatal_error(),
- * (RD_KAFKA_RESP_ERR__FATAL).
- * - ENOEXEC - transactional state forbids producing
- * (RD_KAFKA_RESP_ERR__STATE)
- *
- * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code.
- */
-RD_EXPORT
-int rd_kafka_produce(rd_kafka_topic_t *rkt,
- int32_t partition,
- int msgflags,
- void *payload,
- size_t len,
- const void *key,
- size_t keylen,
- void *msg_opaque);
-
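-/*
- * Illustrative sketch (needs <string.h>; payload and key are examples):
- * produce one copied message and serve delivery reports.
- *
- *   const char *payload = "hello", *key = "k1";
- *   if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
- *                        (void *)payload, strlen(payload),
- *                        key, strlen(key), NULL) == -1)
- *           fprintf(stderr, "produce failed: %s\n",
- *                   rd_kafka_err2str(rd_kafka_last_error()));
- *   rd_kafka_poll(rk, 0); // serve delivery reports, non-blocking
- */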
-
-/**
- * @brief Produce and send a single message to broker.
- *
- * The message is defined by a va-arg list using \c rd_kafka_vtype_t
- * tag tuples which must be terminated with a single \c RD_KAFKA_V_END.
- *
- * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as
- * described in rd_kafka_produce().
- * \c RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and
- * _V_HEADERS are mixed.
- *
- * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
-
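-/*
- * Illustrative sketch (topic and contents are examples): the same kind
- * of produce call expressed with vtype tuples.
- *
- *   rd_kafka_resp_err_t err = rd_kafka_producev(
- *           rk,
- *           RD_KAFKA_V_TOPIC("mytopic"),
- *           RD_KAFKA_V_KEY("k1", 2),
- *           RD_KAFKA_V_VALUE("hello", 5),
- *           RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- *           RD_KAFKA_V_END);
- */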
-
-/**
- * @brief Produce and send a single message to broker.
- *
- * The message is defined by an array of \c rd_kafka_vu_t of
- * count \p cnt.
- *
- * @returns an error object on failure or NULL on success.
- * See rd_kafka_producev() for specific error codes.
- *
- * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END
- */
-RD_EXPORT
-rd_kafka_error_t *
-rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
-
-
-/**
- * @brief Produce multiple messages.
- *
- * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will
- * be run for each message (slower), otherwise the messages will be enqueued
- * to the specified partition directly (faster).
- *
- * The messages are provided in the array \p rkmessages of count \p message_cnt
- * elements.
- * The \p partition and \p msgflags are used for all provided messages.
- *
- * Honoured \p rkmessages[] fields are:
- * - payload,len Message payload and length
- * - key,key_len Optional message key
- * - _private Message opaque pointer (msg_opaque)
- * - err Will be set according to success or failure, see
- * rd_kafka_produce() for possible error codes.
- * Application only needs to check for errors if
- * return value != \p message_cnt.
- *
- * @remark If \c RD_KAFKA_MSG_F_PARTITION is set in \p msgflags, the
- * \c .partition field of the \p rkmessages is used instead of
- * \p partition.
- *
- * @returns the number of messages successfully enqueued for producing.
- *
- * @remark This interface does NOT support setting message headers on
- * the provided \p rkmessages.
- */
-RD_EXPORT
-int rd_kafka_produce_batch(rd_kafka_topic_t *rkt,
- int32_t partition,
- int msgflags,
- rd_kafka_message_t *rkmessages,
- int message_cnt);
-
-
-
-/**
- * @brief Wait until all outstanding produce requests, et.al, are completed.
- * This should typically be done prior to destroying a producer instance
- * to make sure all queued and in-flight produce requests are completed
- * before terminating.
- *
- * @remark This function will call rd_kafka_poll() and thus trigger callbacks.
- *
- * @remark The \c linger.ms time will be ignored for the duration of the call,
- * queued messages will be sent to the broker as soon as possible.
- *
- * @remark If RD_KAFKA_EVENT_DR has been enabled
- * (through rd_kafka_conf_set_events()) this function will not call
- * rd_kafka_poll() but instead wait for the librdkafka-handled
- * message count to reach zero. This requires the application to
- * serve the event queue in a separate thread.
- * In this mode only messages are counted, not other types of
- * queued events.
- *
- * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all
- * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR
- *
- * @sa rd_kafka_outq_len()
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
-
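-/*
- * Illustrative sketch: typical producer shutdown sequence.
- *
- *   rd_kafka_flush(rk, 10 * 1000); // wait up to 10 s for delivery
- *   if (rd_kafka_outq_len(rk) > 0)
- *           fprintf(stderr, "%d message(s) not delivered\n",
- *                   rd_kafka_outq_len(rk));
- *   rd_kafka_destroy(rk);
- */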
-
-
-/**
- * @brief Purge messages currently handled by the producer instance.
- *
- * @param rk Client instance.
- * @param purge_flags Tells which messages to purge and how.
- *
- * The application will need to call rd_kafka_poll() or rd_kafka_flush()
- * afterwards to serve the delivery report callbacks of the purged messages.
- *
- * Messages purged from internal queues fail with the delivery report
- * error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that
- * are in-flight to or from the broker will fail with the error code set to
- * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT.
- *
- * @warning Purging messages that are in-flight to or from the broker
- * will ignore any subsequent acknowledgement for these messages
- * received from the broker, effectively making it impossible
- * for the application to know if the messages were successfully
- * produced or not. This may result in duplicate messages if the
- * application retries these messages at a later time.
- *
- * @remark This call may block for a short time while background thread
- * queues are purged.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p purge flags are invalid
- * or unknown,
- * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer
- * client instance.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
-
-
-/**
- * @brief Flags for rd_kafka_purge()
- */
-
-/*!
- * Purge messages in internal queues.
- */
-#define RD_KAFKA_PURGE_F_QUEUE 0x1
-
-/*!
- * Purge messages in-flight to or from the broker.
- * Purging these messages will void any future acknowledgements from the
- * broker, making it impossible for the application to know if these
- * messages were successfully delivered or not.
- * Retrying these messages may lead to duplicates.
- */
-#define RD_KAFKA_PURGE_F_INFLIGHT 0x2
-
-
-/*!
- * Don't wait for background thread queue purging to finish.
- */
-#define RD_KAFKA_PURGE_F_NON_BLOCKING 0x4
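-
-
-/**
- * @brief Usage sketch (illustrative): abort all outstanding messages,
- *        e.g. after a fatal error, then serve the resulting delivery
- *        reports.
- *
- * @code
- * rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_INFLIGHT);
- * rd_kafka_poll(rk, 0); // serves DRs with _PURGE_QUEUE/_PURGE_INFLIGHT
- * @endcode
- */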
-
-
-/**@}*/
-
-
-/**
- * @name Metadata API
- * @{
- *
- *
- */
-
-
-/**
- * @brief Broker information
- */
-typedef struct rd_kafka_metadata_broker {
- int32_t id; /**< Broker Id */
- char *host; /**< Broker hostname */
- int port; /**< Broker listening port */
-} rd_kafka_metadata_broker_t;
-
-/**
- * @brief Partition information
- */
-typedef struct rd_kafka_metadata_partition {
- int32_t id; /**< Partition Id */
- rd_kafka_resp_err_t err; /**< Partition error reported by broker */
- int32_t leader; /**< Leader broker */
- int replica_cnt; /**< Number of brokers in \p replicas */
- int32_t *replicas; /**< Replica brokers */
- int isr_cnt; /**< Number of ISR brokers in \p isrs */
- int32_t *isrs; /**< In-Sync-Replica brokers */
-} rd_kafka_metadata_partition_t;
-
-/**
- * @brief Topic information
- */
-typedef struct rd_kafka_metadata_topic {
- char *topic; /**< Topic name */
-        int partition_cnt; /**< Number of partitions in \p partitions */
- struct rd_kafka_metadata_partition *partitions; /**< Partitions */
- rd_kafka_resp_err_t err; /**< Topic error reported by broker */
-} rd_kafka_metadata_topic_t;
-
-
-/**
- * @brief Metadata container
- */
-typedef struct rd_kafka_metadata {
- int broker_cnt; /**< Number of brokers in \p brokers */
- struct rd_kafka_metadata_broker *brokers; /**< Brokers */
-
- int topic_cnt; /**< Number of topics in \p topics */
- struct rd_kafka_metadata_topic *topics; /**< Topics */
-
- int32_t orig_broker_id; /**< Broker originating this metadata */
- char *orig_broker_name; /**< Name of originating broker */
-} rd_kafka_metadata_t;
-
-/**
- * @brief Request Metadata from broker.
- *
- * Parameters:
- * - \p all_topics if non-zero: request info about all topics in cluster,
- * if zero: only request info about locally known topics.
- * - \p only_rkt only request info about this topic
- * - \p metadatap pointer to hold metadata result.
- * The \p *metadatap pointer must be released
- * with rd_kafka_metadata_destroy().
- * - \p timeout_ms maximum response time before failing.
- *
- * @remark Consumer: If \p all_topics is non-zero the Metadata response
- * information may trigger a re-join if any subscribed topics
- * have changed partition count or existence state.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case \p *metadatap
- *          will be set), else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or
- *          another error code on error.
- */
-RD_EXPORT
-rd_kafka_resp_err_t
-rd_kafka_metadata(rd_kafka_t *rk,
- int all_topics,
- rd_kafka_topic_t *only_rkt,
- const struct rd_kafka_metadata **metadatap,
- int timeout_ms);
-
-/**
- * @brief Release metadata memory.
- */
-RD_EXPORT
-void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
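-
-/**
- * @brief Usage sketch (illustrative): fetch cluster-wide metadata and
- *        print the partition count of each topic.
- *
- * @code
- * const struct rd_kafka_metadata *md;
- * rd_kafka_resp_err_t err = rd_kafka_metadata(rk, 1, NULL, &md, 5000);
- * if (!err) {
- *         for (int i = 0; i < md->topic_cnt; i++)
- *                 printf("%s: %d partition(s)\n",
- *                        md->topics[i].topic,
- *                        md->topics[i].partition_cnt);
- *         rd_kafka_metadata_destroy(md);
- * }
- * @endcode
- */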
-
-/**
- * @brief Node (broker) information.
- */
-typedef struct rd_kafka_Node_s rd_kafka_Node_t;
-
-/**
- * @brief Get the id of \p node.
- *
- * @param node The Node instance.
- *
- * @return The node id.
- */
-RD_EXPORT
-int rd_kafka_Node_id(const rd_kafka_Node_t *node);
-
-/**
- * @brief Get the host of \p node.
- *
- * @param node The Node instance.
- *
- * @return The node host.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p node object.
- */
-RD_EXPORT
-const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
-
-/**
- * @brief Get the port of \p node.
- *
- * @param node The Node instance.
- *
- * @return The node port.
- */
-RD_EXPORT
-uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
-
-/**@}*/
-
-
-
-/**
- * @name Client group information
- * @{
- *
- *
- */
-
-
-/**
- * @brief Group member information
- *
- * For more information on \p member_metadata format, see
- * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
- *
- */
-struct rd_kafka_group_member_info {
- char *member_id; /**< Member id (generated by broker) */
- char *client_id; /**< Client's \p client.id */
- char *client_host; /**< Client's hostname */
- void *member_metadata; /**< Member metadata (binary),
- * format depends on \p protocol_type. */
- int member_metadata_size; /**< Member metadata size in bytes */
- void *member_assignment; /**< Member assignment (binary),
- * format depends on \p protocol_type. */
- int member_assignment_size; /**< Member assignment size in bytes */
-};
-
-/**
- * @enum rd_kafka_consumer_group_state_t
- *
- * @brief Consumer group state.
- */
-typedef enum {
- RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
- RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
- RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
- RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
- RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
- RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
- RD_KAFKA_CONSUMER_GROUP_STATE__CNT
-} rd_kafka_consumer_group_state_t;
-
-/**
- * @brief Group information
- */
-struct rd_kafka_group_info {
- struct rd_kafka_metadata_broker broker; /**< Originating broker info */
- char *group; /**< Group name */
- rd_kafka_resp_err_t err; /**< Broker-originated error */
- char *state; /**< Group state */
- char *protocol_type; /**< Group protocol type */
- char *protocol; /**< Group protocol */
- struct rd_kafka_group_member_info *members; /**< Group members */
- int member_cnt; /**< Group member count */
-};
-
-/**
- * @brief List of groups
- *
- * @sa rd_kafka_group_list_destroy() to release list memory.
- */
-struct rd_kafka_group_list {
- struct rd_kafka_group_info *groups; /**< Groups */
- int group_cnt; /**< Group count */
-};
-
-
-/**
- * @brief List and describe client groups in cluster.
- *
- * \p group is an optional group name to describe, otherwise (\c NULL) all
- * groups are returned.
- *
- * \p timeout_ms is the (approximate) maximum time to wait for response
- * from brokers and must be a positive value.
- *
- * @returns \c RD_KAFKA_RESP_ERR__NO_ERROR on success and \p grplistp is
- * updated to point to a newly allocated list of groups.
- * \c RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded
- *          in time but at least one group is returned in \p grplistp.
- * \c RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the
- * given timeframe but not all brokers have yet responded, or
- * if the list of brokers in the cluster could not be obtained within
- * the given timeframe.
- * \c RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found.
- * Other error codes may also be returned from the request layer.
- *
- * The \p grplistp remains untouched if any error code is returned,
- * with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves
- * as RD_KAFKA_RESP_ERR__NO_ERROR (success) but with an incomplete
- * group list.
- *
- * @sa Use rd_kafka_group_list_destroy() to release list memory.
- *
- * @deprecated Use rd_kafka_ListConsumerGroups() and
- * rd_kafka_DescribeConsumerGroups() instead.
- */
-RD_EXPORT
-rd_kafka_resp_err_t
-rd_kafka_list_groups(rd_kafka_t *rk,
- const char *group,
- const struct rd_kafka_group_list **grplistp,
- int timeout_ms);
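-
-/**
- * @brief Usage sketch (illustrative; the API itself is deprecated in
- *        favour of rd_kafka_ListConsumerGroups()): list all groups and
- *        their states.
- *
- * @code
- * const struct rd_kafka_group_list *grplist;
- * rd_kafka_resp_err_t err = rd_kafka_list_groups(rk, NULL, &grplist, 5000);
- * if (!err || err == RD_KAFKA_RESP_ERR__PARTIAL) {
- *         for (int i = 0; i < grplist->group_cnt; i++)
- *                 printf("%s (%s)\n",
- *                        grplist->groups[i].group,
- *                        grplist->groups[i].state);
- *         rd_kafka_group_list_destroy(grplist);
- * }
- * @endcode
- */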
-
-/**
- * @brief Returns a name for a state code.
- *
- * @param state The state value.
- *
- * @return The group state name corresponding to the provided group state value.
- */
-RD_EXPORT
-const char *
-rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
-
-/**
- * @brief Returns a code for a state name.
- *
- * @param name The state name.
- *
- * @return The group state value corresponding to the provided group state name.
- */
-RD_EXPORT
-rd_kafka_consumer_group_state_t
-rd_kafka_consumer_group_state_code(const char *name);
-
-/**
- * @brief Release list memory
- */
-RD_EXPORT
-void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
-
-
-/**@}*/
-
-
-
-/**
- * @name Miscellaneous APIs
- * @{
- *
- */
-
-
-/**
- * @brief Adds one or more brokers to the kafka handle's list of initial
- * bootstrap brokers.
- *
- * Additional brokers will be discovered automatically as soon as rdkafka
- * connects to a broker by querying the broker metadata.
- *
- * If a broker name resolves to multiple addresses (and possibly
- * address families) all will be used for connection attempts in
- * round-robin fashion.
- *
- * \p brokerlist is a ,-separated list of brokers in the format:
- * \c \<broker1\>,\<broker2\>,..
- * Where each broker is in either the host or URL based format:
- * \c \<host\>[:\<port\>]
- * \c \<proto\>://\<host\>[:port]
- * \c \<proto\> is either \c PLAINTEXT, \c SSL, \c SASL, \c SASL_PLAINTEXT
- * The two formats can be mixed but ultimately the value of the
- * `security.protocol` config property decides what brokers are allowed.
- *
- * Example:
- * brokerlist = "broker1:10000,broker2"
- * brokerlist = "SSL://broker3:9000,ssl://broker2"
- *
- * @returns the number of brokers successfully added.
- *
- * @remark Brokers may also be defined with the \c metadata.broker.list or
- * \c bootstrap.servers configuration property (preferred method).
- *
- * @deprecated Set bootstrap servers with the \c bootstrap.servers
- * configuration property.
- */
-RD_EXPORT
-int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
-
-
-
-/**
- * @brief Set logger function.
- *
- * The default is to print to stderr, but a syslog logger is also available,
- * see rd_kafka_log_(print|syslog) for the builtin alternatives.
- * Alternatively the application may provide its own logger callback,
- * or pass \p func as NULL to disable logging.
- *
- * @deprecated Use rd_kafka_conf_set_log_cb()
- *
- * @remark \p rk may be passed as NULL in the callback.
- */
-RD_EXPORT RD_DEPRECATED void
-rd_kafka_set_logger(rd_kafka_t *rk,
- void (*func)(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf));
-
-
-/**
- * @brief Specifies the maximum logging level emitted by
- * internal kafka logging and debugging.
- *
- * @deprecated Set the \c "log_level" configuration property instead.
- *
- * @remark If the \p \"debug\" configuration property is set the log level is
- * automatically adjusted to \c LOG_DEBUG (7).
- */
-RD_EXPORT
-void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
-
-
-/**
- * @brief Builtin (default) log sink: print to stderr
- */
-RD_EXPORT
-void rd_kafka_log_print(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf);
-
-
-/**
- * @brief Builtin log sink: print to syslog.
- * @remark This logger is only available if librdkafka was built
- * with syslog support.
- */
-RD_EXPORT
-void rd_kafka_log_syslog(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf);
-
-
-/**
- * @brief Returns the current out queue length.
- *
- * The out queue length is the sum of:
- * - number of messages waiting to be sent to, or acknowledged by,
- * the broker.
- * - number of delivery reports (e.g., dr_msg_cb) waiting to be served
- * by rd_kafka_poll() or rd_kafka_flush().
- * - number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be
- * served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush().
- * - number of events waiting to be served by background_event_cb() in
- * the background queue (see rd_kafka_conf_set_background_event_cb).
- *
- * An application should wait for the return value of this function to reach
- * zero before terminating to make sure outstanding messages,
- * requests (such as offset commits), callbacks and events are fully processed.
- * See rd_kafka_flush().
- *
- * @returns number of messages and events waiting in queues.
- *
- * @sa rd_kafka_flush()
- */
-RD_EXPORT
-int rd_kafka_outq_len(rd_kafka_t *rk);
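-
-/**
- * @brief Usage sketch (illustrative): drain all remaining work before
- *        terminating.
- *
- * @code
- * while (rd_kafka_outq_len(rk) > 0)
- *         rd_kafka_poll(rk, 100); // serve callbacks while waiting
- * @endcode
- */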
-
-
-
-/**
- * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp
- *
- * This is only useful for debugging rdkafka, showing state and statistics
- * for brokers, topics, partitions, etc.
- */
-RD_EXPORT
-void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
-
-
-
-/**
- * @brief Retrieve the current number of threads in use by librdkafka.
- *
- * Used by regression tests.
- */
-RD_EXPORT
-int rd_kafka_thread_cnt(void);
-
-
-/**
- * @enum rd_kafka_thread_type_t
- *
- * @brief librdkafka internal thread type.
- *
- * @sa rd_kafka_interceptor_add_on_thread_start()
- */
-typedef enum rd_kafka_thread_type_t {
- RD_KAFKA_THREAD_MAIN, /**< librdkafka's internal main thread */
- RD_KAFKA_THREAD_BACKGROUND, /**< Background thread (if enabled) */
- RD_KAFKA_THREAD_BROKER /**< Per-broker thread */
-} rd_kafka_thread_type_t;
-
-
-/**
- * @brief Wait for all rd_kafka_t objects to be destroyed.
- *
- * Returns 0 if all kafka objects are now destroyed, or -1 if the
- * timeout was reached.
- *
- * @remark This function is deprecated.
- */
-RD_EXPORT
-int rd_kafka_wait_destroyed(int timeout_ms);
-
-
-/**
- * @brief Run librdkafka's built-in unit-tests.
- *
- * @returns the number of failures, or 0 if all tests passed.
- */
-RD_EXPORT
-int rd_kafka_unittest(void);
-
-
-/**@}*/
-
-
-
-/**
- * @name Experimental APIs
- * @{
- */
-
-/**
- * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's
- * queue (rd_kafka_consumer_poll()).
- *
- * @warning It is not permitted to call rd_kafka_poll() after directing the
- * main queue with rd_kafka_poll_set_consumer().
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
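-
-/**
- * @brief Usage sketch (illustrative): redirect the main queue so that a
- *        single rd_kafka_consumer_poll() loop serves all callbacks;
- *        \c run and \c process_message() are application-defined.
- *
- * @code
- * rd_kafka_poll_set_consumer(rk);
- * while (run) {
- *         rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
- *         if (rkm) {
- *                 process_message(rkm); // application-defined
- *                 rd_kafka_message_destroy(rkm);
- *         }
- * }
- * @endcode
- */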
-
-
-/**@}*/
-
-/**
- * @name Event interface
- *
- * @brief The event API provides an alternative pollable non-callback interface
- * to librdkafka's message and event queues.
- *
- * @{
- */
-
-
-/**
- * @brief Event types
- */
-typedef int rd_kafka_event_type_t;
-#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */
-#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */
-#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */
-#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */
-#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */
-#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */
-#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */
-#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */
-#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */
-#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */
-#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \
- 102 /**< CreatePartitions_result_t */
-#define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */
-#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \
- 104 /**< DescribeConfigs_result_t */
-#define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */
-#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */
-/** DeleteConsumerGroupOffsets_result_t */
-#define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107
-/** SASL/OAUTHBEARER token needs to be refreshed */
-#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100
-#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */
-#define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */
-#define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */
-#define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */
-/** ListConsumerGroupsResult_t */
-#define RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT 0x2000
-/** DescribeConsumerGroups_result_t */
-#define RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT 0x4000
-/** ListConsumerGroupOffsets_result_t */
-#define RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT 0x8000
-/** AlterConsumerGroupOffsets_result_t */
-#define RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT 0x10000
-
-
-/**
- * @returns the event type for the given event.
- *
- * @remark As a convenience it is okay to pass \p rkev as NULL in which case
- * RD_KAFKA_EVENT_NONE is returned.
- */
-RD_EXPORT
-rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
-
-/**
- * @returns the event type's name for the given event.
- *
- * @remark As a convenience it is okay to pass \p rkev as NULL in which case
- * the name for RD_KAFKA_EVENT_NONE is returned.
- */
-RD_EXPORT
-const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
-
-
-/**
- * @brief Destroy an event.
- *
- * @remark Any references to this event, such as extracted messages,
- * will not be usable after this call.
- *
- * @remark As a convenience it is okay to pass \p rkev as NULL in which case
- * no action is performed.
- */
-RD_EXPORT
-void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns the next message from an event.
- *
- * Call repeatedly until it returns NULL.
- *
- * Event types:
- * - RD_KAFKA_EVENT_FETCH (1 message)
- * - RD_KAFKA_EVENT_DR (>=1 message(s))
- *
- * @remark The returned message(s) MUST NOT be
- * freed with rd_kafka_message_destroy().
- *
- * @remark The on_consume() interceptor may be called
- *         from this function prior to passing the message to the application.
- */
-RD_EXPORT
-const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
-
-
-/**
- * @brief Extracts up to \p size messages from the event into the
- *        pre-allocated array \p rkmessages.
- *
- * Event types:
- * - RD_KAFKA_EVENT_FETCH (1 message)
- * - RD_KAFKA_EVENT_DR (>=1 message(s))
- *
- * @returns the number of messages extracted.
- *
- * @remark The on_consume() interceptor may be called
- *         from this function prior to passing the message to the application.
- */
-RD_EXPORT
-size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev,
- const rd_kafka_message_t **rkmessages,
- size_t size);
-
-
-/**
- * @returns the number of remaining messages in the event.
- *
- * Event types:
- * - RD_KAFKA_EVENT_FETCH (1 message)
- * - RD_KAFKA_EVENT_DR (>=1 message(s))
- */
-RD_EXPORT
-size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns the associated configuration string for the event, or NULL
- * if the configuration property is not set or if
- * not applicable for the given event type.
- *
- * The returned memory is read-only and its lifetime is the same as the
- * event object.
- *
- * Event types:
- * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config
- */
-RD_EXPORT
-const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns the error code for the event.
- *
- * Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error.
- *
- * Event types:
- * - all
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns the error string (if any).
- * An application should check that rd_kafka_event_error() returns
- * non-zero before calling this function.
- *
- * Event types:
- * - all
- */
-RD_EXPORT
-const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns 1 if the error is a fatal error, else 0.
- *
- * Event types:
- * - RD_KAFKA_EVENT_ERROR
- *
- * @sa rd_kafka_fatal_error()
- */
-RD_EXPORT
-int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns the event opaque (if any) as passed to rd_kafka_commit() (et al.) or
- * rd_kafka_AdminOptions_set_opaque(), depending on event type.
- *
- * Event types:
- * - RD_KAFKA_EVENT_OFFSET_COMMIT
- * - RD_KAFKA_EVENT_CREATETOPICS_RESULT
- * - RD_KAFKA_EVENT_DELETETOPICS_RESULT
- * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
- * - RD_KAFKA_EVENT_CREATEACLS_RESULT
- * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
- * - RD_KAFKA_EVENT_DELETEACLS_RESULT
- * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
- * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
- * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT
- * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
- * - RD_KAFKA_EVENT_DELETERECORDS_RESULT
- * - RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
- * - RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
- * - RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
- * - RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
- */
-RD_EXPORT
-void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
-
-
-/**
- * @brief Extract log message from the event.
- *
- * Event types:
- * - RD_KAFKA_EVENT_LOG
- *
- * @returns 0 on success or -1 if unsupported event type.
- */
-RD_EXPORT
-int rd_kafka_event_log(rd_kafka_event_t *rkev,
- const char **fac,
- const char **str,
- int *level);
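-
-/**
- * @brief Usage sketch (illustrative): extract and print a log event.
- *
- * @code
- * const char *fac, *str;
- * int level;
- * if (!rd_kafka_event_log(rkev, &fac, &str, &level))
- *         fprintf(stderr, "LOG-%d-%s: %s\n", level, fac, str);
- * @endcode
- */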
-
-
-/**
- * @brief Extract log debug context from event.
- *
- * Event types:
- * - RD_KAFKA_EVENT_LOG
- *
- * @param rkev the event to extract data from.
- * @param dst destination string for comma separated list.
- * @param dstsize size of provided dst buffer.
- * @returns 0 on success or -1 if unsupported event type.
- */
-RD_EXPORT
-int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev,
- char *dst,
- size_t dstsize);
-
-
-/**
- * @brief Extract stats from the event.
- *
- * Event types:
- * - RD_KAFKA_EVENT_STATS
- *
- * @returns stats json string.
- *
- * @remark The returned string will be freed automatically along with the
- *         event object.
- *
- */
-RD_EXPORT
-const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns the topic partition list from the event.
- *
- * @remark The list MUST NOT be freed with
- * rd_kafka_topic_partition_list_destroy()
- *
- * Event types:
- * - RD_KAFKA_EVENT_REBALANCE
- * - RD_KAFKA_EVENT_OFFSET_COMMIT
- */
-RD_EXPORT rd_kafka_topic_partition_list_t *
-rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
-
-
-/**
- * @returns a newly allocated topic_partition container, if applicable for the
- * event type, else NULL.
- *
- * @remark The returned pointer MUST be freed with
- * rd_kafka_topic_partition_destroy().
- *
- * Event types:
- * RD_KAFKA_EVENT_ERROR (for partition level errors)
- */
-RD_EXPORT rd_kafka_topic_partition_t *
-rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
-
-
-/*! CreateTopics result type */
-typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
-/*! DeleteTopics result type */
-typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
-/*! CreateAcls result type */
-typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
-/*! DescribeAcls result type */
-typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
-/*! DeleteAcls result type */
-typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
-/*! CreatePartitions result type */
-typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
-/*! AlterConfigs result type */
-typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
-/*! DescribeConfigs result type */
-typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
-/*! DeleteRecords result type */
-typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
-/*! ListConsumerGroups result type */
-typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
-/*! DescribeConsumerGroups result type */
-typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
-/*! DeleteGroups result type */
-typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
-/*! DeleteConsumerGroupOffsets result type */
-typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
-/*! AlterConsumerGroupOffsets result type */
-typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
-/*! ListConsumerGroupOffsets result type */
-typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
-
-/**
- * @brief Get CreateTopics result.
- *
- * @returns the result of a CreateTopics request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_CREATETOPICS_RESULT
- */
-RD_EXPORT const rd_kafka_CreateTopics_result_t *
-rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteTopics result.
- *
- * @returns the result of a DeleteTopics request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DELETETOPICS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteTopics_result_t *
-rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get CreatePartitions result.
- *
- * @returns the result of a CreatePartitions request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
- */
-RD_EXPORT const rd_kafka_CreatePartitions_result_t *
-rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get AlterConfigs result.
- *
- * @returns the result of an AlterConfigs request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
- */
-RD_EXPORT const rd_kafka_AlterConfigs_result_t *
-rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DescribeConfigs result.
- *
- * @returns the result of a DescribeConfigs request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
- */
-RD_EXPORT const rd_kafka_DescribeConfigs_result_t *
-rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
-
-/**
- * @returns the result of a DeleteRecords request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DELETERECORDS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteRecords_result_t *
-rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get ListConsumerGroups result.
- *
- * @returns the result of a ListConsumerGroups request, or NULL if event is of
- * different type.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p rkev object.
- *
- * Event types:
- * RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
- */
-RD_EXPORT const rd_kafka_ListConsumerGroups_result_t *
-rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DescribeConsumerGroups result.
- *
- * @returns the result of a DescribeConsumerGroups request, or NULL if event is
- * of different type.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p rkev object.
- *
- * Event types:
- * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
- */
-RD_EXPORT const rd_kafka_DescribeConsumerGroups_result_t *
-rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteGroups result.
- *
- * @returns the result of a DeleteGroups request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DELETEGROUPS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteGroups_result_t *
-rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get DeleteConsumerGroupOffsets result.
- *
- * @returns the result of a DeleteConsumerGroupOffsets request, or NULL if
- * event is of different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t *
-rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
-
-/**
- * @returns the result of a CreateAcls request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_CREATEACLS_RESULT
- */
-RD_EXPORT const rd_kafka_CreateAcls_result_t *
-rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
-
-/**
- * @returns the result of a DescribeAcls request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
- */
-RD_EXPORT const rd_kafka_DescribeAcls_result_t *
-rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
-
-/**
- * @returns the result of a DeleteAcls request, or NULL if event is of
- * different type.
- *
- * Event types:
- * RD_KAFKA_EVENT_DELETEACLS_RESULT
- */
-RD_EXPORT const rd_kafka_DeleteAcls_result_t *
-rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get AlterConsumerGroupOffsets result.
- *
- * @returns the result of an AlterConsumerGroupOffsets request, or NULL if
- * event is of different type.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p rkev object.
- *
- * Event types:
- * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
- */
-RD_EXPORT const rd_kafka_AlterConsumerGroupOffsets_result_t *
-rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Get ListConsumerGroupOffsets result.
- *
- * @returns the result of a ListConsumerGroupOffsets request, or NULL if
- * event is of different type.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p rkev object.
- *
- * Event types:
- * RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
- */
-RD_EXPORT const rd_kafka_ListConsumerGroupOffsets_result_t *
-rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
-
-/**
- * @brief Poll a queue for an event for max \p timeout_ms.
- *
- * @returns an event, or NULL.
- *
- * @remark Use rd_kafka_event_destroy() to free the event.
- *
- * @sa rd_kafka_conf_set_background_event_cb()
- */
-RD_EXPORT
-rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
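-
-/**
- * @brief Usage sketch (illustrative): a minimal event loop serving
- *        delivery reports and errors from the main queue;
- *        \c handle_dr() is application-defined.
- *
- * @code
- * rd_kafka_queue_t *rkqu = rd_kafka_queue_get_main(rk);
- * rd_kafka_event_t *rkev;
- * while ((rkev = rd_kafka_queue_poll(rkqu, 1000))) {
- *         switch (rd_kafka_event_type(rkev)) {
- *         case RD_KAFKA_EVENT_DR: {
- *                 const rd_kafka_message_t *rkm;
- *                 while ((rkm = rd_kafka_event_message_next(rkev)))
- *                         handle_dr(rkm); // application-defined
- *                 break;
- *         }
- *         case RD_KAFKA_EVENT_ERROR:
- *                 fprintf(stderr, "Error: %s\n",
- *                         rd_kafka_event_error_string(rkev));
- *                 break;
- *         }
- *         rd_kafka_event_destroy(rkev);
- * }
- * rd_kafka_queue_destroy(rkqu);
- * @endcode
- */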
-
-/**
- * @brief Poll a queue for events served through callbacks for max \p
- * timeout_ms.
- *
- * @returns the number of events served.
- *
- * @remark This API must only be used for queues with callbacks registered
- * for all expected event types. E.g., not a message queue.
- *
- * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering
- * event callbacks from a librdkafka-managed background thread.
- *
- * @sa rd_kafka_conf_set_background_event_cb()
- */
-RD_EXPORT
-int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
-
-
-/**@}*/
-
-
-/**
- * @name Plugin interface
- *
- * @brief A plugin interface that allows external runtime-loaded libraries
- * to integrate with a client instance without modifications to
- * the application code.
- *
- * Plugins are loaded when referenced through the `plugin.library.paths`
- * configuration property and operate on the \c rd_kafka_conf_t
- * object prior to \c rd_kafka_t instance creation.
- *
- * @warning Plugins require the application to link librdkafka dynamically
- * and not statically. Failure to do so will lead to missing symbols
- * or finding symbols in another librdkafka library than the
- * application was linked with.
- * @{
- */
-
-
-/**
- * @brief Plugin's configuration initializer method called each time the
- * library is referenced from configuration (even if previously loaded by
- * another client instance).
- *
- * @remark This method MUST be implemented by plugins and have the symbol name
- * \c conf_init
- *
- * @param conf Configuration set up to this point.
- * @param plug_opaquep Plugin can set this pointer to a per-configuration
- * opaque pointer.
- * @param errstr String buffer of size \p errstr_size where the plugin must
- *               write a human-readable error string in case the initializer
- *               fails (returns non-zero).
- * @param errstr_size Maximum space (including \0) in \p errstr.
- *
- * @remark A plugin may add an on_conf_destroy() interceptor to clean up
- * plugin-specific resources created in the plugin's conf_init() method.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)(
- rd_kafka_conf_t *conf,
- void **plug_opaquep,
- char *errstr,
- size_t errstr_size);
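-
-/**
- * @brief Implementation sketch (illustrative): a minimal plugin conf_init()
- *        that allocates per-configuration state and registers an
- *        on_conf_destroy() interceptor for cleanup. \c my_plugin_state_t
- *        and \c my_on_conf_destroy() are hypothetical.
- *
- * @code
- * RD_EXPORT rd_kafka_resp_err_t
- * conf_init(rd_kafka_conf_t *conf, void **plug_opaquep,
- *           char *errstr, size_t errstr_size) {
- *         my_plugin_state_t *state = calloc(1, sizeof(*state));
- *         if (!state) {
- *                 snprintf(errstr, errstr_size, "out of memory");
- *                 return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- *         }
- *         *plug_opaquep = state;
- *         return rd_kafka_conf_interceptor_add_on_conf_destroy(
- *                 conf, "my_plugin", my_on_conf_destroy, state);
- * }
- * @endcode
- */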
-
-/**@}*/
-
-
-
-/**
- * @name Interceptors
- *
- * @{
- *
- * @brief A callback interface that allows message interception for both
- * producer and consumer data pipelines.
- *
- * Except for the on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy()
- * interceptors, interceptors are added to the
- * newly created rd_kafka_t client instance. These interceptors MUST only
- * be added from on_new() and MUST NOT be added after rd_kafka_new() returns.
- *
- * The on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy() interceptors
- * are added to the configuration object which is later passed to
- * rd_kafka_new() where on_new() is called to allow addition of
- * other interceptors.
- *
- * Each interceptor reference consists of a display name (ic_name),
- * a callback function, and an application-specified opaque value that is
- * passed as-is to the callback.
- * The ic_name must be unique for the interceptor implementation and is used
- * to reject duplicate interceptor methods.
- *
- * Any number of interceptors can be added and they are called in the order
- * they were added, unless otherwise noted.
- * The list of registered interceptor methods are referred to as
- * interceptor chains.
- *
- * @remark Contrary to the Java client the librdkafka interceptor interface
- * does not support message key and value modification.
- * Message mutability is discouraged in the Java client and the
- * combination of serializers and headers cover most use-cases.
- *
- * @remark Interceptors are NOT copied to the new configuration on
- * rd_kafka_conf_dup() since it would be hard for interceptors to
- * track usage of the interceptor's opaque value.
- * An interceptor should rely on the plugin, which will be copied
- *         in rd_kafka_conf_dup(), to set up the initial interceptors.
- * An interceptor should implement the on_conf_dup() method
- * to manually set up its internal configuration on the newly created
- * configuration object that is being copied-to based on the
- * interceptor-specific configuration properties.
- * conf_dup() should thus be treated the same as conf_init().
- *
- * @remark Interceptors are keyed by the interceptor type (on_..()), the
- * interceptor name (ic_name) and the interceptor method function.
- * Duplicates are not allowed and the .._add_on_..() method will
- * return RD_KAFKA_RESP_ERR__CONFLICT if attempting to add a duplicate
- * method.
- * The only exception is on_conf_destroy() which may be added multiple
- * times by the same interceptor to allow proper cleanup of
- * interceptor configuration state.
- */
-
-
-/**
- * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order
- * the interceptors were added.
- *
- * @param conf Configuration object.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- * @param name The configuration property to set.
- * @param val The configuration value to set, or NULL for reverting to default
- * in which case the previous value should be freed.
- * @param errstr A human readable error string in case the interceptor fails.
- * @param errstr_size Maximum space (including \0) in \p errstr.
- *
- * @returns RD_KAFKA_CONF_OK if the property was known and successfully
- * handled by the interceptor, RD_KAFKA_CONF_INVALID if the
- * property was handled by the interceptor but the value was invalid,
- * or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle
- *          this property, in which case the property is passed on to the
- *          next interceptor in the chain, finally ending up at the built-in
- * configuration handler.
- */
-typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)(
- rd_kafka_conf_t *conf,
- const char *name,
- const char *val,
- char *errstr,
- size_t errstr_size,
- void *ic_opaque);
-
-
-/**
- * @brief on_conf_dup() is called from rd_kafka_conf_dup() in the
- * order the interceptors were added and is used to let
- *        an interceptor re-register its conf interceptors with a new
- * opaque value.
- * The on_conf_dup() method is called prior to the configuration from
- * \p old_conf being copied to \p new_conf.
- *
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- * @param new_conf New configuration object.
- * @param old_conf Old configuration object to copy properties from.
- * @param filter_cnt Number of property names to filter in \p filter.
- * @param filter Property names to filter out (ignore) when setting up
- * \p new_conf.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code
- * on failure (which is logged but otherwise ignored).
- *
- * @remark No on_conf_* interceptors are copied to the new configuration
- * object on rd_kafka_conf_dup().
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)(
- rd_kafka_conf_t *new_conf,
- const rd_kafka_conf_t *old_conf,
- size_t filter_cnt,
- const char **filter,
- void *ic_opaque);
-
-
-/**
- * @brief on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the
- * order the interceptors were added.
- *
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)(
- void *ic_opaque);
-
-
-/**
- * @brief on_new() is called from rd_kafka_new() prior to returning
- * the newly created client instance to the application.
- *
- * @param rk The client instance.
- * @param conf The client instance's final configuration.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- * @param errstr A human readable error string in case the interceptor fails.
- * @param errstr_size Maximum space (including \0) in \p errstr.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- *
- * @warning The \p rk client instance will not be fully set up when this
- * interceptor is called and the interceptor MUST NOT call any
- * other rk-specific APIs than rd_kafka_interceptor_add..().
- *
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)(
- rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size);
-
-
-/**
- * @brief on_destroy() is called from rd_kafka_destroy(), or from
- *        rd_kafka_new() if rd_kafka_new() fails during initialization.
- *
- * @param rk The client instance.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- */
-typedef rd_kafka_resp_err_t(
- rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
-
-
-
-/**
- * @brief on_send() is called from rd_kafka_produce*() (et al.) prior to
- * the partitioner being called.
- *
- * @param rk The client instance.
- * @param rkmessage The message being produced. Immutable.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by producer instances.
- *
- * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
- * by the interceptor.
- *
- * @remark If the partitioner fails or an unknown partition was specified,
- * the on_acknowledgement() interceptor chain will be called from
- * within the rd_kafka_produce*() call to maintain send-acknowledgement
- * symmetry.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)(
- rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage,
- void *ic_opaque);
-
-/**
- * @brief on_acknowledgement() is called to inform interceptors that a message
- *        was successfully delivered or permanently failed delivery.
- * The interceptor chain is called from internal librdkafka background
- * threads, or rd_kafka_produce*() if the partitioner failed.
- *
- * @param rk The client instance.
- * @param rkmessage The message being produced. Immutable.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by producer instances.
- *
- * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
- * by the interceptor.
- *
- * @warning The on_acknowledgement() method may be called from internal
- * librdkafka threads. An on_acknowledgement() interceptor MUST NOT
- * call any librdkafka API's associated with the \p rk, or perform
- * any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)(
- rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage,
- void *ic_opaque);
-
-
-/**
- * @brief on_consume() is called just prior to passing the message to the
- * application in rd_kafka_consumer_poll(), rd_kafka_consume*(),
- * the event interface, etc.
- *
- * @param rk The client instance.
- * @param rkmessage The message being consumed. Immutable.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by consumer instances.
- *
- * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
- * by the interceptor.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)(
- rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage,
- void *ic_opaque);
-
-/**
- * @brief on_commit() is called on completed or failed offset commit.
- * It is called from internal librdkafka threads.
- *
- * @param rk The client instance.
- * @param offsets List of topic+partition+offset+error that were committed.
- * The error message of each partition should be checked for
- * error.
- * @param err The commit error, if any.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark This interceptor is only used by consumer instances.
- *
- * @warning The on_commit() interceptor is called from internal
- * librdkafka threads. An on_commit() interceptor MUST NOT
- * call any librdkafka API's associated with the \p rk, or perform
- * any blocking or prolonged work.
- *
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_resp_err_t err,
- void *ic_opaque);
-
-
-/**
- * @brief on_request_sent() is called when a request has been fully written
- *        to a broker TCP connection's socket.
- *
- * @param rk The client instance.
- * @param sockfd Socket file descriptor.
- * @param brokername Name of the broker the request is being sent to.
- * @param brokerid Id of the broker the request is being sent to.
- * @param ApiKey Kafka protocol request type.
- * @param ApiVersion Kafka protocol request type version.
- * @param CorrId Kafka protocol request correlation id.
- * @param size Size of request.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @warning The on_request_sent() interceptor is called from internal
- * librdkafka broker threads. An on_request_sent() interceptor MUST NOT
- * call any librdkafka API's associated with the \p rk, or perform
- * any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)(
- rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- void *ic_opaque);
-
-
-/**
- * @brief on_response_received() is called when a protocol response has been
- * fully received from a broker TCP connection socket but before the
- * response payload is parsed.
- *
- * @param rk The client instance.
- * @param sockfd Socket file descriptor (always -1).
- * @param brokername Name of the broker the response was received from,
- *                   possibly empty string on error.
- * @param brokerid Id of the broker the response was received from.
- * @param ApiKey Kafka protocol request type or -1 on error.
- * @param ApiVersion Kafka protocol request type version or -1 on error.
- * @param CorrId Kafka protocol request correlation id, possibly -1 on error.
- * @param size Size of response, possibly 0 on error.
- * @param rtt Request round-trip-time in microseconds, possibly -1 on error.
- * @param err Receive error.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @warning The on_response_received() interceptor is called from internal
- * librdkafka broker threads. An on_response_received() interceptor
- * MUST NOT call any librdkafka API's associated with the \p rk, or
- * perform any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)(
- rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err,
- void *ic_opaque);
-
-
-/**
- * @brief on_thread_start() is called from a newly created librdkafka-managed
- * thread.
- *
- * @param rk The client instance.
- * @param thread_type Thread type.
- * @param thread_name Human-readable thread name, may not be unique.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @warning The on_thread_start() interceptor is called from internal
- * librdkafka threads. An on_thread_start() interceptor MUST NOT
- * call any librdkafka API's associated with the \p rk, or perform
- * any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)(
- rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type,
- const char *thread_name,
- void *ic_opaque);
-
-
-/**
- * @brief on_thread_exit() is called just prior to a librdkafka-managed
- * thread exiting from the exiting thread itself.
- *
- * @param rk The client instance.
- * @param thread_type Thread type.
- * @param thread_name Human-readable thread name, may not be unique.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @remark Depending on the thread type, librdkafka may execute additional
- * code on the thread after on_thread_exit() returns.
- *
- * @warning The on_thread_exit() interceptor is called from internal
- * librdkafka threads. An on_thread_exit() interceptor MUST NOT
- * call any librdkafka API's associated with the \p rk, or perform
- * any blocking or prolonged work.
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)(
- rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type,
- const char *thread_name,
- void *ic_opaque);
-
-
-/**
- * @brief on_broker_state_change() is called just after a broker
- * has been created or its state has been changed.
- *
- * @param rk The client instance.
- * @param broker_id The broker id (-1 is used for bootstrap brokers).
- * @param secproto The security protocol.
- * @param name The original name of the broker.
- * @param port The port of the broker.
- * @param state Broker state name.
- * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
- *
- * @returns an error code on failure, the error is logged but otherwise ignored.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t)(
- rd_kafka_t *rk,
- int32_t broker_id,
- const char *secproto,
- const char *name,
- int port,
- const char *state,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_conf_set() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_conf_set Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(
- rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_conf_dup() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_conf_dup Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(
- rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
- void *ic_opaque);
-
-/**
- * @brief Append an on_conf_destroy() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_conf_destroy Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR
- *
- * @remark Multiple on_conf_destroy() interceptors are allowed to be added
- * to the same configuration object.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(
- rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_new() interceptor.
- *
- * @param conf Configuration object.
- * @param ic_name Interceptor name, used in logging.
- * @param on_new Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @remark Since the on_new() interceptor is added to the configuration object
- * it may be copied by rd_kafka_conf_dup().
- * An interceptor implementation must thus be able to handle
- * the same interceptor,ic_opaque tuple to be used by multiple
- * client instances.
- *
- * @remark An interceptor plugin should check the return value to make sure it
- * has not already been added.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_new_t *on_new,
- void *ic_opaque);
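-
-/**
- * @brief Usage sketch (illustrative): an on_new() interceptor that
- *        registers a per-instance on_send() interceptor;
- *        \c my_on_send() is hypothetical.
- *
- * @code
- * static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
- *                                      const rd_kafka_conf_t *conf,
- *                                      void *ic_opaque,
- *                                      char *errstr, size_t errstr_size) {
- *         return rd_kafka_interceptor_add_on_send(rk, "my_ic",
- *                                                 my_on_send, ic_opaque);
- * }
- *
- * // At configuration time, before rd_kafka_new():
- * rd_kafka_conf_interceptor_add_on_new(conf, "my_ic", my_on_new, NULL);
- * @endcode
- */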
-
-
-
-/**
- * @brief Append an on_destroy() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_destroy Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_destroy_t *on_destroy,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_send() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_send Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- *          if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_send(rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_send_t *on_send,
- void *ic_opaque);
-
-/**
- * @brief Append an on_acknowledgement() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_acknowledgement Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_consume() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_consume Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_consume_t *on_consume,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_commit() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_commit Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_commit_t *on_commit,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_request_sent() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_request_sent Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_request_sent_t *on_request_sent,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_response_received() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_response_received Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_response_received_t *on_response_received,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_thread_start() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_thread_start Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_thread_start_t *on_thread_start,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_thread_exit() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_thread_exit Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit,
- void *ic_opaque);
-
-
-/**
- * @brief Append an on_broker_state_change() interceptor.
- *
- * @param rk Client instance.
- * @param ic_name Interceptor name, used in logging.
- * @param on_broker_state_change Function pointer.
- * @param ic_opaque Opaque value that will be passed to the function.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
- * if an existing interceptor with the same \p ic_name and function
- * has already been added to \p conf.
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change,
- void *ic_opaque);
-
-
-
-/**@}*/
-
-
-
-/**
- * @name Auxiliary types
- *
- * @{
- */
-
-
-
-/**
- * @brief Topic result provides per-topic operation result information.
- *
- */
-
-/**
- * @returns the error code for the given topic result.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
-
-/**
- * @returns the human readable error string for the given topic result,
- * or NULL if there was no error.
- *
- * @remark lifetime of the returned string is the same as the \p topicres.
- */
-RD_EXPORT const char *
-rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
-
-/**
- * @returns the name of the topic for the given topic result.
- * @remark lifetime of the returned string is the same as the \p topicres.
- *
- */
-RD_EXPORT const char *
-rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
-
-/**
- * @brief Group result provides per-group operation result information.
- *
- */
-
-/**
- * @returns the error for the given group result, or NULL on success.
- * @remark lifetime of the returned error is the same as the \p groupres.
- */
-RD_EXPORT const rd_kafka_error_t *
-rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
-
-/**
- * @returns the name of the group for the given group result.
- * @remark lifetime of the returned string is the same as the \p groupres.
- *
- */
-RD_EXPORT const char *
-rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
-
-/**
- * @returns the partitions/offsets for the given group result, if applicable
- * to the request type, else NULL.
- * @remark lifetime of the returned list is the same as the \p groupres.
- */
-RD_EXPORT const rd_kafka_topic_partition_list_t *
-rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
-
-
-/**@}*/
-
-
-/**
- * @name Admin API
- * @{
- *
- * @brief The Admin API enables applications to perform administrative
- * Apache Kafka tasks, such as creating and deleting topics,
- * altering and reading broker configuration, etc.
- *
- * The Admin API is asynchronous and makes use of librdkafka's standard
- * \c rd_kafka_queue_t queues to propagate the result of an admin operation
- * back to the application.
- * The supplied queue may be any queue, such as a temporary single-call queue,
- * a shared queue used for multiple requests, or even the main queue or
- * consumer queues.
- *
- * Use \c rd_kafka_queue_poll() to collect the result of an admin operation
- * from the queue of your choice, then extract the admin API-specific result
- * type by using the corresponding \c rd_kafka_event_CreateTopics_result,
- * \c rd_kafka_event_DescribeConfigs_result, etc, methods.
- * Use the getter methods on the \c .._result_t type to extract response
- * information and finally destroy the result and event by calling
- * \c rd_kafka_event_destroy().
- *
- * Use rd_kafka_event_error() and rd_kafka_event_error_string() to acquire
- * the request-level error/success for an Admin API request.
- * Even if the returned value is \c RD_KAFKA_RESP_ERR_NO_ERROR there
- * may be individual objects (topics, resources, etc) that have failed.
- * Extract per-object error information with the corresponding
- * \c rd_kafka_..._result_topics|resources|..() to check per-object errors.
- *
- * Locally triggered errors:
- * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not
- *   become available in the time allowed by
- *   rd_kafka_AdminOptions_set_request_timeout().
- */
-
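-/**
- * A minimal sketch of the request/response cycle described above (assumes
- * \c rk is an existing client instance; the actual admin call, e.g.
- * rd_kafka_CreateTopics(), is elided and error handling is abbreviated):
- *
- * @code
- * rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *
- * // ...issue an admin request with `queue` as the result queue, then
- * // await the result event (NULL is returned on timeout):
- * rd_kafka_event_t *event = rd_kafka_queue_poll(queue, 10*1000);
- *
- * if (event) {
- *         if (rd_kafka_event_error(event))
- *                 fprintf(stderr, "Request failed: %s\n",
- *                         rd_kafka_event_error_string(event));
- *         // ...extract the API-specific result type here...
- *         rd_kafka_event_destroy(event); // Also destroys the result
- * }
- * rd_kafka_queue_destroy(queue);
- * @endcode
- */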
-
-/**
- * @enum rd_kafka_admin_op_t
- *
- * @brief Admin operation enum name for use with rd_kafka_AdminOptions_new()
- *
- * @sa rd_kafka_AdminOptions_new()
- */
-typedef enum rd_kafka_admin_op_t {
- RD_KAFKA_ADMIN_OP_ANY = 0, /**< Default value */
- RD_KAFKA_ADMIN_OP_CREATETOPICS, /**< CreateTopics */
- RD_KAFKA_ADMIN_OP_DELETETOPICS, /**< DeleteTopics */
- RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, /**< CreatePartitions */
- RD_KAFKA_ADMIN_OP_ALTERCONFIGS, /**< AlterConfigs */
- RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, /**< DescribeConfigs */
- RD_KAFKA_ADMIN_OP_DELETERECORDS, /**< DeleteRecords */
- RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */
- /** DeleteConsumerGroupOffsets */
- RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
- RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */
- RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */
- RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */
- RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, /**< ListConsumerGroups */
- RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, /**< DescribeConsumerGroups */
- /** ListConsumerGroupOffsets */
- RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
- /** AlterConsumerGroupOffsets */
- RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
- RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */
-} rd_kafka_admin_op_t;
-
-/**
- * @brief AdminOptions provides a generic mechanism for setting optional
- * parameters for the Admin API requests.
- *
- * @remark Since AdminOptions is decoupled from the actual request type
- * there is no enforcement to prevent setting unrelated properties,
- * e.g. setting validate_only on a DescribeConfigs request is allowed
- * but is silently ignored by DescribeConfigs.
- * Future versions may introduce such enforcement.
- */
-
-
-typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
-
-/**
- * @brief Create a new AdminOptions object.
- *
- * The options object is not modified by the Admin API request APIs
- * (e.g. CreateTopics) and may be reused for multiple calls.
- *
- * @param rk Client instance.
- * @param for_api Specifies what Admin API this AdminOptions object will be used
- * for, which will enforce what AdminOptions_set_..() calls may
- * be used based on the API, causing unsupported set..() calls
- * to fail.
- * Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement
- * allowing any option to be set, even if the option
- * is not used in a future call to an Admin API method.
- *
- * @returns a new AdminOptions object (which must be freed with
- * rd_kafka_AdminOptions_destroy()), or NULL if \p for_api was set to
- * an unknown API op type.
- */
-RD_EXPORT rd_kafka_AdminOptions_t *
-rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
-
-
-/**
- * @brief Destroy an AdminOptions object.
- */
-RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
-
-
-/**
- * @brief Sets the overall request timeout, including broker lookup,
- * request transmission, operation time on broker, and response.
- *
- * @param options Admin options.
- * @param timeout_ms Timeout in milliseconds, use -1 for indefinite timeout.
- * Defaults to `socket.timeout.ms`.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
- * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
- *          case an error string will be written to \p errstr.
- *
- * @remark This option is valid for all Admin API requests.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options,
- int timeout_ms,
- char *errstr,
- size_t errstr_size);
-
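-/**
- * Illustrative sketch: create an AdminOptions object for CreateTopics and
- * set an overall request timeout of 10 seconds (an arbitrary example value;
- * assumes \c rk exists, error handling abbreviated):
- *
- * @code
- * char errstr[512];
- * rd_kafka_AdminOptions_t *options =
- *         rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
- *
- * if (rd_kafka_AdminOptions_set_request_timeout(options, 10*1000,
- *                                               errstr, sizeof(errstr)))
- *         fprintf(stderr, "set_request_timeout: %s\n", errstr);
- *
- * // ...pass `options` to rd_kafka_CreateTopics(), then:
- * rd_kafka_AdminOptions_destroy(options);
- * @endcode
- */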
-
-/**
- * @brief Sets the broker's operation timeout, such as the timeout for
- * CreateTopics to complete the creation of topics on the controller
- * before returning a result to the application.
- *
- * CreateTopics: values <= 0 will return immediately after triggering topic
- * creation, while > 0 will wait this long for topic creation to propagate
- * in cluster. Default: 60 seconds.
- *
- * DeleteTopics: same semantics as CreateTopics.
- * CreatePartitions: same semantics as CreateTopics.
- *
- * @param options Admin options.
- * @param timeout_ms Timeout in milliseconds.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
- * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
- *          case an error string will be written to \p errstr.
- *
- * @remark This option is valid for CreateTopics, DeleteTopics,
- * CreatePartitions, and DeleteRecords.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options,
- int timeout_ms,
- char *errstr,
- size_t errstr_size);
-
-
-/**
- * @brief Tell broker to only validate the request, without performing
- * the requested operation (create topics, etc).
- *
- * @param options Admin options.
- * @param true_or_false Defaults to false.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
- * error code on failure in which case an error string will
- *          be written to \p errstr.
- *
- * @remark This option is valid for CreateTopics,
- * CreatePartitions, AlterConfigs.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options,
- int true_or_false,
- char *errstr,
- size_t errstr_size);
-
-
-/**
- * @brief Override what broker the Admin request will be sent to.
- *
- * By default, Admin requests are sent to the controller broker, with
- * the following exceptions:
- * - AlterConfigs with a BROKER resource are sent to the broker id set
- * as the resource name.
- * - DescribeConfigs with a BROKER resource are sent to the broker id set
- * as the resource name.
- *
- * @param options Admin Options.
- * @param broker_id The broker to send the request to.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
- * error code on failure in which case an error string will
- *          be written to \p errstr.
- *
- * @remark This API should typically not be used, but serves as a workaround
- *         if new resource types are added to the broker that the client
- *         does not know where to send.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options,
- int32_t broker_id,
- char *errstr,
- size_t errstr_size);
-
-
-/**
- * @brief Whether broker should return stable offsets
- * (transaction-committed).
- *
- * @param options Admin options.
- * @param true_or_false Defaults to false.
- *
- * @return NULL on success, a new error instance that must be
- * released with rd_kafka_error_destroy() in case of error.
- *
- * @remark This option is valid for ListConsumerGroupOffsets.
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(
- rd_kafka_AdminOptions_t *options,
- int true_or_false);
-
-/**
- * @brief Set consumer groups states to query for.
- *
- * @param options Admin options.
- * @param consumer_group_states Array of consumer group states.
- * @param consumer_group_states_cnt Size of the \p consumer_group_states array.
- *
- * @return NULL on success, a new error instance that must be
- * released with rd_kafka_error_destroy() in case of error.
- *
- * @remark This option is valid for ListConsumerGroups.
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(
- rd_kafka_AdminOptions_t *options,
- const rd_kafka_consumer_group_state_t *consumer_group_states,
- size_t consumer_group_states_cnt);
-
-/**
- * @brief Set application opaque value that can be extracted from the
- * result event using rd_kafka_event_opaque()
- */
-RD_EXPORT void
-rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
- void *ev_opaque);
-
-/**@}*/
-
-/**
- * @name Admin API - Topics
- * @brief Topic related operations.
- * @{
- *
- */
-
-
-/*! Defines a new topic to be created. */
-typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
-
-/**
- * @brief Create a new NewTopic object. This object is later passed to
- * rd_kafka_CreateTopics().
- *
- * @param topic Topic name to create.
- * @param num_partitions Number of partitions in topic, or -1 to use the
- * broker's default partition count (>= 2.4.0).
- * @param replication_factor Default replication factor for the topic's
- * partitions, or -1 to use the broker's default
- * replication factor (>= 2.4.0) or if
- * set_replica_assignment() will be used.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- *
- * @returns a new allocated NewTopic object, or NULL if the input parameters
- * are invalid.
- * Use rd_kafka_NewTopic_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic,
- int num_partitions,
- int replication_factor,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @brief Destroy and free a NewTopic object previously created with
- * rd_kafka_NewTopic_new()
- */
-RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
-
-
-/**
- * @brief Helper function to destroy all NewTopic objects in the \p new_topics
- * array (of \p new_topic_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics,
- size_t new_topic_cnt);
-
-
-/**
- * @brief Set the replica (broker) assignment for \p partition to the
- * replica set in \p broker_ids (of \p broker_id_cnt elements).
- *
- * @remark When this method is used, rd_kafka_NewTopic_new() must have
- * been called with a \c replication_factor of -1.
- *
- * @remark An application must either set the replica assignment for
- * all new partitions, or none.
- *
- * @remark If called, this function must be called consecutively for each
- * partition, starting at 0.
- *
- * @remark Use rd_kafka_metadata() to retrieve the list of brokers
- * in the cluster.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
- * if the arguments were invalid.
- *
- * @sa rd_kafka_AdminOptions_set_validate_only()
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic,
- int32_t partition,
- int32_t *broker_ids,
- size_t broker_id_cnt,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @brief Set (broker-side) topic configuration name/value pair.
- *
- * @remark The name and value are not validated by the client, the validation
- * takes place on the broker.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
- * if the arguments were invalid.
- *
- * @sa rd_kafka_AdminOptions_set_validate_only()
- * @sa http://kafka.apache.org/documentation.html#topicconfigs
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic,
- const char *name,
- const char *value);
-
-
-/**
- * @brief Create topics in cluster as specified by the \p new_topics
- * array of size \p new_topic_cnt elements.
- *
- * @param rk Client instance.
- * @param new_topics Array of new topics to create.
- * @param new_topic_cnt Number of elements in \p new_topics array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- * - rd_kafka_AdminOptions_set_validate_only() - default false
- * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
- * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT
- */
-RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk,
- rd_kafka_NewTopic_t **new_topics,
- size_t new_topic_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
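-/**
- * Illustrative sketch: create one topic with 3 partitions and replication
- * factor 1, plus one broker-side config property ("mytopic" and the values
- * are placeholders; assumes \c rk and \c queue exist, error handling
- * abbreviated):
- *
- * @code
- * char errstr[512];
- * rd_kafka_NewTopic_t *new_topic =
- *         rd_kafka_NewTopic_new("mytopic", 3, 1, errstr, sizeof(errstr));
- *
- * rd_kafka_NewTopic_set_config(new_topic, "cleanup.policy", "compact");
- *
- * rd_kafka_CreateTopics(rk, &new_topic, 1, NULL, queue);
- *
- * // The request objects are copied, so they may be destroyed right away:
- * rd_kafka_NewTopic_destroy(new_topic);
- * @endcode
- */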
-
-/*
- * CreateTopics result type and methods
- */
-
-/**
- * @brief Get an array of topic results from a CreateTopics result.
- *
- * The returned \p topics life-time is the same as the \p result object.
- *
- * @param result Result to get topics from.
- * @param cntp Updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(
- const rd_kafka_CreateTopics_result_t *result,
- size_t *cntp);
-
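-/**
- * Illustrative sketch: extract the per-topic results from a polled
- * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT event (assumes \c event is such
- * an event; abbreviated):
- *
- * @code
- * size_t cnt;
- * const rd_kafka_topic_result_t **topics =
- *         rd_kafka_CreateTopics_result_topics(
- *                 rd_kafka_event_CreateTopics_result(event), &cnt);
- *
- * for (size_t i = 0; i < cnt; i++) {
- *         const char *errmsg = rd_kafka_topic_result_error_string(topics[i]);
- *         printf("%s: %s\n", rd_kafka_topic_result_name(topics[i]),
- *                errmsg ? errmsg : "ok");
- * }
- *
- * rd_kafka_event_destroy(event); // Invalidates `topics` as well
- * @endcode
- */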
-
-
-/*
- * DeleteTopics - delete topics from cluster
- *
- */
-
-/*! Represents a topic to be deleted. */
-typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
-
-/**
- * @brief Create a new DeleteTopic object. This object is later passed to
- * rd_kafka_DeleteTopics().
- *
- * @param topic Topic name to delete.
- *
- * @returns a new allocated DeleteTopic object.
- * Use rd_kafka_DeleteTopic_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
-
-/**
- * @brief Destroy and free a DeleteTopic object previously created with
- * rd_kafka_DeleteTopic_new()
- */
-RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
-
-/**
- * @brief Helper function to destroy all DeleteTopic objects in
- * the \p del_topics array (of \p del_topic_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics,
- size_t del_topic_cnt);
-
-/**
- * @brief Delete topics from cluster as specified by the \p del_topics
- *        array of size \p del_topic_cnt elements.
- *
- * @param rk Client instance.
- * @param del_topics Array of topics to delete.
- * @param del_topic_cnt Number of elements in \p del_topics array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DELETETOPICS_RESULT
- */
-RD_EXPORT
-void rd_kafka_DeleteTopics(rd_kafka_t *rk,
- rd_kafka_DeleteTopic_t **del_topics,
- size_t del_topic_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-
-
-/*
- * DeleteTopics result type and methods
- */
-
-/**
- * @brief Get an array of topic results from a DeleteTopics result.
- *
- * The returned \p topics life-time is the same as the \p result object.
- *
- * @param result Result to get topic results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(
- const rd_kafka_DeleteTopics_result_t *result,
- size_t *cntp);
-
-
-/**@}*/
-
-/**
- * @name Admin API - Partitions
- * @brief Partition related operations.
- * @{
- *
- */
-
-/*! Defines new partitions to be added to an existing topic. */
-typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
-
-/**
- * @brief Create a new NewPartitions object. This object is later passed to
- * rd_kafka_CreatePartitions() to increase the number of partitions
- * to \p new_total_cnt for an existing topic.
- *
- * @param topic Topic name to create more partitions for.
- * @param new_total_cnt Increase the topic's partition count to this value.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * @returns a new allocated NewPartitions object, or NULL if the
- * input parameters are invalid.
- * Use rd_kafka_NewPartitions_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_NewPartitions_t *
-rd_kafka_NewPartitions_new(const char *topic,
- size_t new_total_cnt,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @brief Destroy and free a NewPartitions object previously created with
- * rd_kafka_NewPartitions_new()
- */
-RD_EXPORT void
-rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
-
-/**
- * @brief Helper function to destroy all NewPartitions objects in the
- * \p new_parts array (of \p new_parts_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts,
- size_t new_parts_cnt);
-
-/**
- * @brief Set the replica (broker id) assignment for \p new_partition_idx to the
- * replica set in \p broker_ids (of \p broker_id_cnt elements).
- *
- * @remark An application must either set the replica assignment for
- * all new partitions, or none.
- *
- * @remark If called, this function must be called consecutively for each
- * new partition being created,
- * where \p new_partition_idx 0 is the first new partition,
- * 1 is the second, and so on.
- *
- * @remark \p broker_id_cnt should match the topic's replication factor.
- *
- * @remark Use rd_kafka_metadata() to retrieve the list of brokers
- * in the cluster.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
- * if the arguments were invalid.
- *
- * @sa rd_kafka_AdminOptions_set_validate_only()
- */
-RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(
- rd_kafka_NewPartitions_t *new_parts,
- int32_t new_partition_idx,
- int32_t *broker_ids,
- size_t broker_id_cnt,
- char *errstr,
- size_t errstr_size);
-
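-/**
- * Illustrative sketch: grow a topic from 2 to 4 partitions and assign both
- * new partitions to brokers 1 and 2 ("mytopic" and the broker ids are
- * placeholders; per the remarks above, assignments must be set
- * consecutively starting at index 0):
- *
- * @code
- * char errstr[512];
- * int32_t replicas[] = {1, 2};
- * rd_kafka_NewPartitions_t *new_parts =
- *         rd_kafka_NewPartitions_new("mytopic", 4, errstr, sizeof(errstr));
- *
- * for (int32_t idx = 0; idx < 2; idx++) // New partition indexes 0 and 1
- *         rd_kafka_NewPartitions_set_replica_assignment(
- *                 new_parts, idx, replicas, 2, errstr, sizeof(errstr));
- *
- * rd_kafka_CreatePartitions(rk, &new_parts, 1, NULL, queue);
- * rd_kafka_NewPartitions_destroy(new_parts);
- * @endcode
- */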
-
-/**
- * @brief Create additional partitions for the given topics, as specified
- * by the \p new_parts array of size \p new_parts_cnt elements.
- *
- * @param rk Client instance.
- * @param new_parts Array of topics for which new partitions are to be created.
- * @param new_parts_cnt Number of elements in \p new_parts array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- * - rd_kafka_AdminOptions_set_validate_only() - default false
- * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
- * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
- */
-RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk,
- rd_kafka_NewPartitions_t **new_parts,
- size_t new_parts_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-
-
-/*
- * CreatePartitions result type and methods
- */
-
-/**
- * @brief Get an array of topic results from a CreatePartitions result.
- *
- * The returned \p topics life-time is the same as the \p result object.
- *
- * @param result Result to get topic results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_topic_result_t **
-rd_kafka_CreatePartitions_result_topics(
- const rd_kafka_CreatePartitions_result_t *result,
- size_t *cntp);
-
-/**@}*/
-
-/**
- * @name Admin API - Configuration
- * @brief Cluster, broker, topic configuration entries, sources, etc.
- * @{
- *
- */
-
-/**
- * @enum rd_kafka_ConfigSource_t
- *
- * @brief Apache Kafka config sources.
- *
- * @remark These entities relate to the cluster, not the local client.
- *
- * @sa rd_kafka_conf_set() et al. for local client configuration.
- */
-typedef enum rd_kafka_ConfigSource_t {
- /** Source unknown, e.g., in the ConfigEntry used for alter requests
- * where source is not set */
- RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
- /** Dynamic topic config that is configured for a specific topic */
- RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
- /** Dynamic broker config that is configured for a specific broker */
- RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
- /** Dynamic broker config that is configured as default for all
- * brokers in the cluster */
- RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
- /** Static broker config provided as broker properties at startup
- * (e.g. from server.properties file) */
- RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
- /** Built-in default configuration for configs that have a
- * default value */
- RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
-
- /** Number of source types defined */
- RD_KAFKA_CONFIG_SOURCE__CNT,
-} rd_kafka_ConfigSource_t;
-
-
-/**
- * @returns a string representation of the \p confsource.
- */
-RD_EXPORT const char *
-rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
-
-
-/*! Apache Kafka configuration entry. */
-typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
-
-/**
- * @returns the configuration property name
- */
-RD_EXPORT const char *
-rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns the configuration value, may be NULL for sensitive or unset
- * properties.
- */
-RD_EXPORT const char *
-rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns the config source.
- */
-RD_EXPORT rd_kafka_ConfigSource_t
-rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if the config property is read-only on the broker, else 0.
- * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if the config property is set to its default value on the broker,
- * else 0.
- * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if the config property contains sensitive information (such as
- * security configuration), else 0.
- * @remark An application should take care not to include the value of
- * sensitive configuration entries in its output.
- * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
-
-/**
- * @returns 1 if this entry is a synonym, else 0.
- */
-RD_EXPORT int
-rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
-
-
-/**
- * @returns the synonym config entry array.
- *
- * @param entry Entry to get synonyms for.
- * @param cntp is updated to the number of elements in the array.
- *
- * @remark The lifetime of the returned entries is the same as \p entry.
- * @remark Shall only be used on a DescribeConfigs result,
- * otherwise returns NULL.
- */
-RD_EXPORT const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry,
- size_t *cntp);
-
-
-
-/**
- * @enum rd_kafka_ResourceType_t
- * @brief Apache Kafka resource types
- */
-typedef enum rd_kafka_ResourceType_t {
- RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */
- RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */
- RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */
- RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */
- RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */
- RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */
-} rd_kafka_ResourceType_t;
-
-/**
- * @enum rd_kafka_ResourcePatternType_t
- * @brief Apache Kafka pattern types
- */
-typedef enum rd_kafka_ResourcePatternType_t {
- /** Unknown */
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
- /** Any (used for lookups) */
- RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
- /** Match: will perform pattern matching */
- RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
- /** Literal: A literal resource name */
- RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
- /** Prefixed: A prefixed resource name */
- RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
- RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
-} rd_kafka_ResourcePatternType_t;
-
-/**
- * @returns a string representation of the \p resource_pattern_type
- */
-RD_EXPORT const char *rd_kafka_ResourcePatternType_name(
- rd_kafka_ResourcePatternType_t resource_pattern_type);
-
-/**
- * @returns a string representation of the \p restype
- */
-RD_EXPORT const char *
-rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
-
-/*! Apache Kafka configuration resource. */
-typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
-
-
-/**
- * @brief Create new ConfigResource object.
- *
- * @param restype The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
- * @param resname The resource name (e.g., the topic name)
- *
- * @returns a newly allocated object
- */
-RD_EXPORT rd_kafka_ConfigResource_t *
-rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype,
- const char *resname);
-
-/**
- * @brief Destroy and free a ConfigResource object previously created with
- * rd_kafka_ConfigResource_new()
- */
-RD_EXPORT void
-rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
-
-
-/**
- * @brief Helper function to destroy all ConfigResource objects in
- * the \p configs array (of \p config_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config,
- size_t config_cnt);
-
-
-/**
- * @brief Set configuration name value pair.
- *
- * @param config ConfigResource to set config property on.
- * @param name Configuration name, depends on resource type.
- * @param value Configuration value, depends on resource type and \p name.
- * Set to \c NULL to revert configuration value to default.
- *
- * This will overwrite the current value.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource,
- * or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
- const char *name,
- const char *value);
-
-
-/**
- * @brief Get an array of config entries from a ConfigResource object.
- *
- * The returned object life-times are the same as the \p config object.
- *
- * @param config ConfigResource to get configs from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config,
- size_t *cntp);
-
-
-
-/**
- * @returns the ResourceType for \p config
- */
-RD_EXPORT rd_kafka_ResourceType_t
-rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
-
-/**
- * @returns the name for \p config
- */
-RD_EXPORT const char *
-rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
-
-/**
- * @returns the error for this resource from an AlterConfigs request
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
-
-/**
- * @returns the error string for this resource from an AlterConfigs
- * request, or NULL if no error.
- */
-RD_EXPORT const char *
-rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
-
-
-/*
- * AlterConfigs - alter cluster configuration.
- *
- */
-
-
-/**
- * @brief Update the configuration for the specified resources.
- * Updates are not transactional so they may succeed for a subset
- * of the provided resources while the others fail.
- *        The configuration for a particular resource is updated atomically,
- *        replacing values using the provided ConfigEntry objects and
- *        reverting unspecified ConfigEntry objects to their default values.
- *
- * @remark Requires broker version >=0.11.0.0
- *
- * @warning AlterConfigs will replace all existing configuration for
- * the provided resources with the new configuration given,
- * reverting all other configuration to their default values.
- *
- * @remark Multiple resources and resource types may be set, but at most one
- * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
- * since these resource requests must be sent to the broker specified
- * in the resource.
- *
- */
-RD_EXPORT
-void rd_kafka_AlterConfigs(rd_kafka_t *rk,
- rd_kafka_ConfigResource_t **configs,
- size_t config_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
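-/**
- * Illustrative sketch: set \c retention.ms for a topic ("mytopic" and the
- * value are placeholders). Per the warning above, any topic configuration
- * not supplied here reverts to its default value:
- *
- * @code
- * rd_kafka_ConfigResource_t *config =
- *         rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
- *
- * rd_kafka_ConfigResource_set_config(config, "retention.ms", "86400000");
- *
- * rd_kafka_AlterConfigs(rk, &config, 1, NULL, queue);
- * rd_kafka_ConfigResource_destroy(config);
- * @endcode
- */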
-
-/*
- * AlterConfigs result type and methods
- */
-
-/**
- * @brief Get an array of resource results from an AlterConfigs result.
- *
- * Use \c rd_kafka_ConfigResource_error() and
- * \c rd_kafka_ConfigResource_error_string() to extract per-resource error
- * results on the returned array elements.
- *
- * The returned object life-times are the same as the \p result object.
- *
- * @param result Result object to get resource results from.
- * @param cntp is updated to the number of elements in the array.
- *
- * @returns an array of ConfigResource elements, or NULL if not available.
- */
-RD_EXPORT const rd_kafka_ConfigResource_t **
-rd_kafka_AlterConfigs_result_resources(
- const rd_kafka_AlterConfigs_result_t *result,
- size_t *cntp);
-
-
-
-/*
- * DescribeConfigs - retrieve cluster configuration.
- *
- */
-
-
-/**
- * @brief Get configuration for the specified resources in \p configs.
- *
- * The returned configuration includes default values and the
- * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source()
- * methods may be used to distinguish them from user supplied values.
- *
- * The value of config entries where rd_kafka_ConfigEntry_is_sensitive()
- * is true will always be NULL to avoid disclosing sensitive
- * information, such as security settings.
- *
- * Configuration entries where rd_kafka_ConfigEntry_is_read_only()
- * is true can't be updated (with rd_kafka_AlterConfigs()).
- *
- * Synonym configuration entries are returned if the broker supports
- * it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms().
- *
- * @remark Requires broker version >=0.11.0.0
- *
- * @remark Multiple resources and resource types may be requested, but at most
- * one resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
- * since these resource requests must be sent to the broker specified
- * in the resource.
- */
-RD_EXPORT
-void rd_kafka_DescribeConfigs(rd_kafka_t *rk,
- rd_kafka_ConfigResource_t **configs,
- size_t config_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
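-/**
- * Illustrative sketch: describe one topic's configuration ("mytopic" is a
- * placeholder) and print its non-default entries once the
- * \c RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT event has been polled
- * (abbreviated; per-resource error checks elided):
- *
- * @code
- * rd_kafka_ConfigResource_t *config =
- *         rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
- * rd_kafka_DescribeConfigs(rk, &config, 1, NULL, queue);
- * rd_kafka_ConfigResource_destroy(config);
- *
- * // ...after polling `event` from `queue`:
- * size_t rescnt, entrycnt;
- * const rd_kafka_ConfigResource_t **resources =
- *         rd_kafka_DescribeConfigs_result_resources(
- *                 rd_kafka_event_DescribeConfigs_result(event), &rescnt);
- * const rd_kafka_ConfigEntry_t **entries =
- *         rd_kafka_ConfigResource_configs(resources[0], &entrycnt);
- *
- * for (size_t i = 0; i < entrycnt; i++) {
- *         const char *value = rd_kafka_ConfigEntry_value(entries[i]);
- *         if (!rd_kafka_ConfigEntry_is_default(entries[i]))
- *                 printf("%s=%s\n", rd_kafka_ConfigEntry_name(entries[i]),
- *                        value ? value : "(sensitive)");
- * }
- * @endcode
- */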
-
-
-/*
- * DescribeConfigs result type and methods
- */
-
-/**
- * @brief Get an array of resource results from a DescribeConfigs result.
- *
- * The returned \p resources life-time is the same as the \p result object.
- *
- * @param result Result object to get resource results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_ConfigResource_t **
-rd_kafka_DescribeConfigs_result_resources(
- const rd_kafka_DescribeConfigs_result_t *result,
- size_t *cntp);
-
-
-/**@}*/
-
-/**
- * @name Admin API - DeleteRecords
- * @brief Delete records (messages) from partitions.
- * @{
- *
- */
-
-/*! Represents records to be deleted. */
-typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
-
-/**
- * @brief Create a new DeleteRecords object. This object is later passed to
- * rd_kafka_DeleteRecords().
- *
- * \p before_offsets must contain \c topic, \c partition, and
- * \c offset, where \c offset is the offset before which the messages
- * will be deleted (exclusive).
- * Set \c offset to RD_KAFKA_OFFSET_END (high-watermark) in order to
- * delete all data in the partition.
- *
- * @param before_offsets For each partition delete all messages up to but not
- * including the specified offset.
- *
- * @returns a new allocated DeleteRecords object.
- * Use rd_kafka_DeleteRecords_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
- const rd_kafka_topic_partition_list_t *before_offsets);
-
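-/**
- * Illustrative sketch: delete all messages in partition 0 of "mytopic"
- * (a placeholder) by truncating up to the high-watermark:
- *
- * @code
- * rd_kafka_topic_partition_list_t *before_offsets =
- *         rd_kafka_topic_partition_list_new(1);
- *
- * rd_kafka_topic_partition_list_add(before_offsets, "mytopic", 0)->offset =
- *         RD_KAFKA_OFFSET_END; // Delete all data in the partition
- *
- * rd_kafka_DeleteRecords_t *del_records =
- *         rd_kafka_DeleteRecords_new(before_offsets);
- * rd_kafka_topic_partition_list_destroy(before_offsets);
- *
- * rd_kafka_DeleteRecords(rk, &del_records, 1, NULL, queue);
- * rd_kafka_DeleteRecords_destroy(del_records);
- * @endcode
- */
-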
-/**
- * @brief Destroy and free a DeleteRecords object previously created with
- * rd_kafka_DeleteRecords_new()
- */
-RD_EXPORT void
-rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
-
-/**
- * @brief Helper function to destroy all DeleteRecords objects in
- *        the \p del_records array (of \p del_record_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records,
- size_t del_record_cnt);
-
-/**
- * @brief Delete records (messages) in topic partitions older than the
- * offsets provided.
- *
- * @param rk Client instance.
- * @param del_records The offsets to delete (up to).
- * Currently only one DeleteRecords_t (but containing
- * multiple offsets) is supported.
- * @param del_record_cnt The number of elements in del_records, must be 1.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds.
- * Controls how long the brokers will wait for records to be deleted.
- * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms.
- * Controls how long \c rdkafka will wait for the request to complete.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DELETERECORDS_RESULT
- */
-RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk,
- rd_kafka_DeleteRecords_t **del_records,
- size_t del_record_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-
-/*
- * DeleteRecords result type and methods
- */
-
-/**
- * @brief Get a list of topic and partition results from a DeleteRecords result.
- * The returned objects will contain \c topic, \c partition, \c offset
- * and \c err. \c offset will be set to the post-deletion low-watermark
- * (smallest available offset of all live replicas). \c err will be set
- * per-partition if deletion failed.
- *
- * The returned object's life-time is the same as the \p result object.
- */
-RD_EXPORT const rd_kafka_topic_partition_list_t *
-rd_kafka_DeleteRecords_result_offsets(
- const rd_kafka_DeleteRecords_result_t *result);
-
-/**@}*/
-
-/**
- * @name Admin API - ListConsumerGroups
- * @{
- */
-
-
-/*! ListConsumerGroups result for a single group. */
-typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
-
-/*! ListConsumerGroups results and errors. */
-typedef struct rd_kafka_ListConsumerGroupsResult_s
- rd_kafka_ListConsumerGroupsResult_t;
-
-/**
- * @brief List the consumer groups available in the cluster.
- *
- * @param rk Client instance.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
- */
-RD_EXPORT
-void rd_kafka_ListConsumerGroups(rd_kafka_t *rk,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-/**
- * @brief Gets the group id for the \p grplist group.
- *
- * @param grplist The group listing.
- *
- * @return The group id.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p grplist object.
- */
-RD_EXPORT
-const char *rd_kafka_ConsumerGroupListing_group_id(
- const rd_kafka_ConsumerGroupListing_t *grplist);
-
-/**
- * @brief Is the \p grplist group a simple consumer group.
- *
- * @param grplist The group listing.
- *
- * @return 1 if the group is a simple consumer group,
- * else 0.
- */
-RD_EXPORT
-int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
- const rd_kafka_ConsumerGroupListing_t *grplist);
-
-/**
- * @brief Gets state for the \p grplist group.
- *
- * @param grplist The group listing.
- *
- * @return A group state.
- */
-RD_EXPORT
-rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(
- const rd_kafka_ConsumerGroupListing_t *grplist);
-
-/**
- * @brief Get an array of valid list groups from a ListConsumerGroups result.
- *
- * The returned groups life-time is the same as the \p result object.
- *
- * @param result Result to get group results from.
- * @param cntp is updated to the number of elements in the array.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p result object.
- */
-RD_EXPORT
-const rd_kafka_ConsumerGroupListing_t **
-rd_kafka_ListConsumerGroups_result_valid(
- const rd_kafka_ListConsumerGroups_result_t *result,
- size_t *cntp);
-
-/**
- * @brief Get an array of errors from a ListConsumerGroups call result.
- *
- * The returned errors life-time is the same as the \p result object.
- *
- * @param result ListConsumerGroups result.
- * @param cntp Is updated to the number of elements in the array.
- *
- * @return Array of errors in \p result.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p result object.
- */
-RD_EXPORT
-const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(
- const rd_kafka_ListConsumerGroups_result_t *result,
- size_t *cntp);
-
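-/**
- * Illustrative sketch: list the cluster's consumer groups and print each
- * valid group's id after polling the
- * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT event (abbreviated):
- *
- * @code
- * rd_kafka_ListConsumerGroups(rk, NULL, queue);
- *
- * // ...after polling `event` from `queue`:
- * const rd_kafka_ListConsumerGroups_result_t *result =
- *         rd_kafka_event_ListConsumerGroups_result(event);
- * size_t valid_cnt, error_cnt;
- * const rd_kafka_ConsumerGroupListing_t **valid =
- *         rd_kafka_ListConsumerGroups_result_valid(result, &valid_cnt);
- * const rd_kafka_error_t **errors =
- *         rd_kafka_ListConsumerGroups_result_errors(result, &error_cnt);
- *
- * for (size_t i = 0; i < valid_cnt; i++)
- *         printf("group: %s\n",
- *                rd_kafka_ConsumerGroupListing_group_id(valid[i]));
- * for (size_t i = 0; i < error_cnt; i++)
- *         fprintf(stderr, "error: %s\n", rd_kafka_error_string(errors[i]));
- * @endcode
- */
-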
-/**@}*/
-
-/**
- * @name Admin API - DescribeConsumerGroups
- * @{
- */
-
-/**
- * @brief DescribeConsumerGroups result type.
- *
- */
-typedef struct rd_kafka_ConsumerGroupDescription_s
- rd_kafka_ConsumerGroupDescription_t;
-
-/**
- * @brief Member description included in ConsumerGroupDescription.
- *
- */
-typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
-
-/**
- * @brief Member assignment included in MemberDescription.
- *
- */
-typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
-
-/**
- * @brief Describe groups from cluster as specified by the \p groups
- * array of size \p groups_cnt elements.
- *
- * @param rk Client instance.
- * @param groups Array of groups to describe.
- * @param groups_cnt Number of elements in \p groups array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
- */
-RD_EXPORT
-void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk,
- const char **groups,
- size_t groups_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-/**
- * @brief Get an array of group results from a DescribeConsumerGroups result.
- *
- * The returned groups life-time is the same as the \p result object.
- *
- * @param result Result to get group results from.
- * @param cntp is updated to the number of elements in the array.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p result object.
- */
-RD_EXPORT
-const rd_kafka_ConsumerGroupDescription_t **
-rd_kafka_DescribeConsumerGroups_result_groups(
- const rd_kafka_DescribeConsumerGroups_result_t *result,
- size_t *cntp);
-
-
-/**
- * @brief Gets the group id for the \p grpdesc group.
- *
- * @param grpdesc The group description.
- *
- * @return The group id.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p grpdesc object.
- */
-RD_EXPORT
-const char *rd_kafka_ConsumerGroupDescription_group_id(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-/**
- * @brief Gets the error for the \p grpdesc group.
- *
- * @param grpdesc The group description.
- *
- * @return The group description error.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p grpdesc object.
- */
-RD_EXPORT
-const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-/**
- * @brief Is the \p grpdesc group a simple consumer group.
- *
- * @param grpdesc The group description.
- * @return 1 if the group is a simple consumer group,
- * else 0.
- */
-RD_EXPORT
-int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-
-/**
- * @brief Gets the partition assignor for the \p grpdesc group.
- *
- * @param grpdesc The group description.
- *
- * @return The partition assignor.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p grpdesc object.
- */
-RD_EXPORT
-const char *rd_kafka_ConsumerGroupDescription_partition_assignor(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-
-/**
- * @brief Gets state for the \p grpdesc group.
- *
- * @param grpdesc The group description.
- *
- * @return A group state.
- */
-RD_EXPORT
-rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-/**
- * @brief Gets the coordinator for the \p grpdesc group.
- *
- * @param grpdesc The group description.
- *
- * @return The group coordinator.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p grpdesc object.
- */
-RD_EXPORT
-const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-/**
- * @brief Gets the member count of the \p grpdesc group.
- *
- * @param grpdesc The group description.
- *
- * @return The member count.
- */
-RD_EXPORT
-size_t rd_kafka_ConsumerGroupDescription_member_count(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc);
-
-/**
- * @brief Gets a member of \p grpdesc group.
- *
- * @param grpdesc The group description.
- * @param idx The member idx.
- *
- * @return A member at index \p idx, or NULL if
- * \p idx is out of range.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p grpdesc object.
- */
-RD_EXPORT
-const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc,
- size_t idx);
-
-/**
- * @brief Gets client id of \p member.
- *
- * @param member The group member.
- *
- * @return The client id.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p member object.
- */
-RD_EXPORT
-const char *rd_kafka_MemberDescription_client_id(
- const rd_kafka_MemberDescription_t *member);
-
-/**
- * @brief Gets group instance id of \p member.
- *
- * @param member The group member.
- *
- * @return The group instance id, or NULL if not available.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p member object.
- */
-RD_EXPORT
-const char *rd_kafka_MemberDescription_group_instance_id(
- const rd_kafka_MemberDescription_t *member);
-
-/**
- * @brief Gets consumer id of \p member.
- *
- * @param member The group member.
- *
- * @return The consumer id.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p member object.
- */
-RD_EXPORT
-const char *rd_kafka_MemberDescription_consumer_id(
- const rd_kafka_MemberDescription_t *member);
-
-/**
- * @brief Gets host of \p member.
- *
- * @param member The group member.
- *
- * @return The host.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p member object.
- */
-RD_EXPORT
-const char *
-rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
-
-/**
- * @brief Gets assignment of \p member.
- *
- * @param member The group member.
- *
- * @return The member assignment.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p member object.
- */
-RD_EXPORT
-const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(
- const rd_kafka_MemberDescription_t *member);
-
-/**
- * @brief Gets assigned partitions of a member \p assignment.
- *
- * @param assignment The group member assignment.
- *
- * @return The assigned partitions.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p assignment object.
- */
-RD_EXPORT
-const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(
- const rd_kafka_MemberAssignment_t *assignment);
-
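-/**
- * Illustrative sketch: walk a polled
- * \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT event and print each
- * member's client id and assigned partition count (abbreviated):
- *
- * @code
- * size_t grp_cnt;
- * const rd_kafka_ConsumerGroupDescription_t **groups =
- *         rd_kafka_DescribeConsumerGroups_result_groups(
- *                 rd_kafka_event_DescribeConsumerGroups_result(event),
- *                 &grp_cnt);
- *
- * for (size_t g = 0; g < grp_cnt; g++) {
- *         size_t i, member_cnt =
- *                 rd_kafka_ConsumerGroupDescription_member_count(groups[g]);
- *         for (i = 0; i < member_cnt; i++) {
- *                 const rd_kafka_MemberDescription_t *member =
- *                         rd_kafka_ConsumerGroupDescription_member(groups[g], i);
- *                 const rd_kafka_topic_partition_list_t *parts =
- *                         rd_kafka_MemberAssignment_partitions(
- *                                 rd_kafka_MemberDescription_assignment(member));
- *                 printf("%s: %d assigned partition(s)\n",
- *                        rd_kafka_MemberDescription_client_id(member),
- *                        parts ? parts->cnt : 0);
- *         }
- * }
- * @endcode
- */
-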
-/**@}*/
-
-/**
- * @name Admin API - DeleteGroups
- * @brief Delete groups from cluster
- * @{
- *
- *
- */
-
-/*! Represents a group to be deleted. */
-typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
-
-/**
- * @brief Create a new DeleteGroup object. This object is later passed to
- * rd_kafka_DeleteGroups().
- *
- * @param group Name of group to delete.
- *
- * @returns a new allocated DeleteGroup object.
- * Use rd_kafka_DeleteGroup_destroy() to free object when done.
- */
-RD_EXPORT
-rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
-
-/**
- * @brief Destroy and free a DeleteGroup object previously created with
- * rd_kafka_DeleteGroup_new()
- */
-RD_EXPORT
-void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
-
-/**
- * @brief Helper function to destroy all DeleteGroup objects in
- * the \p del_groups array (of \p del_group_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
- size_t del_group_cnt);
-
-/**
- * @brief Delete groups from cluster as specified by the \p del_groups
- * array of size \p del_group_cnt elements.
- *
- * @param rk Client instance.
- * @param del_groups Array of groups to delete.
- * @param del_group_cnt Number of elements in \p del_groups array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT
- *
- * @remark This function is called deleteConsumerGroups in the Java client.
- */
-RD_EXPORT
-void rd_kafka_DeleteGroups(rd_kafka_t *rk,
- rd_kafka_DeleteGroup_t **del_groups,
- size_t del_group_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-
-
-/*
- * DeleteGroups result type and methods
- */
-
-/**
- * @brief Get an array of group results from a DeleteGroups result.
- *
- * The returned groups life-time is the same as the \p result object.
- *
- * @param result Result to get group results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(
- const rd_kafka_DeleteGroups_result_t *result,
- size_t *cntp);
-
-/**@}*/
-
-/**
- * @name Admin API - ListConsumerGroupOffsets
- * @{
- *
- *
- */
-
-/*! Represents consumer group committed offsets to be listed. */
-typedef struct rd_kafka_ListConsumerGroupOffsets_s
- rd_kafka_ListConsumerGroupOffsets_t;
-
-/**
- * @brief Create a new ListConsumerGroupOffsets object.
- * This object is later passed to rd_kafka_ListConsumerGroupOffsets().
- *
- * @param group_id Consumer group id.
- * @param partitions Partitions to list committed offsets for.
- * Only the topic and partition fields are used.
- *
- * @returns a new allocated ListConsumerGroupOffsets object.
- * Use rd_kafka_ListConsumerGroupOffsets_destroy() to free
- * object when done.
- */
-RD_EXPORT rd_kafka_ListConsumerGroupOffsets_t *
-rd_kafka_ListConsumerGroupOffsets_new(
- const char *group_id,
- const rd_kafka_topic_partition_list_t *partitions);
-
-/**
- * @brief Destroy and free a ListConsumerGroupOffsets object previously
- * created with rd_kafka_ListConsumerGroupOffsets_new()
- */
-RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy(
- rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
-
-/**
- * @brief Helper function to destroy all ListConsumerGroupOffsets objects in
- * the \p list_grpoffsets array (of \p list_grpoffsets_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy_array(
- rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
- size_t list_grpoffset_cnt);
-
-/**
- * @brief List committed offsets for a set of partitions in a consumer
- * group.
- *
- * @param rk Client instance.
- * @param list_grpoffsets Array of group committed offsets to list.
- * MUST only be one single element.
- * @param list_grpoffsets_cnt Number of elements in \p list_grpoffsets array.
- * MUST always be 1.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
- *
- * @remark The current implementation only supports one group per invocation.
- */
-RD_EXPORT
-void rd_kafka_ListConsumerGroupOffsets(
- rd_kafka_t *rk,
- rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
- size_t list_grpoffsets_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
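-/**
- * Illustrative sketch: list the committed offset for partition 0 of
- * "mytopic" in group "mygroup" (both placeholders):
- *
- * @code
- * rd_kafka_topic_partition_list_t *partitions =
- *         rd_kafka_topic_partition_list_new(1);
- * rd_kafka_topic_partition_list_add(partitions, "mytopic", 0);
- *
- * rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets =
- *         rd_kafka_ListConsumerGroupOffsets_new("mygroup", partitions);
- * rd_kafka_topic_partition_list_destroy(partitions);
- *
- * rd_kafka_ListConsumerGroupOffsets(rk, &list_grpoffsets, 1, NULL, queue);
- * rd_kafka_ListConsumerGroupOffsets_destroy(list_grpoffsets);
- * @endcode
- */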
-
-
-/*
- * ListConsumerGroupOffsets result type and methods
- */
-
-/**
- * @brief Get an array of results from a ListConsumerGroupOffsets result.
- *
- * The returned groups life-time is the same as the \p result object.
- *
- * @param result Result to get group results from.
- * @param cntp is updated to the number of elements in the array.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p result object.
- */
-RD_EXPORT const rd_kafka_group_result_t **
-rd_kafka_ListConsumerGroupOffsets_result_groups(
- const rd_kafka_ListConsumerGroupOffsets_result_t *result,
- size_t *cntp);
-
-
-
-/**@}*/
-
-/**
- * @name Admin API - AlterConsumerGroupOffsets
- * @{
- *
- *
- */
-
-/*! Represents consumer group committed offsets to be altered. */
-typedef struct rd_kafka_AlterConsumerGroupOffsets_s
- rd_kafka_AlterConsumerGroupOffsets_t;
-
-/**
- * @brief Create a new AlterConsumerGroupOffsets object.
- * This object is later passed to rd_kafka_AlterConsumerGroupOffsets().
- *
- * @param group_id Consumer group id.
- * @param partitions Partitions to alter committed offsets for.
- *                   The topic, partition and offset fields are used.
- *
- * @returns a new allocated AlterConsumerGroupOffsets object.
- * Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free
- * object when done.
- */
-RD_EXPORT rd_kafka_AlterConsumerGroupOffsets_t *
-rd_kafka_AlterConsumerGroupOffsets_new(
- const char *group_id,
- const rd_kafka_topic_partition_list_t *partitions);
-
-/**
- * @brief Destroy and free an AlterConsumerGroupOffsets object previously
- * created with rd_kafka_AlterConsumerGroupOffsets_new()
- */
-RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy(
- rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
-
-/**
- * @brief Helper function to destroy all AlterConsumerGroupOffsets objects in
- * the \p alter_grpoffsets array (of \p alter_grpoffsets_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy_array(
- rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
- size_t alter_grpoffset_cnt);
-
-/**
- * @brief Alter committed offsets for a set of partitions in a consumer
- * group. This will succeed at the partition level only if the group
- * is not actively subscribed to the corresponding topic.
- *
- * @param rk Client instance.
- * @param alter_grpoffsets Array of group committed offsets to alter.
- * MUST only be one single element.
- * @param alter_grpoffsets_cnt Number of elements in \p alter_grpoffsets array.
- * MUST always be 1.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
- *
- * @remark The current implementation only supports one group per invocation.
- */
-RD_EXPORT
-void rd_kafka_AlterConsumerGroupOffsets(
- rd_kafka_t *rk,
- rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
- size_t alter_grpoffsets_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
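-/**
- * Illustrative sketch: reset the committed offset for partition 0 of
- * "mytopic" in group "mygroup" to offset 0 (placeholders; the group must
- * not be actively subscribed to the topic):
- *
- * @code
- * rd_kafka_topic_partition_list_t *partitions =
- *         rd_kafka_topic_partition_list_new(1);
- * rd_kafka_topic_partition_list_add(partitions, "mytopic", 0)->offset = 0;
- *
- * rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets =
- *         rd_kafka_AlterConsumerGroupOffsets_new("mygroup", partitions);
- * rd_kafka_topic_partition_list_destroy(partitions);
- *
- * rd_kafka_AlterConsumerGroupOffsets(rk, &alter_grpoffsets, 1, NULL, queue);
- * rd_kafka_AlterConsumerGroupOffsets_destroy(alter_grpoffsets);
- * @endcode
- */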
-
-
-/*
- * AlterConsumerGroupOffsets result type and methods
- */
-
-/**
- * @brief Get an array of results from an AlterConsumerGroupOffsets result.
- *
- * The returned groups life-time is the same as the \p result object.
- *
- * @param result Result to get group results from.
- * @param cntp is updated to the number of elements in the array.
- *
- * @remark The lifetime of the returned memory is the same
- * as the lifetime of the \p result object.
- */
-RD_EXPORT const rd_kafka_group_result_t **
-rd_kafka_AlterConsumerGroupOffsets_result_groups(
- const rd_kafka_AlterConsumerGroupOffsets_result_t *result,
- size_t *cntp);
-
-
-
-/**@}*/
-
-/**
- * @name Admin API - DeleteConsumerGroupOffsets
- * @{
- *
- *
- */
-
-/*! Represents consumer group committed offsets to be deleted. */
-typedef struct rd_kafka_DeleteConsumerGroupOffsets_s
- rd_kafka_DeleteConsumerGroupOffsets_t;
-
-/**
- * @brief Create a new DeleteConsumerGroupOffsets object.
- * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().
- *
- * @param group Consumer group id.
- * @param partitions Partitions to delete committed offsets for.
- * Only the topic and partition fields are used.
- *
- * @returns a new allocated DeleteConsumerGroupOffsets object.
- * Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free
- * object when done.
- */
-RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t *
-rd_kafka_DeleteConsumerGroupOffsets_new(
- const char *group,
- const rd_kafka_topic_partition_list_t *partitions);
-
-/**
- * @brief Destroy and free a DeleteConsumerGroupOffsets object previously
- * created with rd_kafka_DeleteConsumerGroupOffsets_new()
- */
-RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy(
- rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
-
-/**
- * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in
- * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(
- rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
-    size_t del_grpoffsets_cnt);
-
-/**
- * @brief Delete committed offsets for a set of partitions in a consumer
- * group. This will succeed at the partition level only if the group
- * is not actively subscribed to the corresponding topic.
- *
- * @param rk Client instance.
- * @param del_grpoffsets Array of group committed offsets to delete.
- *                       MUST contain exactly one element.
- * @param del_grpoffsets_cnt Number of elements in \p del_grpoffsets array.
- * MUST always be 1.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
- *
- * @remark The current implementation only supports one group per invocation.
- */
-RD_EXPORT
-void rd_kafka_DeleteConsumerGroupOffsets(
- rd_kafka_t *rk,
- rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
- size_t del_grpoffsets_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-
-
-/*
- * DeleteConsumerGroupOffsets result type and methods
- */
-
-/**
- * @brief Get an array of results from a DeleteConsumerGroupOffsets result.
- *
- * The returned groups' lifetime is the same as that of the \p result object.
- *
- * @param result Result to get group results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_group_result_t **
-rd_kafka_DeleteConsumerGroupOffsets_result_groups(
- const rd_kafka_DeleteConsumerGroupOffsets_result_t *result,
- size_t *cntp);
-
-/**@}*/
-
-/**
- * @name Admin API - ACL operations
- * @{
- */
-
-/**
- * @brief ACL Binding is used to create access control lists.
- *
- *
- */
-typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
-
-/**
- * @brief ACL Binding filter is used to filter access control lists.
- *
- */
-typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
-
-/**
- * @returns the error object for the given acl result, or NULL on success.
- */
-RD_EXPORT const rd_kafka_error_t *
-rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
-
-
-/**
- * @enum rd_kafka_AclOperation_t
- * @brief Apache Kafka ACL operation types.
- */
-typedef enum rd_kafka_AclOperation_t {
- RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */
- RD_KAFKA_ACL_OPERATION_ANY =
- 1, /**< In a filter, matches any AclOperation */
- RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */
- RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */
- RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */
- RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */
- RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */
- RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */
- RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */
- RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION =
- 9, /**< CLUSTER_ACTION operation */
- RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS =
- 10, /**< DESCRIBE_CONFIGS operation */
- RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS =
- 11, /**< ALTER_CONFIGS operation */
- RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE =
- 12, /**< IDEMPOTENT_WRITE operation */
- RD_KAFKA_ACL_OPERATION__CNT
-} rd_kafka_AclOperation_t;
-
-/**
- * @returns a string representation of the \p acl_operation
- */
-RD_EXPORT const char *
-rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
-
-/**
- * @enum rd_kafka_AclPermissionType_t
- * @brief Apache Kafka ACL permission types.
- */
-typedef enum rd_kafka_AclPermissionType_t {
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */
- RD_KAFKA_ACL_PERMISSION_TYPE_ANY =
- 1, /**< In a filter, matches any AclPermissionType */
- RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2, /**< Disallows access */
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */
- RD_KAFKA_ACL_PERMISSION_TYPE__CNT
-} rd_kafka_AclPermissionType_t;
-
-/**
- * @returns a string representation of the \p acl_permission_type
- */
-RD_EXPORT const char *rd_kafka_AclPermissionType_name(
- rd_kafka_AclPermissionType_t acl_permission_type);
-
-/**
- * @brief Create a new AclBinding object. This object is later passed to
- * rd_kafka_CreateAcls().
- *
- * @param restype The ResourceType.
- * @param name The resource name.
- * @param resource_pattern_type The pattern type.
- * @param principal A principal, following the Kafka specification.
- * @param host A hostname or IP address.
- * @param operation A Kafka operation.
- * @param permission_type A Kafka permission type.
- * @param errstr An error string for returning errors or NULL to not use it.
- * @param errstr_size The \p errstr size or 0 to not use it.
- *
- * @returns a newly allocated AclBinding object, or NULL if the input
- *          parameters are invalid.
- * are invalid.
- * Use rd_kafka_AclBinding_destroy() to free object when done.
- */
-RD_EXPORT rd_kafka_AclBinding_t *
-rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
- const char *name,
- rd_kafka_ResourcePatternType_t resource_pattern_type,
- const char *principal,
- const char *host,
- rd_kafka_AclOperation_t operation,
- rd_kafka_AclPermissionType_t permission_type,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @brief Create a new AclBindingFilter object. This object is later passed to
- *        rd_kafka_DescribeAcls() or rd_kafka_DeleteAcls() in order to
- *        filter the acls to retrieve or to delete.
- * Use the same rd_kafka_AclBinding functions to query or destroy it.
- *
- * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if
- * not filtering by this field.
- * @param name The resource name or NULL if not filtering by this field.
- * @param resource_pattern_type The pattern type or \c
- * RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
- * @param principal A principal or NULL if not filtering by this field.
- * @param host A hostname or IP address, or NULL if not filtering by this
- *        field.
- * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not
- * filtering by this field.
- * @param permission_type A Kafka permission type or \c
- * RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field.
- * @param errstr An error string for returning errors or NULL to not use it.
- * @param errstr_size The \p errstr size or 0 to not use it.
- *
- * @returns a newly allocated AclBindingFilter object, or NULL if the input
- * parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when
- * done.
- */
-RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
- rd_kafka_ResourceType_t restype,
- const char *name,
- rd_kafka_ResourcePatternType_t resource_pattern_type,
- const char *principal,
- const char *host,
- rd_kafka_AclOperation_t operation,
- rd_kafka_AclPermissionType_t permission_type,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @returns the resource type for the given acl binding.
- */
-RD_EXPORT rd_kafka_ResourceType_t
-rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the resource name for the given acl binding.
- *
- * @remark lifetime of the returned string is the same as the \p acl.
- */
-RD_EXPORT const char *
-rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the principal for the given acl binding.
- *
- * @remark lifetime of the returned string is the same as the \p acl.
- */
-RD_EXPORT const char *
-rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the host for the given acl binding.
- *
- * @remark lifetime of the returned string is the same as the \p acl.
- */
-RD_EXPORT const char *
-rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the acl operation for the given acl binding.
- */
-RD_EXPORT rd_kafka_AclOperation_t
-rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the permission type for the given acl binding.
- */
-RD_EXPORT rd_kafka_AclPermissionType_t
-rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the resource pattern type for the given acl binding.
- */
-RD_EXPORT rd_kafka_ResourcePatternType_t
-rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
-
-/**
- * @returns the error object for the given acl binding, or NULL on success.
- */
-RD_EXPORT const rd_kafka_error_t *
-rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
-
-
-/**
- * @brief Destroy and free an AclBinding object previously created with
- * rd_kafka_AclBinding_new()
- */
-RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
-
-
-/**
- * @brief Helper function to destroy all AclBinding objects in
- * the \p acl_bindings array (of \p acl_bindings_cnt elements).
- * The array itself is not freed.
- */
-RD_EXPORT void
-rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
- size_t acl_bindings_cnt);
-
-/**
- * @brief Get an array of acl results from a CreateAcls result.
- *
- * The lifetime of the returned \p acl results is the same as that of the
- * \p result object.
- * @param result CreateAcls result to get acl results from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_acl_result_t **
-rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
- size_t *cntp);
-
-/**
- * @brief Create acls as specified by the \p new_acls
- *        array of size \p new_acls_cnt elements.
- *
- * @param rk Client instance.
- * @param new_acls Array of new acls to create.
- * @param new_acls_cnt Number of elements in \p new_acls array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_CREATEACLS_RESULT
- */
-RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk,
- rd_kafka_AclBinding_t **new_acls,
- size_t new_acls_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
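-/*
- * A minimal CreateAcls sketch (assuming an existing client handle \c rk and
- * result queue \c rkqu; the topic, principal and host values are
- * illustrative placeholders and error handling is elided) that grants a
- * principal write access to a topic:
- *
- * @code
- * char errstr[512];
- * rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
- *     RD_KAFKA_RESOURCE_TOPIC, "mytopic",
- *     RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
- *     RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- *     errstr, sizeof(errstr));
- *
- * rd_kafka_CreateAcls(rk, &acl, 1, NULL, rkqu);
- * // Poll rkqu for an RD_KAFKA_EVENT_CREATEACLS_RESULT event, then use
- * // rd_kafka_CreateAcls_result_acls() and rd_kafka_acl_result_error()
- * // to check the outcome of each created acl.
- *
- * rd_kafka_AclBinding_destroy(acl);
- * @endcode
- */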
-/**
- * DescribeAcls - describe access control lists.
- *
- *
- */
-
-/**
- * @brief Get an array of resource results from a DescribeAcls result.
- *
- * The lifetime of the returned \p resources is the same as that of the
- * \p result object.
- * @param result DescribeAcls result to get acls from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_AclBinding_t **
-rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
- size_t *cntp);
-
-/**
- * @brief Describe acls matching the filter provided in \p acl_filter
- *
- * @param rk Client instance.
- * @param acl_filter Filter for the returned acls.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
- */
-RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk,
- rd_kafka_AclBindingFilter_t *acl_filter,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
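-/*
- * A minimal DescribeAcls sketch (assuming an existing client handle \c rk
- * and result queue \c rkqu; error handling elided) that matches every ALLOW
- * acl on any topic. rd_kafka_DeleteAcls() below takes an array of the same
- * filter type:
- *
- * @code
- * rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
- *     RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_ANY,
- *     NULL, NULL, RD_KAFKA_ACL_OPERATION_ANY,
- *     RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, NULL, 0);
- *
- * rd_kafka_DescribeAcls(rk, filter, NULL, rkqu);
- * // Poll rkqu for an RD_KAFKA_EVENT_DESCRIBEACLS_RESULT event, then
- * // enumerate the matches with rd_kafka_DescribeAcls_result_acls().
- *
- * rd_kafka_AclBinding_destroy(filter);
- * @endcode
- */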
-/**
- * DeleteAcls - delete access control lists.
- *
- *
- */
-
-typedef struct rd_kafka_DeleteAcls_result_response_s
- rd_kafka_DeleteAcls_result_response_t;
-
-/**
- * @brief Get an array of DeleteAcls result responses from a DeleteAcls result.
- *
- * The lifetime of the returned \p responses is the same as that of the
- * \p result object.
- * @param result DeleteAcls result to get responses from.
- * @param cntp is updated to the number of elements in the array.
- */
-RD_EXPORT const rd_kafka_DeleteAcls_result_response_t **
-rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
- size_t *cntp);
-
-/**
- * @returns the error object for the given DeleteAcls result response,
- * or NULL on success.
- */
-RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
- const rd_kafka_DeleteAcls_result_response_t *result_response);
-
-
-/**
- * @returns the matching acls array for the given DeleteAcls result response.
- *
- * @remark lifetime of the returned acl bindings is the same as the \p
- * result_response.
- */
-RD_EXPORT const rd_kafka_AclBinding_t **
-rd_kafka_DeleteAcls_result_response_matching_acls(
- const rd_kafka_DeleteAcls_result_response_t *result_response,
- size_t *matching_acls_cntp);
-
-/**
- * @brief Delete acls matching the filters provided in \p del_acls
- * array of size \p del_acls_cnt.
- *
- * @param rk Client instance.
- * @param del_acls Filters for the acls to delete.
- * @param del_acls_cnt Number of elements in \p del_acls array.
- * @param options Optional admin options, or NULL for defaults.
- * @param rkqu Queue to emit result on.
- *
- * Supported admin options:
- * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
- *
- * @remark The result event type emitted on the supplied queue is of type
- * \c RD_KAFKA_EVENT_DELETEACLS_RESULT
- */
-RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk,
- rd_kafka_AclBindingFilter_t **del_acls,
- size_t del_acls_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu);
-
-/**@}*/
-
-/**
- * @name Security APIs
- * @{
- *
- */
-
-/**
- * @brief Set SASL/OAUTHBEARER token and metadata
- *
- * @param rk Client instance.
- * @param token_value the mandatory token value to set, often (but not
- * necessarily) a JWS compact serialization as per
- * https://tools.ietf.org/html/rfc7515#section-3.1.
- * @param md_lifetime_ms when the token expires, in terms of the number of
- * milliseconds since the epoch.
- * @param md_principal_name the mandatory Kafka principal name associated
- * with the token.
- * @param extensions optional SASL extensions key-value array with
- * \p extension_size elements (number of keys * 2), where [i] is the key and
- * [i+1] is the key's value, to be communicated to the broker
- * as additional key-value pairs during the initial client response as per
- * https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are
- * copied.
- * @param extension_size the number of SASL extension keys plus values,
- * which must be a non-negative multiple of 2.
- * @param errstr A human readable error string (nul-terminated) is written to
- * this location that must be of at least \p errstr_size bytes.
- * The \p errstr is only written in case of error.
- * @param errstr_size Writable size in \p errstr.
- *
- * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
- * this method upon success. The extension keys must not include the reserved
- * key "`auth`", and all extension keys and values must conform to the required
- * format as per https://tools.ietf.org/html/rfc7628#section-3.1:
- *
- * key = 1*(ALPHA)
- * value = *(VCHAR / SP / HTAB / CR / LF )
- *
- * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise \p errstr set
- * and:<br>
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are
- * invalid;<br>
- * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
- * supported by this build;<br>
- * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is
- * not configured as the client's authentication mechanism.<br>
- *
- * @sa rd_kafka_oauthbearer_set_token_failure
- * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
- */
-RD_EXPORT
-rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
- const char *token_value,
- int64_t md_lifetime_ms,
- const char *md_principal_name,
- const char **extensions,
- size_t extension_size,
- char *errstr,
- size_t errstr_size);
-
-/**
- * @brief SASL/OAUTHBEARER token refresh failure indicator.
- *
- * @param rk Client instance.
- * @param errstr mandatory human readable error reason for failing to acquire
- * a token.
- *
- * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
- * this method upon failure.
- *
- * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:<br>
- * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
- * supported by this build;<br>
- * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is
- * not configured as the client's authentication mechanism,<br>
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
- *
- * @sa rd_kafka_oauthbearer_set_token
- * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
- */
-RD_EXPORT
-rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
- const char *errstr);
-
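-/*
- * A minimal refresh callback sketch tying the two functions above together.
- * acquire_token() is a hypothetical application helper that fetches a token
- * and its expiry time; actual token acquisition is application specific:
- *
- * @code
- * static void my_oauthbearer_refresh_cb(rd_kafka_t *rk,
- *                                       const char *oauthbearer_config,
- *                                       void *opaque) {
- *     char errstr[512];
- *     char *token;
- *     int64_t expiry_ms;
- *
- *     // Hypothetical helper: returns 0 and sets the token value and its
- *     // expiry (ms since the epoch) on success, or -1 on failure.
- *     if (acquire_token(&token, &expiry_ms) == -1) {
- *         rd_kafka_oauthbearer_set_token_failure(rk,
- *                                                "token acquisition failed");
- *         return;
- *     }
- *
- *     if (rd_kafka_oauthbearer_set_token(rk, token, expiry_ms, "myprincipal",
- *                                        NULL, 0, errstr, sizeof(errstr)))
- *         rd_kafka_oauthbearer_set_token_failure(rk, errstr);
- *
- *     free(token);
- * }
- * @endcode
- *
- * The callback is registered with
- * rd_kafka_conf_set_oauthbearer_token_refresh_cb() before creating the
- * client instance.
- */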
-/**@}*/
-
-
-/**
- * @name Transactional producer API
- *
- * The transactional producer operates on top of the idempotent producer,
- * and provides full exactly-once semantics (EOS) for Apache Kafka when used
- * with the transaction aware consumer (\c isolation.level=read_committed).
- *
- * A producer instance is configured for transactions by setting the
- * \c transactional.id to an identifier unique for the application. This
- * id will be used to fence stale transactions from previous instances of
- * the application, typically following an outage or crash.
- *
- * After creating the transactional producer instance using rd_kafka_new()
- * the transactional state must be initialized by calling
- * rd_kafka_init_transactions(). This is a blocking call that will
- * acquire a runtime producer id from the transaction coordinator broker
- * as well as abort any stale transactions and fence any still running producer
- * instances with the same \c transactional.id.
- *
- * Once transactions are initialized the application may begin a new
- * transaction by calling rd_kafka_begin_transaction().
- * A producer instance may only have one single on-going transaction.
- *
- * Any messages produced after the transaction has been started will
- * belong to the ongoing transaction and will be committed or aborted
- * atomically.
- * It is not permitted to produce messages outside a transaction
- * boundary, e.g., before rd_kafka_begin_transaction() or after
- * rd_kafka_commit_transaction(), rd_kafka_abort_transaction(), or after
- * the current transaction has failed.
- *
- * If consumed messages are used as input to the transaction, the consumer
- * instance must be configured with \c enable.auto.commit set to \c false.
- * To commit the consumed offsets along with the transaction pass the
- * list of consumed partitions and the last offset processed + 1 to
- * rd_kafka_send_offsets_to_transaction() prior to committing the transaction.
- * This allows an aborted transaction to be restarted using the previously
- * committed offsets.
- *
- * To commit the produced messages, and any consumed offsets, to the
- * current transaction, call rd_kafka_commit_transaction().
- * This call will block until the transaction has been fully committed or
- * failed (typically due to fencing by a newer producer instance).
- *
- * Alternatively, if processing fails, or an abortable transaction error is
- * raised, the transaction needs to be aborted by calling
- * rd_kafka_abort_transaction() which marks any produced messages and
- * offset commits as aborted.
- *
- * After the current transaction has been committed or aborted a new
- * transaction may be started by calling rd_kafka_begin_transaction() again.
- *
- * @par Retriable errors
- * Some error cases allow the attempted operation to be retried; this is
- * indicated by the error object having the retriable flag set which can
- * be detected by calling rd_kafka_error_is_retriable().
- * When this flag is set the application may retry the operation immediately
- * or preferably after a short grace period (to avoid busy-looping).
- * Retriable errors include timeouts, broker transport failures, etc.
- *
- * @par Abortable errors
- * An ongoing transaction may fail permanently due to various errors,
- * such as the transaction coordinator becoming unavailable, write failures
- * to the Apache Kafka log, under-replicated partitions, etc.
- * At this point the producer application must abort the current transaction
- * using rd_kafka_abort_transaction() and optionally start a new transaction
- * by calling rd_kafka_begin_transaction().
- * Whether an error is abortable or not is detected by calling
- * rd_kafka_error_txn_requires_abort() on the returned error object.
- *
- * @par Fatal errors
- * While the underlying idempotent producer will typically only raise
- * fatal errors for unrecoverable cluster errors where the idempotency
- * guarantees can't be maintained, most of these are treated as abortable by
- * the transactional producer since transactions may be aborted and retried
- * in their entirety.
- * The transactional producer, on the other hand, introduces a set of
- * additional fatal errors which the application needs to handle by shutting
- * down the producer and terminating. There is no way for a producer
- * instance to recover from fatal errors.
- * Whether an error is fatal or not is detected by calling
- * rd_kafka_error_is_fatal() on the returned error object or by checking
- * the global rd_kafka_fatal_error() code.
- * Fatal errors are raised by triggering the \c error_cb (see the
- * Fatal error chapter in INTRODUCTION.md for more information), and any
- * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
- * or have the fatal flag set (see rd_kafka_error_is_fatal()).
- * The originating fatal error code can be retrieved by calling
- * rd_kafka_fatal_error().
- *
- * @par Handling of other errors
- * For errors that have neither the retriable, abortable, nor fatal flag set
- * it is not always obvious how to handle them. While some of these errors
- * may be indicative of bugs in the application code, such as when
- * an invalid parameter is passed to a method, other errors might originate
- * from the broker and be passed through as-is to the application.
- * The general recommendation is to treat these errors, which have
- * neither the retriable nor the abortable flag set, as fatal.
- *
- * @par Error handling example
- * @code
- * rd_kafka_error_t *error;
- *
- * retry:
- * error = rd_kafka_commit_transaction(producer, 10*1000);
- * if (!error)
- * return success;
- * else if (rd_kafka_error_txn_requires_abort(error)) {
- * do_abort_transaction_and_reset_inputs();
- * } else if (rd_kafka_error_is_retriable(error)) {
- * rd_kafka_error_destroy(error);
- * goto retry;
- * } else { // treat all other errors as fatal errors
- * fatal_error(rd_kafka_error_string(error));
- * }
- * rd_kafka_error_destroy(error);
- * @endcode
- *
- *
- * @{
- */
-
-
-/**
- * @brief Initialize transactions for the producer instance.
- *
- * This function ensures any transactions initiated by previous instances
- * of the producer with the same \c transactional.id are completed.
- * If the previous instance failed with a transaction in progress the
- * previous transaction will be aborted.
- * This function needs to be called before any other transactional or
- * produce functions are called when the \c transactional.id is configured.
- *
- * If the last transaction had begun completion (following transaction commit)
- * but not yet finished, this function will await the previous transaction's
- * completion.
- *
- * When any previous transactions have been fenced this function
- * will acquire the internal producer id and epoch, used in all future
- * transactional messages issued by this producer instance.
- *
- * @param rk Producer instance.
- * @param timeout_ms The maximum time to block. On timeout the operation
- * may continue in the background, depending on state,
- * and it is okay to call init_transactions() again.
- * If an infinite timeout (-1) is passed, the timeout will
- * be adjusted to 2 * \c transaction.timeout.ms.
- *
- * @remark This function may block up to \p timeout_ms milliseconds.
- *
- * @remark This call is resumable when a retriable timeout error is returned.
- * Calling the function again will resume the operation that is
- * progressing in the background.
- *
- * @returns NULL on success or an error object on failure.
- * Check whether the returned error object permits retrying
- * by calling rd_kafka_error_is_retriable(), or whether a fatal
- * error has been raised by calling rd_kafka_error_is_fatal().
- * Error codes:
- * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator
- *          could not be contacted within \p timeout_ms (retriable),
- * RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction
- * coordinator is not available (retriable),
- * RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction
- * would not complete within \p timeout_ms (retriable),
- * RD_KAFKA_RESP_ERR__STATE if transactions have already been started
- * or upon fatal error,
- * RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not
- * support transactions (<Apache Kafka 0.11), this also raises a
- * fatal error,
- * RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT if the configured
- * \c transaction.timeout.ms is outside the broker-configured range,
- * this also raises a fatal error,
- * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
- * configured for the producer instance,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
- * or \p timeout_ms is out of range.
- * Other error codes not listed here may be returned, depending on
- * broker version.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
-
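-/*
- * A minimal startup sketch (assuming \c producer was created with
- * \c transactional.id configured; fatal_error() is an illustrative
- * application shutdown helper, as in the error handling example above):
- *
- * @code
- * rd_kafka_error_t *error;
- *
- * error = rd_kafka_init_transactions(producer, 30 * 1000);
- * if (error) {
- *     if (rd_kafka_error_is_retriable(error)) {
- *         // Timed out: it is okay to call rd_kafka_init_transactions()
- *         // again, the operation continues in the background.
- *     } else {
- *         fatal_error(rd_kafka_error_string(error));
- *     }
- *     rd_kafka_error_destroy(error);
- * }
- * @endcode
- */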
-
-
-/**
- * @brief Begin a new transaction.
- *
- * rd_kafka_init_transactions() must have been called successfully (once)
- * before this function is called.
- *
- * Upon successful return from this function the application has to perform at
- * least one of the following operations within \c transaction.timeout.ms to
- * avoid timing out the transaction on the broker:
- *        * rd_kafka_produce() (et al.)
- * * rd_kafka_send_offsets_to_transaction()
- * * rd_kafka_commit_transaction()
- * * rd_kafka_abort_transaction()
- *
- * Any messages produced, offsets sent (rd_kafka_send_offsets_to_transaction()),
- * etc, after the successful return of this function will be part of
- * the transaction and committed or aborted atomically.
- *
- * Finish the transaction by calling rd_kafka_commit_transaction() or
- * abort the transaction by calling rd_kafka_abort_transaction().
- *
- * @param rk Producer instance.
- *
- * @returns NULL on success or an error object on failure.
- * Check whether a fatal error has been raised by
- * calling rd_kafka_error_is_fatal().
- * Error codes:
- * RD_KAFKA_RESP_ERR__STATE if a transaction is already in progress
- * or upon fatal error,
- * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
- * configured for the producer instance,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance.
- * Other error codes not listed here may be returned, depending on
- * broker version.
- *
- * @remark With the transactional producer, rd_kafka_produce(),
- *         rd_kafka_producev(), et al., are only allowed during an on-going
- * transaction, as started with this function.
- * Any produce call outside an on-going transaction, or for a failed
- * transaction, will fail.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
-
-
-/**
- * @brief Sends a list of topic partition offsets to the consumer group
- * coordinator for \p cgmetadata, and marks the offsets as part
- *        of the current transaction.
- * These offsets will be considered committed only if the transaction is
- * committed successfully.
- *
- * The offsets should be the next message your application will consume,
- * i.e., the last processed message's offset + 1 for each partition.
- * Either track the offsets manually during processing or use
- * rd_kafka_position() (on the consumer) to get the current offsets for
- * the partitions assigned to the consumer.
- *
- * Use this method at the end of a consume-transform-produce loop prior
- * to committing the transaction with rd_kafka_commit_transaction().
- *
- * @param rk Producer instance.
- * @param offsets List of offsets to commit to the consumer group upon
- * successful commit of the transaction. Offsets should be
- * the next message to consume, e.g., last processed message + 1.
- * @param cgmetadata The current consumer group metadata as returned by
- * rd_kafka_consumer_group_metadata() on the consumer
- * instance the provided offsets were consumed from.
- * @param timeout_ms Maximum time allowed to register the offsets on the broker.
- *
- * @remark This function must be called on the transactional producer instance,
- * not the consumer.
- *
- * @remark The consumer must disable auto commits
- * (set \c enable.auto.commit to false on the consumer).
- *
- * @remark Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in
- * \p offsets will be ignored, if there are no valid offsets in
- * \p offsets the function will return NULL and no action will be taken.
- *
- * @remark This call is retriable but not resumable, which means a new request
- * with a new set of provided offsets and group metadata will be
- * sent to the transaction coordinator if the call is retried.
- *
- * @remark It is highly recommended to retry the call (upon retriable error)
- * with identical \p offsets and \p cgmetadata parameters.
- * Failure to do so risks inconsistent state between what is actually
- * included in the transaction and what the application thinks is
- * included in the transaction.
- *
- * @returns NULL on success or an error object on failure.
- * Check whether the returned error object permits retrying
- * by calling rd_kafka_error_is_retriable(), or whether an abortable
- * or fatal error has been raised by calling
- * rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal()
- * respectively.
- * Error codes:
- * RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
- * RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
- * transaction has been fenced by a newer producer instance,
- * RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
- * producer is no longer authorized to perform transactional
- * operations,
- * RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED if the producer is
- * not authorized to write the consumer offsets to the group
- * coordinator,
- * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
- * configured for the producer instance,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
- *          or if the \p cgmetadata or \p offsets are empty.
- * Other error codes not listed here may be returned, depending on
- * broker version.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- const rd_kafka_consumer_group_metadata_t *cgmetadata,
- int timeout_ms);
-
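-/*
- * A minimal consume-transform-produce sketch (assuming an initialized
- * transactional \c producer and a \c consumer with \c enable.auto.commit
- * set to false; the produce step and error handling, which follows the
- * error handling example above, are elided):
- *
- * @code
- * rd_kafka_topic_partition_list_t *offsets;
- * rd_kafka_consumer_group_metadata_t *cgmd;
- * rd_kafka_error_t *error;
- *
- * rd_kafka_begin_transaction(producer);
- *
- * // ... consume, transform and produce messages here ...
- *
- * // Current position (next offset to consume) for all assigned partitions.
- * rd_kafka_assignment(consumer, &offsets);
- * rd_kafka_position(consumer, offsets);
- *
- * cgmd  = rd_kafka_consumer_group_metadata(consumer);
- * error = rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1);
- * rd_kafka_consumer_group_metadata_destroy(cgmd);
- * rd_kafka_topic_partition_list_destroy(offsets);
- *
- * if (!error)
- *     error = rd_kafka_commit_transaction(producer, -1);
- * @endcode
- */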
-
-/**
- * @brief Commit the current transaction (as started with
- * rd_kafka_begin_transaction()).
- *
- * Any outstanding messages will be flushed (delivered) before actually
- * committing the transaction.
- *
- * If any of the outstanding messages fail permanently the current
- * transaction will enter the abortable error state and this
- * function will return an abortable error, in this case the application
- * must call rd_kafka_abort_transaction() before attempting a new
- * transaction with rd_kafka_begin_transaction().
- *
- * @param rk Producer instance.
- * @param timeout_ms The maximum time to block. On timeout the operation
- * may continue in the background, depending on state,
- * and it is okay to call this function again.
- * Pass -1 to use the remaining transaction timeout,
- * this is the recommended use.
- *
- * @remark It is strongly recommended to always pass -1 (remaining transaction
- * time) as the \p timeout_ms. Using other values risk internal
- * state desynchronization in case any of the underlying protocol
- * requests fail.
- *
- * @remark This function will block until all outstanding messages are
- * delivered and the transaction commit request has been successfully
- * handled by the transaction coordinator, or until \p timeout_ms
- *         expires, whichever comes first. On timeout the application may
- * call the function again.
- *
- * @remark Will automatically call rd_kafka_flush() to ensure all queued
- * messages are delivered before attempting to commit the
- * transaction.
- * If the application has enabled RD_KAFKA_EVENT_DR it must
- * serve the event queue in a separate thread since rd_kafka_flush()
- * will not serve delivery reports in this mode.
- *
- * @remark This call is resumable when a retriable timeout error is returned.
- * Calling the function again will resume the operation that is
- * progressing in the background.
- *
- * @returns NULL on success or an error object on failure.
- * Check whether the returned error object permits retrying
- * by calling rd_kafka_error_is_retriable(), or whether an abortable
- * or fatal error has been raised by calling
- * rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal()
- * respectively.
- * Error codes:
- * RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
- * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be
- *          completely committed within \p timeout_ms; this is a retriable
- * error as the commit continues in the background,
- * RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
- * transaction has been fenced by a newer producer instance,
- * RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
- * producer is no longer authorized to perform transactional
- * operations,
- * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
- * configured for the producer instance,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
- * Other error codes not listed here may be returned, depending on
- * broker version.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
-
-
-/**
- * @brief Aborts the ongoing transaction.
- *
- * This function should also be used to recover from non-fatal abortable
- * transaction errors.
- *
- * Any outstanding messages will be purged and fail with
- * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT or RD_KAFKA_RESP_ERR__PURGE_QUEUE.
- * See rd_kafka_purge() for details.
- *
- * @param rk Producer instance.
- * @param timeout_ms The maximum time to block. On timeout the operation
- * may continue in the background, depending on state,
- * and it is okay to call this function again.
- * Pass -1 to use the remaining transaction timeout,
- * this is the recommended use.
- *
- * @remark It is strongly recommended to always pass -1 (remaining transaction
- * time) as the \p timeout_ms. Using other values risk internal
- * state desynchronization in case any of the underlying protocol
- * requests fail.
- *
- * @remark This function will block until all outstanding messages are purged
- * and the transaction abort request has been successfully
- * handled by the transaction coordinator, or until \p timeout_ms
- *         expires, whichever comes first. On timeout the application may
- * call the function again.
- * If the application has enabled RD_KAFKA_EVENT_DR it must
- * serve the event queue in a separate thread since rd_kafka_flush()
- * will not serve delivery reports in this mode.
- *
- * @remark This call is resumable when a retriable timeout error is returned.
- * Calling the function again will resume the operation that is
- * progressing in the background.
- *
- * @returns NULL on success or an error object on failure.
- * Check whether the returned error object permits retrying
- * by calling rd_kafka_error_is_retriable(), or whether a fatal error
- * has been raised by calling rd_kafka_error_is_fatal().
- * Error codes:
- * RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
- * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be
- *          completely aborted within \p timeout_ms; this is a retriable
- *          error as the abort continues in the background,
- * RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
- * transaction has been fenced by a newer producer instance,
- * RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
- * producer is no longer authorized to perform transactional
- * operations,
- * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
- * configured for the producer instance,
- * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
- * Other error codes not listed here may be returned, depending on
- * broker version.
- *
- * @remark The returned error object (if not NULL) must be destroyed with
- * rd_kafka_error_destroy().
- */
-RD_EXPORT
-rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
-
-
-/**@}*/
-
-/* @cond NO_DOC */
-#ifdef __cplusplus
-}
-#endif
-#endif /* _RDKAFKA_H_ */
-/* @endcond NO_DOC */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c
deleted file mode 100644
index 6aaec636d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c
+++ /dev/null
@@ -1,6668 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_admin.h"
-#include "rdkafka_request.h"
-#include "rdkafka_aux.h"
-
-#include <stdarg.h>
-
-
-
-/** @brief Descriptive strings for rko_u.admin_request.state */
-static const char *rd_kafka_admin_state_desc[] = {
- "initializing",
- "waiting for broker",
- "waiting for controller",
- "waiting for fanouts",
- "constructing request",
- "waiting for response from broker",
- "waiting for a valid list of brokers to be available"};
-
-
-
-/**
- * @brief Admin API implementation.
- *
- * The public Admin API in librdkafka exposes a completely asynchronous
- * interface where the initial request API (e.g., ..CreateTopics())
- * is non-blocking and returns immediately, and the application polls
- * a ..queue_t for the result.
- *
- * The underlying handling of the request is also completely asynchronous
- * inside librdkafka, for two reasons:
- * - everything is async in librdkafka so adding something new that isn't
- *    would mean that existing functionality (such as statistics, timers,
- *    etc) would need to be changed to work alongside it.
- *    There is no functional value to making the admin API
- * synchronous internally, even if it would simplify its implementation.
- * So making it async allows the Admin API to be used with existing
- * client types in existing applications without breakage.
- * - the async approach allows multiple outstanding Admin API requests
- * simultaneously.
- *
- * The internal async implementation relies on the following concepts:
- * - it uses a single rko (rd_kafka_op_t) to maintain state.
- * - the rko has a callback attached - called the worker callback.
- * - the worker callback is a small state machine that triggers
- * async operations (be it controller lookups, timeout timers,
- * protocol transmits, etc).
- * - the worker callback is only called on the rdkafka main thread.
- * - the callback is triggered by different events and sources by enqueuing
- * the rko on the rdkafka main ops queue.
- *
- *
- * Let's illustrate this with a DeleteTopics example. This might look
- * daunting, but it boils down to an asynchronous state machine being
- * triggered by enqueuing the rko op.
- *
- * 1. [app thread] The user constructs the input arguments,
- * including a response rkqu queue and then calls DeleteTopics().
- *
- * 2. [app thread] DeleteTopics() creates a new internal op (rko) of type
- * RD_KAFKA_OP_DELETETOPICS, makes a **copy** on the rko of all the
- * input arguments (which allows the caller to free the originals
- *    whenever they like). The rko op worker callback is set to the
- * generic admin worker callback rd_kafka_admin_worker()
- *
- * 3. [app thread] DeleteTopics() enqueues the rko on librdkafka's main ops
- * queue that is served by the rdkafka main thread in rd_kafka_thread_main()
- *
- * 4. [rdkafka main thread] The rko is dequeued by rd_kafka_q_serve and
- * the rd_kafka_poll_cb() is called.
- *
- * 5. [rdkafka main thread] The rko_type switch case identifies the rko
- * as an RD_KAFKA_OP_DELETETOPICS which is served by the op callback
- * set in step 2.
- *
- * 6. [rdkafka main thread] The worker callback is called.
- * After some initial checking of err==ERR__DESTROY events
- * (which is used to clean up outstanding ops (etc) on termination),
- * the code hits a state machine using rko_u.admin_request.state.
- *
- * 7. [rdkafka main thread] The initial state is RD_KAFKA_ADMIN_STATE_INIT
- * where the worker validates the user input.
- * An enqueue once (eonce) object is created - the use of this object
- * allows having multiple outstanding async functions referencing the
- * same underlying rko object, but only allowing the first one
- * to trigger an event.
- * A timeout timer is set up to trigger the eonce object when the
- * full options.request_timeout has elapsed.
- *
- * 8. [rdkafka main thread] After initialization the state is updated
- * to WAIT_BROKER or WAIT_CONTROLLER and the code falls through to
- * looking up a specific broker or the controller broker and waiting for
- * an active connection.
- * Both the lookup and the waiting for an active connection are
- * fully asynchronous, and the same eonce used for the timer is passed
- * to the rd_kafka_broker_controller_async() or broker_async() functions
- * which will trigger the eonce when a broker state change occurs.
- * If the controller is already known (from metadata) and the connection
- *    is up, a rkb broker object is returned and the eonce is not used,
- * skip to step 11.
- *
- * 9. [rdkafka main thread] Upon metadata retrieval (which is triggered
- * automatically by other parts of the code) the controller_id may be
- * updated in which case the eonce is triggered.
- * The eonce triggering enqueues the original rko on the rdkafka main
- * ops queue again and we go to step 8 which will check if the controller
- * connection is up.
- *
- * 10. [broker thread] If the controller_id is now known we wait for
- * the corresponding broker's connection to come up. This signaling
- * is performed from the broker thread upon broker state changes
- * and uses the same eonce. The eonce triggering enqueues the original
- *     rko on the rdkafka main ops queue again and we go back to step 8
- *     to check if the broker is now available.
- *
- * 11. [rdkafka main thread] Back in the worker callback we now have an
- * rkb broker pointer (with reference count increased) for the controller
- * with the connection up (it might go down while we're referencing it,
- * but that does not stop us from enqueuing a protocol request).
- *
- * 12. [rdkafka main thread] A DeleteTopics protocol request buffer is
- * constructed using the input parameters saved on the rko and the
- * buffer is enqueued on the broker's transmit queue.
- * The buffer is set up to provide the reply buffer on the rdkafka main
- * ops queue (the same queue we are operating from) with a handler
- * callback of rd_kafka_admin_handle_response().
- *     The state is updated to RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE.
- *
- * 13. [broker thread] If the request times out, a response with error code
- * (ERR__TIMED_OUT) is enqueued. Go to 16.
- *
- * 14. [broker thread] If a response is received, the response buffer
- * is enqueued. Go to 16.
- *
- * 15. [rdkafka main thread] The buffer callback (..handle_response())
- * is called, which attempts to extract the original rko from the eonce,
- * but if the eonce has already been triggered by some other source
- * (the timeout timer) the buffer callback simply returns and does nothing
- * since the admin request is over and a result (probably a timeout)
- * has been enqueued for the application.
- * If the rko was still intact we temporarily set the reply buffer
- * in the rko struct and call the worker callback. Go to 17.
- *
- * 16. [rdkafka main thread] The worker callback is called in state
- * RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE without a response but with an error.
- * An error result op is created and enqueued on the application's
- * provided response rkqu queue.
- *
- * 17. [rdkafka main thread] The worker callback is called in state
- * RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE with a response buffer with no
- * error set.
- * The worker calls the response `parse()` callback to parse the response
- * buffer and populates a result op (rko_result) with the response
- * information (such as per-topic error codes, etc).
- * The result op is returned to the worker.
- *
- * 18. [rdkafka main thread] The worker enqueues the result op (rko_result)
- * on the application's provided response rkqu queue.
- *
- * 19. [app thread] The application calls rd_kafka_queue_poll() to
- * receive the result of the operation. The result may have been
- *      enqueued in step 18 thanks to successful completion, or in any
- * of the earlier stages when an error was encountered.
- *
- * 20. [app thread] The application uses rd_kafka_event_DeleteTopics_result()
- * to retrieve the request-specific result type.
- *
- * 21. Done.
- *
- *
- *
- *
- * Fanout (RD_KAFKA_OP_ADMIN_FANOUT) requests
- * ------------------------------------------
- *
- * Certain Admin APIs may have requests that need to be sent to different
- * brokers, for instance DeleteRecords which needs to be sent to the leader
- * for each given partition.
- *
- * To achieve this we create a Fanout (RD_KAFKA_OP_ADMIN_FANOUT) op for the
- * overall Admin API call (e.g., DeleteRecords), and then sub-ops for each
- * of the per-broker requests. These sub-ops have the proper op type for
- * the operation they are performing (e.g., RD_KAFKA_OP_DELETERECORDS)
- * but their replyq does not point back to the application replyq but
- * rk_ops which is handled by the librdkafka main thread and with the op
- * callback set to rd_kafka_admin_fanout_worker(). This worker aggregates
- * the results of each fanned out sub-op and merges the result into a
- * single result op (RD_KAFKA_OP_ADMIN_RESULT) that is enqueued on the
- * application's replyq.
- *
- * We rely on the timeouts on the fanned out sub-ops rather than the parent
- * fanout op.
- *
- * The parent fanout op must not be destroyed until all fanned out sub-ops
- * are done (either by success, failure or timeout) and destroyed, and this
- * is tracked by the rko_u.admin_request.fanout.outstanding counter.
- *
- */
-
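-/*
- * For reference, the application-side counterpart of steps 1, 19 and 20
- * above boils down to a few public API calls. A minimal sketch (error
- * handling elided, topic name illustrative):
- *
- *   rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
- *   rd_kafka_DeleteTopic_t *del = rd_kafka_DeleteTopic_new("mytopic");
- *
- *   rd_kafka_DeleteTopics(rk, &del, 1, NULL, rkqu);                // step 1
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, 60 * 1000); // step 19
- *   const rd_kafka_DeleteTopics_result_t *res =
- *       rd_kafka_event_DeleteTopics_result(rkev);                  // step 20
- */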
-
-/**
- * @enum Admin request target broker. Values must be negative since the
- *       field used is broker_id.
- */
-enum { RD_KAFKA_ADMIN_TARGET_CONTROLLER = -1, /**< Cluster controller */
- RD_KAFKA_ADMIN_TARGET_COORDINATOR = -2, /**< (Group) Coordinator */
- RD_KAFKA_ADMIN_TARGET_FANOUT = -3, /**< This rko is a fanout and
-                                                 * has no target broker */
- RD_KAFKA_ADMIN_TARGET_ALL = -4, /**< All available brokers */
-};
-
-/**
- * @brief Admin op callback types
- */
-typedef rd_kafka_resp_err_t(rd_kafka_admin_Request_cb_t)(
- rd_kafka_broker_t *rkb,
- const rd_list_t *configs /*(ConfigResource_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) RD_WARN_UNUSED_RESULT;
-
-typedef rd_kafka_resp_err_t(rd_kafka_admin_Response_parse_cb_t)(
- rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) RD_WARN_UNUSED_RESULT;
-
-typedef void(rd_kafka_admin_fanout_PartialResponse_cb_t)(
- rd_kafka_op_t *rko_req,
- const rd_kafka_op_t *rko_partial);
-
-typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyResult_cb_t;
-
-typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyArg_cb_t;
-
-/**
- * @struct Request-specific worker callbacks.
- */
-struct rd_kafka_admin_worker_cbs {
- /**< Protocol request callback which is called
- * to construct and send the request. */
- rd_kafka_admin_Request_cb_t *request;
-
- /**< Protocol response parser callback which is called
- * to translate the response to a rko_result op. */
- rd_kafka_admin_Response_parse_cb_t *parse;
-};
-
-/**
- * @struct Fanout request callbacks.
- */
-struct rd_kafka_admin_fanout_worker_cbs {
- /** Merge results from a fanned out request into the user response. */
- rd_kafka_admin_fanout_PartialResponse_cb_t *partial_response;
-
- /** Copy an accumulated result for storing into the rko_result. */
- rd_kafka_admin_fanout_CopyResult_cb_t *copy_result;
-
- /** Copy the original arguments, used by target ALL. */
- rd_kafka_admin_fanout_CopyArg_cb_t *copy_arg;
-};
-
-/* Forward declarations */
-static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk,
- rd_kafka_op_t *rko,
- rd_bool_t do_destroy);
-static void rd_kafka_AdminOptions_init(rd_kafka_t *rk,
- rd_kafka_AdminOptions_t *options);
-
-static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst,
- const rd_kafka_AdminOptions_t *src);
-
-static rd_kafka_op_res_t
-rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko);
-static rd_kafka_ConfigEntry_t *
-rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src);
-static void rd_kafka_ConfigEntry_free(void *ptr);
-static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque);
-
-static void rd_kafka_admin_handle_response(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque);
-
-static rd_kafka_op_res_t
-rd_kafka_admin_fanout_worker(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko_fanout);
-
-
-/**
- * @name Common admin request code
- * @{
- *
- *
- */
-
-/**
- * @brief Create a new admin_result op based on the request op \p rko_req.
- *
- * @remark This moves the rko_req's admin_request.args list from \p rko_req
- * to the returned rko. The \p rko_req args will be emptied.
- */
-static rd_kafka_op_t *rd_kafka_admin_result_new(rd_kafka_op_t *rko_req) {
- rd_kafka_op_t *rko_result;
- rd_kafka_op_t *rko_fanout;
-
- if ((rko_fanout = rko_req->rko_u.admin_request.fanout_parent)) {
- /* If this is a fanned out request the rko_result needs to be
- * handled by the fanout worker rather than the application. */
- rko_result = rd_kafka_op_new_cb(rko_req->rko_rk,
- RD_KAFKA_OP_ADMIN_RESULT,
- rd_kafka_admin_fanout_worker);
- /* Transfer fanout pointer to result */
- rko_result->rko_u.admin_result.fanout_parent = rko_fanout;
- rko_req->rko_u.admin_request.fanout_parent = NULL;
- /* Set event type based on original fanout ops reqtype,
- * e.g., ..OP_DELETERECORDS */
- rko_result->rko_u.admin_result.reqtype =
- rko_fanout->rko_u.admin_request.fanout.reqtype;
-
- } else {
- rko_result = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_RESULT);
-
- /* If this is fanout request (i.e., the parent OP_ADMIN_FANOUT
- * to fanned out requests) we need to use the original
- * application request type. */
- if (rko_req->rko_type == RD_KAFKA_OP_ADMIN_FANOUT)
- rko_result->rko_u.admin_result.reqtype =
- rko_req->rko_u.admin_request.fanout.reqtype;
- else
- rko_result->rko_u.admin_result.reqtype =
- rko_req->rko_type;
- }
-
- rko_result->rko_rk = rko_req->rko_rk;
-
- rko_result->rko_u.admin_result.opaque = rd_kafka_confval_get_ptr(
- &rko_req->rko_u.admin_request.options.opaque);
-
- /* Move request arguments (list) from request to result.
- * This is mainly so that partial_response() knows what arguments
- * were provided to the response's request it is merging. */
- rd_list_move(&rko_result->rko_u.admin_result.args,
- &rko_req->rko_u.admin_request.args);
-
- rko_result->rko_evtype = rko_req->rko_u.admin_request.reply_event_type;
-
- return rko_result;
-}
-
-
-/**
- * @brief Set error code and error string on admin_result op \p rko.
- */
-static void rd_kafka_admin_result_set_err0(rd_kafka_op_t *rko,
- rd_kafka_resp_err_t err,
- const char *fmt,
- va_list ap) {
- char buf[512];
-
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
-
- rko->rko_err = err;
-
- if (rko->rko_u.admin_result.errstr)
- rd_free(rko->rko_u.admin_result.errstr);
- rko->rko_u.admin_result.errstr = rd_strdup(buf);
-
- rd_kafka_dbg(rko->rko_rk, ADMIN, "ADMINFAIL",
- "Admin %s result error: %s",
- rd_kafka_op2str(rko->rko_u.admin_result.reqtype),
- rko->rko_u.admin_result.errstr);
-}
-
-/**
- * @sa rd_kafka_admin_result_set_err0
- */
-static RD_UNUSED RD_FORMAT(printf, 3, 4) void rd_kafka_admin_result_set_err(
- rd_kafka_op_t *rko,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- va_list ap;
-
- va_start(ap, fmt);
- rd_kafka_admin_result_set_err0(rko, err, fmt, ap);
- va_end(ap);
-}
-
-/**
- * @brief Enqueue admin_result on application's queue.
- */
-static RD_INLINE void rd_kafka_admin_result_enq(rd_kafka_op_t *rko_req,
- rd_kafka_op_t *rko_result) {
- rd_kafka_replyq_enq(&rko_req->rko_u.admin_request.replyq, rko_result,
- rko_req->rko_u.admin_request.replyq.version);
-}
-
-/**
- * @brief Set request-level error code and string in reply op.
- *
- * @remark This function will NOT destroy the \p rko_req, so don't forget to
- * call rd_kafka_admin_common_worker_destroy() when done with the rko.
- */
-static RD_FORMAT(printf,
- 3,
- 4) void rd_kafka_admin_result_fail(rd_kafka_op_t *rko_req,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- va_list ap;
- rd_kafka_op_t *rko_result;
-
- if (!rko_req->rko_u.admin_request.replyq.q)
- return;
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- va_start(ap, fmt);
- rd_kafka_admin_result_set_err0(rko_result, err, fmt, ap);
- va_end(ap);
-
- rd_kafka_admin_result_enq(rko_req, rko_result);
-}
-
-
-/**
- * @brief Send the admin request contained in \p rko upon receiving
- * a FindCoordinator response.
- *
- * @param opaque Must be an admin request op's eonce (rko_u.admin_request.eonce)
- *               (i.e., created by \c rd_kafka_admin_request_op_new)
- *
- * @remark To be used as a callback for \c rd_kafka_coord_req
- */
-static rd_kafka_resp_err_t
-rd_kafka_admin_coord_request(rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko_ignore,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_enq_once_t *eonce = opaque;
- rd_kafka_op_t *rko;
- char errstr[512];
- rd_kafka_resp_err_t err;
-
-
- rko = rd_kafka_enq_once_del_source_return(eonce, "coordinator request");
- if (!rko)
- /* Admin request has timed out and been destroyed */
- return RD_KAFKA_RESP_ERR__DESTROY;
-
- rd_kafka_enq_once_add_source(eonce, "coordinator response");
-
- err = rko->rko_u.admin_request.cbs->request(
- rkb, &rko->rko_u.admin_request.args,
- &rko->rko_u.admin_request.options, errstr, sizeof(errstr), replyq,
- rd_kafka_admin_handle_response, eonce);
- if (err) {
- rd_kafka_enq_once_del_source(eonce, "coordinator response");
- rd_kafka_admin_result_fail(
- rko, err, "%s worker failed to send request: %s",
- rd_kafka_op2str(rko->rko_type), errstr);
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_true /*destroy*/);
- }
- return err;
-}
-
-
-/**
- * @brief Return the topics list from a topic-related result object.
- */
-static const rd_kafka_topic_result_t **
-rd_kafka_admin_result_ret_topics(const rd_kafka_op_t *rko, size_t *cntp) {
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_CREATETOPICS ||
- reqtype == RD_KAFKA_OP_DELETETOPICS ||
- reqtype == RD_KAFKA_OP_CREATEPARTITIONS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_topic_result_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
-
-/**
- * @brief Return the ConfigResource list from a config-related result object.
- */
-static const rd_kafka_ConfigResource_t **
-rd_kafka_admin_result_ret_resources(const rd_kafka_op_t *rko, size_t *cntp) {
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_ALTERCONFIGS ||
- reqtype == RD_KAFKA_OP_DESCRIBECONFIGS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_ConfigResource_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
-
-/**
- * @brief Return the ACL result list from an ACL-related result object.
- */
-static const rd_kafka_acl_result_t **
-rd_kafka_admin_result_ret_acl_results(const rd_kafka_op_t *rko, size_t *cntp) {
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_CREATEACLS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_acl_result_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
-
-/**
- * @brief Return the ACL binding list from an ACL-related result object.
- */
-static const rd_kafka_AclBinding_t **
-rd_kafka_admin_result_ret_acl_bindings(const rd_kafka_op_t *rko, size_t *cntp) {
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_DESCRIBEACLS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_AclBinding_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
-
-/**
- * @brief Return the groups list from a group-related result object.
- */
-static const rd_kafka_group_result_t **
-rd_kafka_admin_result_ret_groups(const rd_kafka_op_t *rko, size_t *cntp) {
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_DELETEGROUPS ||
- reqtype == RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS ||
- reqtype == RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS ||
- reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_group_result_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
-
-/**
- * @brief Return the DeleteAcls response list from an ACL-related result object.
- */
-static const rd_kafka_DeleteAcls_result_response_t **
-rd_kafka_admin_result_ret_delete_acl_result_responses(const rd_kafka_op_t *rko,
- size_t *cntp) {
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_DELETEACLS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_DeleteAcls_result_response_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
-
-/**
- * @brief Create a new admin_request op of type \p optype and set up the
- * generic (type-independent) fields.
- *
- * The caller shall then populate the admin_request.args list
- * and enqueue the op on rk_ops for further processing work.
- *
- * @param cbs Callbacks, must reside in .data segment.
- * @param options Optional options, may be NULL to use defaults.
- *
- * @locks none
- * @locality application thread
- */
-static rd_kafka_op_t *
-rd_kafka_admin_request_op_new(rd_kafka_t *rk,
- rd_kafka_op_type_t optype,
- rd_kafka_event_type_t reply_event_type,
- const struct rd_kafka_admin_worker_cbs *cbs,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_q_t *rkq) {
- rd_kafka_op_t *rko;
-
- rd_assert(rk);
- rd_assert(rkq);
- rd_assert(cbs);
-
- rko = rd_kafka_op_new_cb(rk, optype, rd_kafka_admin_worker);
-
- rko->rko_u.admin_request.reply_event_type = reply_event_type;
-
- rko->rko_u.admin_request.cbs = (struct rd_kafka_admin_worker_cbs *)cbs;
-
- /* Make a copy of the options */
- if (options)
- rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options,
- options);
- else
- rd_kafka_AdminOptions_init(rk,
- &rko->rko_u.admin_request.options);
-
- /* Default to controller */
- rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER;
-
- /* Calculate absolute timeout */
- rko->rko_u.admin_request.abs_timeout =
- rd_timeout_init(rd_kafka_confval_get_int(
- &rko->rko_u.admin_request.options.request_timeout));
-
- /* Set up enq-op-once, which is triggered by either timer code
- * or future wait-controller code. */
- rko->rko_u.admin_request.eonce =
- rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- /* The timer itself must be started from the rdkafka main thread,
- * not here. */
-
- /* Set up replyq */
- rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0);
-
- rko->rko_u.admin_request.state = RD_KAFKA_ADMIN_STATE_INIT;
- return rko;
-}
-
-
-/**
- * @returns the remaining request timeout in milliseconds.
- */
-static RD_INLINE int rd_kafka_admin_timeout_remains(rd_kafka_op_t *rko) {
- return rd_timeout_remains(rko->rko_u.admin_request.abs_timeout);
-}
-
-/**
- * @returns the remaining request timeout in microseconds.
- */
-static RD_INLINE rd_ts_t rd_kafka_admin_timeout_remains_us(rd_kafka_op_t *rko) {
- return rd_timeout_remains_us(rko->rko_u.admin_request.abs_timeout);
-}
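-
-/*
- * Illustrative sketch (standard POSIX C, not librdkafka code) of the
- * absolute-timeout pattern behind abs_timeout and the two helpers
- * above: the relative request timeout (timeout_ms below) is converted
- * to an absolute deadline once, and every subsequent worker invocation
- * derives the remaining time from that deadline, so retries can never
- * extend it:
- *
- * @code
- *   #include <time.h>
- *
- *   static long long now_us(void) {
- *           struct timespec ts;
- *           clock_gettime(CLOCK_MONOTONIC, &ts);
- *           return (long long)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
- *   }
- *
- *   long long abs_timeout = now_us() + timeout_ms * 1000LL;  // set once
- *
- *   // Derived on each worker invocation:
- *   if (abs_timeout - now_us() <= 0)
- *           handle_timeout();
- * @endcode
- */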
-
-
-/**
- * @brief Timer timeout callback for the admin rko's eonce object.
- */
-static void rd_kafka_admin_eonce_timeout_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_enq_once_t *eonce = arg;
-
- rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT,
- "timeout timer");
-}
-
-
-
-/**
- * @brief Common worker destroy to be called in destroy: label
- * in worker.
- */
-static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk,
- rd_kafka_op_t *rko,
- rd_bool_t do_destroy) {
- int timer_was_stopped;
-
- /* Free resources for this op. */
- timer_was_stopped = rd_kafka_timer_stop(
- &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true);
-
-
- if (rko->rko_u.admin_request.eonce) {
- /* Remove the stopped timer's eonce reference since its
- * callback will not have fired if we stopped the timer. */
- if (timer_was_stopped)
- rd_kafka_enq_once_del_source(
- rko->rko_u.admin_request.eonce, "timeout timer");
-
- /* This is thread-safe to do even if there are outstanding
- * timers or wait-controller references to the eonce
- * since they only hold direct reference to the eonce,
- * not the rko (the eonce holds a reference to the rko but
- * it is cleared here). */
- rd_kafka_enq_once_destroy(rko->rko_u.admin_request.eonce);
- rko->rko_u.admin_request.eonce = NULL;
- }
-
- if (do_destroy)
- rd_kafka_op_destroy(rko);
-}
-
-
-
-/**
- * @brief Asynchronously look up a broker.
- * To be called repeatedly from each invocation of the worker
- * when in state RD_KAFKA_ADMIN_STATE_WAIT_BROKER until
- * a valid rkb is returned.
- *
- * @returns the broker rkb with refcount increased, or NULL if not yet
- * available.
- */
-static rd_kafka_broker_t *rd_kafka_admin_common_get_broker(rd_kafka_t *rk,
- rd_kafka_op_t *rko,
- int32_t broker_id) {
- rd_kafka_broker_t *rkb;
-
- rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up broker %" PRId32,
- rd_kafka_op2str(rko->rko_type), broker_id);
-
- /* Since we're iterating over this broker_async() call
- * (asynchronously) until a broker is available (or timeout),
- * we need to re-enable the eonce to be triggered again (which
- * is not necessary the first time we get here, but there
- * is no harm doing it then either). */
- rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- /* Look up the broker asynchronously, if the broker
- * is not available the eonce is registered for broker
- * state changes which will cause our function to be called
- * again as soon as (any) broker state changes.
- * When we are called again we perform the broker lookup
- * again and hopefully get an rkb back, otherwise defer a new
- * async wait. Repeat until success or timeout. */
- if (!(rkb = rd_kafka_broker_get_async(
- rk, broker_id, RD_KAFKA_BROKER_STATE_UP,
- rko->rko_u.admin_request.eonce))) {
- /* Broker not available, wait asynchronously
- * for broker metadata code to trigger eonce. */
- return NULL;
- }
-
- rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: broker %" PRId32 " is %s",
- rd_kafka_op2str(rko->rko_type), broker_id, rkb->rkb_name);
-
- return rkb;
-}
-
-
-/**
- * @brief Asynchronously look up the controller.
- * To be called repeatedly from each invocation of the worker
- * when in state RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER until
- * a valid rkb is returned.
- *
- * @returns the controller rkb with refcount increased, or NULL if not yet
- * available.
- */
-static rd_kafka_broker_t *
-rd_kafka_admin_common_get_controller(rd_kafka_t *rk, rd_kafka_op_t *rko) {
- rd_kafka_broker_t *rkb;
-
- rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up controller",
- rd_kafka_op2str(rko->rko_type));
-
- /* Since we're iterating over this controller_async() call
- * (asynchronously) until a controller is available (or timeout),
- * we need to re-enable the eonce to be triggered again (which
- * is not necessary the first time we get here, but there
- * is no harm doing it then either). */
- rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- /* Look up the controller asynchronously, if the controller
- * is not available the eonce is registered for broker
- * state changes which will cause our function to be called
- * again as soon as (any) broker state changes.
- * When we are called again we perform the controller lookup
- * again and hopefully get an rkb back, otherwise defer a new
- * async wait. Repeat until success or timeout. */
- if (!(rkb = rd_kafka_broker_controller_async(
- rk, RD_KAFKA_BROKER_STATE_UP,
- rko->rko_u.admin_request.eonce))) {
- /* Controller not available, wait asynchronously
- * for controller code to trigger eonce. */
- return NULL;
- }
-
- rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: controller %s",
- rd_kafka_op2str(rko->rko_type), rkb->rkb_name);
-
- return rkb;
-}
-
-
-/**
- * @brief Asynchronously look up current list of broker ids until available.
- * Bootstrap and logical brokers are excluded from the list.
- *
- * To be called repeatedly from each invocation of the worker
- * when in state RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST until
- * a not-NULL rd_list_t * is returned.
- *
- * @param rk Client instance.
- * @param rko Op containing the admin request eonce to use for the
- * async callback.
- * @return List of int32_t with broker nodeids when ready, or NULL if not
- * yet available, in which case the eonce callback will be
- * triggered later.
- */
-static rd_list_t *
-rd_kafka_admin_common_brokers_get_nodeids(rd_kafka_t *rk, rd_kafka_op_t *rko) {
- rd_list_t *broker_ids;
-
- rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up brokers",
- rd_kafka_op2str(rko->rko_type));
-
- /* Since we're iterating over this rd_kafka_brokers_get_nodeids_async()
- * call (asynchronously) until a nodeids list is available (or timeout),
- * we need to re-enable the eonce to be triggered again (which
- * is not necessary the first time we get here, but there
- * is no harm doing it then either). */
- rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- /* Look up the nodeids list asynchronously, if it's
- * not available the eonce is registered for broker
- * state changes which will cause our function to be called
- * again as soon as (any) broker state changes.
- * When we are called again we perform the same lookup
- * again and hopefully get a list of nodeids again,
- * otherwise defer a new async wait.
- * Repeat until success or timeout. */
- if (!(broker_ids = rd_kafka_brokers_get_nodeids_async(
- rk, rko->rko_u.admin_request.eonce))) {
- /* nodeids list not available, wait asynchronously
- * for the eonce to be triggered. */
- return NULL;
- }
-
- rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: %d broker(s)",
- rd_kafka_op2str(rko->rko_type), rd_list_cnt(broker_ids));
-
- return broker_ids;
-}
-
-
-
-/**
- * @brief Handle response from broker by triggering worker callback.
- *
- * @param opaque is the eonce from the worker protocol request call.
- */
-static void rd_kafka_admin_handle_response(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_enq_once_t *eonce = opaque;
- rd_kafka_op_t *rko;
-
- /* From ...add_source("send") */
- rko = rd_kafka_enq_once_disable(eonce);
-
- if (!rko) {
- /* The operation timed out and the worker was
- * dismantled while we were waiting for broker response,
- * do nothing - everything has been cleaned up. */
- rd_kafka_dbg(
- rk, ADMIN, "ADMIN",
- "Dropping outdated %sResponse with return code %s",
- request ? rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey)
- : "???",
- rd_kafka_err2str(err));
- return;
- }
-
- /* Attach reply buffer to rko for parsing in the worker. */
- rd_assert(!rko->rko_u.admin_request.reply_buf);
- rko->rko_u.admin_request.reply_buf = reply;
- rko->rko_err = err;
-
- if (rko->rko_op_cb(rk, NULL, rko) == RD_KAFKA_OP_RES_HANDLED)
- rd_kafka_op_destroy(rko);
-}
-
-/**
- * @brief Generic handler for protocol responses, calls the admin ops'
- * Response_parse_cb and enqueues the result to the caller's queue.
- */
-static void rd_kafka_admin_response_parse(rd_kafka_op_t *rko) {
- rd_kafka_resp_err_t err;
- rd_kafka_op_t *rko_result = NULL;
- char errstr[512];
-
- if (rko->rko_err) {
- rd_kafka_admin_result_fail(rko, rko->rko_err,
- "%s worker request failed: %s",
- rd_kafka_op2str(rko->rko_type),
- rd_kafka_err2str(rko->rko_err));
- return;
- }
-
- /* Response received.
- * Let callback parse response and provide result in rko_result
- * which is then enqueued on the reply queue. */
- err = rko->rko_u.admin_request.cbs->parse(
- rko, &rko_result, rko->rko_u.admin_request.reply_buf, errstr,
- sizeof(errstr));
- if (err) {
- rd_kafka_admin_result_fail(
- rko, err, "%s worker failed to parse response: %s",
- rd_kafka_op2str(rko->rko_type), errstr);
- return;
- }
-
- rd_assert(rko_result);
-
- /* Enqueue result on application queue, we're done. */
- rd_kafka_admin_result_enq(rko, rko_result);
-}
-
-/**
- * @brief Generic handler for coord_req() responses.
- */
-static void rd_kafka_admin_coord_response_parse(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_op_t *rko_result;
- rd_kafka_enq_once_t *eonce = opaque;
- rd_kafka_op_t *rko;
- char errstr[512];
-
- rko =
- rd_kafka_enq_once_del_source_return(eonce, "coordinator response");
- if (!rko)
- /* Admin request has timed out and been destroyed */
- return;
-
- if (err) {
- rd_kafka_admin_result_fail(
- rko, err, "%s worker coordinator request failed: %s",
- rd_kafka_op2str(rko->rko_type), rd_kafka_err2str(err));
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_true /*destroy*/);
- return;
- }
-
- err = rko->rko_u.admin_request.cbs->parse(rko, &rko_result, rkbuf,
- errstr, sizeof(errstr));
- if (err) {
- rd_kafka_admin_result_fail(
- rko, err,
- "%s worker failed to parse coordinator %sResponse: %s",
- rd_kafka_op2str(rko->rko_type),
- rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), errstr);
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_true /*destroy*/);
- return;
- }
-
- rd_assert(rko_result);
-
- /* Enqueue result on application queue, we're done. */
- rd_kafka_admin_result_enq(rko, rko_result);
-}
-
-static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk,
- rd_kafka_op_t *rko,
- rd_list_t *nodeids);
-
-
-/**
- * @brief Common worker state machine handling regardless of request type.
- *
- * Tasks:
- * - Sets up timeout on first call.
- * - Checks for timeout.
- * - Checks for and fails on errors.
- * - Async Controller and broker lookups
- * - Calls the Request callback
- * - Calls the parse callback
- * - Result reply
- * - Destruction of rko
- *
- * rko->rko_err may be one of:
- * RD_KAFKA_RESP_ERR_NO_ERROR, or
- * RD_KAFKA_RESP_ERR__DESTROY for queue destruction cleanup, or
- * RD_KAFKA_RESP_ERR__TIMED_OUT if request has timed out,
- * or any other error code triggered by other parts of the code.
- *
- * @returns a hint to the op dispatch code whether the rko should be destroyed or not.
- */
-static rd_kafka_op_res_t
-rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
- const char *name = rd_kafka_op2str(rko->rko_type);
- rd_ts_t timeout_in;
- rd_kafka_broker_t *rkb = NULL;
- rd_kafka_resp_err_t err;
- rd_list_t *nodeids = NULL;
- char errstr[512];
-
- /* ADMIN_FANOUT handled by fanout_worker() */
- rd_assert((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) !=
- RD_KAFKA_OP_ADMIN_FANOUT);
-
- if (rd_kafka_terminating(rk)) {
- rd_kafka_dbg(
- rk, ADMIN, name,
- "%s worker called in state %s: "
- "handle is terminating: %s",
- name,
- rd_kafka_admin_state_desc[rko->rko_u.admin_request.state],
- rd_kafka_err2str(rko->rko_err));
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY,
- "Handle is terminating: %s",
- rd_kafka_err2str(rko->rko_err));
- goto destroy;
- }
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) {
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY,
- "Destroyed");
- goto destroy; /* rko being destroyed (silent) */
- }
-
- rd_kafka_dbg(rk, ADMIN, name, "%s worker called in state %s: %s", name,
- rd_kafka_admin_state_desc[rko->rko_u.admin_request.state],
- rd_kafka_err2str(rko->rko_err));
-
- rd_assert(thrd_is_current(rko->rko_rk->rk_thread));
-
- /* Check for errors raised asynchronously (e.g., by timer) */
- if (rko->rko_err) {
- rd_kafka_admin_result_fail(
- rko, rko->rko_err, "Failed while %s: %s",
- rd_kafka_admin_state_desc[rko->rko_u.admin_request.state],
- rd_kafka_err2str(rko->rko_err));
- goto destroy;
- }
-
- /* Check for timeout */
- timeout_in = rd_kafka_admin_timeout_remains_us(rko);
- if (timeout_in <= 0) {
- rd_kafka_admin_result_fail(
- rko, RD_KAFKA_RESP_ERR__TIMED_OUT, "Timed out %s",
- rd_kafka_admin_state_desc[rko->rko_u.admin_request.state]);
- goto destroy;
- }
-
-redo:
- switch (rko->rko_u.admin_request.state) {
- case RD_KAFKA_ADMIN_STATE_INIT: {
- int32_t broker_id;
-
- /* First call. */
-
- /* Set up timeout timer. */
- rd_kafka_enq_once_add_source(rko->rko_u.admin_request.eonce,
- "timeout timer");
- rd_kafka_timer_start_oneshot(
- &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true,
- timeout_in, rd_kafka_admin_eonce_timeout_cb,
- rko->rko_u.admin_request.eonce);
-
- /* Use explicitly specified broker_id, if available. */
- broker_id = (int32_t)rd_kafka_confval_get_int(
- &rko->rko_u.admin_request.options.broker);
-
- if (broker_id != -1) {
- rd_kafka_dbg(rk, ADMIN, name,
- "%s using explicitly "
- "set broker id %" PRId32
- " rather than %" PRId32,
- name, broker_id,
- rko->rko_u.admin_request.broker_id);
- rko->rko_u.admin_request.broker_id = broker_id;
- } else {
- /* Default to controller */
- broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER;
- }
-
- /* Resolve target broker(s) */
- switch (rko->rko_u.admin_request.broker_id) {
- case RD_KAFKA_ADMIN_TARGET_CONTROLLER:
- /* Controller */
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER;
- goto redo; /* Trigger next state immediately */
-
- case RD_KAFKA_ADMIN_TARGET_COORDINATOR:
- /* Group (or other) coordinator */
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE;
- rd_kafka_enq_once_add_source(
- rko->rko_u.admin_request.eonce,
- "coordinator request");
- rd_kafka_coord_req(
- rk, rko->rko_u.admin_request.coordtype,
- rko->rko_u.admin_request.coordkey,
- rd_kafka_admin_coord_request, NULL, 0 /* no delay*/,
- rd_kafka_admin_timeout_remains(rko),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_admin_coord_response_parse,
- rko->rko_u.admin_request.eonce);
- /* Wait asynchronously for broker response, which will
- * trigger the eonce and worker to be called again. */
- return RD_KAFKA_OP_RES_KEEP;
- case RD_KAFKA_ADMIN_TARGET_ALL:
- /* All brokers */
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST;
- goto redo; /* Trigger next state immediately */
-
- case RD_KAFKA_ADMIN_TARGET_FANOUT:
- /* Shouldn't come here, fanouts are handled by
- * fanout_worker() */
- RD_NOTREACHED();
- return RD_KAFKA_OP_RES_KEEP;
-
- default:
- /* Specific broker */
- rd_assert(rko->rko_u.admin_request.broker_id >= 0);
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_WAIT_BROKER;
- goto redo; /* Trigger next state immediately */
- }
- }
-
-
- case RD_KAFKA_ADMIN_STATE_WAIT_BROKER:
- /* Broker lookup */
- if (!(rkb = rd_kafka_admin_common_get_broker(
- rk, rko, rko->rko_u.admin_request.broker_id))) {
- /* Still waiting for broker to become available */
- return RD_KAFKA_OP_RES_KEEP;
- }
-
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST;
- goto redo;
-
- case RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER:
- if (!(rkb = rd_kafka_admin_common_get_controller(rk, rko))) {
- /* Still waiting for controller to become available. */
- return RD_KAFKA_OP_RES_KEEP;
- }
-
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST;
- goto redo;
-
- case RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST:
- /* Wait for a valid list of brokers to be available. */
- if (!(nodeids =
- rd_kafka_admin_common_brokers_get_nodeids(rk, rko))) {
- /* Still waiting for brokers to become available. */
- return RD_KAFKA_OP_RES_KEEP;
- }
-
- rd_kafka_admin_fanout_op_distribute(rk, rko, nodeids);
- rd_list_destroy(nodeids);
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS;
- goto redo;
-
- case RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS:
- /* This op can be destroyed, as a new fanout op has been
- * sent, and the response will be enqueued there. */
- goto destroy;
-
- case RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST:
- /* Got broker, send protocol request. */
-
- /* Make sure we're called from a 'goto redo' where
- * the rkb was set. */
- rd_assert(rkb);
-
- /* Still need to use the eonce since this worker may
- * time out while waiting for response from broker, in which
- * case the broker response will hit an empty eonce (ok). */
- rd_kafka_enq_once_add_source(rko->rko_u.admin_request.eonce,
- "send");
-
- /* Send request (async) */
- err = rko->rko_u.admin_request.cbs->request(
- rkb, &rko->rko_u.admin_request.args,
- &rko->rko_u.admin_request.options, errstr, sizeof(errstr),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_admin_handle_response,
- rko->rko_u.admin_request.eonce);
-
- /* Release broker refcount from get_broker()/get_controller() */
- rd_kafka_broker_destroy(rkb);
-
- if (err) {
- rd_kafka_enq_once_del_source(
- rko->rko_u.admin_request.eonce, "send");
- rd_kafka_admin_result_fail(rko, err, "%s", errstr);
- goto destroy;
- }
-
- rko->rko_u.admin_request.state =
- RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE;
-
- /* Wait asynchronously for broker response, which will
- * trigger the eonce and worker to be called again. */
- return RD_KAFKA_OP_RES_KEEP;
-
-
- case RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE:
- rd_kafka_admin_response_parse(rko);
- goto destroy;
- }
-
- return RD_KAFKA_OP_RES_KEEP;
-
-destroy:
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_false /*don't destroy*/);
- return RD_KAFKA_OP_RES_HANDLED; /* triggers op_destroy() */
-}
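-
-/*
- * State-transition summary for rd_kafka_admin_worker() (derived from
- * the switch above):
- *
- *   INIT -> WAIT_CONTROLLER   (TARGET_CONTROLLER)
- *   INIT -> WAIT_BROKER       (explicitly specified broker id)
- *   INIT -> WAIT_BROKER_LIST  (TARGET_ALL)
- *   INIT -> WAIT_RESPONSE     (TARGET_COORDINATOR, via coord_req())
- *   WAIT_BROKER | WAIT_CONTROLLER -> CONSTRUCT_REQUEST (rkb available)
- *   WAIT_BROKER_LIST -> WAIT_FANOUTS (op destroyed after distribution)
- *   CONSTRUCT_REQUEST -> WAIT_RESPONSE (request sent to broker)
- *   WAIT_RESPONSE -> response parsed, result enqueued, op destroyed
- */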
-
-/**
- * @brief Create a new admin_fanout op of type \p req_type and set up the
- * generic (type-independent) fields.
- *
- * The caller shall then populate the \c admin_fanout.requests list,
- * initialize the \c admin_fanout.responses list,
- * set the initial \c admin_fanout.outstanding value,
- * and enqueue the op on rk_ops for further processing work.
- *
- * @param cbs Callbacks, must reside in .data segment.
- * @param options Optional options, may be NULL to use defaults.
- * @param rkq is the application reply queue.
- *
- * @locks none
- * @locality application thread
- */
-static rd_kafka_op_t *
-rd_kafka_admin_fanout_op_new(rd_kafka_t *rk,
- rd_kafka_op_type_t req_type,
- rd_kafka_event_type_t reply_event_type,
- const struct rd_kafka_admin_fanout_worker_cbs *cbs,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_q_t *rkq) {
- rd_kafka_op_t *rko;
-
- rd_assert(rk);
- rd_assert(rkq);
- rd_assert(cbs);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_FANOUT);
- rko->rko_rk = rk;
-
- rko->rko_u.admin_request.reply_event_type = reply_event_type;
-
- rko->rko_u.admin_request.fanout.cbs =
- (struct rd_kafka_admin_fanout_worker_cbs *)cbs;
-
- /* Make a copy of the options */
- if (options)
- rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options,
- options);
- else
- rd_kafka_AdminOptions_init(rk,
- &rko->rko_u.admin_request.options);
-
- rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_FANOUT;
-
- /* Calculate absolute timeout */
- rko->rko_u.admin_request.abs_timeout =
- rd_timeout_init(rd_kafka_confval_get_int(
- &rko->rko_u.admin_request.options.request_timeout));
-
- /* Set up replyq */
- rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0);
-
- rko->rko_u.admin_request.state = RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS;
-
- rko->rko_u.admin_request.fanout.reqtype = req_type;
-
- return rko;
-}
-
-/**
- * @brief Duplicate the fanout operation for each nodeid passed and
- * enqueue each new operation. Use the same fanout_parent as
- * the passed \p rko.
- *
- * @param rk Client instance.
- * @param rko Operation to distribute to each broker.
- * @param nodeids List of int32_t with the broker nodeids.
- */
-static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk,
- rd_kafka_op_t *rko,
- rd_list_t *nodeids) {
- int i, nodeids_cnt, timeout_remains;
- rd_kafka_op_t *rko_fanout;
- rd_kafka_AdminOptions_t *options = &rko->rko_u.admin_request.options;
- timeout_remains = rd_kafka_admin_timeout_remains(rko);
- rd_kafka_AdminOptions_set_request_timeout(options, timeout_remains,
- NULL, 0);
-
- nodeids_cnt = rd_list_cnt(nodeids);
- rko_fanout = rko->rko_u.admin_request.fanout_parent;
- rko_fanout->rko_u.admin_request.fanout.outstanding = (int)nodeids_cnt;
- rko->rko_u.admin_request.fanout_parent = NULL;
-
- /* Create individual request ops for each node */
- for (i = 0; i < nodeids_cnt; i++) {
- rd_kafka_op_t *rko_dup = rd_kafka_admin_request_op_new(
- rk, rko->rko_type,
- rko->rko_u.admin_request.reply_event_type,
- rko->rko_u.admin_request.cbs, options, rk->rk_ops);
-
- rko_dup->rko_u.admin_request.fanout_parent = rko_fanout;
- rko_dup->rko_u.admin_request.broker_id =
- rd_list_get_int32(nodeids, i);
-
- rd_list_init_copy(&rko_dup->rko_u.admin_request.args,
- &rko->rko_u.admin_request.args);
- rd_list_copy_to(
- &rko_dup->rko_u.admin_request.args,
- &rko->rko_u.admin_request.args,
- rko_fanout->rko_u.admin_request.fanout.cbs->copy_arg, NULL);
-
- rd_kafka_q_enq(rk->rk_ops, rko_dup);
- }
-}
-
-
-/**
- * @brief Common fanout worker state machine handling regardless of request type.
- *
- * @param rko Result of a fanned out operation, e.g., DELETERECORDS result.
- *
- * Tasks:
- * - Checks for and responds to client termination
- * - Polls for fanned out responses
- * - Calls the partial response callback
- * - Calls the merge responses callback upon receipt of all partial responses
- * - Destruction of rko
- *
- * rko->rko_err may be one of:
- * RD_KAFKA_RESP_ERR_NO_ERROR, or
- * RD_KAFKA_RESP_ERR__DESTROY for queue destruction cleanup.
- *
- * @returns a hint to the op dispatch code whether the rko should be destroyed or not.
- */
-static rd_kafka_op_res_t rd_kafka_admin_fanout_worker(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_op_t *rko_fanout = rko->rko_u.admin_result.fanout_parent;
- const char *name =
- rd_kafka_op2str(rko_fanout->rko_u.admin_request.fanout.reqtype);
- rd_kafka_op_t *rko_result;
-
- RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_ADMIN_RESULT);
- RD_KAFKA_OP_TYPE_ASSERT(rko_fanout, RD_KAFKA_OP_ADMIN_FANOUT);
-
- rd_assert(rko_fanout->rko_u.admin_request.fanout.outstanding > 0);
- rko_fanout->rko_u.admin_request.fanout.outstanding--;
-
- rko->rko_u.admin_result.fanout_parent = NULL;
-
- if (rd_kafka_terminating(rk)) {
- rd_kafka_dbg(rk, ADMIN, name,
- "%s fanout worker called for fanned out op %s: "
- "handle is terminating: %s",
- name, rd_kafka_op2str(rko->rko_type),
- rd_kafka_err2str(rko_fanout->rko_err));
- if (!rko->rko_err)
- rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
- }
-
- rd_kafka_dbg(rk, ADMIN, name,
- "%s fanout worker called for %s with %d request(s) "
- "outstanding: %s",
- name, rd_kafka_op2str(rko->rko_type),
- rko_fanout->rko_u.admin_request.fanout.outstanding,
- rd_kafka_err2str(rko_fanout->rko_err));
-
- /* Add partial response to rko_fanout's result list. */
- rko_fanout->rko_u.admin_request.fanout.cbs->partial_response(rko_fanout,
- rko);
-
- if (rko_fanout->rko_u.admin_request.fanout.outstanding > 0)
- /* Wait for outstanding requests to finish */
- return RD_KAFKA_OP_RES_HANDLED;
-
- rko_result = rd_kafka_admin_result_new(rko_fanout);
- rd_list_init_copy(&rko_result->rko_u.admin_result.results,
- &rko_fanout->rko_u.admin_request.fanout.results);
- rd_list_copy_to(&rko_result->rko_u.admin_result.results,
- &rko_fanout->rko_u.admin_request.fanout.results,
- rko_fanout->rko_u.admin_request.fanout.cbs->copy_result,
- NULL);
-
- /* Enqueue result on application queue, we're done. */
- rd_kafka_replyq_enq(&rko_fanout->rko_u.admin_request.replyq, rko_result,
- rko_fanout->rko_u.admin_request.replyq.version);
-
- /* All fanned out requests are handled: destroy the fanout parent. */
- if (rko_fanout->rko_u.admin_request.fanout.outstanding == 0)
- rd_kafka_op_destroy(rko_fanout);
-
- return RD_KAFKA_OP_RES_HANDLED; /* triggers op_destroy(rko) */
-}
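-
-/*
- * Fanout lifecycle, as implemented above: a parent OP_ADMIN_FANOUT op
- * is created with a single outstanding request, fanout.outstanding is
- * then set to the broker count by rd_kafka_admin_fanout_op_distribute(),
- * which enqueues one child admin_request op per broker; each child
- * result decrements fanout.outstanding in this worker, and when it
- * reaches zero the partial responses are merged and the combined
- * result is enqueued on the application's reply queue.
- */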
-
-/**
- * @brief Create a new operation that targets all the brokers.
- * The operation consists of a fanout parent that is reused, and a
- * fanout operation that is duplicated for each broker found.
- *
- * @param rk Client instance.
- * @param optype Operation type.
- * @param reply_event_type Reply event type.
- * @param cbs Fanned out op callbacks.
- * @param fanout_cbs Fanout parent op callbacks.
- * @param result_free Callback for freeing the result list.
- * @param options Operation options.
- * @param rkq Result queue.
- * @return The newly created op targeting all the brokers.
- *
- * @sa Use rd_kafka_op_destroy() to release it.
- */
-static rd_kafka_op_t *rd_kafka_admin_request_op_target_all_new(
- rd_kafka_t *rk,
- rd_kafka_op_type_t optype,
- rd_kafka_event_type_t reply_event_type,
- const struct rd_kafka_admin_worker_cbs *cbs,
- const struct rd_kafka_admin_fanout_worker_cbs *fanout_cbs,
- void (*result_free)(void *),
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_q_t *rkq) {
- rd_kafka_op_t *rko, *rko_fanout;
-
- rko_fanout = rd_kafka_admin_fanout_op_new(rk, optype, reply_event_type,
- fanout_cbs, options, rkq);
-
- rko = rd_kafka_admin_request_op_new(rk, optype, reply_event_type, cbs,
- options, rk->rk_ops);
-
- rko_fanout->rko_u.admin_request.fanout.outstanding = 1;
- rko->rko_u.admin_request.fanout_parent = rko_fanout;
- rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_ALL;
-
- rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, (int)1,
- result_free);
-
- return rko;
-}
-
-/**@}*/
-
-
-/**
- * @name Generic AdminOptions
- * @{
- *
- *
- */
-
-rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options,
- int timeout_ms,
- char *errstr,
- size_t errstr_size) {
- return rd_kafka_confval_set_type(&options->request_timeout,
- RD_KAFKA_CONFVAL_INT, &timeout_ms,
- errstr, errstr_size);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options,
- int timeout_ms,
- char *errstr,
- size_t errstr_size) {
- return rd_kafka_confval_set_type(&options->operation_timeout,
- RD_KAFKA_CONFVAL_INT, &timeout_ms,
- errstr, errstr_size);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options,
- int true_or_false,
- char *errstr,
- size_t errstr_size) {
- return rd_kafka_confval_set_type(&options->validate_only,
- RD_KAFKA_CONFVAL_INT, &true_or_false,
- errstr, errstr_size);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_incremental(rd_kafka_AdminOptions_t *options,
- int true_or_false,
- char *errstr,
- size_t errstr_size) {
- rd_snprintf(errstr, errstr_size,
- "Incremental updates currently not supported, see KIP-248");
- return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
-
- /* NOTREACHED: enable when incremental updates (KIP-248)
- * are supported. */
- return rd_kafka_confval_set_type(&options->incremental,
- RD_KAFKA_CONFVAL_INT, &true_or_false,
- errstr, errstr_size);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options,
- int32_t broker_id,
- char *errstr,
- size_t errstr_size) {
- int ibroker_id = (int)broker_id;
-
- return rd_kafka_confval_set_type(&options->broker, RD_KAFKA_CONFVAL_INT,
- &ibroker_id, errstr, errstr_size);
-}
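-
-/*
- * Example (illustrative sketch, not part of this file): typical
- * AdminOptions usage from the application. `rk` and `my_opaque` are
- * assumed to exist in the caller's scope:
- *
- * @code
- *   char errstr[256];
- *   rd_kafka_AdminOptions_t *options =
- *           rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
- *
- *   if (rd_kafka_AdminOptions_set_request_timeout(options, 10 * 1000,
- *                                                 errstr, sizeof(errstr)))
- *           fprintf(stderr, "set_request_timeout: %s\n", errstr);
- *
- *   rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- *
- *   // ...pass `options` to an Admin API call, then:
- *   rd_kafka_AdminOptions_destroy(options);
- * @endcode
- */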
-
-rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(
- rd_kafka_AdminOptions_t *options,
- int true_or_false) {
- char errstr[512];
- rd_kafka_resp_err_t err = rd_kafka_confval_set_type(
- &options->require_stable_offsets, RD_KAFKA_CONFVAL_INT,
- &true_or_false, errstr, sizeof(errstr));
- return !err ? NULL : rd_kafka_error_new(err, "%s", errstr);
-}
-
-rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(
- rd_kafka_AdminOptions_t *options,
- const rd_kafka_consumer_group_state_t *consumer_group_states,
- size_t consumer_group_states_cnt) {
- size_t i;
- char errstr[512];
- rd_kafka_resp_err_t err;
- rd_list_t *states_list = rd_list_new(0, NULL);
- rd_list_init_int32(states_list, consumer_group_states_cnt);
- uint64_t states_bitmask = 0;
-
- if (RD_KAFKA_CONSUMER_GROUP_STATE__CNT >= 64) {
- /* rd_assert() on a bare string literal always passes:
- * negate its first character so the assertion actually
- * fires if the state count ever outgrows the 64-bit
- * bitmask. */
- rd_assert(!*"BUG: cannot handle states with a bitmask anymore");
- }
-
- for (i = 0; i < consumer_group_states_cnt; i++) {
- uint64_t state_bit;
- rd_kafka_consumer_group_state_t state =
- consumer_group_states[i];
-
- if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) {
- rd_list_destroy(states_list);
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Invalid group state value");
- }
-
- state_bit = (uint64_t)1 << state; /* match bitmask width */
- if (states_bitmask & state_bit) {
- rd_list_destroy(states_list);
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate states not allowed");
- } else {
- states_bitmask = states_bitmask | state_bit;
- rd_list_set_int32(states_list, (int32_t)i, state);
- }
- }
- err = rd_kafka_confval_set_type(&options->match_consumer_group_states,
- RD_KAFKA_CONFVAL_PTR, states_list,
- errstr, sizeof(errstr));
- if (err) {
- rd_list_destroy(states_list);
- }
- return !err ? NULL : rd_kafka_error_new(err, "%s", errstr);
-}
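-
-/*
- * Example (illustrative sketch, not part of this file): filtering
- * ListConsumerGroups by group state. `rk` and `queue` are assumed to
- * exist in the caller's scope:
- *
- * @code
- *   rd_kafka_consumer_group_state_t states[] = {
- *           RD_KAFKA_CONSUMER_GROUP_STATE_STABLE,
- *           RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY};
- *   rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
- *           rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
- *   rd_kafka_error_t *error =
- *           rd_kafka_AdminOptions_set_match_consumer_group_states(
- *                   options, states, 2);
- *
- *   if (error) {
- *           fprintf(stderr, "%s\n", rd_kafka_error_string(error));
- *           rd_kafka_error_destroy(error);
- *   } else {
- *           rd_kafka_ListConsumerGroups(rk, options, queue);
- *   }
- *   rd_kafka_AdminOptions_destroy(options);
- * @endcode
- */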
-
-void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
- void *opaque) {
- rd_kafka_confval_set_type(&options->opaque, RD_KAFKA_CONFVAL_PTR,
- opaque, NULL, 0);
-}
-
-
-/**
- * @brief Initialize and set up defaults for AdminOptions
- */
-static void rd_kafka_AdminOptions_init(rd_kafka_t *rk,
- rd_kafka_AdminOptions_t *options) {
- rd_kafka_confval_init_int(&options->request_timeout, "request_timeout",
- 0, 3600 * 1000,
- rk->rk_conf.admin.request_timeout_ms);
-
- if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
- options->for_api == RD_KAFKA_ADMIN_OP_CREATETOPICS ||
- options->for_api == RD_KAFKA_ADMIN_OP_DELETETOPICS ||
- options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS ||
- options->for_api == RD_KAFKA_ADMIN_OP_DELETERECORDS)
- rd_kafka_confval_init_int(&options->operation_timeout,
- "operation_timeout", -1, 3600 * 1000,
- rk->rk_conf.admin.request_timeout_ms);
- else
- rd_kafka_confval_disable(&options->operation_timeout,
- "operation_timeout");
-
- if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
- options->for_api == RD_KAFKA_ADMIN_OP_CREATETOPICS ||
- options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS ||
- options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS)
- rd_kafka_confval_init_int(&options->validate_only,
- "validate_only", 0, 1, 0);
- else
- rd_kafka_confval_disable(&options->validate_only,
- "validate_only");
-
- if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
- options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS)
- rd_kafka_confval_init_int(&options->incremental, "incremental",
- 0, 1, 0);
- else
- rd_kafka_confval_disable(&options->incremental, "incremental");
-
- if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
- options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS)
- rd_kafka_confval_init_int(&options->require_stable_offsets,
- "require_stable_offsets", 0, 1, 0);
- else
- rd_kafka_confval_disable(&options->require_stable_offsets,
- "require_stable_offsets");
-
- if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
- options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS)
- rd_kafka_confval_init_ptr(&options->match_consumer_group_states,
- "match_consumer_group_states");
- else
- rd_kafka_confval_disable(&options->match_consumer_group_states,
- "match_consumer_group_states");
-
- rd_kafka_confval_init_int(&options->broker, "broker", 0, INT32_MAX, -1);
- rd_kafka_confval_init_ptr(&options->opaque, "opaque");
-}
-
-/**
- * @brief Copy contents of \p src to \p dst.
- * Deep copy every pointer confval.
- *
- * @param dst The destination AdminOptions.
- * @param src The source AdminOptions.
- */
-static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst,
- const rd_kafka_AdminOptions_t *src) {
- *dst = *src;
- if (src->match_consumer_group_states.u.PTR) {
- char errstr[512];
- rd_list_t *states_list_copy = rd_list_copy_preallocated(
- src->match_consumer_group_states.u.PTR, NULL);
-
- rd_kafka_resp_err_t err = rd_kafka_confval_set_type(
- &dst->match_consumer_group_states, RD_KAFKA_CONFVAL_PTR,
- states_list_copy, errstr, sizeof(errstr));
- rd_assert(!err);
- }
-}
-
-
-rd_kafka_AdminOptions_t *
-rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api) {
- rd_kafka_AdminOptions_t *options;
-
- if ((int)for_api < 0 || for_api >= RD_KAFKA_ADMIN_OP__CNT)
- return NULL;
-
- options = rd_calloc(1, sizeof(*options));
-
- options->for_api = for_api;
-
- rd_kafka_AdminOptions_init(rk, options);
-
- return options;
-}
-
-void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options) {
- if (options->match_consumer_group_states.u.PTR) {
- rd_list_destroy(options->match_consumer_group_states.u.PTR);
- }
- rd_free(options);
-}
-
-/**@}*/
-
-
-
-/**
- * @name CreateTopics
- * @{
- *
- *
- *
- */
-
-
-
-rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic,
- int num_partitions,
- int replication_factor,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_NewTopic_t *new_topic;
-
- if (!topic) {
- rd_snprintf(errstr, errstr_size, "Invalid topic name");
- return NULL;
- }
-
- if (num_partitions < -1 || num_partitions > RD_KAFKAP_PARTITIONS_MAX) {
- rd_snprintf(errstr, errstr_size,
- "num_partitions out of "
- "expected range %d..%d or -1 for broker default",
- 1, RD_KAFKAP_PARTITIONS_MAX);
- return NULL;
- }
-
- if (replication_factor < -1 ||
- replication_factor > RD_KAFKAP_BROKERS_MAX) {
- rd_snprintf(errstr, errstr_size,
- "replication_factor out of expected range %d..%d",
- -1, RD_KAFKAP_BROKERS_MAX);
- return NULL;
- }
-
- new_topic = rd_calloc(1, sizeof(*new_topic));
- new_topic->topic = rd_strdup(topic);
- new_topic->num_partitions = num_partitions;
- new_topic->replication_factor = replication_factor;
-
- /* List of int32 lists */
- rd_list_init(&new_topic->replicas, 0, rd_list_destroy_free);
- rd_list_prealloc_elems(&new_topic->replicas, 0,
- num_partitions == -1 ? 0 : num_partitions,
- 0 /*nozero*/);
-
- /* List of ConfigEntrys */
- rd_list_init(&new_topic->config, 0, rd_kafka_ConfigEntry_free);
-
- return new_topic;
-}
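-
-/*
- * Example (illustrative sketch, not part of this file): constructing a
- * NewTopic with a topic-level config override. The topic name and
- * counts are hypothetical:
- *
- * @code
- *   char errstr[256];
- *   // 3 partitions, replication factor 1:
- *   rd_kafka_NewTopic_t *nt =
- *           rd_kafka_NewTopic_new("mytopic", 3, 1, errstr, sizeof(errstr));
- *   if (!nt)
- *           fprintf(stderr, "NewTopic_new: %s\n", errstr);
- *   else
- *           rd_kafka_NewTopic_set_config(nt, "cleanup.policy", "compact");
- * @endcode
- */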
-
-
-/**
- * @brief Topic name comparator for NewTopic_t
- */
-static int rd_kafka_NewTopic_cmp(const void *_a, const void *_b) {
- const rd_kafka_NewTopic_t *a = _a, *b = _b;
- return strcmp(a->topic, b->topic);
-}
-
-
-
-/**
- * @brief Allocate a new NewTopic and make a copy of \p src
- */
-static rd_kafka_NewTopic_t *
-rd_kafka_NewTopic_copy(const rd_kafka_NewTopic_t *src) {
- rd_kafka_NewTopic_t *dst;
-
- dst = rd_kafka_NewTopic_new(src->topic, src->num_partitions,
- src->replication_factor, NULL, 0);
- rd_assert(dst);
-
- rd_list_destroy(&dst->replicas); /* created in .._new() */
- rd_list_init_copy(&dst->replicas, &src->replicas);
- rd_list_copy_to(&dst->replicas, &src->replicas,
- rd_list_copy_preallocated, NULL);
-
- rd_list_init_copy(&dst->config, &src->config);
- rd_list_copy_to(&dst->config, &src->config,
- rd_kafka_ConfigEntry_list_copy, NULL);
-
- return dst;
-}
-
-void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic) {
- rd_list_destroy(&new_topic->replicas);
- rd_list_destroy(&new_topic->config);
- rd_free(new_topic->topic);
- rd_free(new_topic);
-}
-
-static void rd_kafka_NewTopic_free(void *ptr) {
- rd_kafka_NewTopic_destroy(ptr);
-}
-
-void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics,
- size_t new_topic_cnt) {
- size_t i;
- for (i = 0; i < new_topic_cnt; i++)
- rd_kafka_NewTopic_destroy(new_topics[i]);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic,
- int32_t partition,
- int32_t *broker_ids,
- size_t broker_id_cnt,
- char *errstr,
- size_t errstr_size) {
- rd_list_t *rl;
- int i;
-
- if (new_topic->replication_factor != -1) {
- rd_snprintf(errstr, errstr_size,
- "Specifying a replication factor and "
- "a replica assignment are mutually exclusive");
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- } else if (new_topic->num_partitions == -1) {
- rd_snprintf(errstr, errstr_size,
- "Specifying a default partition count and a "
- "replica assignment are mutually exclusive");
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- /* Replica partitions must be added consecutively starting from 0. */
- if (partition != rd_list_cnt(&new_topic->replicas)) {
- rd_snprintf(errstr, errstr_size,
- "Partitions must be added in order, "
- "starting at 0: expecting partition %d, "
- "not %" PRId32,
- rd_list_cnt(&new_topic->replicas), partition);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- if (broker_id_cnt > RD_KAFKAP_BROKERS_MAX) {
- rd_snprintf(errstr, errstr_size,
- "Too many brokers specified "
- "(RD_KAFKAP_BROKERS_MAX=%d)",
- RD_KAFKAP_BROKERS_MAX);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
-
- rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt);
-
- for (i = 0; i < (int)broker_id_cnt; i++)
- rd_list_set_int32(rl, i, broker_ids[i]);
-
- rd_list_add(&new_topic->replicas, rl);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
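-
-/*
- * Example (illustrative sketch, not part of this file): manual replica
- * assignment requires replication_factor -1 and partitions assigned in
- * order starting at 0. Broker ids 1, 2 and 3 are hypothetical:
- *
- * @code
- *   char errstr[256];
- *   int32_t p0[] = {1, 2}, p1[] = {2, 3};
- *   // 2 partitions, replication_factor -1 to allow manual assignment:
- *   rd_kafka_NewTopic_t *nt =
- *           rd_kafka_NewTopic_new("mytopic", 2, -1, errstr, sizeof(errstr));
- *
- *   rd_kafka_NewTopic_set_replica_assignment(nt, 0, p0, 2,
- *                                            errstr, sizeof(errstr));
- *   rd_kafka_NewTopic_set_replica_assignment(nt, 1, p1, 2,
- *                                            errstr, sizeof(errstr));
- * @endcode
- */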
-
-
-/**
- * @brief Generic constructor of ConfigEntry which is also added to \p rl
- */
-static rd_kafka_resp_err_t
-rd_kafka_admin_add_config0(rd_list_t *rl,
- const char *name,
- const char *value,
- rd_kafka_AlterOperation_t operation) {
- rd_kafka_ConfigEntry_t *entry;
-
- if (!name)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- entry = rd_calloc(1, sizeof(*entry));
- entry->kv = rd_strtup_new(name, value);
- entry->a.operation = operation;
-
- rd_list_add(rl, entry);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic,
- const char *name,
- const char *value) {
- return rd_kafka_admin_add_config0(&new_topic->config, name, value,
- RD_KAFKA_ALTER_OP_ADD);
-}
-
-
-
-/**
- * @brief Parse CreateTopicsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_CreateTopicsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_op_t *rko_result = NULL;
- int32_t topic_cnt;
- int i;
-
- if (rd_kafka_buf_ApiVersion(reply) >= 2) {
- int32_t Throttle_Time;
- rd_kafka_buf_read_i32(reply, &Throttle_Time);
- rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
- }
-
- /* #topics */
- rd_kafka_buf_read_i32(reply, &topic_cnt);
-
- if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
- rd_kafka_buf_parse_fail(
- reply,
- "Received %" PRId32
- " topics in response "
- "when only %d were requested",
- topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
-
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt,
- rd_kafka_topic_result_free);
-
- for (i = 0; i < (int)topic_cnt; i++) {
- rd_kafkap_str_t ktopic;
- int16_t error_code;
- rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
- char *this_errstr = NULL;
- rd_kafka_topic_result_t *terr;
- rd_kafka_NewTopic_t skel;
- int orig_pos;
-
- rd_kafka_buf_read_str(reply, &ktopic);
- rd_kafka_buf_read_i16(reply, &error_code);
-
- if (rd_kafka_buf_ApiVersion(reply) >= 1)
- rd_kafka_buf_read_str(reply, &error_msg);
-
- /* For non-blocking CreateTopicsRequests the broker
- * will return REQUEST_TIMED_OUT for topics
- * whose creation was triggered -
- * we hide this error code from the application
- * since the topic creation is in fact in progress. */
- if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT &&
- rd_kafka_confval_get_int(&rko_req->rko_u.admin_request
- .options.operation_timeout) <=
- 0) {
- error_code = RD_KAFKA_RESP_ERR_NO_ERROR;
- this_errstr = NULL;
- }
-
- if (error_code) {
- if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
- RD_KAFKAP_STR_LEN(&error_msg) == 0)
- this_errstr =
- (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
- }
-
- terr = rd_kafka_topic_result_new(ktopic.str,
- RD_KAFKAP_STR_LEN(&ktopic),
- error_code, this_errstr);
-
- /* As a convenience to the application we insert topic results
- * in the same order as they were requested. The broker
- * does not maintain ordering unfortunately. */
- skel.topic = terr->topic;
- orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
- &skel, rd_kafka_NewTopic_cmp);
- if (orig_pos == -1) {
- rd_kafka_topic_result_destroy(terr);
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned topic %.*s that was not "
- "included in the original request",
- RD_KAFKAP_STR_PR(&ktopic));
- }
-
- if (rd_list_elem(&rko_result->rko_u.admin_result.results,
- orig_pos) != NULL) {
- rd_kafka_topic_result_destroy(terr);
- rd_kafka_buf_parse_fail(
- reply, "Broker returned topic %.*s multiple times",
- RD_KAFKAP_STR_PR(&ktopic));
- }
-
- rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
- terr);
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "CreateTopics response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-
-void rd_kafka_CreateTopics(rd_kafka_t *rk,
- rd_kafka_NewTopic_t **new_topics,
- size_t new_topic_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_CreateTopicsRequest,
- rd_kafka_CreateTopicsResponse_parse,
- };
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATETOPICS,
- RD_KAFKA_EVENT_CREATETOPICS_RESULT,
- &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)new_topic_cnt,
- rd_kafka_NewTopic_free);
-
- for (i = 0; i < new_topic_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_NewTopic_copy(new_topics[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-
-/**
- * @brief Get an array of topic results from a CreateTopics result.
- *
- * The returned \p topics array has the same life-time as the \p result
- * object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(
- const rd_kafka_CreateTopics_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result,
- cntp);
-}
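-
-/*
- * Example (illustrative sketch, not part of this file): a full
- * CreateTopics round-trip on a dedicated result queue. `rk` and `nt`
- * (a rd_kafka_NewTopic_t *) are assumed to exist:
- *
- * @code
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_CreateTopics(rk, &nt, 1, NULL, queue);
- *
- *   rd_kafka_event_t *ev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (ev &&
- *       rd_kafka_event_type(ev) == RD_KAFKA_EVENT_CREATETOPICS_RESULT) {
- *           const rd_kafka_CreateTopics_result_t *res =
- *                   rd_kafka_event_CreateTopics_result(ev);
- *           size_t cnt, i;
- *           const rd_kafka_topic_result_t **topics =
- *                   rd_kafka_CreateTopics_result_topics(res, &cnt);
- *
- *           for (i = 0; i < cnt; i++)
- *                   fprintf(stderr, "%s: %s\n",
- *                           rd_kafka_topic_result_name(topics[i]),
- *                           rd_kafka_topic_result_error_string(topics[i]));
- *   }
- *   if (ev)
- *           rd_kafka_event_destroy(ev);
- *   rd_kafka_queue_destroy(queue);
- * @endcode
- */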
-
-/**@}*/
-
-
-
-/**
- * @name Delete topics
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic) {
- size_t tsize = strlen(topic) + 1;
- rd_kafka_DeleteTopic_t *del_topic;
-
- /* Single allocation */
- del_topic = rd_malloc(sizeof(*del_topic) + tsize);
- del_topic->topic = del_topic->data;
- memcpy(del_topic->topic, topic, tsize);
-
- return del_topic;
-}
-
-void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic) {
- rd_free(del_topic);
-}
-
-static void rd_kafka_DeleteTopic_free(void *ptr) {
- rd_kafka_DeleteTopic_destroy(ptr);
-}
-
-
-void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics,
- size_t del_topic_cnt) {
- size_t i;
- for (i = 0; i < del_topic_cnt; i++)
- rd_kafka_DeleteTopic_destroy(del_topics[i]);
-}
-
-
-/**
- * @brief Topic name comparator for DeleteTopic_t
- */
-static int rd_kafka_DeleteTopic_cmp(const void *_a, const void *_b) {
- const rd_kafka_DeleteTopic_t *a = _a, *b = _b;
- return strcmp(a->topic, b->topic);
-}
-
-/**
- * @brief Allocate a new DeleteTopic and make a copy of \p src
- */
-static rd_kafka_DeleteTopic_t *
-rd_kafka_DeleteTopic_copy(const rd_kafka_DeleteTopic_t *src) {
- return rd_kafka_DeleteTopic_new(src->topic);
-}
-
-
-
-/**
- * @brief Parse DeleteTopicsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DeleteTopicsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_op_t *rko_result = NULL;
- int32_t topic_cnt;
- int i;
-
- if (rd_kafka_buf_ApiVersion(reply) >= 1) {
- int32_t Throttle_Time;
- rd_kafka_buf_read_i32(reply, &Throttle_Time);
- rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
- }
-
- /* #topics */
- rd_kafka_buf_read_i32(reply, &topic_cnt);
-
- if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
- rd_kafka_buf_parse_fail(
- reply,
- "Received %" PRId32
- " topics in response "
- "when only %d were requested",
- topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt,
- rd_kafka_topic_result_free);
-
- for (i = 0; i < (int)topic_cnt; i++) {
- rd_kafkap_str_t ktopic;
- int16_t error_code;
- rd_kafka_topic_result_t *terr;
- rd_kafka_NewTopic_t skel;
- int orig_pos;
-
- rd_kafka_buf_read_str(reply, &ktopic);
- rd_kafka_buf_read_i16(reply, &error_code);
-
- /* For non-blocking DeleteTopicsRequests the broker
- * will return REQUEST_TIMED_OUT for topics
- * whose deletion was triggered -
- * we hide this error code from the application
- * since the topic deletion is in fact in progress. */
- if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT &&
- rd_kafka_confval_get_int(&rko_req->rko_u.admin_request
- .options.operation_timeout) <=
- 0) {
- error_code = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- terr = rd_kafka_topic_result_new(
- ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code,
- error_code ? rd_kafka_err2str(error_code) : NULL);
-
- /* As a convenience to the application we insert topic results
- * in the same order as they were requested. The broker
- * does not maintain ordering unfortunately. */
- skel.topic = terr->topic;
- orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
- &skel, rd_kafka_DeleteTopic_cmp);
- if (orig_pos == -1) {
- rd_kafka_topic_result_destroy(terr);
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned topic %.*s that was not "
- "included in the original request",
- RD_KAFKAP_STR_PR(&ktopic));
- }
-
- if (rd_list_elem(&rko_result->rko_u.admin_result.results,
- orig_pos) != NULL) {
- rd_kafka_topic_result_destroy(terr);
- rd_kafka_buf_parse_fail(
- reply, "Broker returned topic %.*s multiple times",
- RD_KAFKAP_STR_PR(&ktopic));
- }
-
- rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
- terr);
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "DeleteTopics response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-
-
-void rd_kafka_DeleteTopics(rd_kafka_t *rk,
- rd_kafka_DeleteTopic_t **del_topics,
- size_t del_topic_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_DeleteTopicsRequest,
- rd_kafka_DeleteTopicsResponse_parse,
- };
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETETOPICS,
- RD_KAFKA_EVENT_DELETETOPICS_RESULT,
- &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)del_topic_cnt,
- rd_kafka_DeleteTopic_free);
-
- for (i = 0; i < del_topic_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_DeleteTopic_copy(del_topics[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-
-/**
- * @brief Get an array of topic results from a DeleteTopics result.
- *
- * The returned \p topics array has the same life-time as the \p result
- * object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(
- const rd_kafka_DeleteTopics_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result,
- cntp);
-}
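-
-/*
- * Example (illustrative sketch, not part of this file): deleting a
- * topic. The DeleteTopic objects are copied by rd_kafka_DeleteTopics(),
- * so the application may destroy them right after the call. `rk` and
- * `queue` are assumed to exist:
- *
- * @code
- *   rd_kafka_DeleteTopic_t *dt = rd_kafka_DeleteTopic_new("mytopic");
- *   rd_kafka_DeleteTopics(rk, &dt, 1, NULL, queue);
- *   rd_kafka_DeleteTopic_destroy(dt);
- *   // ...then poll `queue` for RD_KAFKA_EVENT_DELETETOPICS_RESULT.
- * @endcode
- */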
-
-/**@}*/
-
-
-/**
- * @name Create partitions
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic,
- size_t new_total_cnt,
- char *errstr,
- size_t errstr_size) {
- size_t tsize = strlen(topic) + 1;
- rd_kafka_NewPartitions_t *newps;
-
- if (new_total_cnt < 1 || new_total_cnt > RD_KAFKAP_PARTITIONS_MAX) {
- rd_snprintf(errstr, errstr_size,
- "new_total_cnt out of "
- "expected range %d..%d",
- 1, RD_KAFKAP_PARTITIONS_MAX);
- return NULL;
- }
-
- /* Single allocation */
- newps = rd_malloc(sizeof(*newps) + tsize);
- newps->total_cnt = new_total_cnt;
- newps->topic = newps->data;
- memcpy(newps->topic, topic, tsize);
-
- /* List of int32 lists */
- rd_list_init(&newps->replicas, 0, rd_list_destroy_free);
- rd_list_prealloc_elems(&newps->replicas, 0, new_total_cnt,
- 0 /*nozero*/);
-
- return newps;
-}
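-
-/*
- * Example (illustrative sketch, not part of this file): growing a
- * topic to 6 partitions in total. `rk` and `queue` are assumed to
- * exist in the caller's scope:
- *
- * @code
- *   char errstr[256];
- *   rd_kafka_NewPartitions_t *np =
- *           rd_kafka_NewPartitions_new("mytopic", 6,
- *                                      errstr, sizeof(errstr));
- *   if (!np)
- *           fprintf(stderr, "NewPartitions_new: %s\n", errstr);
- *   else
- *           rd_kafka_CreatePartitions(rk, &np, 1, NULL, queue);
- * @endcode
- */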
-
-/**
- * @brief Topic name comparator for NewPartitions_t
- */
-static int rd_kafka_NewPartitions_cmp(const void *_a, const void *_b) {
- const rd_kafka_NewPartitions_t *a = _a, *b = _b;
- return strcmp(a->topic, b->topic);
-}
-
-
-/**
- * @brief Allocate a new NewPartitions and make a copy of \p src
- */
-static rd_kafka_NewPartitions_t *
-rd_kafka_NewPartitions_copy(const rd_kafka_NewPartitions_t *src) {
- rd_kafka_NewPartitions_t *dst;
-
- dst = rd_kafka_NewPartitions_new(src->topic, src->total_cnt, NULL, 0);
-
- rd_list_destroy(&dst->replicas); /* created in .._new() */
- rd_list_init_copy(&dst->replicas, &src->replicas);
- rd_list_copy_to(&dst->replicas, &src->replicas,
- rd_list_copy_preallocated, NULL);
-
- return dst;
-}
-
-void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *newps) {
- rd_list_destroy(&newps->replicas);
- rd_free(newps);
-}
-
-static void rd_kafka_NewPartitions_free(void *ptr) {
- rd_kafka_NewPartitions_destroy(ptr);
-}
-
-
-void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **newps,
- size_t newps_cnt) {
- size_t i;
- for (i = 0; i < newps_cnt; i++)
- rd_kafka_NewPartitions_destroy(newps[i]);
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *newp,
- int32_t new_partition_idx,
- int32_t *broker_ids,
- size_t broker_id_cnt,
- char *errstr,
- size_t errstr_size) {
- rd_list_t *rl;
- int i;
-
- /* Replica partitions must be added consecutively starting from 0. */
- if (new_partition_idx != rd_list_cnt(&newp->replicas)) {
- rd_snprintf(errstr, errstr_size,
- "Partitions must be added in order, "
- "starting at 0: expecting partition "
- "index %d, not %" PRId32,
- rd_list_cnt(&newp->replicas), new_partition_idx);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- if (broker_id_cnt > RD_KAFKAP_BROKERS_MAX) {
- rd_snprintf(errstr, errstr_size,
- "Too many brokers specified "
- "(RD_KAFKAP_BROKERS_MAX=%d)",
- RD_KAFKAP_BROKERS_MAX);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt);
-
- for (i = 0; i < (int)broker_id_cnt; i++)
- rd_list_set_int32(rl, i, broker_ids[i]);
-
- rd_list_add(&newp->replicas, rl);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
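-
-/* Usage sketch (illustrative): growing a topic from e.g. 2 to 4 partitions
- * with explicit replica assignments for the two new partitions. Per the
- * index check above, index 0 refers to the first *new* partition. The
- * topic name and broker ids are hypothetical. */
-static rd_kafka_NewPartitions_t *example_grow_topic(void) {
- char errstr[256];
- int32_t replicas_p2[] = {1, 2};
- int32_t replicas_p3[] = {2, 3};
- rd_kafka_NewPartitions_t *newp =
- rd_kafka_NewPartitions_new("mytopic", 4, errstr, sizeof(errstr));
-
- if (!newp)
- return NULL; /* errstr describes the failure */
-
- /* New partitions must be assigned in order, starting at index 0 */
- rd_kafka_NewPartitions_set_replica_assignment(newp, 0, replicas_p2, 2,
- errstr, sizeof(errstr));
- rd_kafka_NewPartitions_set_replica_assignment(newp, 1, replicas_p3, 2,
- errstr, sizeof(errstr));
-
- /* Pass to rd_kafka_CreatePartitions() and destroy afterwards */
- return newp;
-}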
-
-
-
-/**
- * @brief Parse CreatePartitionsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_CreatePartitionsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_op_t *rko_result = NULL;
- int32_t topic_cnt;
- int i;
- int32_t Throttle_Time;
-
- rd_kafka_buf_read_i32(reply, &Throttle_Time);
- rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
-
- /* #topics */
- rd_kafka_buf_read_i32(reply, &topic_cnt);
-
- if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
- rd_kafka_buf_parse_fail(
- reply,
- "Received %" PRId32
- " topics in response "
- "when only %d were requested",
- topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt,
- rd_kafka_topic_result_free);
-
- for (i = 0; i < (int)topic_cnt; i++) {
- rd_kafkap_str_t ktopic;
- int16_t error_code;
- char *this_errstr = NULL;
- rd_kafka_topic_result_t *terr;
- rd_kafka_NewPartitions_t skel;
- rd_kafkap_str_t error_msg;
- int orig_pos;
-
- rd_kafka_buf_read_str(reply, &ktopic);
- rd_kafka_buf_read_i16(reply, &error_code);
- rd_kafka_buf_read_str(reply, &error_msg);
-
- /* For non-blocking CreatePartitionsRequests the broker
- * will return REQUEST_TIMED_OUT for topics whose
- * partition creation was triggered -
- * we hide this error code from the application
- * since the partition creation is in fact in progress. */
- if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT &&
- rd_kafka_confval_get_int(&rko_req->rko_u.admin_request
- .options.operation_timeout) <=
- 0) {
- error_code = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (error_code) {
- if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
- RD_KAFKAP_STR_LEN(&error_msg) == 0)
- this_errstr =
- (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
- }
-
- terr = rd_kafka_topic_result_new(
- ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code,
- error_code ? this_errstr : NULL);
-
- /* As a convenience to the application we insert topic results
- * in the same order as they were requested; unfortunately
- * the broker does not maintain ordering. */
- skel.topic = terr->topic;
- orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
- &skel, rd_kafka_NewPartitions_cmp);
- if (orig_pos == -1) {
- rd_kafka_topic_result_destroy(terr);
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned topic %.*s that was not "
- "included in the original request",
- RD_KAFKAP_STR_PR(&ktopic));
- }
-
- if (rd_list_elem(&rko_result->rko_u.admin_result.results,
- orig_pos) != NULL) {
- rd_kafka_topic_result_destroy(terr);
- rd_kafka_buf_parse_fail(
- reply, "Broker returned topic %.*s multiple times",
- RD_KAFKAP_STR_PR(&ktopic));
- }
-
- rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
- terr);
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "CreatePartitions response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-
-
-void rd_kafka_CreatePartitions(rd_kafka_t *rk,
- rd_kafka_NewPartitions_t **newps,
- size_t newps_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_CreatePartitionsRequest,
- rd_kafka_CreatePartitionsResponse_parse,
- };
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_CREATEPARTITIONS,
- RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, &cbs, options,
- rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)newps_cnt,
- rd_kafka_NewPartitions_free);
-
- for (i = 0; i < newps_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_NewPartitions_copy(newps[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-
-/**
- * @brief Get an array of topic results from a CreatePartitions result.
- *
- * The life-time of the returned \p topics is the same as that of
- * the \p result object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(
- const rd_kafka_CreatePartitions_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result,
- cntp);
-}
-
-/**@}*/
-
-
-
-/**
- * @name ConfigEntry
- * @{
- *
- *
- *
- */
-
-static void rd_kafka_ConfigEntry_destroy(rd_kafka_ConfigEntry_t *entry) {
- rd_strtup_destroy(entry->kv);
- rd_list_destroy(&entry->synonyms);
- rd_free(entry);
-}
-
-
-static void rd_kafka_ConfigEntry_free(void *ptr) {
- rd_kafka_ConfigEntry_destroy((rd_kafka_ConfigEntry_t *)ptr);
-}
-
-
-/**
- * @brief Create new ConfigEntry
- *
- * @param name Config entry name
- * @param name_len Length of name, or -1 to use strlen()
- * @param value Config entry value, or NULL
- * @param value_len Length of value, or -1 to use strlen()
- */
-static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new0(const char *name,
- size_t name_len,
- const char *value,
- size_t value_len) {
- rd_kafka_ConfigEntry_t *entry;
-
- if (!name)
- return NULL;
-
- entry = rd_calloc(1, sizeof(*entry));
- entry->kv = rd_strtup_new0(name, name_len, value, value_len);
-
- rd_list_init(&entry->synonyms, 0, rd_kafka_ConfigEntry_free);
-
- entry->a.source = RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG;
-
- return entry;
-}
-
-/**
- * @sa rd_kafka_ConfigEntry_new0
- */
-static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new(const char *name,
- const char *value) {
- return rd_kafka_ConfigEntry_new0(name, -1, value, -1);
-}
-
-
-
-/**
- * @brief Allocate a new ConfigEntry and make a copy of \p src
- */
-static rd_kafka_ConfigEntry_t *
-rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src) {
- rd_kafka_ConfigEntry_t *dst;
-
- dst = rd_kafka_ConfigEntry_new(src->kv->name, src->kv->value);
- dst->a = src->a;
-
- rd_list_destroy(&dst->synonyms); /* created in .._new() */
- rd_list_init_copy(&dst->synonyms, &src->synonyms);
- rd_list_copy_to(&dst->synonyms, &src->synonyms,
- rd_kafka_ConfigEntry_list_copy, NULL);
-
- return dst;
-}
-
-static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque) {
- return rd_kafka_ConfigEntry_copy((const rd_kafka_ConfigEntry_t *)src);
-}
-
-
-const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry) {
- return entry->kv->name;
-}
-
-const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry) {
- return entry->kv->value;
-}
-
-rd_kafka_ConfigSource_t
-rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry) {
- return entry->a.source;
-}
-
-int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry) {
- return entry->a.is_readonly;
-}
-
-int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry) {
- return entry->a.is_default;
-}
-
-int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry) {
- return entry->a.is_sensitive;
-}
-
-int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry) {
- return entry->a.is_synonym;
-}
-
-const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry,
- size_t *cntp) {
- *cntp = rd_list_cnt(&entry->synonyms);
- if (!*cntp)
- return NULL;
- return (const rd_kafka_ConfigEntry_t **)entry->synonyms.rl_elems;
-}
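-
-/* Usage sketch (illustrative): walking the entries of a returned
- * ConfigResource, including each entry's synonyms (only populated by
- * DescribeConfigs against brokers speaking ApiVersion >= 1). */
-static void example_dump_config(const rd_kafka_ConfigResource_t *res) {
- size_t cnt, syn_cnt, i, s;
- const rd_kafka_ConfigEntry_t **entries =
- rd_kafka_ConfigResource_configs(res, &cnt);
-
- for (i = 0; i < cnt; i++) {
- /* Sensitive values are returned as NULL by the broker */
- const char *val = rd_kafka_ConfigEntry_value(entries[i]);
- const rd_kafka_ConfigEntry_t **syns =
- rd_kafka_ConfigEntry_synonyms(entries[i], &syn_cnt);
-
- fprintf(stderr, "%s = %s (source %s)\n",
- rd_kafka_ConfigEntry_name(entries[i]),
- val ? val : "(null)",
- rd_kafka_ConfigSource_name(
- rd_kafka_ConfigEntry_source(entries[i])));
- for (s = 0; s < syn_cnt; s++)
- fprintf(stderr, " synonym: %s\n",
- rd_kafka_ConfigEntry_name(syns[s]));
- }
-}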
-
-
-/**@}*/
-
-
-
-/**
- * @name ConfigSource
- * @{
- *
- *
- *
- */
-
-const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource) {
- static const char *names[] = {
- "UNKNOWN_CONFIG", "DYNAMIC_TOPIC_CONFIG",
- "DYNAMIC_BROKER_CONFIG", "DYNAMIC_DEFAULT_BROKER_CONFIG",
- "STATIC_BROKER_CONFIG", "DEFAULT_CONFIG",
- };
-
- if ((unsigned int)confsource >=
- (unsigned int)RD_KAFKA_CONFIG_SOURCE__CNT)
- return "UNSUPPORTED";
-
- return names[confsource];
-}
-
-/**@}*/
-
-
-
-/**
- * @name ConfigResource
- * @{
- *
- *
- *
- */
-
-const char *rd_kafka_ResourcePatternType_name(
- rd_kafka_ResourcePatternType_t resource_pattern_type) {
- static const char *names[] = {"UNKNOWN", "ANY", "MATCH", "LITERAL",
- "PREFIXED"};
-
- if ((unsigned int)resource_pattern_type >=
- (unsigned int)RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT)
- return "UNSUPPORTED";
-
- return names[resource_pattern_type];
-}
-
-const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype) {
- static const char *names[] = {
- "UNKNOWN", "ANY", "TOPIC", "GROUP", "BROKER",
- };
-
- if ((unsigned int)restype >= (unsigned int)RD_KAFKA_RESOURCE__CNT)
- return "UNSUPPORTED";
-
- return names[restype];
-}
-
-
-rd_kafka_ConfigResource_t *
-rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype,
- const char *resname) {
- rd_kafka_ConfigResource_t *config;
- size_t namesz = resname ? strlen(resname) : 0;
-
- if (!namesz || (int)restype < 0)
- return NULL;
-
- config = rd_calloc(1, sizeof(*config) + namesz + 1);
- config->name = config->data;
- memcpy(config->name, resname, namesz + 1);
- config->restype = restype;
-
- rd_list_init(&config->config, 8, rd_kafka_ConfigEntry_free);
-
- return config;
-}
-
-void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config) {
- rd_list_destroy(&config->config);
- if (config->errstr)
- rd_free(config->errstr);
- rd_free(config);
-}
-
-static void rd_kafka_ConfigResource_free(void *ptr) {
- rd_kafka_ConfigResource_destroy((rd_kafka_ConfigResource_t *)ptr);
-}
-
-
-void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config,
- size_t config_cnt) {
- size_t i;
- for (i = 0; i < config_cnt; i++)
- rd_kafka_ConfigResource_destroy(config[i]);
-}
-
-
-/**
- * @brief Type and name comparator for ConfigResource_t
- */
-static int rd_kafka_ConfigResource_cmp(const void *_a, const void *_b) {
- const rd_kafka_ConfigResource_t *a = _a, *b = _b;
- int r = RD_CMP(a->restype, b->restype);
- if (r)
- return r;
- return strcmp(a->name, b->name);
-}
-
-/**
- * @brief Allocate a new ConfigResource and make a copy of \p src
- */
-static rd_kafka_ConfigResource_t *
-rd_kafka_ConfigResource_copy(const rd_kafka_ConfigResource_t *src) {
- rd_kafka_ConfigResource_t *dst;
-
- dst = rd_kafka_ConfigResource_new(src->restype, src->name);
-
- rd_list_destroy(&dst->config); /* created in .._new() */
- rd_list_init_copy(&dst->config, &src->config);
- rd_list_copy_to(&dst->config, &src->config,
- rd_kafka_ConfigEntry_list_copy, NULL);
-
- return dst;
-}
-
-
-static void
-rd_kafka_ConfigResource_add_ConfigEntry(rd_kafka_ConfigResource_t *config,
- rd_kafka_ConfigEntry_t *entry) {
- rd_list_add(&config->config, entry);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_ConfigResource_add_config(rd_kafka_ConfigResource_t *config,
- const char *name,
- const char *value) {
- if (!name || !*name || !value)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- return rd_kafka_admin_add_config0(&config->config, name, value,
- RD_KAFKA_ALTER_OP_ADD);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
- const char *name,
- const char *value) {
- if (!name || !*name || !value)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- return rd_kafka_admin_add_config0(&config->config, name, value,
- RD_KAFKA_ALTER_OP_SET);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_ConfigResource_delete_config(rd_kafka_ConfigResource_t *config,
- const char *name) {
- if (!name || !*name)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- return rd_kafka_admin_add_config0(&config->config, name, NULL,
- RD_KAFKA_ALTER_OP_DELETE);
-}
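-
-/* Usage sketch (illustrative): altering a topic config. Note that this
- * (non-incremental) AlterConfigs replaces the resource's entire dynamic
- * configuration, so entries not set here may revert to their defaults.
- * `rk` and `rkqu` are assumed to exist; names/values are hypothetical. */
-static void example_alter_topic_config(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_ConfigResource_t *res =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
-
- rd_kafka_ConfigResource_set_config(res, "retention.ms", "86400000");
- rd_kafka_ConfigResource_set_config(res, "cleanup.policy", "delete");
-
- rd_kafka_AlterConfigs(rk, &res, 1, NULL /*default options*/, rkqu);
- /* ..poll rkqu for RD_KAFKA_EVENT_ALTERCONFIGS_RESULT.. */
-
- rd_kafka_ConfigResource_destroy(res); /* library made a copy */
-}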
-
-
-const rd_kafka_ConfigEntry_t **
-rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config,
- size_t *cntp) {
- *cntp = rd_list_cnt(&config->config);
- if (!*cntp)
- return NULL;
- return (const rd_kafka_ConfigEntry_t **)config->config.rl_elems;
-}
-
-
-
-rd_kafka_ResourceType_t
-rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config) {
- return config->restype;
-}
-
-const char *
-rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config) {
- return config->name;
-}
-
-rd_kafka_resp_err_t
-rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config) {
- return config->err;
-}
-
-const char *
-rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config) {
- if (!config->err)
- return NULL;
- if (config->errstr)
- return config->errstr;
- return rd_kafka_err2str(config->err);
-}
-
-
-/**
- * @brief Look in the provided ConfigResource_t* list for a resource of
- * type BROKER and set its broker id in \p broker_idp, returning
- * RD_KAFKA_RESP_ERR_NO_ERROR.
- *
- * If multiple BROKER resources are found RD_KAFKA_RESP_ERR__CONFLICT
- * is returned and an error string is written to \p errstr.
- *
- * If no BROKER resources are found RD_KAFKA_RESP_ERR_NO_ERROR
- * is returned and \p broker_idp is set to use the controller.
- */
-static rd_kafka_resp_err_t
-rd_kafka_ConfigResource_get_single_broker_id(const rd_list_t *configs,
- int32_t *broker_idp,
- char *errstr,
- size_t errstr_size) {
- const rd_kafka_ConfigResource_t *config;
- int i;
- int32_t broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER; /* Some default
- * value that we
- * can compare
- * to below */
-
- RD_LIST_FOREACH(config, configs, i) {
- char *endptr;
- long int r;
-
- if (config->restype != RD_KAFKA_RESOURCE_BROKER)
- continue;
-
- if (broker_id != RD_KAFKA_ADMIN_TARGET_CONTROLLER) {
- rd_snprintf(errstr, errstr_size,
- "Only one ConfigResource of type BROKER "
- "is allowed per call");
- return RD_KAFKA_RESP_ERR__CONFLICT;
- }
-
- /* Convert string broker-id to int32 */
- r = strtol(config->name, &endptr, 10);
- if (r == LONG_MIN || r == LONG_MAX || config->name == endptr ||
- r < 0 || r > INT32_MAX) {
- rd_snprintf(errstr, errstr_size,
- "Expected an int32 broker_id for "
- "ConfigResource(type=BROKER, name=%s)",
- config->name);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- broker_id = (int32_t)r;
-
- /* Keep scanning to make sure there are no duplicate
- * BROKER resources. */
- }
-
- *broker_idp = broker_id;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
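-
-/* Usage sketch (illustrative): per-broker configuration requests are routed
- * by the helper above, which requires the resource name to be the broker id
- * in string form. E.g., to target configs of broker 1: */
-static rd_kafka_ConfigResource_t *example_broker_resource(void) {
- /* The name "1" is parsed by strtol() above into broker_id 1 */
- return rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_BROKER, "1");
-}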
-
-
-/**@}*/
-
-
-
-/**
- * @name AlterConfigs
- * @{
- *
- *
- *
- */
-
-
-
-/**
- * @brief Parse AlterConfigsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_AlterConfigsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_op_t *rko_result = NULL;
- int32_t res_cnt;
- int i;
- int32_t Throttle_Time;
-
- rd_kafka_buf_read_i32(reply, &Throttle_Time);
- rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
-
- rd_kafka_buf_read_i32(reply, &res_cnt);
-
- if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) {
- rd_snprintf(errstr, errstr_size,
- "Received %" PRId32
- " ConfigResources in response "
- "when only %d were requested",
- res_cnt,
- rd_list_cnt(&rko_req->rko_u.admin_request.args));
- return RD_KAFKA_RESP_ERR__BAD_MSG;
- }
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
- rd_kafka_ConfigResource_free);
-
- for (i = 0; i < (int)res_cnt; i++) {
- int16_t error_code;
- rd_kafkap_str_t error_msg;
- int8_t res_type;
- rd_kafkap_str_t kres_name;
- char *res_name;
- char *this_errstr = NULL;
- rd_kafka_ConfigResource_t *config;
- rd_kafka_ConfigResource_t skel;
- int orig_pos;
-
- rd_kafka_buf_read_i16(reply, &error_code);
- rd_kafka_buf_read_str(reply, &error_msg);
- rd_kafka_buf_read_i8(reply, &res_type);
- rd_kafka_buf_read_str(reply, &kres_name);
- RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
-
- if (error_code) {
- if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
- RD_KAFKAP_STR_LEN(&error_msg) == 0)
- this_errstr =
- (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
- }
-
- config = rd_kafka_ConfigResource_new(res_type, res_name);
- if (!config) {
- rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN",
- "AlterConfigs returned "
- "unsupported ConfigResource #%d with "
- "type %d and name \"%s\": ignoring",
- i, res_type, res_name);
- continue;
- }
-
- config->err = error_code;
- if (this_errstr)
- config->errstr = rd_strdup(this_errstr);
-
- /* As a convenience to the application we insert results
- * in the same order as they were requested; unfortunately
- * the broker does not maintain ordering. */
- skel.restype = config->restype;
- skel.name = config->name;
- orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
- &skel, rd_kafka_ConfigResource_cmp);
- if (orig_pos == -1) {
- rd_kafka_ConfigResource_destroy(config);
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned ConfigResource %d,%s "
- "that was not "
- "included in the original request",
- res_type, res_name);
- }
-
- if (rd_list_elem(&rko_result->rko_u.admin_result.results,
- orig_pos) != NULL) {
- rd_kafka_ConfigResource_destroy(config);
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned ConfigResource %d,%s "
- "multiple times",
- res_type, res_name);
- }
-
- rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
- config);
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "AlterConfigs response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-
-
-void rd_kafka_AlterConfigs(rd_kafka_t *rk,
- rd_kafka_ConfigResource_t **configs,
- size_t config_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- rd_kafka_resp_err_t err;
- char errstr[256];
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_AlterConfigsRequest,
- rd_kafka_AlterConfigsResponse_parse,
- };
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_ALTERCONFIGS,
- RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
- &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt,
- rd_kafka_ConfigResource_free);
-
- for (i = 0; i < config_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_ConfigResource_copy(configs[i]));
-
- /* If there's a BROKER resource in the list we need to
- * speak directly to that broker rather than the controller.
- *
- * Multiple BROKER resources are not allowed.
- */
- err = rd_kafka_ConfigResource_get_single_broker_id(
- &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id,
- errstr, sizeof(errstr));
- if (err) {
- rd_kafka_admin_result_fail(rko, err, "%s", errstr);
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_true /*destroy*/);
- return;
- }
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-
-const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(
- const rd_kafka_AlterConfigs_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_resources(
- (const rd_kafka_op_t *)result, cntp);
-}
-
-/**@}*/
-
-
-
-/**
- * @name DescribeConfigs
- * @{
- *
- *
- *
- */
-
-
-/**
- * @brief Parse DescribeConfigsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DescribeConfigsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_op_t *rko_result = NULL;
- int32_t res_cnt;
- int i;
- int32_t Throttle_Time;
- rd_kafka_ConfigResource_t *config = NULL;
- rd_kafka_ConfigEntry_t *entry = NULL;
-
- rd_kafka_buf_read_i32(reply, &Throttle_Time);
- rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
-
- /* #resources */
- rd_kafka_buf_read_i32(reply, &res_cnt);
-
- if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
- rd_kafka_buf_parse_fail(
- reply,
- "Received %" PRId32
- " ConfigResources in response "
- "when only %d were requested",
- res_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
- rd_kafka_ConfigResource_free);
-
- for (i = 0; i < (int)res_cnt; i++) {
- int16_t error_code;
- rd_kafkap_str_t error_msg;
- int8_t res_type;
- rd_kafkap_str_t kres_name;
- char *res_name;
- char *this_errstr = NULL;
- rd_kafka_ConfigResource_t skel;
- int orig_pos;
- int32_t entry_cnt;
- int ci;
-
- rd_kafka_buf_read_i16(reply, &error_code);
- rd_kafka_buf_read_str(reply, &error_msg);
- rd_kafka_buf_read_i8(reply, &res_type);
- rd_kafka_buf_read_str(reply, &kres_name);
- RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
-
- if (error_code) {
- if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
- RD_KAFKAP_STR_LEN(&error_msg) == 0)
- this_errstr =
- (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
- }
-
- config = rd_kafka_ConfigResource_new(res_type, res_name);
- if (!config) {
- rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN",
- "DescribeConfigs returned "
- "unsupported ConfigResource #%d with "
- "type %d and name \"%s\": ignoring",
- i, res_type, res_name);
- continue;
- }
-
- config->err = error_code;
- if (this_errstr)
- config->errstr = rd_strdup(this_errstr);
-
- /* #config_entries */
- rd_kafka_buf_read_i32(reply, &entry_cnt);
-
- for (ci = 0; ci < (int)entry_cnt; ci++) {
- rd_kafkap_str_t config_name, config_value;
- int32_t syn_cnt;
- int si;
-
- rd_kafka_buf_read_str(reply, &config_name);
- rd_kafka_buf_read_str(reply, &config_value);
-
- entry = rd_kafka_ConfigEntry_new0(
- config_name.str, RD_KAFKAP_STR_LEN(&config_name),
- config_value.str, RD_KAFKAP_STR_LEN(&config_value));
-
- rd_kafka_buf_read_bool(reply, &entry->a.is_readonly);
-
- /* ApiVersion 0 has is_default field, while
- * ApiVersion 1 has source field.
- * Convert between the two so they look the same
- * to the caller. */
- if (rd_kafka_buf_ApiVersion(reply) == 0) {
- rd_kafka_buf_read_bool(reply,
- &entry->a.is_default);
- if (entry->a.is_default)
- entry->a.source =
- RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG;
- } else {
- int8_t config_source;
- rd_kafka_buf_read_i8(reply, &config_source);
- entry->a.source = config_source;
-
- if (entry->a.source ==
- RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
- entry->a.is_default = 1;
- }
-
- rd_kafka_buf_read_bool(reply, &entry->a.is_sensitive);
-
-
- if (rd_kafka_buf_ApiVersion(reply) == 1) {
- /* #config_synonyms (ApiVersion 1) */
- rd_kafka_buf_read_i32(reply, &syn_cnt);
-
- if (syn_cnt > 100000)
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned %" PRId32
- " config synonyms for "
- "ConfigResource %d,%s: "
- "limit is 100000",
- syn_cnt, config->restype,
- config->name);
-
- if (syn_cnt > 0)
- rd_list_grow(&entry->synonyms, syn_cnt);
-
- } else {
- /* No synonyms in ApiVersion 0 */
- syn_cnt = 0;
- }
-
-
-
- /* Read synonyms (ApiVersion 1) */
- for (si = 0; si < (int)syn_cnt; si++) {
- rd_kafkap_str_t syn_name, syn_value;
- int8_t syn_source;
- rd_kafka_ConfigEntry_t *syn_entry;
-
- rd_kafka_buf_read_str(reply, &syn_name);
- rd_kafka_buf_read_str(reply, &syn_value);
- rd_kafka_buf_read_i8(reply, &syn_source);
-
- syn_entry = rd_kafka_ConfigEntry_new0(
- syn_name.str, RD_KAFKAP_STR_LEN(&syn_name),
- syn_value.str,
- RD_KAFKAP_STR_LEN(&syn_value));
- if (!syn_entry)
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned invalid "
- "synonym #%d "
- "for ConfigEntry #%d (%s) "
- "and ConfigResource %d,%s: "
- "syn_name.len %d, "
- "syn_value.len %d",
- si, ci, entry->kv->name,
- config->restype, config->name,
- (int)syn_name.len,
- (int)syn_value.len);
-
- syn_entry->a.source = syn_source;
- syn_entry->a.is_synonym = 1;
-
- rd_list_add(&entry->synonyms, syn_entry);
- }
-
- rd_kafka_ConfigResource_add_ConfigEntry(config, entry);
- entry = NULL;
- }
-
- /* As a convenience to the application we insert results
- * in the same order as they were requested; unfortunately
- * the broker does not maintain ordering. */
- skel.restype = config->restype;
- skel.name = config->name;
- orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
- &skel, rd_kafka_ConfigResource_cmp);
- if (orig_pos == -1)
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned ConfigResource %d,%s "
- "that was not "
- "included in the original request",
- res_type, res_name);
-
- if (rd_list_elem(&rko_result->rko_u.admin_result.results,
- orig_pos) != NULL)
- rd_kafka_buf_parse_fail(
- reply,
- "Broker returned ConfigResource %d,%s "
- "multiple times",
- res_type, res_name);
-
- rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
- config);
- config = NULL;
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (entry)
- rd_kafka_ConfigEntry_destroy(entry);
- if (config)
- rd_kafka_ConfigResource_destroy(config);
-
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "DescribeConfigs response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-
-
-void rd_kafka_DescribeConfigs(rd_kafka_t *rk,
- rd_kafka_ConfigResource_t **configs,
- size_t config_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- rd_kafka_resp_err_t err;
- char errstr[256];
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_DescribeConfigsRequest,
- rd_kafka_DescribeConfigsResponse_parse,
- };
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_DESCRIBECONFIGS,
- RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt,
- rd_kafka_ConfigResource_free);
-
- for (i = 0; i < config_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_ConfigResource_copy(configs[i]));
-
- /* If there's a BROKER resource in the list we need to
- * speak directly to that broker rather than the controller.
- *
- * Multiple BROKER resources are not allowed.
- */
- err = rd_kafka_ConfigResource_get_single_broker_id(
- &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id,
- errstr, sizeof(errstr));
- if (err) {
- rd_kafka_admin_result_fail(rko, err, "%s", errstr);
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_true /*destroy*/);
- return;
- }
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-
-
-const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(
- const rd_kafka_DescribeConfigs_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_resources(
- (const rd_kafka_op_t *)result, cntp);
-}
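-
-/* Usage sketch (illustrative): submitting a DescribeConfigs request and
- * checking each returned resource for a per-resource error before reading
- * its entries. Assumes an existing `rk` and dedicated queue `rkqu`;
- * the topic name is hypothetical and error handling is trimmed. */
-static void example_describe_topic_config(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_ConfigResource_t *res =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, "mytopic");
- rd_kafka_event_t *rkev;
- const rd_kafka_ConfigResource_t **results;
- size_t cnt, i;
-
- rd_kafka_DescribeConfigs(rk, &res, 1, NULL, rkqu);
- rd_kafka_ConfigResource_destroy(res); /* library made a copy */
-
- rkev = rd_kafka_queue_poll(rkqu, 10 * 1000);
- results = rd_kafka_DescribeConfigs_result_resources(
- rd_kafka_event_DescribeConfigs_result(rkev), &cnt);
- for (i = 0; i < cnt; i++)
- if (rd_kafka_ConfigResource_error(results[i]))
- fprintf(stderr, "%s: %s\n",
- rd_kafka_ConfigResource_name(results[i]),
- rd_kafka_ConfigResource_error_string(results[i]));
- rd_kafka_event_destroy(rkev);
-}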
-
-/**@}*/
-
-/**
- * @name Delete Records
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
- const rd_kafka_topic_partition_list_t *before_offsets) {
- rd_kafka_DeleteRecords_t *del_records;
-
- del_records = rd_calloc(1, sizeof(*del_records));
- del_records->offsets =
- rd_kafka_topic_partition_list_copy(before_offsets);
-
- return del_records;
-}
-
-void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records) {
- rd_kafka_topic_partition_list_destroy(del_records->offsets);
- rd_free(del_records);
-}
-
-void rd_kafka_DeleteRecords_destroy_array(
- rd_kafka_DeleteRecords_t **del_records,
- size_t del_record_cnt) {
- size_t i;
- for (i = 0; i < del_record_cnt; i++)
- rd_kafka_DeleteRecords_destroy(del_records[i]);
-}
-
-
-
-/** @brief Merge the DeleteRecords response from a single broker
- * into the user response list.
- */
-static void
-rd_kafka_DeleteRecords_response_merge(rd_kafka_op_t *rko_fanout,
- const rd_kafka_op_t *rko_partial) {
- rd_kafka_t *rk = rko_fanout->rko_rk;
- const rd_kafka_topic_partition_list_t *partitions;
- rd_kafka_topic_partition_list_t *respartitions;
- const rd_kafka_topic_partition_t *partition;
-
- rd_assert(rko_partial->rko_evtype ==
- RD_KAFKA_EVENT_DELETERECORDS_RESULT);
-
- /* All partitions (offsets) from the DeleteRecords() call */
- respartitions =
- rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, 0);
-
- if (rko_partial->rko_err) {
- /* If there was a request-level error, set the error on
- * all requested partitions for this request. */
- const rd_kafka_topic_partition_list_t *reqpartitions;
- rd_kafka_topic_partition_t *reqpartition;
-
- /* Partitions (offsets) from this DeleteRecordsRequest */
- reqpartitions =
- rd_list_elem(&rko_partial->rko_u.admin_result.args, 0);
-
- RD_KAFKA_TPLIST_FOREACH(reqpartition, reqpartitions) {
- rd_kafka_topic_partition_t *respart;
-
- /* Find result partition */
- respart = rd_kafka_topic_partition_list_find(
- respartitions, reqpartition->topic,
- reqpartition->partition);
-
- rd_assert(respart || !*"respart not found");
-
- respart->err = rko_partial->rko_err;
- }
-
- return;
- }
-
- /* Partitions from the DeleteRecordsResponse */
- partitions = rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
-
- RD_KAFKA_TPLIST_FOREACH(partition, partitions) {
- rd_kafka_topic_partition_t *respart;
-
-
- /* Find result partition */
- respart = rd_kafka_topic_partition_list_find(
- respartitions, partition->topic, partition->partition);
- if (unlikely(!respart)) {
- rd_dassert(!*"partition not found");
-
- rd_kafka_log(rk, LOG_WARNING, "DELETERECORDS",
- "DeleteRecords response contains "
- "unexpected %s [%" PRId32
- "] which "
- "was not in the request list: ignored",
- partition->topic, partition->partition);
- continue;
- }
-
- respart->offset = partition->offset;
- respart->err = partition->err;
- }
-}
-
-
-
-/**
- * @brief Parse DeleteRecordsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DeleteRecordsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_op_t *rko_result;
- rd_kafka_topic_partition_list_t *offsets;
-
- rd_kafka_buf_read_throttle_time(reply);
-
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- offsets = rd_kafka_buf_read_topic_partitions(reply, 0, fields);
- if (!offsets)
- rd_kafka_buf_parse_fail(reply,
- "Failed to parse topic partitions");
-
-
- rko_result = rd_kafka_admin_result_new(rko_req);
- rd_list_init(&rko_result->rko_u.admin_result.results, 1,
- rd_kafka_topic_partition_list_destroy_free);
- rd_list_add(&rko_result->rko_u.admin_result.results, offsets);
- *rko_resultp = rko_result;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- rd_snprintf(errstr, errstr_size,
- "DeleteRecords response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-
-/**
- * @brief Call when leaders have been queried to progress the DeleteRecords
- * admin op to its next phase, sending DeleteRecords to partition
- * leaders.
- *
- * @param reply Reply op (RD_KAFKA_OP_LEADERS).
- */
-static rd_kafka_op_res_t
-rd_kafka_DeleteRecords_leaders_queried_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *reply) {
- rd_kafka_resp_err_t err = reply->rko_err;
- const rd_list_t *leaders =
- reply->rko_u.leaders.leaders; /* Possibly NULL (on err) */
- rd_kafka_topic_partition_list_t *partitions =
- reply->rko_u.leaders.partitions; /* Possibly NULL (on err) */
- rd_kafka_op_t *rko_fanout = reply->rko_u.leaders.opaque;
- rd_kafka_topic_partition_t *rktpar;
- rd_kafka_topic_partition_list_t *offsets;
- const struct rd_kafka_partition_leader *leader;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_DeleteRecordsRequest,
- rd_kafka_DeleteRecordsResponse_parse,
- };
- int i;
-
- rd_assert((rko_fanout->rko_type & ~RD_KAFKA_OP_FLAGMASK) ==
- RD_KAFKA_OP_ADMIN_FANOUT);
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- goto err;
-
- /* Requested offsets */
- offsets = rd_list_elem(&rko_fanout->rko_u.admin_request.args, 0);
-
- /* Update the error field of each partition from the
- * leader-queried partition list so that ERR_UNKNOWN_TOPIC_OR_PART
- * and similar are propagated, since those partitions are not
- * included in the leaders list. */
- RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
- rd_kafka_topic_partition_t *rktpar2;
-
- if (!rktpar->err)
- continue;
-
- rktpar2 = rd_kafka_topic_partition_list_find(
- offsets, rktpar->topic, rktpar->partition);
- rd_assert(rktpar2);
- rktpar2->err = rktpar->err;
- }
-
-
- if (err) {
- err:
- rd_kafka_admin_result_fail(
- rko_fanout, err, "Failed to query partition leaders: %s",
- err == RD_KAFKA_RESP_ERR__NOENT ? "No leaders found"
- : rd_kafka_err2str(err));
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- /* The response list is one element deep; that element is a
- * rd_kafka_topic_partition_list_t with the results of the deletes. */
- rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, 1,
- rd_kafka_topic_partition_list_destroy_free);
- rd_list_add(&rko_fanout->rko_u.admin_request.fanout.results,
- rd_kafka_topic_partition_list_copy(offsets));
-
- rko_fanout->rko_u.admin_request.fanout.outstanding =
- rd_list_cnt(leaders);
-
- rd_assert(rd_list_cnt(leaders) > 0);
-
- /* For each leader send a request for its partitions */
- RD_LIST_FOREACH(leader, leaders, i) {
- rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_DELETERECORDS,
- RD_KAFKA_EVENT_DELETERECORDS_RESULT, &cbs,
- &rko_fanout->rko_u.admin_request.options, rk->rk_ops);
- rko->rko_u.admin_request.fanout_parent = rko_fanout;
- rko->rko_u.admin_request.broker_id = leader->rkb->rkb_nodeid;
-
- rd_kafka_topic_partition_list_sort_by_topic(leader->partitions);
-
- rd_list_init(&rko->rko_u.admin_request.args, 1,
- rd_kafka_topic_partition_list_destroy_free);
- rd_list_add(
- &rko->rko_u.admin_request.args,
- rd_kafka_topic_partition_list_copy(leader->partitions));
-
- /* Enqueue op for admin_worker() to transition to next state */
- rd_kafka_q_enq(rk->rk_ops, rko);
- }
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-void rd_kafka_DeleteRecords(rd_kafka_t *rk,
- rd_kafka_DeleteRecords_t **del_records,
- size_t del_record_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko_fanout;
- static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
- rd_kafka_DeleteRecords_response_merge,
- rd_kafka_topic_partition_list_copy_opaque,
- };
- const rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_list_t *copied_offsets;
-
- rd_assert(rkqu);
-
- rko_fanout = rd_kafka_admin_fanout_op_new(
- rk, RD_KAFKA_OP_DELETERECORDS, RD_KAFKA_EVENT_DELETERECORDS_RESULT,
- &fanout_cbs, options, rkqu->rkqu_q);
-
- if (del_record_cnt != 1) {
- /* We only support one DeleteRecords per call since there
- * is no point in passing multiples, but the API still
- * needs to be extensible/future-proof. */
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Exactly one DeleteRecords must be "
- "passed");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- offsets = del_records[0]->offsets;
-
- if (offsets == NULL || offsets->cnt == 0) {
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "No records to delete");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- /* Copy offsets list and store it on the request op */
- copied_offsets = rd_kafka_topic_partition_list_copy(offsets);
- if (rd_kafka_topic_partition_list_has_duplicates(
- copied_offsets, rd_false /*check partition*/)) {
- rd_kafka_topic_partition_list_destroy(copied_offsets);
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate partitions not allowed");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- /* Set default error on each partition so that if any of the partitions
- * never get a request sent we have an error to indicate it. */
- rd_kafka_topic_partition_list_set_err(copied_offsets,
- RD_KAFKA_RESP_ERR__NOOP);
-
- rd_list_init(&rko_fanout->rko_u.admin_request.args, 1,
- rd_kafka_topic_partition_list_destroy_free);
- rd_list_add(&rko_fanout->rko_u.admin_request.args, copied_offsets);
-
- /* Async query for partition leaders */
- rd_kafka_topic_partition_list_query_leaders_async(
- rk, copied_offsets, rd_kafka_admin_timeout_remains(rko_fanout),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_DeleteRecords_leaders_queried_cb, rko_fanout);
-}
-
-
-/**
- * @brief Get the list of offsets from a DeleteRecords result.
- *
- * The life-time of the returned \p offsets is the same as that of
- * the \p result object.
- */
-const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(
- const rd_kafka_DeleteRecords_result_t *result) {
- const rd_kafka_topic_partition_list_t *offsets;
- const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
- size_t cnt;
-
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_DELETERECORDS);
-
- cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
-
- rd_assert(cnt == 1);
-
- offsets = (const rd_kafka_topic_partition_list_t *)rd_list_elem(
- &rko->rko_u.admin_result.results, 0);
-
- rd_assert(offsets);
-
- return offsets;
-}
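-
-/* Usage sketch (illustrative): truncating one partition up to (but not
- * including) offset 100, then reading the new low watermarks back from the
- * result. RD_KAFKA_OFFSET_END may be used to delete all records. The topic
- * name and offset are hypothetical; error handling is trimmed. */
-static void example_delete_records(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
- rd_kafka_topic_partition_list_t *before =
- rd_kafka_topic_partition_list_new(1);
- rd_kafka_DeleteRecords_t *del;
- rd_kafka_event_t *rkev;
-
- rd_kafka_topic_partition_list_add(before, "mytopic", 0)->offset = 100;
- del = rd_kafka_DeleteRecords_new(before);
- rd_kafka_topic_partition_list_destroy(before); /* copied by _new() */
-
- rd_kafka_DeleteRecords(rk, &del, 1, NULL, rkqu);
- rd_kafka_DeleteRecords_destroy(del); /* request holds its own copy */
-
- rkev = rd_kafka_queue_poll(rkqu, 10 * 1000);
- /* Each partition carries its .err and .offset (new low watermark) */
- (void)rd_kafka_DeleteRecords_result_offsets(
- rd_kafka_event_DeleteRecords_result(rkev));
- rd_kafka_event_destroy(rkev);
-}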
-
-/**@}*/
-
-/**
- * @name Delete groups
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group) {
- size_t tsize = strlen(group) + 1;
- rd_kafka_DeleteGroup_t *del_group;
-
- /* Single allocation */
- del_group = rd_malloc(sizeof(*del_group) + tsize);
- del_group->group = del_group->data;
- memcpy(del_group->group, group, tsize);
-
- return del_group;
-}
-
-void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group) {
- rd_free(del_group);
-}
-
-static void rd_kafka_DeleteGroup_free(void *ptr) {
- rd_kafka_DeleteGroup_destroy(ptr);
-}
-
-void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
- size_t del_group_cnt) {
- size_t i;
- for (i = 0; i < del_group_cnt; i++)
- rd_kafka_DeleteGroup_destroy(del_groups[i]);
-}
-
-/**
- * @brief Group name comparator for DeleteGroup_t
- */
-static int rd_kafka_DeleteGroup_cmp(const void *_a, const void *_b) {
- const rd_kafka_DeleteGroup_t *a = _a, *b = _b;
- return strcmp(a->group, b->group);
-}
-
-/**
- * @brief Allocate a new DeleteGroup and make a copy of \p src
- */
-static rd_kafka_DeleteGroup_t *
-rd_kafka_DeleteGroup_copy(const rd_kafka_DeleteGroup_t *src) {
- return rd_kafka_DeleteGroup_new(src->group);
-}
-
-
-/**
- * @brief Parse DeleteGroupsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DeleteGroupsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- int32_t group_cnt;
- int i;
- rd_kafka_op_t *rko_result = NULL;
-
- rd_kafka_buf_read_throttle_time(reply);
-
- /* #group_error_codes */
- rd_kafka_buf_read_i32(reply, &group_cnt);
-
- if (group_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
- rd_kafka_buf_parse_fail(
- reply,
- "Received %" PRId32
- " groups in response "
- "when only %d were requested",
- group_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
-
- rko_result = rd_kafka_admin_result_new(rko_req);
- rd_list_init(&rko_result->rko_u.admin_result.results, group_cnt,
- rd_kafka_group_result_free);
-
- for (i = 0; i < (int)group_cnt; i++) {
- rd_kafkap_str_t kgroup;
- int16_t error_code;
- rd_kafka_group_result_t *groupres;
-
- rd_kafka_buf_read_str(reply, &kgroup);
- rd_kafka_buf_read_i16(reply, &error_code);
-
- groupres = rd_kafka_group_result_new(
- kgroup.str, RD_KAFKAP_STR_LEN(&kgroup), NULL,
- error_code ? rd_kafka_error_new(error_code, NULL) : NULL);
-
- rd_list_add(&rko_result->rko_u.admin_result.results, groupres);
- }
-
- *rko_resultp = rko_result;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "DeleteGroups response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-/** @brief Merge the DeleteGroups response from a single broker
- * into the user response list.
- */
-void rd_kafka_DeleteGroups_response_merge(rd_kafka_op_t *rko_fanout,
- const rd_kafka_op_t *rko_partial) {
- const rd_kafka_group_result_t *groupres = NULL;
- rd_kafka_group_result_t *newgroupres;
- const rd_kafka_DeleteGroup_t *grp =
- rko_partial->rko_u.admin_result.opaque;
- int orig_pos;
-
- rd_assert(rko_partial->rko_evtype ==
- RD_KAFKA_EVENT_DELETEGROUPS_RESULT);
-
- if (!rko_partial->rko_err) {
- /* Proper results.
- * We only send one group per request, make sure it matches */
- groupres =
- rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
- rd_assert(groupres);
- rd_assert(!strcmp(groupres->group, grp->group));
- newgroupres = rd_kafka_group_result_copy(groupres);
- } else {
- /* Op errored, e.g. timeout */
- newgroupres = rd_kafka_group_result_new(
- grp->group, -1, NULL,
- rd_kafka_error_new(rko_partial->rko_err, NULL));
- }
-
- /* As a convenience to the application we insert group results
- * in the same order as they were requested. */
- orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp,
- rd_kafka_DeleteGroup_cmp);
- rd_assert(orig_pos != -1);
-
- /* Make sure result is not already set */
- rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results,
- orig_pos) == NULL);
-
- rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos,
- newgroupres);
-}
-
-void rd_kafka_DeleteGroups(rd_kafka_t *rk,
- rd_kafka_DeleteGroup_t **del_groups,
- size_t del_group_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko_fanout;
- rd_list_t dup_list;
- size_t i;
- static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
- rd_kafka_DeleteGroups_response_merge,
- rd_kafka_group_result_copy_opaque,
- };
-
- rd_assert(rkqu);
-
- rko_fanout = rd_kafka_admin_fanout_op_new(
- rk, RD_KAFKA_OP_DELETEGROUPS, RD_KAFKA_EVENT_DELETEGROUPS_RESULT,
- &fanout_cbs, options, rkqu->rkqu_q);
-
- if (del_group_cnt == 0) {
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "No groups to delete");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- /* Copy group list and store it on the request op.
- * Maintain original ordering. */
- rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)del_group_cnt,
- rd_kafka_DeleteGroup_free);
- for (i = 0; i < del_group_cnt; i++)
- rd_list_add(&rko_fanout->rko_u.admin_request.args,
- rd_kafka_DeleteGroup_copy(del_groups[i]));
-
- /* Check for duplicates.
- * Make a temporary copy of the group list and sort it to check for
- * duplicates; we don't want the original list sorted since we want
- * to maintain ordering. */
- rd_list_init(&dup_list,
- rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL);
- rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL,
- NULL);
- rd_list_sort(&dup_list, rd_kafka_DeleteGroup_cmp);
- if (rd_list_find_duplicate(&dup_list, rd_kafka_DeleteGroup_cmp)) {
- rd_list_destroy(&dup_list);
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate groups not allowed");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- rd_list_destroy(&dup_list);
-
- /* Prepare results list where fanned out op's results will be
- * accumulated. */
- rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results,
- (int)del_group_cnt, rd_kafka_group_result_free);
- rko_fanout->rko_u.admin_request.fanout.outstanding = (int)del_group_cnt;
-
- /* Create individual request ops for each group.
- * FIXME: A future optimization is to coalesce all groups for a single
- * coordinator into one op. */
- for (i = 0; i < del_group_cnt; i++) {
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_DeleteGroupsRequest,
- rd_kafka_DeleteGroupsResponse_parse,
- };
- rd_kafka_DeleteGroup_t *grp =
- rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i);
- rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_DELETEGROUPS,
- RD_KAFKA_EVENT_DELETEGROUPS_RESULT, &cbs, options,
- rk->rk_ops);
-
- rko->rko_u.admin_request.fanout_parent = rko_fanout;
- rko->rko_u.admin_request.broker_id =
- RD_KAFKA_ADMIN_TARGET_COORDINATOR;
- rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
- rko->rko_u.admin_request.coordkey = rd_strdup(grp->group);
-
- /* Set the group name as the opaque so the fanout worker can use it
- * to fill in errors.
- * References rko_fanout's memory, which will always outlive
- * the fanned out op. */
- rd_kafka_AdminOptions_set_opaque(
- &rko->rko_u.admin_request.options, grp);
-
- rd_list_init(&rko->rko_u.admin_request.args, 1,
- rd_kafka_DeleteGroup_free);
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_DeleteGroup_copy(del_groups[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
- }
-}
-
-
-/**
- * @brief Get an array of group results from a DeleteGroups result.
- *
- * The life-time of the returned \p groups is the same as that of
- * the \p result object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(
- const rd_kafka_DeleteGroups_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
- cntp);
-}
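-
-/* Usage sketch (illustrative): deleting one consumer group and inspecting
- * the per-group rd_kafka_error_t in the fanned-out result. The group name
- * is hypothetical; `rk` and `rkqu` are assumed to exist. */
-static void example_delete_group(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
- rd_kafka_DeleteGroup_t *del = rd_kafka_DeleteGroup_new("mygroup");
- rd_kafka_event_t *rkev;
- const rd_kafka_group_result_t **gres;
- size_t cnt, i;
-
- rd_kafka_DeleteGroups(rk, &del, 1, NULL, rkqu);
- rd_kafka_DeleteGroup_destroy(del); /* request holds its own copy */
-
- rkev = rd_kafka_queue_poll(rkqu, 10 * 1000);
- gres = rd_kafka_DeleteGroups_result_groups(
- rd_kafka_event_DeleteGroups_result(rkev), &cnt);
- for (i = 0; i < cnt; i++)
- if (rd_kafka_group_result_error(gres[i]))
- fprintf(stderr, "%s: %s\n",
- rd_kafka_group_result_name(gres[i]),
- rd_kafka_error_string(
- rd_kafka_group_result_error(gres[i])));
- rd_kafka_event_destroy(rkev);
-}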
-
-
-/**@}*/
-
-
-/**
- * @name Delete consumer group offsets (committed offsets)
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(
- const char *group,
- const rd_kafka_topic_partition_list_t *partitions) {
- size_t tsize = strlen(group) + 1;
- rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets;
-
- rd_assert(partitions);
-
- /* Single allocation */
- del_grpoffsets = rd_malloc(sizeof(*del_grpoffsets) + tsize);
- del_grpoffsets->group = del_grpoffsets->data;
- memcpy(del_grpoffsets->group, group, tsize);
- del_grpoffsets->partitions =
- rd_kafka_topic_partition_list_copy(partitions);
-
- return del_grpoffsets;
-}
-
-void rd_kafka_DeleteConsumerGroupOffsets_destroy(
- rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets) {
- rd_kafka_topic_partition_list_destroy(del_grpoffsets->partitions);
- rd_free(del_grpoffsets);
-}
-
-static void rd_kafka_DeleteConsumerGroupOffsets_free(void *ptr) {
- rd_kafka_DeleteConsumerGroupOffsets_destroy(ptr);
-}
-
-void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(
- rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
- size_t del_grpoffsets_cnt) {
- size_t i;
- for (i = 0; i < del_grpoffsets_cnt; i++)
- rd_kafka_DeleteConsumerGroupOffsets_destroy(del_grpoffsets[i]);
-}
-
-
-/**
- * @brief Allocate a new DeleteConsumerGroupOffsets and make a copy of
- *        \p src
- */
-static rd_kafka_DeleteConsumerGroupOffsets_t *
-rd_kafka_DeleteConsumerGroupOffsets_copy(
- const rd_kafka_DeleteConsumerGroupOffsets_t *src) {
- return rd_kafka_DeleteConsumerGroupOffsets_new(src->group,
- src->partitions);
-}
-
-
-/**
- * @brief Parse OffsetDeleteResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_OffsetDeleteResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_op_t *rko_result;
- int16_t ErrorCode;
- rd_kafka_topic_partition_list_t *partitions = NULL;
- const rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets;
-
- rd_kafka_buf_read_i16(reply, &ErrorCode);
- if (ErrorCode) {
- rd_snprintf(errstr, errstr_size,
- "OffsetDelete response error: %s",
- rd_kafka_err2str(ErrorCode));
- return ErrorCode;
- }
-
- rd_kafka_buf_read_throttle_time(reply);
-
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- partitions = rd_kafka_buf_read_topic_partitions(reply, 16, fields);
- if (!partitions) {
- rd_snprintf(errstr, errstr_size,
- "Failed to parse OffsetDeleteResponse partitions");
- return RD_KAFKA_RESP_ERR__BAD_MSG;
- }
-
-
- /* Create result op and group_result_t */
- rko_result = rd_kafka_admin_result_new(rko_req);
- del_grpoffsets = rd_list_elem(&rko_result->rko_u.admin_result.args, 0);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, 1,
- rd_kafka_group_result_free);
- rd_list_add(&rko_result->rko_u.admin_result.results,
- rd_kafka_group_result_new(del_grpoffsets->group, -1,
- partitions, NULL));
- rd_kafka_topic_partition_list_destroy(partitions);
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- rd_snprintf(errstr, errstr_size,
- "OffsetDelete response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
- return reply->rkbuf_err;
-}
-
-
-void rd_kafka_DeleteConsumerGroupOffsets(
- rd_kafka_t *rk,
- rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
- size_t del_grpoffsets_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_OffsetDeleteRequest,
- rd_kafka_OffsetDeleteResponse_parse,
- };
- rd_kafka_op_t *rko;
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS,
- RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, &cbs, options,
- rkqu->rkqu_q);
-
- if (del_grpoffsets_cnt != 1) {
- /* For simplicity we only support a single group for now */
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Exactly one "
- "DeleteConsumerGroupOffsets must "
- "be passed");
- rd_kafka_admin_common_worker_destroy(rk, rko,
- rd_true /*destroy*/);
- return;
- }
-
-
- rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
- rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
- rko->rko_u.admin_request.coordkey = rd_strdup(del_grpoffsets[0]->group);
-
- /* Store copy of group on request so the group name can be reached
- * from the response parser. */
- rd_list_init(&rko->rko_u.admin_request.args, 1,
- rd_kafka_DeleteConsumerGroupOffsets_free);
- rd_list_add(
- &rko->rko_u.admin_request.args,
- rd_kafka_DeleteConsumerGroupOffsets_copy(del_grpoffsets[0]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-
-/**
- * @brief Get an array of group results from a
- * DeleteConsumerGroupOffsets result.
- *
- * The life-time of the returned \p groups is the same as that of
- * the \p result object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_group_result_t **
-rd_kafka_DeleteConsumerGroupOffsets_result_groups(
- const rd_kafka_DeleteConsumerGroupOffsets_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
- cntp);
-}
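-
-/* Usage sketch (illustrative): deleting the committed offset for one
- * partition of a group. The group will typically get a per-partition
- * GROUP_SUBSCRIBED_TO_TOPIC error if it is still subscribed to the topic.
- * The group/topic names are hypothetical. */
-static void example_delete_group_offsets(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_topic_partition_list_t *parts =
- rd_kafka_topic_partition_list_new(1);
- rd_kafka_DeleteConsumerGroupOffsets_t *del;
-
- rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
- del = rd_kafka_DeleteConsumerGroupOffsets_new("mygroup", parts);
- rd_kafka_topic_partition_list_destroy(parts); /* copied by _new() */
-
- rd_kafka_DeleteConsumerGroupOffsets(rk, &del, 1, NULL, rkqu);
- rd_kafka_DeleteConsumerGroupOffsets_destroy(del);
- /* ..poll rkqu and use rd_kafka_group_result_partitions() on the
- * result to get the per-partition error codes.. */
-}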
-
-/**@}*/
-/**
- * @name CreateAcls
- * @{
- *
- *
- *
- */
-
-const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t operation) {
- static const char *names[] = {"UNKNOWN",
- "ANY",
- "ALL",
- "READ",
- "WRITE",
- "CREATE",
- "DELETE",
- "ALTER",
- "DESCRIBE",
- "CLUSTER_ACTION",
- "DESCRIBE_CONFIGS",
- "ALTER_CONFIGS",
- "IDEMPOTENT_WRITE"};
-
- if ((unsigned int)operation >=
- (unsigned int)RD_KAFKA_ACL_OPERATION__CNT)
- return "UNSUPPORTED";
-
- return names[operation];
-}
-
-const char *
-rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t permission_type) {
- static const char *names[] = {"UNKNOWN", "ANY", "DENY", "ALLOW"};
-
- if ((unsigned int)permission_type >=
- (unsigned int)RD_KAFKA_ACL_PERMISSION_TYPE__CNT)
- return "UNSUPPORTED";
-
- return names[permission_type];
-}
-
-static rd_kafka_AclBinding_t *
-rd_kafka_AclBinding_new0(rd_kafka_ResourceType_t restype,
- const char *name,
- rd_kafka_ResourcePatternType_t resource_pattern_type,
- const char *principal,
- const char *host,
- rd_kafka_AclOperation_t operation,
- rd_kafka_AclPermissionType_t permission_type,
- rd_kafka_resp_err_t err,
- const char *errstr) {
- rd_kafka_AclBinding_t *acl_binding;
-
- acl_binding = rd_calloc(1, sizeof(*acl_binding));
- acl_binding->name = name != NULL ? rd_strdup(name) : NULL;
- acl_binding->principal =
- principal != NULL ? rd_strdup(principal) : NULL;
- acl_binding->host = host != NULL ? rd_strdup(host) : NULL;
- acl_binding->restype = restype;
- acl_binding->resource_pattern_type = resource_pattern_type;
- acl_binding->operation = operation;
- acl_binding->permission_type = permission_type;
- if (err)
- acl_binding->error = rd_kafka_error_new(err, "%s", errstr);
-
- return acl_binding;
-}
-
-rd_kafka_AclBinding_t *
-rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
- const char *name,
- rd_kafka_ResourcePatternType_t resource_pattern_type,
- const char *principal,
- const char *host,
- rd_kafka_AclOperation_t operation,
- rd_kafka_AclPermissionType_t permission_type,
- char *errstr,
- size_t errstr_size) {
- if (!name) {
- rd_snprintf(errstr, errstr_size, "Invalid resource name");
- return NULL;
- }
- if (!principal) {
- rd_snprintf(errstr, errstr_size, "Invalid principal");
- return NULL;
- }
- if (!host) {
- rd_snprintf(errstr, errstr_size, "Invalid host");
- return NULL;
- }
-
- if (restype == RD_KAFKA_RESOURCE_ANY ||
- restype <= RD_KAFKA_RESOURCE_UNKNOWN ||
- restype >= RD_KAFKA_RESOURCE__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid resource type");
- return NULL;
- }
-
- if (resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_ANY ||
- resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_MATCH ||
- resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
- resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
- rd_snprintf(errstr, errstr_size,
- "Invalid resource pattern type");
- return NULL;
- }
-
- if (operation == RD_KAFKA_ACL_OPERATION_ANY ||
- operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
- operation >= RD_KAFKA_ACL_OPERATION__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid operation");
- return NULL;
- }
-
- if (permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ANY ||
- permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
- permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid permission type");
- return NULL;
- }
-
- return rd_kafka_AclBinding_new0(
- restype, name, resource_pattern_type, principal, host, operation,
- permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
-}
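-
-/* Usage sketch (illustrative): granting a principal read access to a topic
- * from any host. rd_kafka_CreateAcls() enqueues the request and the result
- * arrives as a CREATEACLS_RESULT event on \p rkqu. The principal and topic
- * are hypothetical. */
-static void example_create_acl(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
- char errstr[256];
- rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, "mytopic",
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
- RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- errstr, sizeof(errstr));
-
- if (!acl) {
- fprintf(stderr, "invalid binding: %s\n", errstr);
- return;
- }
-
- rd_kafka_CreateAcls(rk, &acl, 1, NULL, rkqu);
- rd_kafka_AclBinding_destroy(acl); /* request holds its own copy */
-}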
-
-rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
- rd_kafka_ResourceType_t restype,
- const char *name,
- rd_kafka_ResourcePatternType_t resource_pattern_type,
- const char *principal,
- const char *host,
- rd_kafka_AclOperation_t operation,
- rd_kafka_AclPermissionType_t permission_type,
- char *errstr,
- size_t errstr_size) {
-
-
- if (restype <= RD_KAFKA_RESOURCE_UNKNOWN ||
- restype >= RD_KAFKA_RESOURCE__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid resource type");
- return NULL;
- }
-
- if (resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
- resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
- rd_snprintf(errstr, errstr_size,
- "Invalid resource pattern type");
- return NULL;
- }
-
- if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
- operation >= RD_KAFKA_ACL_OPERATION__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid operation");
- return NULL;
- }
-
- if (permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
- permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid permission type");
- return NULL;
- }
-
- return rd_kafka_AclBinding_new0(
- restype, name, resource_pattern_type, principal, host, operation,
- permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
-}
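-
-/*
- * Illustrative sketch: a filter matching every literal topic ACL.
- * Unlike rd_kafka_AclBinding_new(), NULL name/principal/host and the
- * ANY/MATCH enum values are accepted here.
- *
- *   char errstr[512];
- *   rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
- *       RD_KAFKA_RESOURCE_TOPIC, NULL,
- *       RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
- *       RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
- *       errstr, sizeof(errstr));
- */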
-
-rd_kafka_ResourceType_t
-rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl) {
- return acl->restype;
-}
-
-const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl) {
- return acl->name;
-}
-
-const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl) {
- return acl->principal;
-}
-
-const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl) {
- return acl->host;
-}
-
-rd_kafka_AclOperation_t
-rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl) {
- return acl->operation;
-}
-
-rd_kafka_AclPermissionType_t
-rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl) {
- return acl->permission_type;
-}
-
-rd_kafka_ResourcePatternType_t
-rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl) {
- return acl->resource_pattern_type;
-}
-
-const rd_kafka_error_t *
-rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl) {
- return acl->error;
-}
-
-/**
- * @brief Allocate a new AclBinding and make a copy of \p src
- */
-static rd_kafka_AclBinding_t *
-rd_kafka_AclBinding_copy(const rd_kafka_AclBinding_t *src) {
- rd_kafka_AclBinding_t *dst;
-
- dst = rd_kafka_AclBinding_new(
- src->restype, src->name, src->resource_pattern_type, src->principal,
- src->host, src->operation, src->permission_type, NULL, 0);
- rd_assert(dst);
- return dst;
-}
-
-/**
- * @brief Allocate a new AclBindingFilter and make a copy of \p src
- */
-static rd_kafka_AclBindingFilter_t *
-rd_kafka_AclBindingFilter_copy(const rd_kafka_AclBindingFilter_t *src) {
- rd_kafka_AclBindingFilter_t *dst;
-
- dst = rd_kafka_AclBindingFilter_new(
- src->restype, src->name, src->resource_pattern_type, src->principal,
- src->host, src->operation, src->permission_type, NULL, 0);
- rd_assert(dst);
- return dst;
-}
-
-void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding) {
- if (acl_binding->name)
- rd_free(acl_binding->name);
- if (acl_binding->principal)
- rd_free(acl_binding->principal);
- if (acl_binding->host)
- rd_free(acl_binding->host);
- if (acl_binding->error)
- rd_kafka_error_destroy(acl_binding->error);
- rd_free(acl_binding);
-}
-
-static void rd_kafka_AclBinding_free(void *ptr) {
- rd_kafka_AclBinding_destroy(ptr);
-}
-
-
-void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
- size_t acl_bindings_cnt) {
- size_t i;
- for (i = 0; i < acl_bindings_cnt; i++)
- rd_kafka_AclBinding_destroy(acl_bindings[i]);
-}
-
-/**
- * @brief Parse CreateAclsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_CreateAclsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_op_t *rko_result = NULL;
- int32_t acl_cnt;
- int i;
-
- rd_kafka_buf_read_throttle_time(reply);
-
- rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000);
-
- if (acl_cnt != rd_list_cnt(&rko_req->rko_u.admin_request.args))
- rd_kafka_buf_parse_fail(
- reply,
- "Received %" PRId32
- " acls in response, but %d were requested",
- acl_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, acl_cnt,
- rd_kafka_acl_result_free);
-
- for (i = 0; i < (int)acl_cnt; i++) {
- int16_t error_code;
- rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
- rd_kafka_acl_result_t *acl_res;
- char *errstr = NULL;
-
- rd_kafka_buf_read_i16(reply, &error_code);
-
- rd_kafka_buf_read_str(reply, &error_msg);
-
- if (error_code) {
- if (RD_KAFKAP_STR_LEN(&error_msg) == 0)
- errstr = (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
- }
-
- acl_res = rd_kafka_acl_result_new(
- error_code ? rd_kafka_error_new(error_code, "%s", errstr)
- : NULL);
-
- rd_list_set(&rko_result->rko_u.admin_result.results, i,
- acl_res);
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "CreateAcls response protocol parse failure: %s",
- rd_kafka_err2str(err));
-
- return err;
-}
-
-void rd_kafka_CreateAcls(rd_kafka_t *rk,
- rd_kafka_AclBinding_t **new_acls,
- size_t new_acls_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_CreateAclsRequest, rd_kafka_CreateAclsResponse_parse};
-
- rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATEACLS,
- RD_KAFKA_EVENT_CREATEACLS_RESULT,
- &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)new_acls_cnt,
- rd_kafka_AclBinding_free);
-
- for (i = 0; i < new_acls_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_AclBinding_copy(new_acls[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-/**
- * @brief Get an array of rd_kafka_acl_result_t from a CreateAcls result.
- *
- * The returned \p rd_kafka_acl_result_t life-time is the same as the \p result
- * object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_acl_result_t **
-rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_acl_results(
- (const rd_kafka_op_t *)result, cntp);
-}
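-
-/*
- * Illustrative round-trip sketch, assuming a configured client handle
- * `rk` and the `acl` binding from the earlier example; timeout choice
- * and error handling trimmed for brevity.
- *
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_CreateAcls(rk, &acl, 1, NULL, queue);
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (rkev && rd_kafka_event_type(rkev) ==
- *                   RD_KAFKA_EVENT_CREATEACLS_RESULT) {
- *       size_t cnt;
- *       const rd_kafka_acl_result_t **acl_results =
- *           rd_kafka_CreateAcls_result_acls(
- *               rd_kafka_event_CreateAcls_result(rkev), &cnt);
- *       for (size_t i = 0; i < cnt; i++)
- *           if (rd_kafka_acl_result_error(acl_results[i]))
- *               fprintf(stderr, "ACL %zu failed: %s\n", i,
- *                       rd_kafka_error_string(
- *                           rd_kafka_acl_result_error(acl_results[i])));
- *   }
- *   if (rkev)
- *       rd_kafka_event_destroy(rkev);
- *   rd_kafka_queue_destroy(queue);
- */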
-
-/**@}*/
-
-/**
- * @name DescribeAcls
- * @{
- *
- *
- *
- */
-
-/**
- * @brief Parse DescribeAclsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DescribeAclsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_op_t *rko_result = NULL;
- int32_t res_cnt;
- int i;
- int j;
- rd_kafka_AclBinding_t *acl = NULL;
- int16_t error_code;
- rd_kafkap_str_t error_msg;
-
- rd_kafka_buf_read_throttle_time(reply);
-
- rd_kafka_buf_read_i16(reply, &error_code);
- rd_kafka_buf_read_str(reply, &error_msg);
-
- if (error_code) {
- if (RD_KAFKAP_STR_LEN(&error_msg) == 0)
- errstr = (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
- }
-
- /* #resources */
- rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000);
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
- rd_kafka_AclBinding_free);
-
- for (i = 0; i < (int)res_cnt; i++) {
- int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN;
- rd_kafkap_str_t kres_name;
- char *res_name;
- int8_t resource_pattern_type =
- RD_KAFKA_RESOURCE_PATTERN_LITERAL;
- int32_t acl_cnt;
-
- rd_kafka_buf_read_i8(reply, &res_type);
- rd_kafka_buf_read_str(reply, &kres_name);
- RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
-
- if (rd_kafka_buf_ApiVersion(reply) >= 1) {
- rd_kafka_buf_read_i8(reply, &resource_pattern_type);
- }
-
- if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN ||
- res_type >= RD_KAFKA_RESOURCE__CNT) {
- rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE",
- "DescribeAclsResponse returned unknown "
- "resource type %d",
- res_type);
- res_type = RD_KAFKA_RESOURCE_UNKNOWN;
- }
- if (resource_pattern_type <=
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
- resource_pattern_type >=
- RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
- rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE",
- "DescribeAclsResponse returned unknown "
- "resource pattern type %d",
- resource_pattern_type);
- resource_pattern_type =
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN;
- }
-
-                /* #acls */
- rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000);
-
- for (j = 0; j < (int)acl_cnt; j++) {
- rd_kafkap_str_t kprincipal;
- rd_kafkap_str_t khost;
- int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
- int8_t permission_type =
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
- char *principal;
- char *host;
-
- rd_kafka_buf_read_str(reply, &kprincipal);
- rd_kafka_buf_read_str(reply, &khost);
- rd_kafka_buf_read_i8(reply, &operation);
- rd_kafka_buf_read_i8(reply, &permission_type);
- RD_KAFKAP_STR_DUPA(&principal, &kprincipal);
- RD_KAFKAP_STR_DUPA(&host, &khost);
-
- if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
- operation >= RD_KAFKA_ACL_OPERATION__CNT) {
- rd_rkb_log(rkb, LOG_WARNING,
- "DESCRIBEACLSRESPONSE",
- "DescribeAclsResponse returned "
- "unknown acl operation %d",
- operation);
- operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
- }
- if (permission_type <=
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
- permission_type >=
- RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
- rd_rkb_log(rkb, LOG_WARNING,
- "DESCRIBEACLSRESPONSE",
- "DescribeAclsResponse returned "
- "unknown acl permission type %d",
- permission_type);
- permission_type =
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
- }
-
- acl = rd_kafka_AclBinding_new0(
- res_type, res_name, resource_pattern_type,
- principal, host, operation, permission_type,
- RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
-
- rd_list_add(&rko_result->rko_u.admin_result.results,
- acl);
- }
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "DescribeAcls response protocol parse failure: %s",
- rd_kafka_err2str(err));
-
- return err;
-}
-
-void rd_kafka_DescribeAcls(rd_kafka_t *rk,
- rd_kafka_AclBindingFilter_t *acl_filter,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
-
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_DescribeAclsRequest,
- rd_kafka_DescribeAclsResponse_parse,
- };
-
- rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DESCRIBEACLS,
- RD_KAFKA_EVENT_DESCRIBEACLS_RESULT,
- &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, 1,
- rd_kafka_AclBinding_free);
-
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_AclBindingFilter_copy(acl_filter));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-/**
- * @brief Get an array of rd_kafka_AclBinding_t from a DescribeAcls result.
- *
- * The returned \p rd_kafka_AclBinding_t life-time is the same as the \p result
- * object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_AclBinding_t **
-rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_acl_bindings(
- (const rd_kafka_op_t *)result, cntp);
-}
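-
-/*
- * Illustrative sketch, assuming a configured `rk` and the `filter`
- * from the AclBindingFilter example; error handling trimmed.
- *
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_DescribeAcls(rk, filter, NULL, queue);
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (rkev && !rd_kafka_event_error(rkev)) {
- *       size_t cnt;
- *       const rd_kafka_AclBinding_t **acls =
- *           rd_kafka_DescribeAcls_result_acls(
- *               rd_kafka_event_DescribeAcls_result(rkev), &cnt);
- *       for (size_t i = 0; i < cnt; i++)
- *           printf("%s %s\n",
- *                  rd_kafka_AclBinding_principal(acls[i]),
- *                  rd_kafka_AclBinding_name(acls[i]));
- *   }
- */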
-
-/**@}*/
-
-/**
- * @name DeleteAcls
- * @{
- *
- *
- *
- */
-
-/**
- * @brief Allocate a new DeleteAcls result response with the given
- * \p err error code and \p errstr error message.
- */
-const rd_kafka_DeleteAcls_result_response_t *
-rd_kafka_DeleteAcls_result_response_new(rd_kafka_resp_err_t err, char *errstr) {
- rd_kafka_DeleteAcls_result_response_t *result_response;
-
- result_response = rd_calloc(1, sizeof(*result_response));
- if (err)
- result_response->error = rd_kafka_error_new(
- err, "%s", errstr ? errstr : rd_kafka_err2str(err));
-
-        /* List of matching acls (rd_kafka_AclBinding_t *) */
- rd_list_init(&result_response->matching_acls, 0,
- rd_kafka_AclBinding_free);
-
- return result_response;
-}
-
-static void rd_kafka_DeleteAcls_result_response_destroy(
- rd_kafka_DeleteAcls_result_response_t *resp) {
- if (resp->error)
- rd_kafka_error_destroy(resp->error);
- rd_list_destroy(&resp->matching_acls);
- rd_free(resp);
-}
-
-static void rd_kafka_DeleteAcls_result_response_free(void *ptr) {
- rd_kafka_DeleteAcls_result_response_destroy(
- (rd_kafka_DeleteAcls_result_response_t *)ptr);
-}
-
-/**
- * @brief Get an array of rd_kafka_DeleteAcls_result_response_t from a
- *        DeleteAcls result.
- *
- * The returned \p rd_kafka_DeleteAcls_result_response_t life-time is the
- * same as the \p result object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_DeleteAcls_result_response_t **
-rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_delete_acl_result_responses(
- (const rd_kafka_op_t *)result, cntp);
-}
-
-const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
- const rd_kafka_DeleteAcls_result_response_t *result_response) {
- return result_response->error;
-}
-
-const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(
- const rd_kafka_DeleteAcls_result_response_t *result_response,
- size_t *matching_acls_cntp) {
- *matching_acls_cntp = result_response->matching_acls.rl_cnt;
- return (const rd_kafka_AclBinding_t **)
- result_response->matching_acls.rl_elems;
-}
-
-/**
- * @brief Parse DeleteAclsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DeleteAclsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_op_t *rko_result = NULL;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int32_t res_cnt;
- int i;
- int j;
-
- rd_kafka_buf_read_throttle_time(reply);
-
- /* #responses */
- rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000);
-
- rko_result = rd_kafka_admin_result_new(rko_req);
-
- rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
- rd_kafka_DeleteAcls_result_response_free);
-
- for (i = 0; i < (int)res_cnt; i++) {
- int16_t error_code;
- rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
- char *errstr = NULL;
- const rd_kafka_DeleteAcls_result_response_t *result_response;
- int32_t matching_acls_cnt;
-
- rd_kafka_buf_read_i16(reply, &error_code);
- rd_kafka_buf_read_str(reply, &error_msg);
-
- if (error_code) {
- if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
- RD_KAFKAP_STR_LEN(&error_msg) == 0)
- errstr = (char *)rd_kafka_err2str(error_code);
- else
- RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
- }
-
- result_response =
- rd_kafka_DeleteAcls_result_response_new(error_code, errstr);
-
-                /* #matching_acls */
- rd_kafka_buf_read_arraycnt(reply, &matching_acls_cnt, 100000);
- for (j = 0; j < (int)matching_acls_cnt; j++) {
- int16_t acl_error_code;
- int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN;
- rd_kafkap_str_t acl_error_msg =
- RD_KAFKAP_STR_INITIALIZER;
- rd_kafkap_str_t kres_name;
- rd_kafkap_str_t khost;
- rd_kafkap_str_t kprincipal;
- int8_t resource_pattern_type =
- RD_KAFKA_RESOURCE_PATTERN_LITERAL;
- int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
- int8_t permission_type =
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
- rd_kafka_AclBinding_t *matching_acl;
- char *acl_errstr = NULL;
- char *res_name;
- char *principal;
- char *host;
-
- rd_kafka_buf_read_i16(reply, &acl_error_code);
- rd_kafka_buf_read_str(reply, &acl_error_msg);
- if (acl_error_code) {
- if (RD_KAFKAP_STR_IS_NULL(&acl_error_msg) ||
- RD_KAFKAP_STR_LEN(&acl_error_msg) == 0)
- acl_errstr = (char *)rd_kafka_err2str(
- acl_error_code);
- else
- RD_KAFKAP_STR_DUPA(&acl_errstr,
- &acl_error_msg);
- }
-
- rd_kafka_buf_read_i8(reply, &res_type);
- rd_kafka_buf_read_str(reply, &kres_name);
-
- if (rd_kafka_buf_ApiVersion(reply) >= 1) {
- rd_kafka_buf_read_i8(reply,
- &resource_pattern_type);
- }
-
- rd_kafka_buf_read_str(reply, &kprincipal);
- rd_kafka_buf_read_str(reply, &khost);
- rd_kafka_buf_read_i8(reply, &operation);
- rd_kafka_buf_read_i8(reply, &permission_type);
- RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
- RD_KAFKAP_STR_DUPA(&principal, &kprincipal);
- RD_KAFKAP_STR_DUPA(&host, &khost);
-
- if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN ||
- res_type >= RD_KAFKA_RESOURCE__CNT) {
- rd_rkb_log(rkb, LOG_WARNING,
- "DELETEACLSRESPONSE",
- "DeleteAclsResponse returned "
- "unknown resource type %d",
- res_type);
- res_type = RD_KAFKA_RESOURCE_UNKNOWN;
- }
- if (resource_pattern_type <=
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
- resource_pattern_type >=
- RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
- rd_rkb_log(rkb, LOG_WARNING,
- "DELETEACLSRESPONSE",
- "DeleteAclsResponse returned "
- "unknown resource pattern type %d",
- resource_pattern_type);
- resource_pattern_type =
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN;
- }
- if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
- operation >= RD_KAFKA_ACL_OPERATION__CNT) {
- rd_rkb_log(rkb, LOG_WARNING,
- "DELETEACLSRESPONSE",
- "DeleteAclsResponse returned "
- "unknown acl operation %d",
- operation);
- operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
- }
- if (permission_type <=
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
- permission_type >=
- RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
- rd_rkb_log(rkb, LOG_WARNING,
- "DELETEACLSRESPONSE",
- "DeleteAclsResponse returned "
- "unknown acl permission type %d",
- permission_type);
- permission_type =
- RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
- }
-
- matching_acl = rd_kafka_AclBinding_new0(
- res_type, res_name, resource_pattern_type,
- principal, host, operation, permission_type,
- acl_error_code, acl_errstr);
-
- rd_list_add(
- (rd_list_t *)&result_response->matching_acls,
- (void *)matching_acl);
- }
-
- rd_list_add(&rko_result->rko_u.admin_result.results,
- (void *)result_response);
- }
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(errstr, errstr_size,
- "DeleteAcls response protocol parse failure: %s",
- rd_kafka_err2str(err));
-
- return err;
-}
-
-
-void rd_kafka_DeleteAcls(rd_kafka_t *rk,
- rd_kafka_AclBindingFilter_t **del_acls,
- size_t del_acls_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- size_t i;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_DeleteAclsRequest, rd_kafka_DeleteAclsResponse_parse};
-
- rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETEACLS,
- RD_KAFKA_EVENT_DELETEACLS_RESULT,
- &cbs, options, rkqu->rkqu_q);
-
- rd_list_init(&rko->rko_u.admin_request.args, (int)del_acls_cnt,
- rd_kafka_AclBinding_free);
-
- for (i = 0; i < del_acls_cnt; i++)
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_AclBindingFilter_copy(del_acls[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
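-
-/*
- * Illustrative sketch: delete every ACL matching `filter` and print
- * the bindings the broker reports as deleted. Assumes a configured
- * `rk`; error handling trimmed.
- *
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_DeleteAcls(rk, &filter, 1, NULL, queue);
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (rkev && !rd_kafka_event_error(rkev)) {
- *       size_t resp_cnt;
- *       const rd_kafka_DeleteAcls_result_response_t **responses =
- *           rd_kafka_DeleteAcls_result_responses(
- *               rd_kafka_event_DeleteAcls_result(rkev), &resp_cnt);
- *       for (size_t i = 0; i < resp_cnt; i++) {
- *           size_t acl_cnt;
- *           const rd_kafka_AclBinding_t **matching =
- *               rd_kafka_DeleteAcls_result_response_matching_acls(
- *                   responses[i], &acl_cnt);
- *           for (size_t j = 0; j < acl_cnt; j++)
- *               printf("deleted: %s\n",
- *                      rd_kafka_AclBinding_name(matching[j]));
- *       }
- *   }
- */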
-
-/**@}*/
-
-/**
- * @name Alter consumer group offsets (committed offsets)
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(
- const char *group_id,
- const rd_kafka_topic_partition_list_t *partitions) {
- rd_assert(group_id && partitions);
-
- size_t tsize = strlen(group_id) + 1;
- rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets;
-
- /* Single allocation */
- alter_grpoffsets = rd_malloc(sizeof(*alter_grpoffsets) + tsize);
- alter_grpoffsets->group_id = alter_grpoffsets->data;
- memcpy(alter_grpoffsets->group_id, group_id, tsize);
- alter_grpoffsets->partitions =
- rd_kafka_topic_partition_list_copy(partitions);
-
- return alter_grpoffsets;
-}
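-
-/*
- * Illustrative sketch: build the input for a single group, committing
- * offset 42 for partition 0 of "mytopic". Group and topic names are
- * example values; the constructor copies the partition list.
- *
- *   rd_kafka_topic_partition_list_t *parts =
- *       rd_kafka_topic_partition_list_new(1);
- *   rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset = 42;
- *
- *   rd_kafka_AlterConsumerGroupOffsets_t *alter =
- *       rd_kafka_AlterConsumerGroupOffsets_new("mygroup", parts);
- *   rd_kafka_topic_partition_list_destroy(parts);
- */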
-
-void rd_kafka_AlterConsumerGroupOffsets_destroy(
- rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets) {
- rd_kafka_topic_partition_list_destroy(alter_grpoffsets->partitions);
- rd_free(alter_grpoffsets);
-}
-
-static void rd_kafka_AlterConsumerGroupOffsets_free(void *ptr) {
- rd_kafka_AlterConsumerGroupOffsets_destroy(ptr);
-}
-
-void rd_kafka_AlterConsumerGroupOffsets_destroy_array(
- rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
- size_t alter_grpoffsets_cnt) {
- size_t i;
- for (i = 0; i < alter_grpoffsets_cnt; i++)
- rd_kafka_AlterConsumerGroupOffsets_destroy(alter_grpoffsets[i]);
-}
-
-/**
- * @brief Allocate a new AlterConsumerGroupOffsets and make a copy of \p src
- */
-static rd_kafka_AlterConsumerGroupOffsets_t *
-rd_kafka_AlterConsumerGroupOffsets_copy(
- const rd_kafka_AlterConsumerGroupOffsets_t *src) {
- return rd_kafka_AlterConsumerGroupOffsets_new(src->group_id,
- src->partitions);
-}
-
-/**
- * @brief Send a OffsetCommitRequest to \p rkb with the partitions
- * in alter_grpoffsets (AlterConsumerGroupOffsets_t*) using
- * \p options.
- *
- */
-static rd_kafka_resp_err_t rd_kafka_AlterConsumerGroupOffsetsRequest(
- rd_kafka_broker_t *rkb,
- /* (rd_kafka_AlterConsumerGroupOffsets_t*) */
- const rd_list_t *alter_grpoffsets,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- const rd_kafka_AlterConsumerGroupOffsets_t *grpoffsets =
- rd_list_elem(alter_grpoffsets, 0);
-
- rd_assert(rd_list_cnt(alter_grpoffsets) == 1);
-
- rd_kafka_topic_partition_list_t *offsets = grpoffsets->partitions;
- rd_kafka_consumer_group_metadata_t *cgmetadata =
- rd_kafka_consumer_group_metadata_new(grpoffsets->group_id);
-
- int ret = rd_kafka_OffsetCommitRequest(
- rkb, cgmetadata, offsets, replyq, resp_cb, opaque,
- "rd_kafka_AlterConsumerGroupOffsetsRequest");
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- if (ret == 0) {
- rd_snprintf(errstr, errstr_size,
- "At least one topic-partition offset must "
- "be >= 0");
- return RD_KAFKA_RESP_ERR__NO_OFFSET;
- }
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Parse OffsetCommitResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_AlterConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_t *rk;
- rd_kafka_broker_t *rkb;
- rd_kafka_op_t *rko_result;
- rd_kafka_topic_partition_list_t *partitions = NULL;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- const rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets =
- rd_list_elem(&rko_req->rko_u.admin_request.args, 0);
- partitions =
- rd_kafka_topic_partition_list_copy(alter_grpoffsets->partitions);
-
- rk = rko_req->rko_rk;
- rkb = reply->rkbuf_rkb;
- err = rd_kafka_handle_OffsetCommit(rk, rkb, err, reply, NULL,
- partitions, rd_true);
-
- /* Create result op and group_result_t */
- rko_result = rd_kafka_admin_result_new(rko_req);
- rd_list_init(&rko_result->rko_u.admin_result.results, 1,
- rd_kafka_group_result_free);
- rd_list_add(&rko_result->rko_u.admin_result.results,
- rd_kafka_group_result_new(alter_grpoffsets->group_id, -1,
- partitions, NULL));
- rd_kafka_topic_partition_list_destroy(partitions);
- *rko_resultp = rko_result;
-
- if (reply->rkbuf_err)
- rd_snprintf(
- errstr, errstr_size,
- "AlterConsumerGroupOffset response parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-void rd_kafka_AlterConsumerGroupOffsets(
- rd_kafka_t *rk,
- rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
- size_t alter_grpoffsets_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- int i;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_AlterConsumerGroupOffsetsRequest,
- rd_kafka_AlterConsumerGroupOffsetsResponse_parse,
- };
- rd_kafka_op_t *rko;
- rd_kafka_topic_partition_list_t *copied_offsets;
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS,
- RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT, &cbs, options,
- rkqu->rkqu_q);
-
- if (alter_grpoffsets_cnt != 1) {
- /* For simplicity we only support one single group for now */
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Exactly one "
- "AlterConsumerGroupOffsets must "
- "be passed");
- goto fail;
- }
-
- if (alter_grpoffsets[0]->partitions->cnt == 0) {
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Non-empty topic partition list "
- "must be present");
- goto fail;
- }
-
- for (i = 0; i < alter_grpoffsets[0]->partitions->cnt; i++) {
- if (alter_grpoffsets[0]->partitions->elems[i].offset < 0) {
- rd_kafka_admin_result_fail(
- rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "All topic-partition offsets "
- "must be >= 0");
- goto fail;
- }
- }
-
- /* TODO: add group id duplication check if in future more than one
- * AlterConsumerGroupOffsets can be passed */
-
-        /* Copy the offsets list to check for duplicates */
- copied_offsets =
- rd_kafka_topic_partition_list_copy(alter_grpoffsets[0]->partitions);
- if (rd_kafka_topic_partition_list_has_duplicates(
- copied_offsets, rd_false /*check partition*/)) {
- rd_kafka_topic_partition_list_destroy(copied_offsets);
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate partitions not allowed");
- goto fail;
- }
- rd_kafka_topic_partition_list_destroy(copied_offsets);
-
- rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
- rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
- rko->rko_u.admin_request.coordkey =
- rd_strdup(alter_grpoffsets[0]->group_id);
-
- /* Store copy of group on request so the group name can be reached
- * from the response parser. */
- rd_list_init(&rko->rko_u.admin_request.args, 1,
- rd_kafka_AlterConsumerGroupOffsets_free);
- rd_list_add(&rko->rko_u.admin_request.args,
- (void *)rd_kafka_AlterConsumerGroupOffsets_copy(
- alter_grpoffsets[0]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
- return;
-fail:
- rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/);
-}
-
-
-/**
- * @brief Get an array of group results from an AlterConsumerGroupOffsets
- *        result.
- *
- * The returned \p groups life-time is the same as the \p result object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_group_result_t **
-rd_kafka_AlterConsumerGroupOffsets_result_groups(
- const rd_kafka_AlterConsumerGroupOffsets_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
- cntp);
-}
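-
-/*
- * Illustrative sketch, continuing from the constructor example above;
- * note that exactly one AlterConsumerGroupOffsets_t must be passed per
- * call. Assumes a configured `rk`; error handling trimmed.
- *
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_AlterConsumerGroupOffsets(rk, &alter, 1, NULL, queue);
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (rkev && !rd_kafka_event_error(rkev)) {
- *       size_t cnt;
- *       const rd_kafka_group_result_t **groups =
- *           rd_kafka_AlterConsumerGroupOffsets_result_groups(
- *               rd_kafka_event_AlterConsumerGroupOffsets_result(rkev),
- *               &cnt);
- *       if (cnt == 1 && rd_kafka_group_result_error(groups[0]))
- *           fprintf(stderr, "Alter failed: %s\n",
- *                   rd_kafka_error_string(
- *                       rd_kafka_group_result_error(groups[0])));
- *   }
- */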
-
-/**@}*/
-
-/**
- * @name List consumer group offsets (committed offsets)
- * @{
- *
- *
- *
- *
- */
-
-rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(
- const char *group_id,
- const rd_kafka_topic_partition_list_t *partitions) {
- size_t tsize = strlen(group_id) + 1;
- rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets;
-
- rd_assert(group_id);
-
- /* Single allocation */
- list_grpoffsets = rd_calloc(1, sizeof(*list_grpoffsets) + tsize);
- list_grpoffsets->group_id = list_grpoffsets->data;
- memcpy(list_grpoffsets->group_id, group_id, tsize);
- if (partitions) {
- list_grpoffsets->partitions =
- rd_kafka_topic_partition_list_copy(partitions);
- }
-
- return list_grpoffsets;
-}
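-
-/*
- * Illustrative sketch: passing NULL for \p partitions queries the
- * committed offsets of all partitions of the group, while a non-empty
- * list restricts the query. "mygroup" is an example value.
- *
- *   rd_kafka_ListConsumerGroupOffsets_t *list =
- *       rd_kafka_ListConsumerGroupOffsets_new("mygroup", NULL);
- */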
-
-void rd_kafka_ListConsumerGroupOffsets_destroy(
- rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets) {
- if (list_grpoffsets->partitions != NULL) {
- rd_kafka_topic_partition_list_destroy(
- list_grpoffsets->partitions);
- }
- rd_free(list_grpoffsets);
-}
-
-static void rd_kafka_ListConsumerGroupOffsets_free(void *ptr) {
- rd_kafka_ListConsumerGroupOffsets_destroy(ptr);
-}
-
-void rd_kafka_ListConsumerGroupOffsets_destroy_array(
- rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
- size_t list_grpoffsets_cnt) {
- size_t i;
- for (i = 0; i < list_grpoffsets_cnt; i++)
- rd_kafka_ListConsumerGroupOffsets_destroy(list_grpoffsets[i]);
-}
-
-/**
- * @brief Allocate a new ListConsumerGroupOffsets and make a copy of \p src
- */
-static rd_kafka_ListConsumerGroupOffsets_t *
-rd_kafka_ListConsumerGroupOffsets_copy(
- const rd_kafka_ListConsumerGroupOffsets_t *src) {
- return rd_kafka_ListConsumerGroupOffsets_new(src->group_id,
- src->partitions);
-}
-
-/**
- * @brief Send a OffsetFetchRequest to \p rkb with the partitions
- * in list_grpoffsets (ListConsumerGroupOffsets_t*) using
- * \p options.
- *
- */
-static rd_kafka_resp_err_t rd_kafka_ListConsumerGroupOffsetsRequest(
- rd_kafka_broker_t *rkb,
- /* (rd_kafka_ListConsumerGroupOffsets_t*) */
- const rd_list_t *list_grpoffsets,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- int op_timeout;
- rd_bool_t require_stable_offsets;
- const rd_kafka_ListConsumerGroupOffsets_t *grpoffsets =
- rd_list_elem(list_grpoffsets, 0);
-
- rd_assert(rd_list_cnt(list_grpoffsets) == 1);
-
- op_timeout = rd_kafka_confval_get_int(&options->request_timeout);
- require_stable_offsets =
- rd_kafka_confval_get_int(&options->require_stable_offsets);
- rd_kafka_OffsetFetchRequest(
- rkb, grpoffsets->group_id, grpoffsets->partitions,
- require_stable_offsets, op_timeout, replyq, resp_cb, opaque);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Parse OffsetFetchResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_ListConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets =
- rd_list_elem(&rko_req->rko_u.admin_request.args, 0);
- rd_kafka_t *rk;
- rd_kafka_broker_t *rkb;
- rd_kafka_topic_partition_list_t *offsets = NULL;
- rd_kafka_op_t *rko_result;
- rd_kafka_resp_err_t err;
-
- rk = rko_req->rko_rk;
- rkb = reply->rkbuf_rkb;
- err = rd_kafka_handle_OffsetFetch(rk, rkb, RD_KAFKA_RESP_ERR_NO_ERROR,
- reply, NULL, &offsets, rd_false,
- rd_true, rd_false);
-
- if (unlikely(err != RD_KAFKA_RESP_ERR_NO_ERROR)) {
- reply->rkbuf_err = err;
- goto err;
- }
-
- /* Create result op and group_result_t */
- rko_result = rd_kafka_admin_result_new(rko_req);
- rd_list_init(&rko_result->rko_u.admin_result.results, 1,
- rd_kafka_group_result_free);
- rd_list_add(&rko_result->rko_u.admin_result.results,
- rd_kafka_group_result_new(list_grpoffsets->group_id, -1,
- offsets, NULL));
-
- if (likely(offsets != NULL))
- rd_kafka_topic_partition_list_destroy(offsets);
-
- *rko_resultp = rko_result;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-err:
- if (likely(offsets != NULL))
- rd_kafka_topic_partition_list_destroy(offsets);
-
- rd_snprintf(errstr, errstr_size,
- "ListConsumerGroupOffsetsResponse response failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-void rd_kafka_ListConsumerGroupOffsets(
- rd_kafka_t *rk,
- rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
- size_t list_grpoffsets_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_ListConsumerGroupOffsetsRequest,
- rd_kafka_ListConsumerGroupOffsetsResponse_parse,
- };
- rd_kafka_op_t *rko;
- rd_kafka_topic_partition_list_t *copied_offsets;
-
- rd_assert(rkqu);
-
- rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS,
- RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, &cbs, options,
- rkqu->rkqu_q);
-
- if (list_grpoffsets_cnt != 1) {
- /* For simplicity we only support one single group for now */
- rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Exactly one "
- "ListConsumerGroupOffsets must "
- "be passed");
- goto fail;
- }
-
- if (list_grpoffsets[0]->partitions != NULL &&
- list_grpoffsets[0]->partitions->cnt == 0) {
-                /* Either pass NULL for all the partitions or a
-                 * non-empty list. */
- rd_kafka_admin_result_fail(
- rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "NULL or "
- "non-empty topic partition list must "
- "be passed");
- goto fail;
- }
-
- /* TODO: add group id duplication check when implementing KIP-709 */
- if (list_grpoffsets[0]->partitions != NULL) {
-                /* Copy the offsets list to check for duplicates */
- copied_offsets = rd_kafka_topic_partition_list_copy(
- list_grpoffsets[0]->partitions);
- if (rd_kafka_topic_partition_list_has_duplicates(
- copied_offsets, rd_false /*check partition*/)) {
- rd_kafka_topic_partition_list_destroy(copied_offsets);
- rd_kafka_admin_result_fail(
- rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate partitions not allowed");
- goto fail;
- }
- rd_kafka_topic_partition_list_destroy(copied_offsets);
- }
-
- rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
- rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
- rko->rko_u.admin_request.coordkey =
- rd_strdup(list_grpoffsets[0]->group_id);
-
- /* Store copy of group on request so the group name can be reached
- * from the response parser. */
- rd_list_init(&rko->rko_u.admin_request.args, 1,
- rd_kafka_ListConsumerGroupOffsets_free);
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_kafka_ListConsumerGroupOffsets_copy(list_grpoffsets[0]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
- return;
-fail:
- rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/);
-}
-
-
-/**
- * @brief Get an array of group results from a ListConsumerGroupOffsets
- *        result.
- *
- * The returned \p groups life-time is the same as the \p result object.
- * @param cntp is updated to the number of elements in the array.
- */
-const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(
- const rd_kafka_ListConsumerGroupOffsets_result_t *result,
- size_t *cntp) {
- return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
- cntp);
-}
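-
-/*
- * Illustrative sketch, continuing from the constructor example above.
- * Assumes a configured `rk`; error handling trimmed.
- *
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_ListConsumerGroupOffsets(rk, &list, 1, NULL, queue);
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (rkev && !rd_kafka_event_error(rkev)) {
- *       size_t cnt;
- *       const rd_kafka_group_result_t **groups =
- *           rd_kafka_ListConsumerGroupOffsets_result_groups(
- *               rd_kafka_event_ListConsumerGroupOffsets_result(rkev),
- *               &cnt);
- *       const rd_kafka_topic_partition_list_t *offsets =
- *           rd_kafka_group_result_partitions(groups[0]);
- *       for (int i = 0; i < offsets->cnt; i++)
- *           printf("%s [%d] @ %lld\n", offsets->elems[i].topic,
- *                  offsets->elems[i].partition,
- *                  (long long)offsets->elems[i].offset);
- *   }
- */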
-
-/**@}*/
-
-/**
- * @name List consumer groups
- * @{
- *
- *
- *
- *
- */
-
-#define CONSUMER_PROTOCOL_TYPE "consumer"
-
-/**
- * @brief Create a new ConsumerGroupListing object.
- *
- * @param group_id The group id.
- * @param is_simple_consumer_group Is the group simple?
- * @param state Group state.
- */
-static rd_kafka_ConsumerGroupListing_t *
-rd_kafka_ConsumerGroupListing_new(const char *group_id,
- rd_bool_t is_simple_consumer_group,
- rd_kafka_consumer_group_state_t state) {
- rd_kafka_ConsumerGroupListing_t *grplist;
- grplist = rd_calloc(1, sizeof(*grplist));
- grplist->group_id = rd_strdup(group_id);
- grplist->is_simple_consumer_group = is_simple_consumer_group;
- grplist->state = state;
- return grplist;
-}
-
-/**
- * @brief Copy \p grplist ConsumerGroupListing.
- *
- * @param grplist The group listing to copy.
- * @return A new allocated copy of the passed ConsumerGroupListing.
- */
-static rd_kafka_ConsumerGroupListing_t *rd_kafka_ConsumerGroupListing_copy(
- const rd_kafka_ConsumerGroupListing_t *grplist) {
- return rd_kafka_ConsumerGroupListing_new(
- grplist->group_id, grplist->is_simple_consumer_group,
- grplist->state);
-}
-
-/**
- * @brief Same as rd_kafka_ConsumerGroupListing_copy() but suitable for
- * rd_list_copy(). The \p opaque is ignored.
- */
-static void *rd_kafka_ConsumerGroupListing_copy_opaque(const void *grplist,
- void *opaque) {
- return rd_kafka_ConsumerGroupListing_copy(grplist);
-}
-
-static void rd_kafka_ConsumerGroupListing_destroy(
- rd_kafka_ConsumerGroupListing_t *grplist) {
- RD_IF_FREE(grplist->group_id, rd_free);
- rd_free(grplist);
-}
-
-static void rd_kafka_ConsumerGroupListing_free(void *ptr) {
- rd_kafka_ConsumerGroupListing_destroy(ptr);
-}
-
-const char *rd_kafka_ConsumerGroupListing_group_id(
- const rd_kafka_ConsumerGroupListing_t *grplist) {
- return grplist->group_id;
-}
-
-int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
- const rd_kafka_ConsumerGroupListing_t *grplist) {
- return grplist->is_simple_consumer_group;
-}
-
-rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(
- const rd_kafka_ConsumerGroupListing_t *grplist) {
- return grplist->state;
-}
-
-/**
- * @brief Create a new ListConsumerGroupsResult object.
- *
- * @param valid List of valid ConsumerGroupListing results to copy.
- * @param errors List of errors (rd_kafka_error_t *) to copy.
- */
-static rd_kafka_ListConsumerGroupsResult_t *
-rd_kafka_ListConsumerGroupsResult_new(const rd_list_t *valid,
- const rd_list_t *errors) {
- rd_kafka_ListConsumerGroupsResult_t *res;
- res = rd_calloc(1, sizeof(*res));
- rd_list_init_copy(&res->valid, valid);
- rd_list_copy_to(&res->valid, valid,
- rd_kafka_ConsumerGroupListing_copy_opaque, NULL);
- rd_list_init_copy(&res->errors, errors);
- rd_list_copy_to(&res->errors, errors, rd_kafka_error_copy_opaque, NULL);
- return res;
-}
-
-static void rd_kafka_ListConsumerGroupsResult_destroy(
- rd_kafka_ListConsumerGroupsResult_t *res) {
- rd_list_destroy(&res->valid);
- rd_list_destroy(&res->errors);
- rd_free(res);
-}
-
-static void rd_kafka_ListConsumerGroupsResult_free(void *ptr) {
- rd_kafka_ListConsumerGroupsResult_destroy(ptr);
-}
-
-/**
- * @brief Copy the passed ListConsumerGroupsResult.
- *
- * @param res the ListConsumerGroupsResult to copy
- * @return a newly allocated ListConsumerGroupsResult object.
- *
- * @sa Release the object with rd_kafka_ListConsumerGroupsResult_destroy().
- */
-static rd_kafka_ListConsumerGroupsResult_t *
-rd_kafka_ListConsumerGroupsResult_copy(
- const rd_kafka_ListConsumerGroupsResult_t *res) {
- return rd_kafka_ListConsumerGroupsResult_new(&res->valid, &res->errors);
-}
-
-/**
- * @brief Same as rd_kafka_ListConsumerGroupsResult_copy() but suitable for
- * rd_list_copy(). The \p opaque is ignored.
- */
-static void *rd_kafka_ListConsumerGroupsResult_copy_opaque(const void *list,
- void *opaque) {
- return rd_kafka_ListConsumerGroupsResult_copy(list);
-}
-
-/**
- * @brief Send ListConsumerGroupsRequest. Admin worker compatible callback.
- */
-static rd_kafka_resp_err_t
-rd_kafka_admin_ListConsumerGroupsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *groups /*(char*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- int i;
- rd_kafka_resp_err_t err;
- rd_kafka_error_t *error;
- const char **states_str = NULL;
- int states_str_cnt = 0;
- rd_list_t *states =
- rd_kafka_confval_get_ptr(&options->match_consumer_group_states);
-
- /* Prepare list_options */
- if (states && rd_list_cnt(states) > 0) {
- states_str_cnt = rd_list_cnt(states);
- states_str = rd_calloc(states_str_cnt, sizeof(*states_str));
- for (i = 0; i < states_str_cnt; i++) {
- states_str[i] = rd_kafka_consumer_group_state_name(
- rd_list_get_int32(states, i));
- }
- }
-
- error = rd_kafka_ListGroupsRequest(rkb, -1, states_str, states_str_cnt,
- replyq, resp_cb, opaque);
-
- if (states_str) {
- rd_free(states_str);
- }
-
- if (error) {
- rd_snprintf(errstr, errstr_size, "%s",
- rd_kafka_error_string(error));
- err = rd_kafka_error_code(error);
- rd_kafka_error_destroy(error);
- return err;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Parse ListConsumerGroupsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_ListConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- int i, cnt;
- int16_t error_code, api_version;
- rd_kafka_op_t *rko_result = NULL;
- rd_kafka_error_t *error = NULL;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_list_t valid, errors;
- rd_kafka_ListConsumerGroupsResult_t *list_result;
- char *group_id = NULL, *group_state = NULL, *proto_type = NULL;
-
- api_version = rd_kafka_buf_ApiVersion(reply);
- if (api_version >= 1) {
- rd_kafka_buf_read_throttle_time(reply);
- }
- rd_kafka_buf_read_i16(reply, &error_code);
- if (error_code) {
-                error = rd_kafka_error_new(
-                    error_code, "Broker [%d] ListConsumerGroups: %s",
-                    rd_kafka_broker_id(rkb), rd_kafka_err2str(error_code));
- }
-
- rd_kafka_buf_read_arraycnt(reply, &cnt, RD_KAFKAP_GROUPS_MAX);
- rd_list_init(&valid, cnt, rd_kafka_ConsumerGroupListing_free);
- rd_list_init(&errors, 8, rd_free);
- if (error)
- rd_list_add(&errors, error);
-
- rko_result = rd_kafka_admin_result_new(rko_req);
- rd_list_init(&rko_result->rko_u.admin_result.results, 1,
- rd_kafka_ListConsumerGroupsResult_free);
-
- for (i = 0; i < cnt; i++) {
- rd_kafkap_str_t GroupId, ProtocolType,
- GroupState = RD_ZERO_INIT;
- rd_kafka_ConsumerGroupListing_t *group_listing;
- rd_bool_t is_simple_consumer_group, is_consumer_protocol_type;
- rd_kafka_consumer_group_state_t state =
- RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN;
-
- rd_kafka_buf_read_str(reply, &GroupId);
- rd_kafka_buf_read_str(reply, &ProtocolType);
- if (api_version >= 4) {
- rd_kafka_buf_read_str(reply, &GroupState);
- }
- rd_kafka_buf_skip_tags(reply);
-
- group_id = RD_KAFKAP_STR_DUP(&GroupId);
- proto_type = RD_KAFKAP_STR_DUP(&ProtocolType);
- if (api_version >= 4) {
- group_state = RD_KAFKAP_STR_DUP(&GroupState);
- state = rd_kafka_consumer_group_state_code(group_state);
- }
-
- is_simple_consumer_group = *proto_type == '\0';
- is_consumer_protocol_type =
- !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE);
- if (is_simple_consumer_group || is_consumer_protocol_type) {
- group_listing = rd_kafka_ConsumerGroupListing_new(
- group_id, is_simple_consumer_group, state);
- rd_list_add(&valid, group_listing);
- }
-
- rd_free(group_id);
- rd_free(group_state);
- rd_free(proto_type);
- group_id = NULL;
- group_state = NULL;
- proto_type = NULL;
- }
- rd_kafka_buf_skip_tags(reply);
-
-err_parse:
- if (group_id)
- rd_free(group_id);
- if (group_state)
- rd_free(group_state);
- if (proto_type)
- rd_free(proto_type);
-
- if (reply->rkbuf_err) {
- error_code = reply->rkbuf_err;
- error = rd_kafka_error_new(
- error_code,
- "Broker [%d"
- "] "
- "ListConsumerGroups response protocol parse failure: %s",
- rd_kafka_broker_id(rkb), rd_kafka_err2str(error_code));
- rd_list_add(&errors, error);
- }
-
- list_result = rd_kafka_ListConsumerGroupsResult_new(&valid, &errors);
- rd_list_add(&rko_result->rko_u.admin_result.results, list_result);
-
- *rko_resultp = rko_result;
- rd_list_destroy(&valid);
- rd_list_destroy(&errors);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/** @brief Merge the ListConsumerGroups response from a single broker
- * into the user response list.
- */
-static void
-rd_kafka_ListConsumerGroups_response_merge(rd_kafka_op_t *rko_fanout,
- const rd_kafka_op_t *rko_partial) {
- int cnt;
- rd_kafka_ListConsumerGroupsResult_t *res = NULL;
- rd_kafka_ListConsumerGroupsResult_t *newres;
- rd_list_t new_valid, new_errors;
-
- rd_assert(rko_partial->rko_evtype ==
- RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT);
-
- cnt = rd_list_cnt(&rko_fanout->rko_u.admin_request.fanout.results);
- if (cnt) {
- res = rd_list_elem(
- &rko_fanout->rko_u.admin_request.fanout.results, 0);
- } else {
- rd_list_init(&new_valid, 0, rd_kafka_ConsumerGroupListing_free);
- rd_list_init(&new_errors, 0, rd_free);
- res = rd_kafka_ListConsumerGroupsResult_new(&new_valid,
- &new_errors);
- rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, 0,
- res);
- rd_list_destroy(&new_valid);
- rd_list_destroy(&new_errors);
- }
- if (!rko_partial->rko_err) {
- int new_valid_count, new_errors_count;
- const rd_list_t *new_valid_list, *new_errors_list;
- /* Read the partial result and merge the valid groups
- * and the errors into the fanout parent result. */
- newres =
- rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
- rd_assert(newres);
- new_valid_count = rd_list_cnt(&newres->valid);
- new_errors_count = rd_list_cnt(&newres->errors);
- if (new_valid_count) {
- new_valid_list = &newres->valid;
- rd_list_grow(&res->valid, new_valid_count);
- rd_list_copy_to(
- &res->valid, new_valid_list,
- rd_kafka_ConsumerGroupListing_copy_opaque, NULL);
- }
- if (new_errors_count) {
- new_errors_list = &newres->errors;
- rd_list_grow(&res->errors, new_errors_count);
- rd_list_copy_to(&res->errors, new_errors_list,
- rd_kafka_error_copy_opaque, NULL);
- }
- } else {
- /* Op errored, e.g. timeout */
- rd_list_add(&res->errors,
- rd_kafka_error_new(rko_partial->rko_err, NULL));
- }
-}
-
-void rd_kafka_ListConsumerGroups(rd_kafka_t *rk,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko;
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_admin_ListConsumerGroupsRequest,
- rd_kafka_ListConsumerGroupsResponse_parse};
- static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
- rd_kafka_ListConsumerGroups_response_merge,
- rd_kafka_ListConsumerGroupsResult_copy_opaque,
- };
-
- rko = rd_kafka_admin_request_op_target_all_new(
- rk, RD_KAFKA_OP_LISTCONSUMERGROUPS,
- RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, &cbs, &fanout_cbs,
- rd_kafka_ListConsumerGroupsResult_free, options, rkqu->rkqu_q);
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-const rd_kafka_ConsumerGroupListing_t **
-rd_kafka_ListConsumerGroups_result_valid(
- const rd_kafka_ListConsumerGroups_result_t *result,
- size_t *cntp) {
- int list_result_cnt;
- const rd_kafka_ListConsumerGroupsResult_t *list_result;
- const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS);
-
- list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
- rd_assert(list_result_cnt == 1);
- list_result = rd_list_elem(&rko->rko_u.admin_result.results, 0);
- *cntp = rd_list_cnt(&list_result->valid);
-
- return (const rd_kafka_ConsumerGroupListing_t **)
- list_result->valid.rl_elems;
-}
-
-const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(
- const rd_kafka_ListConsumerGroups_result_t *result,
- size_t *cntp) {
- int list_result_cnt, error_cnt;
- const rd_kafka_ListConsumerGroupsResult_t *list_result;
- const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS);
-
- list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
- rd_assert(list_result_cnt == 1);
- list_result = rko->rko_u.admin_result.results.rl_elems[0];
- error_cnt = rd_list_cnt(&list_result->errors);
- if (error_cnt == 0) {
- *cntp = 0;
- return NULL;
- }
- *cntp = error_cnt;
- return (const rd_kafka_error_t **)list_result->errors.rl_elems;
-}
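-
-/*
- * Illustrative sketch: list only groups in the Stable state. Assumes a
- * configured `rk`; option and result error handling trimmed.
- *
- *   rd_kafka_consumer_group_state_t states[] = {
- *       RD_KAFKA_CONSUMER_GROUP_STATE_STABLE};
- *   rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
- *       rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
- *   rd_kafka_AdminOptions_set_match_consumer_group_states(options,
- *                                                         states, 1);
- *
- *   rd_kafka_queue_t *queue = rd_kafka_queue_new(rk);
- *   rd_kafka_ListConsumerGroups(rk, options, queue);
- *
- *   rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 10 * 1000);
- *   if (rkev && !rd_kafka_event_error(rkev)) {
- *       size_t valid_cnt;
- *       const rd_kafka_ConsumerGroupListing_t **valid =
- *           rd_kafka_ListConsumerGroups_result_valid(
- *               rd_kafka_event_ListConsumerGroups_result(rkev),
- *               &valid_cnt);
- *       for (size_t i = 0; i < valid_cnt; i++)
- *           printf("%s\n",
- *                  rd_kafka_ConsumerGroupListing_group_id(valid[i]));
- *   }
- *   rd_kafka_AdminOptions_destroy(options);
- */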
-
-/**@}*/
-
-/**
- * @name Describe consumer groups
- * @{
- *
- *
- *
- *
- */
-
-/**
- * @brief Create a new MemberDescription object. This object is used for
- * creating a ConsumerGroupDescription.
- *
- * @param client_id The client id.
- * @param consumer_id The consumer id (or member id).
- * @param group_instance_id (optional) The group instance id
- * for static membership.
- * @param host The consumer host.
- * @param assignment The member's assigned partitions, or NULL if none.
- *
- * @return A new allocated MemberDescription object.
- * Use rd_kafka_MemberDescription_destroy() to free when done.
- */
-static rd_kafka_MemberDescription_t *rd_kafka_MemberDescription_new(
- const char *client_id,
- const char *consumer_id,
- const char *group_instance_id,
- const char *host,
- const rd_kafka_topic_partition_list_t *assignment) {
- rd_kafka_MemberDescription_t *member;
- member = rd_calloc(1, sizeof(*member));
- member->client_id = rd_strdup(client_id);
- member->consumer_id = rd_strdup(consumer_id);
- if (group_instance_id)
- member->group_instance_id = rd_strdup(group_instance_id);
- member->host = rd_strdup(host);
- if (assignment)
- member->assignment.partitions =
- rd_kafka_topic_partition_list_copy(assignment);
- else
- member->assignment.partitions =
- rd_kafka_topic_partition_list_new(0);
- return member;
-}
-
-/**
- * @brief Allocate a new MemberDescription, copy of \p src
- * and return it.
- *
- * @param src The MemberDescription to copy.
- * @return A new allocated MemberDescription object,
- * Use rd_kafka_MemberDescription_destroy() to free when done.
- */
-static rd_kafka_MemberDescription_t *
-rd_kafka_MemberDescription_copy(const rd_kafka_MemberDescription_t *src) {
- return rd_kafka_MemberDescription_new(src->client_id, src->consumer_id,
- src->group_instance_id, src->host,
- src->assignment.partitions);
-}
-
-/**
- * @brief MemberDescription copy, compatible with rd_list_copy_to.
- *
- * @param elem The MemberDescription to copy.
- * @param opaque Not used.
- */
-static void *rd_kafka_MemberDescription_list_copy(const void *elem,
- void *opaque) {
- return rd_kafka_MemberDescription_copy(elem);
-}
-
-static void
-rd_kafka_MemberDescription_destroy(rd_kafka_MemberDescription_t *member) {
- rd_free(member->client_id);
- rd_free(member->consumer_id);
- rd_free(member->host);
- if (member->group_instance_id != NULL)
- rd_free(member->group_instance_id);
- if (member->assignment.partitions)
- rd_kafka_topic_partition_list_destroy(
- member->assignment.partitions);
- rd_free(member);
-}
-
-static void rd_kafka_MemberDescription_free(void *member) {
- rd_kafka_MemberDescription_destroy(member);
-}
-
-const char *rd_kafka_MemberDescription_client_id(
- const rd_kafka_MemberDescription_t *member) {
- return member->client_id;
-}
-
-const char *rd_kafka_MemberDescription_group_instance_id(
- const rd_kafka_MemberDescription_t *member) {
- return member->group_instance_id;
-}
-
-const char *rd_kafka_MemberDescription_consumer_id(
- const rd_kafka_MemberDescription_t *member) {
- return member->consumer_id;
-}
-
-const char *
-rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member) {
- return member->host;
-}
-
-const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(
- const rd_kafka_MemberDescription_t *member) {
- return &member->assignment;
-}
-
-const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(
- const rd_kafka_MemberAssignment_t *assignment) {
- return assignment->partitions;
-}
-
-
-/**
- * @brief Create a new ConsumerGroupDescription object.
- *
- * @param group_id The group id.
- * @param is_simple_consumer_group Is the group simple?
- * @param members List of members (rd_kafka_MemberDescription_t) of this
- * group.
- * @param partition_assignor (optional) Chosen assignor.
- * @param state Group state.
- * @param coordinator (optional) Group coordinator.
- * @param error (optional) Error received for this group.
- * @return A new allocated ConsumerGroupDescription object.
- * Use rd_kafka_ConsumerGroupDescription_destroy() to free when done.
- */
-static rd_kafka_ConsumerGroupDescription_t *
-rd_kafka_ConsumerGroupDescription_new(const char *group_id,
- rd_bool_t is_simple_consumer_group,
- const rd_list_t *members,
- const char *partition_assignor,
- rd_kafka_consumer_group_state_t state,
- const rd_kafka_Node_t *coordinator,
- rd_kafka_error_t *error) {
- rd_kafka_ConsumerGroupDescription_t *grpdesc;
- grpdesc = rd_calloc(1, sizeof(*grpdesc));
- grpdesc->group_id = rd_strdup(group_id);
- grpdesc->is_simple_consumer_group = is_simple_consumer_group;
- if (members == NULL) {
- rd_list_init(&grpdesc->members, 0,
- rd_kafka_MemberDescription_free);
- } else {
- rd_list_init_copy(&grpdesc->members, members);
- rd_list_copy_to(&grpdesc->members, members,
- rd_kafka_MemberDescription_list_copy, NULL);
- }
-        grpdesc->partition_assignor =
-            partition_assignor ? rd_strdup(partition_assignor) : NULL;
- grpdesc->state = state;
- if (coordinator != NULL)
- grpdesc->coordinator = rd_kafka_Node_copy(coordinator);
- grpdesc->error =
- error != NULL ? rd_kafka_error_new(rd_kafka_error_code(error), "%s",
- rd_kafka_error_string(error))
- : NULL;
- return grpdesc;
-}
-
-/**
- * @brief New instance of ConsumerGroupDescription from an error.
- *
- * @param group_id The group id.
- * @param error The error.
- * @return A new allocated ConsumerGroupDescription with the passed error.
- */
-static rd_kafka_ConsumerGroupDescription_t *
-rd_kafka_ConsumerGroupDescription_new_error(const char *group_id,
- rd_kafka_error_t *error) {
- return rd_kafka_ConsumerGroupDescription_new(
- group_id, rd_false, NULL, NULL,
- RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN, NULL, error);
-}
-
-/**
- * @brief Copy \p desc ConsumerGroupDescription.
- *
- * @param desc The group description to copy.
- * @return A new allocated copy of the passed ConsumerGroupDescription.
- */
-static rd_kafka_ConsumerGroupDescription_t *
-rd_kafka_ConsumerGroupDescription_copy(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return rd_kafka_ConsumerGroupDescription_new(
- grpdesc->group_id, grpdesc->is_simple_consumer_group,
- &grpdesc->members, grpdesc->partition_assignor, grpdesc->state,
- grpdesc->coordinator, grpdesc->error);
-}
-
-/**
- * @brief Same as rd_kafka_ConsumerGroupDescription_copy() but suitable for
- * rd_list_copy(). The \p opaque is ignored.
- */
-static void *rd_kafka_ConsumerGroupDescription_copy_opaque(const void *grpdesc,
- void *opaque) {
- return rd_kafka_ConsumerGroupDescription_copy(grpdesc);
-}
-
-static void rd_kafka_ConsumerGroupDescription_destroy(
- rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- if (likely(grpdesc->group_id != NULL))
- rd_free(grpdesc->group_id);
- rd_list_destroy(&grpdesc->members);
- if (likely(grpdesc->partition_assignor != NULL))
- rd_free(grpdesc->partition_assignor);
- if (likely(grpdesc->error != NULL))
- rd_kafka_error_destroy(grpdesc->error);
- if (grpdesc->coordinator)
- rd_kafka_Node_destroy(grpdesc->coordinator);
- rd_free(grpdesc);
-}
-
-static void rd_kafka_ConsumerGroupDescription_free(void *ptr) {
- rd_kafka_ConsumerGroupDescription_destroy(ptr);
-}
-
-const char *rd_kafka_ConsumerGroupDescription_group_id(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return grpdesc->group_id;
-}
-
-const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return grpdesc->error;
-}
-
-
-int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return grpdesc->is_simple_consumer_group;
-}
-
-
-const char *rd_kafka_ConsumerGroupDescription_partition_assignor(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return grpdesc->partition_assignor;
-}
-
-
-rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return grpdesc->state;
-}
-
-const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return grpdesc->coordinator;
-}
-
-size_t rd_kafka_ConsumerGroupDescription_member_count(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
- return rd_list_cnt(&grpdesc->members);
-}
-
-const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(
- const rd_kafka_ConsumerGroupDescription_t *grpdesc,
- size_t idx) {
- return (rd_kafka_MemberDescription_t *)rd_list_elem(&grpdesc->members,
- idx);
-}
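-
-/*
- * Illustrative sketch: walk the members of a group description
- * `grpdesc`, e.g. one element of a DescribeConsumerGroups result.
- *
- *   size_t member_cnt =
- *       rd_kafka_ConsumerGroupDescription_member_count(grpdesc);
- *   for (size_t i = 0; i < member_cnt; i++) {
- *       const rd_kafka_MemberDescription_t *member =
- *           rd_kafka_ConsumerGroupDescription_member(grpdesc, i);
- *       const rd_kafka_topic_partition_list_t *parts =
- *           rd_kafka_MemberAssignment_partitions(
- *               rd_kafka_MemberDescription_assignment(member));
- *       printf("%s on %s: %d assigned partition(s)\n",
- *              rd_kafka_MemberDescription_consumer_id(member),
- *              rd_kafka_MemberDescription_host(member), parts->cnt);
- *   }
- */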
-
-/**
- * @brief Group arguments comparator for DescribeConsumerGroups args
- */
-static int rd_kafka_DescribeConsumerGroups_cmp(const void *a, const void *b) {
- return strcmp(a, b);
-}
-
-/** @brief Merge the DescribeConsumerGroups response from a single broker
- * into the user response list.
- */
-static void rd_kafka_DescribeConsumerGroups_response_merge(
- rd_kafka_op_t *rko_fanout,
- const rd_kafka_op_t *rko_partial) {
- rd_kafka_ConsumerGroupDescription_t *groupres = NULL;
- rd_kafka_ConsumerGroupDescription_t *newgroupres;
- const char *grp = rko_partial->rko_u.admin_result.opaque;
- int orig_pos;
-
- rd_assert(rko_partial->rko_evtype ==
- RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT);
-
- if (!rko_partial->rko_err) {
- /* Proper results.
- * We only send one group per request, make sure it matches */
- groupres =
- rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
- rd_assert(groupres);
- rd_assert(!strcmp(groupres->group_id, grp));
- newgroupres = rd_kafka_ConsumerGroupDescription_copy(groupres);
- } else {
- /* Op errored, e.g. timeout */
- rd_kafka_error_t *error =
- rd_kafka_error_new(rko_partial->rko_err, NULL);
- newgroupres =
- rd_kafka_ConsumerGroupDescription_new_error(grp, error);
- rd_kafka_error_destroy(error);
- }
-
- /* As a convenience to the application we insert group result
- * in the same order as they were requested. */
- orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp,
- rd_kafka_DescribeConsumerGroups_cmp);
- rd_assert(orig_pos != -1);
-
- /* Make sure result is not already set */
- rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results,
- orig_pos) == NULL);
-
- rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos,
- newgroupres);
-}
-
-
-/**
- * @brief Construct and send DescribeConsumerGroupsRequest to \p rkb
- * with the groups (char *) in \p groups, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-static rd_kafka_resp_err_t rd_kafka_admin_DescribeConsumerGroupsRequest(
- rd_kafka_broker_t *rkb,
- const rd_list_t *groups /*(char*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- int i;
- char *group;
- rd_kafka_resp_err_t err;
- int groups_cnt = rd_list_cnt(groups);
- rd_kafka_error_t *error = NULL;
- char **groups_arr = rd_calloc(groups_cnt, sizeof(*groups_arr));
-
- RD_LIST_FOREACH(group, groups, i) {
- groups_arr[i] = rd_list_elem(groups, i);
- }
- error = rd_kafka_DescribeGroupsRequest(rkb, -1, groups_arr, groups_cnt,
- replyq, resp_cb, opaque);
- rd_free(groups_arr);
-
- if (error) {
- rd_snprintf(errstr, errstr_size, "%s",
- rd_kafka_error_string(error));
- err = rd_kafka_error_code(error);
- rd_kafka_error_destroy(error);
- return err;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Parse DescribeConsumerGroupsResponse and create ADMIN_RESULT op.
- */
-static rd_kafka_resp_err_t
-rd_kafka_DescribeConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req,
- rd_kafka_op_t **rko_resultp,
- rd_kafka_buf_t *reply,
- char *errstr,
- size_t errstr_size) {
- const int log_decode_errors = LOG_ERR;
- int nodeid;
- uint16_t port;
- int16_t api_version;
- int32_t cnt;
- rd_kafka_op_t *rko_result = NULL;
- rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
- rd_kafka_Node_t *node = NULL;
- rd_kafka_error_t *error = NULL;
- char *group_id = NULL, *group_state = NULL, *proto_type = NULL,
- *proto = NULL, *host = NULL;
-
- api_version = rd_kafka_buf_ApiVersion(reply);
- if (api_version >= 1) {
- rd_kafka_buf_read_throttle_time(reply);
- }
-
- rd_kafka_buf_read_arraycnt(reply, &cnt, 100000);
-
- rko_result = rd_kafka_admin_result_new(rko_req);
- rd_list_init(&rko_result->rko_u.admin_result.results, cnt,
- rd_kafka_ConsumerGroupDescription_free);
-
- rd_kafka_broker_lock(rkb);
- nodeid = rkb->rkb_nodeid;
- host = rd_strdup(rkb->rkb_origname);
- port = rkb->rkb_port;
- rd_kafka_broker_unlock(rkb);
-
- node = rd_kafka_Node_new(nodeid, host, port, NULL);
- while (cnt-- > 0) {
- int16_t error_code;
- rd_kafkap_str_t GroupId, GroupState, ProtocolType, ProtocolData;
- rd_bool_t is_simple_consumer_group, is_consumer_protocol_type;
- int32_t member_cnt;
- rd_list_t members;
- rd_kafka_ConsumerGroupDescription_t *grpdesc = NULL;
-
- rd_kafka_buf_read_i16(reply, &error_code);
- rd_kafka_buf_read_str(reply, &GroupId);
- rd_kafka_buf_read_str(reply, &GroupState);
- rd_kafka_buf_read_str(reply, &ProtocolType);
- rd_kafka_buf_read_str(reply, &ProtocolData);
- rd_kafka_buf_read_arraycnt(reply, &member_cnt, 100000);
-
- group_id = RD_KAFKAP_STR_DUP(&GroupId);
- group_state = RD_KAFKAP_STR_DUP(&GroupState);
- proto_type = RD_KAFKAP_STR_DUP(&ProtocolType);
- proto = RD_KAFKAP_STR_DUP(&ProtocolData);
-
- if (error_code) {
- error = rd_kafka_error_new(
- error_code, "DescribeConsumerGroups: %s",
- rd_kafka_err2str(error_code));
- }
-
- is_simple_consumer_group = *proto_type == '\0';
- is_consumer_protocol_type =
- !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE);
- if (error == NULL && !is_simple_consumer_group &&
- !is_consumer_protocol_type) {
- error = rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "GroupId %s is not a consumer group (%s).",
- group_id, proto_type);
- }
-
- rd_list_init(&members, 0, rd_kafka_MemberDescription_free);
-
- while (member_cnt-- > 0) {
- rd_kafkap_str_t MemberId, ClientId, ClientHost,
- GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
- char *member_id, *client_id, *client_host,
- *group_instance_id = NULL;
- rd_kafkap_bytes_t MemberMetadata, MemberAssignment;
- rd_kafka_MemberDescription_t *member;
- rd_kafka_topic_partition_list_t *partitions = NULL;
- rd_kafka_buf_t *rkbuf;
-
- rd_kafka_buf_read_str(reply, &MemberId);
- if (api_version >= 4) {
- rd_kafka_buf_read_str(reply, &GroupInstanceId);
- }
- rd_kafka_buf_read_str(reply, &ClientId);
- rd_kafka_buf_read_str(reply, &ClientHost);
- rd_kafka_buf_read_bytes(reply, &MemberMetadata);
- rd_kafka_buf_read_bytes(reply, &MemberAssignment);
- if (error != NULL)
- continue;
-
- if (RD_KAFKAP_BYTES_LEN(&MemberAssignment) != 0) {
- int16_t version;
- /* Parse assignment */
- rkbuf = rd_kafka_buf_new_shadow(
- MemberAssignment.data,
- RD_KAFKAP_BYTES_LEN(&MemberAssignment),
- NULL);
- /* Protocol parser needs a broker handle
- * to log errors on. */
- rkbuf->rkbuf_rkb = rkb;
- /* Decreased in rd_kafka_buf_destroy */
- rd_kafka_broker_keep(rkb);
- rd_kafka_buf_read_i16(rkbuf, &version);
- const rd_kafka_topic_partition_field_t fields[] =
- {RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- partitions = rd_kafka_buf_read_topic_partitions(
- rkbuf, 0, fields);
- rd_kafka_buf_destroy(rkbuf);
- if (!partitions)
- rd_kafka_buf_parse_fail(
- reply,
- "Error reading topic partitions");
- }
-
- member_id = RD_KAFKAP_STR_DUP(&MemberId);
- if (!RD_KAFKAP_STR_IS_NULL(&GroupInstanceId)) {
- group_instance_id =
- RD_KAFKAP_STR_DUP(&GroupInstanceId);
- }
- client_id = RD_KAFKAP_STR_DUP(&ClientId);
- client_host = RD_KAFKAP_STR_DUP(&ClientHost);
-
- member = rd_kafka_MemberDescription_new(
- client_id, member_id, group_instance_id,
- client_host, partitions);
- if (partitions)
- rd_kafka_topic_partition_list_destroy(
- partitions);
- rd_list_add(&members, member);
- rd_free(member_id);
- rd_free(group_instance_id);
- rd_free(client_id);
- rd_free(client_host);
- member_id = NULL;
- group_instance_id = NULL;
- client_id = NULL;
- client_host = NULL;
- }
-
- if (api_version >= 3) {
- /* TODO: implement KIP-430 */
- int32_t authorized_operations;
- rd_kafka_buf_read_i32(reply, &authorized_operations);
- }
-
- if (error == NULL) {
- grpdesc = rd_kafka_ConsumerGroupDescription_new(
- group_id, is_simple_consumer_group, &members, proto,
- rd_kafka_consumer_group_state_code(group_state),
- node, error);
- } else {
- grpdesc = rd_kafka_ConsumerGroupDescription_new_error(
- group_id, error);
- }
- rd_list_add(&rko_result->rko_u.admin_result.results, grpdesc);
- if (error)
- rd_kafka_error_destroy(error);
- rd_list_destroy(&members);
- rd_free(group_id);
- rd_free(group_state);
- rd_free(proto_type);
- rd_free(proto);
- error = NULL;
- group_id = NULL;
- group_state = NULL;
- proto_type = NULL;
- proto = NULL;
- }
-
- if (host)
- rd_free(host);
- if (node)
- rd_kafka_Node_destroy(node);
- *rko_resultp = rko_result;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (group_id)
- rd_free(group_id);
- if (group_state)
- rd_free(group_state);
- if (proto_type)
- rd_free(proto_type);
- if (proto)
- rd_free(proto);
- if (error)
- rd_kafka_error_destroy(error);
- if (host)
- rd_free(host);
- if (node)
- rd_kafka_Node_destroy(node);
- if (rko_result)
- rd_kafka_op_destroy(rko_result);
-
- rd_snprintf(
- errstr, errstr_size,
- "DescribeConsumerGroups response protocol parse failure: %s",
- rd_kafka_err2str(reply->rkbuf_err));
-
- return reply->rkbuf_err;
-}
-
-void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk,
- const char **groups,
- size_t groups_cnt,
- const rd_kafka_AdminOptions_t *options,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_op_t *rko_fanout;
- rd_list_t dup_list;
- size_t i;
- static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
- rd_kafka_DescribeConsumerGroups_response_merge,
- rd_kafka_ConsumerGroupDescription_copy_opaque};
-
- rd_assert(rkqu);
-
- rko_fanout = rd_kafka_admin_fanout_op_new(
- rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS,
- RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &fanout_cbs, options,
- rkqu->rkqu_q);
-
- if (groups_cnt == 0) {
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "No groups to describe");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- /* Copy group list and store it on the request op.
- * Maintain original ordering. */
- rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)groups_cnt,
- rd_free);
- for (i = 0; i < groups_cnt; i++)
- rd_list_add(&rko_fanout->rko_u.admin_request.args,
- rd_strdup(groups[i]));
-
- /* Check for duplicates.
- * Make a temporary copy of the group list and sort it to check for
- * duplicates, we don't want the original list sorted since we want
- * to maintain ordering. */
- rd_list_init(&dup_list,
- rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL);
- rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL,
- NULL);
- rd_list_sort(&dup_list, rd_kafka_DescribeConsumerGroups_cmp);
- if (rd_list_find_duplicate(&dup_list,
- rd_kafka_DescribeConsumerGroups_cmp)) {
- rd_list_destroy(&dup_list);
- rd_kafka_admin_result_fail(rko_fanout,
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate groups not allowed");
- rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
- rd_true /*destroy*/);
- return;
- }
-
- rd_list_destroy(&dup_list);
-
- /* Prepare results list where fanned out op's results will be
- * accumulated. */
- rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results,
- (int)groups_cnt, rd_kafka_ConsumerGroupDescription_free);
- rko_fanout->rko_u.admin_request.fanout.outstanding = (int)groups_cnt;
-
- /* Create individual request ops for each group.
- * FIXME: A future optimization is to coalesce all groups for a single
- * coordinator into one op. */
- for (i = 0; i < groups_cnt; i++) {
- static const struct rd_kafka_admin_worker_cbs cbs = {
- rd_kafka_admin_DescribeConsumerGroupsRequest,
- rd_kafka_DescribeConsumerGroupsResponse_parse,
- };
- char *grp =
- rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i);
- rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
- rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS,
- RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &cbs, options,
- rk->rk_ops);
-
- rko->rko_u.admin_request.fanout_parent = rko_fanout;
- rko->rko_u.admin_request.broker_id =
- RD_KAFKA_ADMIN_TARGET_COORDINATOR;
- rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
- rko->rko_u.admin_request.coordkey = rd_strdup(grp);
-
-                /* Set the group name as the opaque so the fanout worker can
-                 * use it to fill in errors.
- * References rko_fanout's memory, which will always outlive
- * the fanned out op. */
- rd_kafka_AdminOptions_set_opaque(
- &rko->rko_u.admin_request.options, grp);
-
- rd_list_init(&rko->rko_u.admin_request.args, 1, rd_free);
- rd_list_add(&rko->rko_u.admin_request.args,
- rd_strdup(groups[i]));
-
- rd_kafka_q_enq(rk->rk_ops, rko);
- }
-}
-
-const rd_kafka_ConsumerGroupDescription_t **
-rd_kafka_DescribeConsumerGroups_result_groups(
- const rd_kafka_DescribeConsumerGroups_result_t *result,
- size_t *cntp) {
- const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
- rd_kafka_op_type_t reqtype =
- rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
- rd_assert(reqtype == RD_KAFKA_OP_DESCRIBECONSUMERGROUPS);
-
- *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
- return (const rd_kafka_ConsumerGroupDescription_t **)
- rko->rko_u.admin_result.results.rl_elems;
-}
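-
-/* Illustrative usage sketch (not part of the original file): describing a
- * consumer group from an application using only public rdkafka.h APIs
- * (rd_kafka_queue_new(), rd_kafka_queue_poll(),
- * rd_kafka_event_DescribeConsumerGroups_result()). Assumes an existing
- * rd_kafka_t *rk and <stdio.h>; error handling is omitted for brevity. */
-#if 0 /* example only */
-static void example_describe_groups(rd_kafka_t *rk) {
-        const char *groups[] = {"my-group"};
-        rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
-        rd_kafka_event_t *rkev;
-        const rd_kafka_DescribeConsumerGroups_result_t *result;
-        const rd_kafka_ConsumerGroupDescription_t **descs;
-        size_t cnt, i;
-
-        rd_kafka_DescribeConsumerGroups(rk, groups, 1, NULL /*options*/, rkqu);
-
-        /* Wait for the result event (10s timeout). */
-        rkev   = rd_kafka_queue_poll(rkqu, 10 * 1000);
-        result = rd_kafka_event_DescribeConsumerGroups_result(rkev);
-        descs  = rd_kafka_DescribeConsumerGroups_result_groups(result, &cnt);
-
-        for (i = 0; i < cnt; i++)
-                printf("group %s has %d member(s)\n",
-                       rd_kafka_ConsumerGroupDescription_group_id(descs[i]),
-                       (int)rd_kafka_ConsumerGroupDescription_member_count(
-                           descs[i]));
-
-        rd_kafka_event_destroy(rkev);
-        rd_kafka_queue_destroy(rkqu);
-}
-#endif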
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h
deleted file mode 100644
index 62fe9e87a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_ADMIN_H_
-#define _RDKAFKA_ADMIN_H_
-
-
-#include "rdstring.h"
-#include "rdkafka_error.h"
-#include "rdkafka_confval.h"
-
-
-
-/**
- * @brief Common AdminOptions type used for all admin APIs.
- *
- * @remark Update AdminOptions_use() when you change this struct
- * to make sure it is copied properly.
- */
-struct rd_kafka_AdminOptions_s {
- rd_kafka_admin_op_t for_api; /**< Limit allowed options to
- * this API (optional) */
-
- /* Generic */
- rd_kafka_confval_t request_timeout; /**< I32: Full request timeout,
- * includes looking up leader
- * broker,
- * waiting for req/response,
- * etc. */
- rd_ts_t abs_timeout; /**< Absolute timeout calculated
- * from .timeout */
-
- /* Specific for one or more APIs */
- rd_kafka_confval_t operation_timeout; /**< I32: Timeout on broker.
- * Valid for:
-                                               *     CreatePartitions
- * CreateTopics
- * DeleteRecords
- * DeleteTopics
- */
- rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker),
- * but don't perform action.
- * Valid for:
- * CreateTopics
- * CreatePartitions
- * AlterConfigs
- */
-
- rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than
- * absolute application
- * of config.
- * Valid for:
- * AlterConfigs
- */
-
- rd_kafka_confval_t broker; /**< INT: Explicitly override
- * broker id to send
- * requests to.
- * Valid for:
- * all
- */
-
- rd_kafka_confval_t
- require_stable_offsets; /**< BOOL: Whether broker should return
- * stable offsets (transaction-committed).
- * Valid for:
- * ListConsumerGroupOffsets
- */
-
- rd_kafka_confval_t
- match_consumer_group_states; /**< PTR: list of consumer group states
- * to query for.
- * Valid for: ListConsumerGroups.
- */
-
- rd_kafka_confval_t opaque; /**< PTR: Application opaque.
- * Valid for all. */
-};
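-
-/* Illustrative sketch (not part of the original file): applications set the
- * confvals above through the public setters in rdkafka.h rather than
- * directly, e.g. (assumes an existing rd_kafka_t *rk): */
-#if 0 /* example only */
-char errstr[512];
-rd_kafka_AdminOptions_t *options =
-    rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-rd_kafka_AdminOptions_set_request_timeout(options, 10 * 1000 /*10s*/, errstr,
-                                          sizeof(errstr));
-rd_kafka_AdminOptions_set_validate_only(options, 1 /*true*/, errstr,
-                                        sizeof(errstr));
-/* ... pass options to the admin API call, then: */
-rd_kafka_AdminOptions_destroy(options);
-#endif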
-
-
-/**
- * @name CreateTopics
- * @{
- */
-
-/**
- * @brief NewTopic type, used with CreateTopics.
- */
-struct rd_kafka_NewTopic_s {
- /* Required */
- char *topic; /**< Topic to be created */
- int num_partitions; /**< Number of partitions to create */
- int replication_factor; /**< Replication factor */
-
- /* Optional */
- rd_list_t replicas; /**< Type (rd_list_t (int32_t)):
- * Array of replica lists indexed by
- * partition, size num_partitions. */
- rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *):
- * List of configuration entries */
-};
-
-/**@}*/
-
-
-/**
- * @name DeleteTopics
- * @{
- */
-
-/**
- * @brief DeleteTopics result
- */
-struct rd_kafka_DeleteTopics_result_s {
- rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */
-};
-
-struct rd_kafka_DeleteTopic_s {
- char *topic; /**< Points to data */
- char data[1]; /**< The topic name is allocated along with
- * the struct here. */
-};
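-
-/* Illustrative sketch (not part of the original file) of the data[1]
- * single-allocation idiom used above: struct and name are allocated in one
- * block so a single rd_free() releases both. A hypothetical constructor
- * could look like: */
-#if 0 /* example only */
-rd_kafka_DeleteTopic_t *example_DeleteTopic_new(const char *topic) {
-        size_t len                 = strlen(topic);
-        rd_kafka_DeleteTopic_t *dt = rd_malloc(sizeof(*dt) + len);
-        memcpy(dt->data, topic, len + 1); /* data[1] already holds the NUL */
-        dt->topic = dt->data;
-        return dt;
-}
-#endif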
-
-/**@}*/
-
-
-
-/**
- * @name CreatePartitions
- * @{
- */
-
-
-/**
- * @brief CreatePartitions result
- */
-struct rd_kafka_CreatePartitions_result_s {
- rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */
-};
-
-struct rd_kafka_NewPartitions_s {
- char *topic; /**< Points to data */
- size_t total_cnt; /**< New total partition count */
-
- /* Optional */
- rd_list_t replicas; /**< Type (rd_list_t (int32_t)):
- * Array of replica lists indexed by
- * new partition relative index.
-                             *   Size is dynamic since we don't
-                             *   know how many partitions are actually
-                             *   being added (total_cnt minus the
-                             *   current partition count). */
-
- char data[1]; /**< The topic name is allocated along with
- * the struct here. */
-};
-
-/**@}*/
-
-
-
-/**
- * @name ConfigEntry
- * @{
- */
-
-/* KIP-248 */
-typedef enum rd_kafka_AlterOperation_t {
- RD_KAFKA_ALTER_OP_ADD = 0,
- RD_KAFKA_ALTER_OP_SET = 1,
- RD_KAFKA_ALTER_OP_DELETE = 2,
-} rd_kafka_AlterOperation_t;
-
-struct rd_kafka_ConfigEntry_s {
- rd_strtup_t *kv; /**< Name/Value pair */
-
- /* Response */
-
- /* Attributes: this is a struct for easy copying */
- struct {
- rd_kafka_AlterOperation_t operation; /**< Operation */
- rd_kafka_ConfigSource_t source; /**< Config source */
- rd_bool_t is_readonly; /**< Value is read-only (on broker) */
- rd_bool_t is_default; /**< Value is at its default */
- rd_bool_t is_sensitive; /**< Value is sensitive */
- rd_bool_t is_synonym; /**< Value is synonym */
- } a;
-
- rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */
-};
-
-/**
- * @brief A cluster ConfigResource consisting of:
- * - resource type (BROKER, TOPIC)
- * - configuration property name
- * - configuration property value
- *
- * https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs
- */
-struct rd_kafka_ConfigResource_s {
- rd_kafka_ResourceType_t restype; /**< Resource type */
- char *name; /**< Resource name, points to .data*/
- rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *):
- * List of config props */
-
- /* Response */
- rd_kafka_resp_err_t err; /**< Response error code */
- char *errstr; /**< Response error string */
-
- char data[1]; /**< The name is allocated along with
- * the struct here. */
-};
-
-
-
-/**@}*/
-
-/**
- * @name AlterConfigs
- * @{
- */
-
-
-
-struct rd_kafka_AlterConfigs_result_s {
- rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
-};
-
-struct rd_kafka_ConfigResource_result_s {
- rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *):
- * List of config resources, sans config
- * but with response error values. */
-};
-
-/**@}*/
-
-
-
-/**
- * @name DescribeConfigs
- * @{
- */
-
-struct rd_kafka_DescribeConfigs_result_s {
- rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */
-};
-
-/**@}*/
-
-
-/**
- * @name DeleteGroups
- * @{
- */
-
-
-struct rd_kafka_DeleteGroup_s {
- char *group; /**< Points to data */
- char data[1]; /**< The group name is allocated along with
- * the struct here. */
-};
-
-/**@}*/
-
-
-/**
- * @name DeleteRecords
- * @{
- */
-
-struct rd_kafka_DeleteRecords_s {
- rd_kafka_topic_partition_list_t *offsets;
-};
-
-/**@}*/
-
-
-/**
- * @name DeleteConsumerGroupOffsets
- * @{
- */
-
-/**
- * @brief DeleteConsumerGroupOffsets result
- */
-struct rd_kafka_DeleteConsumerGroupOffsets_result_s {
- rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
-};
-
-struct rd_kafka_DeleteConsumerGroupOffsets_s {
- char *group; /**< Points to data */
- rd_kafka_topic_partition_list_t *partitions;
- char data[1]; /**< The group name is allocated along with
- * the struct here. */
-};
-
-/**@}*/
-
-/**
- * @name CreateAcls
- * @{
- */
-
-/**
- * @brief AclBinding type, used with CreateAcls.
- */
-struct rd_kafka_AclBinding_s {
- rd_kafka_ResourceType_t restype; /**< Resource type */
- char *name; /**< Resource name, points to .data */
- rd_kafka_ResourcePatternType_t
- resource_pattern_type; /**< Resource pattern type */
- char *principal; /**< Access Control Entry principal */
- char *host; /**< Access Control Entry host */
- rd_kafka_AclOperation_t operation; /**< AclOperation enumeration */
- rd_kafka_AclPermissionType_t
- permission_type; /**< AclPermissionType enumeration */
- rd_kafka_error_t *error; /**< Response error, or NULL on success. */
-};
-/**@}*/
-
-/**
- * @name DeleteAcls
- * @{
- */
-
-/**
- * @brief DeleteAcls_result type, used with DeleteAcls.
- */
-struct rd_kafka_DeleteAcls_result_response_s {
- rd_kafka_error_t *error; /**< Response error object, or NULL */
- rd_list_t matching_acls; /**< Type (rd_kafka_AclBinding_t *) */
-};
-
-/**@}*/
-
-
-/**
- * @name AlterConsumerGroupOffsets
- * @{
- */
-
-/**
- * @brief AlterConsumerGroupOffsets result
- */
-struct rd_kafka_AlterConsumerGroupOffsets_result_s {
- rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
-};
-
-struct rd_kafka_AlterConsumerGroupOffsets_s {
- char *group_id; /**< Points to data */
- rd_kafka_topic_partition_list_t *partitions;
- char data[1]; /**< The group id is allocated along with
- * the struct here. */
-};
-
-/**@}*/
-
-
-/**
- * @name ListConsumerGroupOffsets
- * @{
- */
-
-/**
- * @brief ListConsumerGroupOffsets result
- */
-struct rd_kafka_ListConsumerGroupOffsets_result_s {
- rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
-};
-
-struct rd_kafka_ListConsumerGroupOffsets_s {
- char *group_id; /**< Points to data */
- rd_kafka_topic_partition_list_t *partitions;
- char data[1]; /**< The group id is allocated along with
- * the struct here. */
-};
-
-/**@}*/
-
-/**
- * @name ListConsumerGroups
- * @{
- */
-
-/**
- * @struct ListConsumerGroups result for a single group
- */
-struct rd_kafka_ConsumerGroupListing_s {
- char *group_id; /**< Group id */
-        /** Is this a simple consumer group (i.e., empty protocol_type)? */
- rd_bool_t is_simple_consumer_group;
- rd_kafka_consumer_group_state_t state; /**< Consumer group state. */
-};
-
-
-/**
- * @struct ListConsumerGroups results and errors
- */
-struct rd_kafka_ListConsumerGroupsResult_s {
- rd_list_t valid; /**< List of valid ConsumerGroupListing
- (rd_kafka_ConsumerGroupListing_t *) */
- rd_list_t errors; /**< List of errors (rd_kafka_error_t *) */
-};
-
-/**@}*/
-
-/**
- * @name DescribeConsumerGroups
- * @{
- */
-
-/**
- * @struct Assignment of a consumer group member.
- *
- */
-struct rd_kafka_MemberAssignment_s {
- /** Partitions assigned to current member. */
- rd_kafka_topic_partition_list_t *partitions;
-};
-
-/**
- * @struct Description of a consumer group member.
- *
- */
-struct rd_kafka_MemberDescription_s {
- char *client_id; /**< Client id */
- char *consumer_id; /**< Consumer id */
- char *group_instance_id; /**< Group instance id */
- char *host; /**< Group member host */
- rd_kafka_MemberAssignment_t assignment; /**< Member assignment */
-};
-
-/**
- * @struct DescribeConsumerGroups result
- */
-struct rd_kafka_ConsumerGroupDescription_s {
- /** Group id */
- char *group_id;
-        /** Is this a simple consumer group (i.e., empty protocol_type)? */
- rd_bool_t is_simple_consumer_group;
- /** List of members.
- * Type (rd_kafka_MemberDescription_t *): members list */
- rd_list_t members;
- /** Protocol type */
- char *protocol_type;
- /** Partition assignor identifier. */
- char *partition_assignor;
- /** Consumer group state. */
- rd_kafka_consumer_group_state_t state;
- /** Consumer group coordinator. */
- rd_kafka_Node_t *coordinator;
- /** Group specific error. */
- rd_kafka_error_t *error;
-};
-
-/**@}*/
-
-#endif /* _RDKAFKA_ADMIN_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c
deleted file mode 100644
index dc4bdae94..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c
+++ /dev/null
@@ -1,968 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name Consumer assignment state.
- *
- * Responsible for managing the state of assigned partitions.
- *
- *
- ******************************************************************************
- * rd_kafka_assignment_serve()
- * ---------------------------
- *
- * It is important to call rd_kafka_assignment_serve() after each change
- * to the assignment through assignment_add, assignment_subtract or
- * assignment_clear, as those functions only modify the assignment but do
- * not take any action to transition partitions to or from the assignment
- * states.
- *
- * The reason assignment_serve() is not automatically called from these
- * functions is for the caller to be able to set the current state before
- * the side-effects of serve() kick in, such as the call to
- * rd_kafka_cgrp_assignment_done() that in turn will set the cgrp state.
- *
- *
- *
- ******************************************************************************
- * Querying for committed offsets (.queried list)
- * ----------------------------------------------
- *
- * We only allow one outstanding query (fetch committed offset); this avoids
- * complex handling of partitions that are assigned, unassigned and reassigned
- * all within the window of an OffsetFetch request.
- * Consider the following case:
- *
- * 1. tp1 and tp2 are incrementally assigned.
- * 2. An OffsetFetchRequest is sent for tp1 and tp2
- * 3. tp2 is incrementally unassigned.
- * 4. Broker sends OffsetFetchResponse with offsets tp1=10, tp2=20.
- * 5. Some other consumer commits offset 30 for tp2.
- * 6. tp2 is incrementally assigned again.
- * 7. The OffsetFetchResponse is received.
- *
- * Without extra handling the consumer would start fetching tp1 at offset 10
- * (which is correct) and tp2 at offset 20 (which is incorrect, the last
- * committed offset is now 30).
- *
- * To alleviate this situation we remove unassigned partitions from the
- * .queried list, and in the OffsetFetch response handler we only use offsets
- * for partitions that are on the .queried list.
- *
- * To make sure the tp1 offset is used and not re-queried we only allow
- * one outstanding OffsetFetch request at a time, meaning that at step 6
- * a new OffsetFetch request will not be sent and tp2 will remain in the
- * .pending list until the outstanding OffsetFetch response is received in
- * step 7. At this point tp2 will transition to .queried and a new
- * OffsetFetch request will be sent.
- *
- * This explanation is more verbose than the code involved.
- *
- ******************************************************************************
- *
- *
- * @remark Try to keep any cgrp state out of this file.
- *
- * FIXME: There are some pretty obvious optimizations that need to be done here
- *        with regard to partition_list_t lookups. But we can do that when
- * we know the current implementation works correctly.
- */
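-
-/* Illustrative sketch (not part of the original file) of the contract
- * described above: every assignment mutation is followed by a call to
- * rd_kafka_assignment_serve() once the caller has updated its own state.
- * Hypothetical caller, assumed to run on the rdkafka main thread: */
-#if 0 /* example only */
-static void example_incremental_assign(rd_kafka_t *rk,
-                                       rd_kafka_topic_partition_list_t *parts) {
-        rd_kafka_error_t *error = rd_kafka_assignment_add(rk, parts);
-        if (error) {
-                /* e.g. propagate to the application, then: */
-                rd_kafka_error_destroy(error);
-                return;
-        }
-        /* ... caller-side (e.g. cgrp) state updates go here ... */
-        rd_kafka_assignment_serve(rk);
-}
-#endif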
-
-#include "rdkafka_int.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_request.h"
-
-
-static void rd_kafka_assignment_dump(rd_kafka_t *rk) {
- rd_kafka_dbg(rk, CGRP, "DUMP",
- "Assignment dump (started_cnt=%d, wait_stop_cnt=%d)",
- rk->rk_consumer.assignment.started_cnt,
- rk->rk_consumer.assignment.wait_stop_cnt);
-
- rd_kafka_topic_partition_list_log(rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP,
- rk->rk_consumer.assignment.all);
-
- rd_kafka_topic_partition_list_log(rk, "DUMP_PND", RD_KAFKA_DBG_CGRP,
- rk->rk_consumer.assignment.pending);
-
- rd_kafka_topic_partition_list_log(rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP,
- rk->rk_consumer.assignment.queried);
-
- rd_kafka_topic_partition_list_log(rk, "DUMP_REM", RD_KAFKA_DBG_CGRP,
- rk->rk_consumer.assignment.removed);
-}
-
-/**
- * @brief Apply the fetched committed offsets to the current assignment's
- * queried partitions.
- *
- * @param err is the request-level error, if any. The caller is responsible
- * for raising this error to the application. It is only used here
- * to avoid taking actions.
- *
- * Called from the FetchOffsets response handler below.
- */
-static void
-rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_resp_err_t err) {
- rd_kafka_topic_partition_t *rktpar;
-
- RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) {
- /* May be NULL, borrow ref. */
- rd_kafka_toppar_t *rktp =
- rd_kafka_topic_partition_toppar(rk, rktpar);
-
- if (!rd_kafka_topic_partition_list_del(
- rk->rk_consumer.assignment.queried, rktpar->topic,
- rktpar->partition)) {
- rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
- "Ignoring OffsetFetch "
- "response for %s [%" PRId32
- "] which is no "
- "longer in the queried list "
- "(possibly unassigned?)",
- rktpar->topic, rktpar->partition);
- continue;
- }
-
- if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT ||
- rktpar->err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) {
- /* Ongoing transactions are blocking offset retrieval.
- * This is typically retried from the OffsetFetch
- * handler but we can come here if the assignment
- * (and thus the assignment.version) was changed while
- * the OffsetFetch request was in-flight, in which case
- * we put this partition back on the pending list for
- * later handling by the assignment state machine. */
-
- rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
- "Adding %s [%" PRId32
- "] back to pending "
- "list because on-going transaction is "
- "blocking offset retrieval",
- rktpar->topic, rktpar->partition);
-
- rd_kafka_topic_partition_list_add_copy(
- rk->rk_consumer.assignment.pending, rktpar);
-
- } else if (rktpar->err) {
- /* Partition-level error */
- rd_kafka_consumer_err(
- rk->rk_consumer.q, RD_KAFKA_NODEID_UA, rktpar->err,
- 0, rktpar->topic, rktp, RD_KAFKA_OFFSET_INVALID,
- "Failed to fetch committed offset for "
- "group \"%s\" topic %s [%" PRId32 "]: %s",
- rk->rk_group_id->str, rktpar->topic,
- rktpar->partition, rd_kafka_err2str(rktpar->err));
-
- /* The partition will not be added back to .pending
- * and thus only reside on .all until the application
- * unassigns it and possible re-assigns it. */
-
- } else if (!err) {
- /* If rktpar->offset is RD_KAFKA_OFFSET_INVALID it means
- * there was no committed offset for this partition.
- * serve_pending() will now start this partition
- * since the offset is set to INVALID (rather than
- * STORED) and the partition fetcher will employ
- * auto.offset.reset to know what to do. */
-
- /* Add partition to pending list where serve()
- * will start the fetcher. */
- rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
- "Adding %s [%" PRId32
- "] back to pending "
- "list with offset %s",
- rktpar->topic, rktpar->partition,
- rd_kafka_offset2str(rktpar->offset));
-
- rd_kafka_topic_partition_list_add_copy(
- rk->rk_consumer.assignment.pending, rktpar);
- }
- /* Do nothing for request-level errors (err is set). */
- }
-
- if (offsets->cnt > 0)
- rd_kafka_assignment_serve(rk);
-}
-
-
-
-/**
- * @brief Reply handler for OffsetFetch queries from the assignment code.
- *
- * @param opaque A malloc'd int64_t* containing the assignment version at the
- * time of the request.
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_topic_partition_list_t *offsets = NULL;
- int64_t *req_assignment_version = (int64_t *)opaque;
- /* Only allow retries if there's been no change to the assignment,
- * otherwise rely on assignment state machine to retry. */
- rd_bool_t allow_retry =
- *req_assignment_version == rk->rk_consumer.assignment.version;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* Termination, quick cleanup. */
- rd_free(req_assignment_version);
- return;
- }
-
- err = rd_kafka_handle_OffsetFetch(
- rk, rkb, err, reply, request, &offsets,
- rd_true /* Update toppars */, rd_true /* Add parts */, allow_retry);
- if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
- if (offsets)
- rd_kafka_topic_partition_list_destroy(offsets);
- return; /* retrying */
- }
-
- rd_free(req_assignment_version);
-
- /* offsets may be NULL for certain errors, such
- * as ERR__TRANSPORT. */
- if (!offsets && !allow_retry) {
- rd_dassert(err);
- if (!err)
- err = RD_KAFKA_RESP_ERR__NO_OFFSET;
-
- rd_kafka_dbg(rk, CGRP, "OFFSET", "Offset fetch error: %s",
- rd_kafka_err2str(err));
- rd_kafka_consumer_err(
- rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
- NULL, RD_KAFKA_OFFSET_INVALID,
- "Failed to fetch committed "
- "offsets for partitions "
- "in group \"%s\": %s",
- rk->rk_group_id->str, rd_kafka_err2str(err));
-
- return;
- }
-
-
-
- if (err) {
- rd_kafka_dbg(rk, CGRP, "OFFSET",
- "Offset fetch error for %d partition(s): %s",
- offsets->cnt, rd_kafka_err2str(err));
- rd_kafka_consumer_err(
- rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
- NULL, RD_KAFKA_OFFSET_INVALID,
- "Failed to fetch committed offsets for "
- "%d partition(s) in group \"%s\": %s",
- offsets->cnt, rk->rk_group_id->str, rd_kafka_err2str(err));
- }
-
- /* Apply the fetched offsets to the assignment */
- rd_kafka_assignment_apply_offsets(rk, offsets, err);
-
- rd_kafka_topic_partition_list_destroy(offsets);
-}
-
-
-/**
- * @brief Decommission all partitions in the removed list.
- *
- * @returns >0 if there are removal operations in progress, else 0.
- */
-static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {
- rd_kafka_topic_partition_t *rktpar;
- int valid_offsets = 0;
-
- RD_KAFKA_TPLIST_FOREACH(rktpar, rk->rk_consumer.assignment.removed) {
- rd_kafka_toppar_t *rktp =
- rd_kafka_topic_partition_ensure_toppar(
- rk, rktpar, rd_true); /* Borrow ref */
- int was_pending, was_queried;
-
- /* Remove partition from pending and querying lists,
- * if it happens to be there.
- * Outstanding OffsetFetch query results will be ignored
- * for partitions that are no longer on the .queried list. */
- was_pending = rd_kafka_topic_partition_list_del(
- rk->rk_consumer.assignment.pending, rktpar->topic,
- rktpar->partition);
- was_queried = rd_kafka_topic_partition_list_del(
- rk->rk_consumer.assignment.queried, rktpar->topic,
- rktpar->partition);
-
- if (rktp->rktp_started) {
- /* Partition was started, stop the fetcher. */
- rd_assert(rk->rk_consumer.assignment.started_cnt > 0);
-
- rd_kafka_toppar_op_fetch_stop(
- rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
- rk->rk_consumer.assignment.wait_stop_cnt++;
- }
-
- /* Reset the (lib) pause flag which may have been set by
- * the cgrp when scheduling the rebalance callback. */
- rd_kafka_toppar_op_pause_resume(rktp, rd_false /*resume*/,
- RD_KAFKA_TOPPAR_F_LIB_PAUSE,
- RD_KAFKA_NO_REPLYQ);
-
- rd_kafka_toppar_lock(rktp);
-
- /* Save the currently stored offset and epoch on .removed
- * so it will be committed below. */
- rd_kafka_topic_partition_set_from_fetch_pos(
- rktpar, rktp->rktp_stored_pos);
- valid_offsets += !RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset);
-
- /* Reset the stored offset to invalid so that
- * a manual offset-less commit() or the auto-committer
- * will not commit a stored offset from a previous
- * assignment (issue #2782). */
- rd_kafka_offset_store0(
- rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1),
- rd_true, RD_DONT_LOCK);
-
- /* Partition is no longer desired */
- rd_kafka_toppar_desired_del(rktp);
-
- rd_assert((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED));
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ASSIGNED;
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_dbg(rk, CGRP, "REMOVE",
- "Removing %s [%" PRId32
- "] from assignment "
- "(started=%s, pending=%s, queried=%s, "
- "stored offset=%s)",
- rktpar->topic, rktpar->partition,
- RD_STR_ToF(rktp->rktp_started),
- RD_STR_ToF(was_pending), RD_STR_ToF(was_queried),
- rd_kafka_offset2str(rktpar->offset));
- }
-
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REMOVE",
- "Served %d removed partition(s), "
- "with %d offset(s) to commit",
- rk->rk_consumer.assignment.removed->cnt, valid_offsets);
-
- /* If enable.auto.commit=true:
- * Commit final offsets to broker for the removed partitions,
- * unless this is a consumer destruction with a close() call. */
- if (valid_offsets > 0 &&
- rk->rk_conf.offset_store_method == RD_KAFKA_OFFSET_METHOD_BROKER &&
- rk->rk_cgrp && rk->rk_conf.enable_auto_commit &&
- !rd_kafka_destroy_flags_no_consumer_close(rk))
- rd_kafka_cgrp_assigned_offsets_commit(
- rk->rk_cgrp, rk->rk_consumer.assignment.removed,
- rd_false /* use offsets from .removed */,
- "unassigned partitions");
-
- rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.removed);
-
- return rk->rk_consumer.assignment.wait_stop_cnt +
- rk->rk_consumer.wait_commit_cnt;
-}
-
-
-/**
- * @brief Serve all partitions in the pending list.
- *
- * This either (asynchronously) queries the partition's committed offset, or
- * if the start offset is known, starts the partition fetcher.
- *
- * @returns >0 if there are pending operations in progress for the current
- * assignment, else 0.
- */
-static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
- rd_kafka_topic_partition_list_t *partitions_to_query = NULL;
- /* We can query committed offsets only if all of the following are true:
- * - We have a group coordinator.
- * - There are no outstanding commits (since we might need to
- * read back those commits as our starting position).
- * - There are no outstanding queries already (since we want to
-         *   avoid using an earlier query's response for a partition that
- * is unassigned and then assigned again).
- */
- rd_kafka_broker_t *coord =
- rk->rk_cgrp ? rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL;
- rd_bool_t can_query_offsets =
- coord && rk->rk_consumer.wait_commit_cnt == 0 &&
- rk->rk_consumer.assignment.queried->cnt == 0;
- int i;
-
- if (can_query_offsets)
- partitions_to_query = rd_kafka_topic_partition_list_new(
- rk->rk_consumer.assignment.pending->cnt);
-
- /* Scan the list backwards so removals are cheap (no array shuffle) */
- for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) {
- rd_kafka_topic_partition_t *rktpar =
- &rk->rk_consumer.assignment.pending->elems[i];
- /* Borrow ref */
- rd_kafka_toppar_t *rktp =
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
-
- rd_assert(!rktp->rktp_started);
-
- if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) ||
- rktpar->offset == RD_KAFKA_OFFSET_BEGINNING ||
- rktpar->offset == RD_KAFKA_OFFSET_END ||
- rktpar->offset == RD_KAFKA_OFFSET_INVALID ||
- rktpar->offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
- /* The partition fetcher can handle absolute
- * as well as beginning/end/tail start offsets, so we're
- * ready to start the fetcher now.
- * The INVALID offset means there was no committed
- * offset and the partition fetcher will employ
- * auto.offset.reset.
- *
- * Start fetcher for partition and forward partition's
- * fetchq to consumer group's queue. */
-
- rd_kafka_dbg(rk, CGRP, "SRVPEND",
- "Starting pending assigned partition "
- "%s [%" PRId32 "] at %s",
- rktpar->topic, rktpar->partition,
- rd_kafka_fetch_pos2str(
- rd_kafka_topic_partition_get_fetch_pos(
- rktpar)));
-
- /* Reset the (lib) pause flag which may have been set by
- * the cgrp when scheduling the rebalance callback. */
- rd_kafka_toppar_op_pause_resume(
- rktp, rd_false /*resume*/,
- RD_KAFKA_TOPPAR_F_LIB_PAUSE, RD_KAFKA_NO_REPLYQ);
-
- /* Start the fetcher */
- rktp->rktp_started = rd_true;
- rk->rk_consumer.assignment.started_cnt++;
-
- rd_kafka_toppar_op_fetch_start(
- rktp,
- rd_kafka_topic_partition_get_fetch_pos(rktpar),
- rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ);
-
-
- } else if (can_query_offsets) {
- /* Else use the last committed offset for partition.
- * We can't rely on any internal cached committed offset
- * so we'll accumulate a list of partitions that need
-                         * to be queried and then send an OffsetFetchRequest
- * to the group coordinator. */
-
- rd_dassert(!rd_kafka_topic_partition_list_find(
- rk->rk_consumer.assignment.queried, rktpar->topic,
- rktpar->partition));
-
- rd_kafka_topic_partition_list_add_copy(
- partitions_to_query, rktpar);
-
- rd_kafka_topic_partition_list_add_copy(
- rk->rk_consumer.assignment.queried, rktpar);
-
- rd_kafka_dbg(rk, CGRP, "SRVPEND",
- "Querying committed offset for pending "
- "assigned partition %s [%" PRId32 "]",
- rktpar->topic, rktpar->partition);
-
-
- } else {
- rd_kafka_dbg(
- rk, CGRP, "SRVPEND",
- "Pending assignment partition "
- "%s [%" PRId32
- "] can't fetch committed "
- "offset yet "
- "(cgrp state %s, awaiting %d commits, "
- "%d partition(s) already being queried)",
- rktpar->topic, rktpar->partition,
- rk->rk_cgrp
- ? rd_kafka_cgrp_state_names[rk->rk_cgrp
- ->rkcg_state]
- : "n/a",
- rk->rk_consumer.wait_commit_cnt,
- rk->rk_consumer.assignment.queried->cnt);
-
- continue; /* Keep rktpar on pending list */
- }
-
- /* Remove rktpar from the pending list */
- rd_kafka_topic_partition_list_del_by_idx(
- rk->rk_consumer.assignment.pending, i);
- }
-
-
- if (!can_query_offsets) {
- if (coord)
- rd_kafka_broker_destroy(coord);
- return rk->rk_consumer.assignment.pending->cnt +
- rk->rk_consumer.assignment.queried->cnt;
- }
-
-
- if (partitions_to_query->cnt > 0) {
- int64_t *req_assignment_version = rd_malloc(sizeof(int64_t));
- *req_assignment_version = rk->rk_consumer.assignment.version;
-
- rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
- "Fetching committed offsets for "
- "%d pending partition(s) in assignment",
- partitions_to_query->cnt);
-
- rd_kafka_OffsetFetchRequest(
- coord, rk->rk_group_id->str, partitions_to_query,
- rk->rk_conf.isolation_level ==
- RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/,
- 0, /* Timeout */
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_assignment_handle_OffsetFetch,
- /* Must be freed by handler */
- (void *)req_assignment_version);
- }
-
- if (coord)
- rd_kafka_broker_destroy(coord);
-
- rd_kafka_topic_partition_list_destroy(partitions_to_query);
-
- return rk->rk_consumer.assignment.pending->cnt +
- rk->rk_consumer.assignment.queried->cnt;
-}
-
-
-
-/**
- * @brief Serve updates to the assignment.
- *
- * Call on:
- * - assignment changes
- * - wait_commit_cnt reaches 0
- * - partition fetcher is stopped
- */
-void rd_kafka_assignment_serve(rd_kafka_t *rk) {
- int inp_removals = 0;
- int inp_pending = 0;
-
- rd_kafka_assignment_dump(rk);
-
- /* Serve any partitions that should be removed */
- if (rk->rk_consumer.assignment.removed->cnt > 0)
- inp_removals = rd_kafka_assignment_serve_removals(rk);
-
- /* Serve any partitions in the pending list that need further action,
- * unless we're waiting for a previous assignment change (an unassign
- * in some form) to propagate, or outstanding offset commits
- * to finish (since we might need the committed offsets as start
- * offsets). */
- if (rk->rk_consumer.assignment.wait_stop_cnt == 0 &&
- rk->rk_consumer.wait_commit_cnt == 0 && inp_removals == 0 &&
- rk->rk_consumer.assignment.pending->cnt > 0)
- inp_pending = rd_kafka_assignment_serve_pending(rk);
-
- if (inp_removals + inp_pending +
- rk->rk_consumer.assignment.queried->cnt +
- rk->rk_consumer.assignment.wait_stop_cnt +
- rk->rk_consumer.wait_commit_cnt ==
- 0) {
- /* No assignment operations in progress,
- * signal assignment done back to cgrp to let it
- * transition to its next state if necessary.
- * We may emit this signalling more than necessary and it is
- * up to the cgrp to only take action if needed, based on its
- * state. */
- rd_kafka_cgrp_assignment_done(rk->rk_cgrp);
- } else {
- rd_kafka_dbg(rk, CGRP, "ASSIGNMENT",
- "Current assignment of %d partition(s) "
- "with %d pending adds, %d offset queries, "
- "%d partitions awaiting stop and "
- "%d offset commits in progress",
- rk->rk_consumer.assignment.all->cnt, inp_pending,
- rk->rk_consumer.assignment.queried->cnt,
- rk->rk_consumer.assignment.wait_stop_cnt,
- rk->rk_consumer.wait_commit_cnt);
- }
-}
-
-
-/**
- * @returns true if the current or previous assignment has operations in
- * progress, such as waiting for partition fetchers to stop.
- */
-rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk) {
- return rk->rk_consumer.wait_commit_cnt > 0 ||
- rk->rk_consumer.assignment.wait_stop_cnt > 0 ||
- rk->rk_consumer.assignment.pending->cnt > 0 ||
- rk->rk_consumer.assignment.queried->cnt > 0 ||
- rk->rk_consumer.assignment.removed->cnt > 0;
-}
-
-
-/**
- * @brief Clear the current assignment.
- *
- * @remark Make sure to call rd_kafka_assignment_serve() after successful
- * return from this function.
- *
- * @returns the number of partitions removed.
- */
-int rd_kafka_assignment_clear(rd_kafka_t *rk) {
- int cnt = rk->rk_consumer.assignment.all->cnt;
-
- if (cnt == 0) {
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
- "No current assignment to clear");
- return 0;
- }
-
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
- "Clearing current assignment of %d partition(s)",
- rk->rk_consumer.assignment.all->cnt);
-
- rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.pending);
- rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.queried);
-
- rd_kafka_topic_partition_list_add_list(
- rk->rk_consumer.assignment.removed, rk->rk_consumer.assignment.all);
- rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.all);
-
- rk->rk_consumer.assignment.version++;
-
- return cnt;
-}
-
-
-/**
- * @brief Adds \p partitions to the current assignment.
- *
- * Will return error if trying to add a partition that is already in the
- * assignment.
- *
- * @remark Make sure to call rd_kafka_assignment_serve() after successful
- * return from this function.
- */
-rd_kafka_error_t *
-rd_kafka_assignment_add(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- rd_bool_t was_empty = rk->rk_consumer.assignment.all->cnt == 0;
- int i;
-
- /* Make sure there are no duplicates, invalid partitions, or
- * invalid offsets in the input partitions. */
- rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
-
- for (i = 0; i < partitions->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
- const rd_kafka_topic_partition_t *prev =
- i > 0 ? &partitions->elems[i - 1] : NULL;
-
- if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) &&
- rktpar->offset != RD_KAFKA_OFFSET_BEGINNING &&
- rktpar->offset != RD_KAFKA_OFFSET_END &&
- rktpar->offset != RD_KAFKA_OFFSET_STORED &&
- rktpar->offset != RD_KAFKA_OFFSET_INVALID &&
- rktpar->offset > RD_KAFKA_OFFSET_TAIL_BASE)
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "%s [%" PRId32
- "] has invalid start offset %" PRId64,
- rktpar->topic, rktpar->partition, rktpar->offset);
-
- if (prev && !rd_kafka_topic_partition_cmp(rktpar, prev))
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Duplicate %s [%" PRId32 "] in input list",
- rktpar->topic, rktpar->partition);
-
- if (rd_kafka_topic_partition_list_find(
- rk->rk_consumer.assignment.all, rktpar->topic,
- rktpar->partition))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__CONFLICT,
- "%s [%" PRId32
- "] is already part of the "
- "current assignment",
- rktpar->topic,
- rktpar->partition);
-
- /* Translate RD_KAFKA_OFFSET_INVALID to RD_KAFKA_OFFSET_STORED,
- * i.e., read from committed offset, since we use INVALID
- * internally to differentiate between querying for
- * committed offset (STORED) and no committed offset (INVALID).
- */
- if (rktpar->offset == RD_KAFKA_OFFSET_INVALID)
- rktpar->offset = RD_KAFKA_OFFSET_STORED;
-
- /* Get toppar object for each partition.
- * This is to make sure the rktp stays alive while unassigning
- * any previous assignment in the call to
- * assignment_clear() below. */
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
- }
-
- /* Mark all partition objects as assigned and reset the stored
- * offsets back to invalid in case it was explicitly stored during
- * the time the partition was not assigned. */
- for (i = 0; i < partitions->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
- rd_kafka_toppar_t *rktp =
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
-
- rd_kafka_toppar_lock(rktp);
-
- rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED));
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ASSIGNED;
-
- /* Reset the stored offset to INVALID to avoid the race
- * condition described in rdkafka_offset.h */
- rd_kafka_offset_store0(
- rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1),
- rd_true /* force */, RD_DONT_LOCK);
-
- rd_kafka_toppar_unlock(rktp);
- }
-
-
- /* Add the new list of partitions to the current assignment.
- * Only need to sort the final assignment if it was non-empty
- * to begin with since \p partitions is sorted above. */
- rd_kafka_topic_partition_list_add_list(rk->rk_consumer.assignment.all,
- partitions);
- if (!was_empty)
- rd_kafka_topic_partition_list_sort(
- rk->rk_consumer.assignment.all, NULL, NULL);
-
- /* And add to .pending for serve_pending() to handle. */
- rd_kafka_topic_partition_list_add_list(
- rk->rk_consumer.assignment.pending, partitions);
-
-
- rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "ASSIGNMENT",
- "Added %d partition(s) to assignment which "
-                     "now consists of %d partition(s), of which %d are in "
- "pending state and %d are being queried",
- partitions->cnt, rk->rk_consumer.assignment.all->cnt,
- rk->rk_consumer.assignment.pending->cnt,
- rk->rk_consumer.assignment.queried->cnt);
-
- rk->rk_consumer.assignment.version++;
-
- return NULL;
-}
-
-
-/**
- * @brief Remove \p partitions from the current assignment.
- *
- * Will return error if trying to remove a partition that is not in the
- * assignment.
- *
- * @remark Make sure to call rd_kafka_assignment_serve() after successful
- * return from this function.
- */
-rd_kafka_error_t *
-rd_kafka_assignment_subtract(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- int i;
- int matched_queried_partitions = 0;
- int assignment_pre_cnt;
-
- if (rk->rk_consumer.assignment.all->cnt == 0 && partitions->cnt > 0)
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Can't subtract from empty assignment");
-
- /* Verify that all partitions in \p partitions are in the assignment
- * before starting to modify the assignment. */
- rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
-
- for (i = 0; i < partitions->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
-
- if (!rd_kafka_topic_partition_list_find(
- rk->rk_consumer.assignment.all, rktpar->topic,
- rktpar->partition))
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "%s [%" PRId32
- "] can't be unassigned since "
- "it is not in the current assignment",
- rktpar->topic, rktpar->partition);
-
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
- }
-
-
- assignment_pre_cnt = rk->rk_consumer.assignment.all->cnt;
-
- /* Remove partitions in reverse order to avoid excessive
- * array shuffling of .all.
- * Add the removed partitions to .pending for serve() to handle. */
- for (i = partitions->cnt - 1; i >= 0; i--) {
- const rd_kafka_topic_partition_t *rktpar =
- &partitions->elems[i];
-
- if (!rd_kafka_topic_partition_list_del(
- rk->rk_consumer.assignment.all, rktpar->topic,
- rktpar->partition))
- RD_BUG("Removed partition %s [%" PRId32
- "] not found "
- "in assignment.all",
- rktpar->topic, rktpar->partition);
-
- if (rd_kafka_topic_partition_list_del(
- rk->rk_consumer.assignment.queried, rktpar->topic,
- rktpar->partition))
- matched_queried_partitions++;
- else
- rd_kafka_topic_partition_list_del(
- rk->rk_consumer.assignment.pending, rktpar->topic,
- rktpar->partition);
-
- /* Add to .removed list which will be served by
- * serve_removals(). */
- rd_kafka_topic_partition_list_add_copy(
- rk->rk_consumer.assignment.removed, rktpar);
- }
-
- rd_kafka_dbg(rk, CGRP, "REMOVEASSIGN",
- "Removed %d partition(s) "
- "(%d with outstanding offset queries) from assignment "
- "of %d partition(s)",
- partitions->cnt, matched_queried_partitions,
- assignment_pre_cnt);
-
- if (rk->rk_consumer.assignment.all->cnt == 0) {
-                /* Some safety checks */
- rd_assert(rk->rk_consumer.assignment.pending->cnt == 0);
- rd_assert(rk->rk_consumer.assignment.queried->cnt == 0);
- }
-
- rk->rk_consumer.assignment.version++;
-
- return NULL;
-}
-
-
-/**
- * @brief Call when partition fetcher has stopped.
- */
-void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp) {
- rd_assert(rk->rk_consumer.assignment.wait_stop_cnt > 0);
- rk->rk_consumer.assignment.wait_stop_cnt--;
-
- rd_assert(rktp->rktp_started);
- rktp->rktp_started = rd_false;
-
- rd_assert(rk->rk_consumer.assignment.started_cnt > 0);
- rk->rk_consumer.assignment.started_cnt--;
-
- /* If this was the last partition we awaited stop for, serve the
- * assignment to transition any existing assignment to the next state */
- if (rk->rk_consumer.assignment.wait_stop_cnt == 0) {
- rd_kafka_dbg(rk, CGRP, "STOPSERVE",
- "All partitions awaiting stop are now "
- "stopped: serving assignment");
- rd_kafka_assignment_serve(rk);
- }
-}
-
-
-/**
- * @brief Pause fetching of the currently assigned partitions.
- *
- * Partitions will be resumed by calling rd_kafka_assignment_resume() or
- * from either serve_removals() or serve_pending() above.
- */
-void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason) {
-
- if (rk->rk_consumer.assignment.all->cnt == 0)
- return;
-
- rd_kafka_dbg(rk, CGRP, "PAUSE",
- "Pausing fetchers for %d assigned partition(s): %s",
- rk->rk_consumer.assignment.all->cnt, reason);
-
- rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_ASYNC,
- RD_KAFKA_TOPPAR_F_LIB_PAUSE,
- rk->rk_consumer.assignment.all);
-}
-
-/**
- * @brief Resume fetching of the currently assigned partitions which have
- * previously been paused by rd_kafka_assignment_pause().
- */
-void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason) {
-
- if (rk->rk_consumer.assignment.all->cnt == 0)
- return;
-
- rd_kafka_dbg(rk, CGRP, "PAUSE",
- "Resuming fetchers for %d assigned partition(s): %s",
- rk->rk_consumer.assignment.all->cnt, reason);
-
- rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_ASYNC,
- RD_KAFKA_TOPPAR_F_LIB_PAUSE,
- rk->rk_consumer.assignment.all);
-}
-
-
-
-/**
- * @brief Destroy assignment state (but not \p assignment itself)
- */
-void rd_kafka_assignment_destroy(rd_kafka_t *rk) {
- if (!rk->rk_consumer.assignment.all)
- return; /* rd_kafka_assignment_init() not called */
- rd_kafka_topic_partition_list_destroy(rk->rk_consumer.assignment.all);
- rd_kafka_topic_partition_list_destroy(
- rk->rk_consumer.assignment.pending);
- rd_kafka_topic_partition_list_destroy(
- rk->rk_consumer.assignment.queried);
- rd_kafka_topic_partition_list_destroy(
- rk->rk_consumer.assignment.removed);
-}
-
-
-/**
- * @brief Initialize the assignment struct.
- */
-void rd_kafka_assignment_init(rd_kafka_t *rk) {
- rk->rk_consumer.assignment.all = rd_kafka_topic_partition_list_new(100);
- rk->rk_consumer.assignment.pending =
- rd_kafka_topic_partition_list_new(100);
- rk->rk_consumer.assignment.queried =
- rd_kafka_topic_partition_list_new(100);
- rk->rk_consumer.assignment.removed =
- rd_kafka_topic_partition_list_new(100);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h
deleted file mode 100644
index fa51bb10c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDKAFKA_ASSIGNMENT_H_
-#define _RDKAFKA_ASSIGNMENT_H_
-
-typedef struct rd_kafka_assignment_s {
- /** All currently assigned partitions. */
- rd_kafka_topic_partition_list_t *all;
- /** Partitions in need of action (subset of .all) */
- rd_kafka_topic_partition_list_t *pending;
- /** Partitions that are being queried for committed
- * offsets (subset of .all) */
- rd_kafka_topic_partition_list_t *queried;
- /** Partitions that have been removed from the assignment
- * but not yet decommissioned. (not included in .all) */
- rd_kafka_topic_partition_list_t *removed;
- /** Number of started partitions */
- int started_cnt;
- /** Number of partitions being stopped. */
- int wait_stop_cnt;
- /** Assignment version: any change to the assignment will bump this
- * version by one. This is used to know if a protocol response is
- * outdated or not.
- * @locks_required none
- * @locality rdkafka main thread */
- int64_t version;
-} rd_kafka_assignment_t;
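
The version field enables a cheap staleness check on asynchronous protocol
responses. A sketch of the pattern, with a hypothetical request helper (the
actual call sites are in the assignment code):

    /* Capture the version before issuing an async request. */
    int64_t my_version = rk->rk_consumer.assignment.version;
    send_offset_fetch(rk); /* hypothetical async request helper */

    /* ... later, in the response handler: */
    if (rk->rk_consumer.assignment.version != my_version)
            return; /* assignment changed since the request was sent:
                     * the response is outdated and must be ignored */
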
-
-
-int rd_kafka_assignment_clear(rd_kafka_t *rk);
-rd_kafka_error_t *
-rd_kafka_assignment_add(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions);
-rd_kafka_error_t *
-rd_kafka_assignment_subtract(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions);
-void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp);
-void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason);
-void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason);
-void rd_kafka_assignment_serve(rd_kafka_t *rk);
-rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk);
-void rd_kafka_assignment_destroy(rd_kafka_t *rk);
-void rd_kafka_assignment_init(rd_kafka_t *rk);
-
-#endif /* _RDKAFKA_ASSIGNMENT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c
deleted file mode 100644
index 792573845..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c
+++ /dev/null
@@ -1,1065 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_request.h"
-#include "rdunittest.h"
-
-#include <ctype.h>
-
-/**
- * Clear out and free any memory used by the member, but not the rkgm itself.
- */
-void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm) {
- if (rkgm->rkgm_owned)
- rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
-
- if (rkgm->rkgm_subscription)
- rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription);
-
- if (rkgm->rkgm_assignment)
- rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment);
-
- rd_list_destroy(&rkgm->rkgm_eligible);
-
- if (rkgm->rkgm_member_id)
- rd_kafkap_str_destroy(rkgm->rkgm_member_id);
-
- if (rkgm->rkgm_group_instance_id)
- rd_kafkap_str_destroy(rkgm->rkgm_group_instance_id);
-
- if (rkgm->rkgm_userdata)
- rd_kafkap_bytes_destroy(rkgm->rkgm_userdata);
-
- if (rkgm->rkgm_member_metadata)
- rd_kafkap_bytes_destroy(rkgm->rkgm_member_metadata);
-
- memset(rkgm, 0, sizeof(*rkgm));
-}
-
-
-/**
- * @brief Group member comparator (takes rd_kafka_group_member_t *)
- */
-int rd_kafka_group_member_cmp(const void *_a, const void *_b) {
- const rd_kafka_group_member_t *a = (const rd_kafka_group_member_t *)_a;
- const rd_kafka_group_member_t *b = (const rd_kafka_group_member_t *)_b;
-
- /* Use the group instance id to compare static group members */
- if (!RD_KAFKAP_STR_IS_NULL(a->rkgm_group_instance_id) &&
- !RD_KAFKAP_STR_IS_NULL(b->rkgm_group_instance_id))
- return rd_kafkap_str_cmp(a->rkgm_group_instance_id,
- b->rkgm_group_instance_id);
-
- return rd_kafkap_str_cmp(a->rkgm_member_id, b->rkgm_member_id);
-}
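
Because the comparator operates directly on rd_kafka_group_member_t elements
(not pointers to them), it can sort a member array in place; a minimal
sketch, with members/member_cnt as assumed inputs:

    #include <stdlib.h>

    qsort(members, member_cnt, sizeof(rd_kafka_group_member_t),
          rd_kafka_group_member_cmp);
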
-
-
-/**
- * Returns true if member subscribes to topic, else false.
- */
-int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
- const rd_kafka_group_member_t *rkgm,
- const char *topic) {
- int i;
-
- /* Match against member's subscription. */
- for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rkgm->rkgm_subscription->elems[i];
-
- if (rd_kafka_topic_partition_match(rk, rkgm, rktpar, topic,
- NULL))
- return 1;
- }
-
- return 0;
-}
-
-
-rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
- const rd_list_t *topics,
- const void *userdata,
- size_t userdata_size,
- const rd_kafka_topic_partition_list_t *owned_partitions) {
-
- rd_kafka_buf_t *rkbuf;
- rd_kafkap_bytes_t *kbytes;
- int i;
- int topic_cnt = rd_list_cnt(topics);
- const rd_kafka_topic_info_t *tinfo;
- size_t len;
-
- /*
- * MemberMetadata => Version Subscription AssignmentStrategies
- * Version => int16
- * Subscription => Topics UserData
- * Topics => [String]
- * UserData => Bytes
- * OwnedPartitions => [Topic Partitions] // added in v1
- * Topic => string
- * Partitions => [int32]
- */
-
- rkbuf = rd_kafka_buf_new(1, 100 + (topic_cnt * 100) + userdata_size);
-
- /* Version */
- rd_kafka_buf_write_i16(rkbuf, 1);
- rd_kafka_buf_write_i32(rkbuf, topic_cnt);
- RD_LIST_FOREACH(tinfo, topics, i)
- rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1);
- if (userdata)
- rd_kafka_buf_write_bytes(rkbuf, userdata, userdata_size);
- else /* Kafka 0.9.0.0 can't parse NULL bytes, so we provide empty,
- * which is compatible with all of the built-in Java client
- * assignors at the present time (up to and including v2.5) */
- rd_kafka_buf_write_bytes(rkbuf, "", 0);
- /* Following data is ignored by v0 consumers */
- if (!owned_partitions)
- /* If there are no owned partitions, this is specified as an
- * empty array, not NULL. */
- rd_kafka_buf_write_i32(rkbuf, 0); /* Topic count */
- else {
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- rd_kafka_buf_write_topic_partitions(
- rkbuf, owned_partitions,
- rd_false /*don't skip invalid offsets*/,
- rd_false /*any offset*/, fields);
- }
-
- /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
- rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
- len = rd_slice_remains(&rkbuf->rkbuf_reader);
- kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len);
- rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len);
- rd_kafka_buf_destroy(rkbuf);
-
- return kbytes;
-}
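
The wire layout documented in the comment above can be reproduced with plain
big-endian writes. A standalone sketch serializing a one-topic subscription
with empty userdata and no owned partitions (wr16/wr32 are assumed helpers,
not librdkafka API):

    #include <stdint.h>
    #include <string.h>

    static size_t wr16(uint8_t *p, int16_t v) {
            p[0] = (uint8_t)(v >> 8);
            p[1] = (uint8_t)v;
            return 2;
    }

    static size_t wr32(uint8_t *p, int32_t v) {
            p[0] = (uint8_t)(v >> 24);
            p[1] = (uint8_t)(v >> 16);
            p[2] = (uint8_t)(v >> 8);
            p[3] = (uint8_t)v;
            return 4;
    }

    /* One subscribed topic ("mytopic"), empty UserData, no OwnedPartitions. */
    static size_t member_metadata_example(uint8_t *buf) {
            const char *topic = "mytopic";
            size_t tlen       = strlen(topic);
            size_t of         = 0;

            of += wr16(buf + of, 1);             /* Version = 1 */
            of += wr32(buf + of, 1);             /* Subscription: topic count */
            of += wr16(buf + of, (int16_t)tlen); /* String: length prefix */
            memcpy(buf + of, topic, tlen);       /* String: payload */
            of += tlen;
            of += wr32(buf + of, 0);             /* UserData: empty Bytes */
            of += wr32(buf + of, 0);             /* OwnedPartitions: 0 topics */
            return of;                           /* 23 bytes for "mytopic" */
    }
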
-
-
-
-rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
- const rd_kafka_assignor_t *rkas,
- void *assignor_state,
- const rd_list_t *topics,
- const rd_kafka_topic_partition_list_t *owned_partitions) {
- return rd_kafka_consumer_protocol_member_metadata_new(topics, NULL, 0,
- owned_partitions);
-}
-
-
-
-/**
- * Returns 1 if all subscriptions are satisfied for this member, else 0.
- */
-static int rd_kafka_member_subscription_match(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_group_member_t *rkgm,
- const rd_kafka_metadata_topic_t *topic_metadata,
- rd_kafka_assignor_topic_t *eligible_topic) {
- int i;
- int has_regex = 0;
- int matched = 0;
-
- /* Match against member's subscription. */
- for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rkgm->rkgm_subscription->elems[i];
- int matched_by_regex = 0;
-
- if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar,
- topic_metadata->topic,
- &matched_by_regex)) {
- rd_list_add(&rkgm->rkgm_eligible,
- (void *)topic_metadata);
- matched++;
- has_regex += matched_by_regex;
- }
- }
-
- if (matched)
- rd_list_add(&eligible_topic->members, rkgm);
-
- if (!has_regex &&
- rd_list_cnt(&rkgm->rkgm_eligible) == rkgm->rkgm_subscription->cnt)
- return 1; /* All subscriptions matched */
- else
- return 0;
-}
-
-
-static void rd_kafka_assignor_topic_destroy(rd_kafka_assignor_topic_t *at) {
- rd_list_destroy(&at->members);
- rd_free(at);
-}
-
-int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b) {
- const rd_kafka_assignor_topic_t *a =
- *(const rd_kafka_assignor_topic_t *const *)_a;
- const rd_kafka_assignor_topic_t *b =
- *(const rd_kafka_assignor_topic_t *const *)_b;
-
- return strcmp(a->metadata->topic, b->metadata->topic);
-}
-
-/**
- * Determine the complete set of topics that match at least one of
- * the group member subscriptions. Associate with each of these the
- * complete set of members that are subscribed to it. The result is
- * returned in `eligible_topics`.
- */
-static void
-rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg,
- rd_list_t *eligible_topics,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- int member_cnt) {
- int ti;
- rd_kafka_assignor_topic_t *eligible_topic = NULL;
-
- rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10),
- (void *)rd_kafka_assignor_topic_destroy);
-
- /* For each topic in the cluster, scan through the member list
- * to find matching subscriptions. */
- for (ti = 0; ti < metadata->topic_cnt; ti++) {
- int i;
-
- /* Ignore topics in blacklist */
- if (rkcg->rkcg_rk->rk_conf.topic_blacklist &&
- rd_kafka_pattern_match(
- rkcg->rkcg_rk->rk_conf.topic_blacklist,
- metadata->topics[ti].topic)) {
- rd_kafka_dbg(rkcg->rkcg_rk,
- TOPIC | RD_KAFKA_DBG_ASSIGNOR, "BLACKLIST",
- "Assignor ignoring blacklisted "
- "topic \"%s\"",
- metadata->topics[ti].topic);
- continue;
- }
-
- if (!eligible_topic)
- eligible_topic = rd_calloc(1, sizeof(*eligible_topic));
-
- rd_list_init(&eligible_topic->members, member_cnt, NULL);
-
- /* For each member: scan through its topic subscription */
- for (i = 0; i < member_cnt; i++) {
- /* Match topic against existing metadata,
- incl regex matching. */
- rd_kafka_member_subscription_match(
- rkcg, &members[i], &metadata->topics[ti],
- eligible_topic);
- }
-
- if (rd_list_empty(&eligible_topic->members)) {
- rd_list_destroy(&eligible_topic->members);
- continue;
- }
-
- eligible_topic->metadata = &metadata->topics[ti];
- rd_list_add(eligible_topics, eligible_topic);
- eligible_topic = NULL;
- }
-
- if (eligible_topic)
- rd_free(eligible_topic);
-}
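
A tiny worked example of the resulting map: with topics a and b in the
cluster metadata, member m1 subscribed to {a, b} and member m2 subscribed to
{b}, the function yields

    eligible_topics = [
        { metadata: a, members: [m1]     },
        { metadata: b, members: [m1, m2] },
    ]

Topics with no subscribers are dropped (the rd_list_empty() branch above),
and each matched topic's metadata is also added to the member's
rkgm_eligible list.
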
-
-
-rd_kafka_resp_err_t rd_kafka_assignor_run(rd_kafka_cgrp_t *rkcg,
- const rd_kafka_assignor_t *rkas,
- rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- int member_cnt,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_resp_err_t err;
- rd_ts_t ts_start = rd_clock();
- int i;
- rd_list_t eligible_topics;
- int j;
-
- /* Construct eligible_topics, a map of:
- * topic -> set of members that are subscribed to it. */
- rd_kafka_member_subscriptions_map(rkcg, &eligible_topics, metadata,
- members, member_cnt);
-
-
- if (rkcg->rkcg_rk->rk_conf.debug &
- (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
- "Group \"%s\" running %s assignor for "
- "%d member(s) and "
- "%d eligible subscribed topic(s):",
- rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
- member_cnt, eligible_topics.rl_cnt);
-
- for (i = 0; i < member_cnt; i++) {
- const rd_kafka_group_member_t *member = &members[i];
-
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR,
- "ASSIGN",
- " Member \"%.*s\"%s with "
- "%d owned partition(s) and "
- "%d subscribed topic(s):",
- RD_KAFKAP_STR_PR(member->rkgm_member_id),
- !rd_kafkap_str_cmp(member->rkgm_member_id,
- rkcg->rkcg_member_id)
- ? " (me)"
- : "",
- member->rkgm_owned ? member->rkgm_owned->cnt : 0,
- member->rkgm_subscription->cnt);
- for (j = 0; j < member->rkgm_subscription->cnt; j++) {
- const rd_kafka_topic_partition_t *p =
- &member->rkgm_subscription->elems[j];
- rd_kafka_dbg(rkcg->rkcg_rk,
- CGRP | RD_KAFKA_DBG_ASSIGNOR,
- "ASSIGN", " %s [%" PRId32 "]",
- p->topic, p->partition);
- }
- }
- }
-
-        /* Call the assignor's assign callback */
- err = rkas->rkas_assign_cb(
- rkcg->rkcg_rk, rkas, rkcg->rkcg_member_id->str, metadata, members,
- member_cnt, (rd_kafka_assignor_topic_t **)eligible_topics.rl_elems,
- eligible_topics.rl_cnt, errstr, errstr_size, rkas->rkas_opaque);
-
- if (err) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
- "Group \"%s\" %s assignment failed "
- "for %d member(s): %s",
- rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
- (int)member_cnt, errstr);
- } else if (rkcg->rkcg_rk->rk_conf.debug &
- (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
- "Group \"%s\" %s assignment for %d member(s) "
- "finished in %.3fms:",
- rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
- (int)member_cnt, (float)(rd_clock() - ts_start) / 1000.0f);
- for (i = 0; i < member_cnt; i++) {
- const rd_kafka_group_member_t *member = &members[i];
-
- rd_kafka_dbg(rkcg->rkcg_rk,
- CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
- " Member \"%.*s\"%s assigned "
- "%d partition(s):",
- RD_KAFKAP_STR_PR(member->rkgm_member_id),
- !rd_kafkap_str_cmp(member->rkgm_member_id,
- rkcg->rkcg_member_id)
- ? " (me)"
- : "",
- member->rkgm_assignment->cnt);
- for (j = 0; j < member->rkgm_assignment->cnt; j++) {
- const rd_kafka_topic_partition_t *p =
- &member->rkgm_assignment->elems[j];
- rd_kafka_dbg(rkcg->rkcg_rk,
- CGRP | RD_KAFKA_DBG_ASSIGNOR,
- "ASSIGN", " %s [%" PRId32 "]",
- p->topic, p->partition);
- }
- }
- }
-
- rd_list_destroy(&eligible_topics);
-
- return err;
-}
-
-
-/**
- * Assignor protocol string comparator
- */
-static int rd_kafka_assignor_cmp_str(const void *_a, const void *_b) {
- const char *a = _a;
- const rd_kafka_assignor_t *b = _b;
-
- return rd_kafkap_str_cmp_str2(a, b->rkas_protocol_name);
-}
-
-/**
- * Find assignor by protocol name.
- *
- * Locality: any
- * Locks: none
- */
-rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk,
- const char *protocol) {
- return (rd_kafka_assignor_t *)rd_list_find(
- &rk->rk_conf.partition_assignors, protocol,
- rd_kafka_assignor_cmp_str);
-}
-
-
-/**
- * Destroys an assignor (but does not unlink).
- */
-static void rd_kafka_assignor_destroy(rd_kafka_assignor_t *rkas) {
- rd_kafkap_str_destroy(rkas->rkas_protocol_type);
- rd_kafkap_str_destroy(rkas->rkas_protocol_name);
- rd_free(rkas);
-}
-
-
-/**
- * @brief Check that the rebalance protocol of all enabled assignors is
- * the same.
- */
-rd_kafka_resp_err_t
-rd_kafka_assignor_rebalance_protocol_check(const rd_kafka_conf_t *conf) {
- int i;
- rd_kafka_assignor_t *rkas;
- rd_kafka_rebalance_protocol_t rebalance_protocol =
- RD_KAFKA_REBALANCE_PROTOCOL_NONE;
-
- RD_LIST_FOREACH(rkas, &conf->partition_assignors, i) {
- if (!rkas->rkas_enabled)
- continue;
-
- if (rebalance_protocol == RD_KAFKA_REBALANCE_PROTOCOL_NONE)
- rebalance_protocol = rkas->rkas_protocol;
- else if (rebalance_protocol != rkas->rkas_protocol)
- return RD_KAFKA_RESP_ERR__CONFLICT;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
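
Mixing protocols is a configuration error: "range" and "roundrobin" use the
EAGER protocol while "cooperative-sticky" is COOPERATIVE, so a strategy list
combining them trips this check. Sketch:

    /* Makes the check above return RD_KAFKA_RESP_ERR__CONFLICT: */
    rd_kafka_conf_set(conf, "partition.assignment.strategy",
                      "range,cooperative-sticky", errstr, sizeof(errstr));
    /* Client creation then fails with the "must have the same protocol
     * type" error emitted by rd_kafka_assignors_init() below. */
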
-
-
-/**
- * @brief Add an assignor.
- */
-rd_kafka_resp_err_t rd_kafka_assignor_add(
- rd_kafka_t *rk,
- const char *protocol_type,
- const char *protocol_name,
- rd_kafka_rebalance_protocol_t rebalance_protocol,
- rd_kafka_resp_err_t (*assign_cb)(
- rd_kafka_t *rk,
- const struct rd_kafka_assignor_s *rkas,
- const char *member_id,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- rd_kafka_assignor_topic_t **eligible_topics,
- size_t eligible_topic_cnt,
- char *errstr,
- size_t errstr_size,
- void *opaque),
- rd_kafkap_bytes_t *(*get_metadata_cb)(
- const struct rd_kafka_assignor_s *rkas,
- void *assignor_state,
- const rd_list_t *topics,
- const rd_kafka_topic_partition_list_t *owned_partitions),
- void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
- void **assignor_state,
- const rd_kafka_topic_partition_list_t *assignment,
- const rd_kafkap_bytes_t *userdata,
- const rd_kafka_consumer_group_metadata_t *rkcgm),
- void (*destroy_state_cb)(void *assignor_state),
- int (*unittest_cb)(void),
- void *opaque) {
- rd_kafka_assignor_t *rkas;
-
- if (rd_kafkap_str_cmp_str(rk->rk_conf.group_protocol_type,
- protocol_type))
- return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
-
- if (rebalance_protocol != RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
- rebalance_protocol != RD_KAFKA_REBALANCE_PROTOCOL_EAGER)
- return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
-
-        /* Don't overwrite application assignors */
- if ((rkas = rd_kafka_assignor_find(rk, protocol_name)))
- return RD_KAFKA_RESP_ERR__CONFLICT;
-
- rkas = rd_calloc(1, sizeof(*rkas));
-
- rkas->rkas_protocol_name = rd_kafkap_str_new(protocol_name, -1);
- rkas->rkas_protocol_type = rd_kafkap_str_new(protocol_type, -1);
- rkas->rkas_protocol = rebalance_protocol;
- rkas->rkas_assign_cb = assign_cb;
- rkas->rkas_get_metadata_cb = get_metadata_cb;
- rkas->rkas_on_assignment_cb = on_assignment_cb;
- rkas->rkas_destroy_state_cb = destroy_state_cb;
- rkas->rkas_unittest = unittest_cb;
- rkas->rkas_opaque = opaque;
- rkas->rkas_index = INT_MAX;
-
- rd_list_add(&rk->rk_conf.partition_assignors, rkas);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
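
For illustration only, a hypothetical no-op assignor registered through this
internal API during client setup; "consumer" is the default
group.protocol.type, and the empty-userdata metadata helper defined earlier
is reused:

    static rd_kafka_resp_err_t
    my_assign_cb(rd_kafka_t *rk, const struct rd_kafka_assignor_s *rkas,
                 const char *member_id, const rd_kafka_metadata_t *metadata,
                 rd_kafka_group_member_t *members, size_t member_cnt,
                 rd_kafka_assignor_topic_t **eligible_topics,
                 size_t eligible_topic_cnt, char *errstr, size_t errstr_size,
                 void *opaque) {
            return RD_KAFKA_RESP_ERR_NO_ERROR; /* leaves all assignments empty */
    }

    rd_kafka_assignor_add(rk, "consumer", "noop",
                          RD_KAFKA_REBALANCE_PROTOCOL_EAGER, my_assign_cb,
                          rd_kafka_assignor_get_metadata_with_empty_userdata,
                          NULL /*on_assignment_cb*/, NULL /*destroy_state_cb*/,
                          NULL /*unittest_cb*/, NULL /*opaque*/);
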
-
-
-/* Right-trim trailing whitespace from string */
-static void rtrim(char *s) {
-        char *e = s + strlen(s);
-
-        if (e == s)
-                return;
-
-        /* Start at the last character rather than the nul terminator,
-         * otherwise nothing is ever trimmed. */
-        e--;
-        while (e >= s && isspace((unsigned char)*e))
-                e--;
-
-        e[1] = '\0';
-}
-
-
-static int rd_kafka_assignor_cmp_idx(const void *ptr1, const void *ptr2) {
- const rd_kafka_assignor_t *rkas1 = (const rd_kafka_assignor_t *)ptr1;
- const rd_kafka_assignor_t *rkas2 = (const rd_kafka_assignor_t *)ptr2;
- return rkas1->rkas_index - rkas2->rkas_index;
-}
-
-
-/**
- * Initialize assignor list based on configuration.
- */
-int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
- char *wanted;
- char *s;
- int idx = 0;
-
- rd_list_init(&rk->rk_conf.partition_assignors, 3,
- (void *)rd_kafka_assignor_destroy);
-
- /* Initialize builtin assignors (ignore errors) */
- rd_kafka_range_assignor_init(rk);
- rd_kafka_roundrobin_assignor_init(rk);
- rd_kafka_sticky_assignor_init(rk);
-
- rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy);
-
- s = wanted;
- while (*s) {
- rd_kafka_assignor_t *rkas = NULL;
- char *t;
-
- /* Left trim */
- while (*s == ' ' || *s == ',')
- s++;
-
- if ((t = strchr(s, ','))) {
- *t = '\0';
- t++;
- } else {
- t = s + strlen(s);
- }
-
- /* Right trim */
- rtrim(s);
-
- rkas = rd_kafka_assignor_find(rk, s);
- if (!rkas) {
- rd_snprintf(errstr, errstr_size,
- "Unsupported partition.assignment.strategy:"
- " %s",
- s);
- return -1;
- }
-
- if (!rkas->rkas_enabled) {
- rkas->rkas_enabled = 1;
- rk->rk_conf.enabled_assignor_cnt++;
- rkas->rkas_index = idx;
- idx++;
- }
-
- s = t;
- }
-
- /* Sort the assignors according to the input strategy order
-         * since assignors will be scanned from the list sequentially
- * and the strategies earlier in the list have higher priority. */
- rd_list_sort(&rk->rk_conf.partition_assignors,
- rd_kafka_assignor_cmp_idx);
-
- /* Clear the SORTED flag because the list is sorted according to the
- * rkas_index, but will do the search using rkas_protocol_name. */
- rk->rk_conf.partition_assignors.rl_flags &= ~RD_LIST_F_SORTED;
-
- if (rd_kafka_assignor_rebalance_protocol_check(&rk->rk_conf)) {
- rd_snprintf(errstr, errstr_size,
- "All partition.assignment.strategy (%s) assignors "
- "must have the same protocol type, "
- "online migration between assignors with "
- "different protocol types is not supported",
- rk->rk_conf.partition_assignment_strategy);
- return -1;
- }
-
- return 0;
-}
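
On the application side this parser is fed through standard configuration;
the strategy list is comma-separated and priority-ordered. A sketch using
the public conf API:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_conf_t *make_consumer_conf(char *errstr,
                                               size_t errstr_size) {
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            rd_kafka_conf_set(conf, "group.id", "mygroup", errstr,
                              errstr_size);
            /* Parsed by rd_kafka_assignors_init(): roundrobin gets
             * rkas_index 0 and range gets rkas_index 1, so roundrobin
             * is preferred. */
            rd_kafka_conf_set(conf, "partition.assignment.strategy",
                              "roundrobin,range", errstr, errstr_size);
            return conf;
    }
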
-
-
-
-/**
- * Free assignors
- */
-void rd_kafka_assignors_term(rd_kafka_t *rk) {
- rd_list_destroy(&rk->rk_conf.partition_assignors);
-}
-
-
-
-/**
- * @brief Unittest for assignors
- */
-static int ut_assignors(void) {
- const struct {
- const char *name;
- int topic_cnt;
- struct {
- const char *name;
- int partition_cnt;
- } topics[12];
- int member_cnt;
- struct {
- const char *name;
- int topic_cnt;
- const char *topics[12];
- } members[3];
- int expect_cnt;
- struct {
- const char *protocol_name;
- struct {
- int partition_cnt;
- const char *partitions[12]; /* "topic:part" */
- } members[3];
- } expect[2];
- } tests[] = {
- /*
- * Test cases
- */
- {
- .name = "Symmetrical subscription",
- .topic_cnt = 4,
- .topics =
- {
- {"a", 3}, /* a:0 a:1 a:2 */
- {
- "b",
- 4,
- }, /* b:0 b:1 b:2 b:3 */
- {"c", 2}, /* c:0 c:1 */
- {"d", 1}, /* d:0 */
- },
- .member_cnt = 2,
- .members =
- {
- {.name = "consumer1",
- .topic_cnt = 4,
- .topics = {"d", "b", "a", "c"}},
- {.name = "consumer2",
- .topic_cnt = 4,
- .topics = {"a", "b", "c", "d"}},
- },
- .expect_cnt = 2,
- .expect =
- {
- {
- .protocol_name = "range",
- .members =
- {
- /* Consumer1 */
- {6,
- {"a:0", "a:1", "b:0", "b:1", "c:0",
- "d:0"}},
- /* Consumer2 */
- {4, {"a:2", "b:2", "b:3", "c:1"}},
- },
- },
- {
- .protocol_name = "roundrobin",
- .members =
- {
- /* Consumer1 */
- {5, {"a:0", "a:2", "b:1", "b:3", "c:1"}},
- /* Consumer2 */
- {5, {"a:1", "b:0", "b:2", "c:0", "d:0"}},
- },
- },
- },
- },
- {
- .name = "1*3 partitions (asymmetrical)",
- .topic_cnt = 1,
- .topics =
- {
- {"a", 3},
- },
- .member_cnt = 2,
- .members =
- {
- {.name = "consumer1",
- .topic_cnt = 3,
- .topics = {"a", "b", "c"}},
- {.name = "consumer2", .topic_cnt = 1, .topics = {"a"}},
- },
- .expect_cnt = 2,
- .expect =
- {
- {
- .protocol_name = "range",
- .members =
- {
- /* Consumer1.
- * range assignor applies
- * per topic. */
- {2, {"a:0", "a:1"}},
- /* Consumer2 */
- {1, {"a:2"}},
- },
- },
- {
- .protocol_name = "roundrobin",
- .members =
- {
- /* Consumer1 */
- {2, {"a:0", "a:2"}},
- /* Consumer2 */
- {1, {"a:1"}},
- },
- },
- },
- },
- {
- .name = "#2121 (asymmetrical)",
- .topic_cnt = 12,
- .topics =
- {
- {"a", 1},
- {"b", 1},
- {"c", 1},
- {"d", 1},
- {"e", 1},
- {"f", 1},
- {"g", 1},
- {"h", 1},
- {"i", 1},
- {"j", 1},
- {"k", 1},
- {"l", 1},
- },
- .member_cnt = 2,
- .members =
- {
- {
- .name = "consumer1",
- .topic_cnt = 12,
- .topics =
- {
- "a",
- "b",
- "c",
- "d",
- "e",
- "f",
- "g",
- "h",
- "i",
- "j",
- "k",
- "l",
- },
- },
- {
- .name = "consumer2", /* must be second */
- .topic_cnt = 5,
- .topics =
- {
- "b",
- "d",
- "f",
- "h",
- "l",
- },
- },
- },
- .expect_cnt = 2,
- .expect =
- {
- {
- .protocol_name = "range",
- .members =
- {
- /* Consumer1.
- * All partitions. */
- {12,
- {
- "a:0",
- "b:0",
- "c:0",
- "d:0",
- "e:0",
- "f:0",
- "g:0",
- "h:0",
- "i:0",
- "j:0",
- "k:0",
- "l:0",
- }},
- /* Consumer2 */
- {0},
- },
- },
- {
- .protocol_name = "roundrobin",
- .members =
- {
- /* Consumer1 */
- {
- 7,
- {
- "a:0",
- "c:0",
- "e:0",
- "g:0",
- "i:0",
- "j:0",
- "k:0",
- },
- },
- /* Consumer2 */
- {5, {"b:0", "d:0", "f:0", "h:0", "l:0"}},
- },
- },
- },
- },
- {NULL},
- };
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- const rd_kafka_assignor_t *rkas;
- int fails = 0;
- int i;
-
- conf = rd_kafka_conf_new();
- rd_kafka_conf_set(conf, "group.id", "group", NULL, 0);
- rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL,
- 0);
- rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0);
- RD_UT_ASSERT(rk != NULL, "Failed to create consumer");
-
- /* Run through test cases */
- for (i = 0; tests[i].name; i++) {
- int ie, it, im;
- rd_kafka_metadata_t metadata;
- rd_kafka_group_member_t *members;
-
- /* Create topic metadata */
- metadata.topic_cnt = tests[i].topic_cnt;
- metadata.topics =
- rd_alloca(sizeof(*metadata.topics) * metadata.topic_cnt);
- memset(metadata.topics, 0,
- sizeof(*metadata.topics) * metadata.topic_cnt);
- for (it = 0; it < metadata.topic_cnt; it++) {
- metadata.topics[it].topic =
- (char *)tests[i].topics[it].name;
- metadata.topics[it].partition_cnt =
- tests[i].topics[it].partition_cnt;
- metadata.topics[it].partitions = NULL; /* Not used */
- }
-
- /* Create members */
- members = rd_alloca(sizeof(*members) * tests[i].member_cnt);
- memset(members, 0, sizeof(*members) * tests[i].member_cnt);
-
- for (im = 0; im < tests[i].member_cnt; im++) {
- rd_kafka_group_member_t *rkgm = &members[im];
- rkgm->rkgm_member_id =
- rd_kafkap_str_new(tests[i].members[im].name, -1);
- rkgm->rkgm_group_instance_id =
- rd_kafkap_str_new(tests[i].members[im].name, -1);
- rd_list_init(&rkgm->rkgm_eligible,
- tests[i].members[im].topic_cnt, NULL);
-
- rkgm->rkgm_subscription =
- rd_kafka_topic_partition_list_new(
- tests[i].members[im].topic_cnt);
- for (it = 0; it < tests[i].members[im].topic_cnt; it++)
- rd_kafka_topic_partition_list_add(
- rkgm->rkgm_subscription,
- tests[i].members[im].topics[it],
- RD_KAFKA_PARTITION_UA);
-
- rkgm->rkgm_userdata = NULL;
-
- rkgm->rkgm_assignment =
- rd_kafka_topic_partition_list_new(
- rkgm->rkgm_subscription->size);
- }
-
- /* For each assignor verify that the assignment
-                 * matches the expectation set out in the test case. */
- for (ie = 0; ie < tests[i].expect_cnt; ie++) {
- rd_kafka_resp_err_t err;
- char errstr[256];
-
- RD_UT_SAY("Test case %s: %s assignor", tests[i].name,
- tests[i].expect[ie].protocol_name);
-
- if (!(rkas = rd_kafka_assignor_find(
- rk, tests[i].expect[ie].protocol_name))) {
- RD_UT_FAIL(
- "Assignor test case %s for %s failed: "
- "assignor not found",
- tests[i].name,
- tests[i].expect[ie].protocol_name);
- }
-
- /* Run assignor */
- err = rd_kafka_assignor_run(
- rk->rk_cgrp, rkas, &metadata, members,
- tests[i].member_cnt, errstr, sizeof(errstr));
-
- RD_UT_ASSERT(!err, "Assignor case %s for %s failed: %s",
- tests[i].name,
- tests[i].expect[ie].protocol_name, errstr);
-
- /* Verify assignments */
- for (im = 0; im < tests[i].member_cnt; im++) {
- rd_kafka_group_member_t *rkgm = &members[im];
- int ia;
-
- if (rkgm->rkgm_assignment->cnt !=
- tests[i]
- .expect[ie]
- .members[im]
- .partition_cnt) {
- RD_UT_WARN(
- " Member %.*s assignment count "
- "mismatch: %d != %d",
- RD_KAFKAP_STR_PR(
- rkgm->rkgm_member_id),
- rkgm->rkgm_assignment->cnt,
- tests[i]
- .expect[ie]
- .members[im]
- .partition_cnt);
- fails++;
- }
-
- if (rkgm->rkgm_assignment->cnt > 0)
- rd_kafka_topic_partition_list_sort_by_topic(
- rkgm->rkgm_assignment);
-
- for (ia = 0; ia < rkgm->rkgm_assignment->cnt;
- ia++) {
- rd_kafka_topic_partition_t *p =
- &rkgm->rkgm_assignment->elems[ia];
- char part[64];
- const char *exp =
- ia < tests[i]
- .expect[ie]
- .members[im]
- .partition_cnt
- ? tests[i]
- .expect[ie]
- .members[im]
- .partitions[ia]
- : "(none)";
-
- rd_snprintf(part, sizeof(part), "%s:%d",
- p->topic,
- (int)p->partition);
-
-#if 0 /* Enable to print actual assignment */
- RD_UT_SAY(" Member %.*s assignment "
- "%d/%d %s =? %s",
- RD_KAFKAP_STR_PR(
- rkgm->rkgm_member_id),
- ia,
- rkgm->rkgm_assignment->cnt-1,
- part, exp);
-#endif
-
- if (strcmp(part, exp)) {
- RD_UT_WARN(
- " Member %.*s "
- "assignment %d/%d "
- "mismatch: %s != %s",
- RD_KAFKAP_STR_PR(
- rkgm->rkgm_member_id),
- ia,
- rkgm->rkgm_assignment->cnt -
- 1,
- part, exp);
- fails++;
- }
- }
-
- /* Reset assignment for next loop */
- rd_kafka_topic_partition_list_destroy(
- rkgm->rkgm_assignment);
- rkgm->rkgm_assignment =
- rd_kafka_topic_partition_list_new(
- rkgm->rkgm_subscription->size);
- }
- }
-
- for (im = 0; im < tests[i].member_cnt; im++) {
- rd_kafka_group_member_t *rkgm = &members[im];
- rd_kafka_group_member_clear(rkgm);
- }
- }
-
-
- /* Run assignor-specific unittests */
- RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) {
- if (rkas->rkas_unittest)
- fails += rkas->rkas_unittest();
- }
-
- rd_kafka_destroy(rk);
-
- if (fails)
- return 1;
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Unit tests for assignors
- */
-int unittest_assignors(void) {
- return ut_assignors();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h
deleted file mode 100644
index b90e7dc98..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_ASSIGNOR_H_
-#define _RDKAFKA_ASSIGNOR_H_
-
-
-
-/*!
- * Enumerates the different rebalance protocol types.
- *
- * @sa rd_kafka_rebalance_protocol()
- */
-typedef enum rd_kafka_rebalance_protocol_t {
- RD_KAFKA_REBALANCE_PROTOCOL_NONE, /**< Rebalance protocol is
- unknown */
- RD_KAFKA_REBALANCE_PROTOCOL_EAGER, /**< Eager rebalance
- protocol */
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE /**< Cooperative
- rebalance protocol*/
-} rd_kafka_rebalance_protocol_t;
-
-
-
-typedef struct rd_kafka_group_member_s {
- /** Subscribed topics (partition field is ignored). */
- rd_kafka_topic_partition_list_t *rkgm_subscription;
- /** Partitions assigned to this member after running the assignor.
- * E.g., the current assignment coming out of the rebalance. */
- rd_kafka_topic_partition_list_t *rkgm_assignment;
- /** Partitions reported as currently owned by the member, read
- * from consumer metadata. E.g., the current assignment going into
- * the rebalance. */
- rd_kafka_topic_partition_list_t *rkgm_owned;
- /** List of eligible topics in subscription. E.g., subscribed topics
- * that exist. */
- rd_list_t rkgm_eligible;
- /** Member id (e.g., client.id-some-uuid). */
- rd_kafkap_str_t *rkgm_member_id;
- /** Group instance id. */
- rd_kafkap_str_t *rkgm_group_instance_id;
- /** Member-specific opaque userdata. */
- rd_kafkap_bytes_t *rkgm_userdata;
- /** Member metadata, e.g., the currently owned partitions. */
- rd_kafkap_bytes_t *rkgm_member_metadata;
- /** Group generation id. */
- int rkgm_generation;
-} rd_kafka_group_member_t;
-
-
-int rd_kafka_group_member_cmp(const void *_a, const void *_b);
-
-int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
- const rd_kafka_group_member_t *rkgm,
- const char *topic);
-
-
-/**
- * Structure to hold metadata for a single topic and all its
- * subscribing members.
- */
-typedef struct rd_kafka_assignor_topic_s {
- const rd_kafka_metadata_topic_t *metadata;
- rd_list_t members; /* rd_kafka_group_member_t * */
-} rd_kafka_assignor_topic_t;
-
-
-int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b);
-
-
-typedef struct rd_kafka_assignor_s {
- rd_kafkap_str_t *rkas_protocol_type;
- rd_kafkap_str_t *rkas_protocol_name;
-
- int rkas_enabled;
-
- /** Order for strategies. */
- int rkas_index;
-
- rd_kafka_rebalance_protocol_t rkas_protocol;
-
- rd_kafka_resp_err_t (*rkas_assign_cb)(
- rd_kafka_t *rk,
- const struct rd_kafka_assignor_s *rkas,
- const char *member_id,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- rd_kafka_assignor_topic_t **eligible_topics,
- size_t eligible_topic_cnt,
- char *errstr,
- size_t errstr_size,
- void *opaque);
-
- rd_kafkap_bytes_t *(*rkas_get_metadata_cb)(
- const struct rd_kafka_assignor_s *rkas,
- void *assignor_state,
- const rd_list_t *topics,
- const rd_kafka_topic_partition_list_t *owned_partitions);
-
- void (*rkas_on_assignment_cb)(
- const struct rd_kafka_assignor_s *rkas,
- void **assignor_state,
- const rd_kafka_topic_partition_list_t *assignment,
- const rd_kafkap_bytes_t *assignment_userdata,
- const rd_kafka_consumer_group_metadata_t *rkcgm);
-
- void (*rkas_destroy_state_cb)(void *assignor_state);
-
- int (*rkas_unittest)(void);
-
- void *rkas_opaque;
-} rd_kafka_assignor_t;
-
-
-rd_kafka_resp_err_t rd_kafka_assignor_add(
- rd_kafka_t *rk,
- const char *protocol_type,
- const char *protocol_name,
- rd_kafka_rebalance_protocol_t rebalance_protocol,
- rd_kafka_resp_err_t (*assign_cb)(
- rd_kafka_t *rk,
- const struct rd_kafka_assignor_s *rkas,
- const char *member_id,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- rd_kafka_assignor_topic_t **eligible_topics,
- size_t eligible_topic_cnt,
- char *errstr,
- size_t errstr_size,
- void *opaque),
- rd_kafkap_bytes_t *(*get_metadata_cb)(
- const struct rd_kafka_assignor_s *rkas,
- void *assignor_state,
- const rd_list_t *topics,
- const rd_kafka_topic_partition_list_t *owned_partitions),
- void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
- void **assignor_state,
- const rd_kafka_topic_partition_list_t *assignment,
- const rd_kafkap_bytes_t *userdata,
- const rd_kafka_consumer_group_metadata_t *rkcgm),
- void (*destroy_state_cb)(void *assignor_state),
- int (*unittest_cb)(void),
- void *opaque);
-
-rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
- const rd_list_t *topics,
- const void *userdata,
- size_t userdata_size,
- const rd_kafka_topic_partition_list_t *owned_partitions);
-
-rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
- const rd_kafka_assignor_t *rkas,
- void *assignor_state,
- const rd_list_t *topics,
- const rd_kafka_topic_partition_list_t *owned_partitions);
-
-
-void rd_kafka_assignor_update_subscription(
- const rd_kafka_assignor_t *rkas,
- const rd_kafka_topic_partition_list_t *subscription);
-
-
-rd_kafka_resp_err_t rd_kafka_assignor_run(struct rd_kafka_cgrp_s *rkcg,
- const rd_kafka_assignor_t *rkas,
- rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- int member_cnt,
- char *errstr,
- size_t errstr_size);
-
-rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk,
- const char *protocol);
-
-int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
-void rd_kafka_assignors_term(rd_kafka_t *rk);
-
-
-
-void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm);
-
-
-rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk);
-rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk);
-rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk);
-
-#endif /* _RDKAFKA_ASSIGNOR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c
deleted file mode 100644
index 753f03d67..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_aux.h"
-#include "rdkafka_error.h"
-
-rd_kafka_resp_err_t
-rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres) {
- return topicres->err;
-}
-
-const char *
-rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres) {
- return topicres->errstr;
-}
-
-const char *
-rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres) {
- return topicres->topic;
-}
-
-/**
- * @brief Create new topic_result (single allocation).
- *
- * @param topic Topic string; if \p topic_size is != -1 it does not have to
- *              be nul-terminated.
- * @param topic_size Size of topic, or -1 to perform automatic strlen()
- * @param err Error code
- * @param errstr Optional error string.
- *
- * All input arguments are copied.
- */
-
-rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
- ssize_t topic_size,
- rd_kafka_resp_err_t err,
- const char *errstr) {
- size_t tlen = topic_size != -1 ? (size_t)topic_size : strlen(topic);
- size_t elen = errstr ? strlen(errstr) + 1 : 0;
- rd_kafka_topic_result_t *terr;
-
- terr = rd_malloc(sizeof(*terr) + tlen + 1 + elen);
-
- terr->err = err;
-
- terr->topic = terr->data;
- memcpy(terr->topic, topic, tlen);
- terr->topic[tlen] = '\0';
-
- if (errstr) {
- terr->errstr = terr->topic + tlen + 1;
- memcpy(terr->errstr, errstr, elen);
- } else {
- terr->errstr = NULL;
- }
-
- return terr;
-}
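
The single-allocation trick above packs the struct and both strings into one
malloc'd block through the trailing data array. A standalone sketch of the
same pattern with generic names:

    #include <stdlib.h>
    #include <string.h>

    /* Header struct with a trailing char array holding both strings. */
    struct result {
            char *name;   /* points into data */
            char *errstr; /* points into data after name, or NULL */
            char data[1]; /* name '\0' [errstr '\0'] */
    };

    static struct result *result_new(const char *name, const char *errstr) {
            size_t nlen      = strlen(name);
            size_t elen      = errstr ? strlen(errstr) + 1 : 0;
            struct result *r = malloc(sizeof(*r) + nlen + 1 + elen);

            if (!r)
                    return NULL;
            r->name = r->data;
            memcpy(r->name, name, nlen + 1);
            if (errstr) {
                    r->errstr = r->name + nlen + 1;
                    memcpy(r->errstr, errstr, elen);
            } else
                    r->errstr = NULL;
            return r; /* a single free() releases struct and strings */
    }
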
-
-
-/**
- * @brief Destroy topic_result
- */
-void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr) {
- rd_free(terr);
-}
-
-/**
- * @brief Destroy-variant suitable for rd_list free_cb use.
- */
-void rd_kafka_topic_result_free(void *ptr) {
- rd_kafka_topic_result_destroy((rd_kafka_topic_result_t *)ptr);
-}
-
-const rd_kafka_error_t *
-rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres) {
- return groupres->error;
-}
-
-const char *
-rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres) {
- return groupres->group;
-}
-
-const rd_kafka_topic_partition_list_t *
-rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres) {
- return groupres->partitions;
-}
-
-rd_kafka_group_result_t *
-rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres) {
- return rd_kafka_group_result_new(
- groupres->group, -1, groupres->partitions,
- groupres->error ? rd_kafka_error_copy(groupres->error) : NULL);
-}
-
-/**
- * @brief Same as rd_kafka_group_result_copy() but suitable for
- * rd_list_copy(). The \p opaque is ignored.
- */
-void *rd_kafka_group_result_copy_opaque(const void *src_groupres,
- void *opaque) {
- return rd_kafka_group_result_copy(src_groupres);
-}
-
-
-/**
- * @brief Create new group_result (single allocation).
- *
- * @param group Group string; if \p group_size is != -1 it does not have to
- *              be nul-terminated.
- * @param group_size Size of group, or -1 to perform automatic strlen()
- * @param error Error object, or NULL on success. Takes ownership of \p error.
- *
- * All input arguments are copied.
- */
-
-rd_kafka_group_result_t *
-rd_kafka_group_result_new(const char *group,
- ssize_t group_size,
- const rd_kafka_topic_partition_list_t *partitions,
- rd_kafka_error_t *error) {
- size_t glen = group_size != -1 ? (size_t)group_size : strlen(group);
- rd_kafka_group_result_t *groupres;
-
- groupres = rd_calloc(1, sizeof(*groupres) + glen + 1);
-
-
- groupres->group = groupres->data;
- memcpy(groupres->group, group, glen);
- groupres->group[glen] = '\0';
-
- if (partitions)
- groupres->partitions =
- rd_kafka_topic_partition_list_copy(partitions);
-
- groupres->error = error;
-
- return groupres;
-}
-
-
-/**
- * @brief Destroy group_result
- */
-void rd_kafka_group_result_destroy(rd_kafka_group_result_t *groupres) {
- if (groupres->partitions)
- rd_kafka_topic_partition_list_destroy(groupres->partitions);
- if (groupres->error)
- rd_kafka_error_destroy(groupres->error);
- rd_free(groupres);
-}
-
-/**
- * @brief Destroy-variant suitable for rd_list free_cb use.
- */
-void rd_kafka_group_result_free(void *ptr) {
- rd_kafka_group_result_destroy((rd_kafka_group_result_t *)ptr);
-}
-
-
-const rd_kafka_error_t *
-rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres) {
- return aclres->error;
-}
-
-/**
- * @brief Allocates and returns an acl result; takes ownership of \p error
- * (unless NULL).
- *
- * @returns The new acl result.
- */
-rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error) {
- rd_kafka_acl_result_t *acl_res;
-
- acl_res = rd_calloc(1, sizeof(*acl_res));
-
- acl_res->error = error;
-
- return acl_res;
-}
-
-/**
- * @brief Destroy acl_result
- */
-void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res) {
- if (acl_res->error)
- rd_kafka_error_destroy(acl_res->error);
- rd_free(acl_res);
-}
-
-/**
- * @brief Destroy-variant suitable for rd_list free_cb use.
- */
-void rd_kafka_acl_result_free(void *ptr) {
- rd_kafka_acl_result_destroy((rd_kafka_acl_result_t *)ptr);
-}
-
-
-/**
- * @brief Create a new Node object.
- *
- * @param id The node id.
- * @param host The node host.
- * @param port The node port.
- * @param rack_id (optional) The node rack id.
- * @return A new allocated Node object.
- * Use rd_kafka_Node_destroy() to free when done.
- */
-rd_kafka_Node_t *rd_kafka_Node_new(int id,
- const char *host,
- uint16_t port,
- const char *rack_id) {
- rd_kafka_Node_t *ret = rd_calloc(1, sizeof(*ret));
- ret->id = id;
- ret->port = port;
- ret->host = rd_strdup(host);
- if (rack_id != NULL)
- ret->rack_id = rd_strdup(rack_id);
- return ret;
-}
-
-/**
- * @brief Copy \p src Node object
- *
- * @param src The Node to copy.
- * @return A new allocated Node object.
- * Use rd_kafka_Node_destroy() to free when done.
- */
-rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src) {
- return rd_kafka_Node_new(src->id, src->host, src->port, src->rack_id);
-}
-
-void rd_kafka_Node_destroy(rd_kafka_Node_t *node) {
- rd_free(node->host);
- if (node->rack_id)
- rd_free(node->rack_id);
- rd_free(node);
-}
-
-int rd_kafka_Node_id(const rd_kafka_Node_t *node) {
- return node->id;
-}
-
-const char *rd_kafka_Node_host(const rd_kafka_Node_t *node) {
- return node->host;
-}
-
-uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node) {
- return node->port;
-}
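
Illustrative construction and inspection of a Node (host and rack id are
made-up values); applications normally only receive const Node pointers from
Admin API results and use the accessors above:

    rd_kafka_Node_t *node =
        rd_kafka_Node_new(1, "broker1.example.com", 9092, "rack-a");

    printf("node %d at %s:%u\n", rd_kafka_Node_id(node),
           rd_kafka_Node_host(node), (unsigned)rd_kafka_Node_port(node));
    rd_kafka_Node_destroy(node);
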
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h
deleted file mode 100644
index ccf18e91e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_AUX_H_
-#define _RDKAFKA_AUX_H_
-
-/**
- * @name Auxiliary types
- */
-
-#include "rdkafka_conf.h"
-
-/**
- * @brief Topic [ + Error code + Error string ]
- *
- * @remark Public type.
- * @remark Single allocation.
- */
-struct rd_kafka_topic_result_s {
- char *topic; /**< Points to data */
- rd_kafka_resp_err_t err; /**< Error code */
- char *errstr; /**< Points to data after topic, unless NULL */
- char data[1]; /**< topic followed by errstr */
-};
-
-void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr);
-void rd_kafka_topic_result_free(void *ptr);
-
-rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
- ssize_t topic_size,
- rd_kafka_resp_err_t err,
- const char *errstr);
-
-/**
- * @brief Group [ + Error object ]
- *
- * @remark Public type.
- * @remark Single allocation.
- */
-struct rd_kafka_group_result_s {
- char *group; /**< Points to data */
- rd_kafka_error_t *error; /**< Error object, or NULL on success */
- /** Partitions, used by DeleteConsumerGroupOffsets. */
- rd_kafka_topic_partition_list_t *partitions;
- char data[1]; /**< Group name */
-};
-
-void rd_kafka_group_result_destroy(rd_kafka_group_result_t *terr);
-void rd_kafka_group_result_free(void *ptr);
-
-rd_kafka_group_result_t *
-rd_kafka_group_result_new(const char *group,
- ssize_t group_size,
- const rd_kafka_topic_partition_list_t *partitions,
- rd_kafka_error_t *error);
-
-/**
- * @brief Acl creation result [ Error code + Error string ]
- *
- * @remark Public type.
- * @remark Single allocation.
- */
-struct rd_kafka_acl_result_s {
- rd_kafka_error_t *error; /**< Error object, or NULL on success. */
-};
-
-void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res);
-void rd_kafka_acl_result_free(void *ptr);
-
-rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error);
-
-rd_kafka_group_result_t *
-rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres);
-void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque);
-/**@}*/
-
-/**
- * @struct Node represents a broker.
- * It's the public type.
- */
-typedef struct rd_kafka_Node_s {
- int id; /*< Node id */
- char *host; /*< Node host */
- uint16_t port; /*< Node port */
- char *rack_id; /*< (optional) Node rack id */
-} rd_kafka_Node_t;
-
-rd_kafka_Node_t *
-rd_kafka_Node_new(int id, const char *host, uint16_t port, const char *rack_id);
-
-rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src);
-
-void rd_kafka_Node_destroy(rd_kafka_Node_t *node);
-
-#endif /* _RDKAFKA_AUX_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c
deleted file mode 100644
index c69ec1767..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Background queue thread and event handling.
- *
- * See rdkafka.h's rd_kafka_conf_set_background_event_cb() for details.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_event.h"
-#include "rdkafka_interceptor.h"
-
-#include <signal.h>
-
-/**
- * @brief Call the registered background_event_cb.
- * @locality rdkafka background queue thread
- */
-static RD_INLINE void rd_kafka_call_background_event_cb(rd_kafka_t *rk,
- rd_kafka_op_t *rko) {
- rd_assert(!rk->rk_background.calling);
- rk->rk_background.calling = 1;
-
- rk->rk_conf.background_event_cb(rk, rko, rk->rk_conf.opaque);
-
- rk->rk_background.calling = 0;
-}
-
-
-/**
- * @brief Background queue handler.
- *
- * Triggers the background_event_cb for all event:able ops;
- * for non-event:able ops:
- * - call the op callback if set, else
- * - log and discard the op. Forwarding non-event APIs to the
- *   background queue is a user error.
- */
-static rd_kafka_op_res_t
-rd_kafka_background_queue_serve(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
- rd_kafka_op_res_t res;
-
- /*
- * Dispatch Event:able ops to background_event_cb()
- */
- if (likely(rk->rk_conf.background_event_cb &&
- rd_kafka_event_setup(rk, rko))) {
- rd_kafka_call_background_event_cb(rk, rko);
- /* Event must be destroyed by application. */
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- /*
- * Handle non-event:able ops through the standard poll_cb that
- * will trigger type-specific callbacks (and return OP_RES_HANDLED)
- * or do no handling and return OP_RES_PASS.
- * Also signal yield to q_serve() (which implies that op was handled).
- */
- res = rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_CALLBACK, opaque);
- if (res == RD_KAFKA_OP_RES_HANDLED || res == RD_KAFKA_OP_RES_YIELD)
- return res;
-
- /* Op was not handled, log and destroy it. */
- rd_kafka_log(rk, LOG_NOTICE, "BGQUEUE",
- "No support for handling "
- "non-event op %s in background queue: discarding",
- rd_kafka_op2str(rko->rko_type));
- rd_kafka_op_destroy(rko);
-
- /* Indicate that the op was handled. */
- return RD_KAFKA_OP_RES_HANDLED;
-}
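
Application-side setup that routes events to this handler uses the public
rd_kafka_conf_set_background_event_cb() referenced in the file header; a
minimal sketch:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void my_bg_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev,
                               void *opaque) {
            /* Runs on the background thread for each event:able op. */
            fprintf(stderr, "event: %s\n", rd_kafka_event_name(rkev));
            rd_kafka_event_destroy(rkev); /* the application owns the event */
    }

    static rd_kafka_t *create_client(char *errstr, size_t errstr_size) {
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            rd_kafka_conf_set_background_event_cb(conf, my_bg_event_cb);
            /* rd_kafka_new() creates the background thread (via
             * rd_kafka_background_thread_create() below) when the
             * callback is set. */
            return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                errstr_size);
    }
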
-
-
-/**
- * @brief Main loop for background queue thread.
- */
-int rd_kafka_background_thread_main(void *arg) {
- rd_kafka_t *rk = arg;
-
- rd_kafka_set_thread_name("background");
- rd_kafka_set_thread_sysname("rdk:bg");
-
- rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BACKGROUND);
-
- (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
-
- /* Acquire lock (which was held by thread creator during creation)
- * to synchronise state. */
- rd_kafka_wrlock(rk);
- rd_kafka_wrunlock(rk);
-
- mtx_lock(&rk->rk_init_lock);
- rk->rk_init_wait_cnt--;
- cnd_broadcast(&rk->rk_init_cnd);
- mtx_unlock(&rk->rk_init_lock);
-
- while (likely(!rd_kafka_terminating(rk))) {
- rd_kafka_q_serve(rk->rk_background.q, 10 * 1000, 0,
- RD_KAFKA_Q_CB_RETURN,
- rd_kafka_background_queue_serve, NULL);
- }
-
- /* Inform the user that they terminated the client before
- * all outstanding events were handled. */
- if (rd_kafka_q_len(rk->rk_background.q) > 0)
- rd_kafka_log(rk, LOG_INFO, "BGQUEUE",
- "Purging %d unserved events from background queue",
- rd_kafka_q_len(rk->rk_background.q));
- rd_kafka_q_disable(rk->rk_background.q);
- rd_kafka_q_purge(rk->rk_background.q);
-
- rd_kafka_dbg(rk, GENERIC, "BGQUEUE", "Background queue thread exiting");
-
- rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BACKGROUND);
-
- rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
-
- return 0;
-}
-
-
-/**
- * @brief Create the background thread.
- *
- * @locks_acquired rk_init_lock
- * @locks_required rd_kafka_wrlock()
- */
-rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
-#ifndef _WIN32
- sigset_t newset, oldset;
-#endif
-
- if (rk->rk_background.q) {
- rd_snprintf(errstr, errstr_size,
- "Background thread already created");
- return RD_KAFKA_RESP_ERR__CONFLICT;
- }
-
- rk->rk_background.q = rd_kafka_q_new(rk);
-
- mtx_lock(&rk->rk_init_lock);
- rk->rk_init_wait_cnt++;
-
-#ifndef _WIN32
-        /* Block all signals in newly created threads.
-         * To avoid a race condition we block all signals in the calling
-         * thread, from which the new thread inherits its sigmask, and
-         * then restore the calling thread's original sigmask when we're
-         * done creating the thread. */
- sigemptyset(&oldset);
- sigfillset(&newset);
- if (rk->rk_conf.term_sig) {
- struct sigaction sa_term = {.sa_handler =
- rd_kafka_term_sig_handler};
- sigaction(rk->rk_conf.term_sig, &sa_term, NULL);
- }
- pthread_sigmask(SIG_SETMASK, &newset, &oldset);
-#endif
-
-
- if ((thrd_create(&rk->rk_background.thread,
- rd_kafka_background_thread_main, rk)) !=
- thrd_success) {
- rd_snprintf(errstr, errstr_size,
- "Failed to create background thread: %s",
- rd_strerror(errno));
- rd_kafka_q_destroy_owner(rk->rk_background.q);
- rk->rk_background.q = NULL;
- rk->rk_init_wait_cnt--;
- mtx_unlock(&rk->rk_init_lock);
-
-#ifndef _WIN32
- /* Restore sigmask of caller */
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
- return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- }
-
- mtx_unlock(&rk->rk_init_lock);
-
-#ifndef _WIN32
- /* Restore sigmask of caller */
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c
deleted file mode 100644
index e8fc27b11..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c
+++ /dev/null
@@ -1,5867 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#if defined(__MINGW32__)
-#include <ws2tcpip.h>
-#endif
-
-#ifndef _WIN32
-#define _GNU_SOURCE
-/*
- * AIX defines this and the value needs to be set correctly. For Solaris,
- * src/rd.h defines _POSIX_SOURCE to be 200809L, which corresponds to XPG7,
- * which itself is not compatible with _XOPEN_SOURCE on that platform.
- */
-#if !defined(_AIX) && !defined(__sun)
-#define _XOPEN_SOURCE
-#endif
-#include <signal.h>
-#endif
-
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#include <ctype.h>
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_proto.h"
-#include "rdkafka_buf.h"
-#include "rdkafka_request.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_interceptor.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_txnmgr.h"
-#include "rdkafka_fetcher.h"
-#include "rdtime.h"
-#include "rdcrc32.h"
-#include "rdrand.h"
-#include "rdkafka_lz4.h"
-#if WITH_SSL
-#include <openssl/err.h>
-#endif
-#include "rdendian.h"
-#include "rdunittest.h"
-
-
-static const int rd_kafka_max_block_ms = 1000;
-
-const char *rd_kafka_broker_state_names[] = {
- "INIT", "DOWN", "TRY_CONNECT", "CONNECT", "SSL_HANDSHAKE",
- "AUTH_LEGACY", "UP", "UPDATE", "APIVERSION_QUERY", "AUTH_HANDSHAKE",
- "AUTH_REQ"};
-
-const char *rd_kafka_secproto_names[] = {
- [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext",
- [RD_KAFKA_PROTO_SSL] = "ssl",
- [RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext",
- [RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl",
- NULL};
-
-
-/**
- * @returns true for logical brokers (e.g., coordinators) without an address set
- *
- * @locks_required rkb_lock
- */
-#define rd_kafka_broker_is_addrless(rkb) (*(rkb)->rkb_nodename == '\0')
-
-/**
- * @returns true if the broker needs a persistent connection
- * @locality broker thread
- */
-static RD_INLINE rd_bool_t
-rd_kafka_broker_needs_persistent_connection(rd_kafka_broker_t *rkb) {
- return rkb->rkb_persistconn.internal ||
- rd_atomic32_get(&rkb->rkb_persistconn.coord);
-}
-
-
-/**
- * @returns > 0 if a connection to this broker is needed, else 0.
- * @locality broker thread
- * @locks none
- */
-static RD_INLINE int rd_kafka_broker_needs_connection(rd_kafka_broker_t *rkb) {
- return rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT &&
- !rd_kafka_terminating(rkb->rkb_rk) &&
- !rd_kafka_fatal_error_code(rkb->rkb_rk) &&
- (!rkb->rkb_rk->rk_conf.sparse_connections ||
- rd_kafka_broker_needs_persistent_connection(rkb));
-}
-
-
-static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko);
-static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb);
-
-
-#define rd_kafka_broker_terminating(rkb) \
- (rd_refcnt_get(&(rkb)->rkb_refcnt) <= 1)
-
-
-/**
- * Construct broker nodename.
- */
-static void rd_kafka_mk_nodename(char *dest,
- size_t dsize,
- const char *name,
- uint16_t port) {
- rd_snprintf(dest, dsize, "%s:%hu", name, port);
-}
-
-/**
- * Construct descriptive broker name
- */
-static void rd_kafka_mk_brokername(char *dest,
- size_t dsize,
- rd_kafka_secproto_t proto,
- const char *nodename,
- int32_t nodeid,
- rd_kafka_confsource_t source) {
-
- /* Prepend protocol name to brokername, unless it is a
- * standard plaintext or logical broker in which case we
- * omit the protocol part. */
- if (proto != RD_KAFKA_PROTO_PLAINTEXT && source != RD_KAFKA_LOGICAL) {
- int r = rd_snprintf(dest, dsize, "%s://",
- rd_kafka_secproto_names[proto]);
- if (r >= (int)dsize) /* Skip proto name if it won't fit. */
- r = 0;
-
- dest += r;
- dsize -= r;
- }
-
- if (nodeid == RD_KAFKA_NODEID_UA)
- rd_snprintf(dest, dsize, "%s%s", nodename,
- source == RD_KAFKA_LOGICAL
- ? ""
- : (source == RD_KAFKA_INTERNAL ? "/internal"
- : "/bootstrap"));
- else
- rd_snprintf(dest, dsize, "%s/%" PRId32, nodename, nodeid);
-}
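-
-/* Editor's note: illustrative outputs of the naming scheme above
- * (hypothetical host "b1" on port 9092):
- *   plaintext bootstrap, no nodeid: "b1:9092/bootstrap"
- *   ssl broker with nodeid 3:      "ssl://b1:9092/3"
- *   logical broker (nodeid unset): just the nodename with no proto
- *                                  prefix and no suffix. */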
-
-
-/**
- * @brief Enable protocol feature(s) for the current broker.
- *
- * @locks broker_lock MUST be held
- * @locality broker thread
- */
-static void rd_kafka_broker_feature_enable(rd_kafka_broker_t *rkb,
- int features) {
- if (features & rkb->rkb_features)
- return;
-
- rkb->rkb_features |= features;
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE,
- "FEATURE", "Updated enabled protocol features +%s to %s",
- rd_kafka_features2str(features),
- rd_kafka_features2str(rkb->rkb_features));
-}
-
-
-/**
- * @brief Disable protocol feature(s) for the current broker.
- *
- * @locks broker_lock MUST be held
- * @locality broker thread
- */
-static void rd_kafka_broker_feature_disable(rd_kafka_broker_t *rkb,
- int features) {
- if (!(features & rkb->rkb_features))
- return;
-
- rkb->rkb_features &= ~features;
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE,
- "FEATURE", "Updated enabled protocol features -%s to %s",
- rd_kafka_features2str(features),
- rd_kafka_features2str(rkb->rkb_features));
-}
-
-
-/**
- * @brief Set protocol feature(s) for the current broker.
- *
- * @remark This replaces the previous feature set.
- *
- * @locality broker thread
- * @locks rd_kafka_broker_lock()
- */
-static void rd_kafka_broker_features_set(rd_kafka_broker_t *rkb, int features) {
- if (rkb->rkb_features == features)
- return;
-
- rkb->rkb_features = features;
- rd_rkb_dbg(rkb, BROKER, "FEATURE",
- "Updated enabled protocol features to %s",
- rd_kafka_features2str(rkb->rkb_features));
-}
-
-
-/**
- * @brief Check and return supported ApiVersion for \p ApiKey.
- *
- * @returns the highest supported ApiVersion in the specified range
- *          (inclusive), or -1 if the ApiKey is not supported or if no
- *          ApiVersion in the range matches.
- *          The current feature set is also returned in \p featuresp.
- * @locks none
- * @locality any
- */
-int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
- int16_t ApiKey,
- int16_t minver,
- int16_t maxver,
- int *featuresp) {
- struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey};
- struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp;
-
- rd_kafka_broker_lock(rkb);
- if (featuresp)
- *featuresp = rkb->rkb_features;
-
- if (rkb->rkb_features & RD_KAFKA_FEATURE_UNITTEST) {
- /* For unit tests let the broker support everything. */
- rd_kafka_broker_unlock(rkb);
- return maxver;
- }
-
- retp =
- bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt,
- sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp);
- if (retp)
- ret = *retp;
- rd_kafka_broker_unlock(rkb);
-
- if (!retp)
- return -1;
-
- if (ret.MaxVer < maxver) {
- if (ret.MaxVer < minver)
- return -1;
- else
- return ret.MaxVer;
- } else if (ret.MinVer > maxver)
- return -1;
- else
- return maxver;
-}
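-
-/* Editor's sketch: the version negotiation above reduces to a range
- * intersection. A minimal standalone restatement of the same logic
- * (RD_UNUSED is rd.h's unused-attribute macro): */
-static RD_UNUSED int16_t rd_ut_pick_api_version(int16_t broker_min,
- int16_t broker_max,
- int16_t minver,
- int16_t maxver) {
- /* Highest version inside both [broker_min..broker_max] and
-  * [minver..maxver], or -1 if the ranges do not overlap. */
- int16_t hi = broker_max < maxver ? broker_max : maxver;
- int16_t lo = broker_min > minver ? broker_min : minver;
- return hi >= lo ? hi : -1;
-}
-/* E.g., broker [0..7] vs caller [3..9] -> 7;
- *       broker [5..7] vs caller [0..3] -> -1 (no overlap). */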
-
-
-/**
- * @brief Set broker state.
- *
- * \c rkb->rkb_state is the previous state, while
- * \p state is the new state.
- *
- * @locks rd_kafka_broker_lock() MUST be held.
- * @locality broker thread
- */
-void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state) {
- rd_bool_t trigger_monitors = rd_false;
-
- if ((int)rkb->rkb_state == state)
- return;
-
- rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE",
- "%s: Broker changed state %s -> %s", rkb->rkb_name,
- rd_kafka_broker_state_names[rkb->rkb_state],
- rd_kafka_broker_state_names[state]);
-
- if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
- /* no-op */
- } else if (state == RD_KAFKA_BROKER_STATE_DOWN &&
- !rkb->rkb_down_reported) {
- /* Propagate ALL_BROKERS_DOWN event if all brokers are
- * now down, unless we're terminating.
- * Only trigger for brokers that have an address set,
- * e.g., not logical brokers that lost their address. */
- if (rd_atomic32_add(&rkb->rkb_rk->rk_broker_down_cnt, 1) ==
- rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) -
- rd_atomic32_get(
- &rkb->rkb_rk->rk_broker_addrless_cnt) &&
- !rd_kafka_broker_is_addrless(rkb) &&
- !rd_kafka_terminating(rkb->rkb_rk))
- rd_kafka_op_err(
- rkb->rkb_rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
- "%i/%i brokers are down",
- rd_atomic32_get(&rkb->rkb_rk->rk_broker_down_cnt),
- rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) -
- rd_atomic32_get(
- &rkb->rkb_rk->rk_broker_addrless_cnt));
- rkb->rkb_down_reported = 1;
-
- } else if (rd_kafka_broker_state_is_up(state) &&
- rkb->rkb_down_reported) {
- rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1);
- rkb->rkb_down_reported = 0;
- }
-
- if (rkb->rkb_source != RD_KAFKA_INTERNAL) {
- if (rd_kafka_broker_state_is_up(state) &&
- !rd_kafka_broker_state_is_up(rkb->rkb_state)) {
- /* ~Up -> Up */
- rd_atomic32_add(&rkb->rkb_rk->rk_broker_up_cnt, 1);
-
- trigger_monitors = rd_true;
-
- if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
- rd_atomic32_add(
- &rkb->rkb_rk->rk_logical_broker_up_cnt, 1);
-
- } else if (rd_kafka_broker_state_is_up(rkb->rkb_state) &&
- !rd_kafka_broker_state_is_up(state)) {
- /* Up -> ~Up */
- rd_atomic32_sub(&rkb->rkb_rk->rk_broker_up_cnt, 1);
-
- trigger_monitors = rd_true;
-
- if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
- rd_atomic32_sub(
- &rkb->rkb_rk->rk_logical_broker_up_cnt, 1);
- }
-
- /* If the connection or connection attempt failed and there
- * are coord_reqs or cgrp awaiting this coordinator to come up
- * then trigger the monitors so that rd_kafka_coord_req_fsm()
- * is triggered, which in turn may trigger a new coordinator
- * query. */
- if (state == RD_KAFKA_BROKER_STATE_DOWN &&
- rd_atomic32_get(&rkb->rkb_persistconn.coord) > 0)
- trigger_monitors = rd_true;
- }
-
- rkb->rkb_state = state;
- rkb->rkb_ts_state = rd_clock();
-
- if (trigger_monitors)
- rd_kafka_broker_trigger_monitors(rkb);
-
- /* Call on_broker_state_change interceptors */
- rd_kafka_interceptors_on_broker_state_change(
- rkb->rkb_rk, rkb->rkb_nodeid,
- rd_kafka_secproto_names[rkb->rkb_proto], rkb->rkb_origname,
- rkb->rkb_port, rd_kafka_broker_state_names[rkb->rkb_state]);
-
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-}
-
-
-/**
- * @brief Set, log and propagate broker fail error.
- *
- * @param rkb Broker connection that failed.
- * @param level Syslog level. LOG_DEBUG will not be logged unless debugging
- * is enabled.
- * @param err The type of error that occurred.
- * @param fmt Format string.
- * @param ap Format string arguments.
- *
- * @locks none
- * @locality broker thread
- */
-static void rd_kafka_broker_set_error(rd_kafka_broker_t *rkb,
- int level,
- rd_kafka_resp_err_t err,
- const char *fmt,
- va_list ap) {
- char errstr[512];
- char extra[128];
- size_t of = 0, ofe;
- rd_bool_t identical, suppress;
- int state_duration_ms = (int)((rd_clock() - rkb->rkb_ts_state) / 1000);
-
-
- /* If this is a logical broker we include its current nodename/address
- * in the log message. */
- rd_kafka_broker_lock(rkb);
- if (rkb->rkb_source == RD_KAFKA_LOGICAL &&
- !rd_kafka_broker_is_addrless(rkb)) {
- of = (size_t)rd_snprintf(errstr, sizeof(errstr),
- "%s: ", rkb->rkb_nodename);
- if (of >= sizeof(errstr))
- of = 0; /* If the nodename overflows the entire buffer
-  * we skip it completely since the error message
-  * itself is more important. */
- }
- rd_kafka_broker_unlock(rkb);
-
- ofe = (size_t)rd_vsnprintf(errstr + of, sizeof(errstr) - of, fmt, ap);
- if (ofe > sizeof(errstr) - of)
- ofe = sizeof(errstr) - of;
- of += ofe;
-
- /* Provide more meaningful error messages in certain cases */
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT &&
- !strcmp(errstr, "Disconnected")) {
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) {
- /* A disconnect while requesting ApiVersion typically
- * means we're connecting to a SSL-listener as
- * PLAINTEXT, but may also be caused by connecting to
- * a broker that does not support ApiVersion (<0.10). */
-
- if (rkb->rkb_proto != RD_KAFKA_PROTO_SSL &&
- rkb->rkb_proto != RD_KAFKA_PROTO_SASL_SSL)
- rd_kafka_broker_set_error(
- rkb, level, err,
- "Disconnected while requesting "
- "ApiVersion: "
- "might be caused by incorrect "
- "security.protocol configuration "
- "(connecting to a SSL listener?) or "
- "broker version is < 0.10 "
- "(see api.version.request)",
- ap /*ignored*/);
- else
- rd_kafka_broker_set_error(
- rkb, level, err,
- "Disconnected while requesting "
- "ApiVersion: "
- "might be caused by broker version "
- "< 0.10 (see api.version.request)",
- ap /*ignored*/);
- return;
-
- } else if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP &&
- state_duration_ms < 2000 /*2s*/ &&
- rkb->rkb_rk->rk_conf.security_protocol !=
- RD_KAFKA_PROTO_SASL_SSL &&
- rkb->rkb_rk->rk_conf.security_protocol !=
- RD_KAFKA_PROTO_SASL_PLAINTEXT) {
- /* If disconnected shortly after transitioning to UP
- * state it typically means the broker listener is
- * configured for SASL authentication but the client
- * is not. */
- rd_kafka_broker_set_error(
- rkb, level, err,
- "Disconnected: verify that security.protocol "
- "is correctly configured, broker might "
- "require SASL authentication",
- ap /*ignored*/);
- return;
- }
- }
-
- /* Check if error is identical to last error (prior to appending
- * the variable suffix "after Xms in state Y"), if so we should
- * suppress it. */
- identical = err == rkb->rkb_last_err.err &&
- !strcmp(rkb->rkb_last_err.errstr, errstr);
- suppress = identical && rd_interval(&rkb->rkb_suppress.fail_error,
- 30 * 1000 * 1000 /*30s*/, 0) <= 0;
-
- /* Copy last error prior to adding extras */
- rkb->rkb_last_err.err = err;
- rd_strlcpy(rkb->rkb_last_err.errstr, errstr,
- sizeof(rkb->rkb_last_err.errstr));
-
- /* Time since last state change to help debug connection issues */
- ofe = rd_snprintf(extra, sizeof(extra), "after %dms in state %s",
- state_duration_ms,
- rd_kafka_broker_state_names[rkb->rkb_state]);
-
- /* Number of suppressed identical logs */
- if (identical && !suppress && rkb->rkb_last_err.cnt >= 1 &&
- ofe + 30 < sizeof(extra)) {
- size_t r =
- (size_t)rd_snprintf(extra + ofe, sizeof(extra) - ofe,
- ", %d identical error(s) suppressed",
- rkb->rkb_last_err.cnt);
- if (r < sizeof(extra) - ofe)
- ofe += r;
- else
- ofe = sizeof(extra);
- }
-
- /* Append the extra info if there is enough room */
- if (ofe > 0 && of + ofe + 4 < sizeof(errstr))
- rd_snprintf(errstr + of, sizeof(errstr) - of, " (%s)", extra);
-
- /* Don't log interrupt-wakeups when terminating */
- if (err == RD_KAFKA_RESP_ERR__INTR && rd_kafka_terminating(rkb->rkb_rk))
- suppress = rd_true;
-
- if (!suppress)
- rkb->rkb_last_err.cnt = 1;
- else
- rkb->rkb_last_err.cnt++;
-
- rd_rkb_dbg(rkb, BROKER, "FAIL", "%s (%s)%s%s", errstr,
- rd_kafka_err2name(err),
- identical ? ": identical to last error" : "",
- suppress ? ": error log suppressed" : "");
-
- if (level != LOG_DEBUG && (level <= LOG_CRIT || !suppress)) {
- rd_kafka_log(rkb->rkb_rk, level, "FAIL", "%s: %s",
- rkb->rkb_name, errstr);
-
- /* Send ERR op to application for processing. */
- rd_kafka_q_op_err(rkb->rkb_rk->rk_rep, err, "%s: %s",
- rkb->rkb_name, errstr);
- }
-}
-
-
-/**
- * @brief Failure propagation to application.
- *
- * Will tear down connection to broker and trigger a reconnect.
- *
- * \p level is the log level: levels <= LOG_INFO are logged, while
- * LOG_DEBUG is only emitted when debugging is enabled.
- *
- * @locality broker thread
- */
-void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
- int level,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- va_list ap;
- rd_kafka_bufq_t tmpq_waitresp, tmpq;
- int old_state;
- rd_kafka_toppar_t *rktp;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- if (rkb->rkb_transport) {
- rd_kafka_transport_close(rkb->rkb_transport);
- rkb->rkb_transport = NULL;
-
- if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP)
- rd_atomic32_add(&rkb->rkb_c.disconnects, 1);
- }
-
- rkb->rkb_req_timeouts = 0;
-
- if (rkb->rkb_recv_buf) {
- rd_kafka_buf_destroy(rkb->rkb_recv_buf);
- rkb->rkb_recv_buf = NULL;
- }
-
- va_start(ap, fmt);
- rd_kafka_broker_set_error(rkb, level, err, fmt, ap);
- va_end(ap);
-
- rd_kafka_broker_lock(rkb);
-
- /* If we're currently asking for ApiVersion and the connection
- * went down it probably means the broker does not support that request
- * and tore down the connection. In this case we disable that feature
- * flag. */
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY)
- rd_kafka_broker_feature_disable(rkb,
- RD_KAFKA_FEATURE_APIVERSION);
-
- /* Set broker state */
- old_state = rkb->rkb_state;
- rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN);
-
- /* Unlock broker since a requeue will try to lock it. */
- rd_kafka_broker_unlock(rkb);
-
- rd_atomic64_set(&rkb->rkb_c.ts_send, 0);
- rd_atomic64_set(&rkb->rkb_c.ts_recv, 0);
-
- /*
- * Purge all buffers
- * (put bufs on a temporary queue since bufs may be requeued,
- * make sure outstanding requests are re-enqueued before
- * bufs on outbufs queue.)
- */
- rd_kafka_bufq_init(&tmpq_waitresp);
- rd_kafka_bufq_init(&tmpq);
- rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps);
- rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs);
- rd_atomic32_init(&rkb->rkb_blocking_request_cnt, 0);
-
- /* Purge the in-flight buffers (might get re-enqueued in case
- * of retries). */
- rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err);
-
- /* Purge the waiting-in-output-queue buffers,
- * might also get re-enqueued. */
- rd_kafka_bufq_purge(rkb, &tmpq,
- /* If failure was caused by a timeout,
- * adjust the error code for in-queue requests. */
- err == RD_KAFKA_RESP_ERR__TIMED_OUT
- ? RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE
- : err);
-
- /* Update bufq for connection reset:
- * - Purge connection-setup requests from outbufs since they will be
- * reissued on the next connect.
- * - Reset any partially sent buffer's offset.
- */
- rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs);
-
- /* Extra debugging for tracking termination-hang issues:
- * show what is keeping this broker from decommissioning. */
- if (rd_kafka_terminating(rkb->rkb_rk) &&
- !rd_kafka_broker_terminating(rkb)) {
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM",
- "terminating: broker still has %d refcnt(s), "
- "%" PRId32 " buffer(s), %d partition(s)",
- rd_refcnt_get(&rkb->rkb_refcnt),
- rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
- rkb->rkb_toppar_cnt);
- rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs);
- }
-
- /* If this broker acts as the preferred (follower) replica for any
- * partition, delegate the partition back to the leader. */
- TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
- rd_kafka_toppar_lock(rktp);
- if (unlikely(rktp->rktp_broker != rkb)) {
- /* Currently migrating away from this
- * broker, skip. */
- rd_kafka_toppar_unlock(rktp);
- continue;
- }
- rd_kafka_toppar_unlock(rktp);
-
- if (rktp->rktp_leader_id != rktp->rktp_broker_id) {
- rd_kafka_toppar_delegate_to_leader(rktp);
- }
- }
-
- /* Query for topic leaders to quickly pick up on failover. */
- if (err != RD_KAFKA_RESP_ERR__DESTROY &&
- old_state >= RD_KAFKA_BROKER_STATE_UP)
- rd_kafka_metadata_refresh_known_topics(
- rkb->rkb_rk, NULL, rd_true /*force*/, "broker down");
-}
-
-
-
-/**
- * @brief Handle broker connection close.
- *
- * @locality broker thread
- */
-void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- const char *errstr) {
- int log_level = LOG_ERR;
-
- if (!rkb->rkb_rk->rk_conf.log_connection_close) {
- /* Silence all connection closes */
- log_level = LOG_DEBUG;
-
- } else {
- /* Silence close logs for connections that are idle,
- * it is most likely the broker's idle connection
- * reaper kicking in.
- *
- * Indications there might be an error and not an
- * idle disconnect:
- * - If the connection age is low a disconnect
- * typically indicates a failure, such as protocol mismatch.
- * - If the connection hasn't been idle long enough.
- * - There are outstanding requests, or requests enqueued.
- *
- * For non-idle connections, adjust log level:
- * - more than one request in-flight: LOG_WARNING
- * - else: LOG_INFO
- */
- rd_ts_t now = rd_clock();
- rd_ts_t minidle =
- RD_MAX(60 * 1000 /*60s*/,
- rkb->rkb_rk->rk_conf.socket_timeout_ms) *
- 1000;
- int inflight = rd_kafka_bufq_cnt(&rkb->rkb_waitresps);
- int inqueue = rd_kafka_bufq_cnt(&rkb->rkb_outbufs);
-
- if (rkb->rkb_ts_state + minidle < now &&
- rd_atomic64_get(&rkb->rkb_c.ts_send) + minidle < now &&
- inflight + inqueue == 0)
- log_level = LOG_DEBUG;
- else if (inflight > 1)
- log_level = LOG_WARNING;
- else
- log_level = LOG_INFO;
- }
-
- rd_kafka_broker_fail(rkb, log_level, err, "%s", errstr);
-}
-
-
-/**
- * @brief Purge requests in \p rkbq matching request \p ApiKey
- * and partition \p rktp.
- *
- * @warning ApiKey must be RD_KAFKAP_Produce
- *
- * @returns the number of purged buffers.
- *
- * @locality broker thread
- */
-static int rd_kafka_broker_bufq_purge_by_toppar(rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbq,
- int64_t ApiKey,
- rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err) {
- rd_kafka_buf_t *rkbuf, *tmp;
- int cnt = 0;
-
- rd_assert(ApiKey == RD_KAFKAP_Produce);
-
- TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) {
-
- if (rkbuf->rkbuf_reqhdr.ApiKey != ApiKey ||
- rkbuf->rkbuf_u.Produce.batch.rktp != rktp ||
- /* Skip partially sent buffers and let them transmit.
- * The alternative would be to kill the connection here,
- * which is more drastic and costly. */
- rd_slice_offset(&rkbuf->rkbuf_reader) > 0)
- continue;
-
- rd_kafka_bufq_deq(rkbq, rkbuf);
-
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
- cnt++;
- }
-
- return cnt;
-}
-
-
-/**
- * Scan bufq for buffer timeouts, trigger buffer callback on timeout.
- *
- * If \p partial_cntp is non-NULL, each partially sent buffer will
- * increment the provided counter.
- *
- * @param ApiKey Only match requests with this ApiKey, or -1 for all.
- * @param now If 0, all buffers will time out, else the current clock.
- * @param description "N requests timed out <description>", e.g., "in flight".
- * Only used if log_first_n > 0.
- * @param log_first_n Log the first N request timeouts.
- *
- * @returns the number of timed out buffers.
- *
- * @locality broker thread
- */
-static int rd_kafka_broker_bufq_timeout_scan(rd_kafka_broker_t *rkb,
- int is_waitresp_q,
- rd_kafka_bufq_t *rkbq,
- int *partial_cntp,
- int16_t ApiKey,
- rd_kafka_resp_err_t err,
- rd_ts_t now,
- const char *description,
- int log_first_n) {
- rd_kafka_buf_t *rkbuf, *tmp;
- int cnt = 0;
- int idx = -1;
- const rd_kafka_buf_t *holb;
-
-restart:
- holb = TAILQ_FIRST(&rkbq->rkbq_bufs);
-
- TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) {
- rd_kafka_broker_state_t pre_state, post_state;
-
- idx++;
-
- if (likely(now && rkbuf->rkbuf_ts_timeout > now))
- continue;
-
- if (ApiKey != -1 && rkbuf->rkbuf_reqhdr.ApiKey != ApiKey)
- continue;
-
- if (partial_cntp && rd_slice_offset(&rkbuf->rkbuf_reader) > 0)
- (*partial_cntp)++;
-
- /* Convert rkbuf_ts_sent to elapsed time since request */
- if (rkbuf->rkbuf_ts_sent)
- rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
- else
- rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq;
-
- rd_kafka_bufq_deq(rkbq, rkbuf);
-
- if (now && cnt < log_first_n) {
- char holbstr[256];
- /* Head of line blocking:
- * If this is not the first request in queue, but the
- * initial first request did not time out,
- * it typically means the first request is a
- * long-running blocking one, holding up the
- * subsequent requests.
- * In this case log what is likely holding up the
- * requests and what caused this request to time out. */
- if (holb && holb == TAILQ_FIRST(&rkbq->rkbq_bufs)) {
- rd_snprintf(
- holbstr, sizeof(holbstr),
- ": possibly held back by "
- "preceeding%s %sRequest with "
- "timeout in %dms",
- (holb->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING)
- ? " blocking"
- : "",
- rd_kafka_ApiKey2str(
- holb->rkbuf_reqhdr.ApiKey),
- (int)((holb->rkbuf_ts_timeout - now) /
- 1000));
- /* Only log the HOLB once */
- holb = NULL;
- } else {
- *holbstr = '\0';
- }
-
- rd_rkb_log(
- rkb, LOG_NOTICE, "REQTMOUT",
- "Timed out %sRequest %s "
- "(after %" PRId64 "ms, timeout #%d)%s",
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- description, rkbuf->rkbuf_ts_sent / 1000, cnt,
- holbstr);
- }
-
- if (is_waitresp_q &&
- rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
- rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0)
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-
- pre_state = rd_kafka_broker_get_state(rkb);
-
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
- cnt++;
-
- /* If the buf_callback() triggered a broker state change
- * (typically through broker_fail()) we can't trust the
- * queue we are scanning to not have been touched, so we
- * either restart the scan or bail out (if broker is now down),
- * depending on the new state. #2326 */
- post_state = rd_kafka_broker_get_state(rkb);
- if (pre_state != post_state) {
- /* If the new state is DOWN it means broker_fail()
- * was called which may have modified the queues,
- * to keep things safe we stop scanning this queue. */
- if (post_state == RD_KAFKA_BROKER_STATE_DOWN)
- break;
- /* Else start scanning the queue from the beginning. */
- goto restart;
- }
- }
-
- return cnt;
-}
-
-
-/**
- * Scan the wait-response and outbuf queues for message timeouts.
- *
- * Locality: Broker thread
- */
-static void rd_kafka_broker_timeout_scan(rd_kafka_broker_t *rkb, rd_ts_t now) {
- int inflight_cnt, retry_cnt, outq_cnt;
- int partial_cnt = 0;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- /* In-flight requests waiting for response */
- inflight_cnt = rd_kafka_broker_bufq_timeout_scan(
- rkb, 1, &rkb->rkb_waitresps, NULL, -1, RD_KAFKA_RESP_ERR__TIMED_OUT,
- now, "in flight", 5);
- /* Requests in retry queue */
- retry_cnt = rd_kafka_broker_bufq_timeout_scan(
- rkb, 0, &rkb->rkb_retrybufs, NULL, -1,
- RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in retry queue", 0);
- /* Requests in local queue not sent yet.
- * partial_cnt is included in outq_cnt and denotes a request
- * that has been partially transmitted. */
- outq_cnt = rd_kafka_broker_bufq_timeout_scan(
- rkb, 0, &rkb->rkb_outbufs, &partial_cnt, -1,
- RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in output queue", 0);
-
- if (inflight_cnt + retry_cnt + outq_cnt + partial_cnt > 0) {
- rd_rkb_log(rkb, LOG_WARNING, "REQTMOUT",
- "Timed out %i in-flight, %i retry-queued, "
- "%i out-queue, %i partially-sent requests",
- inflight_cnt, retry_cnt, outq_cnt, partial_cnt);
-
- rkb->rkb_req_timeouts += inflight_cnt + outq_cnt;
- rd_atomic64_add(&rkb->rkb_c.req_timeouts,
- inflight_cnt + outq_cnt);
-
- /* If this was a partially sent request that timed out, or the
- * number of timed out requests have reached the
- * socket.max.fails threshold, we need to take down the
- * connection. */
- if (partial_cnt > 0 ||
- (rkb->rkb_rk->rk_conf.socket_max_fails &&
- rkb->rkb_req_timeouts >=
- rkb->rkb_rk->rk_conf.socket_max_fails &&
- rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP)) {
- char rttinfo[32];
- /* Print average RTT (if avail) to help diagnose. */
- rd_avg_calc(&rkb->rkb_avg_rtt, now);
- if (rkb->rkb_avg_rtt.ra_v.avg)
- rd_snprintf(rttinfo, sizeof(rttinfo),
- " (average rtt %.3fms)",
- (float)(rkb->rkb_avg_rtt.ra_v.avg /
- 1000.0f));
- else
- rttinfo[0] = 0;
- rd_kafka_broker_fail(rkb, LOG_ERR,
- RD_KAFKA_RESP_ERR__TIMED_OUT,
- "%i request(s) timed out: "
- "disconnect%s",
- rkb->rkb_req_timeouts, rttinfo);
- }
- }
-}
-
-
-
-static ssize_t rd_kafka_broker_send(rd_kafka_broker_t *rkb, rd_slice_t *slice) {
- ssize_t r;
- char errstr[128];
-
- rd_kafka_assert(rkb->rkb_rk,
- rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP);
- rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport);
-
- r = rd_kafka_transport_send(rkb->rkb_transport, slice, errstr,
- sizeof(errstr));
-
- if (r == -1) {
- rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
- "Send failed: %s", errstr);
- rd_atomic64_add(&rkb->rkb_c.tx_err, 1);
- return -1;
- }
-
- rd_atomic64_add(&rkb->rkb_c.tx_bytes, r);
- rd_atomic64_add(&rkb->rkb_c.tx, 1);
- return r;
-}
-
-
-
-static int rd_kafka_broker_resolve(rd_kafka_broker_t *rkb,
- const char *nodename,
- rd_bool_t reset_cached_addr) {
- const char *errstr;
- int save_idx = 0;
-
- if (!*nodename && rkb->rkb_source == RD_KAFKA_LOGICAL) {
- rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__RESOLVE,
- "Logical broker has no address yet");
- return -1;
- }
-
- if (rkb->rkb_rsal &&
- (reset_cached_addr ||
- rkb->rkb_ts_rsal_last +
- (rkb->rkb_rk->rk_conf.broker_addr_ttl * 1000) <
- rd_clock())) {
- /* Address list has expired. */
-
- /* Save the address index to make sure we still round-robin
- * if we get the same address list back */
- save_idx = rkb->rkb_rsal->rsal_curr;
-
- rd_sockaddr_list_destroy(rkb->rkb_rsal);
- rkb->rkb_rsal = NULL;
- }
-
- if (!rkb->rkb_rsal) {
- /* Resolve */
- rkb->rkb_rsal = rd_getaddrinfo(
- nodename, RD_KAFKA_PORT_STR, AI_ADDRCONFIG,
- rkb->rkb_rk->rk_conf.broker_addr_family, SOCK_STREAM,
- IPPROTO_TCP, rkb->rkb_rk->rk_conf.resolve_cb,
- rkb->rkb_rk->rk_conf.opaque, &errstr);
-
- if (!rkb->rkb_rsal) {
- rd_kafka_broker_fail(
- rkb, LOG_ERR, RD_KAFKA_RESP_ERR__RESOLVE,
- "Failed to resolve '%s': %s", nodename, errstr);
- return -1;
- } else {
- rkb->rkb_ts_rsal_last = rd_clock();
- /* Continue at previous round-robin position */
- if (rkb->rkb_rsal->rsal_cnt > save_idx)
- rkb->rkb_rsal->rsal_curr = save_idx;
- }
- }
-
- return 0;
-}
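-
-/* Editor's note: an illustrative walk-through of the round-robin
- * preservation above (hypothetical addresses): a nodename resolving to
- * [A, B, C] with rsal_curr == 2 that expires and re-resolves to the
- * same three addresses continues at index 2 rather than starting over
- * at A; if the fresh list is shorter than the saved index, iteration
- * simply restarts from the list's default position. */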
-
-
-static void rd_kafka_broker_buf_enq0(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf) {
- rd_ts_t now;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- if (rkb->rkb_rk->rk_conf.sparse_connections &&
- rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT) {
- /* Sparse connections:
- * Trigger connection when a new request is enqueued. */
- rkb->rkb_persistconn.internal++;
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(rkb,
- RD_KAFKA_BROKER_STATE_TRY_CONNECT);
- rd_kafka_broker_unlock(rkb);
- }
-
- now = rd_clock();
- rkbuf->rkbuf_ts_enq = now;
- rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_SENT;
-
- /* Calculate request attempt timeout */
- rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now);
-
- if (likely(rkbuf->rkbuf_prio == RD_KAFKA_PRIO_NORMAL)) {
- /* Insert request at tail of queue */
- TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs, rkbuf,
- rkbuf_link);
-
- } else {
- /* Insert request after any requests with a higher or
- * equal priority.
- * Also make sure the request is added after any partially
- * sent request (of any prio).
- * We need to check whether the buf corrid is set rather than
- * rkbuf_of, since SSL_write may return 0 and expect the
- * exact same arguments on the next call. */
- rd_kafka_buf_t *prev, *after = NULL;
-
- TAILQ_FOREACH(prev, &rkb->rkb_outbufs.rkbq_bufs, rkbuf_link) {
- if (prev->rkbuf_prio < rkbuf->rkbuf_prio &&
- prev->rkbuf_corrid == 0)
- break;
- after = prev;
- }
-
- if (after)
- TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs, after,
- rkbuf, rkbuf_link);
- else
- TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs, rkbuf,
- rkbuf_link);
- }
-
- rd_atomic32_add(&rkb->rkb_outbufs.rkbq_cnt, 1);
- if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
- rd_atomic32_add(&rkb->rkb_outbufs.rkbq_msg_cnt,
- rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
-}
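-
-/* Editor's note: an illustrative ordering example for the priority
- * insert above (hypothetical queue): with outbufs = [HIGH (partially
- * sent, corrid set), NORMAL], enqueuing a new HIGH buffer places it
- * after the partially sent HIGH request (which must finish
- * transmitting first) but before the NORMAL request. */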
-
-
-/**
- * Finalize a stuffed rkbuf for sending to broker.
- */
-static void rd_kafka_buf_finalize(rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) {
- size_t totsize;
-
- rd_assert(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE));
-
- if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
- /* Empty struct tags */
- rd_kafka_buf_write_i8(rkbuf, 0);
- }
-
- /* Calculate total request buffer length. */
- totsize = rd_buf_len(&rkbuf->rkbuf_buf) - 4;
-
- /* Set up a buffer reader for sending the buffer. */
- rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
-
- /**
- * Update request header fields
- */
- /* Total request length */
- rd_kafka_buf_update_i32(rkbuf, 0, (int32_t)totsize);
-
- /* ApiVersion */
- rd_kafka_buf_update_i16(rkbuf, 4 + 2, rkbuf->rkbuf_reqhdr.ApiVersion);
-}
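-
-/* Editor's note on the magic offsets above: a Kafka request is framed as
- *   offset 0: Size (int32, length of everything that follows)
- *   offset 4: ApiKey (int16)
- *   offset 6: ApiVersion (int16)  <- hence the "4 + 2" above
- *   offset 8: CorrId (int32), followed by ClientId etc.
- * Size excludes its own four bytes, which is why totsize is
- * rd_buf_len() - 4. */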
-
-
-void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
-
-
- rkbuf->rkbuf_cb = resp_cb;
- rkbuf->rkbuf_opaque = opaque;
-
- rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
-
- rd_kafka_broker_buf_enq0(rkb, rkbuf);
-}
-
-
-/**
- * Enqueue buffer on broker's xmit queue, but fail buffer immediately
- * if broker is not up.
- *
- * Locality: broker thread
- */
-static int rd_kafka_broker_buf_enq2(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf) {
- if (unlikely(rkb->rkb_source == RD_KAFKA_INTERNAL)) {
- /* Fail request immediately if this is the internal broker. */
- rd_kafka_buf_callback(rkb->rkb_rk, rkb,
- RD_KAFKA_RESP_ERR__TRANSPORT, NULL,
- rkbuf);
- return -1;
- }
-
- rd_kafka_broker_buf_enq0(rkb, rkbuf);
-
- return 0;
-}
-
-
-
-/**
- * Enqueue buffer for transmission.
- * Responses are enqueued on 'replyq' (RD_KAFKA_OP_RECV_BUF)
- *
- * Locality: any thread
- */
-void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
-
- assert(rkbuf->rkbuf_rkb == rkb);
- if (resp_cb) {
- rkbuf->rkbuf_replyq = replyq;
- rkbuf->rkbuf_cb = resp_cb;
- rkbuf->rkbuf_opaque = opaque;
- } else {
- rd_dassert(!replyq.q);
- }
-
- /* Buffers that have not yet been "made" (RD_KAFKA_OP_F_NEED_MAKE)
-  * will be finalized after the make callback has run. */
- if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE))
- rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
-
- if (thrd_is_current(rkb->rkb_thread)) {
- rd_kafka_broker_buf_enq2(rkb, rkbuf);
-
- } else {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF);
- rko->rko_u.xbuf.rkbuf = rkbuf;
- rd_kafka_q_enq(rkb->rkb_ops, rko);
- }
-}
-
-
-
-/**
- * @returns the current broker state change version.
- * Pass this value to future rd_kafka_brokers_wait_state_change() calls
- * to avoid the race condition where a state-change happens between
- * an initial call to some API that fails and the subsequent
- * .._wait_state_change() call.
- */
-int rd_kafka_brokers_get_state_version(rd_kafka_t *rk) {
- int version;
- mtx_lock(&rk->rk_broker_state_change_lock);
- version = rk->rk_broker_state_change_version;
- mtx_unlock(&rk->rk_broker_state_change_lock);
- return version;
-}
-
-/**
- * @brief Wait at most \p timeout_ms for any state change for any broker.
- * \p stored_version is the value previously returned by
- * rd_kafka_brokers_get_state_version() prior to another API call
- * that failed due to invalid state.
- *
- * Triggers:
- * - broker state changes
- * - broker transitioning from blocking to non-blocking
- * - partition leader changes
- * - group state changes
- *
- * @remark There is no guarantee that a state change actually took place.
- *
- * @returns 1 if a state change was signaled (maybe), else 0 (timeout)
- *
- * @locality any thread
- */
-int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk,
- int stored_version,
- int timeout_ms) {
- int r;
- mtx_lock(&rk->rk_broker_state_change_lock);
- if (stored_version != rk->rk_broker_state_change_version)
- r = 1;
- else
- r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd,
- &rk->rk_broker_state_change_lock,
- timeout_ms) == thrd_success;
- mtx_unlock(&rk->rk_broker_state_change_lock);
- return r;
-}
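-
-/* Editor's sketch of the intended call pattern for the two functions
- * above; rd_ut_try_operation() is hypothetical and stands in for any
- * API call that can fail because a broker is in the wrong state,
- * everything else is the real API defined in this file. */
-extern int rd_ut_try_operation(rd_kafka_t *rk); /* hypothetical */
-static RD_UNUSED int rd_ut_wait_state_change_example(rd_kafka_t *rk) {
- int version;
- do {
- /* Read the version BEFORE the attempt so a state change
-  * racing with the attempt is not lost. */
- version = rd_kafka_brokers_get_state_version(rk);
- if (!rd_ut_try_operation(rk))
- return 0; /* success */
- } while (rd_kafka_brokers_wait_state_change(rk, version, 100));
- return -1; /* timed out waiting for a state change */
-}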
-
-
-/**
- * @brief Same as rd_kafka_brokers_wait_state_change() but will trigger
- * the wakeup asynchronously through the provided \p eonce.
- *
- * If the eonce was added to the wait list its reference count
- * will have been updated; this reference is later removed by
- * rd_kafka_broker_state_change_trigger_eonce() by calling trigger().
- *
- * @returns 1 if the \p eonce was added to the wait-broker-state-changes list,
- * or 0 if the \p stored_version is outdated in which case the
- * caller should redo the broker lookup.
- */
-int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
- int stored_version,
- rd_kafka_enq_once_t *eonce) {
- int r = 1;
- mtx_lock(&rk->rk_broker_state_change_lock);
-
- if (stored_version != rk->rk_broker_state_change_version)
- r = 0;
- else {
- rd_kafka_enq_once_add_source(eonce, "wait broker state change");
- rd_list_add(&rk->rk_broker_state_change_waiters, eonce);
- }
-
- mtx_unlock(&rk->rk_broker_state_change_lock);
- return r;
-}
-
-
-/**
- * @brief eonce trigger callback for rd_list_apply() call in
- * rd_kafka_brokers_broadcast_state_change()
- */
-static int rd_kafka_broker_state_change_trigger_eonce(void *elem,
- void *opaque) {
- rd_kafka_enq_once_t *eonce = elem;
- rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
- "broker state change");
- return 0; /* remove eonce from list */
-}
-
-
-/**
- * @brief Broadcast broker state change to listeners, if any.
- *
- * @locality any thread
- */
-void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk) {
-
- rd_kafka_dbg(rk, GENERIC, "BROADCAST", "Broadcasting state change");
-
- mtx_lock(&rk->rk_broker_state_change_lock);
-
- /* Bump version */
- rk->rk_broker_state_change_version++;
-
- /* Trigger waiters */
- rd_list_apply(&rk->rk_broker_state_change_waiters,
- rd_kafka_broker_state_change_trigger_eonce, NULL);
-
- /* Broadcast to listeners */
- cnd_broadcast(&rk->rk_broker_state_change_cnd);
-
- mtx_unlock(&rk->rk_broker_state_change_lock);
-}
-
-
-/**
- * @returns a random broker (with refcnt increased) with matching \p state
- * and where the \p filter function returns 0.
- *
- * Uses reservoir sampling.
- *
- * @param is_up If true, match any broker that is up (UP or UPDATE state)
- *              and ignore \p state.
- * @param filtered_cnt Optional pointer to integer which will be set to the
- * number of brokers that match the \p state or \p is_up but
- * were filtered out by \p filter.
- * @param filter is an optional callback used to filter out undesired brokers.
- * The filter function should return 1 to filter out a broker,
- * or 0 to keep it in the list of eligible brokers to return.
- * rd_kafka_broker_lock() is held during the filter callback.
- *
- *
- * @locks rd_kafka_*lock() MUST be held
- * @locality any
- */
-static rd_kafka_broker_t *
-rd_kafka_broker_random0(const char *func,
- int line,
- rd_kafka_t *rk,
- rd_bool_t is_up,
- int state,
- int *filtered_cnt,
- int (*filter)(rd_kafka_broker_t *rk, void *opaque),
- void *opaque) {
- rd_kafka_broker_t *rkb, *good = NULL;
- int cnt = 0;
- int fcnt = 0;
-
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
- continue;
-
- rd_kafka_broker_lock(rkb);
- if ((is_up && rd_kafka_broker_state_is_up(rkb->rkb_state)) ||
- (!is_up && (int)rkb->rkb_state == state)) {
- if (filter && filter(rkb, opaque)) {
- /* Filtered out */
- fcnt++;
- } else {
- if (cnt < 1 || rd_jitter(0, cnt) < 1) {
- if (good)
- rd_kafka_broker_destroy(good);
- rd_kafka_broker_keep_fl(func, line,
- rkb);
- good = rkb;
- }
- cnt += 1;
- }
- }
- rd_kafka_broker_unlock(rkb);
- }
-
- if (filtered_cnt)
- *filtered_cnt = fcnt;
-
- return good;
-}
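-
-/* Editor's sketch: the selection above is reservoir sampling with a
- * reservoir of size one. A standalone equivalent over an int array,
- * assuming rd_jitter(lo, hi) returns a uniform integer in [lo, hi]
- * (as it is used above): */
-static RD_UNUSED int rd_ut_reservoir_pick1(const int *arr, int len) {
- int i, cnt = 0, pick = -1;
- for (i = 0; i < len; i++) {
- /* Keep element i with probability 1/(cnt+1): the first
-  * always, the second half the time, and so on, which
-  * leaves every element selected with probability 1/len. */
- if (cnt < 1 || rd_jitter(0, cnt) < 1)
- pick = arr[i];
- cnt++;
- }
- return pick;
-}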
-
-#define rd_kafka_broker_random(rk, state, filter, opaque) \
- rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \
- NULL, filter, opaque)
-
-
-/**
- * @returns the broker (with refcnt increased) with the highest weight
- *          based on the provided weighing function.
- *
- * If multiple brokers share the same weight, reservoir sampling is used
- * to randomly select one.
- *
- * @param weight_cb Weighing function that should return the sort weight
- * for the given broker.
- * Higher weight is better.
- * A weight of <= 0 will filter out the broker.
- * The passed broker object is locked.
- * @param features (optional) Required broker features.
- *
- * @locks_required rk(read)
- * @locality any
- */
-static rd_kafka_broker_t *
-rd_kafka_broker_weighted(rd_kafka_t *rk,
- int (*weight_cb)(rd_kafka_broker_t *rkb),
- int features) {
- rd_kafka_broker_t *rkb, *good = NULL;
- int highest = 0;
- int cnt = 0;
-
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- int weight;
-
- rd_kafka_broker_lock(rkb);
- if (features && (rkb->rkb_features & features) != features)
- weight = 0;
- else
- weight = weight_cb(rkb);
- rd_kafka_broker_unlock(rkb);
-
- if (weight <= 0 || weight < highest)
- continue;
-
- if (weight > highest) {
- highest = weight;
- cnt = 0;
- }
-
- /* If same weight (cnt > 0), use reservoir sampling */
- if (cnt < 1 || rd_jitter(0, cnt) < 1) {
- if (good)
- rd_kafka_broker_destroy(good);
- rd_kafka_broker_keep(rkb);
- good = rkb;
- }
- cnt++;
- }
-
- return good;
-}
-
-/**
- * @brief Weighing function for selecting a usable broker connection,
- * promoting connections according to the scoring below.
- *
- * Priority order:
- * - is not a bootstrap broker
- * - least idle last 10 minutes (unless blocking)
- * - least idle hours (if above 10 minutes idle)
- * - is not a logical broker (these connections have dedicated use and should
- * preferably not be used for other purposes)
- * - is not blocking
- *
- * Will prefer the most recently used broker connection for two reasons:
- * - this connection is most likely to function properly.
- * - it allows truly idle connections to be killed by the broker's/LB's
- * idle connection reaper.
- *
- * Connection must be up.
- *
- * @locks_required rkb
- */
-static int rd_kafka_broker_weight_usable(rd_kafka_broker_t *rkb) {
- int weight = 0;
-
- if (!rd_kafka_broker_state_is_up(rkb->rkb_state))
- return 0;
-
- weight +=
- 2000 * (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb));
- weight += 10 * !RD_KAFKA_BROKER_IS_LOGICAL(rkb);
-
- if (likely(!rd_atomic32_get(&rkb->rkb_blocking_request_cnt))) {
- rd_ts_t tx_last = rd_atomic64_get(&rkb->rkb_c.ts_send);
- int idle = (int)((rd_clock() -
- (tx_last > 0 ? tx_last : rkb->rkb_ts_state)) /
- 1000000);
-
- weight += 1; /* is not blocking */
-
- /* Prefer least idle broker (based on last 10 minutes use) */
- if (idle < 0)
- ; /* clock going backwards? do nothing */
- else if (idle < 600 /*10 minutes*/)
- weight += 1000 + (600 - idle);
- else /* Else least idle hours (capped to 100h) */
- weight += 100 + (100 - RD_MIN((idle / 3600), 100));
- }
-
- return weight;
-}
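-
-/* Editor's worked example of the scoring above (hypothetical brokers,
- * all up and non-blocking):
- *   A: nodeid 3, not logical, idle 30s -> 2000 + 10 + 1 + 1570 = 3581
- *   B: logical coordinator, idle 30s   ->    0 +  0 + 1 + 1570 = 1571
- *   C: bootstrap (nodeid -1), idle 2h  ->    0 + 10 + 1 +  198 =  209
- * so the recently used regular broker A wins. */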
-
-
-/**
- * @brief Returns a random broker (with refcnt increased) in state \p state.
- *
- * Uses reservoir sampling.
- *
- * @param filter is optional, see rd_kafka_broker_random().
- *
- * @sa rd_kafka_broker_random
- *
- * @locks rd_kafka_*lock(rk) MUST be held.
- * @locality any thread
- */
-rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk,
- int state,
- int (*filter)(rd_kafka_broker_t *rkb,
- void *opaque),
- void *opaque,
- const char *reason) {
- rd_kafka_broker_t *rkb;
-
- rkb = rd_kafka_broker_random(rk, state, filter, opaque);
-
- if (!rkb && rk->rk_conf.sparse_connections) {
- /* Sparse connections:
- * If no eligible broker was found, schedule
- * a random broker for connecting. */
- rd_kafka_connect_any(rk, reason);
- }
-
- return rkb;
-}
-
-
-/**
- * @brief Returns a random broker (with refcnt increased) which is up.
- *
- * @param filtered_cnt optional, see rd_kafka_broker_random0().
- * @param filter is optional, see rd_kafka_broker_random0().
- *
- * @sa rd_kafka_broker_random
- *
- * @locks rd_kafka_*lock(rk) MUST be held.
- * @locality any thread
- */
-rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk,
- int *filtered_cnt,
- int (*filter)(rd_kafka_broker_t *rkb,
- void *opaque),
- void *opaque,
- const char *reason) {
- rd_kafka_broker_t *rkb;
-
- rkb = rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk,
- rd_true /*is_up*/, -1, filtered_cnt,
- filter, opaque);
-
- if (!rkb && rk->rk_conf.sparse_connections) {
- /* Sparse connections:
- * If no eligible broker was found, schedule
- * a random broker for connecting. */
- rd_kafka_connect_any(rk, reason);
- }
-
- return rkb;
-}
-
-
-/**
- * @brief Spend at most \p timeout_ms to acquire a usable (Up) broker.
- *
- * Prefers the most recently used broker, see rd_kafka_broker_weight_usable().
- *
- * @param features (optional) Required broker features.
- *
- * @returns A probably usable broker with increased refcount, or NULL on timeout
- * @locks rd_kafka_*lock() if !do_lock
- * @locality any
- *
- * @sa rd_kafka_broker_any_up()
- */
-rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk,
- int timeout_ms,
- rd_dolock_t do_lock,
- int features,
- const char *reason) {
- const rd_ts_t ts_end = rd_timeout_init(timeout_ms);
-
- while (1) {
- rd_kafka_broker_t *rkb;
- int remains;
- int version = rd_kafka_brokers_get_state_version(rk);
-
- if (do_lock)
- rd_kafka_rdlock(rk);
-
- rkb = rd_kafka_broker_weighted(
- rk, rd_kafka_broker_weight_usable, features);
-
- if (!rkb && rk->rk_conf.sparse_connections) {
- /* Sparse connections:
- * If no eligible broker was found, schedule
- * a random broker for connecting. */
- rd_kafka_connect_any(rk, reason);
- }
-
- if (do_lock)
- rd_kafka_rdunlock(rk);
-
- if (rkb)
- return rkb;
-
- remains = rd_timeout_remains(ts_end);
- if (rd_timeout_expired(remains))
- return NULL;
-
- rd_kafka_brokers_wait_state_change(rk, version, remains);
- }
-
- return NULL;
-}
-
-
-
-/**
- * @returns the broker handle for \p broker_id using cached metadata
- * information (if available) in state == \p state,
- * with refcount increased.
- *
- * Otherwise enqueues the \p eonce on the wait-state-change queue
- * which will be triggered on broker state changes.
- * It may also be triggered erroneously, so the caller
- * should call rd_kafka_broker_get_async() again when
- * the eonce is triggered.
- *
- * @locks none
- * @locality any thread
- */
-rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk,
- int32_t broker_id,
- int state,
- rd_kafka_enq_once_t *eonce) {
- int version;
- do {
- rd_kafka_broker_t *rkb;
-
- version = rd_kafka_brokers_get_state_version(rk);
-
- rd_kafka_rdlock(rk);
- rkb = rd_kafka_broker_find_by_nodeid0(rk, broker_id, state,
- rd_true);
- rd_kafka_rdunlock(rk);
-
- if (rkb)
- return rkb;
-
- } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce));
-
- return NULL; /* eonce added to wait list */
-}
-
-
-/**
- * @brief Asynchronously look up the current list of broker ids until available.
- * Bootstrap and logical brokers are excluded from the list.
- *
- * To be called repeatedly with a valid eonce until a non-NULL
- * list is returned.
- *
- * @param rk Client instance.
- * @param eonce For triggering asynchronously on state change
- * in case broker list isn't yet available.
- * @return List of int32_t with broker nodeids when ready, NULL when the eonce
- * was added to the wait list.
- */
-rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk,
- rd_kafka_enq_once_t *eonce) {
- rd_list_t *nodeids = NULL;
- int version, i, broker_cnt;
-
- do {
- rd_kafka_broker_t *rkb;
- version = rd_kafka_brokers_get_state_version(rk);
-
- rd_kafka_rdlock(rk);
- broker_cnt = rd_atomic32_get(&rk->rk_broker_cnt);
- if (nodeids) {
- if (broker_cnt > rd_list_cnt(nodeids)) {
- rd_list_destroy(nodeids);
- /* Will be recreated just after */
- nodeids = NULL;
- } else {
- rd_list_set_cnt(nodeids, 0);
- }
- }
- if (!nodeids) {
- nodeids = rd_list_new(0, NULL);
- rd_list_init_int32(nodeids, broker_cnt);
- }
- i = 0;
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- rd_kafka_broker_lock(rkb);
- if (rkb->rkb_nodeid != -1 &&
- !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
- rd_list_set_int32(nodeids, i++,
- rkb->rkb_nodeid);
- }
- rd_kafka_broker_unlock(rkb);
- }
- rd_kafka_rdunlock(rk);
-
- if (!rd_list_empty(nodeids))
- return nodeids;
- } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce));
-
- if (nodeids) {
- rd_list_destroy(nodeids);
- }
- return NULL; /* eonce added to wait list */
-}
-
-
-/**
- * @returns the current controller using cached metadata information,
- * and only if the broker's state == \p state.
- * The reference count is increased for the returned broker.
- *
- * @locks none
- * @locality any thread
- */
-
-static rd_kafka_broker_t *rd_kafka_broker_controller_nowait(rd_kafka_t *rk,
- int state) {
- rd_kafka_broker_t *rkb;
-
- rd_kafka_rdlock(rk);
-
- if (rk->rk_controllerid == -1) {
- rd_kafka_rdunlock(rk);
- rd_kafka_metadata_refresh_brokers(rk, NULL,
- "lookup controller");
- return NULL;
- }
-
- rkb = rd_kafka_broker_find_by_nodeid0(rk, rk->rk_controllerid, state,
- rd_true);
-
- rd_kafka_rdunlock(rk);
-
- return rkb;
-}
-
-
-/**
- * @returns the current controller using cached metadata information if
- * available in state == \p state, with refcount increased.
- *
- * Otherwise enqueues the \p eonce on the wait-controller queue
- * which will be triggered on controller updates or broker state
- * changes. It may also be triggered erroneously, so the caller
- * should call rd_kafka_broker_controller_async() again when
- * the eonce is triggered.
- *
- * @locks none
- * @locality any thread
- */
-rd_kafka_broker_t *
-rd_kafka_broker_controller_async(rd_kafka_t *rk,
- int state,
- rd_kafka_enq_once_t *eonce) {
- int version;
- do {
- rd_kafka_broker_t *rkb;
-
- version = rd_kafka_brokers_get_state_version(rk);
-
- rkb = rd_kafka_broker_controller_nowait(rk, state);
- if (rkb)
- return rkb;
-
- } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce));
-
- return NULL; /* eonce added to wait list */
-}
-
-
-/**
- * @returns the current controller using cached metadata information,
- * blocking up to \p abs_timeout for the controller to be known
- * and to reach state == \p state. The reference count is increased
- * for the returned broker.
- *
- * @locks none
- * @locality any thread
- */
-rd_kafka_broker_t *
-rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout) {
-
- while (1) {
- int version = rd_kafka_brokers_get_state_version(rk);
- rd_kafka_broker_t *rkb;
- int remains_ms;
-
- rkb = rd_kafka_broker_controller_nowait(rk, state);
- if (rkb)
- return rkb;
-
- remains_ms = rd_timeout_remains(abs_timeout);
- if (rd_timeout_expired(remains_ms))
- return NULL;
-
- rd_kafka_brokers_wait_state_change(rk, version, remains_ms);
- }
-}
-
-
-
-/**
- * Find a waitresp (rkbuf awaiting response) by the correlation id.
- */
-static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb,
- int32_t corrid) {
- rd_kafka_buf_t *rkbuf;
- rd_ts_t now = rd_clock();
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link)
- if (rkbuf->rkbuf_corrid == corrid) {
- /* Convert ts_sent to RTT */
- rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
- rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent);
-
- if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
- rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 1)
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-
- rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf);
- return rkbuf;
- }
- return NULL;
-}
-
-
-
-/**
- * Map a response message to a request.
- */
-static int rd_kafka_req_response(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_buf_t *req;
- int log_decode_errors = LOG_ERR;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
-
- /* Find corresponding request message by correlation id */
- if (unlikely(!(req = rd_kafka_waitresp_find(
- rkb, rkbuf->rkbuf_reshdr.CorrId)))) {
- /* unknown response. probably due to request timeout */
- rd_atomic64_add(&rkb->rkb_c.rx_corrid_err, 1);
- rd_rkb_dbg(rkb, BROKER, "RESPONSE",
- "Response for unknown CorrId %" PRId32
- " (timed out?)",
- rkbuf->rkbuf_reshdr.CorrId);
- rd_kafka_interceptors_on_response_received(
- rkb->rkb_rk, -1, rd_kafka_broker_name(rkb), rkb->rkb_nodeid,
- -1, -1, rkbuf->rkbuf_reshdr.CorrId, rkbuf->rkbuf_totlen, -1,
- RD_KAFKA_RESP_ERR__NOENT);
- rd_kafka_buf_destroy(rkbuf);
- return -1;
- }
-
- rd_rkb_dbg(rkb, PROTOCOL, "RECV",
- "Received %sResponse (v%hd, %" PRIusz
- " bytes, CorrId %" PRId32 ", rtt %.2fms)",
- rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey),
- req->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen,
- rkbuf->rkbuf_reshdr.CorrId,
- (float)req->rkbuf_ts_sent / 1000.0f);
-
- /* Copy request's header and certain flags to response object's
- * reqhdr for convenience. */
- rkbuf->rkbuf_reqhdr = req->rkbuf_reqhdr;
- rkbuf->rkbuf_flags |=
- (req->rkbuf_flags & RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK);
- rkbuf->rkbuf_ts_sent = req->rkbuf_ts_sent; /* copy rtt */
-
- /* Set up response reader slice starting past the response header */
- rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf,
- RD_KAFKAP_RESHDR_SIZE,
- rd_buf_len(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE);
-
- /* In case of flexibleVersion, skip the response header tags.
- * The ApiVersion request/response is different since it needs
- * to be backwards compatible and thus has no header tags. */
- if (req->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion)
- rd_kafka_buf_skip_tags(rkbuf);
-
- if (!rkbuf->rkbuf_rkb) {
- rkbuf->rkbuf_rkb = rkb;
- rd_kafka_broker_keep(rkbuf->rkbuf_rkb);
- } else
- rd_assert(rkbuf->rkbuf_rkb == rkb);
-
- /* Call callback. */
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, rkbuf, req);
-
- return 0;
-
-err_parse:
- rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, rkbuf->rkbuf_err, NULL, req);
- rd_kafka_buf_destroy(rkbuf);
- return -1;
-}
-
-
-
-int rd_kafka_recv(rd_kafka_broker_t *rkb) {
- rd_kafka_buf_t *rkbuf;
- ssize_t r;
- /* errstr is not set by buf_read errors, so default it here. */
- char errstr[512] = "Protocol parse failure";
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- const int log_decode_errors = LOG_ERR;
-
-
- /* It is impossible to estimate the correct size of the response
-  * so we split the read into two parts: first we read the protocol
-  * length and correlation id (i.e., the Response header), and then,
-  * once we know the full length of the response, we allocate a new
-  * buffer and call receive again.
-  * All this is done in an async fashion (i.e., coping with partial
-  * reads).
-  */
- if (!(rkbuf = rkb->rkb_recv_buf)) {
- /* No receive in progress: create new buffer */
-
- rkbuf = rd_kafka_buf_new(2, RD_KAFKAP_RESHDR_SIZE);
-
- rkb->rkb_recv_buf = rkbuf;
-
- /* Set up buffer reader for the response header. */
- rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_RESHDR_SIZE,
- RD_KAFKAP_RESHDR_SIZE);
- }
-
- rd_dassert(rd_buf_write_remains(&rkbuf->rkbuf_buf) > 0);
-
- r = rd_kafka_transport_recv(rkb->rkb_transport, &rkbuf->rkbuf_buf,
- errstr, sizeof(errstr));
- if (unlikely(r <= 0)) {
- if (r == 0)
- return 0; /* EAGAIN */
- err = RD_KAFKA_RESP_ERR__TRANSPORT;
- rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
- goto err;
- }
-
- rd_atomic64_set(&rkb->rkb_c.ts_recv, rd_clock());
-
- if (rkbuf->rkbuf_totlen == 0) {
- /* Packet length not known yet. */
-
- if (unlikely(rd_buf_write_pos(&rkbuf->rkbuf_buf) <
- RD_KAFKAP_RESHDR_SIZE)) {
- /* Need response header for packet length and corrid.
- * Wait for more data. */
- return 0;
- }
-
- rd_assert(!rkbuf->rkbuf_rkb);
- rkbuf->rkbuf_rkb = rkb; /* Protocol parsing code needs
- * the rkb for logging, but we don't
- * want to keep a reference to the
- * broker this early since that extra
- * refcount will mess with the broker's
- * refcount-based termination code. */
-
- /* Initialize reader */
- rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0,
- RD_KAFKAP_RESHDR_SIZE);
-
- /* Read protocol header */
- rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size);
- rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId);
-
- rkbuf->rkbuf_rkb = NULL; /* Reset */
-
- rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size;
-
- /* Make sure message size is within tolerable limits. */
- if (rkbuf->rkbuf_totlen < 4 /*CorrId*/ ||
- rkbuf->rkbuf_totlen >
- (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) {
- rd_snprintf(errstr, sizeof(errstr),
- "Invalid response size %" PRId32
- " (0..%i): "
- "increase receive.message.max.bytes",
- rkbuf->rkbuf_reshdr.Size,
- rkb->rkb_rk->rk_conf.recv_max_msg_size);
- err = RD_KAFKA_RESP_ERR__BAD_MSG;
- rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
- goto err;
- }
-
- rkbuf->rkbuf_totlen -= 4; /*CorrId*/
-
- if (rkbuf->rkbuf_totlen > 0) {
- /* Allocate another buffer that fits all data (short of
- * the common response header). We want all
- * data to be in contiguous memory. */
-
- rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf,
- rkbuf->rkbuf_totlen);
- }
- }
-
- if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE ==
- rkbuf->rkbuf_totlen) {
- /* Message is complete, pass it on to the original requester. */
- rkb->rkb_recv_buf = NULL;
- rd_atomic64_add(&rkb->rkb_c.rx, 1);
- rd_atomic64_add(&rkb->rkb_c.rx_bytes,
- rd_buf_write_pos(&rkbuf->rkbuf_buf));
- rd_kafka_req_response(rkb, rkbuf);
- }
-
- return 1;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- if (!strcmp(errstr, "Disconnected"))
- rd_kafka_broker_conn_closed(rkb, err, errstr);
- else
- rd_kafka_broker_fail(rkb, LOG_ERR, err, "Receive failed: %s",
- errstr);
- return -1;
-}
-
-
-/**
- * Linux version of socket_cb providing racefree CLOEXEC.
- */
-int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque) {
-#ifdef SOCK_CLOEXEC
- return socket(domain, type | SOCK_CLOEXEC, protocol);
-#else
- return rd_kafka_socket_cb_generic(domain, type, protocol, opaque);
-#endif
-}
-
-/**
- * Fallback version of socket_cb NOT providing racefree CLOEXEC,
- * but setting CLOEXEC after socket creation (if FD_CLOEXEC is defined).
- */
-int rd_kafka_socket_cb_generic(int domain,
- int type,
- int protocol,
- void *opaque) {
- int s;
- int on = 1;
- s = (int)socket(domain, type, protocol);
- if (s == -1)
- return -1;
-#ifdef FD_CLOEXEC
- if (fcntl(s, F_SETFD, FD_CLOEXEC, &on) == -1)
- fprintf(stderr,
- "WARNING: librdkafka: %s: "
- "fcntl(FD_CLOEXEC) failed: %s: ignoring\n",
- __FUNCTION__, rd_strerror(errno));
-#endif
- return s;
-}
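-
-/*
- * Illustrative sketch (hypothetical application code, not part of this
- * file): either behaviour can be supplied by the application through
- * the public configuration API. `my_socket_cb` below is an assumed
- * name that simply mirrors the Linux variant above:
- *
- *   static int my_socket_cb(int domain, int type, int protocol,
- *                           void *opaque) {
- *           (void)opaque;
- *           return socket(domain, type | SOCK_CLOEXEC, protocol);
- *   }
- *
- *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
- *   rd_kafka_conf_set_socket_cb(conf, my_socket_cb);
- */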
-
-
-
-/**
- * @brief Update the reconnect backoff.
- * Should be called when a connection is made, or when all addresses
- * a broker resolves to have been exhausted without a successful connect.
- *
- * @locality broker thread
- * @locks none
- */
-static void
-rd_kafka_broker_update_reconnect_backoff(rd_kafka_broker_t *rkb,
- const rd_kafka_conf_t *conf,
- rd_ts_t now) {
- int backoff;
-
- /* If last connection attempt was more than reconnect.backoff.max.ms
- * ago, reset the reconnect backoff to the initial
- * reconnect.backoff.ms value. */
- if (rkb->rkb_ts_reconnect + (conf->reconnect_backoff_max_ms * 1000) <
- now)
- rkb->rkb_reconnect_backoff_ms = conf->reconnect_backoff_ms;
-
- /* Apply -25%...+50% jitter to next backoff. */
- backoff = rd_jitter((int)((float)rkb->rkb_reconnect_backoff_ms * 0.75),
- (int)((float)rkb->rkb_reconnect_backoff_ms * 1.5));
-
- /* Cap to reconnect.backoff.max.ms. */
- backoff = RD_MIN(backoff, conf->reconnect_backoff_max_ms);
-
- /* Set time of next reconnect */
- rkb->rkb_ts_reconnect = now + (backoff * 1000);
- rkb->rkb_reconnect_backoff_ms = RD_MIN(
- rkb->rkb_reconnect_backoff_ms * 2, conf->reconnect_backoff_max_ms);
-}
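-
-/*
- * Worked example, assuming the default reconnect.backoff.ms=100 and
- * reconnect.backoff.max.ms=10000: consecutive failed connect cycles
- * yield nominal backoffs of 100, 200, 400, ... ms, doubling up to the
- * 10000 ms cap, with each attempt's actual delay jittered into
- * [0.75 * backoff, 1.5 * backoff] and then capped again, as exercised
- * by rd_ut_reconnect_backoff() below.
- */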
-
-
-/**
- * @brief Calculate time until next reconnect attempt.
- *
- * @returns the number of milliseconds to the next connection attempt, or 0
- * if immediate.
- * @locality broker thread
- * @locks none
- */
-
-static RD_INLINE int
-rd_kafka_broker_reconnect_backoff(const rd_kafka_broker_t *rkb, rd_ts_t now) {
- rd_ts_t remains;
-
- if (unlikely(rkb->rkb_ts_reconnect == 0))
- return 0; /* immediate */
-
- remains = rkb->rkb_ts_reconnect - now;
- if (remains <= 0)
- return 0; /* immediate */
-
- return (int)(remains / 1000);
-}
-
-
-/**
- * @brief Unittest for reconnect.backoff.ms
- */
-static int rd_ut_reconnect_backoff(void) {
- rd_kafka_broker_t rkb = RD_ZERO_INIT;
- rd_kafka_conf_t conf = {.reconnect_backoff_ms = 10,
- .reconnect_backoff_max_ms = 90};
- rd_ts_t now = 1000000;
- int backoff;
-
- rkb.rkb_reconnect_backoff_ms = conf.reconnect_backoff_ms;
-
- /* broker's backoff is the initial reconnect.backoff.ms=10 */
- rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
- backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
- RD_UT_ASSERT_RANGE(backoff, 7, 15, "%d");
-
- /* .. 20 */
- rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
- backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
- RD_UT_ASSERT_RANGE(backoff, 15, 30, "%d");
-
- /* .. 40 */
- rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
- backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
- RD_UT_ASSERT_RANGE(backoff, 30, 60, "%d");
-
- /* .. 80, the jitter is capped at reconnect.backoff.max.ms=90 */
- rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
- backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
- RD_UT_ASSERT_RANGE(backoff, 60, conf.reconnect_backoff_max_ms, "%d");
-
- /* .. 90, capped by reconnect.backoff.max.ms */
- rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
- backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
- RD_UT_ASSERT_RANGE(backoff, 67, conf.reconnect_backoff_max_ms, "%d");
-
- /* .. 90, should remain at capped value. */
- rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
- backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
- RD_UT_ASSERT_RANGE(backoff, 67, conf.reconnect_backoff_max_ms, "%d");
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Initiate asynchronous connection attempt to the next address
- * in the broker's address list.
- * While the connect is asynchronous and its IO served in the
- * CONNECT state, the initial name resolve is blocking.
- *
- * @returns -1 on error, 0 if broker does not have a hostname, or 1
- * if the connection is now in progress.
- */
-static int rd_kafka_broker_connect(rd_kafka_broker_t *rkb) {
- const rd_sockaddr_inx_t *sinx;
- char errstr[512];
- char nodename[RD_KAFKA_NODENAME_SIZE];
- rd_bool_t reset_cached_addr = rd_false;
-
- rd_rkb_dbg(rkb, BROKER, "CONNECT", "broker in state %s connecting",
- rd_kafka_broker_state_names[rkb->rkb_state]);
-
- rd_atomic32_add(&rkb->rkb_c.connects, 1);
-
- rd_kafka_broker_lock(rkb);
- rd_strlcpy(nodename, rkb->rkb_nodename, sizeof(nodename));
-
- /* If the nodename was changed since the last connect,
- * reset the address cache. */
- reset_cached_addr = (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch);
- rkb->rkb_connect_epoch = rkb->rkb_nodename_epoch;
- /* Logical brokers might not have a hostname set, in which case
- * we should not try to connect. */
- if (*nodename)
- rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_CONNECT);
- rd_kafka_broker_unlock(rkb);
-
- if (!*nodename) {
- rd_rkb_dbg(rkb, BROKER, "CONNECT",
- "broker has no address yet: postponing connect");
- return 0;
- }
-
- rd_kafka_broker_update_reconnect_backoff(rkb, &rkb->rkb_rk->rk_conf,
- rd_clock());
-
- if (rd_kafka_broker_resolve(rkb, nodename, reset_cached_addr) == -1)
- return -1;
-
- sinx = rd_sockaddr_list_next(rkb->rkb_rsal);
-
- rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport);
-
- if (!(rkb->rkb_transport = rd_kafka_transport_connect(
- rkb, sinx, errstr, sizeof(errstr)))) {
- rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
- "%s", errstr);
- return -1;
- }
-
- rkb->rkb_ts_connect = rd_clock();
-
- return 1;
-}
-
-
-/**
- * @brief Call when connection is ready to transition to fully functional
- * UP state.
- *
- * @locality Broker thread
- */
-void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) {
-
- rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight;
-
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP);
- rd_kafka_broker_unlock(rkb);
-
- /* Request metadata (async):
- * try locally known topics first and if there are none try
- * getting just the broker list. */
- if (rd_kafka_metadata_refresh_known_topics(
- NULL, rkb, rd_false /*don't force*/, "connected") ==
- RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected");
-}
-
-
-
-static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb);
-
-
-/**
- * @brief Parses and handles SaslMechanism response, transitions
- * the broker state.
- *
- */
-static void rd_kafka_broker_handle_SaslHandshake(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int32_t MechCnt;
- int16_t ErrorCode;
- int i = 0;
- char *mechs = "(n/a)";
- size_t msz, mof = 0;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return;
-
- if (err)
- goto err;
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- rd_kafka_buf_read_i32(rkbuf, &MechCnt);
-
- if (MechCnt < 0 || MechCnt > 100)
- rd_kafka_buf_parse_fail(
- rkbuf, "Invalid MechanismCount %" PRId32, MechCnt);
-
- /* Build a CSV string of supported mechanisms. */
- msz = RD_MIN(511, 1 + (MechCnt * 32));
- mechs = rd_alloca(msz);
- *mechs = '\0';
-
- for (i = 0; i < MechCnt; i++) {
- rd_kafkap_str_t mech;
- rd_kafka_buf_read_str(rkbuf, &mech);
-
- mof += rd_snprintf(mechs + mof, msz - mof, "%s%.*s",
- i ? "," : "", RD_KAFKAP_STR_PR(&mech));
-
- if (mof >= msz)
- break;
- }
-
- rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER,
- "SASLMECHS", "Broker supported SASL mechanisms: %s", mechs);
-
- if (ErrorCode) {
- err = ErrorCode;
- goto err;
- }
-
- /* Circle back to connect_auth() to start proper AUTH state. */
- rd_kafka_broker_connect_auth(rkb);
- return;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION,
- "SASL %s mechanism handshake failed: %s: "
- "broker's supported mechanisms: %s",
- rkb->rkb_rk->rk_conf.sasl.mechanisms,
- rd_kafka_err2str(err), mechs);
-}
-
-
-/**
- * @brief Transition state to:
- * - AUTH_HANDSHAKE (if SASL is configured and handshakes supported)
- *        - AUTH (if SASL is configured but a handshake is not required,
- *          not supported, or has already taken place.)
- * - UP (if SASL is not configured)
- *
- * @locks_acquired rkb
- */
-static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb) {
-
- if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT ||
- rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) {
-
- rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH",
- "Auth in state %s (handshake %ssupported)",
- rd_kafka_broker_state_names[rkb->rkb_state],
- (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)
- ? ""
- : "not ");
-
- /* Broker >= 0.10.0: send request to select mechanism */
- if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE &&
- (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) {
-
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE);
- rd_kafka_broker_unlock(rkb);
-
- rd_kafka_SaslHandshakeRequest(
- rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms,
- RD_KAFKA_NO_REPLYQ,
- rd_kafka_broker_handle_SaslHandshake, NULL);
- } else {
- /* Either Handshake succeeded (protocol selected)
- * or Handshakes were not supported.
- * In both cases continue with authentication. */
- char sasl_errstr[512];
-
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(
- rkb,
- (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ)
- ? RD_KAFKA_BROKER_STATE_AUTH_REQ
- : RD_KAFKA_BROKER_STATE_AUTH_LEGACY);
- rd_kafka_broker_unlock(rkb);
-
- if (rd_kafka_sasl_client_new(
- rkb->rkb_transport, sasl_errstr,
- sizeof(sasl_errstr)) == -1) {
- rd_kafka_broker_fail(
- rkb, LOG_ERR,
- RD_KAFKA_RESP_ERR__AUTHENTICATION,
- "Failed to initialize "
- "SASL authentication: %s",
- sasl_errstr);
- return;
- }
- }
-
- return;
- }
-
- /* No authentication required. */
- rd_kafka_broker_connect_up(rkb);
-}
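-
-/*
- * Summary of the state transitions implemented above together with
- * rd_kafka_broker_connect_done():
- *
- *   CONNECT -> [APIVERSION_QUERY] -> [AUTH_HANDSHAKE]
- *           -> [AUTH_REQ | AUTH_LEGACY] -> UP
- *
- * where the bracketed states are skipped when ApiVersion queries or
- * SASL, respectively, are not configured or not supported.
- */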
-
-
-/**
- * @brief Specify API versions to use for this connection.
- *
- * @param apis is an allocated list of supported API versions.
- * If NULL the default set will be used based on the
- * \p broker.version.fallback property.
- * @param api_cnt number of elements in \p apis
- *
- * @remark \p rkb takes ownership of \p apis.
- *
- * @locality Broker thread
- * @locks_required rkb
- */
-static void rd_kafka_broker_set_api_versions(rd_kafka_broker_t *rkb,
- struct rd_kafka_ApiVersion *apis,
- size_t api_cnt) {
-
- if (rkb->rkb_ApiVersions)
- rd_free(rkb->rkb_ApiVersions);
-
-
- if (!apis) {
- rd_rkb_dbg(
- rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION",
- "Using (configuration fallback) %s protocol features",
- rkb->rkb_rk->rk_conf.broker_version_fallback);
-
-
- rd_kafka_get_legacy_ApiVersions(
- rkb->rkb_rk->rk_conf.broker_version_fallback, &apis,
- &api_cnt, rkb->rkb_rk->rk_conf.broker_version_fallback);
-
- /* Make a copy to store on broker. */
- rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt);
- }
-
- rkb->rkb_ApiVersions = apis;
- rkb->rkb_ApiVersions_cnt = api_cnt;
-
- /* Update feature set based on supported broker APIs. */
- rd_kafka_broker_features_set(
- rkb, rd_kafka_features_check(rkb, apis, api_cnt));
-}
-
-
-/**
- * Handler for ApiVersion response.
- */
-static void rd_kafka_broker_handle_ApiVersion(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- struct rd_kafka_ApiVersion *apis = NULL;
- size_t api_cnt = 0;
- int16_t retry_ApiVersion = -1;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return;
-
- err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request, &apis,
- &api_cnt);
-
- /* Broker does not support our ApiVersionRequest version,
- * see if we can downgrade to an older version. */
- if (err == RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) {
- size_t i;
-
- /* Find the broker's highest supported version for
- * ApiVersionRequest and use that to retry. */
- for (i = 0; i < api_cnt; i++) {
- if (apis[i].ApiKey == RD_KAFKAP_ApiVersion) {
- retry_ApiVersion =
- RD_MIN(request->rkbuf_reqhdr.ApiVersion - 1,
- apis[i].MaxVer);
- break;
- }
- }
-
- /* Before v3 the broker would not return the ApiVersionRequest
- * versions it supports, so we go straight for version 0. */
- if (i == api_cnt && request->rkbuf_reqhdr.ApiVersion > 0)
- retry_ApiVersion = 0;
-
- } else if (err == RD_KAFKA_RESP_ERR_INVALID_REQUEST) {
- rd_rkb_log(rkb, LOG_ERR, "APIVERSION",
- "ApiVersionRequest v%hd failed due to "
- "invalid request: "
- "check client.software.name (\"%s\") and "
- "client.software.version (\"%s\") "
- "for invalid characters: "
- "falling back to older request version",
- request->rkbuf_reqhdr.ApiVersion,
- rk->rk_conf.sw_name, rk->rk_conf.sw_version);
- retry_ApiVersion = 0;
- }
-
- if (err && apis)
- rd_free(apis);
-
- if (retry_ApiVersion != -1) {
- /* Retry request with a lower version */
- rd_rkb_dbg(
- rkb, BROKER | RD_KAFKA_DBG_FEATURE | RD_KAFKA_DBG_PROTOCOL,
- "APIVERSION",
- "ApiVersionRequest v%hd failed due to %s: "
- "retrying with v%hd",
- request->rkbuf_reqhdr.ApiVersion, rd_kafka_err2name(err),
- retry_ApiVersion);
- rd_kafka_ApiVersionRequest(
- rkb, retry_ApiVersion, RD_KAFKA_NO_REPLYQ,
- rd_kafka_broker_handle_ApiVersion, NULL);
- return;
- }
-
-
- if (err) {
- if (rkb->rkb_transport)
- rd_kafka_broker_fail(
- rkb, LOG_WARNING, RD_KAFKA_RESP_ERR__TRANSPORT,
- "ApiVersionRequest failed: %s: "
- "probably due to broker version < 0.10 "
- "(see api.version.request configuration)",
- rd_kafka_err2str(err));
- return;
- }
-
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_api_versions(rkb, apis, api_cnt);
- rd_kafka_broker_unlock(rkb);
-
- rd_kafka_broker_connect_auth(rkb);
-}
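-
-/*
- * Downgrade example (illustrative): if this client sends
- * ApiVersionRequest v3 but the broker's returned version list caps
- * the ApiVersion API at MaxVer=2, the UNSUPPORTED_VERSION path above
- * retries with RD_MIN(3 - 1, 2) = v2; a pre-v3 broker that returns no
- * usable version list at all is retried with v0.
- */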
-
-
-/**
- * Call when an asynchronous connection attempt completes, either
- * successfully (if errstr is NULL) or with a failure (errstr set).
- *
- * @locks_acquired rkb
- * @locality broker thread
- */
-void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr) {
-
- if (errstr) {
- /* Connect failed */
- rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
- "%s", errstr);
- return;
- }
-
- /* Connect succeeded */
- rkb->rkb_connid++;
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "CONNECTED",
- "Connected (#%d)", rkb->rkb_connid);
- rkb->rkb_max_inflight = 1; /* Hold back other requests until
- * ApiVersion, SaslHandshake, etc
- * are done. */
-
- rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN);
-
- rd_kafka_broker_lock(rkb);
-
- if (rkb->rkb_rk->rk_conf.api_version_request &&
- rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) {
- /* Use ApiVersion to query broker for supported API versions. */
- rd_kafka_broker_feature_enable(rkb,
- RD_KAFKA_FEATURE_APIVERSION);
- }
-
- if (!(rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION)) {
- /* Use configured broker.version.fallback to
- * figure out API versions.
- * In case broker.version.fallback indicates a version
- * that supports ApiVersionRequest it will update
- * rkb_features to have FEATURE_APIVERSION set which will
- * trigger an ApiVersionRequest below. */
- rd_kafka_broker_set_api_versions(rkb, NULL, 0);
- }
-
- if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) {
- /* Query broker for supported API versions.
- * This may fail with a disconnect on non-supporting brokers
- * so hold off any other requests until we get a response,
- * and if the connection is torn down we disable this feature.
- */
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_APIVERSION_QUERY);
- rd_kafka_broker_unlock(rkb);
-
- rd_kafka_ApiVersionRequest(
- rkb, -1 /* Use highest version we support */,
- RD_KAFKA_NO_REPLYQ, rd_kafka_broker_handle_ApiVersion,
- NULL);
- } else {
- rd_kafka_broker_unlock(rkb);
-
- /* Authenticate if necessary */
- rd_kafka_broker_connect_auth(rkb);
- }
-}
-
-
-
-/**
- * @brief Checks if the given API request+version is supported by the broker.
- * @returns 1 if supported, else 0.
- * @locality broker thread
- * @locks none
- */
-static RD_INLINE int rd_kafka_broker_request_supported(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf) {
- struct rd_kafka_ApiVersion skel = {.ApiKey =
- rkbuf->rkbuf_reqhdr.ApiKey};
- struct rd_kafka_ApiVersion *ret;
-
- if (unlikely(rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_ApiVersion))
- return 1; /* ApiVersion requests are used to detect
- * the supported API versions, so should always
- * be allowed through. */
-
- /* First try feature flags, if any, which may cover a larger
- * set of APIs. */
- if (rkbuf->rkbuf_features)
- return (rkb->rkb_features & rkbuf->rkbuf_features) ==
- rkbuf->rkbuf_features;
-
- /* Then try the ApiVersion map. */
- ret =
- bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt,
- sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp);
- if (!ret)
- return 0;
-
- return ret->MinVer <= rkbuf->rkbuf_reqhdr.ApiVersion &&
- rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer;
-}
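-
-/*
- * Minimal standalone sketch of the bsearch() range lookup used above,
- * with hypothetical stand-in types (the real comparator is
- * rd_kafka_ApiVersion_key_cmp; `table` is assumed sorted by ApiKey):
- *
- *   struct api_ver { int16_t ApiKey, MinVer, MaxVer; };
- *
- *   static int key_cmp(const void *a, const void *b) {
- *           int16_t ka = ((const struct api_ver *)a)->ApiKey;
- *           int16_t kb = ((const struct api_ver *)b)->ApiKey;
- *           return (ka > kb) - (ka < kb);
- *   }
- *
- *   struct api_ver skel = {.ApiKey = wanted_key};
- *   const struct api_ver *hit =
- *       bsearch(&skel, table, cnt, sizeof(*table), key_cmp);
- *   int ok = hit && hit->MinVer <= wanted_ver &&
- *            wanted_ver <= hit->MaxVer;
- */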
-
-
-/**
- * Send queued messages to broker
- *
- * Locality: io thread
- */
-int rd_kafka_send(rd_kafka_broker_t *rkb) {
- rd_kafka_buf_t *rkbuf;
- unsigned int cnt = 0;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
- rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
- (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) {
- ssize_t r;
- size_t pre_of = rd_slice_offset(&rkbuf->rkbuf_reader);
- rd_ts_t now;
-
- if (unlikely(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)) {
- /* Request has not been created/baked yet,
- * call its make callback. */
- rd_kafka_resp_err_t err;
-
- err = rkbuf->rkbuf_make_req_cb(
- rkb, rkbuf, rkbuf->rkbuf_make_opaque);
-
- rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_NEED_MAKE;
-
- /* Free the make_opaque */
- if (rkbuf->rkbuf_free_make_opaque_cb &&
- rkbuf->rkbuf_make_opaque) {
- rkbuf->rkbuf_free_make_opaque_cb(
- rkbuf->rkbuf_make_opaque);
- rkbuf->rkbuf_make_opaque = NULL;
- }
-
- if (unlikely(err)) {
- rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL,
- "MAKEREQ",
- "Failed to make %sRequest: %s",
- rd_kafka_ApiKey2str(
- rkbuf->rkbuf_reqhdr.ApiKey),
- rd_kafka_err2str(err));
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, err,
- NULL, rkbuf);
- continue;
- }
-
- rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
- }
-
- /* Check for broker support */
- if (unlikely(!rd_kafka_broker_request_supported(rkb, rkbuf))) {
- rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
- rd_rkb_dbg(
- rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "UNSUPPORTED",
- "Failing %sResponse "
- "(v%hd, %" PRIusz " bytes, CorrId %" PRId32
- "): "
- "request not supported by broker "
- "(missing api.version.request=false or "
- "incorrect broker.version.fallback config?)",
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen,
- rkbuf->rkbuf_reshdr.CorrId);
- rd_kafka_buf_callback(
- rkb->rkb_rk, rkb,
- RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, NULL,
- rkbuf);
- continue;
- }
-
- /* Set CorrId header field, unless this is the latter part
- * of a partial send in which case the corrid has already
- * been set.
- * Due to how SSL_write() will accept a buffer but still
- * return 0 in some cases, we can't rely on the buffer offset
- * but need to use corrid to check this. SSL_write() expects
- * us to send the same buffer again when 0 is returned.
- */
- if (rkbuf->rkbuf_corrid == 0 ||
- rkbuf->rkbuf_connid != rkb->rkb_connid) {
- rd_assert(rd_slice_offset(&rkbuf->rkbuf_reader) == 0);
- rkbuf->rkbuf_corrid = ++rkb->rkb_corrid;
- rd_kafka_buf_update_i32(rkbuf, 4 + 2 + 2,
- rkbuf->rkbuf_corrid);
- rkbuf->rkbuf_connid = rkb->rkb_connid;
- } else if (pre_of > RD_KAFKAP_REQHDR_SIZE) {
- rd_kafka_assert(NULL,
- rkbuf->rkbuf_connid == rkb->rkb_connid);
- }
-
- if (0) {
- rd_rkb_dbg(
- rkb, PROTOCOL, "SEND",
- "Send %s corrid %" PRId32
- " at "
- "offset %" PRIusz "/%" PRIusz,
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_corrid, pre_of,
- rd_slice_size(&rkbuf->rkbuf_reader));
- }
-
- if ((r = rd_kafka_broker_send(rkb, &rkbuf->rkbuf_reader)) == -1)
- return -1;
-
- now = rd_clock();
- rd_atomic64_set(&rkb->rkb_c.ts_send, now);
-
- /* Partial send? Continue next time. */
- if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0) {
- rd_rkb_dbg(
- rkb, PROTOCOL, "SEND",
- "Sent partial %sRequest "
- "(v%hd, "
- "%" PRIdsz "+%" PRIdsz "/%" PRIusz
- " bytes, "
- "CorrId %" PRId32 ")",
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion, (ssize_t)pre_of, r,
- rd_slice_size(&rkbuf->rkbuf_reader),
- rkbuf->rkbuf_corrid);
- return 0;
- }
-
- rd_rkb_dbg(rkb, PROTOCOL, "SEND",
- "Sent %sRequest (v%hd, %" PRIusz " bytes @ %" PRIusz
- ", "
- "CorrId %" PRId32 ")",
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion,
- rd_slice_size(&rkbuf->rkbuf_reader), pre_of,
- rkbuf->rkbuf_corrid);
-
- rd_atomic64_add(&rkb->rkb_c.reqtype[rkbuf->rkbuf_reqhdr.ApiKey],
- 1);
-
- /* Notify transport layer of full request sent */
- if (likely(rkb->rkb_transport != NULL))
- rd_kafka_transport_request_sent(rkb, rkbuf);
-
- /* Entire buffer sent, unlink from outbuf */
- rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_SENT;
-
- /* Store time for RTT calculation */
- rkbuf->rkbuf_ts_sent = now;
-
- /* Add to outbuf_latency averager */
- rd_avg_add(&rkb->rkb_avg_outbuf_latency,
- rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq);
-
- if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
- rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1)
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
-
- /* Put buffer on response wait list unless we are not
- * expecting a response (required_acks=0). */
- if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE))
- rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf);
- else { /* Call buffer callback for delivery report. */
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf);
- }
-
- cnt++;
- }
-
- return cnt;
-}
-
-
-/**
- * Add 'rkbuf' to broker 'rkb's retry queue.
- */
-void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
-
- /* Restore original replyq since replyq.q will have been NULLed
- * by buf_callback()/replyq_enq(). */
- if (!rkbuf->rkbuf_replyq.q && rkbuf->rkbuf_orig_replyq.q) {
- rkbuf->rkbuf_replyq = rkbuf->rkbuf_orig_replyq;
- rd_kafka_replyq_clear(&rkbuf->rkbuf_orig_replyq);
- }
-
- /* If called from another thread than rkb's broker thread
- * enqueue the buffer on the broker's op queue. */
- if (!thrd_is_current(rkb->rkb_thread)) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY);
- rko->rko_u.xbuf.rkbuf = rkbuf;
- rd_kafka_q_enq(rkb->rkb_ops, rko);
- return;
- }
-
- rd_rkb_dbg(rkb, PROTOCOL, "RETRY",
- "Retrying %sRequest (v%hd, %" PRIusz
- " bytes, retry %d/%d, "
- "prev CorrId %" PRId32 ") in %dms",
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion,
- rd_slice_size(&rkbuf->rkbuf_reader), rkbuf->rkbuf_retries,
- rkbuf->rkbuf_max_retries, rkbuf->rkbuf_corrid,
- rkb->rkb_rk->rk_conf.retry_backoff_ms);
-
- rd_atomic64_add(&rkb->rkb_c.tx_retries, 1);
-
- rkbuf->rkbuf_ts_retry =
- rd_clock() + (rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000);
- /* Precaution: time out the request if it hasn't moved from the
- * retry queue within the retry interval (such as when the broker is
- * down). */
- // FIXME: implement this properly.
- rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_retry + (5 * 1000 * 1000);
-
- /* Reset send offset */
- rd_slice_seek(&rkbuf->rkbuf_reader, 0);
- rkbuf->rkbuf_corrid = 0;
-
- rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf);
-}
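-
-/*
- * Timing example, assuming the default retry.backoff.ms=100: a retried
- * request becomes eligible to move back to the output queue once
- * rkbuf_ts_retry = now + 100 * 1000 us has passed, and the
- * precautionary timeout above additionally caps its stay in the retry
- * queue at 5 seconds.
- */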
-
-
-/**
- * Move buffers that have expired their retry backoff time from the
- * retry queue to the outbuf.
- */
-static void rd_kafka_broker_retry_bufs_move(rd_kafka_broker_t *rkb,
- rd_ts_t *next_wakeup) {
- rd_ts_t now = rd_clock();
- rd_kafka_buf_t *rkbuf;
- int cnt = 0;
-
- while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) {
- if (rkbuf->rkbuf_ts_retry > now) {
- if (rkbuf->rkbuf_ts_retry < *next_wakeup)
- *next_wakeup = rkbuf->rkbuf_ts_retry;
- break;
- }
-
- rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf);
-
- rd_kafka_broker_buf_enq0(rkb, rkbuf);
- cnt++;
- }
-
- if (cnt > 0)
- rd_rkb_dbg(rkb, BROKER, "RETRY",
- "Moved %d retry buffer(s) to output queue", cnt);
-}
-
-
-/**
- * @brief Propagate delivery report for entire message queue.
- *
- * @param err The error which will be set on each message.
- * @param status The status which will be set on each message.
- *
- * To avoid extra iterations, the \p err and \p status are set on each
- * message as it is popped off the OP_DR msgq in rd_kafka_poll() et al.
- */
-void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_resp_err_t err) {
- rd_kafka_t *rk = rkt->rkt_rk;
-
- if (unlikely(rd_kafka_msgq_len(rkmq) == 0))
- return;
-
- if (err && rd_kafka_is_transactional(rk))
- rd_atomic64_add(&rk->rk_eos.txn_dr_fails,
- rd_kafka_msgq_len(rkmq));
-
- /* Call on_acknowledgement() interceptors */
- rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq, err);
-
- if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE &&
- (!rk->rk_conf.dr_err_only || err)) {
- /* Pass all messages to application thread in one op. */
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_DR);
- rko->rko_err = err;
- rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt);
- rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
-
- /* Move all messages to op's msgq */
- rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq);
-
- rd_kafka_q_enq(rk->rk_rep, rko);
-
- } else {
- /* No delivery report callback. */
-
- /* Destroy the messages right away. */
- rd_kafka_msgq_purge(rk, rkmq);
- }
-}
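-
-/*
- * Application-side counterpart (hypothetical sketch): the OP_DR op
- * enqueued above surfaces as per-message delivery report callbacks
- * served from rd_kafka_poll():
- *
- *   static void dr_cb(rd_kafka_t *rk, const rd_kafka_message_t *m,
- *                     void *opaque) {
- *           if (m->err)
- *                   fprintf(stderr, "delivery failed: %s\n",
- *                           rd_kafka_err2str(m->err));
- *   }
- *
- *   rd_kafka_conf_set_dr_msg_cb(conf, dr_cb);
- */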
-
-
-/**
- * @brief Trigger delivery reports for implicitly acked messages.
- *
- * @locks none
- * @locality broker thread - either last or current leader
- */
-void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- uint64_t last_msgid) {
- rd_kafka_msgq_t acked = RD_KAFKA_MSGQ_INITIALIZER(acked);
- rd_kafka_msgq_t acked2 = RD_KAFKA_MSGQ_INITIALIZER(acked2);
- rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
-
- if (rktp->rktp_rkt->rkt_conf.required_acks != 0)
- status = RD_KAFKA_MSG_STATUS_PERSISTED;
-
- rd_kafka_msgq_move_acked(&acked, &rktp->rktp_xmit_msgq, last_msgid,
- status);
- rd_kafka_msgq_move_acked(&acked2, &rktp->rktp_msgq, last_msgid, status);
-
- /* Insert acked2 into acked in correct order */
- rd_kafka_msgq_insert_msgq(&acked, &acked2,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
-
- if (!rd_kafka_msgq_len(&acked))
- return;
-
- rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "IMPLICITACK",
- "%.*s [%" PRId32
- "] %d message(s) implicitly acked "
- "by subsequent batch success "
- "(msgids %" PRIu64 "..%" PRIu64
- ", "
- "last acked %" PRIu64 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_msgq_len(&acked),
- rd_kafka_msgq_first(&acked)->rkm_u.producer.msgid,
- rd_kafka_msgq_last(&acked)->rkm_u.producer.msgid,
- last_msgid);
-
- /* Trigger delivery reports */
- rd_kafka_dr_msgq(rktp->rktp_rkt, &acked, RD_KAFKA_RESP_ERR_NO_ERROR);
-}
-
-
-
-/**
- * @brief Map existing partitions to this broker using the
- * toppar's leader_id. Only undelegated partitions
- * matching this broker are mapped.
- *
- * @locks none
- * @locality any
- */
-static void rd_kafka_broker_map_partitions(rd_kafka_broker_t *rkb) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_topic_t *rkt;
- int cnt = 0;
-
- if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb))
- return;
-
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- int i;
-
- rd_kafka_topic_wrlock(rkt);
- for (i = 0; i < rkt->rkt_partition_cnt; i++) {
- rd_kafka_toppar_t *rktp = rkt->rkt_p[i];
-
- /* Only map undelegated partitions matching this
- * broker. */
- rd_kafka_toppar_lock(rktp);
- if (rktp->rktp_leader_id == rkb->rkb_nodeid &&
- !(rktp->rktp_broker && rktp->rktp_next_broker)) {
- rd_kafka_toppar_broker_update(
- rktp, rktp->rktp_leader_id, rkb,
- "broker node information updated");
- cnt++;
- }
- rd_kafka_toppar_unlock(rktp);
- }
- rd_kafka_topic_wrunlock(rkt);
- }
- rd_kafka_rdunlock(rk);
-
- rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_BROKER, "LEADER",
- "Mapped %d partition(s) to broker", cnt);
-}
-
-
-/**
- * @brief Broker id comparator
- */
-static int rd_kafka_broker_cmp_by_id(const void *_a, const void *_b) {
- const rd_kafka_broker_t *a = _a, *b = _b;
- return RD_CMP(a->rkb_nodeid, b->rkb_nodeid);
-}
-
-
-/**
- * @brief Set the broker logname (used in logs) to a copy of \p logname.
- *
- * @locality any
- * @locks none
- */
-static void rd_kafka_broker_set_logname(rd_kafka_broker_t *rkb,
- const char *logname) {
- mtx_lock(&rkb->rkb_logname_lock);
- if (rkb->rkb_logname)
- rd_free(rkb->rkb_logname);
- rkb->rkb_logname = rd_strdup(logname);
- mtx_unlock(&rkb->rkb_logname_lock);
-}
-
-
-
-/**
- * @brief Prepare destruction of the broker object.
- *
- * Since rd_kafka_broker_terminating() relies on the refcnt of the
- * broker to reach 1, we need to lose any self-references
- * to avoid a hang (waiting for refcnt decrease) on destruction.
- *
- * @locality broker thread
- * @locks none
- */
-static void rd_kafka_broker_prepare_destroy(rd_kafka_broker_t *rkb) {
- rd_kafka_broker_monitor_del(&rkb->rkb_coord_monitor);
-}
-
-
-/**
- * @brief Serve a broker op (an op posted by another thread to be handled by
- * this broker's thread).
- *
- * @returns true if calling op loop should break out, else false to continue.
- * @locality broker thread
- * @locks none
- */
-static RD_WARN_UNUSED_RESULT rd_bool_t
-rd_kafka_broker_op_serve(rd_kafka_broker_t *rkb, rd_kafka_op_t *rko) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_resp_err_t topic_err;
- rd_bool_t wakeup = rd_false;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- switch (rko->rko_type) {
- case RD_KAFKA_OP_NODE_UPDATE: {
- enum { _UPD_NAME = 0x1, _UPD_ID = 0x2 } updated = 0;
- char brokername[RD_KAFKA_NODENAME_SIZE];
-
- /* Need kafka_wrlock for updating rk_broker_by_id */
- rd_kafka_wrlock(rkb->rkb_rk);
- rd_kafka_broker_lock(rkb);
-
- if (strcmp(rkb->rkb_nodename, rko->rko_u.node.nodename)) {
- rd_rkb_dbg(rkb, BROKER, "UPDATE",
- "Nodename changed from %s to %s",
- rkb->rkb_nodename, rko->rko_u.node.nodename);
- rd_strlcpy(rkb->rkb_nodename, rko->rko_u.node.nodename,
- sizeof(rkb->rkb_nodename));
- rkb->rkb_nodename_epoch++;
- updated |= _UPD_NAME;
- }
-
- if (rko->rko_u.node.nodeid != -1 &&
- !RD_KAFKA_BROKER_IS_LOGICAL(rkb) &&
- rko->rko_u.node.nodeid != rkb->rkb_nodeid) {
- int32_t old_nodeid = rkb->rkb_nodeid;
- rd_rkb_dbg(rkb, BROKER, "UPDATE",
- "NodeId changed from %" PRId32
- " to %" PRId32,
- rkb->rkb_nodeid, rko->rko_u.node.nodeid);
-
- rkb->rkb_nodeid = rko->rko_u.node.nodeid;
-
- /* Update system thread name */
- rd_kafka_set_thread_sysname("rdk:broker%" PRId32,
- rkb->rkb_nodeid);
-
- /* Update broker_by_id sorted list */
- if (old_nodeid == -1)
- rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb);
- rd_list_sort(&rkb->rkb_rk->rk_broker_by_id,
- rd_kafka_broker_cmp_by_id);
-
- updated |= _UPD_ID;
- }
-
- rd_kafka_mk_brokername(brokername, sizeof(brokername),
- rkb->rkb_proto, rkb->rkb_nodename,
- rkb->rkb_nodeid, RD_KAFKA_LEARNED);
- if (strcmp(rkb->rkb_name, brokername)) {
- /* Update the name copy used for logging. */
- rd_kafka_broker_set_logname(rkb, brokername);
-
- rd_rkb_dbg(rkb, BROKER, "UPDATE",
- "Name changed from %s to %s", rkb->rkb_name,
- brokername);
- rd_strlcpy(rkb->rkb_name, brokername,
- sizeof(rkb->rkb_name));
- }
- rd_kafka_broker_unlock(rkb);
- rd_kafka_wrunlock(rkb->rkb_rk);
-
- if (updated & _UPD_NAME)
- rd_kafka_broker_fail(rkb, LOG_DEBUG,
- RD_KAFKA_RESP_ERR__TRANSPORT,
- "Broker hostname updated");
- else if (updated & _UPD_ID) {
- /* Map existing partitions to this broker. */
- rd_kafka_broker_map_partitions(rkb);
-
- /* If broker is currently in state up we need
- * to trigger a state change so it exits its
- * state&type based .._serve() loop. */
- rd_kafka_broker_lock(rkb);
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP)
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_UPDATE);
- rd_kafka_broker_unlock(rkb);
- }
-
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
- break;
- }
-
- case RD_KAFKA_OP_XMIT_BUF:
- rd_kafka_broker_buf_enq2(rkb, rko->rko_u.xbuf.rkbuf);
- rko->rko_u.xbuf.rkbuf = NULL; /* buffer now owned by broker */
- if (rko->rko_replyq.q) {
- /* Op will be reused for forwarding response. */
- rko = NULL;
- }
- break;
-
- case RD_KAFKA_OP_XMIT_RETRY:
- rd_kafka_broker_buf_retry(rkb, rko->rko_u.xbuf.rkbuf);
- rko->rko_u.xbuf.rkbuf = NULL;
- break;
-
- case RD_KAFKA_OP_PARTITION_JOIN:
- /*
- * Add partition to broker toppars
- */
- rktp = rko->rko_rktp;
- rd_kafka_toppar_lock(rktp);
-
- /* Abort join if instance is terminating */
- if (rd_kafka_terminating(rkb->rkb_rk) ||
- (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) {
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
- "Topic %s [%" PRId32
- "]: not joining broker: "
- "%s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_terminating(rkb->rkb_rk)
- ? "instance is terminating"
- : "partition removed");
-
- rd_kafka_broker_destroy(rktp->rktp_next_broker);
- rktp->rktp_next_broker = NULL;
- rd_kafka_toppar_unlock(rktp);
- break;
- }
-
- /* See if we are still the next broker */
- if (rktp->rktp_next_broker != rkb) {
- rd_rkb_dbg(
- rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
- "Topic %s [%" PRId32
- "]: not joining broker "
- "(next broker %s)",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rktp->rktp_next_broker
- ? rd_kafka_broker_name(rktp->rktp_next_broker)
- : "(none)");
-
- /* Need temporary refcount so we can safely unlock
- * after q_enq(). */
- rd_kafka_toppar_keep(rktp);
-
- /* No, forward this op to the new next broker. */
- rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko);
- rko = NULL;
-
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_destroy(rktp);
-
- break;
- }
-
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
- "Topic %s [%" PRId32
- "]: joining broker "
- "(rktp %p, %d message(s) queued)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rktp, rd_kafka_msgq_len(&rktp->rktp_msgq));
-
- rd_kafka_assert(NULL,
- !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB));
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_RKB;
- rd_kafka_toppar_keep(rktp);
- rd_kafka_broker_lock(rkb);
- TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink);
- rkb->rkb_toppar_cnt++;
- rd_kafka_broker_unlock(rkb);
- rktp->rktp_broker = rkb;
- rd_assert(!rktp->rktp_msgq_wakeup_q);
- rktp->rktp_msgq_wakeup_q = rd_kafka_q_keep(rkb->rkb_ops);
- rd_kafka_broker_keep(rkb);
-
- if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) {
- rd_kafka_broker_active_toppar_add(rkb, rktp, "joining");
-
- if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
- /* Wait for all outstanding requests from
- * the previous leader to finish before
- * producing anything to this new leader. */
- rd_kafka_idemp_drain_toppar(
- rktp,
- "wait for outstanding requests to "
- "finish before producing to "
- "new leader");
- }
- }
-
- rd_kafka_broker_destroy(rktp->rktp_next_broker);
- rktp->rktp_next_broker = NULL;
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
- break;
-
- case RD_KAFKA_OP_PARTITION_LEAVE:
- /*
- * Remove partition from broker toppars
- */
- rktp = rko->rko_rktp;
-
- /* If there is a topic-wide error, use it as error code
- * when failing messages below. */
- topic_err = rd_kafka_topic_get_error(rktp->rktp_rkt);
-
- rd_kafka_toppar_lock(rktp);
-
- /* Multiple PARTITION_LEAVEs are possible during partition
- * migration, make sure we're supposed to handle this one. */
- if (unlikely(rktp->rktp_broker != rkb)) {
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
- "Topic %s [%" PRId32
- "]: "
- "ignoring PARTITION_LEAVE: "
- "not delegated to broker (%s)",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rktp->rktp_broker
- ? rd_kafka_broker_name(rktp->rktp_broker)
- : "none");
- rd_kafka_toppar_unlock(rktp);
- break;
- }
- rd_kafka_toppar_unlock(rktp);
-
- /* Remove from fetcher list */
- rd_kafka_toppar_fetch_decide(rktp, rkb, 1 /*force remove*/);
-
- if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) {
- /* Purge any ProduceRequests for this toppar
- * in the output queue. */
- rd_kafka_broker_bufq_purge_by_toppar(
- rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp,
- RD_KAFKA_RESP_ERR__RETRY);
- }
-
-
- rd_kafka_toppar_lock(rktp);
-
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
- "Topic %s [%" PRId32
- "]: leaving broker "
- "(%d messages in xmitq, next broker %s, rktp %p)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_msgq_len(&rktp->rktp_xmit_msgq),
- rktp->rktp_next_broker
- ? rd_kafka_broker_name(rktp->rktp_next_broker)
- : "(none)",
- rktp);
-
- /* Insert xmitq(broker-local) messages to the msgq(global)
- * at their sorted position to maintain ordering. */
- rd_kafka_msgq_insert_msgq(
- &rktp->rktp_msgq, &rktp->rktp_xmit_msgq,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
-
- if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER)
- rd_kafka_broker_active_toppar_del(rkb, rktp, "leaving");
-
- rd_kafka_broker_lock(rkb);
- TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink);
- rkb->rkb_toppar_cnt--;
- rd_kafka_broker_unlock(rkb);
- rd_kafka_broker_destroy(rktp->rktp_broker);
- if (rktp->rktp_msgq_wakeup_q) {
- rd_kafka_q_destroy(rktp->rktp_msgq_wakeup_q);
- rktp->rktp_msgq_wakeup_q = NULL;
- }
- rktp->rktp_broker = NULL;
-
- rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB);
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_RKB;
-
- if (rktp->rktp_next_broker) {
- /* There is a next broker we need to migrate to. */
- rko->rko_type = RD_KAFKA_OP_PARTITION_JOIN;
- rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko);
- rko = NULL;
- } else {
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
- "Topic %s [%" PRId32
- "]: no next broker, "
- "failing %d message(s) in partition queue",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_msgq_len(&rktp->rktp_msgq));
- rd_kafka_assert(NULL, rd_kafka_msgq_len(
- &rktp->rktp_xmit_msgq) == 0);
- rd_kafka_dr_msgq(
- rktp->rktp_rkt, &rktp->rktp_msgq,
- rd_kafka_terminating(rkb->rkb_rk)
- ? RD_KAFKA_RESP_ERR__DESTROY
- : (topic_err
- ? topic_err
- : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION));
- }
-
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_destroy(rktp); /* from JOIN */
-
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
- break;
-
- case RD_KAFKA_OP_TERMINATE:
- /* nop: just a wake-up. */
- rd_rkb_dbg(rkb, BROKER, "TERM",
- "Received TERMINATE op in state %s: "
- "%d refcnts, %d toppar(s), %d active toppar(s), "
- "%d outbufs, %d waitresps, %d retrybufs",
- rd_kafka_broker_state_names[rkb->rkb_state],
- rd_refcnt_get(&rkb->rkb_refcnt), rkb->rkb_toppar_cnt,
- rkb->rkb_active_toppar_cnt,
- (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
- (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps),
- (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs));
- /* Expedite termination by bringing down the broker
- * and triggering a state change.
- * This makes sure any eonces dependent on state changes
- * are triggered. */
- rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY,
- "Client is terminating");
-
- rd_kafka_broker_prepare_destroy(rkb);
- wakeup = rd_true;
- break;
-
- case RD_KAFKA_OP_WAKEUP:
- wakeup = rd_true;
- break;
-
- case RD_KAFKA_OP_PURGE:
- rd_kafka_broker_handle_purge_queues(rkb, rko);
- rko = NULL; /* the rko is reused for the reply */
- break;
-
- case RD_KAFKA_OP_CONNECT:
- /* Sparse connections: connection requested, transition
- * to TRY_CONNECT state to trigger new connection. */
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT) {
- rd_rkb_dbg(rkb, BROKER, "CONNECT",
- "Received CONNECT op");
- rkb->rkb_persistconn.internal++;
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
- rd_kafka_broker_unlock(rkb);
-
- } else if (rkb->rkb_state >=
- RD_KAFKA_BROKER_STATE_TRY_CONNECT) {
- rd_bool_t do_disconnect = rd_false;
-
- /* If the nodename was changed since the last connect,
- * close the current connection. */
-
- rd_kafka_broker_lock(rkb);
- do_disconnect =
- (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch);
- rd_kafka_broker_unlock(rkb);
-
- if (do_disconnect)
- rd_kafka_broker_fail(
- rkb, LOG_DEBUG,
- RD_KAFKA_RESP_ERR__TRANSPORT,
- "Closing connection due to "
- "nodename change");
- }
-
- /* Expedite next reconnect */
- rkb->rkb_ts_reconnect = 0;
-
- wakeup = rd_true;
- break;
-
- default:
- rd_kafka_assert(rkb->rkb_rk, !*"unhandled op type");
- break;
- }
-
- if (rko)
- rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
-
- return wakeup;
-}
-
-
-
-/**
- * @brief Serve broker ops.
- * @returns the number of ops served
- */
-static RD_WARN_UNUSED_RESULT int
-rd_kafka_broker_ops_serve(rd_kafka_broker_t *rkb, rd_ts_t timeout_us) {
- rd_kafka_op_t *rko;
- int cnt = 0;
-
- while ((rko = rd_kafka_q_pop(rkb->rkb_ops, timeout_us, 0)) &&
- (cnt++, !rd_kafka_broker_op_serve(rkb, rko)))
- timeout_us = RD_POLL_NOWAIT;
-
- return cnt;
-}
-
-/**
- * @brief Serve broker ops and IOs.
- *
- * If a connection exists, poll IO first based on timeout.
- * Use remaining timeout for ops queue poll.
- *
- * If no connection, poll ops queue using timeout.
- *
- * Sparse connections: if there's need for a connection, set
- * timeout to NOWAIT.
- *
- * @param abs_timeout Maximum block time (absolute time).
- *
- * @returns true on wakeup (broker state machine needs to be served),
- * else false.
- *
- * @locality broker thread
- * @locks none
- */
-static RD_WARN_UNUSED_RESULT rd_bool_t
-rd_kafka_broker_ops_io_serve(rd_kafka_broker_t *rkb, rd_ts_t abs_timeout) {
- rd_ts_t now;
- rd_bool_t wakeup;
-
- if (unlikely(rd_kafka_terminating(rkb->rkb_rk)))
- abs_timeout = rd_clock() + 1000;
- else if (unlikely(rd_kafka_broker_needs_connection(rkb)))
- abs_timeout = RD_POLL_NOWAIT;
- else if (unlikely(abs_timeout == RD_POLL_INFINITE))
- abs_timeout =
- rd_clock() + ((rd_ts_t)rd_kafka_max_block_ms * 1000);
-
-
- if (likely(rkb->rkb_transport != NULL)) {
- /* Poll and serve IO events and also poll the ops queue.
- *
- * The return value indicates if ops_serve() below should
- * use a timeout or not.
- *
- * If there are ops enqueued, cut the timeout short so
- * that they're processed as soon as possible.
- */
- if (abs_timeout > 0 && rd_kafka_q_len(rkb->rkb_ops) > 0)
- abs_timeout = RD_POLL_NOWAIT;
-
- if (rd_kafka_transport_io_serve(
- rkb->rkb_transport, rkb->rkb_ops,
- rd_timeout_remains(abs_timeout)))
- abs_timeout = RD_POLL_NOWAIT;
- }
-
-
- /* Serve broker ops */
- wakeup =
- rd_kafka_broker_ops_serve(rkb, rd_timeout_remains_us(abs_timeout));
-
- rd_atomic64_add(&rkb->rkb_c.wakeups, 1);
-
- /* An op might have triggered the need for a connection, if so
- * transition to TRY_CONNECT state. */
- if (unlikely(rd_kafka_broker_needs_connection(rkb) &&
- rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT)) {
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(rkb,
- RD_KAFKA_BROKER_STATE_TRY_CONNECT);
- rd_kafka_broker_unlock(rkb);
- wakeup = rd_true;
- }
-
- /* Scan queues for timeouts. */
- now = rd_clock();
- if (rd_interval(&rkb->rkb_timeout_scan_intvl, 1000000, now) > 0)
- rd_kafka_broker_timeout_scan(rkb, now);
-
- return wakeup;
-}
-
-
-/**
- * @brief Consumer: Serve the toppars assigned to this broker.
- *
- * @returns the minimum Fetch backoff time (abs timestamp) for the
- * partitions to fetch.
- *
- * @locality broker thread
- */
-static rd_ts_t rd_kafka_broker_consumer_toppars_serve(rd_kafka_broker_t *rkb) {
- rd_kafka_toppar_t *rktp, *rktp_tmp;
- rd_ts_t min_backoff = RD_TS_MAX;
-
- TAILQ_FOREACH_SAFE(rktp, &rkb->rkb_toppars, rktp_rkblink, rktp_tmp) {
- rd_ts_t backoff;
-
- /* Serve toppar to update desired rktp state */
- backoff = rd_kafka_broker_consumer_toppar_serve(rkb, rktp);
- if (backoff < min_backoff)
- min_backoff = backoff;
- }
-
- return min_backoff;
-}
-
-
-/**
- * @brief Scan toppar's xmit and producer queue for message timeouts and
- * enqueue delivery reports for timed out messages.
- *
- * @param abs_next_timeout will be set to the next message timeout, or 0
- * if no timeout.
- *
- * @returns the number of messages timed out.
- *
- * @locality toppar's broker handler thread
- * @locks toppar_lock MUST be held
- */
-static int rd_kafka_broker_toppar_msgq_scan(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_ts_t now,
- rd_ts_t *abs_next_timeout) {
- rd_kafka_msgq_t xtimedout = RD_KAFKA_MSGQ_INITIALIZER(xtimedout);
- rd_kafka_msgq_t qtimedout = RD_KAFKA_MSGQ_INITIALIZER(qtimedout);
- int xcnt, qcnt, cnt;
- uint64_t first, last;
- rd_ts_t next;
-
- *abs_next_timeout = 0;
-
- xcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_xmit_msgq, &xtimedout,
- now, &next);
- if (next && (!*abs_next_timeout || next < *abs_next_timeout))
- *abs_next_timeout = next;
-
- qcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq, &qtimedout, now,
- &next);
- if (next && (!*abs_next_timeout || next < *abs_next_timeout))
- *abs_next_timeout = next;
-
- cnt = xcnt + qcnt;
- if (likely(cnt == 0))
- return 0;
-
- /* Insert queue-timedout into xmitqueue-timedout in a sorted fashion */
- rd_kafka_msgq_insert_msgq(&xtimedout, &qtimedout,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
-
- first = rd_kafka_msgq_first(&xtimedout)->rkm_u.producer.msgid;
- last = rd_kafka_msgq_last(&xtimedout)->rkm_u.producer.msgid;
-
- rd_rkb_dbg(rkb, MSG, "TIMEOUT",
- "%s [%" PRId32
- "]: timed out %d+%d message(s) "
- "(MsgId %" PRIu64 "..%" PRIu64
- "): message.timeout.ms exceeded",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, xcnt,
- qcnt, first, last);
-
- /* Trigger delivery report for timed out messages */
- rd_kafka_dr_msgq(rktp->rktp_rkt, &xtimedout,
- RD_KAFKA_RESP_ERR__MSG_TIMED_OUT);
-
- return cnt;
-}
-
-
-/**
- * @brief Producer: Check this broker's toppars for message timeouts.
- *
- * This is only used by the internal broker to enforce message timeouts.
- *
- * @returns the next absolute scan time.
- *
- * @locality internal broker thread.
- */
-static rd_ts_t rd_kafka_broker_toppars_timeout_scan(rd_kafka_broker_t *rkb,
- rd_ts_t now) {
- rd_kafka_toppar_t *rktp;
- rd_ts_t next = now + (1000 * 1000);
-
- TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
- rd_ts_t this_next;
-
- rd_kafka_toppar_lock(rktp);
-
- if (unlikely(rktp->rktp_broker != rkb)) {
- /* Currently migrating away from this
- * broker. */
- rd_kafka_toppar_unlock(rktp);
- continue;
- }
-
- /* Scan queues for msg timeouts */
- rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &this_next);
-
- rd_kafka_toppar_unlock(rktp);
-
- if (this_next && this_next < next)
- next = this_next;
- }
-
- return next;
-}
-
-
-/**
- * @brief Idle function for the internal broker handle.
- */
-static void rd_kafka_broker_internal_serve(rd_kafka_broker_t *rkb,
- rd_ts_t abs_timeout) {
- int initial_state = rkb->rkb_state;
- rd_bool_t wakeup;
-
- if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER) {
- /* Consumer */
- do {
- rd_kafka_broker_consumer_toppars_serve(rkb);
-
- wakeup = rd_kafka_broker_ops_io_serve(rkb, abs_timeout);
-
- } while (!rd_kafka_broker_terminating(rkb) &&
- (int)rkb->rkb_state == initial_state && !wakeup &&
- !rd_timeout_expired(rd_timeout_remains(abs_timeout)));
- } else {
- /* Producer */
- rd_ts_t next_timeout_scan = 0;
-
- do {
- rd_ts_t now = rd_clock();
-
- if (now >= next_timeout_scan)
- next_timeout_scan =
- rd_kafka_broker_toppars_timeout_scan(rkb,
- now);
-
- wakeup = rd_kafka_broker_ops_io_serve(
- rkb, RD_MIN(abs_timeout, next_timeout_scan));
-
- } while (!rd_kafka_broker_terminating(rkb) &&
- (int)rkb->rkb_state == initial_state && !wakeup &&
- !rd_timeout_expired(rd_timeout_remains(abs_timeout)));
- }
-}
-
-
-/**
- * @returns the number of requests that may be enqueued before
- * queue.backpressure.threshold is reached.
- */
-
-static RD_INLINE unsigned int
-rd_kafka_broker_outbufs_space(rd_kafka_broker_t *rkb) {
- int r = rkb->rkb_rk->rk_conf.queue_backpressure_thres -
- rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt);
- return r < 0 ? 0 : (unsigned int)r;
-}
-
-
-
-/**
- * @brief Update \p *next_wakeup_ptr to \p maybe_next_wakeup if it is sooner.
- *
- * Both parameters are absolute timestamps.
- * \p maybe_next_wakeup must not be 0.
- */
-#define rd_kafka_set_next_wakeup(next_wakeup_ptr, maybe_next_wakeup) \
- do { \
- rd_ts_t *__n = (next_wakeup_ptr); \
- rd_ts_t __m = (maybe_next_wakeup); \
- rd_dassert(__m != 0); \
- if (__m < *__n) \
- *__n = __m; \
- } while (0)
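-
-/*
- * Usage sketch: callers seed an absolute deadline and repeatedly clamp
- * it towards the earliest pending event, e.g.:
- *
- *   rd_ts_t next_wakeup = abs_timeout;
- *   rd_kafka_set_next_wakeup(&next_wakeup, rd_clock() + 50 * 1000);
- */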
-
-
-/**
- * @brief Serve a toppar for producing.
- *
- * @param next_wakeup will be updated to when the next wake-up/attempt is
- * desired. Does not take the current value into
- * consideration, even if it is lower.
- * @param do_timeout_scan perform msg timeout scan
- * @param may_send if set to false there is something on the global level
- * that prohibits sending messages, such as a transactional
- * state.
- * @param flushing App is calling flush(): override linger.ms as immediate.
- *
- * @returns the number of messages produced.
- *
- * @locks none
- * @locality broker thread
- */
-static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const rd_kafka_pid_t pid,
- rd_ts_t now,
- rd_ts_t *next_wakeup,
- rd_bool_t do_timeout_scan,
- rd_bool_t may_send,
- rd_bool_t flushing) {
- int cnt = 0;
- int r;
- rd_kafka_msg_t *rkm;
- int move_cnt = 0;
- int max_requests;
- int reqcnt;
- int inflight = 0;
- uint64_t epoch_base_msgid = 0;
- rd_bool_t batch_ready = rd_false;
-
- /* By limiting the number of not-yet-sent buffers (rkb_outbufs) we
- * provide a backpressure mechanism to the producer loop
- * which allows larger message batches to accumulate and thus
- * increase throughput.
- * This comes at no latency cost since there are already
- * buffers enqueued waiting for transmission. */
- max_requests = rd_kafka_broker_outbufs_space(rkb);
-
- rd_kafka_toppar_lock(rktp);
-
- if (unlikely(rktp->rktp_broker != rkb)) {
- /* Currently migrating away from this
- * broker. */
- rd_kafka_toppar_unlock(rktp);
- return 0;
- }
-
- if (unlikely(do_timeout_scan)) {
- int timeoutcnt;
- rd_ts_t next;
-
- /* Scan queues for msg timeouts */
- timeoutcnt =
- rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &next);
-
- if (next)
- rd_kafka_set_next_wakeup(next_wakeup, next);
-
- if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
- if (!rd_kafka_pid_valid(pid)) {
- /* If we don't have a PID, we can't transmit
- * any messages. */
- rd_kafka_toppar_unlock(rktp);
- return 0;
-
- } else if (timeoutcnt > 0) {
- /* Message timeouts will lead to gaps in
- * the message sequence and thus trigger
- * OutOfOrderSequence errors from the broker.
- * Bump the epoch to reset the base msgid after
- * draining all partitions. */
-
- /* Must not hold toppar lock */
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_idemp_drain_epoch_bump(
- rkb->rkb_rk, RD_KAFKA_RESP_ERR__TIMED_OUT,
- "%d message(s) timed out "
- "on %s [%" PRId32 "]",
- timeoutcnt, rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition);
- return 0;
- }
- }
- }
-
- if (unlikely(!may_send)) {
- /* Sends prohibited on the broker or instance level */
- max_requests = 0;
- } else if (unlikely(rd_kafka_fatal_error_code(rkb->rkb_rk))) {
- /* Fatal error has been raised, don't produce. */
- max_requests = 0;
- } else if (unlikely(RD_KAFKA_TOPPAR_IS_PAUSED(rktp))) {
- /* Partition is paused */
- max_requests = 0;
- } else if (unlikely(rd_kafka_is_transactional(rkb->rkb_rk) &&
- !rd_kafka_txn_toppar_may_send_msg(rktp))) {
- /* Partition not registered in transaction yet */
- max_requests = 0;
- } else if (max_requests > 0) {
- /* Move messages from locked partition produce queue
- * to broker-local xmit queue. */
- if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0) {
-
- rd_kafka_msgq_insert_msgq(
- &rktp->rktp_xmit_msgq, &rktp->rktp_msgq,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
- }
-
- /* Calculate maximum wait-time to honour
- * queue.buffering.max.ms contract.
- * Unless flushing, in which case immediate
- * wakeups are allowed. */
- batch_ready = rd_kafka_msgq_allow_wakeup_at(
- &rktp->rktp_msgq, &rktp->rktp_xmit_msgq,
- /* Only update the broker thread wakeup time
- * if connection is up and messages can actually be
- * sent, otherwise the wakeup can't do much. */
- rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP ? next_wakeup
- : NULL,
- now, flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us,
- /* Batch message count threshold */
- rkb->rkb_rk->rk_conf.batch_num_messages,
- /* Batch total size threshold */
- rkb->rkb_rk->rk_conf.batch_size);
- }
-
- rd_kafka_toppar_unlock(rktp);
-
-
- if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
- /* Update the partition's cached PID, and reset the
- * base msg sequence if necessary */
- rd_bool_t did_purge = rd_false;
-
- if (unlikely(!rd_kafka_pid_eq(pid, rktp->rktp_eos.pid))) {
- /* Flush any ProduceRequests for this partition in the
- * output buffer queue to speed up recovery. */
- rd_kafka_broker_bufq_purge_by_toppar(
- rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp,
- RD_KAFKA_RESP_ERR__RETRY);
- did_purge = rd_true;
-
- if (rd_kafka_pid_valid(rktp->rktp_eos.pid))
- rd_rkb_dbg(
- rkb, QUEUE, "TOPPAR",
- "%.*s [%" PRId32
- "] PID has changed: "
- "must drain requests for all "
- "partitions before resuming reset "
- "of PID",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
- }
-
- inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight);
-
- if (unlikely(rktp->rktp_eos.wait_drain)) {
- if (inflight) {
- /* Waiting for in-flight requests to
- * drain/finish before producing anything more.
- * This is used to recover to a consistent
- * state when the partition leader
- * has changed, or timed out messages
- * have been removed from the queue. */
-
- rd_rkb_dbg(
- rkb, QUEUE, "TOPPAR",
- "%.*s [%" PRId32
- "] waiting for "
- "%d in-flight request(s) to drain "
- "from queue before continuing "
- "to produce",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, inflight);
-
- /* Flush any ProduceRequests for this
- * partition in the output buffer queue to
- * speed up draining. */
- if (!did_purge)
- rd_kafka_broker_bufq_purge_by_toppar(
- rkb, &rkb->rkb_outbufs,
- RD_KAFKAP_Produce, rktp,
- RD_KAFKA_RESP_ERR__RETRY);
-
- return 0;
- }
-
- rd_rkb_dbg(rkb, QUEUE, "TOPPAR",
- "%.*s [%" PRId32
- "] all in-flight requests "
- "drained from queue",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
-
- rktp->rktp_eos.wait_drain = rd_false;
- }
-
- /* Limit the number of in-flight requests (per partition)
- * to the broker's sequence de-duplication window. */
- max_requests = RD_MIN(max_requests,
- RD_KAFKA_IDEMP_MAX_INFLIGHT - inflight);
- }
-
-
- /* Check if allowed to create and enqueue a ProduceRequest */
- if (max_requests <= 0)
- return 0;
-
- r = rktp->rktp_xmit_msgq.rkmq_msg_cnt;
- if (r == 0)
- return 0;
-
- rd_kafka_msgq_verify_order(rktp, &rktp->rktp_xmit_msgq, 0, rd_false);
-
- rd_rkb_dbg(rkb, QUEUE, "TOPPAR",
- "%.*s [%" PRId32
- "] %d message(s) in "
- "xmit queue (%d added from partition queue)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, r, move_cnt);
-
- rkm = TAILQ_FIRST(&rktp->rktp_xmit_msgq.rkmq_msgs);
- rd_dassert(rkm != NULL);
-
- if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
- /* Update the partition's cached PID, and reset the
- * base msg sequence if necessary */
- if (unlikely(!rd_kafka_pid_eq(pid, rktp->rktp_eos.pid))) {
- /* Attempt to change the pid; it will fail if there
- * are outstanding messages in-flight, in which case
- * we eventually come back here to retry. */
- if (!rd_kafka_toppar_pid_change(
- rktp, pid, rkm->rkm_u.producer.msgid))
- return 0;
- }
-
- rd_kafka_toppar_lock(rktp);
- /* Idempotent producer epoch base msgid, this is passed to the
- * ProduceRequest and msgset writer to adjust the protocol-level
- * per-message sequence number. */
- epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid;
- rd_kafka_toppar_unlock(rktp);
- }
-
- if (unlikely(rkb->rkb_state != RD_KAFKA_BROKER_STATE_UP)) {
- /* There are messages to send but connection is not up. */
- rd_rkb_dbg(rkb, BROKER, "TOPPAR",
- "%.*s [%" PRId32
- "] "
- "%d message(s) queued but broker not up",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, r);
- rkb->rkb_persistconn.internal++;
- return 0;
- }
-
- /* Attempt to fill the batch size, but limit our waiting
- * to queue.buffering.max.ms, batch.num.messages, and batch.size. */
- if (!batch_ready) {
- /* Wait for more messages or queue.buffering.max.ms
- * to expire. */
- return 0;
- }
-
- /* Send Produce requests for this toppar, honouring the
- * queue backpressure threshold. */
- for (reqcnt = 0; reqcnt < max_requests; reqcnt++) {
- r = rd_kafka_ProduceRequest(rkb, rktp, pid, epoch_base_msgid);
- if (likely(r > 0))
- cnt += r;
- else
- break;
- }
-
- /* Update the allowed wake-up time based on remaining messages
- * in the queue. */
- if (cnt > 0) {
- rd_kafka_toppar_lock(rktp);
- batch_ready = rd_kafka_msgq_allow_wakeup_at(
- &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, next_wakeup, now,
- flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us,
- /* Batch message count threshold */
- rkb->rkb_rk->rk_conf.batch_num_messages,
- /* Batch total size threshold */
- rkb->rkb_rk->rk_conf.batch_size);
- rd_kafka_toppar_unlock(rktp);
- }
-
- return cnt;
-}
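-
-/* Illustrative note (not from the original source): the in-flight clamp
- * in the idempotence path above keeps per-partition pipelining within the
- * broker's sequence de-duplication window. Assuming
- * RD_KAFKA_IDEMP_MAX_INFLIGHT is 5:
- *   inflight == 3  ->  max_requests = RD_MIN(max_requests, 5 - 3), so at
- *                      most 2 new ProduceRequests are created this pass;
- *   inflight == 5  ->  max_requests clamps to 0 and nothing more is sent
- *                      until a response drains the window. */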
-
-
-
-/**
- * @brief Produce from all toppars assigned to this broker.
- *
- * @param next_wakeup is updated if the next IO/ops timeout should be
- * less than the input value (i.e., sooner).
- *
- * @returns the total number of messages produced.
- */
-static int rd_kafka_broker_produce_toppars(rd_kafka_broker_t *rkb,
- rd_ts_t now,
- rd_ts_t *next_wakeup,
- rd_bool_t do_timeout_scan) {
- rd_kafka_toppar_t *rktp;
- int cnt = 0;
- rd_ts_t ret_next_wakeup = *next_wakeup;
- rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER;
- rd_bool_t may_send = rd_true;
- rd_bool_t flushing = rd_false;
-
- /* Round-robin serve each toppar. */
- rktp = rkb->rkb_active_toppar_next;
- if (unlikely(!rktp))
- return 0;
-
- if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
- /* Idempotent producer: get a copy of the current pid. */
- pid = rd_kafka_idemp_get_pid(rkb->rkb_rk);
-
- /* If we don't have a valid pid, or the transaction state
- * prohibits sending messages, return immediately,
- * unless the per-partition timeout scan needs to run.
- * The broker threads are woken up when a PID is acquired
- * or the transaction state changes. */
- if (!rd_kafka_pid_valid(pid))
- may_send = rd_false;
- else if (rd_kafka_is_transactional(rkb->rkb_rk) &&
- !rd_kafka_txn_may_send_msg(rkb->rkb_rk))
- may_send = rd_false;
-
- if (!may_send && !do_timeout_scan)
- return 0;
- }
-
- flushing = may_send && rd_atomic32_get(&rkb->rkb_rk->rk_flushing) > 0;
-
- do {
- rd_ts_t this_next_wakeup = ret_next_wakeup;
-
- /* Try producing toppar */
- cnt += rd_kafka_toppar_producer_serve(
- rkb, rktp, pid, now, &this_next_wakeup, do_timeout_scan,
- may_send, flushing);
-
- rd_kafka_set_next_wakeup(&ret_next_wakeup, this_next_wakeup);
-
- } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
- rktp_activelink)) !=
- rkb->rkb_active_toppar_next);
-
- /* Update next starting toppar to produce in round-robin list. */
- rd_kafka_broker_active_toppar_next(
- rkb,
- CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, rktp_activelink));
-
- *next_wakeup = ret_next_wakeup;
-
- return cnt;
-}
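-
-/* Round-robin walk-through (illustrative, not from the original source):
- * with active toppars {A, B, C} and rkb_active_toppar_next == B, the
- * do-while above serves B, C, A in that order, and the final
- * rd_kafka_broker_active_toppar_next() call advances the start pointer to
- * C, so the next invocation picks up where this one left off and each
- * partition gets a fair share of ProduceRequests. */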
-
-/**
- * @brief Producer serving
- */
-static void rd_kafka_broker_producer_serve(rd_kafka_broker_t *rkb,
- rd_ts_t abs_timeout) {
- rd_interval_t timeout_scan;
- unsigned int initial_state = rkb->rkb_state;
- rd_ts_t now;
- int cnt = 0;
-
- rd_interval_init(&timeout_scan);
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- rd_kafka_broker_lock(rkb);
-
- while (!rd_kafka_broker_terminating(rkb) &&
- rkb->rkb_state == initial_state &&
- (abs_timeout > (now = rd_clock()))) {
- rd_bool_t do_timeout_scan;
- rd_ts_t next_wakeup = abs_timeout;
- rd_bool_t overshot;
-
- rd_kafka_broker_unlock(rkb);
-
- /* Perform a timeout scan on the first iteration, and thus
- * on each state change, to make sure messages in the
- * partition's rktp_xmit_msgq are timed out before we
- * attempt to re-transmit them. */
- overshot = rd_interval(&timeout_scan, 1000 * 1000, now) >= 0;
- do_timeout_scan = cnt++ == 0 || overshot;
-
- rd_kafka_broker_produce_toppars(rkb, now, &next_wakeup,
- do_timeout_scan);
-
- /* Check and move retry buffers */
- if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0))
- rd_kafka_broker_retry_bufs_move(rkb, &next_wakeup);
-
- if (rd_kafka_broker_ops_io_serve(rkb, next_wakeup))
- return; /* Wakeup */
-
- rd_kafka_broker_lock(rkb);
- }
-
- rd_kafka_broker_unlock(rkb);
-}
-
-
-
-/**
- * Consumer serving
- */
-static void rd_kafka_broker_consumer_serve(rd_kafka_broker_t *rkb,
- rd_ts_t abs_timeout) {
- unsigned int initial_state = rkb->rkb_state;
- rd_ts_t now;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- rd_kafka_broker_lock(rkb);
-
- while (!rd_kafka_broker_terminating(rkb) &&
- rkb->rkb_state == initial_state &&
- abs_timeout > (now = rd_clock())) {
- rd_ts_t min_backoff;
-
- rd_kafka_broker_unlock(rkb);
-
- /* Serve toppars */
- min_backoff = rd_kafka_broker_consumer_toppars_serve(rkb);
- if (rkb->rkb_ts_fetch_backoff > now &&
- rkb->rkb_ts_fetch_backoff < min_backoff)
- min_backoff = rkb->rkb_ts_fetch_backoff;
-
- if (min_backoff < RD_TS_MAX &&
- rkb->rkb_state != RD_KAFKA_BROKER_STATE_UP) {
- /* There are partitions to fetch but the
- * connection is not up. */
- rkb->rkb_persistconn.internal++;
- }
-
- /* Send Fetch request message for all underflowed toppars
- * if the connection is up and there are no outstanding
- * fetch requests for this connection. */
- if (!rkb->rkb_fetching &&
- rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) {
- if (min_backoff < now) {
- rd_kafka_broker_fetch_toppars(rkb, now);
- min_backoff = abs_timeout;
- } else if (min_backoff < RD_TS_MAX)
- rd_rkb_dbg(rkb, FETCH, "FETCH",
- "Fetch backoff for %" PRId64 "ms",
- (min_backoff - now) / 1000);
- } else {
- /* Nothing needs to be done, next wakeup
- * is from ops, state change, IO, or this timeout */
- min_backoff = abs_timeout;
- }
-
- /* Check and move retry buffers */
- if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0))
- rd_kafka_broker_retry_bufs_move(rkb, &min_backoff);
-
- if (min_backoff > abs_timeout)
- min_backoff = abs_timeout;
-
- if (rd_kafka_broker_ops_io_serve(rkb, min_backoff))
- return; /* Wakeup */
-
- rd_kafka_broker_lock(rkb);
- }
-
- rd_kafka_broker_unlock(rkb);
-}
-
-
-
-/**
- * @brief Check if connections.max.idle.ms has been exceeded and if so
- * close the connection.
- *
- * @remark Must only be called if connections.max.idle.ms > 0 and
- * the current broker state is UP (or UPDATE).
- *
- * @locality broker thread
- */
-static RD_INLINE void rd_kafka_broker_idle_check(rd_kafka_broker_t *rkb) {
- rd_ts_t ts_send = rd_atomic64_get(&rkb->rkb_c.ts_send);
- rd_ts_t ts_recv = rd_atomic64_get(&rkb->rkb_c.ts_recv);
- rd_ts_t ts_last_activity = RD_MAX(ts_send, ts_recv);
- int idle_ms;
-
- /* If nothing has been sent yet, use the connection time as
- * last activity. */
- if (unlikely(!ts_last_activity))
- ts_last_activity = rkb->rkb_ts_state;
-
- idle_ms = (int)((rd_clock() - ts_last_activity) / 1000);
-
- if (likely(idle_ms < rkb->rkb_rk->rk_conf.connections_max_idle_ms))
- return;
-
- rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__TRANSPORT,
- "Connection max idle time exceeded "
- "(%dms since last activity)",
- idle_ms);
-}
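-
-/* Worked example (illustrative values): with connections.max.idle.ms set
- * to 540000 (9 minutes) and the last send/recv activity 600000 ms ago,
- * idle_ms = 600000 >= 540000, so the connection is failed with
- * RD_KAFKA_RESP_ERR__TRANSPORT and closed. */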
-
-
-/**
- * @brief Serve broker thread according to client type.
- * May be called in any broker state.
- *
- * This function is to be called from the state machine in
- * rd_kafka_broker_thread_main, and will return when
- * there was a state change, or the handle is terminating.
- *
- * Broker threads are triggered by three things:
- * - Ops from other parts of librdkafka / app.
- * This is the rkb_ops queue which is served from
- * rd_kafka_broker_ops_io_serve().
- * - IO from broker socket.
- * The ops queue is also IO-triggered to provide
- * quick wakeup when the thread is blocking on IO.
- * Also served from rd_kafka_broker_ops_io_serve().
- * When there is no broker socket only the ops
- * queue is served.
- * - Ops/IO timeout when there were no ops or
- * IO events within a variable timeout.
- *
- * For each iteration of the loops in producer_serve(), consumer_serve(),
- * etc., the Ops and IO are polled, and the client-type-specific
- * logic is executed. For the consumer this logic checks which partitions
- * to fetch or backoff, and sends Fetch requests.
- * The producer checks for messages to batch and transmit.
- * All types check for request timeouts, etc.
- *
- * Wakeups
- * =======
- * The logic returns a next wakeup time which controls how long the
- * next Ops/IO poll may block before the logic wants to run again;
- * this is typically controlled by `linger.ms` on the Producer
- * and fetch backoffs on the consumer.
- *
- * Remote threads may also want to wake up the Ops/IO poll so that
- * the logic is run more quickly. For example when a new message
- * is enqueued by produce() it is important that it is batched
- * and transmitted within the configured `linger.ms`.
- *
- * Any op enqueued on the broker ops queue (rkb_ops) will automatically
- * trigger a wakeup of the broker thread (either by wakeup_fd IO event
- * or by the conditional variable of rkb_ops being triggered - or both).
- *
- * Produced messages are not enqueued on the rkb_ops queue but on
- * the partition's rktp_msgq message queue. To provide quick wakeups
- * the partition has a reference to the partition's current leader broker
- * thread's rkb_ops queue, rktp_msgq_wakeup_q.
- * When enqueuing a message on the partition queue and the queue was
- * previously empty, the rktp_msgq_wakeup_q (which is rkb_ops) is woken up
- * by rd_kafka_q_yield(), which sets a YIELD flag and triggers the cond var
- * to wake up the broker thread (without allocating and enqueuing an rko).
- * This also triggers the wakeup_fd of rkb_ops, if necessary.
- *
- * When sparse connections are enabled the broker will linger in the
- * INIT state until there's a need for a connection, in which case
- * it will set its state to DOWN to trigger the connection.
- * This is controlled both by the shared rkb_persistconn atomic counters
- * that may be updated from other parts of the code, as well as the
- * temporary per broker_serve() rkb_persistconn.internal counter which
- * is used by the broker handler code to detect if a connection is needed,
- * such as when a partition is being produced to.
- *
- *
- * @param timeout_ms The maximum timeout for blocking Ops/IO.
- *
- * @locality broker thread
- * @locks none
- */
-static void rd_kafka_broker_serve(rd_kafka_broker_t *rkb, int timeout_ms) {
- rd_ts_t abs_timeout;
-
- if (unlikely(rd_kafka_terminating(rkb->rkb_rk) ||
- timeout_ms == RD_POLL_NOWAIT))
- timeout_ms = 1;
- else if (timeout_ms == RD_POLL_INFINITE)
- timeout_ms = rd_kafka_max_block_ms;
-
- abs_timeout = rd_timeout_init(timeout_ms);
- /* Must be a valid absolute time from here on. */
- rd_assert(abs_timeout > 0);
-
- /* rkb_persistconn.internal is the per broker_serve()
- * automatic counter that keeps track of whether anything
- * in the producer/consumer logic needs this broker connection
- * to be up.
- * The value is reset here on each serve(). If there are queued
- * requests we know right away that a connection is needed. */
- rkb->rkb_persistconn.internal =
- rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt) > 0;
-
- if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
- rd_kafka_broker_internal_serve(rkb, abs_timeout);
- return;
- }
-
- if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER)
- rd_kafka_broker_producer_serve(rkb, abs_timeout);
- else if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER)
- rd_kafka_broker_consumer_serve(rkb, abs_timeout);
-
- if (rkb->rkb_rk->rk_conf.connections_max_idle_ms &&
- rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP)
- rd_kafka_broker_idle_check(rkb);
-}
-
-
-/**
- * @returns true if all broker addresses have been tried.
- *
- * @locality broker thread
- * @locks_required none
- * @locks_acquired none
- */
-static rd_bool_t
-rd_kafka_broker_addresses_exhausted(const rd_kafka_broker_t *rkb) {
- return !rkb->rkb_rsal || rkb->rkb_rsal->rsal_cnt == 0 ||
- rkb->rkb_rsal->rsal_curr + 1 == rkb->rkb_rsal->rsal_cnt;
-}
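-
-/* Illustrative: with a resolved address list of three entries
- * (rsal_cnt == 3) this returns true once rsal_curr == 2, i.e. after the
- * last address has been handed out for a connection attempt, which is
- * what gates the back-off handling in rd_kafka_broker_thread_main()
- * below. */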
-
-
-static int rd_kafka_broker_thread_main(void *arg) {
- rd_kafka_broker_t *rkb = arg;
- rd_kafka_t *rk = rkb->rkb_rk;
-
- rd_kafka_set_thread_name("%s", rkb->rkb_name);
- rd_kafka_set_thread_sysname("rdk:broker%" PRId32, rkb->rkb_nodeid);
-
- rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BROKER);
-
- (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
-
- /* Our own refcount was increased just prior to thread creation;
- * when the refcount drops to 1 it is just us left and the broker
- * thread should terminate. */
-
- /* Acquire lock (which was held by thread creator during creation)
- * to synchronise state. */
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_unlock(rkb);
-
- rd_rkb_dbg(rkb, BROKER, "BRKMAIN", "Enter main broker thread");
-
- while (!rd_kafka_broker_terminating(rkb)) {
- int backoff;
- int r;
- rd_kafka_broker_state_t orig_state;
-
- redo:
- orig_state = rkb->rkb_state;
-
- switch (rkb->rkb_state) {
- case RD_KAFKA_BROKER_STATE_INIT:
- /* Check if there is demand for a connection
- * to this broker; if so, jump to the TRY_CONNECT state. */
- if (!rd_kafka_broker_needs_connection(rkb)) {
- rd_kafka_broker_serve(rkb,
- rd_kafka_max_block_ms);
- break;
- }
-
- /* The INIT state also exists so that an initial
- * connection failure triggers a state transition
- * which might trigger an ALL_BROKERS_DOWN error. */
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
- rd_kafka_broker_unlock(rkb);
- goto redo; /* effectively a fallthru to TRY_CONNECT */
-
- case RD_KAFKA_BROKER_STATE_DOWN:
- rd_kafka_broker_lock(rkb);
- if (rkb->rkb_rk->rk_conf.sparse_connections)
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_INIT);
- else
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
- rd_kafka_broker_unlock(rkb);
- goto redo; /* effectively a fallthru to TRY_CONNECT */
-
- case RD_KAFKA_BROKER_STATE_TRY_CONNECT:
- if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_UP);
- rd_kafka_broker_unlock(rkb);
- break;
- }
-
- if (unlikely(rd_kafka_terminating(rkb->rkb_rk)))
- rd_kafka_broker_serve(rkb, 1000);
-
- if (!rd_kafka_sasl_ready(rkb->rkb_rk)) {
- /* SASL provider not yet ready. */
- rd_kafka_broker_serve(rkb,
- rd_kafka_max_block_ms);
- /* Continue while loop to try again (as long as
- * we are not terminating). */
- continue;
- }
-
- /* Throttle & jitter reconnects to avoid
- * a thundering herd of reconnecting clients after
- * a broker / network outage. Issue #403 */
- backoff =
- rd_kafka_broker_reconnect_backoff(rkb, rd_clock());
- if (backoff > 0) {
- rd_rkb_dbg(rkb, BROKER, "RECONNECT",
- "Delaying next reconnect by %dms",
- backoff);
- rd_kafka_broker_serve(rkb, (int)backoff);
- continue;
- }
-
- /* Initiate asynchronous connection attempt.
- * Only the host lookup is blocking here. */
- r = rd_kafka_broker_connect(rkb);
- if (r == -1) {
- /* Immediate failure, most likely host
- * resolving failed.
- * Try the next resolve result until we've
- * tried them all, in which case we sleep a
- * short while to avoid busy looping. */
- if (rd_kafka_broker_addresses_exhausted(rkb))
- rd_kafka_broker_serve(
- rkb, rd_kafka_max_block_ms);
- } else if (r == 0) {
- /* Broker has no hostname yet, wait
- * for hostname to be set and connection
- * triggered by received OP_CONNECT. */
- rd_kafka_broker_serve(rkb,
- rd_kafka_max_block_ms);
- } else {
- /* Connection in progress, state will
- * have changed to STATE_CONNECT. */
- }
-
- break;
-
- case RD_KAFKA_BROKER_STATE_CONNECT:
- case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE:
- case RD_KAFKA_BROKER_STATE_AUTH_LEGACY:
- case RD_KAFKA_BROKER_STATE_AUTH_REQ:
- case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE:
- case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY:
- /* Asynchronous connect in progress. */
- rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms);
-
- /* Connect failure.
- * Try the next resolve result until we've
- * tried them all, in which case we back off the next
- * connection attempt to avoid busy looping. */
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN &&
- rd_kafka_broker_addresses_exhausted(rkb))
- rd_kafka_broker_update_reconnect_backoff(
- rkb, &rkb->rkb_rk->rk_conf, rd_clock());
- else if (
- rkb->rkb_state == orig_state &&
- rd_clock() >=
- (rkb->rkb_ts_connect +
- (rd_ts_t)rk->rk_conf
- .socket_connection_setup_timeout_ms *
- 1000))
- rd_kafka_broker_fail(
- rkb, LOG_WARNING,
- RD_KAFKA_RESP_ERR__TRANSPORT,
- "Connection setup timed out in state %s",
- rd_kafka_broker_state_names
- [rkb->rkb_state]);
-
- break;
-
- case RD_KAFKA_BROKER_STATE_UPDATE:
- /* FALLTHRU */
- case RD_KAFKA_BROKER_STATE_UP:
- rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms);
-
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UPDATE) {
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(
- rkb, RD_KAFKA_BROKER_STATE_UP);
- rd_kafka_broker_unlock(rkb);
- }
- break;
- }
-
- if (rd_kafka_terminating(rkb->rkb_rk)) {
- /* Handle is terminating: fail the send+retry queue
- * to speed up termination, otherwise we'll
- * need to wait for request timeouts. */
- r = rd_kafka_broker_bufq_timeout_scan(
- rkb, 0, &rkb->rkb_outbufs, NULL, -1,
- RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0);
- r += rd_kafka_broker_bufq_timeout_scan(
- rkb, 0, &rkb->rkb_retrybufs, NULL, -1,
- RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0);
- rd_rkb_dbg(
- rkb, BROKER, "TERMINATE",
- "Handle is terminating in state %s: "
- "%d refcnts (%p), %d toppar(s), "
- "%d active toppar(s), "
- "%d outbufs, %d waitresps, %d retrybufs: "
- "failed %d request(s) in retry+outbuf",
- rd_kafka_broker_state_names[rkb->rkb_state],
- rd_refcnt_get(&rkb->rkb_refcnt), &rkb->rkb_refcnt,
- rkb->rkb_toppar_cnt, rkb->rkb_active_toppar_cnt,
- (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
- (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps),
- (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs), r);
- }
- }
-
- if (rkb->rkb_source != RD_KAFKA_INTERNAL) {
- rd_kafka_wrlock(rkb->rkb_rk);
- TAILQ_REMOVE(&rkb->rkb_rk->rk_brokers, rkb, rkb_link);
- if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb))
- rd_list_remove(&rkb->rkb_rk->rk_broker_by_id, rkb);
- (void)rd_atomic32_sub(&rkb->rkb_rk->rk_broker_cnt, 1);
- rd_kafka_wrunlock(rkb->rkb_rk);
- }
-
- rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY,
- "Broker handle is terminating");
-
- /* Disable and drain ops queue.
- * Simply purging the ops queue risks leaving dangling references
- * for ops such as PARTITION_JOIN/PARTITION_LEAVE where the broker
- * reference is not maintained in the rko (but in rktp_next_leader).
- * #1596 */
- rd_kafka_q_disable(rkb->rkb_ops);
- while (rd_kafka_broker_ops_serve(rkb, RD_POLL_NOWAIT))
- ;
-
- rd_kafka_broker_destroy(rkb);
-
-#if WITH_SSL
- /* Remove OpenSSL per-thread error state to avoid memory leaks */
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
- /* (The OpenSSL libraries handle thread init and deinit themselves)
- * https://github.com/openssl/openssl/pull/1048 */
-#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
- ERR_remove_thread_state(NULL);
-#endif
-#endif
-
- rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BROKER);
-
- rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
-
- return 0;
-}
-
-
-/**
- * Final destructor. Refcnt must be 0.
- */
-void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) {
-
- rd_assert(thrd_is_current(rkb->rkb_thread));
- rd_assert(TAILQ_EMPTY(&rkb->rkb_monitors));
- rd_assert(TAILQ_EMPTY(&rkb->rkb_outbufs.rkbq_bufs));
- rd_assert(TAILQ_EMPTY(&rkb->rkb_waitresps.rkbq_bufs));
- rd_assert(TAILQ_EMPTY(&rkb->rkb_retrybufs.rkbq_bufs));
- rd_assert(TAILQ_EMPTY(&rkb->rkb_toppars));
-
- if (rkb->rkb_source != RD_KAFKA_INTERNAL &&
- (rkb->rkb_rk->rk_conf.security_protocol ==
- RD_KAFKA_PROTO_SASL_PLAINTEXT ||
- rkb->rkb_rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL))
- rd_kafka_sasl_broker_term(rkb);
-
- if (rkb->rkb_wakeup_fd[0] != -1)
- rd_socket_close(rkb->rkb_wakeup_fd[0]);
- if (rkb->rkb_wakeup_fd[1] != -1)
- rd_socket_close(rkb->rkb_wakeup_fd[1]);
-
- if (rkb->rkb_recv_buf)
- rd_kafka_buf_destroy(rkb->rkb_recv_buf);
-
- if (rkb->rkb_rsal)
- rd_sockaddr_list_destroy(rkb->rkb_rsal);
-
- if (rkb->rkb_ApiVersions)
- rd_free(rkb->rkb_ApiVersions);
- rd_free(rkb->rkb_origname);
-
- rd_kafka_q_purge(rkb->rkb_ops);
- rd_kafka_q_destroy_owner(rkb->rkb_ops);
-
- rd_avg_destroy(&rkb->rkb_avg_int_latency);
- rd_avg_destroy(&rkb->rkb_avg_outbuf_latency);
- rd_avg_destroy(&rkb->rkb_avg_rtt);
- rd_avg_destroy(&rkb->rkb_avg_throttle);
-
- mtx_lock(&rkb->rkb_logname_lock);
- rd_free(rkb->rkb_logname);
- rkb->rkb_logname = NULL;
- mtx_unlock(&rkb->rkb_logname_lock);
- mtx_destroy(&rkb->rkb_logname_lock);
-
- mtx_destroy(&rkb->rkb_lock);
-
- rd_refcnt_destroy(&rkb->rkb_refcnt);
-
- rd_free(rkb);
-}
-
-
-/**
- * Returns the internal broker with refcnt increased.
- */
-rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk) {
- rd_kafka_broker_t *rkb;
-
- mtx_lock(&rk->rk_internal_rkb_lock);
- rkb = rk->rk_internal_rkb;
- if (rkb)
- rd_kafka_broker_keep(rkb);
- mtx_unlock(&rk->rk_internal_rkb_lock);
-
- return rkb;
-}
-
-
-/**
- * Adds a broker with refcount set to 1.
- * If 'source' is RD_KAFKA_INTERNAL an internal broker is added
- * that does not actually represent or connect to a real broker; it is used
- * for serving unassigned toppars' op queues.
- *
- * Locks: rd_kafka_wrlock(rk) must be held
- */
-rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk,
- rd_kafka_confsource_t source,
- rd_kafka_secproto_t proto,
- const char *name,
- uint16_t port,
- int32_t nodeid) {
- rd_kafka_broker_t *rkb;
-#ifndef _WIN32
- int r;
- sigset_t newset, oldset;
-#endif
-
- rkb = rd_calloc(1, sizeof(*rkb));
-
- if (source != RD_KAFKA_LOGICAL) {
- rd_kafka_mk_nodename(rkb->rkb_nodename,
- sizeof(rkb->rkb_nodename), name, port);
- rd_kafka_mk_brokername(rkb->rkb_name, sizeof(rkb->rkb_name),
- proto, rkb->rkb_nodename, nodeid,
- source);
- } else {
- /* Logical broker does not have a nodename (address) or port
- * at initialization. */
- rd_snprintf(rkb->rkb_name, sizeof(rkb->rkb_name), "%s", name);
- }
-
- rkb->rkb_source = source;
- rkb->rkb_rk = rk;
- rkb->rkb_ts_state = rd_clock();
- rkb->rkb_nodeid = nodeid;
- rkb->rkb_proto = proto;
- rkb->rkb_port = port;
- rkb->rkb_origname = rd_strdup(name);
-
- mtx_init(&rkb->rkb_lock, mtx_plain);
- mtx_init(&rkb->rkb_logname_lock, mtx_plain);
- rkb->rkb_logname = rd_strdup(rkb->rkb_name);
- TAILQ_INIT(&rkb->rkb_toppars);
- CIRCLEQ_INIT(&rkb->rkb_active_toppars);
- TAILQ_INIT(&rkb->rkb_monitors);
- rd_kafka_bufq_init(&rkb->rkb_outbufs);
- rd_kafka_bufq_init(&rkb->rkb_waitresps);
- rd_kafka_bufq_init(&rkb->rkb_retrybufs);
- rkb->rkb_ops = rd_kafka_q_new(rk);
- rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100 * 1000, 2,
- rk->rk_conf.stats_interval_ms ? 1 : 0);
- rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100 * 1000,
- 2, rk->rk_conf.stats_interval_ms ? 1 : 0);
- rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500 * 1000, 2,
- rk->rk_conf.stats_interval_ms ? 1 : 0);
- rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000 * 1000, 2,
- rk->rk_conf.stats_interval_ms ? 1 : 0);
- rd_refcnt_init(&rkb->rkb_refcnt, 0);
- rd_kafka_broker_keep(rkb); /* rk_broker's refcount */
-
- rkb->rkb_reconnect_backoff_ms = rk->rk_conf.reconnect_backoff_ms;
- rd_atomic32_init(&rkb->rkb_persistconn.coord, 0);
-
- rd_atomic64_init(&rkb->rkb_c.ts_send, 0);
- rd_atomic64_init(&rkb->rkb_c.ts_recv, 0);
-
- /* ApiVersion fallback interval */
- if (rkb->rkb_rk->rk_conf.api_version_request) {
- rd_interval_init(&rkb->rkb_ApiVersion_fail_intvl);
- rd_interval_fixed(
- &rkb->rkb_ApiVersion_fail_intvl,
- (rd_ts_t)rkb->rkb_rk->rk_conf.api_version_fallback_ms *
- 1000);
- }
-
- rd_interval_init(&rkb->rkb_suppress.unsupported_compression);
- rd_interval_init(&rkb->rkb_suppress.unsupported_kip62);
- rd_interval_init(&rkb->rkb_suppress.fail_error);
-
-#ifndef _WIN32
- /* Block all signals in newly created thread.
- * To avoid a race condition we block all signals in the calling
- * thread, which the new thread will inherit its sigmask from,
- * and then restore the original sigmask of the calling thread when
- * we're done creating the thread.
- * NOTE: term_sig remains unblocked since we use it on termination
- * to quickly interrupt system calls. */
- sigemptyset(&oldset);
- sigfillset(&newset);
- if (rkb->rkb_rk->rk_conf.term_sig)
- sigdelset(&newset, rkb->rkb_rk->rk_conf.term_sig);
- pthread_sigmask(SIG_SETMASK, &newset, &oldset);
-#endif
-
- /*
- * Fd-based queue wake-ups using a non-blocking pipe.
- * Writes are best effort; if the socket queue is full
- * the write fails (silently), but this has no effect on latency
- * since the POLLIN flag will already have been raised for the fd.
- */
- rkb->rkb_wakeup_fd[0] = -1;
- rkb->rkb_wakeup_fd[1] = -1;
-
-#ifndef _WIN32
- if ((r = rd_pipe_nonblocking(rkb->rkb_wakeup_fd)) == -1) {
- rd_rkb_log(rkb, LOG_ERR, "WAKEUPFD",
- "Failed to setup broker queue wake-up fds: "
- "%s: disabling low-latency mode",
- rd_strerror(r));
-
- } else if (source == RD_KAFKA_INTERNAL) {
- /* nop: internal broker has no IO transport. */
-
- } else {
- char onebyte = 1;
-
- rd_rkb_dbg(rkb, QUEUE, "WAKEUPFD",
- "Enabled low-latency ops queue wake-ups");
- rd_kafka_q_io_event_enable(rkb->rkb_ops, rkb->rkb_wakeup_fd[1],
- &onebyte, sizeof(onebyte));
- }
-#endif
-
- /* Lock broker's lock here to synchronise state, i.e., hold off
- * the broker thread until we've finalized the rkb. */
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_keep(rkb); /* broker thread's refcnt */
- if (thrd_create(&rkb->rkb_thread, rd_kafka_broker_thread_main, rkb) !=
- thrd_success) {
- rd_kafka_broker_unlock(rkb);
-
- rd_kafka_log(rk, LOG_CRIT, "THREAD",
- "Unable to create broker thread");
-
- /* Send ERR op back to application for processing. */
- rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
- "Unable to create broker thread");
-
- rd_free(rkb);
-
-#ifndef _WIN32
- /* Restore sigmask of caller */
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
-
- return NULL;
- }
-
- if (rkb->rkb_source != RD_KAFKA_INTERNAL) {
- if (rk->rk_conf.security_protocol ==
- RD_KAFKA_PROTO_SASL_PLAINTEXT ||
- rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL)
- rd_kafka_sasl_broker_init(rkb);
-
- /* Insert broker at head of list; the idea is that
- * newer brokers are more relevant than old ones,
- * and in particular LEARNED brokers are more relevant
- * than CONFIGURED (bootstrap) and LOGICAL brokers. */
- TAILQ_INSERT_HEAD(&rkb->rkb_rk->rk_brokers, rkb, rkb_link);
- (void)rd_atomic32_add(&rkb->rkb_rk->rk_broker_cnt, 1);
-
- if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
- rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb);
- rd_list_sort(&rkb->rkb_rk->rk_broker_by_id,
- rd_kafka_broker_cmp_by_id);
- }
-
- rd_rkb_dbg(rkb, BROKER, "BROKER",
- "Added new broker with NodeId %" PRId32,
- rkb->rkb_nodeid);
- }
-
- /* Call on_broker_state_change interceptors */
- rd_kafka_interceptors_on_broker_state_change(
- rk, rkb->rkb_nodeid, rd_kafka_secproto_names[rkb->rkb_proto],
- rkb->rkb_origname, rkb->rkb_port,
- rd_kafka_broker_state_names[rkb->rkb_state]);
-
- rd_kafka_broker_unlock(rkb);
-
- /* Add broker state monitor for the coordinator request to use.
- * This is needed by the transactions implementation and DeleteGroups.
- */
- rd_kafka_broker_monitor_add(&rkb->rkb_coord_monitor, rkb, rk->rk_ops,
- rd_kafka_coord_rkb_monitor_cb);
-
-
-#ifndef _WIN32
- /* Restore sigmask of caller */
- pthread_sigmask(SIG_SETMASK, &oldset, NULL);
-#endif
-
- return rkb;
-}
-
-
-/**
- * @brief Adds a logical broker.
- *
- * Logical brokers act just like any broker handle, but will not have
- * an initial address set. The address (or nodename, as it is called
- * internally) can be set from another broker handle
- * by calling rd_kafka_broker_set_nodename().
- *
- * This allows maintaining a logical group coordinator broker
- * handle that can move between real broker addresses.
- *
- * Logical broker constraints:
- * - will not have a broker-id set (-1).
- * - will not have a port set (0).
- * - the address for the broker may change.
- * - the name of the broker will not correspond to the address,
- * but the \p name given here.
- *
- * @returns a new broker, holding a refcount for the caller.
- *
- * @locality any rdkafka thread
- * @locks none
- */
-rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk,
- const char *name) {
- rd_kafka_broker_t *rkb;
-
- rd_kafka_wrlock(rk);
- rkb = rd_kafka_broker_add(rk, RD_KAFKA_LOGICAL,
- rk->rk_conf.security_protocol, name,
- 0 /*port*/, -1 /*brokerid*/);
- rd_assert(rkb && *"failed to create broker thread");
- rd_kafka_wrunlock(rk);
-
- rd_atomic32_add(&rk->rk_broker_addrless_cnt, 1);
-
- rd_dassert(RD_KAFKA_BROKER_IS_LOGICAL(rkb));
- rd_kafka_broker_keep(rkb);
- return rkb;
-}
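-
-/* Usage sketch (hedged): this is how a coordinator handle is typically
- * created and later pointed at a real broker; the name and the real_rkb
- * variable are illustrative:
- *
- *   rd_kafka_broker_t *coord =
- *           rd_kafka_broker_add_logical(rk, "GroupCoordinator");
- *   ...
- *   rd_kafka_broker_set_nodename(coord, real_rkb);
- */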
-
-
-/**
- * @brief Update the nodename (address) of broker \p rkb
- * with the nodename from broker \p from_rkb (may be NULL).
- *
- * If \p rkb is connected, the connection will be torn down.
- * A new connection may be attempted to the new address
- * if a persistent connection is needed (standard connection rules).
- *
- * The broker's logname is also updated to include \p from_rkb's
- * broker id.
- *
- * @param from_rkb Use the nodename from this broker. If NULL, clear
- * the \p rkb nodename.
- *
- * @remark Must only be called for logical brokers.
- *
- * @locks none
- */
-void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb,
- rd_kafka_broker_t *from_rkb) {
- char nodename[RD_KAFKA_NODENAME_SIZE];
- char brokername[RD_KAFKA_NODENAME_SIZE];
- int32_t nodeid;
- rd_bool_t changed = rd_false;
-
- rd_assert(RD_KAFKA_BROKER_IS_LOGICAL(rkb));
-
- rd_assert(rkb != from_rkb);
-
- /* Get nodename from from_rkb */
- if (from_rkb) {
- rd_kafka_broker_lock(from_rkb);
- rd_strlcpy(nodename, from_rkb->rkb_nodename, sizeof(nodename));
- nodeid = from_rkb->rkb_nodeid;
- rd_kafka_broker_unlock(from_rkb);
- } else {
- *nodename = '\0';
- nodeid = -1;
- }
-
- /* Set nodename on rkb */
- rd_kafka_broker_lock(rkb);
- if (strcmp(rkb->rkb_nodename, nodename)) {
- rd_rkb_dbg(rkb, BROKER, "NODENAME",
- "Broker nodename changed from \"%s\" to \"%s\"",
- rkb->rkb_nodename, nodename);
- rd_strlcpy(rkb->rkb_nodename, nodename,
- sizeof(rkb->rkb_nodename));
- rkb->rkb_nodename_epoch++;
- changed = rd_true;
- }
-
- if (rkb->rkb_nodeid != nodeid) {
- rd_rkb_dbg(rkb, BROKER, "NODEID",
- "Broker nodeid changed from %" PRId32 " to %" PRId32,
- rkb->rkb_nodeid, nodeid);
- rkb->rkb_nodeid = nodeid;
- }
-
- rd_kafka_broker_unlock(rkb);
-
- /* Update the log name to include (or exclude) the nodeid.
- * The nodeid is appended as "..logname../nodeid" */
- rd_kafka_mk_brokername(brokername, sizeof(brokername), rkb->rkb_proto,
- rkb->rkb_name, nodeid, rkb->rkb_source);
-
- rd_kafka_broker_set_logname(rkb, brokername);
-
- if (!changed)
- return;
-
- if (!rd_kafka_broker_is_addrless(rkb))
- rd_atomic32_sub(&rkb->rkb_rk->rk_broker_addrless_cnt, 1);
- else
- rd_atomic32_add(&rkb->rkb_rk->rk_broker_addrless_cnt, 1);
-
- /* Trigger a disconnect & reconnect */
- rd_kafka_broker_schedule_connection(rkb);
-}
-
-
-/**
- * @brief Find broker by nodeid (must not be -1), optionally
- * filtered by state (pass -1 to match any state).
- *
- * @param do_connect If sparse connections are enabled and the broker is found
- * but not up, a connection will be triggered.
- *
- * @locks: rd_kafka_*lock() MUST be held
- * @remark caller must release rkb reference by rd_kafka_broker_destroy()
- */
-rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
- int line,
- rd_kafka_t *rk,
- int32_t nodeid,
- int state,
- rd_bool_t do_connect) {
- rd_kafka_broker_t *rkb;
- rd_kafka_broker_t skel = {.rkb_nodeid = nodeid};
-
- if (rd_kafka_terminating(rk))
- return NULL;
-
- rkb = rd_list_find(&rk->rk_broker_by_id, &skel,
- rd_kafka_broker_cmp_by_id);
-
- if (!rkb)
- return NULL;
-
- if (state != -1) {
- int broker_state;
- rd_kafka_broker_lock(rkb);
- broker_state = (int)rkb->rkb_state;
- rd_kafka_broker_unlock(rkb);
-
- if (broker_state != state) {
- if (do_connect &&
- broker_state == RD_KAFKA_BROKER_STATE_INIT &&
- rk->rk_conf.sparse_connections)
- rd_kafka_broker_schedule_connection(rkb);
- return NULL;
- }
- }
-
- rd_kafka_broker_keep_fl(func, line, rkb);
- return rkb;
-}
-
-/**
- * Locks: rd_kafka_rdlock(rk) must be held
- * NOTE: caller must release rkb reference by rd_kafka_broker_destroy()
- */
-static rd_kafka_broker_t *rd_kafka_broker_find(rd_kafka_t *rk,
- rd_kafka_secproto_t proto,
- const char *name,
- uint16_t port) {
- rd_kafka_broker_t *rkb;
- char nodename[RD_KAFKA_NODENAME_SIZE];
-
- rd_kafka_mk_nodename(nodename, sizeof(nodename), name, port);
-
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
- continue;
-
- rd_kafka_broker_lock(rkb);
- if (!rd_kafka_terminating(rk) && rkb->rkb_proto == proto &&
- !strcmp(rkb->rkb_nodename, nodename)) {
- rd_kafka_broker_keep(rkb);
- rd_kafka_broker_unlock(rkb);
- return rkb;
- }
- rd_kafka_broker_unlock(rkb);
- }
-
- return NULL;
-}
-
-
-/**
- * Parse a broker host name.
- * The string 'name' is modified in place; the null-terminated host portion
- * is returned in 'host', and the parsed 'proto' and 'port' are set.
- *
- * Returns 0 on success or -1 on parse error.
- */
-static int rd_kafka_broker_name_parse(rd_kafka_t *rk,
- char **name,
- rd_kafka_secproto_t *proto,
- const char **host,
- uint16_t *port) {
- char *s = *name;
- char *orig;
- char *n, *t, *t2;
-
- /* Save a temporary copy of the original name for logging purposes */
- rd_strdupa(&orig, *name);
-
- /* Find end of this name (either by delimiter or end of string) */
- if ((n = strchr(s, ',')))
- *n = '\0';
- else
- n = s + strlen(s) - 1;
-
-
- /* Check if this looks like a URL. */
- if ((t = strstr(s, "://"))) {
- int i;
- /* "proto://host[:port]" */
-
- if (t == s) {
- rd_kafka_log(rk, LOG_WARNING, "BROKER",
- "Broker name \"%s\" parse error: "
- "empty protocol name",
- orig);
- return -1;
- }
-
- /* Make protocol uppercase */
- for (t2 = s; t2 < t; t2++)
- *t2 = toupper(*t2);
-
- *t = '\0';
-
- /* Find matching protocol by name. */
- for (i = 0; i < RD_KAFKA_PROTO_NUM; i++)
- if (!rd_strcasecmp(s, rd_kafka_secproto_names[i]))
- break;
-
- /* Unsupported protocol */
- if (i == RD_KAFKA_PROTO_NUM) {
- rd_kafka_log(rk, LOG_WARNING, "BROKER",
- "Broker name \"%s\" parse error: "
- "unsupported protocol \"%s\"",
- orig, s);
-
- return -1;
- }
-
- *proto = i;
-
- /* Enforce protocol */
- if (rk->rk_conf.security_protocol != *proto) {
- rd_kafka_log(
- rk, LOG_WARNING, "BROKER",
- "Broker name \"%s\" parse error: "
- "protocol \"%s\" does not match "
- "security.protocol setting \"%s\"",
- orig, s,
- rd_kafka_secproto_names[rk->rk_conf
- .security_protocol]);
- return -1;
- }
-
- /* Hostname starts here */
- s = t + 3;
-
- /* Ignore anything that looks like the path part of a URL */
- if ((t = strchr(s, '/')))
- *t = '\0';
-
- } else
- *proto = rk->rk_conf.security_protocol; /* Default protocol */
-
-
- *port = RD_KAFKA_PORT;
- /* Check if port has been specified, but try to identify IPv6
- * addresses first:
- * t = last ':' in string
- * t2 = first ':' in string
- * If t and t2 are equal then only one ":" exists in the name,
- * and thus it is an IPv4 address (or hostname) with a port specified.
- * Else if not equal and t is preceded by "]" then it's an
- * IPv6 address with port specified.
- * Else no port specified. */
- if ((t = strrchr(s, ':')) &&
- ((t2 = strchr(s, ':')) == t || *(t - 1) == ']')) {
- *t = '\0';
- *port = atoi(t + 1);
- }
-
- /* Empty host name -> localhost */
- if (!*s)
- s = "localhost";
-
- *host = s;
- *name = n + 1; /* past this name. e.g., next name/delimiter to parse */
-
- return 0;
-}
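-
-/* Illustrative parse results for the heuristics above (hypothetical
- * inputs, assuming the default plaintext security.protocol):
- *   "PLAINTEXT://b1:9092" -> proto=PLAINTEXT, host="b1",    port=9092
- *   "b2"                  -> default proto,   host="b2",    port=RD_KAFKA_PORT
- *   "[::1]:9092"          -> default proto,   host="[::1]", port=9092
- * In the last case the final ':' is taken as the port separator because it
- * is preceded by ']', per the IPv6 check above. */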
-
-/**
- * @brief Adds a (CSV list of) broker(s).
- * Returns the number of brokers successfully added.
- *
- * @locality any thread
- * @locks none
- */
-int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist) {
- char *s_copy = rd_strdup(brokerlist);
- char *s = s_copy;
- int cnt = 0;
- rd_kafka_broker_t *rkb;
- int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt);
-
- /* Parse comma-separated list of brokers. */
- while (*s) {
- uint16_t port;
- const char *host;
- rd_kafka_secproto_t proto;
-
- if (*s == ',' || *s == ' ') {
- s++;
- continue;
- }
-
- if (rd_kafka_broker_name_parse(rk, &s, &proto, &host, &port) ==
- -1)
- break;
-
- rd_kafka_wrlock(rk);
-
- if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) &&
- rkb->rkb_source == RD_KAFKA_CONFIGURED) {
- cnt++;
- } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, proto,
- host, port,
- RD_KAFKA_NODEID_UA) != NULL)
- cnt++;
-
- /* If rd_kafka_broker_find returned a broker, its
- * reference needs to be released.
- * See issue #193 */
- if (rkb)
- rd_kafka_broker_destroy(rkb);
-
- rd_kafka_wrunlock(rk);
- }
-
- rd_free(s_copy);
-
- if (rk->rk_conf.sparse_connections && cnt > 0 && pre_cnt == 0) {
- /* Sparse connections:
- * If this was the first set of brokers added,
- * select a random one to trigger the initial cluster
- * connection. */
- rd_kafka_rdlock(rk);
- rd_kafka_connect_any(rk, "bootstrap servers added");
- rd_kafka_rdunlock(rk);
- }
-
- return cnt;
-}
-
-
-int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist) {
- return rd_kafka_brokers_add0(rk, brokerlist);
-}
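-
-/* Usage sketch of the public API (application code; the broker addresses
- * are hypothetical):
- *
- *   int added = rd_kafka_brokers_add(rk, "b1:9092,b2:9093");
- *   if (added == 0)
- *           fprintf(stderr, "%% No valid brokers specified\n");
- */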
-
-
-/**
- * @brief Adds a new broker or updates an existing one.
- *
- * @param rkbp if non-NULL, will be set to the broker object with
- * refcount increased, or NULL on error.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_broker_update(rd_kafka_t *rk,
- rd_kafka_secproto_t proto,
- const struct rd_kafka_metadata_broker *mdb,
- rd_kafka_broker_t **rkbp) {
- rd_kafka_broker_t *rkb;
- char nodename[RD_KAFKA_NODENAME_SIZE];
- int needs_update = 0;
-
- rd_kafka_mk_nodename(nodename, sizeof(nodename), mdb->host, mdb->port);
-
- rd_kafka_wrlock(rk);
- if (unlikely(rd_kafka_terminating(rk))) {
- /* Don't update metadata while terminating; do this check
- * after acquiring the lock for proper synchronisation. */
- rd_kafka_wrunlock(rk);
- if (rkbp)
- *rkbp = NULL;
- return;
- }
-
- if ((rkb = rd_kafka_broker_find_by_nodeid(rk, mdb->id))) {
- /* Broker matched by nodeid, see if we need to update
- * the hostname. */
- if (strcmp(rkb->rkb_nodename, nodename))
- needs_update = 1;
- } else if ((rkb = rd_kafka_broker_find(rk, proto, mdb->host,
- mdb->port))) {
- /* Broker matched by hostname (but not by nodeid),
- * update the nodeid. */
- needs_update = 1;
-
- } else if ((rkb = rd_kafka_broker_add(rk, RD_KAFKA_LEARNED, proto,
- mdb->host, mdb->port, mdb->id))) {
- rd_kafka_broker_keep(rkb);
- }
-
- rd_kafka_wrunlock(rk);
-
- if (rkb) {
- /* Existing broker */
- if (needs_update) {
- rd_kafka_op_t *rko;
- rko = rd_kafka_op_new(RD_KAFKA_OP_NODE_UPDATE);
- rd_strlcpy(rko->rko_u.node.nodename, nodename,
- sizeof(rko->rko_u.node.nodename));
- rko->rko_u.node.nodeid = mdb->id;
- /* Perform a blocking op request so that all
- * broker-related state, such as the rk broker list,
- * is up to date by the time this call returns.
- * Ignore&destroy the response. */
- rd_kafka_op_err_destroy(
- rd_kafka_op_req(rkb->rkb_ops, rko, -1));
- }
- }
-
- if (rkbp)
- *rkbp = rkb;
- else if (rkb)
- rd_kafka_broker_destroy(rkb);
-}
-
-
-/**
- * @returns the broker id, or RD_KAFKA_NODEID_UA if \p rkb is NULL.
- *
- * @locality any
- * @locks_required none
- * @locks_acquired rkb_lock
- */
-int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb) {
- int32_t broker_id;
-
- if (unlikely(!rkb))
- return RD_KAFKA_NODEID_UA;
-
- /* Avoid locking if already on the broker thread */
- if (thrd_is_current(rkb->rkb_thread))
- return rkb->rkb_nodeid;
-
- rd_kafka_broker_lock(rkb);
- broker_id = rkb->rkb_nodeid;
- rd_kafka_broker_unlock(rkb);
-
- return broker_id;
-}
-
-
-/**
- * Returns a thread-safe temporary copy of the broker name.
- * Must not be called more than 4 times from the same expression.
- *
- * Locks: none
- * Locality: any thread
- */
-const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb) {
- static RD_TLS char ret[4][RD_KAFKA_NODENAME_SIZE];
- static RD_TLS int reti = 0;
-
- reti = (reti + 1) % 4;
- mtx_lock(&rkb->rkb_logname_lock);
- rd_snprintf(ret[reti], sizeof(ret[reti]), "%s", rkb->rkb_logname);
- mtx_unlock(&rkb->rkb_logname_lock);
-
- return ret[reti];
-}
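-
-/* Why at most 4 uses per expression (illustrative): the thread-local ring
- * buffer above has four slots, so up to four results remain valid at once:
- *
- *   printf("%s and %s\n", rd_kafka_broker_name(rkb_a),
- *          rd_kafka_broker_name(rkb_b));
- *
- * A fifth call in the same expression would recycle the first slot. */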
-
-
-
-/**
- * @brief Send dummy OP to broker thread to wake it up from IO sleep.
- *
- * @locality any
- * @locks any
- */
-void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_WAKEUP);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
- rd_kafka_q_enq(rkb->rkb_ops, rko);
- rd_rkb_dbg(rkb, QUEUE, "WAKEUP", "Wake-up: %s", reason);
-}
-
-/**
- * @brief Wake up all broker threads that are in at least state \p min_state
- *
- * @locality any
- * @locks none: rd_kafka_*lock() MUST NOT be held
- *
- * @returns the number of broker threads woken up
- */
-int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk,
- int min_state,
- const char *reason) {
- int cnt = 0;
- rd_kafka_broker_t *rkb;
-
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
- int do_wakeup;
-
- rd_kafka_broker_lock(rkb);
- do_wakeup = (int)rkb->rkb_state >= min_state;
- rd_kafka_broker_unlock(rkb);
-
- if (do_wakeup) {
- rd_kafka_broker_wakeup(rkb, reason);
- cnt += 1;
- }
- }
- rd_kafka_rdunlock(rk);
-
- if (cnt > 0)
- rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_QUEUE, "WAKEUP",
- "Wake-up sent to %d broker thread%s in "
- "state >= %s: %s",
- cnt, cnt > 1 ? "s" : "",
- rd_kafka_broker_state_names[min_state], reason);
-
- return cnt;
-}
-
-/**
- * @brief Filter out brokers that have had at least one connection attempt.
- */
-static int rd_kafka_broker_filter_never_connected(rd_kafka_broker_t *rkb,
- void *opaque) {
- return rd_atomic32_get(&rkb->rkb_c.connects);
-}
-
-
-/**
- * @brief Sparse connections:
- * Select a random broker to connect to if no brokers are up.
- *
- * This is a non-blocking call, the connection is
- * performed by the selected broker thread.
- *
- * @locality any
- * @locks rd_kafka_rdlock() MUST be held
- */
-void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason) {
- rd_kafka_broker_t *rkb;
- rd_ts_t suppr;
-
- /* Don't count connections to logical brokers since they serve
- * a specific purpose (group coordinator) and their connections
- * should not be reused for other purposes.
- * rd_kafka_broker_random() will not return LOGICAL brokers. */
- if (rd_atomic32_get(&rk->rk_broker_up_cnt) -
- rd_atomic32_get(&rk->rk_logical_broker_up_cnt) >
- 0 ||
- rd_atomic32_get(&rk->rk_broker_cnt) -
- rd_atomic32_get(&rk->rk_broker_addrless_cnt) ==
- 0)
- return;
-
- mtx_lock(&rk->rk_suppress.sparse_connect_lock);
- suppr = rd_interval(&rk->rk_suppress.sparse_connect_random,
- rk->rk_conf.sparse_connect_intvl * 1000, 0);
- mtx_unlock(&rk->rk_suppress.sparse_connect_lock);
-
- if (suppr <= 0) {
- rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT",
- "Not selecting any broker for cluster connection: "
- "still suppressed for %" PRId64 "ms: %s",
- -suppr / 1000, reason);
- return;
- }
-
- /* First pass: only match brokers never connected to,
- * to try to exhaust the available brokers
- * so that an ERR_ALL_BROKERS_DOWN error can be raised. */
- rkb = rd_kafka_broker_random(rk, RD_KAFKA_BROKER_STATE_INIT,
- rd_kafka_broker_filter_never_connected,
- NULL);
- /* Second pass: match any non-connected/non-connecting broker. */
- if (!rkb)
- rkb = rd_kafka_broker_random(rk, RD_KAFKA_BROKER_STATE_INIT,
- NULL, NULL);
-
- if (!rkb) {
- /* No brokers matched:
- * this happens if there are brokers in > INIT state,
- * in which case they're already connecting. */
-
- rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT",
- "Cluster connection already in progress: %s",
- reason);
- return;
- }
-
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT",
- "Selected for cluster connection: "
- "%s (broker has %d connection attempt(s))",
- reason, rd_atomic32_get(&rkb->rkb_c.connects));
-
- rd_kafka_broker_schedule_connection(rkb);
-
- rd_kafka_broker_destroy(rkb); /* refcnt from ..broker_random() */
-}
-
-
-
-/**
- * @brief Send PURGE queue request to broker.
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb,
- int purge_flags,
- rd_kafka_replyq_t replyq) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
- rko->rko_replyq = replyq;
- rko->rko_u.purge.flags = purge_flags;
- rd_kafka_q_enq(rkb->rkb_ops, rko);
-}
-
-
-/**
- * @brief Handle purge queues request
- *
- * @locality broker thread
- * @locks none
- */
-static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko) {
- int purge_flags = rko->rko_u.purge.flags;
- int inflight_cnt = 0, retry_cnt = 0, outq_cnt = 0, partial_cnt = 0;
-
- rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGE",
- "Purging queues with flags %s",
- rd_kafka_purge_flags2str(purge_flags));
-
-
- /* First purge any Produce requests to move their
- * messages from the requests' message queues to delivery reports. */
-
- /* Purge in-flight ProduceRequests */
- if (purge_flags & RD_KAFKA_PURGE_F_INFLIGHT)
- inflight_cnt = rd_kafka_broker_bufq_timeout_scan(
- rkb, 1, &rkb->rkb_waitresps, NULL, RD_KAFKAP_Produce,
- RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, 0, NULL, 0);
-
- if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) {
- /* Requests in retry queue */
- retry_cnt = rd_kafka_broker_bufq_timeout_scan(
- rkb, 0, &rkb->rkb_retrybufs, NULL, RD_KAFKAP_Produce,
- RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0);
-
- /* Requests in transmit queue not completely sent yet.
- * partial_cnt is included in outq_cnt and counts requests
- * that have been partially transmitted. */
- outq_cnt = rd_kafka_broker_bufq_timeout_scan(
- rkb, 0, &rkb->rkb_outbufs, &partial_cnt, RD_KAFKAP_Produce,
- RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0);
-
- /* Purging a partially transmitted request will mess up
- * the protocol stream, so we need to disconnect from the broker
- * to get a clean protocol socket. */
- if (partial_cnt)
- rd_kafka_broker_fail(
- rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__PURGE_QUEUE,
- "Purged %d partially sent request: "
- "forcing disconnect",
- partial_cnt);
- }
-
- rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
- "Purged %i in-flight, %i retry-queued, "
- "%i out-queue, %i partially-sent requests",
- inflight_cnt, retry_cnt, outq_cnt, partial_cnt);
-
- /* Purge partition queues */
- if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) {
- rd_kafka_toppar_t *rktp;
- int msg_cnt = 0;
- int part_cnt = 0;
-
- TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
- int r;
-
- r = rd_kafka_toppar_purge_queues(
- rktp, purge_flags, rd_true /*include xmit msgq*/);
- if (r > 0) {
- msg_cnt += r;
- part_cnt++;
- }
- }
-
- rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
- "Purged %i message(s) from %d partition(s)", msg_cnt,
- part_cnt);
- }
-
- rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
-}
-
-
-/**
- * @brief Add toppar to broker's active list.
- *
- * For consumers this means the fetch list.
- * For producers this is all partitions assigned to this broker.
- *
- * @locality broker thread
- * @locks rktp_lock MUST be held
- */
-void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const char *reason) {
- int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER;
-
- if (is_consumer && rktp->rktp_fetch)
- return; /* Already added */
-
- CIRCLEQ_INSERT_TAIL(&rkb->rkb_active_toppars, rktp, rktp_activelink);
- rkb->rkb_active_toppar_cnt++;
-
- if (is_consumer)
- rktp->rktp_fetch = 1;
-
- if (unlikely(rkb->rkb_active_toppar_cnt == 1))
- rd_kafka_broker_active_toppar_next(rkb, rktp);
-
- rd_rkb_dbg(rkb, TOPIC, "FETCHADD",
- "Added %.*s [%" PRId32
- "] to %s list (%d entries, opv %d, "
- "%d messages queued): %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, is_consumer ? "fetch" : "active",
- rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version,
- rd_kafka_msgq_len(&rktp->rktp_msgq), reason);
-}
-
-
-/**
- * @brief Remove toppar from active list.
- *
- * Locality: broker thread
- * Locks: none
- */
-void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const char *reason) {
- int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER;
-
- if (is_consumer && !rktp->rktp_fetch)
- return; /* Not added */
-
- CIRCLEQ_REMOVE(&rkb->rkb_active_toppars, rktp, rktp_activelink);
- rd_kafka_assert(NULL, rkb->rkb_active_toppar_cnt > 0);
- rkb->rkb_active_toppar_cnt--;
-
- if (is_consumer)
- rktp->rktp_fetch = 0;
-
- if (rkb->rkb_active_toppar_next == rktp) {
- /* Update next pointer */
- rd_kafka_broker_active_toppar_next(
- rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
- rktp_activelink));
- }
-
- rd_rkb_dbg(rkb, TOPIC, "FETCHADD",
- "Removed %.*s [%" PRId32
- "] from %s list "
- "(%d entries, opv %d): %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, is_consumer ? "fetch" : "active",
- rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version,
- reason);
-}
-
-
-/**
- * @brief Schedule connection for \p rkb.
- * Will trigger disconnection for logical brokers whose nodename
- * was changed.
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_CONNECT);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
- rd_kafka_q_enq(rkb->rkb_ops, rko);
-}
-
-
-/**
- * @brief Add need for persistent connection to \p rkb
- * with rkb_persistconn atomic counter \p acntp
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb,
- rd_atomic32_t *acntp) {
-
- if (rd_atomic32_add(acntp, 1) == 1) {
- /* First one, trigger event. */
- rd_kafka_broker_schedule_connection(rkb);
- }
-}
-
-
-/**
- * @brief Remove need for persistent connection to \p rkb
- * with rkb_persistconn atomic counter \p acntp
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb,
- rd_atomic32_t *acntp) {
- int32_t r = rd_atomic32_sub(acntp, 1);
- rd_assert(r >= 0);
-}
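-
-/* Illustrative pairing (hypothetical call site): every _add() on a counter
- * must eventually be matched by a _del() on the same counter, e.g. for the
- * coordinator counter initialised in rd_kafka_broker_add():
- *
- *   rd_kafka_broker_persistent_connection_add(
- *           rkb, &rkb->rkb_persistconn.coord);
- *   ...
- *   rd_kafka_broker_persistent_connection_del(
- *           rkb, &rkb->rkb_persistconn.coord);
- */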
-
-
-
-/**
- * @brief OP_BROKER_MONITOR callback trampoline which
- * calls the rkbmon's callback.
- *
- * @locality monitoree's op handler thread
- * @locks none
- */
-static rd_kafka_op_res_t rd_kafka_broker_monitor_op_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY)
- rko->rko_u.broker_monitor.cb(rko->rko_u.broker_monitor.rkb);
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-/**
- * @brief Trigger ops for registered monitors when the broker
- * state goes from or to UP.
- *
- * @locality broker thread
- * @locks rkb_lock MUST be held
- */
-static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb) {
- rd_kafka_broker_monitor_t *rkbmon;
-
- TAILQ_FOREACH(rkbmon, &rkb->rkb_monitors, rkbmon_link) {
- rd_kafka_op_t *rko =
- rd_kafka_op_new_cb(rkb->rkb_rk, RD_KAFKA_OP_BROKER_MONITOR,
- rd_kafka_broker_monitor_op_cb);
- rd_kafka_broker_keep(rkb);
- rko->rko_u.broker_monitor.rkb = rkb;
- rko->rko_u.broker_monitor.cb = rkbmon->rkbmon_cb;
- rd_kafka_q_enq(rkbmon->rkbmon_q, rko);
- }
-}
-
-
-/**
- * @brief Adds a monitor for when the broker goes up or down.
- *
- * The callback will be triggered on the caller's op queue handler thread.
- *
- * Use rd_kafka_broker_is_up() in your callback to get the current
- * state of the broker, since it might have changed since the event
- * was enqueued.
- *
- * @param rkbmon monitoree's monitor.
- * @param rkb broker to monitor.
- * @param rkq queue for event op.
- * @param callback callback to be triggered from \p rkq's op handler.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,
- rd_kafka_broker_t *rkb,
- rd_kafka_q_t *rkq,
- void (*callback)(rd_kafka_broker_t *rkb)) {
- rd_assert(!rkbmon->rkbmon_rkb);
- rkbmon->rkbmon_rkb = rkb;
- rkbmon->rkbmon_q = rkq;
- rd_kafka_q_keep(rkbmon->rkbmon_q);
- rkbmon->rkbmon_cb = callback;
-
- rd_kafka_broker_keep(rkb);
-
- rd_kafka_broker_lock(rkb);
- TAILQ_INSERT_TAIL(&rkb->rkb_monitors, rkbmon, rkbmon_link);
- rd_kafka_broker_unlock(rkb);
-}
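-
-/* Usage sketch, mirroring the coordinator monitor registration performed
- * in rd_kafka_broker_add() above:
- *
- *   rd_kafka_broker_monitor_add(&rkb->rkb_coord_monitor, rkb, rk->rk_ops,
- *                               rd_kafka_coord_rkb_monitor_cb);
- *
- * The callback then runs on rk_ops' handler thread; call
- * rd_kafka_broker_is_up() inside it to read the broker's current state. */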
-
-
-/**
- * @brief Removes a monitor previously added with
- * rd_kafka_broker_monitor_add().
- *
- * @warning The rkbmon's callback may still be called after
- * _del() has been called due to the buffering nature
- * of op queues.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon) {
- rd_kafka_broker_t *rkb = rkbmon->rkbmon_rkb;
-
- if (!rkb)
- return;
-
- rd_kafka_broker_lock(rkb);
- rkbmon->rkbmon_rkb = NULL;
- rd_kafka_q_destroy(rkbmon->rkbmon_q);
- TAILQ_REMOVE(&rkb->rkb_monitors, rkbmon, rkbmon_link);
- rd_kafka_broker_unlock(rkb);
-
- rd_kafka_broker_destroy(rkb);
-}
-
-/**
- * @name Unit tests
- * @{
- *
- */
-int unittest_broker(void) {
- int fails = 0;
-
- fails += rd_ut_reconnect_backoff();
-
- return fails;
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h
deleted file mode 100644
index 1e454d4d7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_BROKER_H_
-#define _RDKAFKA_BROKER_H_
-
-#include "rdkafka_feature.h"
-
-
-extern const char *rd_kafka_broker_state_names[];
-extern const char *rd_kafka_secproto_names[];
-
-
-/**
- * @enum Broker states
- */
-typedef enum {
- RD_KAFKA_BROKER_STATE_INIT,
- RD_KAFKA_BROKER_STATE_DOWN,
- RD_KAFKA_BROKER_STATE_TRY_CONNECT,
- RD_KAFKA_BROKER_STATE_CONNECT,
- RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE,
- RD_KAFKA_BROKER_STATE_AUTH_LEGACY,
-
- /* Any state >= STATE_UP means the Kafka protocol layer
- * is operational (to some degree). */
- RD_KAFKA_BROKER_STATE_UP,
- RD_KAFKA_BROKER_STATE_UPDATE,
- RD_KAFKA_BROKER_STATE_APIVERSION_QUERY,
- RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE,
- RD_KAFKA_BROKER_STATE_AUTH_REQ,
-} rd_kafka_broker_state_t;
-
-/**
- * @struct Broker state monitor.
- *
- * @warning The monitor object lifetime should be the same as
- * the rd_kafka_t object, not shorter.
- */
-typedef struct rd_kafka_broker_monitor_s {
- TAILQ_ENTRY(rd_kafka_broker_monitor_s) rkbmon_link; /**< rkb_monitors*/
- struct rd_kafka_broker_s *rkbmon_rkb; /**< Broker being monitored. */
- rd_kafka_q_t *rkbmon_q; /**< Queue to enqueue op on. */
-
- /**< Callback triggered on the monitoree's op handler thread.
- * Do note that the callback might be triggered even after
- * the monitor has been deleted, due to the queueing nature of op queues. */
- void (*rkbmon_cb)(rd_kafka_broker_t *rkb);
-} rd_kafka_broker_monitor_t;
-
-
-/**
- * @struct Broker instance
- */
-struct rd_kafka_broker_s { /* rd_kafka_broker_t */
- TAILQ_ENTRY(rd_kafka_broker_s) rkb_link;
-
- int32_t rkb_nodeid; /**< Broker Node Id.
- * @locks rkb_lock */
-#define RD_KAFKA_NODEID_UA -1
-
- rd_sockaddr_list_t *rkb_rsal;
- rd_ts_t rkb_ts_rsal_last;
- const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */
-
- rd_kafka_transport_t *rkb_transport;
-
- uint32_t rkb_corrid;
- int rkb_connid; /* Connection id, increased by
- * one for each connection by
- * this broker. Used as a safeguard
- * to help troubleshoot buffer
- * problems across disconnects. */
-
- rd_kafka_q_t *rkb_ops;
-
- mtx_t rkb_lock;
-
- int rkb_blocking_max_ms; /* Maximum IO poll blocking
- * time. */
-
- /* Toppars handled by this broker */
- TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars;
- int rkb_toppar_cnt;
-
- /* Active toppars that are eligible for:
- * - (consumer) fetching due to underflow
- * - (producer) producing
- *
- * The circleq provides round-robin scheduling for both cases.
- */
- CIRCLEQ_HEAD(, rd_kafka_toppar_s) rkb_active_toppars;
- int rkb_active_toppar_cnt;
- rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar
- * in fetch list.
- * This is used for
- * round-robin. */
-
-
- rd_kafka_cgrp_t *rkb_cgrp;
-
- rd_ts_t rkb_ts_fetch_backoff;
- int rkb_fetching;
-
- rd_kafka_broker_state_t rkb_state; /**< Current broker state */
-
- rd_ts_t rkb_ts_state; /* Timestamp of last
- * state change */
- rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan
- * interval. */
-
- rd_atomic32_t rkb_blocking_request_cnt; /* The number of
- * in-flight blocking
- * requests.
- * A blocking request is
- * one that is known to
- * possibly block on the
- * broker for longer than
- * the typical processing
- * time, e.g.:
- * JoinGroup, SyncGroup */
-
- int rkb_features; /* Protocol features supported
- * by this broker.
- * See RD_KAFKA_FEATURE_* in
- * rdkafka_proto.h */
-
- struct rd_kafka_ApiVersion *rkb_ApiVersions; /* Broker's supported APIs
- * (MUST be sorted) */
- size_t rkb_ApiVersions_cnt;
- rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long
- * the fallback proto
- * will be used after
- * ApiVersionRequest
- * failure. */
-
- rd_kafka_confsource_t rkb_source;
- struct {
- rd_atomic64_t tx_bytes;
- rd_atomic64_t tx; /**< Kafka requests */
- rd_atomic64_t tx_err;
- rd_atomic64_t tx_retries;
- rd_atomic64_t req_timeouts; /* Accumulated value */
-
- rd_atomic64_t rx_bytes;
- rd_atomic64_t rx; /**< Kafka responses */
- rd_atomic64_t rx_err;
- rd_atomic64_t rx_corrid_err; /* CorrId misses */
- rd_atomic64_t rx_partial; /* Partial messages received
- * and dropped. */
- rd_atomic64_t zbuf_grow; /* Compression/decompression buffer
- grows needed */
- rd_atomic64_t buf_grow; /* rkbuf grows needed */
- rd_atomic64_t wakeups; /* Poll wakeups */
-
- rd_atomic32_t connects; /**< Connection attempts,
- * successful or not. */
-
- rd_atomic32_t disconnects; /**< Disconnects.
- * Always peer-triggered. */
-
- rd_atomic64_t reqtype[RD_KAFKAP__NUM]; /**< Per request-type
- * counter */
-
- rd_atomic64_t ts_send; /**< Timestamp of last send */
- rd_atomic64_t ts_recv; /**< Timestamp of last receive */
- } rkb_c;
-
- int rkb_req_timeouts; /* Current value */
-
- thrd_t rkb_thread;
-
- rd_refcnt_t rkb_refcnt;
-
- rd_kafka_t *rkb_rk;
-
- rd_kafka_buf_t *rkb_recv_buf;
-
- int rkb_max_inflight; /* Maximum number of in-flight
- * requests to broker.
- * Compared to rkb_waitresps length.*/
- rd_kafka_bufq_t rkb_outbufs;
- rd_kafka_bufq_t rkb_waitresps;
- rd_kafka_bufq_t rkb_retrybufs;
-
- rd_avg_t rkb_avg_int_latency; /* Current internal latency period*/
- rd_avg_t rkb_avg_outbuf_latency; /**< Current latency
- * between buf_enq0
- * and writing to socket
- */
- rd_avg_t rkb_avg_rtt; /* Current RTT period */
- rd_avg_t rkb_avg_throttle; /* Current throttle period */
-
- /* These are all protected by rkb_lock */
- char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */
- char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/
- uint16_t rkb_port; /* TCP port */
- char *rkb_origname; /* Original
- * host name */
- int rkb_nodename_epoch; /**< Bumped each time
- * the nodename is changed.
- * Compared to
- * rkb_connect_epoch
- * to trigger a reconnect
- * for logical broker
- * when the nodename is
- * updated. */
- int rkb_connect_epoch; /**< The value of
- * rkb_nodename_epoch at the
- * last connection attempt.
- */
-
- /* Logging name is a copy of rkb_name, protected by its own mutex */
- char *rkb_logname;
- mtx_t rkb_logname_lock;
-
- rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake
- * up from IO-wait when
- * queues have content. */
-
- /**< Current, exponentially increased, reconnect backoff. */
- int rkb_reconnect_backoff_ms;
-
- /**< Absolute timestamp of next allowed reconnect. */
- rd_ts_t rkb_ts_reconnect;
-
- /** Absolute time of last connection attempt. */
- rd_ts_t rkb_ts_connect;
-
- /**< Persistent connection demand is tracked by
- * a counter for each type of demand.
- * The broker thread will maintain a persistent connection
- * if any of the counters are non-zero, and revert to
- * on-demand mode when they all reach zero.
- * After incrementing any of the counters a broker wakeup
- * should be signalled to expedite handling. */
- struct {
- /**< Producer: partitions are being produced to.
- * Consumer: partitions are being fetched from.
- *
- * Counter is maintained by the broker handler thread
- * itself, no need for atomic/locking.
- * Is reset to 0 on each producer|consumer_serve() loop
- * and updated according to current need, which
- * will trigger a state transition to
- * TRY_CONNECT if a connection is needed. */
- int internal;
-
- /**< Consumer: Broker is the group coordinator.
- * Counter is maintained by cgrp logic in
- * rdkafka main thread.
- *
- * Producer: Broker is the transaction coordinator.
- * Counter is maintained by rdkafka_idempotence.c.
- *
- * All: A coord_req_t is waiting for this broker to come up.
- */
-
- rd_atomic32_t coord;
- } rkb_persistconn;
-
- /**< Currently registered state monitors.
- * @locks rkb_lock */
- TAILQ_HEAD(, rd_kafka_broker_monitor_s) rkb_monitors;
-
- /**< Coordinator request's broker monitor.
- * Will trigger the coord_req fsm on broker state change. */
- rd_kafka_broker_monitor_t rkb_coord_monitor;
-
- rd_kafka_secproto_t rkb_proto;
-
- int rkb_down_reported; /* Down event reported */
-#if WITH_SASL_CYRUS
- rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr;
-#endif
-
-
- /*
- * Log suppression
- */
- struct {
- /**< Log: compression type not supported by broker. */
- rd_interval_t unsupported_compression;
-
- /**< Log: KIP-62 not supported by broker. */
- rd_interval_t unsupported_kip62;
-
- /**< Log: KIP-345 not supported by broker. */
- rd_interval_t unsupported_kip345;
-
- /**< Log & Error: identical broker_fail() errors. */
- rd_interval_t fail_error;
- } rkb_suppress;
-
- /** Last error. This is used to suppress repeated logs. */
- struct {
- char errstr[512]; /**< Last error string */
- rd_kafka_resp_err_t err; /**< Last error code */
- int cnt; /**< Number of identical errors */
- } rkb_last_err;
-};
-
-#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt)
-#define rd_kafka_broker_keep_fl(FUNC, LINE, RKB) \
- rd_refcnt_add_fl(FUNC, LINE, &(RKB)->rkb_refcnt)
-#define rd_kafka_broker_lock(rkb) mtx_lock(&(rkb)->rkb_lock)
-#define rd_kafka_broker_unlock(rkb) mtx_unlock(&(rkb)->rkb_lock)
-
-
-/**
- * @brief Locks the broker, reads its current state, unlocks, and returns
- *        the state.
- * @locks broker_lock MUST NOT be held.
- * @locality any
- */
-static RD_INLINE RD_UNUSED rd_kafka_broker_state_t
-rd_kafka_broker_get_state(rd_kafka_broker_t *rkb) {
- rd_kafka_broker_state_t state;
- rd_kafka_broker_lock(rkb);
- state = rkb->rkb_state;
- rd_kafka_broker_unlock(rkb);
- return state;
-}
-
-
-
-/**
- * @returns true if the broker state is UP or UPDATE
- */
-#define rd_kafka_broker_state_is_up(state) \
- ((state) == RD_KAFKA_BROKER_STATE_UP || \
- (state) == RD_KAFKA_BROKER_STATE_UPDATE)
-
-
-/**
- * @returns true if the broker connection is up, else false.
- * @locks broker_lock MUST NOT be held.
- * @locality any
- */
-static RD_UNUSED RD_INLINE rd_bool_t
-rd_kafka_broker_is_up(rd_kafka_broker_t *rkb) {
- rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb);
- return rd_kafka_broker_state_is_up(state);
-}
-
-
-/**
- * @brief Broker comparator
- */
-static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp(const void *_a,
- const void *_b) {
- const rd_kafka_broker_t *a = _a, *b = _b;
- return RD_CMP(a, b);
-}
-
-
-/**
- * @returns true if broker supports \p features, else false.
- */
-static RD_UNUSED int rd_kafka_broker_supports(rd_kafka_broker_t *rkb,
- int features) {
- const rd_bool_t do_lock = !thrd_is_current(rkb->rkb_thread);
- int r;
-
- if (do_lock)
- rd_kafka_broker_lock(rkb);
-
- r = (rkb->rkb_features & features) == features;
-
- if (do_lock)
- rd_kafka_broker_unlock(rkb);
- return r;
-}
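-
-/* Illustration only (not part of librdkafka): a caller would typically
- * gate a request on one or more RD_KAFKA_FEATURE_* bits (see
- * rdkafka_proto.h), e.g. the idempotent-producer feature used by the
- * filter further below: */
-#if 0
-if (rd_kafka_broker_supports(rkb, RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER)) {
-        /* Broker advertises the feature: safe to send dependent requests. */
-}
-#endif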
-
-int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
- int16_t ApiKey,
- int16_t minver,
- int16_t maxver,
- int *featuresp);
-
-rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
- int line,
- rd_kafka_t *rk,
- int32_t nodeid,
- int state,
- rd_bool_t do_connect);
-
-#define rd_kafka_broker_find_by_nodeid0(rk, nodeid, state, do_connect) \
- rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__, __LINE__, rk, nodeid, \
- state, do_connect)
-#define rd_kafka_broker_find_by_nodeid(rk, nodeid) \
- rd_kafka_broker_find_by_nodeid0(rk, nodeid, -1, rd_false)
-
-
-/**
- * Filter out brokers that don't support Idempotent Producer.
- */
-static RD_INLINE RD_UNUSED int
-rd_kafka_broker_filter_non_idempotent(rd_kafka_broker_t *rkb, void *opaque) {
- return !(rkb->rkb_features & RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER);
-}
-
-
-rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk,
- int state,
- int (*filter)(rd_kafka_broker_t *rkb,
- void *opaque),
- void *opaque,
- const char *reason);
-rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk,
- int *filtered_cnt,
- int (*filter)(rd_kafka_broker_t *rkb,
- void *opaque),
- void *opaque,
- const char *reason);
-rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk,
- int timeout_ms,
- rd_dolock_t do_lock,
- int features,
- const char *reason);
-
-rd_kafka_broker_t *
-rd_kafka_broker_prefer(rd_kafka_t *rk, int32_t broker_id, int state);
-
-rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk,
- int32_t broker_id,
- int state,
- rd_kafka_enq_once_t *eonce);
-
-rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk,
- rd_kafka_enq_once_t *eonce);
-
-rd_kafka_broker_t *
-rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout);
-rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk,
- int state,
- rd_kafka_enq_once_t *eonce);
-
-int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist);
-void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state);
-
-void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
- int level,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) RD_FORMAT(printf, 4, 5);
-
-void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- const char *errstr);
-
-void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb);
-
-#define rd_kafka_broker_destroy(rkb) \
- rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \
- rd_kafka_broker_destroy_final(rkb))
-
-
-void rd_kafka_broker_update(rd_kafka_t *rk,
- rd_kafka_secproto_t proto,
- const struct rd_kafka_metadata_broker *mdb,
- rd_kafka_broker_t **rkbp);
-rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk,
- rd_kafka_confsource_t source,
- rd_kafka_secproto_t proto,
- const char *name,
- uint16_t port,
- int32_t nodeid);
-
-rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk,
- const char *name);
-
-/** @returns true if the broker is logical. No locking is needed. */
-#define RD_KAFKA_BROKER_IS_LOGICAL(rkb) ((rkb)->rkb_source == RD_KAFKA_LOGICAL)
-
-void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb,
- rd_kafka_broker_t *from_rkb);
-
-void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb);
-void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr);
-
-int rd_kafka_send(rd_kafka_broker_t *rkb);
-int rd_kafka_recv(rd_kafka_broker_t *rkb);
-
-void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_resp_err_t err);
-
-void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- uint64_t last_msgid);
-
-void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
-
-
-rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk);
-
-void msghdr_print(rd_kafka_t *rk,
- const char *what,
- const struct msghdr *msg,
- int hexdump);
-
-int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb);
-const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb);
-void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason);
-int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk,
- int min_state,
- const char *reason);
-
-void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason);
-
-void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb,
- int purge_flags,
- rd_kafka_replyq_t replyq);
-
-int rd_kafka_brokers_get_state_version(rd_kafka_t *rk);
-int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk,
- int stored_version,
- int timeout_ms);
-int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
- int stored_version,
- rd_kafka_enq_once_t *eonce);
-void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk);
-
-
-
-/**
- * Updates the current toppar active round-robin next pointer.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_broker_active_toppar_next(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *sugg_next) {
- if (CIRCLEQ_EMPTY(&rkb->rkb_active_toppars) ||
- (void *)sugg_next == CIRCLEQ_ENDC(&rkb->rkb_active_toppars))
- rkb->rkb_active_toppar_next = NULL;
- else if (sugg_next)
- rkb->rkb_active_toppar_next = sugg_next;
- else
- rkb->rkb_active_toppar_next =
- CIRCLEQ_FIRST(&rkb->rkb_active_toppars);
-}
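-
-/* Illustration only (not part of librdkafka): passing NULL restarts the
- * round-robin at the head of the circleq on the next serve pass, while
- * passing a specific toppar makes it the next starting point (unless it
- * is the list's end marker, in which case the next pointer is cleared): */
-#if 0
-rd_kafka_broker_active_toppar_next(rkb, NULL); /* next pass starts at head */
-#endif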
-
-
-void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const char *reason);
-
-void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const char *reason);
-
-
-void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb);
-
-void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb,
- rd_atomic32_t *acntp);
-
-void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb,
- rd_atomic32_t *acntp);
-
-
-void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,
- rd_kafka_broker_t *rkb,
- rd_kafka_q_t *rkq,
- void (*callback)(rd_kafka_broker_t *rkb));
-
-void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon);
-
-int unittest_broker(void);
-
-#endif /* _RDKAFKA_BROKER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c
deleted file mode 100644
index 5a0e131e8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_buf.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_interceptor.h"
-
-void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) {
-
- switch (rkbuf->rkbuf_reqhdr.ApiKey) {
- case RD_KAFKAP_Metadata:
- if (rkbuf->rkbuf_u.Metadata.topics)
- rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics);
- if (rkbuf->rkbuf_u.Metadata.reason)
- rd_free(rkbuf->rkbuf_u.Metadata.reason);
- if (rkbuf->rkbuf_u.Metadata.rko)
- rd_kafka_op_reply(rkbuf->rkbuf_u.Metadata.rko,
- RD_KAFKA_RESP_ERR__DESTROY);
- if (rkbuf->rkbuf_u.Metadata.decr) {
- /* Decrease metadata cache's full_.._sent state. */
- mtx_lock(rkbuf->rkbuf_u.Metadata.decr_lock);
- rd_kafka_assert(NULL,
- (*rkbuf->rkbuf_u.Metadata.decr) > 0);
- (*rkbuf->rkbuf_u.Metadata.decr)--;
- mtx_unlock(rkbuf->rkbuf_u.Metadata.decr_lock);
- }
- break;
-
- case RD_KAFKAP_Produce:
- rd_kafka_msgbatch_destroy(&rkbuf->rkbuf_batch);
- break;
- }
-
- if (rkbuf->rkbuf_response)
- rd_kafka_buf_destroy(rkbuf->rkbuf_response);
-
- if (rkbuf->rkbuf_make_opaque && rkbuf->rkbuf_free_make_opaque_cb)
- rkbuf->rkbuf_free_make_opaque_cb(rkbuf->rkbuf_make_opaque);
-
- rd_kafka_replyq_destroy(&rkbuf->rkbuf_replyq);
- rd_kafka_replyq_destroy(&rkbuf->rkbuf_orig_replyq);
-
- rd_buf_destroy(&rkbuf->rkbuf_buf);
-
- if (rkbuf->rkbuf_rktp_vers)
- rd_list_destroy(rkbuf->rkbuf_rktp_vers);
-
- if (rkbuf->rkbuf_rkb)
- rd_kafka_broker_destroy(rkbuf->rkbuf_rkb);
-
- rd_refcnt_destroy(&rkbuf->rkbuf_refcnt);
-
- rd_free(rkbuf);
-}
-
-
-
-/**
- * @brief Pushes \p buf of size \p len as a new segment on the buffer.
- *
- * \p buf will NOT be freed by the buffer.
- */
-void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf,
- const void *buf,
- size_t len,
- int allow_crc_calc,
- void (*free_cb)(void *)) {
- rd_buf_push(&rkbuf->rkbuf_buf, buf, len, free_cb);
-
- if (allow_crc_calc && (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC))
- rkbuf->rkbuf_crc = rd_crc32_update(rkbuf->rkbuf_crc, buf, len);
-}
-
-
-
-/**
- * @brief Create a new buffer with \p segcnt initial segments and \p size bytes
- * of initial backing memory.
- * The underlying buffer will grow as needed.
- *
- * @remark No protocol header space is reserved here; request buffers
- *         with headers are created via rd_kafka_buf_new_request0().
- */
-rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) {
- rd_kafka_buf_t *rkbuf;
-
- rkbuf = rd_calloc(1, sizeof(*rkbuf));
-
- rkbuf->rkbuf_flags = flags;
-
- rd_buf_init(&rkbuf->rkbuf_buf, segcnt, size);
- rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);
-
- return rkbuf;
-}
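-
-/* Illustration only (not part of librdkafka): a plain buffer with one
- * pre-allocated segment; it grows as needed and is freed once its
- * refcount reaches zero (rd_kafka_buf_new() in rdkafka_buf.h is a thin
- * wrapper over this): */
-#if 0
-rd_kafka_buf_t *rkbuf = rd_kafka_buf_new0(1 /*segcnt*/, 64 /*size*/, 0);
-/* ... write payload ... */
-rd_kafka_buf_destroy(rkbuf); /* drops the initial refcount */
-#endif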
-
-
-/**
- * @brief Create a new request buffer with the request header written (it
- *        will need to be updated with Length, ApiVersion, CorrId, etc., later)
- */
-rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
- int16_t ApiKey,
- int segcnt,
- size_t size,
- rd_bool_t is_flexver) {
- rd_kafka_buf_t *rkbuf;
-
- /* Make room for common protocol request headers */
- size += RD_KAFKAP_REQHDR_SIZE +
- RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_client_id) +
- /* Flexible version adds a tag list to the headers
- * and to the end of the payload, both of which we send
- * as empty (1 byte each). */
- (is_flexver ? 1 + 1 : 0);
- segcnt += 1; /* headers */
-
- rkbuf = rd_kafka_buf_new0(segcnt, size, 0);
-
- rkbuf->rkbuf_rkb = rkb;
- rd_kafka_broker_keep(rkb);
-
- rkbuf->rkbuf_rel_timeout = rkb->rkb_rk->rk_conf.socket_timeout_ms;
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_DEFAULT_RETRIES;
-
- rkbuf->rkbuf_reqhdr.ApiKey = ApiKey;
-
- /* Write request header, will be updated later. */
- /* Length: updated later */
- rd_kafka_buf_write_i32(rkbuf, 0);
- /* ApiKey */
- rd_kafka_buf_write_i16(rkbuf, rkbuf->rkbuf_reqhdr.ApiKey);
- /* ApiVersion: updated later */
- rd_kafka_buf_write_i16(rkbuf, 0);
- /* CorrId: updated later */
- rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* ClientId */
- rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_client_id);
-
- if (is_flexver) {
- /* Must set flexver after writing the client id since
- * it is still a standard non-compact string. */
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;
-
- /* Empty request header tags */
- rd_kafka_buf_write_i8(rkbuf, 0);
- }
-
- return rkbuf;
-}
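-
-/* Illustration only (not part of librdkafka): a hypothetical request
- * sketch: build a flexible-version request, pin its ApiVersion, write the
- * body, then enqueue it with a response callback. `my_resp_cb` and the
- * version number are placeholders. */
-#if 0
-rd_kafka_buf_t *rkbuf = rd_kafka_buf_new_request0(
-    rkb, RD_KAFKAP_Metadata, 1 /*segcnt*/, 64 /*size*/, rd_true /*flexver*/);
-rd_kafka_buf_ApiVersion_set(rkbuf, 9 /*hypothetical*/, 0 /*features*/);
-/* ... write the request body ... */
-rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, RD_KAFKA_NO_REPLYQ, my_resp_cb,
-                               NULL /*opaque*/);
-#endif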
-
-
-
-/**
- * @brief Create new read-only rkbuf shadowing a memory region.
- *
- * @remark \p free_cb (possibly NULL) will be used to free \p ptr when
- * buffer refcount reaches 0.
- * @remark the buffer may only be read from, not written to.
- *
- * @warning If the caller has log_decode_errors > 0 then it must set up
- * \c rkbuf->rkbuf_rkb to a refcnt-increased broker object.
- */
-rd_kafka_buf_t *
-rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)) {
- rd_kafka_buf_t *rkbuf;
-
- rkbuf = rd_calloc(1, sizeof(*rkbuf));
-
- rkbuf->rkbuf_reqhdr.ApiKey = RD_KAFKAP_None;
-
- rd_buf_init(&rkbuf->rkbuf_buf, 1, 0);
- rd_buf_push(&rkbuf->rkbuf_buf, ptr, size, free_cb);
-
- rkbuf->rkbuf_totlen = size;
-
- /* Initialize reader slice */
- rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
-
- rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);
-
- return rkbuf;
-}
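-
-/* Illustration only (not part of librdkafka): wrap an existing memory
- * region (e.g. a received payload) as a read-only buffer;
- * `payload`/`payload_len` are hypothetical, and rd_free() is invoked on
- * the pointer once the refcount reaches zero: */
-#if 0
-rd_kafka_buf_t *shadow =
-    rd_kafka_buf_new_shadow(payload, payload_len, rd_free);
-/* ... parse via shadow->rkbuf_reader ... */
-rd_kafka_buf_destroy(shadow);
-#endif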
-
-
-
-void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
- TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
- rd_atomic32_add(&rkbufq->rkbq_cnt, 1);
- if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
- rd_atomic32_add(&rkbufq->rkbq_msg_cnt,
- rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
-}
-
-void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
- TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
- rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0);
- rd_atomic32_sub(&rkbufq->rkbq_cnt, 1);
- if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
- rd_atomic32_sub(&rkbufq->rkbq_msg_cnt,
- rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
-}
-
-void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) {
- TAILQ_INIT(&rkbufq->rkbq_bufs);
- rd_atomic32_init(&rkbufq->rkbq_cnt, 0);
- rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0);
-}
-
-/**
- * Concat all buffers from 'src' to tail of 'dst'
- */
-void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
- TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link);
- (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt));
- (void)rd_atomic32_add(&dst->rkbq_msg_cnt,
- rd_atomic32_get(&src->rkbq_msg_cnt));
- rd_kafka_bufq_init(src);
-}
-
-/**
- * Purge the wait-response queue.
- * NOTE: 'rkbufq' must be a temporary queue and not one of rkb_waitresps
- * or rkb_outbufs since buffers may be re-enqueued on those queues.
- * 'rkbufq' needs to be bufq_init():ed before reuse after this call.
- */
-void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbufq,
- rd_kafka_resp_err_t err) {
- rd_kafka_buf_t *rkbuf, *tmp;
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers",
- rd_atomic32_get(&rkbufq->rkbq_cnt));
-
- TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
- }
-}
-
-
-/**
- * @brief Update bufq for connection reset:
- *
- * - Purge connection-setup API requests from the queue.
- * - Reset any partially sent buffer's offset. (issue #756)
- *
- * Request types purged:
- * ApiVersion
- * SaslHandshake
- */
-void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbufq) {
- rd_kafka_buf_t *rkbuf, *tmp;
- rd_ts_t now = rd_clock();
-
- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
-
- rd_rkb_dbg(rkb, QUEUE, "BUFQ",
- "Updating %d buffers on connection reset",
- rd_atomic32_get(&rkbufq->rkbq_cnt));
-
- TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
- switch (rkbuf->rkbuf_reqhdr.ApiKey) {
- case RD_KAFKAP_ApiVersion:
- case RD_KAFKAP_SaslHandshake:
- rd_kafka_bufq_deq(rkbufq, rkbuf);
- rd_kafka_buf_callback(rkb->rkb_rk, rkb,
- RD_KAFKA_RESP_ERR__DESTROY, NULL,
- rkbuf);
- break;
- default:
- /* Reset buffer send position and corrid */
- rd_slice_seek(&rkbuf->rkbuf_reader, 0);
- rkbuf->rkbuf_corrid = 0;
- /* Reset timeout */
- rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now);
- break;
- }
- }
-}
-
-
-void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb,
- const char *fac,
- rd_kafka_bufq_t *rkbq) {
- rd_kafka_buf_t *rkbuf;
- int cnt = rd_kafka_bufq_cnt(rkbq);
- rd_ts_t now;
-
- if (!cnt)
- return;
-
- now = rd_clock();
-
- rd_rkb_dbg(rkb, BROKER, fac, "bufq with %d buffer(s):", cnt);
-
- TAILQ_FOREACH(rkbuf, &rkbq->rkbq_bufs, rkbuf_link) {
- rd_rkb_dbg(rkb, BROKER, fac,
- " Buffer %s (%" PRIusz " bytes, corrid %" PRId32
- ", "
- "connid %d, prio %d, retry %d in %lldms, "
- "timeout in %lldms)",
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_totlen, rkbuf->rkbuf_corrid,
- rkbuf->rkbuf_connid, rkbuf->rkbuf_prio,
- rkbuf->rkbuf_retries,
- rkbuf->rkbuf_ts_retry
- ? (rkbuf->rkbuf_ts_retry - now) / 1000LL
- : 0,
- rkbuf->rkbuf_ts_timeout
- ? (rkbuf->rkbuf_ts_timeout - now) / 1000LL
- : 0);
- }
-}
-
-
-
-/**
- * @brief Calculate the effective timeout for a request attempt
- */
-void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk,
- rd_kafka_buf_t *rkbuf,
- rd_ts_t now) {
- if (likely(rkbuf->rkbuf_rel_timeout)) {
- /* Default:
-                 * Relative timeout: set the request timeout to
-                 * now + rel timeout. */
- rkbuf->rkbuf_ts_timeout = now + rkbuf->rkbuf_rel_timeout * 1000;
- } else if (!rkbuf->rkbuf_force_timeout) {
- /* Use absolute timeout, limited by socket.timeout.ms */
- rd_ts_t sock_timeout =
- now + rk->rk_conf.socket_timeout_ms * 1000;
-
- rkbuf->rkbuf_ts_timeout =
- RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout);
- } else {
-                /* Use absolute timeout without limit. */
- rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_abs_timeout;
- }
-}
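-
-/* Illustration only: a worked example of the logic above. In the default
- * relative mode with socket.timeout.ms=60000, each (re)transmission at
- * time `now` gets rkbuf_ts_timeout = now + 60000*1000 us; in absolute
- * mode the per-transmission deadline is additionally capped by
- * rkbuf_abs_timeout, so the request never outlives its overall deadline
- * across retries. */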
-
-/**
- * Retry failed request, if permitted.
- * @remark \p rkb may be NULL
- * @remark The retry count is only increased for actually transmitted buffers;
- *         if a failure occurs while the buffer lingers in the output queue
- *         (rkb_outbufs) then the retry counter is not increased.
- * Returns 1 if the request was scheduled for retry, else 0.
- */
-int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
- int incr_retry = rd_kafka_buf_was_sent(rkbuf) ? 1 : 0;
-
- /* Don't allow retries of dummy/empty buffers */
- rd_assert(rd_buf_len(&rkbuf->rkbuf_buf) > 0);
-
- if (unlikely(!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL ||
- rd_kafka_terminating(rkb->rkb_rk) ||
- rkbuf->rkbuf_retries + incr_retry >
- rkbuf->rkbuf_max_retries))
- return 0;
-
- /* Absolute timeout, check for expiry. */
- if (rkbuf->rkbuf_abs_timeout && rkbuf->rkbuf_abs_timeout < rd_clock())
- return 0; /* Expired */
-
- /* Try again */
- rkbuf->rkbuf_ts_sent = 0;
- rkbuf->rkbuf_ts_timeout = 0; /* Will be updated in calc_timeout() */
- rkbuf->rkbuf_retries += incr_retry;
- rd_kafka_buf_keep(rkbuf);
- rd_kafka_broker_buf_retry(rkb, rkbuf);
- return 1;
-}
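-
-/* Illustration only (not part of librdkafka): a hypothetical
- * response-callback sketch of the retry contract above: the callback
- * decides whether to retry, and rd_kafka_buf_retry() re-enqueues the
- * request if still permitted. */
-#if 0
-static void my_resp_cb(rd_kafka_t *rk, rd_kafka_broker_t *rkb,
-                       rd_kafka_resp_err_t err, rd_kafka_buf_t *reply,
-                       rd_kafka_buf_t *request, void *opaque) {
-        if (err && err != RD_KAFKA_RESP_ERR__DESTROY &&
-            rd_kafka_buf_retry(rkb, request))
-                return; /* Scheduled for retry; callback runs again later. */
-        /* ... terminal handling of err / reply ... */
-}
-#endif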
-
-
-/**
- * @brief Handle RD_KAFKA_OP_RECV_BUF.
- */
-void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
- rd_kafka_buf_t *request, *response;
- rd_kafka_t *rk;
-
- request = rko->rko_u.xbuf.rkbuf;
- rko->rko_u.xbuf.rkbuf = NULL;
-
- /* NULL on op_destroy() */
- if (request->rkbuf_replyq.q) {
- int32_t version = request->rkbuf_replyq.version;
- /* Current queue usage is done, but retain original replyq for
- * future retries, stealing
- * the current reference. */
- request->rkbuf_orig_replyq = request->rkbuf_replyq;
- rd_kafka_replyq_clear(&request->rkbuf_replyq);
-                /* The callback might need to do a version check, so retain
-                 * the version across the clear() call (which resets it). */
- request->rkbuf_replyq.version = version;
- }
-
- if (!request->rkbuf_cb) {
- rd_kafka_buf_destroy(request);
- return;
- }
-
- /* Let buf_callback() do destroy()s */
- response = request->rkbuf_response; /* May be NULL */
- request->rkbuf_response = NULL;
-
- if (!(rk = rko->rko_rk)) {
- rd_assert(request->rkbuf_rkb != NULL);
- rk = request->rkbuf_rkb->rkb_rk;
- }
-
- rd_kafka_buf_callback(rk, request->rkbuf_rkb, err, response, request);
-}
-
-
-
-/**
- * Call request.rkbuf_cb(), but:
- * - if the rkbuf has a rkbuf_replyq the buffer is enqueued on that queue
- * with op type RD_KAFKA_OP_RECV_BUF.
- * - else call rkbuf_cb().
- *
- * \p response may be NULL.
- *
- * Will decrease refcount for both response and request, eventually.
- *
- * The decision to retry, and the call to buf_retry(), is delegated
- * to the buffer's response callback.
- */
-void rd_kafka_buf_callback(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *response,
- rd_kafka_buf_t *request) {
-
- rd_kafka_interceptors_on_response_received(
- rk, -1, rkb ? rd_kafka_broker_name(rkb) : "",
- rkb ? rd_kafka_broker_id(rkb) : -1, request->rkbuf_reqhdr.ApiKey,
- request->rkbuf_reqhdr.ApiVersion, request->rkbuf_reshdr.CorrId,
- response ? response->rkbuf_totlen : 0,
- response ? response->rkbuf_ts_sent : -1, err);
-
- if (err != RD_KAFKA_RESP_ERR__DESTROY && request->rkbuf_replyq.q) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
-
- rd_kafka_assert(NULL, !request->rkbuf_response);
- request->rkbuf_response = response;
-
- /* Increment refcnt since rko_rkbuf will be decref:ed
-                 * if replyq_enq() fails and we don't want the rkbuf gone
-                 * in that case. */
- rd_kafka_buf_keep(request);
- rko->rko_u.xbuf.rkbuf = request;
-
- rko->rko_err = err;
-
- /* Copy original replyq for future retries, with its own
- * queue reference. */
- rd_kafka_replyq_copy(&request->rkbuf_orig_replyq,
- &request->rkbuf_replyq);
-
- rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0);
-
- rd_kafka_buf_destroy(request); /* from keep above */
- return;
- }
-
- if (request->rkbuf_cb)
- request->rkbuf_cb(rk, rkb, err, response, request,
- request->rkbuf_opaque);
-
- rd_kafka_buf_destroy(request);
- if (response)
- rd_kafka_buf_destroy(response);
-}
-
-
-
-/**
- * @brief Set the maker callback, which will be called just prior to sending
- * to construct the buffer contents.
- *
- * Use this when the usable ApiVersion must be known but the broker may
- * currently be down.
- *
- * See rd_kafka_make_req_cb_t documentation for more info.
- */
-void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
- rd_kafka_make_req_cb_t *make_cb,
- void *make_opaque,
- void (*free_make_opaque_cb)(void *make_opaque)) {
- rd_assert(!rkbuf->rkbuf_make_req_cb &&
- !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE));
-
- rkbuf->rkbuf_make_req_cb = make_cb;
- rkbuf->rkbuf_make_opaque = make_opaque;
- rkbuf->rkbuf_free_make_opaque_cb = free_make_opaque_cb;
-
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NEED_MAKE;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h
deleted file mode 100644
index b4f606317..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h
+++ /dev/null
@@ -1,1407 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_BUF_H_
-#define _RDKAFKA_BUF_H_
-
-#include "rdkafka_int.h"
-#include "rdcrc32.h"
-#include "rdlist.h"
-#include "rdbuf.h"
-#include "rdkafka_msgbatch.h"
-
-typedef struct rd_kafka_broker_s rd_kafka_broker_t;
-
-#define RD_KAFKA_HEADERS_IOV_CNT 2
-
-
-/**
- * Temporary buffer with memory-aligned writes to accommodate
- * efficient and platform-safe struct writes.
- */
-typedef struct rd_tmpabuf_s {
- size_t size;
- size_t of;
- char *buf;
- int failed;
- int assert_on_fail;
-} rd_tmpabuf_t;
-
-/**
- * @brief Allocate new tmpabuf with \p size bytes pre-allocated.
- */
-static RD_UNUSED void
-rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, int assert_on_fail) {
- tab->buf = rd_malloc(size);
- tab->size = size;
- tab->of = 0;
- tab->failed = 0;
- tab->assert_on_fail = assert_on_fail;
-}
-
-/**
- * @brief Free memory allocated by tmpabuf
- */
-static RD_UNUSED void rd_tmpabuf_destroy(rd_tmpabuf_t *tab) {
- rd_free(tab->buf);
-}
-
-/**
- * @returns 1 if a previous operation failed.
- */
-static RD_UNUSED RD_INLINE int rd_tmpabuf_failed(rd_tmpabuf_t *tab) {
- return tab->failed;
-}
-
-/**
- * @brief Allocate \p size bytes for writing, returning an aligned pointer
- * to the memory.
- * @returns the allocated pointer (within the tmpabuf) on success or
- * NULL if the requested number of bytes + alignment is not available
- * in the tmpabuf.
- */
-static RD_UNUSED void *
-rd_tmpabuf_alloc0(const char *func, int line, rd_tmpabuf_t *tab, size_t size) {
- void *ptr;
-
- if (unlikely(tab->failed))
- return NULL;
-
- if (unlikely(tab->of + size > tab->size)) {
- if (tab->assert_on_fail) {
- fprintf(stderr,
- "%s: %s:%d: requested size %" PRIusz
- " + %" PRIusz " > %" PRIusz "\n",
- __FUNCTION__, func, line, tab->of, size,
- tab->size);
- assert(!*"rd_tmpabuf_alloc: not enough size in buffer");
- }
- return NULL;
- }
-
- ptr = (void *)(tab->buf + tab->of);
- tab->of += RD_ROUNDUP(size, 8);
-
- return ptr;
-}
-
-#define rd_tmpabuf_alloc(tab, size) \
- rd_tmpabuf_alloc0(__FUNCTION__, __LINE__, tab, size)
-
-/**
- * @brief Write \p buf of \p size bytes to tmpabuf memory in an aligned fashion.
- *
- * @returns the allocated and written-to pointer (within the tmpabuf) on success
- * or NULL if the requested number of bytes + alignment is not
- * available in the tmpabuf.
- */
-static RD_UNUSED void *rd_tmpabuf_write0(const char *func,
- int line,
- rd_tmpabuf_t *tab,
- const void *buf,
- size_t size) {
- void *ptr = rd_tmpabuf_alloc0(func, line, tab, size);
-
- if (likely(ptr && size))
- memcpy(ptr, buf, size);
-
- return ptr;
-}
-#define rd_tmpabuf_write(tab, buf, size) \
- rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size)
-
-
-/**
- * @brief Wrapper for rd_tmpabuf_write() that takes a nul-terminated string.
- */
-static RD_UNUSED char *rd_tmpabuf_write_str0(const char *func,
- int line,
- rd_tmpabuf_t *tab,
- const char *str) {
- return rd_tmpabuf_write0(func, line, tab, str, strlen(str) + 1);
-}
-#define rd_tmpabuf_write_str(tab, str) \
- rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str)
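-
-/* Illustration only (not part of librdkafka): a typical tmpabuf
- * lifecycle: size the arena up-front, make aligned writes into it, then
- * free everything with one call: */
-#if 0
-rd_tmpabuf_t tab;
-rd_tmpabuf_new(&tab, 256 /*bytes*/, 1 /*assert_on_fail*/);
-char *name = rd_tmpabuf_write_str(&tab, "broker-1"); /* nul-terminated copy */
-void *scratch = rd_tmpabuf_alloc(&tab, sizeof(int64_t)); /* aligned alloc */
-rd_tmpabuf_destroy(&tab); /* frees name and scratch together */
-#endif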
-
-
-
-/**
- * Response handling callback.
- *
- * NOTE: Callbacks must check for 'err == RD_KAFKA_RESP_ERR__DESTROY'
- * which indicates that some entity is terminating (rd_kafka_t, broker,
- * toppar, queue, etc) and the callback may not be called in the
- *        correct thread. In this case the callback must perform only
- *        the most minimal cleanup and must not trigger any other operations.
- *
- * NOTE: rkb, reply and request may be NULL, depending on error situation.
- */
-typedef void(rd_kafka_resp_cb_t)(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque);
-
-
-/**
- * @brief Sender callback. This callback is used to construct and send (enq)
- * a rkbuf on a particular broker.
- */
-typedef rd_kafka_resp_err_t(rd_kafka_send_req_cb_t)(rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *reply_opaque);
-
-
-/**
- * @brief Request maker. A callback that constructs the actual contents
- * of a request.
- *
- * When constructing a request the ApiVersion typically needs to be selected
- * which requires the broker's supported ApiVersions to be known, which in
- * turn requires the broker connection to be UP.
- *
- * As a buffer constructor you have two choices:
- * a. acquire the broker handle, wait for it to come up, and then construct
- * the request buffer, or
- * b. acquire the broker handle, enqueue an uncrafted/unmade
- * request on the broker request queue, and when the broker is up
- * the make_req_cb will be called for you to construct the request.
- *
- * From a code complexity standpoint, the latter option is usually the least
- * complex and frees the caller from having to care about broker state.
- * Any information that is required to construct the request is passed through
- * the make_opaque, which can be automatically freed by the buffer code
- * when it has been used, or handled by the caller (in which case it must
- * outlive the lifetime of the buffer).
- *
- * Usage:
- *
- * 1. Construct an rkbuf with the appropriate ApiKey.
- * 2. Make a copy or reference of any data that is needed to construct the
- * request, e.g., through rd_kafka_topic_partition_list_copy(). This
- * data is passed by the make_opaque.
- * 3. Set the make callback by calling rd_kafka_buf_set_maker() and pass
- * the make_opaque data and a free function, if needed.
- * 4. The callback will eventually be called from the broker thread.
- * 5. In the make callback construct the request on the passed rkbuf.
- * 6. The request is sent to the broker and the make_opaque is freed.
- *
- * See rd_kafka_ListOffsetsRequest() in rdkafka_request.c for an example.
- *
- */
-typedef rd_kafka_resp_err_t(rd_kafka_make_req_cb_t)(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- void *make_opaque);
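-
-/* Illustration only (not part of librdkafka): a hypothetical maker sketch
- * following the numbered steps above; `my_opaque_t` and the request body
- * are placeholders. */
-#if 0
-static rd_kafka_resp_err_t my_make_req_cb(rd_kafka_broker_t *rkb,
-                                          rd_kafka_buf_t *rkbuf,
-                                          void *make_opaque) {
-        my_opaque_t *mo = make_opaque; /* hypothetical payload */
-        /* The broker is up by the time this runs, so the ApiVersion can
-         * be selected here and the request body written to rkbuf. */
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-/* Attach it; the buffer code frees mo via rd_free() after use: */
-rd_kafka_buf_set_maker(rkbuf, my_make_req_cb, mo, rd_free);
-#endif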
-
-/**
- * @struct Request and response buffer
- *
- */
-struct rd_kafka_buf_s { /* rd_kafka_buf_t */
- TAILQ_ENTRY(rd_kafka_buf_s) rkbuf_link;
-
- int32_t rkbuf_corrid;
-
- rd_ts_t rkbuf_ts_retry; /* Absolute send retry time */
-
- int rkbuf_flags; /* RD_KAFKA_OP_F */
-
- /** What convenience flags to copy from request to response along
- * with the reqhdr. */
-#define RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK (RD_KAFKA_OP_F_FLEXVER)
-
- rd_kafka_prio_t rkbuf_prio; /**< Request priority */
-
- rd_buf_t rkbuf_buf; /**< Send/Recv byte buffer */
- rd_slice_t rkbuf_reader; /**< Buffer slice reader for rkbuf_buf */
-
- int rkbuf_connid; /* broker connection id (used when buffer
- * was partially sent). */
- size_t rkbuf_totlen; /* recv: total expected length,
- * send: not used */
-
- rd_crc32_t rkbuf_crc; /* Current CRC calculation */
-
- struct rd_kafkap_reqhdr rkbuf_reqhdr; /* Request header.
- * These fields are encoded
- * and written to output buffer
- * on buffer finalization.
- * Note:
- * The request's
- * reqhdr is copied to the
- * response's reqhdr as a
- * convenience. */
- struct rd_kafkap_reshdr rkbuf_reshdr; /* Response header.
- * Decoded fields are copied
- * here from the buffer
- * to provide an ease-of-use
- * interface to the header */
-
- int32_t rkbuf_expected_size; /* expected size of message */
-
- rd_kafka_replyq_t rkbuf_replyq; /* Enqueue response on replyq */
- rd_kafka_replyq_t rkbuf_orig_replyq; /* Original replyq to be used
- * for retries from inside
- * the rkbuf_cb() callback
- * since rkbuf_replyq will
- * have been reset. */
- rd_kafka_resp_cb_t *rkbuf_cb; /* Response callback */
- struct rd_kafka_buf_s *rkbuf_response; /* Response buffer */
-
- rd_kafka_make_req_cb_t *rkbuf_make_req_cb; /**< Callback to construct
- * the request itself.
- * Will be used if
- * RD_KAFKA_OP_F_NEED_MAKE
- * is set. */
- void *rkbuf_make_opaque; /**< Opaque passed to rkbuf_make_req_cb.
- * Will be freed automatically after use
- * by the rkbuf code. */
- void (*rkbuf_free_make_opaque_cb)(void *); /**< Free function for
- * rkbuf_make_opaque. */
-
- struct rd_kafka_broker_s *rkbuf_rkb; /**< Optional broker object
- * with refcnt increased used
- * for logging decode errors
- * if log_decode_errors is > 0 */
-
- rd_refcnt_t rkbuf_refcnt;
- void *rkbuf_opaque;
-
- int rkbuf_max_retries; /**< Maximum retries to attempt. */
- int rkbuf_retries; /**< Retries so far. */
-
-
- int rkbuf_features; /* Required feature(s) that must be
- * supported by broker. */
-
- rd_ts_t rkbuf_ts_enq;
- rd_ts_t rkbuf_ts_sent; /* Initially: Absolute time of transmission,
- * after response: RTT. */
-
- /* Request timeouts:
- * rkbuf_ts_timeout is the effective absolute request timeout used
- * by the timeout scanner to see if a request has timed out.
- * It is set when a request is enqueued on the broker transmit
- * queue based on the relative or absolute timeout:
- *
- * rkbuf_rel_timeout is the per-request-transmit relative timeout,
-         * this value is reused for each subsequent retry of a request.
- *
- * rkbuf_abs_timeout is the absolute request timeout, spanning
- * all retries.
-         * Each individual transmission is still capped by
-         * socket.timeout.ms, but the request's total lifetime is bounded
-         * by this absolute value.
- *
- * Use rd_kafka_buf_set_timeout() to set a relative timeout
- * that will be reused on retry,
- * or rd_kafka_buf_set_abs_timeout() to set a fixed absolute timeout
- * for the case where the caller knows the request will be
- * semantically outdated when that absolute time expires, such as for
- * session.timeout.ms-based requests.
- *
- * The decision to retry a request is delegated to the rkbuf_cb
- * response callback, which should use rd_kafka_err_action()
- * and check the return actions for RD_KAFKA_ERR_ACTION_RETRY to be set
- * and then call rd_kafka_buf_retry().
- * rd_kafka_buf_retry() will enqueue the request on the rkb_retrybufs
- * queue with a backoff time of retry.backoff.ms.
- * The rkb_retrybufs queue is served by the broker thread's timeout
- * scanner.
- * @warning rkb_retrybufs is NOT purged on broker down.
- */
- rd_ts_t rkbuf_ts_timeout; /* Request timeout (absolute time). */
- rd_ts_t
- rkbuf_abs_timeout; /* Absolute timeout for request, including
- * retries.
- * Mutually exclusive with rkbuf_rel_timeout*/
- int rkbuf_rel_timeout; /* Relative timeout (ms), used for retries.
- * Defaults to socket.timeout.ms.
- * Mutually exclusive with rkbuf_abs_timeout*/
- rd_bool_t rkbuf_force_timeout; /**< Force request timeout to be
- * remaining abs_timeout regardless
- * of socket.timeout.ms. */
-
-
- int64_t rkbuf_offset; /* Used by OffsetCommit */
-
- rd_list_t *rkbuf_rktp_vers; /* Toppar + Op Version map.
- * Used by FetchRequest. */
-
- rd_kafka_resp_err_t rkbuf_err; /* Buffer parsing error code */
-
- union {
- struct {
- rd_list_t *topics; /* Requested topics (char *) */
- char *reason; /* Textual reason */
- rd_kafka_op_t *rko; /* Originating rko with replyq
- * (if any) */
- rd_bool_t all_topics; /**< Full/All topics requested */
- rd_bool_t cgrp_update; /**< Update cgrp with topic
- * status from response. */
-
- int *decr; /* Decrement this integer by one
- * when request is complete:
- * typically points to metadata
- * cache's full_.._sent.
- * Will be performed with
- * decr_lock held. */
- mtx_t *decr_lock;
-
- } Metadata;
- struct {
- rd_kafka_msgbatch_t batch; /**< MessageSet/batch */
- } Produce;
- struct {
- rd_bool_t commit; /**< true = txn commit,
- * false = txn abort */
- } EndTxn;
- } rkbuf_u;
-
-#define rkbuf_batch rkbuf_u.Produce.batch
-
- const char *rkbuf_uflow_mitigation; /**< Buffer read underflow
- * human readable mitigation
- * string (const memory).
- * This is used to hint the
- * user why the underflow
- * might have occurred, which
- * depends on request type. */
-};
-
-
-
-/**
- * @name Read buffer interface
- *
- * Memory reading helper macros to be used when parsing network responses.
- *
- * Assumptions:
- * - an 'err_parse:' goto-label must be available for error bailouts,
- * the error code will be set in rkbuf->rkbuf_err
- * - local `int log_decode_errors` variable set to the logging level
- * to log parse errors (or 0 to turn off logging).
- */
-
-#define rd_kafka_buf_parse_fail(rkbuf, ...) \
- do { \
- if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \
- rd_rkb_log( \
- rkbuf->rkbuf_rkb, log_decode_errors, "PROTOERR", \
- "Protocol parse failure for %s v%hd%s " \
- "at %" PRIusz "/%" PRIusz \
- " (%s:%i) " \
- "(incorrect broker.version.fallback?)", \
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \
- rkbuf->rkbuf_reqhdr.ApiVersion, \
- (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER \
- ? "(flex)" \
- : ""), \
- rd_slice_offset(&rkbuf->rkbuf_reader), \
- rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \
- __LINE__); \
- rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \
- "PROTOERR", __VA_ARGS__); \
- } \
- (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \
- goto err_parse; \
- } while (0)
-
-/**
- * @name Fail buffer reading due to buffer underflow.
- */
-#define rd_kafka_buf_underflow_fail(rkbuf, wantedlen, ...) \
- do { \
- if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \
- char __tmpstr[256]; \
- rd_snprintf(__tmpstr, sizeof(__tmpstr), \
- ": " __VA_ARGS__); \
- if (strlen(__tmpstr) == 2) \
- __tmpstr[0] = '\0'; \
- rd_rkb_log( \
- rkbuf->rkbuf_rkb, log_decode_errors, "PROTOUFLOW", \
- "Protocol read buffer underflow " \
- "for %s v%hd " \
- "at %" PRIusz "/%" PRIusz \
- " (%s:%i): " \
- "expected %" PRIusz \
- " bytes > " \
- "%" PRIusz " remaining bytes (%s)%s", \
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \
- rkbuf->rkbuf_reqhdr.ApiVersion, \
- rd_slice_offset(&rkbuf->rkbuf_reader), \
- rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \
- __LINE__, wantedlen, \
- rd_slice_remains(&rkbuf->rkbuf_reader), \
- rkbuf->rkbuf_uflow_mitigation \
- ? rkbuf->rkbuf_uflow_mitigation \
- : "incorrect broker.version.fallback?", \
- __tmpstr); \
- } \
- (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__UNDERFLOW; \
- goto err_parse; \
- } while (0)
-
-
-/**
- * Returns the number of remaining bytes available to read.
- */
-#define rd_kafka_buf_read_remain(rkbuf) rd_slice_remains(&(rkbuf)->rkbuf_reader)
-
-/**
- * Checks that at least 'len' bytes remain to be read in buffer, else fails.
- */
-#define rd_kafka_buf_check_len(rkbuf, len) \
- do { \
- size_t __len0 = (size_t)(len); \
- if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \
- rd_kafka_buf_underflow_fail(rkbuf, __len0); \
- } \
- } while (0)
-
-/**
- * Skip (as in read and ignore) the next 'len' bytes.
- */
-#define rd_kafka_buf_skip(rkbuf, len) \
- do { \
- size_t __len1 = (size_t)(len); \
- if (__len1 && \
- !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
- rd_kafka_buf_check_len(rkbuf, __len1); \
- } while (0)
-
-/**
- * Skip (as in read and ignore) up to fixed position \p pos.
- */
-#define rd_kafka_buf_skip_to(rkbuf, pos) \
- do { \
- size_t __len1 = \
- (size_t)(pos)-rd_slice_offset(&(rkbuf)->rkbuf_reader); \
- if (__len1 && \
- !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
- rd_kafka_buf_check_len(rkbuf, __len1); \
- } while (0)
-
-
-
-/**
- * Read 'len' bytes and copy to 'dstptr'
- */
-#define rd_kafka_buf_read(rkbuf, dstptr, len) \
- do { \
- size_t __len2 = (size_t)(len); \
- if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2)) \
- rd_kafka_buf_check_len(rkbuf, __len2); \
- } while (0)
-
-
-/**
- * @brief Read \p len bytes at slice offset \p offset and copy to \p dstptr
- * without affecting the current reader position.
- */
-#define rd_kafka_buf_peek(rkbuf, offset, dstptr, len) \
- do { \
- size_t __len2 = (size_t)(len); \
- if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset, dstptr, \
- __len2)) \
- rd_kafka_buf_check_len(rkbuf, (offset) + (__len2)); \
- } while (0)
-
-
-/**
- * Read a 16,32,64-bit integer and store it in 'dstptr'
- */
-#define rd_kafka_buf_read_i64(rkbuf, dstptr) \
- do { \
- int64_t _v; \
- int64_t *_vp = dstptr; \
- rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
- *_vp = be64toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_peek_i64(rkbuf, of, dstptr) \
- do { \
- int64_t _v; \
- int64_t *_vp = dstptr; \
- rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
- *_vp = be64toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_read_i32(rkbuf, dstptr) \
- do { \
- int32_t _v; \
- int32_t *_vp = dstptr; \
- rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
- *_vp = be32toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_peek_i32(rkbuf, of, dstptr) \
- do { \
- int32_t _v; \
- int32_t *_vp = dstptr; \
- rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
- *_vp = be32toh(_v); \
- } while (0)
-
-
-/* Same as .._read_i32 but does a direct assignment.
- * dst is assumed to be a scalar, not pointer. */
-#define rd_kafka_buf_read_i32a(rkbuf, dst) \
- do { \
- int32_t _v; \
- rd_kafka_buf_read(rkbuf, &_v, 4); \
- dst = (int32_t)be32toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_read_i16(rkbuf, dstptr) \
- do { \
- int16_t _v; \
- int16_t *_vp = dstptr; \
- rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
- *_vp = (int16_t)be16toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_peek_i16(rkbuf, of, dstptr) \
- do { \
- int16_t _v; \
- int16_t *_vp = dstptr; \
- rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
- *_vp = be16toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_read_i16a(rkbuf, dst) \
- do { \
- int16_t _v; \
- rd_kafka_buf_read(rkbuf, &_v, 2); \
- dst = (int16_t)be16toh(_v); \
- } while (0)
-
-#define rd_kafka_buf_read_i8(rkbuf, dst) rd_kafka_buf_read(rkbuf, dst, 1)
-
-#define rd_kafka_buf_peek_i8(rkbuf, of, dst) \
- rd_kafka_buf_peek(rkbuf, of, dst, 1)
-
-#define rd_kafka_buf_read_bool(rkbuf, dstptr) \
- do { \
- int8_t _v; \
- rd_bool_t *_dst = dstptr; \
- rd_kafka_buf_read(rkbuf, &_v, 1); \
- *_dst = (rd_bool_t)_v; \
- } while (0)
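-
-/* Illustration only (not part of librdkafka): a hypothetical parser
- * sketch of the read conventions above: a local `log_decode_errors` and
- * an `err_parse:` label must be in scope, since the macros jump there on
- * failure. */
-#if 0
-static rd_kafka_resp_err_t parse_my_response(rd_kafka_buf_t *rkbuf) {
-        const int log_decode_errors = LOG_ERR;
-        int32_t ThrottleTimeMs;
-        int16_t ErrorCode;
-        rd_kafka_buf_read_i32(rkbuf, &ThrottleTimeMs);
-        rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-        return ErrorCode;
-err_parse:
-        return rkbuf->rkbuf_err; /* set by the failing read macro */
-}
-#endif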
-
-
-/**
- * @brief Read varint and store in int64_t \p dstptr
- */
-#define rd_kafka_buf_read_varint(rkbuf, dstptr) \
- do { \
- int64_t _v; \
- int64_t *_vp = dstptr; \
- size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_v); \
- if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
- rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
- "varint parsing failed"); \
- *_vp = _v; \
- } while (0)
-
-
-/**
- * @brief Read unsigned varint and store in uint64_t \p dstptr
- */
-#define rd_kafka_buf_read_uvarint(rkbuf, dstptr) \
- do { \
- uint64_t _v; \
- uint64_t *_vp = dstptr; \
- size_t _r = \
- rd_slice_read_uvarint(&(rkbuf)->rkbuf_reader, &_v); \
- if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
- rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
- "uvarint parsing failed"); \
- *_vp = _v; \
- } while (0)
-
-
-/**
- * @brief Read Kafka COMPACT_STRING (VARINT+N) or
- * standard String representation (2+N).
- *
- * The kstr data will be updated to point to the rkbuf. */
-#define rd_kafka_buf_read_str(rkbuf, kstr) \
- do { \
- int _klen; \
- if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \
- uint64_t _uva; \
- rd_kafka_buf_read_uvarint(rkbuf, &_uva); \
- (kstr)->len = ((int32_t)_uva) - 1; \
- _klen = (kstr)->len; \
- } else { \
- rd_kafka_buf_read_i16a(rkbuf, (kstr)->len); \
- _klen = RD_KAFKAP_STR_LEN(kstr); \
- } \
- if (RD_KAFKAP_STR_IS_NULL(kstr)) \
- (kstr)->str = NULL; \
- else if (RD_KAFKAP_STR_LEN(kstr) == 0) \
- (kstr)->str = ""; \
- else if (!((kstr)->str = rd_slice_ensure_contig( \
- &rkbuf->rkbuf_reader, _klen))) \
- rd_kafka_buf_check_len(rkbuf, _klen); \
- } while (0)
-
-/* Read Kafka String representation (2+N) and write it to the \p tmpabuf
- * with a trailing nul byte. */
-#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) \
- do { \
- rd_kafkap_str_t _kstr; \
- size_t _slen; \
- char *_dst; \
- rd_kafka_buf_read_str(rkbuf, &_kstr); \
- _slen = RD_KAFKAP_STR_LEN(&_kstr); \
- if (!(_dst = rd_tmpabuf_write(tmpabuf, _kstr.str, _slen + 1))) \
- rd_kafka_buf_parse_fail( \
- rkbuf, \
- "Not enough room in tmpabuf: " \
- "%" PRIusz "+%" PRIusz " > %" PRIusz, \
- (tmpabuf)->of, _slen + 1, (tmpabuf)->size); \
- _dst[_slen] = '\0'; \
- dst = (void *)_dst; \
- } while (0)
-
-/**
- * Skip a string.
- */
-#define rd_kafka_buf_skip_str(rkbuf) \
- do { \
- int16_t _slen; \
- rd_kafka_buf_read_i16(rkbuf, &_slen); \
- rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \
- } while (0)
-
-/* Read Kafka Bytes representation (4+N).
- * The 'kbytes' will be updated to point to rkbuf data */
-#define rd_kafka_buf_read_bytes(rkbuf, kbytes) \
- do { \
- int _klen; \
- rd_kafka_buf_read_i32a(rkbuf, _klen); \
- (kbytes)->len = _klen; \
- if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \
- (kbytes)->data = NULL; \
- (kbytes)->len = 0; \
- } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \
- (kbytes)->data = ""; \
- else if (!((kbytes)->data = rd_slice_ensure_contig( \
- &(rkbuf)->rkbuf_reader, _klen))) \
- rd_kafka_buf_check_len(rkbuf, _klen); \
- } while (0)
-
-
-/**
- * @brief Read \p size bytes from buffer, setting \p *ptr to the start
- * of the memory region.
- */
-#define rd_kafka_buf_read_ptr(rkbuf, ptr, size) \
- do { \
- size_t _klen = size; \
- if (!(*(ptr) = (void *)rd_slice_ensure_contig( \
- &(rkbuf)->rkbuf_reader, _klen))) \
- rd_kafka_buf_check_len(rkbuf, _klen); \
- } while (0)
-
-
-/**
- * @brief Read a varint-length Kafka Bytes representation
- */
-#define rd_kafka_buf_read_bytes_varint(rkbuf, kbytes) \
- do { \
- int64_t _len2; \
- size_t _r = \
- rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_len2); \
- if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
- rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
- "varint parsing failed"); \
- (kbytes)->len = (int32_t)_len2; \
- if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \
- (kbytes)->data = NULL; \
- (kbytes)->len = 0; \
- } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \
- (kbytes)->data = ""; \
- else if (!((kbytes)->data = rd_slice_ensure_contig( \
- &(rkbuf)->rkbuf_reader, (size_t)_len2))) \
- rd_kafka_buf_check_len(rkbuf, _len2); \
- } while (0)
-
-
-/**
- * @brief Read throttle_time_ms (i32) from response and pass the value
- * to the throttle handling code.
- */
-#define rd_kafka_buf_read_throttle_time(rkbuf) \
- do { \
- int32_t _throttle_time_ms; \
- rd_kafka_buf_read_i32(rkbuf, &_throttle_time_ms); \
- rd_kafka_op_throttle_time((rkbuf)->rkbuf_rkb, \
- (rkbuf)->rkbuf_rkb->rkb_rk->rk_rep, \
- _throttle_time_ms); \
- } while (0)
-
-
-/**
- * @brief Discard all KIP-482 Tags at the current position in the buffer.
- */
-#define rd_kafka_buf_skip_tags(rkbuf) \
- do { \
- uint64_t _tagcnt; \
- if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
- break; \
- rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \
- while (_tagcnt-- > 0) { \
- uint64_t _tagtype, _taglen; \
- rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \
- rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \
- if (_taglen > 1) \
- rd_kafka_buf_skip(rkbuf, \
- (size_t)(_taglen - 1)); \
- } \
- } while (0)
-
-/**
- * @brief Write tags at the current position in the buffer.
- * @remark Currently always writes empty tags.
- * @remark Change to ..write_uvarint() when actual tags are supported.
- */
-#define rd_kafka_buf_write_tags(rkbuf) \
- do { \
- if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
- break; \
- rd_kafka_buf_write_i8(rkbuf, 0); \
- } while (0)
-
-
-/**
- * @brief Reads an ARRAY or COMPACT_ARRAY count depending on buffer type.
- */
-#define rd_kafka_buf_read_arraycnt(rkbuf, arrcnt, maxval) \
- do { \
- if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \
- uint64_t _uva; \
- rd_kafka_buf_read_uvarint(rkbuf, &_uva); \
- *(arrcnt) = (int32_t)_uva - 1; \
- } else { \
- rd_kafka_buf_read_i32(rkbuf, arrcnt); \
- } \
- if (*(arrcnt) < -1 || \
- ((maxval) != -1 && *(arrcnt) > (maxval))) \
- rd_kafka_buf_parse_fail( \
- rkbuf, "ApiArrayCnt %" PRId32 " out of range", \
- *(arrcnt)); \
- } while (0)
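-
-/* Illustration only (not part of librdkafka): reading a (COMPACT_)ARRAY:
- * fetch the element count, read each element, and consume per-element
- * tags (a no-op for non-flexver buffers): */
-#if 0
-int32_t cnt;
-rd_kafka_buf_read_arraycnt(rkbuf, &cnt, 100000 /*sanity max*/);
-while (cnt-- > 0) {
-        int32_t PartitionId;
-        rd_kafka_buf_read_i32(rkbuf, &PartitionId);
-        rd_kafka_buf_skip_tags(rkbuf);
-}
-#endif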
-
-
-
-/**
- * @returns true if the buffer has been sent on the wire, else false.
- */
-#define rd_kafka_buf_was_sent(rkbuf) ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_SENT)
-
-typedef struct rd_kafka_bufq_s {
- TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs;
- rd_atomic32_t rkbq_cnt;
- rd_atomic32_t rkbq_msg_cnt;
-} rd_kafka_bufq_t;
-
-#define rd_kafka_bufq_cnt(rkbq) rd_atomic32_get(&(rkbq)->rkbq_cnt)
-
-/**
- * @brief Set buffer's request timeout to relative \p timeout_ms measured
- * from the time the buffer is sent on the underlying socket.
- *
- * @param now Reuse current time from existing rd_clock() var, else 0.
- *
- * The relative timeout value is reused upon request retry.
- */
-static RD_INLINE void
-rd_kafka_buf_set_timeout(rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) {
- if (!now)
- now = rd_clock();
- rkbuf->rkbuf_rel_timeout = timeout_ms;
- rkbuf->rkbuf_abs_timeout = 0;
-}
-
-
-/**
- * @brief Calculate the effective timeout for a request attempt
- */
-void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk,
- rd_kafka_buf_t *rkbuf,
- rd_ts_t now);
-
-
-/**
- * @brief Set buffer's request timeout to relative \p timeout_ms measured
- * from \p now.
- *
- * @param now The current time from an existing rd_clock() call,
- *            or 0 to sample the clock now.
- * @param force If true: force request timeout to be same as remaining
- * abs timeout, regardless of socket.timeout.ms.
- * If false: cap each request timeout to socket.timeout.ms.
- *
- * The remaining time is used as timeout for request retries.
- */
-static RD_INLINE void rd_kafka_buf_set_abs_timeout0(rd_kafka_buf_t *rkbuf,
- int timeout_ms,
- rd_ts_t now,
- rd_bool_t force) {
- if (!now)
- now = rd_clock();
- rkbuf->rkbuf_rel_timeout = 0;
- rkbuf->rkbuf_abs_timeout = now + ((rd_ts_t)timeout_ms * 1000);
- rkbuf->rkbuf_force_timeout = force;
-}
-
-#define rd_kafka_buf_set_abs_timeout(rkbuf, timeout_ms, now) \
- rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_false)
-
-
-#define rd_kafka_buf_set_abs_timeout_force(rkbuf, timeout_ms, now) \
- rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_true)
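-
-/* An illustrative sketch (not librdkafka code) of the per-attempt
- * timeout semantics described above: a relative timeout restarts on
- * every (re)send, while an absolute timeout caps the sum of all
- * retries; unless forced, each attempt is additionally capped by
- * socket.timeout.ms. */
-static RD_UNUSED int example_effective_timeout_ms(rd_ts_t abs_timeout,
-                                                  rd_ts_t now,
-                                                  int socket_timeout_ms,
-                                                  rd_bool_t force) {
-        int remains_ms = (int)((abs_timeout - now) / 1000);
-
-        if (remains_ms < 0)
-                remains_ms = 0;
-        if (!force && remains_ms > socket_timeout_ms)
-                remains_ms = socket_timeout_ms;
-        return remains_ms;
-}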
-
-
-#define rd_kafka_buf_keep(rkbuf) rd_refcnt_add(&(rkbuf)->rkbuf_refcnt)
-#define rd_kafka_buf_destroy(rkbuf) \
- rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt, \
- rd_kafka_buf_destroy_final(rkbuf))
-
-void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf);
-void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf,
- const void *buf,
- size_t len,
- int allow_crc_calc,
- void (*free_cb)(void *));
-#define rd_kafka_buf_push(rkbuf, buf, len, free_cb) \
- rd_kafka_buf_push0(rkbuf, buf, len, 1 /*allow_crc*/, free_cb)
-rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags);
-#define rd_kafka_buf_new(segcnt, size) rd_kafka_buf_new0(segcnt, size, 0)
-rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
- int16_t ApiKey,
- int segcnt,
- size_t size,
- rd_bool_t is_flexver);
-#define rd_kafka_buf_new_request(rkb, ApiKey, segcnt, size) \
- rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, rd_false)
-
-#define rd_kafka_buf_new_flexver_request(rkb, ApiKey, segcnt, size, \
- is_flexver) \
- rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, is_flexver)
-
-rd_kafka_buf_t *
-rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *));
-void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf);
-void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf);
-void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq);
-void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src);
-void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbufq,
- rd_kafka_resp_err_t err);
-void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbufq);
-void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb,
- const char *fac,
- rd_kafka_bufq_t *rkbq);
-
-int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
-
-void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err);
-void rd_kafka_buf_callback(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *response,
- rd_kafka_buf_t *request);
-
-
-
-/**
- *
- * Write buffer interface
- *
- */
-
-/**
- * Set the request's ApiVersion and feature flags.
- */
-static RD_UNUSED RD_INLINE void
-rd_kafka_buf_ApiVersion_set(rd_kafka_buf_t *rkbuf,
- int16_t version,
- int features) {
- rkbuf->rkbuf_reqhdr.ApiVersion = version;
- rkbuf->rkbuf_features = features;
-}
-
-
-/**
- * @returns the ApiVersion for a request
- */
-#define rd_kafka_buf_ApiVersion(rkbuf) ((rkbuf)->rkbuf_reqhdr.ApiVersion)
-
-
-
-/**
- * Write (copy) data to buffer at current write-buffer position.
- * There must be enough space allocated in the rkbuf.
- * Returns the offset of the written data in the destination buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write(rd_kafka_buf_t *rkbuf,
- const void *data,
- size_t len) {
- size_t r;
-
- r = rd_buf_write(&rkbuf->rkbuf_buf, data, len);
-
- if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)
- rkbuf->rkbuf_crc = rd_crc32_update(rkbuf->rkbuf_crc, data, len);
-
- return r;
-}
-
-
-
-/**
- * Write (copy) 'data' to buffer at 'ptr'.
- * There must be enough space to fit 'len'.
- * This will overwrite the buffer at given location and length.
- *
- * NOTE: rd_kafka_buf_update() MUST NOT be called when a CRC calculation
- * is in progress (between rd_kafka_buf_crc_init() & .._crc_finalize())
- */
-static RD_INLINE void rd_kafka_buf_update(rd_kafka_buf_t *rkbuf,
- size_t of,
- const void *data,
- size_t len) {
- rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC));
- rd_buf_write_update(&rkbuf->rkbuf_buf, of, data, len);
-}
-
-/**
- * Write int8_t to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i8(rd_kafka_buf_t *rkbuf, int8_t v) {
- return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int8_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i8()`.
- */
-static RD_INLINE void
-rd_kafka_buf_update_i8(rd_kafka_buf_t *rkbuf, size_t of, int8_t v) {
- rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-/**
- * Write int16_t to buffer.
- * The value will be endian-swapped before write.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i16(rd_kafka_buf_t *rkbuf,
- int16_t v) {
- v = htobe16(v);
- return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int16_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i16()`.
- */
-static RD_INLINE void
-rd_kafka_buf_update_i16(rd_kafka_buf_t *rkbuf, size_t of, int16_t v) {
- v = htobe16(v);
- rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-/**
- * Write int32_t to buffer.
- * The value will be endian-swapped before write.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i32(rd_kafka_buf_t *rkbuf,
- int32_t v) {
- v = (int32_t)htobe32(v);
- return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int32_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i32()`.
- */
-static RD_INLINE void
-rd_kafka_buf_update_i32(rd_kafka_buf_t *rkbuf, size_t of, int32_t v) {
- v = htobe32(v);
- rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-/**
- * Update uint32_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i32()`.
- */
-static RD_INLINE void
-rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) {
- v = htobe32(v);
- rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-
-/**
- * @brief Write varint-encoded signed value to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf,
- int64_t v) {
- char varint[RD_UVARINT_ENC_SIZEOF(v)];
- size_t sz;
-
- sz = rd_uvarint_enc_i64(varint, sizeof(varint), v);
-
- return rd_kafka_buf_write(rkbuf, varint, sz);
-}
-
-/**
- * @brief Write varint-encoded unsigned value to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf,
- uint64_t v) {
- char varint[RD_UVARINT_ENC_SIZEOF(v)];
- size_t sz;
-
- sz = rd_uvarint_enc_u64(varint, sizeof(varint), v);
-
- return rd_kafka_buf_write(rkbuf, varint, sz);
-}
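-
-/* A standalone sketch of the encoding used by the two writers above:
- * unsigned values are emitted LEB128-style, 7 bits per byte with a
- * continuation bit, and signed values are first zigzag-mapped so that
- * small negative numbers stay short. Illustrative only; not the
- * rd_uvarint_enc_*() implementation. */
-static RD_UNUSED size_t example_uvarint_enc(char *buf, uint64_t v) {
-        size_t i = 0;
-
-        do {
-                unsigned char b = (unsigned char)(v & 0x7f);
-                v >>= 7;
-                if (v)
-                        b |= 0x80; /* continuation bit */
-                buf[i++] = (char)b;
-        } while (v);
-        return i;
-}
-
-static RD_UNUSED size_t example_varint_enc(char *buf, int64_t v) {
-        /* zigzag: 0,-1,1,-2,2,.. -> 0,1,2,3,4,.. */
-        return example_uvarint_enc(buf,
-                                   ((uint64_t)v << 1) ^ (uint64_t)(v >> 63));
-}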
-
-
-
-/**
- * @brief Write standard or flexver array count field to buffer.
- * Use this when the array count is known beforehand, else use
- * rd_kafka_buf_write_arraycnt_pos().
- */
-static RD_INLINE RD_UNUSED size_t
-rd_kafka_buf_write_arraycnt(rd_kafka_buf_t *rkbuf, size_t cnt) {
-
- /* Count must fit in 31-bits minus the per-byte carry-bit */
- rd_assert(cnt + 1 < (size_t)(INT_MAX >> 4));
-
- if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))
- return rd_kafka_buf_write_i32(rkbuf, (int32_t)cnt);
-
- /* CompactArray has a base of 1, 0 is for Null arrays */
- cnt += 1;
- return rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)cnt);
-}
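-
-/* A standalone sketch of the two count encodings handled above:
- * legacy versions write a big-endian int32, flexible versions write
- * the compact uvarint of count+1 so that 0 can denote a NULL array.
- * Reuses example_uvarint_enc() from the sketch above; illustrative
- * only. */
-static RD_UNUSED size_t
-example_arraycnt_enc(char *buf, size_t cnt, rd_bool_t flexver) {
-        if (!flexver) {
-                buf[0] = (char)(cnt >> 24);
-                buf[1] = (char)(cnt >> 16);
-                buf[2] = (char)(cnt >> 8);
-                buf[3] = (char)cnt;
-                return 4;
-        }
-        return example_uvarint_enc(buf, (uint64_t)cnt + 1);
-}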
-
-
-/**
- * @brief Write array count field to buffer (i32) for later update with
- * rd_kafka_buf_finalize_arraycnt().
- */
-#define rd_kafka_buf_write_arraycnt_pos(rkbuf) rd_kafka_buf_write_i32(rkbuf, 0)
-
-
-/**
- * @brief Write the final array count to the position returned from
- * rd_kafka_buf_write_arraycnt_pos().
- *
- * Update int32_t in buffer at offset 'of' but serialize it as
- * compact uvarint (that must not exceed 4 bytes storage)
- *        if the \p rkbuf is marked as FLEXVER, else just update it
- *        as a standard update_i32().
- *
- * @remark For flexibleVersions this will shrink the buffer and move data
- * and may thus be costly.
- */
-static RD_INLINE void
-rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, size_t cnt) {
- char buf[sizeof(int32_t)];
- size_t sz, r;
-
- rd_assert(cnt < (size_t)INT_MAX);
-
- if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
- rd_kafka_buf_update_i32(rkbuf, of, (int32_t)cnt);
- return;
- }
-
- /* CompactArray has a base of 1, 0 is for Null arrays */
- cnt += 1;
-
- sz = rd_uvarint_enc_u64(buf, sizeof(buf), (uint64_t)cnt);
- rd_assert(!RD_UVARINT_OVERFLOW(sz));
- if (cnt < 127)
- rd_assert(sz == 1);
- rd_buf_write_update(&rkbuf->rkbuf_buf, of, buf, sz);
-
- if (sz < sizeof(int32_t)) {
- /* Varint occupies less space than the allotted 4 bytes, erase
- * the remaining bytes. */
- r = rd_buf_erase(&rkbuf->rkbuf_buf, of + sz,
- sizeof(int32_t) - sz);
- rd_assert(r == sizeof(int32_t) - sz);
- }
-}
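-
-/* A standalone sketch of the back-patching scheme above: 4 bytes are
- * reserved for the count, the elements are appended, and the
- * reservation is finally overwritten with the compact uvarint, moving
- * the element data down to close any gap (the "costly" part noted in
- * the remark). Reuses example_uvarint_enc(); names and the flat
- * buffer layout are illustrative. */
-static RD_UNUSED size_t example_finalize_arraycnt(char *buf,
-                                                  size_t of,
-                                                  size_t end,
-                                                  size_t cnt) {
-        char tmp[5];
-        size_t sz = example_uvarint_enc(tmp, (uint64_t)cnt + 1);
-
-        rd_assert(sz <= 4); /* cnt must fit the 4-byte reservation */
-        memcpy(buf + of, tmp, sz);
-        if (sz < 4) {
-                memmove(buf + of + sz, buf + of + 4, end - (of + 4));
-                end -= 4 - sz;
-        }
-        return end; /* new end-of-data offset */
-}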
-
-
-/**
- * Write int64_t to buffer.
- * The value will be endian-swapped before write.
- */
-static RD_INLINE size_t rd_kafka_buf_write_i64(rd_kafka_buf_t *rkbuf,
- int64_t v) {
- v = htobe64(v);
- return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
-}
-
-/**
- * Update int64_t in buffer at offset 'of'.
- * 'of' should have been previously returned by `.._buf_write_i64()`.
- */
-static RD_INLINE void
-rd_kafka_buf_update_i64(rd_kafka_buf_t *rkbuf, size_t of, int64_t v) {
- v = htobe64(v);
- rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
-}
-
-
-/**
- * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer.
- *
- * @remark Copies the string.
- *
- * @returns the offset in \p rkbuf where the string was written.
- */
-static RD_INLINE size_t rd_kafka_buf_write_kstr(rd_kafka_buf_t *rkbuf,
- const rd_kafkap_str_t *kstr) {
- size_t len, r;
-
- if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
- /* Standard string */
- if (!kstr || RD_KAFKAP_STR_IS_NULL(kstr))
- return rd_kafka_buf_write_i16(rkbuf, -1);
-
- if (RD_KAFKAP_STR_IS_SERIALIZED(kstr))
- return rd_kafka_buf_write(rkbuf,
- RD_KAFKAP_STR_SER(kstr),
- RD_KAFKAP_STR_SIZE(kstr));
-
- len = RD_KAFKAP_STR_LEN(kstr);
- r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len);
- rd_kafka_buf_write(rkbuf, kstr->str, len);
-
- return r;
- }
-
- /* COMPACT_STRING lengths are:
- * 0 = NULL,
- * 1 = empty
- * N.. = length + 1
- */
- if (!kstr || RD_KAFKAP_STR_IS_NULL(kstr))
- len = 0;
- else
- len = RD_KAFKAP_STR_LEN(kstr) + 1;
-
- r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len);
- if (len > 1)
- rd_kafka_buf_write(rkbuf, kstr->str, len - 1);
- return r;
-}
-
-
-
-/**
- * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer.
- *
- * @remark Copies the string.
- */
-static RD_INLINE size_t rd_kafka_buf_write_str(rd_kafka_buf_t *rkbuf,
- const char *str,
- size_t len) {
- size_t r;
-
- if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
- /* Standard string */
- if (!str)
- len = RD_KAFKAP_STR_LEN_NULL;
- else if (len == (size_t)-1)
- len = strlen(str);
- r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len);
- if (str)
- rd_kafka_buf_write(rkbuf, str, len);
- return r;
- }
-
- /* COMPACT_STRING lengths are:
- * 0 = NULL,
- * 1 = empty
- * N.. = length + 1
- */
- if (!str)
- len = 0;
- else if (len == (size_t)-1)
- len = strlen(str) + 1;
- else
- len++;
-
- r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len);
- if (len > 1)
- rd_kafka_buf_write(rkbuf, str, len - 1);
- return r;
-}
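-
-/* A standalone sketch of the COMPACT_STRING length mapping used
- * above: 0 encodes NULL, 1 encodes the empty string, and N encodes a
- * string of N-1 bytes. Reuses example_uvarint_enc() from the sketch
- * further above; illustrative only. */
-static RD_UNUSED size_t example_compact_str_enc(char *buf,
-                                                const char *str) {
-        size_t len = str ? strlen(str) + 1 : 0;
-        size_t of  = example_uvarint_enc(buf, (uint64_t)len);
-
-        if (len > 1) {
-                memcpy(buf + of, str, len - 1);
-                of += len - 1;
-        }
-        return of;
-}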
-
-
-
-/**
- * Push (i.e., no copy) Kafka string to buffer iovec
- */
-static RD_INLINE void rd_kafka_buf_push_kstr(rd_kafka_buf_t *rkbuf,
- const rd_kafkap_str_t *kstr) {
- rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr),
- RD_KAFKAP_STR_SIZE(kstr), NULL);
-}
-
-
-
-/**
- * Write (copy) Kafka bytes to buffer.
- */
-static RD_INLINE size_t
-rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf,
- const rd_kafkap_bytes_t *kbytes) {
- size_t len;
-
- if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes))
- return rd_kafka_buf_write_i32(rkbuf, -1);
-
- if (RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes))
- return rd_kafka_buf_write(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
- RD_KAFKAP_BYTES_SIZE(kbytes));
-
- len = RD_KAFKAP_BYTES_LEN(kbytes);
- rd_kafka_buf_write_i32(rkbuf, (int32_t)len);
- rd_kafka_buf_write(rkbuf, kbytes->data, len);
-
- return 4 + len;
-}
-
-/**
- * Push (i.e., no copy) Kafka bytes to buffer iovec
- */
-static RD_INLINE void
-rd_kafka_buf_push_kbytes(rd_kafka_buf_t *rkbuf,
- const rd_kafkap_bytes_t *kbytes) {
- rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
- RD_KAFKAP_BYTES_SIZE(kbytes), NULL);
-}
-
-/**
- * Write (copy) binary data to buffer, encapsulated as Kafka bytes.
- */
-static RD_INLINE size_t rd_kafka_buf_write_bytes(rd_kafka_buf_t *rkbuf,
- const void *payload,
- size_t size) {
- size_t r;
- if (!payload)
- size = RD_KAFKAP_BYTES_LEN_NULL;
- r = rd_kafka_buf_write_i32(rkbuf, (int32_t)size);
- if (payload)
- rd_kafka_buf_write(rkbuf, payload, size);
- return r;
-}
-
-
-/**
- * @brief Write bool to buffer.
- */
-static RD_INLINE size_t rd_kafka_buf_write_bool(rd_kafka_buf_t *rkbuf,
- rd_bool_t v) {
- return rd_kafka_buf_write_i8(rkbuf, (int8_t)v);
-}
-
-
-/**
- * Write Kafka Message to buffer
- * The number of bytes written is returned in '*outlenp'.
- *
- * Returns the buffer offset of the first byte.
- */
-size_t rd_kafka_buf_write_Message(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- int64_t Offset,
- int8_t MagicByte,
- int8_t Attributes,
- int64_t Timestamp,
- const void *key,
- int32_t key_len,
- const void *payload,
- int32_t len,
- int *outlenp);
-
-/**
- * Start calculating CRC from this point, tracking it in rkbuf->rkbuf_crc.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init(rd_kafka_buf_t *rkbuf) {
- rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC));
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC;
- rkbuf->rkbuf_crc = rd_crc32_init();
-}
-
-/**
- * Finalizes CRC calculation and returns the calculated checksum.
- */
-static RD_INLINE RD_UNUSED rd_crc32_t
-rd_kafka_buf_crc_finalize(rd_kafka_buf_t *rkbuf) {
- rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC;
- return rd_crc32_finalize(rkbuf->rkbuf_crc);
-}
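-
-/* A hedged usage sketch of the CRC helpers above: everything written
- * between crc_init() and crc_finalize() is folded into the running
- * CRC32, which is then back-patched into a previously reserved field.
- * Assumes a valid request buffer; the payload is illustrative. */
-static RD_UNUSED void example_crc_roundtrip(rd_kafka_buf_t *rkbuf) {
-        size_t crc_of = rd_kafka_buf_write_i32(rkbuf, 0); /* placeholder */
-
-        rd_kafka_buf_crc_init(rkbuf);
-        rd_kafka_buf_write_i64(rkbuf, 1234); /* CRC-covered payload */
-        /* crc_finalize() clears the CRC flag before the update runs,
-         * satisfying the assertion in rd_kafka_buf_update(). */
-        rd_kafka_buf_update_u32(rkbuf, crc_of,
-                                rd_kafka_buf_crc_finalize(rkbuf));
-}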
-
-
-
-/**
- * @brief Check if buffer's replyq.version is outdated.
- * @param rkbuf: may be NULL, for convenience.
- *
- * @returns 1 if this is an outdated buffer, else 0.
- */
-static RD_UNUSED RD_INLINE int
-rd_kafka_buf_version_outdated(const rd_kafka_buf_t *rkbuf, int version) {
- return rkbuf && rkbuf->rkbuf_replyq.version &&
- rkbuf->rkbuf_replyq.version < version;
-}
-
-
-void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
- rd_kafka_make_req_cb_t *make_cb,
- void *make_opaque,
- void (*free_make_opaque_cb)(void *make_opaque));
-
-#endif /* _RDKAFKA_BUF_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c
deleted file mode 100644
index 2a19e4549..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name SSL certificates
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport_int.h"
-
-
-#if WITH_SSL
-#include "rdkafka_ssl.h"
-
-#include <openssl/x509.h>
-#include <openssl/evp.h>
-
-/**
- * @brief OpenSSL password query callback using a conf struct.
- *
- * @locality application thread
- */
-static int
-rd_kafka_conf_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) {
- const rd_kafka_conf_t *conf = userdata;
- int pwlen;
-
- if (!conf->ssl.key_password)
- return -1;
-
- pwlen = (int)strlen(conf->ssl.key_password);
- memcpy(buf, conf->ssl.key_password, RD_MIN(pwlen, size));
-
- return pwlen;
-}
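-
-/* A hedged usage sketch: the callback above matches OpenSSL's
- * pem_password_cb signature, so it can be handed to any PEM reader,
- * as the PEM branches further below do. The BIO is assumed to wrap a
- * PEM-encoded private key; the helper name is illustrative. */
-static RD_UNUSED EVP_PKEY *
-example_read_pem_key(BIO *bio, const rd_kafka_conf_t *conf) {
-        return PEM_read_bio_PrivateKey(bio, NULL,
-                                       rd_kafka_conf_ssl_passwd_cb,
-                                       (void *)conf);
-}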
-
-
-
-static const char *rd_kafka_cert_type_names[] = {"public-key", "private-key",
- "CA"};
-
-static const char *rd_kafka_cert_enc_names[] = {"PKCS#12", "DER", "PEM"};
-
-
-/**
- * @brief Destroy a certificate
- */
-static void rd_kafka_cert_destroy(rd_kafka_cert_t *cert) {
- if (rd_refcnt_sub(&cert->refcnt) > 0)
- return;
-
- if (cert->x509)
- X509_free(cert->x509);
- if (cert->pkey)
- EVP_PKEY_free(cert->pkey);
- if (cert->store)
- X509_STORE_free(cert->store);
-
- rd_free(cert);
-}
-
-
-/**
- * @brief Create a copy of a cert
- */
-static rd_kafka_cert_t *rd_kafka_cert_dup(rd_kafka_cert_t *src) {
- rd_refcnt_add(&src->refcnt);
- return src;
-}
-
-
-#if OPENSSL_VERSION_NUMBER < 0x30000000
-/**
- * @brief Print the OpenSSL error stack to stdout, for development use.
- */
-static RD_UNUSED void rd_kafka_print_ssl_errors(void) {
- unsigned long l;
- const char *file, *data;
- int line, flags;
-
- while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) !=
- 0) {
- char buf[256];
-
- ERR_error_string_n(l, buf, sizeof(buf));
-
- printf("ERR: %s:%d: %s: %s:\n", file, line, buf,
- (flags & ERR_TXT_STRING) ? data : "");
- printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", l,
- ERR_lib_error_string(l), ERR_func_error_string(l), file,
- line,
- (flags & ERR_TXT_STRING) && data && *data
- ? data
- : ERR_reason_error_string(l),
- data, data ? (int)strlen(data) : -1,
- flags & ERR_TXT_STRING);
- }
-}
-#endif
-
-
-/**
- * @returns a cert structure with a copy of the memory in \p buffer on success,
- * or NULL on failure in which case errstr will have a human-readable
- * error string written to it.
- */
-static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf,
- rd_kafka_cert_type_t type,
- rd_kafka_cert_enc_t encoding,
- const void *buffer,
- size_t size,
- char *errstr,
- size_t errstr_size) {
- static const rd_bool_t
- valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = {
- /* Valid encodings per certificate type */
- [RD_KAFKA_CERT_PUBLIC_KEY] = {[RD_KAFKA_CERT_ENC_PKCS12] =
- rd_true,
- [RD_KAFKA_CERT_ENC_DER] = rd_true,
- [RD_KAFKA_CERT_ENC_PEM] =
- rd_true},
- [RD_KAFKA_CERT_PRIVATE_KEY] =
- {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
- [RD_KAFKA_CERT_ENC_DER] = rd_true,
- [RD_KAFKA_CERT_ENC_PEM] = rd_true},
- [RD_KAFKA_CERT_CA] = {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
- [RD_KAFKA_CERT_ENC_DER] = rd_true,
- [RD_KAFKA_CERT_ENC_PEM] = rd_true},
- };
- const char *action = "", *ssl_errstr = NULL, *extra = "";
- BIO *bio;
- rd_kafka_cert_t *cert = NULL;
- PKCS12 *p12 = NULL;
-
- if ((int)type < 0 || type >= RD_KAFKA_CERT__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid certificate type %d",
- (int)type);
- return NULL;
- }
-
- if ((int)encoding < 0 || encoding >= RD_KAFKA_CERT_ENC__CNT) {
- rd_snprintf(errstr, errstr_size,
- "Invalid certificate encoding %d", (int)encoding);
- return NULL;
- }
-
- if (!valid[type][encoding]) {
- rd_snprintf(errstr, errstr_size,
- "Invalid encoding %s for certificate type %s",
- rd_kafka_cert_enc_names[encoding],
- rd_kafka_cert_type_names[type]);
- return NULL;
- }
-
- action = "read memory";
- bio = BIO_new_mem_buf((void *)buffer, (long)size);
- if (!bio)
- goto fail;
-
- if (encoding == RD_KAFKA_CERT_ENC_PKCS12) {
- action = "read PKCS#12";
- p12 = d2i_PKCS12_bio(bio, NULL);
- if (!p12)
- goto fail;
- }
-
- cert = rd_calloc(1, sizeof(*cert));
- cert->type = type;
- cert->encoding = encoding;
-
- rd_refcnt_init(&cert->refcnt, 1);
-
- switch (type) {
- case RD_KAFKA_CERT_CA:
- cert->store = X509_STORE_new();
-
- switch (encoding) {
- case RD_KAFKA_CERT_ENC_PKCS12: {
- EVP_PKEY *ign_pkey;
- X509 *ign_cert;
- STACK_OF(X509) *cas = NULL;
- int i;
-
- action = "parse PKCS#12";
- if (!PKCS12_parse(p12, conf->ssl.key_password,
- &ign_pkey, &ign_cert, &cas))
- goto fail;
-
- EVP_PKEY_free(ign_pkey);
- X509_free(ign_cert);
-
- if (!cas || sk_X509_num(cas) < 1) {
- action =
- "retrieve at least one CA "
- "cert from PKCS#12";
- if (cas)
- sk_X509_pop_free(cas, X509_free);
- goto fail;
- }
-
- for (i = 0; i < sk_X509_num(cas); i++) {
- if (!X509_STORE_add_cert(
- cert->store, sk_X509_value(cas, i))) {
- action =
- "add certificate to "
- "X.509 store";
- sk_X509_pop_free(cas, X509_free);
- goto fail;
- }
- }
-
- sk_X509_pop_free(cas, X509_free);
- } break;
-
- case RD_KAFKA_CERT_ENC_DER: {
- X509 *x509;
-
- action = "read DER / X.509 ASN.1";
- if (!(x509 = d2i_X509_bio(bio, NULL)))
- goto fail;
-
- if (!X509_STORE_add_cert(cert->store, x509)) {
- action =
- "add certificate to "
- "X.509 store";
- X509_free(x509);
- goto fail;
- }
-
- X509_free(x509);
- } break;
-
- case RD_KAFKA_CERT_ENC_PEM: {
- X509 *x509;
- int cnt = 0;
-
- action = "read PEM";
-
- /* This will read one certificate per call
- * until an error occurs or the end of the
- * buffer is reached (which is an error
- * we'll need to clear). */
- while ((x509 = PEM_read_bio_X509(
- bio, NULL, rd_kafka_conf_ssl_passwd_cb,
- (void *)conf))) {
-
- if (!X509_STORE_add_cert(cert->store, x509)) {
- action =
- "add certificate to "
- "X.509 store";
- X509_free(x509);
- goto fail;
- }
-
- X509_free(x509);
- cnt++;
- }
-
- if (!BIO_eof(bio)) {
- /* Encountered parse error before
- * reaching end, propagate error and
- * fail. */
- goto fail;
- }
-
- if (!cnt) {
- action =
- "retrieve at least one "
- "CA cert from PEM";
-
- goto fail;
- }
-
-                        /* Reached the end of the buffer, which OpenSSL
-                         * raises as an error even though it is expected
-                         * here, so clear it. */
- ERR_clear_error();
- } break;
-
- default:
- RD_NOTREACHED();
- break;
- }
- break;
-
-
- case RD_KAFKA_CERT_PUBLIC_KEY:
- switch (encoding) {
- case RD_KAFKA_CERT_ENC_PKCS12: {
- EVP_PKEY *ign_pkey;
-
- action = "parse PKCS#12";
- if (!PKCS12_parse(p12, conf->ssl.key_password,
- &ign_pkey, &cert->x509, NULL))
- goto fail;
-
- EVP_PKEY_free(ign_pkey);
-
- action = "retrieve public key";
- if (!cert->x509)
- goto fail;
- } break;
-
- case RD_KAFKA_CERT_ENC_DER:
- action = "read DER / X.509 ASN.1";
- cert->x509 = d2i_X509_bio(bio, NULL);
- if (!cert->x509)
- goto fail;
- break;
-
- case RD_KAFKA_CERT_ENC_PEM:
- action = "read PEM";
- cert->x509 = PEM_read_bio_X509(
- bio, NULL, rd_kafka_conf_ssl_passwd_cb,
- (void *)conf);
- if (!cert->x509)
- goto fail;
- break;
-
- default:
- RD_NOTREACHED();
- break;
- }
- break;
-
-
- case RD_KAFKA_CERT_PRIVATE_KEY:
- switch (encoding) {
- case RD_KAFKA_CERT_ENC_PKCS12: {
- X509 *x509;
-
- action = "parse PKCS#12";
- if (!PKCS12_parse(p12, conf->ssl.key_password,
- &cert->pkey, &x509, NULL))
- goto fail;
-
- X509_free(x509);
-
- action = "retrieve private key";
- if (!cert->pkey)
- goto fail;
- } break;
-
- case RD_KAFKA_CERT_ENC_DER:
- action =
- "read DER / X.509 ASN.1 and "
- "convert to EVP_PKEY";
- cert->pkey = d2i_PrivateKey_bio(bio, NULL);
- if (!cert->pkey)
- goto fail;
- break;
-
- case RD_KAFKA_CERT_ENC_PEM:
- action = "read PEM";
- cert->pkey = PEM_read_bio_PrivateKey(
- bio, NULL, rd_kafka_conf_ssl_passwd_cb,
- (void *)conf);
- if (!cert->pkey)
- goto fail;
- break;
-
- default:
- RD_NOTREACHED();
- break;
- }
- break;
-
- default:
- RD_NOTREACHED();
- break;
- }
-
- if (bio)
- BIO_free(bio);
- if (p12)
- PKCS12_free(p12);
-
- return cert;
-
-fail:
- ssl_errstr = rd_kafka_ssl_last_error_str();
-
- /* OpenSSL 3.x does not provide obsolete ciphers out of the box, so
- * let's try to identify such an error message and guide the user
- * to what to do (set up a provider config file and point to it
- * through the OPENSSL_CONF environment variable).
- * We could call OSSL_PROVIDER_load("legacy") here, but that would be
- * a non-obvious side-effect of calling this set function. */
- if (strstr(action, "parse") && strstr(ssl_errstr, "Algorithm"))
- extra =
- ": legacy ciphers may require loading OpenSSL's \"legacy\" "
- "provider through an OPENSSL_CONF configuration file";
-
- rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s%s",
- action, rd_kafka_cert_type_names[type],
- rd_kafka_cert_enc_names[encoding], ssl_errstr, extra);
-
- if (cert)
- rd_kafka_cert_destroy(cert);
- if (bio)
- BIO_free(bio);
- if (p12)
- PKCS12_free(p12);
-
- return NULL;
-}
-#endif /* WITH_SSL */
-
-
-/**
- * @name Public API
- * @brief These public methods must be available regardless of whether
- *        librdkafka was built with OpenSSL.
- * @{
- */
-
-rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf,
- rd_kafka_cert_type_t cert_type,
- rd_kafka_cert_enc_t cert_enc,
- const void *buffer,
- size_t size,
- char *errstr,
- size_t errstr_size) {
-#if !WITH_SSL
- rd_snprintf(errstr, errstr_size,
- "librdkafka not built with OpenSSL support");
- return RD_KAFKA_CONF_INVALID;
-#else
- rd_kafka_cert_t *cert;
- rd_kafka_cert_t **cert_map[RD_KAFKA_CERT__CNT] = {
- [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert,
- [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key,
- [RD_KAFKA_CERT_CA] = &conf->ssl.ca};
- rd_kafka_cert_t **certp;
-
- if ((int)cert_type < 0 || cert_type >= RD_KAFKA_CERT__CNT) {
- rd_snprintf(errstr, errstr_size, "Invalid certificate type %d",
- (int)cert_type);
- return RD_KAFKA_CONF_INVALID;
- }
-
- /* Make sure OpenSSL is loaded */
- rd_kafka_global_init();
-
- certp = cert_map[cert_type];
-
- if (!buffer) {
- /* Clear current value */
- if (*certp) {
- rd_kafka_cert_destroy(*certp);
- *certp = NULL;
- }
- return RD_KAFKA_CONF_OK;
- }
-
- cert = rd_kafka_cert_new(conf, cert_type, cert_enc, buffer, size,
- errstr, errstr_size);
- if (!cert)
- return RD_KAFKA_CONF_INVALID;
-
- if (*certp)
- rd_kafka_cert_destroy(*certp);
-
- *certp = cert;
-
- return RD_KAFKA_CONF_OK;
-#endif
-}
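-
-/* A hedged usage sketch of the public API above: load a PEM CA
- * certificate from memory. The pem pointer and size are assumed to
- * hold the certificate bytes; error handling is illustrative. */
-static RD_UNUSED int example_set_ca_cert(rd_kafka_conf_t *conf,
-                                         const char *pem,
-                                         size_t pem_size) {
-        char errstr[512];
-
-        if (rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_CA,
-                                       RD_KAFKA_CERT_ENC_PEM, pem, pem_size,
-                                       errstr,
-                                       sizeof(errstr)) != RD_KAFKA_CONF_OK)
-                return -1; /* errstr holds a human-readable reason */
-        return 0;
-}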
-
-
-
-/**
- * @brief Destructor called when configuration object is destroyed.
- */
-void rd_kafka_conf_cert_dtor(int scope, void *pconf) {
-#if WITH_SSL
- rd_kafka_conf_t *conf = pconf;
- assert(scope == _RK_GLOBAL);
- if (conf->ssl.key) {
- rd_kafka_cert_destroy(conf->ssl.key);
- conf->ssl.key = NULL;
- }
- if (conf->ssl.cert) {
- rd_kafka_cert_destroy(conf->ssl.cert);
- conf->ssl.cert = NULL;
- }
- if (conf->ssl.ca) {
- rd_kafka_cert_destroy(conf->ssl.ca);
- conf->ssl.ca = NULL;
- }
-#endif
-}
-
-/**
- * @brief Copy-constructor called when configuration object \p psrc is
- *        duplicated to \p pdst.
- */
-void rd_kafka_conf_cert_copy(int scope,
- void *pdst,
- const void *psrc,
- void *dstptr,
- const void *srcptr,
- size_t filter_cnt,
- const char **filter) {
-#if WITH_SSL
- rd_kafka_conf_t *dconf = pdst;
- const rd_kafka_conf_t *sconf = psrc;
-
- assert(scope == _RK_GLOBAL);
-
-        /* Free and reset any existing certs on the destination conf */
- rd_kafka_conf_cert_dtor(scope, pdst);
-
- if (sconf->ssl.key)
- dconf->ssl.key = rd_kafka_cert_dup(sconf->ssl.key);
-
- if (sconf->ssl.cert)
- dconf->ssl.cert = rd_kafka_cert_dup(sconf->ssl.cert);
-
- if (sconf->ssl.ca)
- dconf->ssl.ca = rd_kafka_cert_dup(sconf->ssl.ca);
-#endif
-}
-
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h
deleted file mode 100644
index b53f46c01..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDKAFKA_CERT_H_
-#define _RDKAFKA_CERT_H_
-
-
-/**
- * @struct rd_kafka_cert
- *
- * @brief Internal representation of a (cert_type, cert_enc, memory) tuple.
- *
- * @remark Certificates are read-only after construction.
- */
-typedef struct rd_kafka_cert_s {
- rd_kafka_cert_type_t type;
- rd_kafka_cert_enc_t encoding;
- rd_refcnt_t refcnt;
-#if WITH_SSL
- X509 *x509; /**< Certificate (public key) */
- EVP_PKEY *pkey; /**< Private key */
- X509_STORE *store; /**< CA certificate chain store */
-#endif
-} rd_kafka_cert_t;
-
-void rd_kafka_conf_cert_dtor(int scope, void *pconf);
-void rd_kafka_conf_cert_copy(int scope,
- void *pdst,
- const void *psrc,
- void *dstptr,
- const void *srcptr,
- size_t filter_cnt,
- const char **filter);
-
-#endif /* _RDKAFKA_CERT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c
deleted file mode 100644
index 026e93321..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c
+++ /dev/null
@@ -1,5969 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_metadata.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_interceptor.h"
-#include "rdmap.h"
-
-#include "rdunittest.h"
-
-#include <ctype.h>
-#include <stdarg.h>
-
-static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg);
-static rd_kafka_error_t *
-rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *assignment);
-static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg);
-static rd_kafka_error_t *
-rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *partitions);
-static rd_kafka_error_t *
-rd_kafka_cgrp_incremental_unassign(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *partitions);
-
-static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque);
-
-static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg,
- const char *reason);
-
-static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg);
-
-static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg,
- rd_bool_t assignment_lost,
- rd_bool_t initiating,
- const char *reason);
-static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg,
- rd_bool_t assignment_lost,
- rd_bool_t initiating,
- const char *reason);
-
-static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg);
-
-static void
-rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg);
-static rd_kafka_resp_err_t
-rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *rktparlist);
-
-static void rd_kafka_cgrp_group_assignment_set(
- rd_kafka_cgrp_t *rkcg,
- const rd_kafka_topic_partition_list_t *partitions);
-static void rd_kafka_cgrp_group_assignment_modify(
- rd_kafka_cgrp_t *rkcg,
- rd_bool_t add,
- const rd_kafka_topic_partition_list_t *partitions);
-
-static void
-rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *assignment);
-
-
-/**
- * @returns true if the current assignment is lost.
- */
-rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg) {
- return rd_atomic32_get(&rkcg->rkcg_assignment_lost) != 0;
-}
-
-
-/**
- * @brief Call when the current assignment has been lost, with a
- * human-readable reason.
- */
-static void rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg,
- char *fmt,
- ...) RD_FORMAT(printf, 2, 3);
-static void
-rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) {
- va_list ap;
- char reason[256];
-
- if (!rkcg->rkcg_group_assignment)
- return;
-
- va_start(ap, fmt);
- rd_vsnprintf(reason, sizeof(reason), fmt, ap);
- va_end(ap);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST",
- "Group \"%s\": "
- "current assignment of %d partition(s) lost: %s",
- rkcg->rkcg_group_id->str, rkcg->rkcg_group_assignment->cnt,
- reason);
-
- rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_true);
-}
-
-
-/**
- * @brief Call when the current assignment is no longer considered lost, with a
- * human-readable reason.
- */
-static void
-rd_kafka_cgrp_assignment_clear_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) {
- va_list ap;
- char reason[256];
-
- if (!rd_atomic32_get(&rkcg->rkcg_assignment_lost))
- return;
-
- va_start(ap, fmt);
- rd_vsnprintf(reason, sizeof(reason), fmt, ap);
- va_end(ap);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST",
- "Group \"%s\": "
- "current assignment no longer considered lost: %s",
- rkcg->rkcg_group_id->str, reason);
-
- rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_false);
-}
-
-
-/**
- * @brief The rebalance protocol currently in use. This will be
- * RD_KAFKA_REBALANCE_PROTOCOL_NONE if the consumer has not
- * (yet) joined a group, else it will match the rebalance
- * protocol of the configured assignor(s).
- *
- * @locality main thread
- */
-rd_kafka_rebalance_protocol_t
-rd_kafka_cgrp_rebalance_protocol(rd_kafka_cgrp_t *rkcg) {
- if (!rkcg->rkcg_assignor)
- return RD_KAFKA_REBALANCE_PROTOCOL_NONE;
- return rkcg->rkcg_assignor->rkas_protocol;
-}
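-
-/* A hedged usage sketch of the public counterpart of the above:
- * rd_kafka_rebalance_protocol() returns "NONE", "EAGER" or
- * "COOPERATIVE", which a rebalance callback can use to choose between
- * the eager assign()/unassign() calls and their incremental variants.
- * The helper name is illustrative. */
-static RD_UNUSED rd_bool_t example_is_cooperative(rd_kafka_t *rk) {
-        const char *proto = rd_kafka_rebalance_protocol(rk);
-
-        return proto && !strcmp(proto, "COOPERATIVE");
-}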
-
-
-
-/**
- * @returns true if the cgrp is awaiting a protocol response. This prevents
- *          the join-state machine from proceeding before the current state
- *          is done.
- */
-static rd_bool_t rd_kafka_cgrp_awaiting_response(rd_kafka_cgrp_t *rkcg) {
- return rkcg->rkcg_wait_resp != -1;
-}
-
-
-/**
- * @brief Set flag indicating we are waiting for a coordinator response
- * for the given request.
- *
- * This is used for specific requests to postpone rejoining the group if
- * there are outstanding JoinGroup or SyncGroup requests.
- *
- * @locality main thread
- */
-static void rd_kafka_cgrp_set_wait_resp(rd_kafka_cgrp_t *rkcg, int16_t ApiKey) {
- rd_assert(rkcg->rkcg_wait_resp == -1);
- rkcg->rkcg_wait_resp = ApiKey;
-}
-
-/**
- * @brief Clear the flag that says we're waiting for a coordinator response
- *        for the given request.
- *
- * @param ApiKey The ApiKey of the original request.
- *
- * @locality main thread
- */
-static void rd_kafka_cgrp_clear_wait_resp(rd_kafka_cgrp_t *rkcg,
- int16_t ApiKey) {
- rd_assert(rkcg->rkcg_wait_resp == ApiKey);
- rkcg->rkcg_wait_resp = -1;
-}
-
-
-
-/**
- * @struct Auxiliary glue type used for COOPERATIVE rebalance set operations.
- */
-typedef struct PartitionMemberInfo_s {
- const rd_kafka_group_member_t *member;
- rd_bool_t members_match;
-} PartitionMemberInfo_t;
-
-static PartitionMemberInfo_t *
-PartitionMemberInfo_new(const rd_kafka_group_member_t *member,
- rd_bool_t members_match) {
- PartitionMemberInfo_t *pmi;
-
- pmi = rd_calloc(1, sizeof(*pmi));
- pmi->member = member;
- pmi->members_match = members_match;
-
- return pmi;
-}
-
-static void PartitionMemberInfo_free(void *p) {
- PartitionMemberInfo_t *pmi = p;
- rd_free(pmi);
-}
-
-typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
- PartitionMemberInfo_t *) map_toppar_member_info_t;
-
-
-/**
- * @returns true if consumer has joined the group and thus requires a leave.
- */
-#define RD_KAFKA_CGRP_HAS_JOINED(rkcg) \
- (rkcg->rkcg_member_id != NULL && \
- RD_KAFKAP_STR_LEN((rkcg)->rkcg_member_id) > 0)
-
-
-/**
- * @returns true if cgrp is waiting for a rebalance_cb to be handled by
- * the application.
- */
-#define RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) \
- ((rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \
- (rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL)
-
-/**
- * @returns true if a rebalance is in progress.
- *
- * 1. In WAIT_JOIN or WAIT_METADATA state with a member-id set,
- * this happens on rejoin.
- * 2. In WAIT_SYNC waiting for the group to rebalance on the broker.
- * 3. In *_WAIT_UNASSIGN_TO_COMPLETE waiting for unassigned partitions to
- *    stop fetching, etc.
- * 4. In _WAIT_*ASSIGN_CALL waiting for the application to handle the
- * assignment changes in its rebalance callback and then call *assign().
- * 5. An incremental rebalancing is in progress.
- * 6. A rebalance-induced rejoin is in progress.
- */
-#define RD_KAFKA_CGRP_REBALANCING(rkcg) \
- ((RD_KAFKA_CGRP_HAS_JOINED(rkcg) && \
- ((rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN || \
- (rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)) || \
- (rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC || \
- (rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE || \
- (rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE || \
- (rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \
- (rkcg)->rkcg_join_state == \
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL || \
- (rkcg)->rkcg_rebalance_incr_assignment != NULL || \
- (rkcg)->rkcg_rebalance_rejoin)
-
-
-
-const char *rd_kafka_cgrp_state_names[] = {
- "init", "term", "query-coord",
- "wait-coord", "wait-broker", "wait-broker-transport",
- "up"};
-
-const char *rd_kafka_cgrp_join_state_names[] = {
- "init",
- "wait-join",
- "wait-metadata",
- "wait-sync",
- "wait-assign-call",
- "wait-unassign-call",
- "wait-unassign-to-complete",
- "wait-incr-unassign-to-complete",
- "steady",
-};
-
-
-/**
- * @brief Change the cgrp state.
- *
- * @returns 1 if the state was changed, else 0.
- */
-static int rd_kafka_cgrp_set_state(rd_kafka_cgrp_t *rkcg, int state) {
- if ((int)rkcg->rkcg_state == state)
- return 0;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPSTATE",
- "Group \"%.*s\" changed state %s -> %s "
- "(join-state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_state_names[state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
- rkcg->rkcg_state = state;
- rkcg->rkcg_ts_statechange = rd_clock();
-
- rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk);
-
- return 1;
-}
-
-
-void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state) {
- if ((int)rkcg->rkcg_join_state == join_state)
- return;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPJOINSTATE",
- "Group \"%.*s\" changed join state %s -> %s "
- "(state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- rd_kafka_cgrp_join_state_names[join_state],
- rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
- rkcg->rkcg_join_state = join_state;
-}
-
-
-void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg) {
- rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_subscription);
- rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_group_leader.members);
- rd_kafka_cgrp_set_member_id(rkcg, NULL);
- if (rkcg->rkcg_group_instance_id)
- rd_kafkap_str_destroy(rkcg->rkcg_group_instance_id);
-
- rd_kafka_q_destroy_owner(rkcg->rkcg_q);
- rd_kafka_q_destroy_owner(rkcg->rkcg_ops);
- rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q);
- rd_kafka_assert(rkcg->rkcg_rk, TAILQ_EMPTY(&rkcg->rkcg_topics));
- rd_kafka_assert(rkcg->rkcg_rk, rd_list_empty(&rkcg->rkcg_toppars));
- rd_list_destroy(&rkcg->rkcg_toppars);
- rd_list_destroy(rkcg->rkcg_subscribed_topics);
- rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics);
- if (rkcg->rkcg_assignor && rkcg->rkcg_assignor->rkas_destroy_state_cb)
- rkcg->rkcg_assignor->rkas_destroy_state_cb(
- rkcg->rkcg_assignor_state);
- rd_free(rkcg);
-}
-
-
-
-/**
- * @brief Update the absolute session timeout following a successful
- * response from the coordinator.
- * This timeout is used to enforce the session timeout in the
- * consumer itself.
- *
- * @param reset if true the timeout is updated even if the session has expired.
- */
-static RD_INLINE void
-rd_kafka_cgrp_update_session_timeout(rd_kafka_cgrp_t *rkcg, rd_bool_t reset) {
- if (reset || rkcg->rkcg_ts_session_timeout != 0)
- rkcg->rkcg_ts_session_timeout =
- rd_clock() +
- (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
-}
-
-
-
-rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
- const rd_kafkap_str_t *group_id,
- const rd_kafkap_str_t *client_id) {
- rd_kafka_cgrp_t *rkcg;
-
- rkcg = rd_calloc(1, sizeof(*rkcg));
-
- rkcg->rkcg_rk = rk;
- rkcg->rkcg_group_id = group_id;
- rkcg->rkcg_client_id = client_id;
- rkcg->rkcg_coord_id = -1;
- rkcg->rkcg_generation_id = -1;
- rkcg->rkcg_wait_resp = -1;
-
- rkcg->rkcg_ops = rd_kafka_q_new(rk);
- rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve;
- rkcg->rkcg_ops->rkq_opaque = rkcg;
- rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk);
- rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve;
- rkcg->rkcg_wait_coord_q->rkq_opaque = rkcg->rkcg_ops->rkq_opaque;
- rkcg->rkcg_q = rd_kafka_q_new(rk);
- rkcg->rkcg_group_instance_id =
- rd_kafkap_str_new(rk->rk_conf.group_instance_id, -1);
-
- TAILQ_INIT(&rkcg->rkcg_topics);
- rd_list_init(&rkcg->rkcg_toppars, 32, NULL);
- rd_kafka_cgrp_set_member_id(rkcg, "");
- rkcg->rkcg_subscribed_topics =
- rd_list_new(0, (void *)rd_kafka_topic_info_destroy);
- rd_interval_init(&rkcg->rkcg_coord_query_intvl);
- rd_interval_init(&rkcg->rkcg_heartbeat_intvl);
- rd_interval_init(&rkcg->rkcg_join_intvl);
- rd_interval_init(&rkcg->rkcg_timeout_scan_intvl);
- rd_atomic32_init(&rkcg->rkcg_assignment_lost, rd_false);
- rd_atomic32_init(&rkcg->rkcg_terminated, rd_false);
-
- rkcg->rkcg_errored_topics = rd_kafka_topic_partition_list_new(0);
-
- /* Create a logical group coordinator broker to provide
- * a dedicated connection for group coordination.
- * This is needed since JoinGroup may block for up to
- * max.poll.interval.ms, effectively blocking and timing out
- * any other protocol requests (such as Metadata).
- * The address for this broker will be updated when
- * the group coordinator is assigned. */
- rkcg->rkcg_coord = rd_kafka_broker_add_logical(rk, "GroupCoordinator");
-
- if (rk->rk_conf.enable_auto_commit &&
- rk->rk_conf.auto_commit_interval_ms > 0)
- rd_kafka_timer_start(
- &rk->rk_timers, &rkcg->rkcg_offset_commit_tmr,
- rk->rk_conf.auto_commit_interval_ms * 1000ll,
- rd_kafka_cgrp_offset_commit_tmr_cb, rkcg);
-
- return rkcg;
-}
-
-
-/**
- * @brief Set the group coordinator broker.
- */
-static void rd_kafka_cgrp_coord_set_broker(rd_kafka_cgrp_t *rkcg,
- rd_kafka_broker_t *rkb) {
-
- rd_assert(rkcg->rkcg_curr_coord == NULL);
-
- rd_assert(RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb));
-
- rkcg->rkcg_curr_coord = rkb;
- rd_kafka_broker_keep(rkb);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORDSET",
- "Group \"%.*s\" coordinator set to broker %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_broker_name(rkb));
-
- /* Reset query interval to trigger an immediate
- * coord query if required */
- if (!rd_interval_disabled(&rkcg->rkcg_coord_query_intvl))
- rd_interval_reset(&rkcg->rkcg_coord_query_intvl);
-
- rd_kafka_cgrp_set_state(rkcg,
- RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT);
-
- rd_kafka_broker_persistent_connection_add(
- rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord);
-
- /* Set the logical coordinator's nodename to the
- * proper broker's nodename, this will trigger a (re)connect
- * to the new address. */
- rd_kafka_broker_set_nodename(rkcg->rkcg_coord, rkb);
-}
-
-
-/**
- * @brief Reset/clear the group coordinator broker.
- */
-static void rd_kafka_cgrp_coord_clear_broker(rd_kafka_cgrp_t *rkcg) {
- rd_kafka_broker_t *rkb = rkcg->rkcg_curr_coord;
-
- rd_assert(rkcg->rkcg_curr_coord);
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORDCLEAR",
- "Group \"%.*s\" broker %s is no longer coordinator",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_broker_name(rkb));
-
- rd_assert(rkcg->rkcg_coord);
-
- rd_kafka_broker_persistent_connection_del(
- rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord);
-
- /* Clear the ephemeral broker's nodename.
- * This will also trigger a disconnect. */
- rd_kafka_broker_set_nodename(rkcg->rkcg_coord, NULL);
-
- rkcg->rkcg_curr_coord = NULL;
- rd_kafka_broker_destroy(rkb); /* from set_coord_broker() */
-}
-
-
-/**
- * @brief Update/set the group coordinator.
- *
- * Will do nothing if there's been no change.
- *
- * @returns 1 if the coordinator, or state, was updated, else 0.
- */
-static int rd_kafka_cgrp_coord_update(rd_kafka_cgrp_t *rkcg, int32_t coord_id) {
-
- /* Don't do anything while terminating */
- if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM)
- return 0;
-
- /* Check if coordinator changed */
- if (rkcg->rkcg_coord_id != coord_id) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPCOORD",
- "Group \"%.*s\" changing coordinator %" PRId32
- " -> %" PRId32,
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rkcg->rkcg_coord_id, coord_id);
-
- /* Update coord id */
- rkcg->rkcg_coord_id = coord_id;
-
- /* Clear previous broker handle, if any */
- if (rkcg->rkcg_curr_coord)
- rd_kafka_cgrp_coord_clear_broker(rkcg);
- }
-
-
- if (rkcg->rkcg_curr_coord) {
- /* There is already a known coordinator and a
- * corresponding broker handle. */
- if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP)
- return rd_kafka_cgrp_set_state(
- rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT);
-
- } else if (rkcg->rkcg_coord_id != -1) {
- rd_kafka_broker_t *rkb;
-
- /* Try to find the coordinator broker handle */
- rd_kafka_rdlock(rkcg->rkcg_rk);
- rkb = rd_kafka_broker_find_by_nodeid(rkcg->rkcg_rk, coord_id);
- rd_kafka_rdunlock(rkcg->rkcg_rk);
-
- /* It is possible, due to stale metadata, that the
- * coordinator id points to a broker we still don't know
- * about. In this case the client will continue
- * querying metadata and querying for the coordinator
- * until a match is found. */
-
- if (rkb) {
- /* Coordinator is known and broker handle exists */
- rd_kafka_cgrp_coord_set_broker(rkcg, rkb);
- rd_kafka_broker_destroy(rkb); /*from find_by_nodeid()*/
-
- return 1;
- } else {
- /* Coordinator is known but no corresponding
- * broker handle. */
- return rd_kafka_cgrp_set_state(
- rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER);
- }
-
- } else {
- /* Coordinator still not known, re-query */
- if (rkcg->rkcg_state >= RD_KAFKA_CGRP_STATE_WAIT_COORD)
- return rd_kafka_cgrp_set_state(
- rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
- }
-
- return 0; /* no change */
-}
-
-
-
-/**
- * Handle FindCoordinator response
- */
-static void rd_kafka_cgrp_handle_FindCoordinator(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode = 0;
- int32_t CoordId;
- rd_kafkap_str_t CoordHost = RD_ZERO_INIT;
- int32_t CoordPort;
- rd_kafka_cgrp_t *rkcg = opaque;
- struct rd_kafka_metadata_broker mdb = RD_ZERO_INIT;
- char *errstr = NULL;
- int actions;
-
- if (likely(!(ErrorCode = err))) {
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- rd_kafkap_str_t ErrorMsg;
-
- rd_kafka_buf_read_str(rkbuf, &ErrorMsg);
-
- if (!RD_KAFKAP_STR_IS_NULL(&ErrorMsg))
- RD_KAFKAP_STR_DUPA(&errstr, &ErrorMsg);
- }
-
- rd_kafka_buf_read_i32(rkbuf, &CoordId);
- rd_kafka_buf_read_str(rkbuf, &CoordHost);
- rd_kafka_buf_read_i32(rkbuf, &CoordPort);
- }
-
- if (ErrorCode)
- goto err;
-
-
- mdb.id = CoordId;
- RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost);
- mdb.port = CoordPort;
-
- rd_rkb_dbg(rkb, CGRP, "CGRPCOORD",
- "Group \"%.*s\" coordinator is %s:%i id %" PRId32,
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), mdb.host, mdb.port,
- mdb.id);
- rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &mdb, NULL);
-
- rd_kafka_cgrp_coord_update(rkcg, CoordId);
- rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */
- return;
-
-err_parse: /* Parse error */
- ErrorCode = rkbuf->rkbuf_err;
- /* FALLTHRU */
-
-err:
- if (!errstr)
- errstr = (char *)rd_kafka_err2str(ErrorCode);
-
- rd_rkb_dbg(rkb, CGRP, "CGRPCOORD",
- "Group \"%.*s\" FindCoordinator response error: %s: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_err2name(ErrorCode), errstr);
-
- if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
- return;
-
- actions = rd_kafka_err_action(
- rkb, ErrorCode, request,
-
- RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE,
-
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT,
-
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT,
-
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE,
-
- RD_KAFKA_ERR_ACTION_END);
-
-
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- rd_kafka_cgrp_coord_update(rkcg, -1);
- } else {
- if (!(actions & RD_KAFKA_ERR_ACTION_RETRY) &&
- rkcg->rkcg_last_err != ErrorCode) {
- /* Propagate non-retriable errors to the application */
- rd_kafka_consumer_err(
- rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0,
- NULL, NULL, RD_KAFKA_OFFSET_INVALID,
- "FindCoordinator response error: %s", errstr);
-
- /* Suppress repeated errors */
- rkcg->rkcg_last_err = ErrorCode;
- }
-
- /* Retries are performed by the timer-intervalled
- * coord queries, continue querying */
- rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
- }
-
- rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */
-}
-
-
-/**
- * Query for coordinator.
- * Ask any broker in state UP
- *
- * Locality: main thread
- */
-void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason) {
- rd_kafka_broker_t *rkb;
- rd_kafka_resp_err_t err;
-
- rkb = rd_kafka_broker_any_usable(
- rkcg->rkcg_rk, RD_POLL_NOWAIT, RD_DO_LOCK,
- RD_KAFKA_FEATURE_BROKER_GROUP_COORD, "coordinator query");
-
- if (!rkb) {
- /* Reset the interval because there were no brokers. When a
- * broker becomes available, we want to query it immediately. */
- rd_interval_reset(&rkcg->rkcg_coord_query_intvl);
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY",
- "Group \"%.*s\": "
- "no broker available for coordinator query: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
- return;
- }
-
- rd_rkb_dbg(rkb, CGRP, "CGRPQUERY",
- "Group \"%.*s\": querying for coordinator: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
-
- err = rd_kafka_FindCoordinatorRequest(
- rkb, RD_KAFKA_COORD_GROUP, rkcg->rkcg_group_id->str,
- RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_cgrp_handle_FindCoordinator, rkcg);
-
- if (err) {
- rd_rkb_dbg(rkb, CGRP, "CGRPQUERY",
- "Group \"%.*s\": "
- "unable to send coordinator query: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_err2str(err));
- rd_kafka_broker_destroy(rkb);
- return;
- }
-
- if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_QUERY_COORD)
- rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_COORD);
-
- rd_kafka_broker_destroy(rkb);
-
- /* Back off the next intervalled query since we just sent one. */
- rd_interval_reset_to_now(&rkcg->rkcg_coord_query_intvl, 0);
-}
-
-/**
- * @brief Mark the current coordinator as dead.
- *
- * @locality main thread
- */
-void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg,
- rd_kafka_resp_err_t err,
- const char *reason) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORD",
- "Group \"%.*s\": "
- "marking the coordinator (%" PRId32 ") dead: %s: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id,
- rd_kafka_err2str(err), reason);
-
- rd_kafka_cgrp_coord_update(rkcg, -1);
-
- /* Re-query for coordinator */
- rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
- rd_kafka_cgrp_coord_query(rkcg, reason);
-}
-
-
-/**
- * @returns a new reference to the current coordinator, if available, else NULL.
- *
- * @locality rdkafka main thread
- * @locks_required none
- * @locks_acquired none
- */
-rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg) {
- if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkcg->rkcg_coord)
- return NULL;
-
- rd_kafka_broker_keep(rkcg->rkcg_coord);
-
- return rkcg->rkcg_coord;
-}
-
-
-/**
- * @brief cgrp handling of LeaveGroup responses
- * @param opaque must be the cgrp handle.
- * @locality rdkafka main thread (unless err==ERR__DESTROY)
- */
-static void rd_kafka_cgrp_handle_LeaveGroup(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = opaque;
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode = 0;
-
- if (err) {
- ErrorCode = err;
- goto err;
- }
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
-err:
- if (ErrorCode)
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
- "LeaveGroup response error in state %s: %s",
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_err2str(ErrorCode));
- else
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
- "LeaveGroup response received in state %s",
- rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
-
- if (ErrorCode != RD_KAFKA_RESP_ERR__DESTROY) {
- rd_assert(thrd_is_current(rk->rk_thread));
- rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE;
- rd_kafka_cgrp_try_terminate(rkcg);
- }
-
-
-
- return;
-
-err_parse:
- ErrorCode = rkbuf->rkbuf_err;
- goto err;
-}
-
-
-static void rd_kafka_cgrp_leave(rd_kafka_cgrp_t *rkcg) {
- char *member_id;
-
- RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id);
-
- /* Leaving the group invalidates the member id, reset it
- * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */
- rd_kafka_cgrp_set_member_id(rkcg, "");
-
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE",
- "Group \"%.*s\": leave (in state %s): "
- "LeaveGroupRequest already in-transit",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
- return;
- }
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE",
- "Group \"%.*s\": leave (in state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
-
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WAIT_LEAVE;
-
- if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) {
- rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "LEAVE",
- "Leaving group");
- rd_kafka_LeaveGroupRequest(
- rkcg->rkcg_coord, rkcg->rkcg_group_id->str, member_id,
- RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_cgrp_handle_LeaveGroup, rkcg);
- } else
- rd_kafka_cgrp_handle_LeaveGroup(rkcg->rkcg_rk, rkcg->rkcg_coord,
- RD_KAFKA_RESP_ERR__WAIT_COORD,
- NULL, NULL, rkcg);
-}
-
-
-/**
- * @brief Leave group, if desired.
- *
- * @returns true if a LeaveGroup was issued, else false.
- */
-static rd_bool_t rd_kafka_cgrp_leave_maybe(rd_kafka_cgrp_t *rkcg) {
-
- /* We were not instructed to leave in the first place. */
- if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE))
- return rd_false;
-
- rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE;
-
- /* Don't send Leave when terminating with NO_CONSUMER_CLOSE flag */
- if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk))
- return rd_false;
-
- /* KIP-345: Static group members must not send a LeaveGroupRequest
- * on termination. */
- if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) &&
- rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
- return rd_false;
-
- rd_kafka_cgrp_leave(rkcg);
-
- return rd_true;
-}
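-
-/* Static group membership (KIP-345) is enabled by setting a
- * group.instance.id on the consumer configuration; a minimal,
- * illustrative sketch (the instance id value is hypothetical):
- *
- *   char errstr[256];
- *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
- *
- *   if (rd_kafka_conf_set(conf, "group.instance.id", "consumer-1",
- *                         errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
- *           fprintf(stderr, "%s\n", errstr);
- *
- * Such a member keeps its member id across restarts and, per the
- * check above, skips the LeaveGroupRequest on termination so that a
- * simple restart does not trigger a rebalance. */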
-
-
-/**
- * @brief Enqueues a rebalance op, delegating responsibility of calling
- * incremental_assign / incremental_unassign to the application.
- * If there is no rebalance handler configured, or the action
- * should not be delegated to the application for some other
- * reason, incremental_assign / incremental_unassign will be called
- * automatically and immediately.
- *
- * @param rejoin whether or not to rejoin the group following completion
- * of the incremental assign / unassign.
- *
- * @remarks does not take ownership of \p partitions.
- */
-void rd_kafka_rebalance_op_incr(rd_kafka_cgrp_t *rkcg,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- rd_bool_t rejoin,
- const char *reason) {
- rd_kafka_error_t *error;
-
- /* Flag to rejoin after completion of the incr_assign or incr_unassign,
- if required. */
- rkcg->rkcg_rebalance_rejoin = rejoin;
-
- rd_kafka_wrlock(rkcg->rkcg_rk);
- rkcg->rkcg_c.ts_rebalance = rd_clock();
- rkcg->rkcg_c.rebalance_cnt++;
- rd_kafka_wrunlock(rkcg->rkcg_rk);
-
- if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk) ||
- rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
- /* Total unconditional unassign in these cases */
- rd_kafka_cgrp_unassign(rkcg);
-
- /* Now serve the assignment to make updates */
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
- goto done;
- }
-
- rd_kafka_cgrp_set_join_state(
- rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL
- : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL);
-
- /* Schedule application rebalance callback/event if enabled */
- if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) {
- rd_kafka_op_t *rko;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
- "Group \"%s\": delegating incremental %s of %d "
- "partition(s) to application on queue %s: %s",
- rkcg->rkcg_group_id->str,
- err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
- ? "revoke"
- : "assign",
- partitions->cnt,
- rd_kafka_q_dest_name(rkcg->rkcg_q), reason);
-
- /* Pause currently assigned partitions while waiting for
- * rebalance callback to get called to make sure the
- * application will not receive any more messages that
- * might block it from serving the rebalance callback
- * and to not process messages for partitions it
- * might have lost in the rebalance. */
- rd_kafka_assignment_pause(rkcg->rkcg_rk,
- "incremental rebalance");
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE);
- rko->rko_err = err;
- rko->rko_u.rebalance.partitions =
- rd_kafka_topic_partition_list_copy(partitions);
-
- if (rd_kafka_q_enq(rkcg->rkcg_q, rko))
- goto done; /* Rebalance op successfully enqueued */
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
- "Group \"%s\": ops queue is disabled, not "
- "delegating partition %s to application",
- rkcg->rkcg_group_id->str,
- err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
- ? "unassign"
- : "assign");
- /* FALLTHRU */
- }
-
- /* No application rebalance callback/event handler, or it is not
- * available; do the assign/unassign ourselves.
- * We need to be careful here not to trigger assignment_serve()
- * since it may call into the cgrp code again, in which case we
- * can't really track what the outcome state will be. */
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- error = rd_kafka_cgrp_incremental_assign(rkcg, partitions);
- else
- error = rd_kafka_cgrp_incremental_unassign(rkcg, partitions);
-
- if (error) {
- rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE",
- "Group \"%s\": internal incremental %s "
- "of %d partition(s) failed: %s: "
- "unassigning all partitions and rejoining",
- rkcg->rkcg_group_id->str,
- err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
- ? "unassign"
- : "assign",
- partitions->cnt, rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_cgrp_set_join_state(rkcg,
- /* This is a clean state for
- * assignment_done() to rejoin
- * from. */
- RD_KAFKA_CGRP_JOIN_STATE_STEADY);
- rd_kafka_assignment_clear(rkcg->rkcg_rk);
- }
-
- /* Now serve the assignment to make updates */
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
-
-done:
- /* Update the current group assignment based on the
- * added/removed partitions. */
- rd_kafka_cgrp_group_assignment_modify(
- rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, partitions);
-}
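-
-/* A minimal, illustrative sketch of the application side of the
- * rebalance op enqueued above when the COOPERATIVE protocol is in
- * use (callback name and error handling are examples only):
- *
- *   static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
- *                            rd_kafka_topic_partition_list_t *parts,
- *                            void *opaque) {
- *           rd_kafka_error_t *error;
- *
- *           if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- *                   error = rd_kafka_incremental_assign(rk, parts);
- *           else
- *                   error = rd_kafka_incremental_unassign(rk, parts);
- *
- *           if (error)
- *                   rd_kafka_error_destroy(error);
- *   }
- *
- * Calling incremental_assign/unassign is what lets the cgrp leave
- * the WAIT_ASSIGN_CALL / WAIT_UNASSIGN_CALL join state set above. */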
-
-
-/**
- * @brief Enqueues a rebalance op, delegating responsibility of calling
- * assign / unassign to the application. If there is no rebalance
- * handler configured, or the action should not be delegated to the
- * application for some other reason, assign / unassign will be
- * called automatically.
- *
- * @remarks \p partitions is copied.
- */
-static void rd_kafka_rebalance_op(rd_kafka_cgrp_t *rkcg,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *assignment,
- const char *reason) {
- rd_kafka_error_t *error;
-
- rd_kafka_wrlock(rkcg->rkcg_rk);
- rkcg->rkcg_c.ts_rebalance = rd_clock();
- rkcg->rkcg_c.rebalance_cnt++;
- rd_kafka_wrunlock(rkcg->rkcg_rk);
-
- if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk) ||
- rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
- /* Unassign */
- rd_kafka_cgrp_unassign(rkcg);
-
- /* Now serve the assignment to make updates */
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
- goto done;
- }
-
- rd_assert(assignment != NULL);
-
- rd_kafka_cgrp_set_join_state(
- rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
- ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL
- : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL);
-
- /* Schedule application rebalance callback/event if enabled */
- if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) {
- rd_kafka_op_t *rko;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
- "Group \"%s\": delegating %s of %d partition(s) "
- "to application on queue %s: %s",
- rkcg->rkcg_group_id->str,
- err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
- ? "revoke"
- : "assign",
- assignment->cnt,
- rd_kafka_q_dest_name(rkcg->rkcg_q), reason);
-
- /* Pause currently assigned partitions while waiting for
- * rebalance callback to get called to make sure the
- * application will not receive any more messages that
- * might block it from serving the rebalance callback
- * and to not process messages for partitions it
- * might have lost in the rebalance. */
- rd_kafka_assignment_pause(rkcg->rkcg_rk, "rebalance");
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE);
- rko->rko_err = err;
- rko->rko_u.rebalance.partitions =
- rd_kafka_topic_partition_list_copy(assignment);
-
- if (rd_kafka_q_enq(rkcg->rkcg_q, rko))
- goto done; /* Rebalance op successfully enqueued */
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
- "Group \"%s\": ops queue is disabled, not "
- "delegating partition %s to application",
- rkcg->rkcg_group_id->str,
- err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
- ? "unassign"
- : "assign");
-
- /* FALLTHRU */
- }
-
- /* No application rebalance callback/event handler, or it is not
- * available; do the assign/unassign ourselves.
- * We need to be careful here not to trigger assignment_serve()
- * since it may call into the cgrp code again, in which case we
- * can't really track what the outcome state will be. */
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- error = rd_kafka_cgrp_assign(rkcg, assignment);
- else
- error = rd_kafka_cgrp_unassign(rkcg);
-
- if (error) {
- rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE",
- "Group \"%s\": internal %s "
- "of %d partition(s) failed: %s: "
- "unassigning all partitions and rejoining",
- rkcg->rkcg_group_id->str,
- err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
- ? "unassign"
- : "assign",
- rkcg->rkcg_group_assignment->cnt,
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_cgrp_set_join_state(rkcg,
- /* This is a clean state for
- * assignment_done() to rejoin
- * from. */
- RD_KAFKA_CGRP_JOIN_STATE_STEADY);
- rd_kafka_assignment_clear(rkcg->rkcg_rk);
- }
-
- /* Now serve the assignment to make updates */
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
-
-done:
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- rd_kafka_cgrp_group_assignment_set(rkcg, assignment);
- else
- rd_kafka_cgrp_group_assignment_set(rkcg, NULL);
-}
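-
-/* For contrast with the incremental variant above, an illustrative
- * application callback under the EAGER protocol, where the whole
- * assignment is replaced at once:
- *
- *   static void rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
- *                            rd_kafka_topic_partition_list_t *parts,
- *                            void *opaque) {
- *           if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- *                   rd_kafka_assign(rk, parts);
- *           else
- *                   rd_kafka_assign(rk, NULL);
- *   }
- */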
-
-
-/**
- * @brief Rejoin the group.
- *
- * @remark This function must not have any side-effects but setting the
- * join state.
- */
-static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...)
- RD_FORMAT(printf, 2, 3);
-
-static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) {
- char reason[512];
- va_list ap;
- char astr[128];
-
- va_start(ap, fmt);
- rd_vsnprintf(reason, sizeof(reason), fmt, ap);
- va_end(ap);
-
- if (rkcg->rkcg_group_assignment)
- rd_snprintf(astr, sizeof(astr), " with %d owned partition(s)",
- rkcg->rkcg_group_assignment->cnt);
- else
- rd_snprintf(astr, sizeof(astr), " without an assignment");
-
- if (rkcg->rkcg_subscription || rkcg->rkcg_next_subscription) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REJOIN",
- "Group \"%s\": %s group%s: %s", rkcg->rkcg_group_id->str,
- rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT
- ? "Joining"
- : "Rejoining",
- astr, reason);
- } else {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "NOREJOIN",
- "Group \"%s\": Not %s group%s: %s: "
- "no subscribed topics",
- rkcg->rkcg_group_id->str,
- rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT
- ? "joining"
- : "rejoining",
- astr, reason);
-
- rd_kafka_cgrp_leave_maybe(rkcg);
- }
-
- rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT);
-}
-
-
-/**
- * @brief Collect all assigned or owned partitions from group members.
- * The member field of each result element is set to the associated
- * group member. The members_match field is set to rd_false.
- *
- * @param members Array of group members.
- * @param member_cnt Number of elements in members.
- * @param par_cnt The total number of partitions expected to be collected.
- * @param collect_owned If rd_true, rkgm_owned partitions will be collected,
- * else rkgm_assignment partitions will be collected.
- */
-static map_toppar_member_info_t *
-rd_kafka_collect_partitions(const rd_kafka_group_member_t *members,
- size_t member_cnt,
- size_t par_cnt,
- rd_bool_t collect_owned) {
- size_t i;
- map_toppar_member_info_t *collected = rd_calloc(1, sizeof(*collected));
-
- RD_MAP_INIT(collected, par_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free,
- PartitionMemberInfo_free);
-
- for (i = 0; i < member_cnt; i++) {
- size_t j;
- const rd_kafka_group_member_t *rkgm = &members[i];
- const rd_kafka_topic_partition_list_t *toppars =
- collect_owned ? rkgm->rkgm_owned : rkgm->rkgm_assignment;
-
- for (j = 0; j < (size_t)toppars->cnt; j++) {
- rd_kafka_topic_partition_t *rktpar =
- rd_kafka_topic_partition_copy(&toppars->elems[j]);
- PartitionMemberInfo_t *pmi =
- PartitionMemberInfo_new(rkgm, rd_false);
- RD_MAP_SET(collected, rktpar, pmi);
- }
- }
-
- return collected;
-}
-
-
-/**
- * @brief Set intersection. Returns a set of all elements of \p a that
- * are also elements of \p b. Additionally, compares the members
- * field of matching elements from \p a and \p b and if not NULL
- * and equal, sets the members_match field in the result element
- * to rd_true and the member field to equal that of the elements,
- * else sets the members_match field to rd_false and member field
- * to NULL.
- */
-static map_toppar_member_info_t *
-rd_kafka_member_partitions_intersect(map_toppar_member_info_t *a,
- map_toppar_member_info_t *b) {
- const rd_kafka_topic_partition_t *key;
- const PartitionMemberInfo_t *a_v;
- map_toppar_member_info_t *intersection =
- rd_calloc(1, sizeof(*intersection));
-
- RD_MAP_INIT(
- intersection, RD_MIN(a ? RD_MAP_CNT(a) : 1, b ? RD_MAP_CNT(b) : 1),
- rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
-
- if (!a || !b)
- return intersection;
-
- RD_MAP_FOREACH(key, a_v, a) {
- rd_bool_t members_match;
- const PartitionMemberInfo_t *b_v = RD_MAP_GET(b, key);
-
- if (b_v == NULL)
- continue;
-
- members_match =
- a_v->member && b_v->member &&
- rd_kafka_group_member_cmp(a_v->member, b_v->member) == 0;
-
- RD_MAP_SET(intersection, rd_kafka_topic_partition_copy(key),
- PartitionMemberInfo_new(b_v->member, members_match));
- }
-
- return intersection;
-}
-
-
-/**
- * @brief Set subtraction. Returns a set of all elements of \p a
- * that are not elements of \p b. Sets the member field in
- * elements in the returned set to equal that of the
- * corresponding element in \p a
- */
-static map_toppar_member_info_t *
-rd_kafka_member_partitions_subtract(map_toppar_member_info_t *a,
- map_toppar_member_info_t *b) {
- const rd_kafka_topic_partition_t *key;
- const PartitionMemberInfo_t *a_v;
- map_toppar_member_info_t *difference =
- rd_calloc(1, sizeof(*difference));
-
- RD_MAP_INIT(difference, a ? RD_MAP_CNT(a) : 1,
- rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free,
- PartitionMemberInfo_free);
-
- if (!a)
- return difference;
-
- RD_MAP_FOREACH(key, a_v, a) {
- const PartitionMemberInfo_t *b_v =
- b ? RD_MAP_GET(b, key) : NULL;
-
- if (!b_v)
- RD_MAP_SET(
- difference, rd_kafka_topic_partition_copy(key),
- PartitionMemberInfo_new(a_v->member, rd_false));
- }
-
- return difference;
-}
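-
-/* A small worked example of the two set operations above, with
- * hypothetical partitions and members m1/m2:
- *
- *   a = { T[0]->m1, T[1]->m1 }   (e.g. assignor output)
- *   b = { T[0]->m1, T[2]->m2 }   (e.g. currently owned)
- *
- *   intersect(a, b) = { T[0]->m1 }  with members_match = rd_true
- *   subtract(a, b)  = { T[1]->m1 }
- *   subtract(b, a)  = { T[2]->m2 }
- */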
-
-
-/**
- * @brief Adjust the partition assignment as provided by the assignor
- * according to the COOPERATIVE protocol.
- */
-static void rd_kafka_cooperative_protocol_adjust_assignment(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_group_member_t *members,
- int member_cnt) {
-
- /* https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol */
-
- int i;
- int expected_max_assignment_size;
- int total_assigned = 0;
- int not_revoking = 0;
- size_t par_cnt = 0;
- const rd_kafka_topic_partition_t *toppar;
- const PartitionMemberInfo_t *pmi;
- map_toppar_member_info_t *assigned;
- map_toppar_member_info_t *owned;
- map_toppar_member_info_t *maybe_revoking;
- map_toppar_member_info_t *ready_to_migrate;
- map_toppar_member_info_t *unknown_but_owned;
-
- for (i = 0; i < member_cnt; i++)
- par_cnt += members[i].rkgm_owned->cnt;
-
- assigned = rd_kafka_collect_partitions(members, member_cnt, par_cnt,
- rd_false /*assigned*/);
-
- owned = rd_kafka_collect_partitions(members, member_cnt, par_cnt,
- rd_true /*owned*/);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
- "Group \"%s\": Partitions owned by members: %d, "
- "partitions assigned by assignor: %d",
- rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(owned),
- (int)RD_MAP_CNT(assigned));
-
- /* Still owned by some members */
- maybe_revoking = rd_kafka_member_partitions_intersect(assigned, owned);
-
- /* Not previously owned by anyone */
- ready_to_migrate = rd_kafka_member_partitions_subtract(assigned, owned);
-
- /* Don't exist in assigned partitions */
- unknown_but_owned =
- rd_kafka_member_partitions_subtract(owned, assigned);
-
- /* Rough guess at a size that is a bit higher than
- * the maximum number of partitions likely to be
- * assigned to any member. */
- expected_max_assignment_size =
- (int)(RD_MAP_CNT(assigned) / member_cnt) + 4;
-
- for (i = 0; i < member_cnt; i++) {
- rd_kafka_group_member_t *rkgm = &members[i];
- rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment);
-
- rkgm->rkgm_assignment = rd_kafka_topic_partition_list_new(
- expected_max_assignment_size);
- }
-
- /* For maybe-revoking-partitions, check if the owner has
- * changed. If it has, withhold the partition from the new
- * owner's assignment. The old owner will realize it no
- * longer owns the partition, revoke it, and then trigger
- * another rebalance in which the partition is finally
- * reassigned.
- */
- RD_MAP_FOREACH(toppar, pmi, maybe_revoking) {
- if (!pmi->members_match)
- /* Owner has changed. */
- continue;
-
- /* Owner hasn't changed. */
- rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment,
- toppar->topic,
- toppar->partition);
-
- total_assigned++;
- not_revoking++;
- }
-
- /* For ready-to-migrate-partitions, it is safe to move them
- * to the new member immediately since we know no one owned
- * them before, and hence we can encode them in the new
- * owner's assignment directly.
- */
- RD_MAP_FOREACH(toppar, pmi, ready_to_migrate) {
- rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment,
- toppar->topic,
- toppar->partition);
- total_assigned++;
- }
-
- /* For unknown-but-owned-partitions, it is also safe to just
- * give them back to whoever claimed to be their owners by
- * encoding them directly as well. If this is due to a topic
- * metadata update, then a later rebalance will be triggered
- * anyway.
- */
- RD_MAP_FOREACH(toppar, pmi, unknown_but_owned) {
- rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment,
- toppar->topic,
- toppar->partition);
- total_assigned++;
- }
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
- "Group \"%s\": COOPERATIVE protocol collection sizes: "
- "maybe revoking: %d, ready to migrate: %d, unknown but "
- "owned: %d",
- rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(maybe_revoking),
- (int)RD_MAP_CNT(ready_to_migrate),
- (int)RD_MAP_CNT(unknown_but_owned));
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
- "Group \"%s\": %d partitions assigned to consumers",
- rkcg->rkcg_group_id->str, total_assigned);
-
- RD_MAP_DESTROY_AND_FREE(maybe_revoking);
- RD_MAP_DESTROY_AND_FREE(ready_to_migrate);
- RD_MAP_DESTROY_AND_FREE(unknown_but_owned);
- RD_MAP_DESTROY_AND_FREE(assigned);
- RD_MAP_DESTROY_AND_FREE(owned);
-}
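-
-/* A worked example of the adjustment above, with a hypothetical
- * topology: member m1 currently owns T[0] and T[1], and the assignor
- * assigns T[1] to m1 and T[0],T[2] to m2:
- *
- *   maybe_revoking (assigned intersect owned) = { T[0], T[1] }
- *     T[1]: owner unchanged (m1) -> kept in m1's new assignment
- *     T[0]: owner changed (m1 -> m2) -> withheld from m2; m1 will
- *           revoke it and a follow-up rebalance hands it to m2
- *   ready_to_migrate (assigned minus owned) = { T[2] } -> m2 now
- *   unknown_but_owned (owned minus assigned) = { } (nothing to
- *           hand back)
- */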
-
-
-/**
- * @brief Parses and handles the MemberState from a SyncGroupResponse.
- */
-static void rd_kafka_cgrp_handle_SyncGroup_memberstate(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- const rd_kafkap_bytes_t *member_state) {
- rd_kafka_buf_t *rkbuf = NULL;
- rd_kafka_topic_partition_list_t *assignment = NULL;
- const int log_decode_errors = LOG_ERR;
- int16_t Version;
- rd_kafkap_bytes_t UserData;
-
- /* Don't handle new assignments when terminating */
- if (!err && rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
- err = RD_KAFKA_RESP_ERR__DESTROY;
-
- if (err)
- goto err;
-
- if (RD_KAFKAP_BYTES_LEN(member_state) == 0) {
- /* Empty assignment. */
- assignment = rd_kafka_topic_partition_list_new(0);
- memset(&UserData, 0, sizeof(UserData));
- goto done;
- }
-
- /* Parse assignment from MemberState */
- rkbuf = rd_kafka_buf_new_shadow(
- member_state->data, RD_KAFKAP_BYTES_LEN(member_state), NULL);
- /* Protocol parser needs a broker handle to log errors on. */
- if (rkb) {
- rkbuf->rkbuf_rkb = rkb;
- rd_kafka_broker_keep(rkb);
- } else
- rkbuf->rkbuf_rkb = rd_kafka_broker_internal(rkcg->rkcg_rk);
-
- rd_kafka_buf_read_i16(rkbuf, &Version);
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- if (!(assignment =
- rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields)))
- goto err_parse;
- rd_kafka_buf_read_bytes(rkbuf, &UserData);
-
-done:
- rd_kafka_cgrp_update_session_timeout(rkcg, rd_true /*reset timeout*/);
-
- rd_assert(rkcg->rkcg_assignor);
- if (rkcg->rkcg_assignor->rkas_on_assignment_cb) {
- char *member_id;
- RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id);
- rd_kafka_consumer_group_metadata_t *cgmd =
- rd_kafka_consumer_group_metadata_new_with_genid(
- rkcg->rkcg_rk->rk_conf.group_id_str,
- rkcg->rkcg_generation_id, member_id,
- rkcg->rkcg_rk->rk_conf.group_instance_id);
- rkcg->rkcg_assignor->rkas_on_assignment_cb(
- rkcg->rkcg_assignor, &(rkcg->rkcg_assignor_state),
- assignment, &UserData, cgmd);
- rd_kafka_consumer_group_metadata_destroy(cgmd);
- }
-
- // FIXME: Remove when we're done debugging.
- rd_kafka_topic_partition_list_log(rkcg->rkcg_rk, "ASSIGNMENT",
- RD_KAFKA_DBG_CGRP, assignment);
-
- /* Set the new assignment */
- rd_kafka_cgrp_handle_assignment(rkcg, assignment);
-
- rd_kafka_topic_partition_list_destroy(assignment);
-
- if (rkbuf)
- rd_kafka_buf_destroy(rkbuf);
-
- return;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-
-err:
- if (rkbuf)
- rd_kafka_buf_destroy(rkbuf);
-
- if (assignment)
- rd_kafka_topic_partition_list_destroy(assignment);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPSYNC",
- "Group \"%s\": synchronization failed: %s: rejoining",
- rkcg->rkcg_group_id->str, rd_kafka_err2str(err));
-
- if (err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
- rd_kafka_set_fatal_error(rkcg->rkcg_rk, err,
- "Fatal consumer error: %s",
- rd_kafka_err2str(err));
- else if (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
- rkcg->rkcg_generation_id = -1;
- else if (err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
- rd_kafka_cgrp_set_member_id(rkcg, "");
-
- if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
- (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION ||
- err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID))
- rd_kafka_cgrp_revoke_all_rejoin(
- rkcg, rd_true /*assignment is lost*/,
- rd_true /*this consumer is initiating*/, "SyncGroup error");
- else
- rd_kafka_cgrp_rejoin(rkcg, "SyncGroup error: %s",
- rd_kafka_err2str(err));
-}
-
-
-
-/**
- * @brief Cgrp handler for SyncGroup responses. opaque must be the cgrp handle.
- */
-static void rd_kafka_cgrp_handle_SyncGroup(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = opaque;
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode = 0;
- rd_kafkap_bytes_t MemberState = RD_ZERO_INIT;
- int actions;
-
- if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) {
- rd_kafka_dbg(
- rkb->rkb_rk, CGRP, "SYNCGROUP",
- "SyncGroup response: discarding outdated request "
- "(now in join-state %s)",
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
- rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
- return;
- }
-
- if (err) {
- ErrorCode = err;
- goto err;
- }
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- rd_kafka_buf_read_bytes(rkbuf, &MemberState);
-
-err:
- actions = rd_kafka_err_action(rkb, ErrorCode, request,
- RD_KAFKA_ERR_ACTION_END);
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- /* Re-query for coordinator */
- rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
- RD_KAFKA_OP_COORD_QUERY, ErrorCode);
- /* FALLTHRU */
- }
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- if (rd_kafka_buf_retry(rkb, request))
- return;
- /* FALLTHRU */
- }
-
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP",
- "SyncGroup response: %s (%d bytes of MemberState data)",
- rd_kafka_err2str(ErrorCode),
- RD_KAFKAP_BYTES_LEN(&MemberState));
-
- rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
-
- if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
- return; /* Termination */
-
- rd_kafka_cgrp_handle_SyncGroup_memberstate(rkcg, rkb, ErrorCode,
- &MemberState);
-
- return;
-
-err_parse:
- ErrorCode = rkbuf->rkbuf_err;
- goto err;
-}
-
-
-/**
- * @brief Run group assignment.
- */
-static void rd_kafka_cgrp_assignor_run(rd_kafka_cgrp_t *rkcg,
- rd_kafka_assignor_t *rkas,
- rd_kafka_resp_err_t err,
- rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- int member_cnt) {
- char errstr[512];
-
- if (err) {
- rd_snprintf(errstr, sizeof(errstr),
- "Failed to get cluster metadata: %s",
- rd_kafka_err2str(err));
- goto err;
- }
-
- *errstr = '\0';
-
- /* Run assignor */
- err = rd_kafka_assignor_run(rkcg, rkas, metadata, members, member_cnt,
- errstr, sizeof(errstr));
-
- if (err) {
- if (!*errstr)
- rd_snprintf(errstr, sizeof(errstr), "%s",
- rd_kafka_err2str(err));
- goto err;
- }
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGNOR",
- "Group \"%s\": \"%s\" assignor run for %d member(s)",
- rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
- member_cnt);
-
- if (rkas->rkas_protocol == RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE)
- rd_kafka_cooperative_protocol_adjust_assignment(rkcg, members,
- member_cnt);
-
- rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC);
-
- rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
-
- /* Respond to broker with assignment set or error */
- rd_kafka_SyncGroupRequest(
- rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
- rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, members,
- err ? 0 : member_cnt, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_cgrp_handle_SyncGroup, rkcg);
- return;
-
-err:
- rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "ASSIGNOR",
- "Group \"%s\": failed to run assignor \"%s\" for "
- "%d member(s): %s",
- rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
- member_cnt, errstr);
-
- rd_kafka_cgrp_rejoin(rkcg, "%s assignor failed: %s",
- rkas->rkas_protocol_name->str, errstr);
-}
-
-
-
-/**
- * @brief Op callback from handle_JoinGroup
- */
-static rd_kafka_op_res_t
-rd_kafka_cgrp_assignor_handle_Metadata_op(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED; /* Terminating */
-
- if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)
- return RD_KAFKA_OP_RES_HANDLED; /* From outdated state */
-
- if (!rkcg->rkcg_group_leader.members) {
- rd_kafka_dbg(rk, CGRP, "GRPLEADER",
- "Group \"%.*s\": no longer leader: "
- "not running assignor",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- rd_kafka_cgrp_assignor_run(rkcg, rkcg->rkcg_assignor, rko->rko_err,
- rko->rko_u.metadata.md,
- rkcg->rkcg_group_leader.members,
- rkcg->rkcg_group_leader.member_cnt);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * Parse single JoinGroup.Members.MemberMetadata for "consumer" ProtocolType
- *
- * Protocol definition:
- * https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal
- *
- * Returns 0 on success or -1 on error.
- */
-static int rd_kafka_group_MemberMetadata_consumer_read(
- rd_kafka_broker_t *rkb,
- rd_kafka_group_member_t *rkgm,
- const rd_kafkap_bytes_t *MemberMetadata) {
-
- rd_kafka_buf_t *rkbuf;
- int16_t Version;
- int32_t subscription_cnt;
- rd_kafkap_bytes_t UserData;
- const int log_decode_errors = LOG_ERR;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG;
-
- /* Create a shadow-buffer pointing to the metadata to ease parsing. */
- rkbuf = rd_kafka_buf_new_shadow(
- MemberMetadata->data, RD_KAFKAP_BYTES_LEN(MemberMetadata), NULL);
-
- /* Protocol parser needs a broker handle to log errors on. */
- rkbuf->rkbuf_rkb = rkb;
- rd_kafka_broker_keep(rkb);
-
- rd_kafka_buf_read_i16(rkbuf, &Version);
- rd_kafka_buf_read_i32(rkbuf, &subscription_cnt);
-
- if (subscription_cnt > 10000 || subscription_cnt <= 0)
- goto err;
-
- rkgm->rkgm_subscription =
- rd_kafka_topic_partition_list_new(subscription_cnt);
-
- while (subscription_cnt-- > 0) {
- rd_kafkap_str_t Topic;
- char *topic_name;
- rd_kafka_buf_read_str(rkbuf, &Topic);
- RD_KAFKAP_STR_DUPA(&topic_name, &Topic);
- rd_kafka_topic_partition_list_add(
- rkgm->rkgm_subscription, topic_name, RD_KAFKA_PARTITION_UA);
- }
-
- rd_kafka_buf_read_bytes(rkbuf, &UserData);
- rkgm->rkgm_userdata = rd_kafkap_bytes_copy(&UserData);
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- if (Version >= 1 &&
- !(rkgm->rkgm_owned =
- rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields)))
- goto err;
-
- rd_kafka_buf_destroy(rkbuf);
-
- return 0;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-
-err:
- rd_rkb_dbg(rkb, CGRP, "MEMBERMETA",
- "Failed to parse MemberMetadata for \"%.*s\": %s",
- RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
- rd_kafka_err2str(err));
- if (rkgm->rkgm_subscription) {
- rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription);
- rkgm->rkgm_subscription = NULL;
- }
-
- rd_kafka_buf_destroy(rkbuf);
- return -1;
-}
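-
-/* For reference, the "consumer" ProtocolType MemberMetadata layout
- * parsed above:
- *
- *   Version          int16
- *   Subscription     int32 count followed by that many topic name
- *                    strings
- *   UserData         bytes
- *   OwnedPartitions  topic+partition list (only when Version >= 1,
- *                    per KIP-429)
- */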
-
-
-/**
- * @brief cgrp handler for JoinGroup responses
- * opaque must be the cgrp handle.
- *
- * @locality rdkafka main thread (unless ERR__DESTROY: arbitrary thread)
- */
-static void rd_kafka_cgrp_handle_JoinGroup(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = opaque;
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode = 0;
- int32_t GenerationId;
- rd_kafkap_str_t Protocol, LeaderId;
- rd_kafkap_str_t MyMemberId = RD_KAFKAP_STR_INITIALIZER;
- int32_t member_cnt;
- int actions;
- int i_am_leader = 0;
- rd_kafka_assignor_t *rkas = NULL;
-
- rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_JoinGroup);
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY ||
- rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
- return; /* Terminating */
-
- if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN) {
- rd_kafka_dbg(
- rkb->rkb_rk, CGRP, "JOINGROUP",
- "JoinGroup response: discarding outdated request "
- "(now in join-state %s)",
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
- return;
- }
-
- if (err) {
- ErrorCode = err;
- goto err;
- }
-
- if (request->rkbuf_reqhdr.ApiVersion >= 2)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- rd_kafka_buf_read_i32(rkbuf, &GenerationId);
- rd_kafka_buf_read_str(rkbuf, &Protocol);
- rd_kafka_buf_read_str(rkbuf, &LeaderId);
- rd_kafka_buf_read_str(rkbuf, &MyMemberId);
- rd_kafka_buf_read_i32(rkbuf, &member_cnt);
-
- if (!ErrorCode && RD_KAFKAP_STR_IS_NULL(&Protocol)) {
- /* Protocol not set; we will not be able to find
- * a matching assignor, so error out early. */
- ErrorCode = RD_KAFKA_RESP_ERR__BAD_MSG;
- } else if (!ErrorCode) {
- char *protocol_name;
- RD_KAFKAP_STR_DUPA(&protocol_name, &Protocol);
- if (!(rkas = rd_kafka_assignor_find(rkcg->rkcg_rk,
- protocol_name)) ||
- !rkas->rkas_enabled) {
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
- "Unsupported assignment strategy \"%s\"",
- protocol_name);
- if (rkcg->rkcg_assignor) {
- if (rkcg->rkcg_assignor->rkas_destroy_state_cb)
- rkcg->rkcg_assignor
- ->rkas_destroy_state_cb(
- rkcg->rkcg_assignor_state);
- rkcg->rkcg_assignor_state = NULL;
- rkcg->rkcg_assignor = NULL;
- }
- ErrorCode = RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
- }
- }
-
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
- "JoinGroup response: GenerationId %" PRId32
- ", "
- "Protocol %.*s, LeaderId %.*s%s, my MemberId %.*s, "
- "member metadata count "
- "%" PRId32 ": %s",
- GenerationId, RD_KAFKAP_STR_PR(&Protocol),
- RD_KAFKAP_STR_PR(&LeaderId),
- RD_KAFKAP_STR_LEN(&MyMemberId) &&
- !rd_kafkap_str_cmp(&LeaderId, &MyMemberId)
- ? " (me)"
- : "",
- RD_KAFKAP_STR_PR(&MyMemberId), member_cnt,
- ErrorCode ? rd_kafka_err2str(ErrorCode) : "(no error)");
-
- if (!ErrorCode) {
- char *my_member_id;
- RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId);
- rd_kafka_cgrp_set_member_id(rkcg, my_member_id);
- rkcg->rkcg_generation_id = GenerationId;
- i_am_leader = !rd_kafkap_str_cmp(&LeaderId, &MyMemberId);
- } else {
- rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000 * 1000);
- goto err;
- }
-
- if (rkcg->rkcg_assignor && rkcg->rkcg_assignor != rkas) {
- if (rkcg->rkcg_assignor->rkas_destroy_state_cb)
- rkcg->rkcg_assignor->rkas_destroy_state_cb(
- rkcg->rkcg_assignor_state);
- rkcg->rkcg_assignor_state = NULL;
- }
- rkcg->rkcg_assignor = rkas;
-
- if (i_am_leader) {
- rd_kafka_group_member_t *members;
- int i;
- int sub_cnt = 0;
- rd_list_t topics;
- rd_kafka_op_t *rko;
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
- "I am elected leader for group \"%s\" "
- "with %" PRId32 " member(s)",
- rkcg->rkcg_group_id->str, member_cnt);
-
- if (member_cnt > 100000) {
- err = RD_KAFKA_RESP_ERR__BAD_MSG;
- goto err;
- }
-
- rd_list_init(&topics, member_cnt, rd_free);
-
- members = rd_calloc(member_cnt, sizeof(*members));
-
- for (i = 0; i < member_cnt; i++) {
- rd_kafkap_str_t MemberId;
- rd_kafkap_bytes_t MemberMetadata;
- rd_kafka_group_member_t *rkgm;
- rd_kafkap_str_t GroupInstanceId =
- RD_KAFKAP_STR_INITIALIZER;
-
- rd_kafka_buf_read_str(rkbuf, &MemberId);
- if (request->rkbuf_reqhdr.ApiVersion >= 5)
- rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
- rd_kafka_buf_read_bytes(rkbuf, &MemberMetadata);
-
- rkgm = &members[sub_cnt];
- rkgm->rkgm_member_id = rd_kafkap_str_copy(&MemberId);
- rkgm->rkgm_group_instance_id =
- rd_kafkap_str_copy(&GroupInstanceId);
- rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
- rkgm->rkgm_generation = -1;
-
- if (rd_kafka_group_MemberMetadata_consumer_read(
- rkb, rkgm, &MemberMetadata)) {
- /* Failed to parse this member's metadata,
- * ignore it. */
- } else {
- sub_cnt++;
- rkgm->rkgm_assignment =
- rd_kafka_topic_partition_list_new(
- rkgm->rkgm_subscription->cnt);
- rd_kafka_topic_partition_list_get_topic_names(
- rkgm->rkgm_subscription, &topics,
- 0 /*don't include regex*/);
- }
- }
-
- /* FIXME: What to do if parsing failed for some/all members?
- * It is a sign of incompatibility. */
-
-
- rd_kafka_cgrp_group_leader_reset(rkcg,
- "JoinGroup response clean-up");
-
- rd_kafka_assert(NULL, rkcg->rkcg_group_leader.members == NULL);
- rkcg->rkcg_group_leader.members = members;
- rkcg->rkcg_group_leader.member_cnt = sub_cnt;
-
- rd_kafka_cgrp_set_join_state(
- rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
-
- /* The assignor will need metadata so fetch it asynchronously
- * and run the assignor when we get a reply.
- * Create a callback op that the generic metadata code
- * will trigger when metadata has been parsed. */
- rko = rd_kafka_op_new_cb(
- rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
- rd_kafka_cgrp_assignor_handle_Metadata_op);
- rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL);
-
- rd_kafka_MetadataRequest(
- rkb, &topics, "partition assignor",
- rd_false /*!allow_auto_create*/,
- /* cgrp_update=false:
- * Since the subscription list may not be identical
- * across all members of the group and thus the
- * Metadata response may not be identical to this
- * consumer's subscription list, we want to
- * avoid triggering a rejoin or error propagation
- * on receiving the response since some topics
- * may be missing. */
- rd_false, rko);
- rd_list_destroy(&topics);
-
- } else {
- rd_kafka_cgrp_set_join_state(
- rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC);
-
- rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
-
- rd_kafka_SyncGroupRequest(
- rkb, rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
- rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, NULL, 0,
- RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_cgrp_handle_SyncGroup, rkcg);
- }
-
-err:
- actions = rd_kafka_err_action(
- rkb, ErrorCode, request, RD_KAFKA_ERR_ACTION_IGNORE,
- RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
-
- RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED,
-
- RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
-
- RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
-
- RD_KAFKA_ERR_ACTION_END);
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- /* Re-query for coordinator */
- rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
- RD_KAFKA_OP_COORD_QUERY, ErrorCode);
- }
-
- /* No need for retries here since the join is intervalled,
- * see rkcg_join_intvl */
-
- if (ErrorCode) {
- if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
- return; /* Termination */
-
- if (ErrorCode == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) {
- rd_kafka_set_fatal_error(rkcg->rkcg_rk, ErrorCode,
- "Fatal consumer error: %s",
- rd_kafka_err2str(ErrorCode));
- ErrorCode = RD_KAFKA_RESP_ERR__FATAL;
-
- } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
- rd_kafka_consumer_err(
- rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0,
- NULL, NULL, RD_KAFKA_OFFSET_INVALID,
- "JoinGroup failed: %s",
- rd_kafka_err2str(ErrorCode));
-
- if (ErrorCode == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
- rd_kafka_cgrp_set_member_id(rkcg, "");
- else if (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
- rkcg->rkcg_generation_id = -1;
- else if (ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) {
- /* KIP-394 requires member.id on initial join
- * group request */
- char *my_member_id;
- RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId);
- rd_kafka_cgrp_set_member_id(rkcg, my_member_id);
- /* Skip the join backoff */
- rd_interval_reset(&rkcg->rkcg_join_intvl);
- }
-
- if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
- (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION ||
- ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED))
- rd_kafka_cgrp_revoke_all_rejoin(
- rkcg, rd_true /*assignment is lost*/,
- rd_true /*this consumer is initiating*/,
- "JoinGroup error");
- else
- rd_kafka_cgrp_rejoin(rkcg, "JoinGroup error: %s",
- rd_kafka_err2str(ErrorCode));
- }
-
- return;
-
-err_parse:
- ErrorCode = rkbuf->rkbuf_err;
- goto err;
-}
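-
-/* Summary of the JoinGroup flow implemented above:
- *
- *   follower: JoinGroup -> SyncGroup (no assignments) -> assignment
- *             arrives in the SyncGroup response
- *   leader:   JoinGroup (response carries all members' metadata) ->
- *             async Metadata request -> assignor run ->
- *             SyncGroup (assignments for every member)
- */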
-
-
-/**
- * @brief Check subscription against requested Metadata.
- */
-static rd_kafka_op_res_t rd_kafka_cgrp_handle_Metadata_op(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED; /* Terminating */
-
- rd_kafka_cgrp_metadata_update_check(rkcg, rd_false /*dont rejoin*/);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief (Async) Refresh metadata (for cgrp's needs)
- *
- * @returns 1 if metadata refresh was requested, or 0 if metadata is
- * up to date, or -1 if no broker is available for metadata requests.
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static int rd_kafka_cgrp_metadata_refresh(rd_kafka_cgrp_t *rkcg,
- int *metadata_agep,
- const char *reason) {
- rd_kafka_t *rk = rkcg->rkcg_rk;
- rd_kafka_op_t *rko;
- rd_list_t topics;
- rd_kafka_resp_err_t err;
-
- rd_list_init(&topics, 8, rd_free);
-
- /* Insert all non-wildcard topics in cache. */
- rd_kafka_metadata_cache_hint_rktparlist(
- rkcg->rkcg_rk, rkcg->rkcg_subscription, NULL, 0 /*dont replace*/);
-
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) {
- /* For wildcard subscriptions make sure the
- * cached full metadata isn't too old. */
- int metadata_age = -1;
-
- if (rk->rk_ts_full_metadata)
- metadata_age =
- (int)(rd_clock() - rk->rk_ts_full_metadata) / 1000;
-
- *metadata_agep = metadata_age;
-
- if (metadata_age != -1 &&
- metadata_age <= rk->rk_conf.metadata_max_age_ms) {
- rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA,
- "CGRPMETADATA",
- "%s: metadata for wildcard subscription "
- "is up to date (%dms old)",
- reason, *metadata_agep);
- rd_list_destroy(&topics);
- return 0; /* Up-to-date */
- }
-
- } else {
- /* Check that all subscribed topics are in the cache. */
- int r;
-
- rd_kafka_topic_partition_list_get_topic_names(
- rkcg->rkcg_subscription, &topics, 0 /*no regexps*/);
-
- rd_kafka_rdlock(rk);
- r = rd_kafka_metadata_cache_topics_count_exists(rk, &topics,
- metadata_agep);
- rd_kafka_rdunlock(rk);
-
- if (r == rd_list_cnt(&topics)) {
- rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA,
- "CGRPMETADATA",
- "%s: metadata for subscription "
- "is up to date (%dms old)",
- reason, *metadata_agep);
- rd_list_destroy(&topics);
- return 0; /* Up-to-date and all topics exist. */
- }
-
- rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA",
- "%s: metadata for subscription "
- "only available for %d/%d topics (%dms old)",
- reason, r, rd_list_cnt(&topics), *metadata_agep);
- }
-
- /* Async request, result will be triggered from
- * rd_kafka_parse_metadata(). */
- rko = rd_kafka_op_new_cb(rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
- rd_kafka_cgrp_handle_Metadata_op);
- rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, 0);
-
- err = rd_kafka_metadata_request(rkcg->rkcg_rk, NULL, &topics,
- rd_false /*!allow auto create */,
- rd_true /*cgrp_update*/, reason, rko);
- if (err) {
- rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA",
- "%s: need to refresh metadata (%dms old) "
- "but no usable brokers available: %s",
- reason, *metadata_agep, rd_kafka_err2str(err));
- rd_kafka_op_destroy(rko);
- }
-
- rd_list_destroy(&topics);
-
- return err ? -1 : 1;
-}
-
-
-
-static void rd_kafka_cgrp_join(rd_kafka_cgrp_t *rkcg) {
- int metadata_age;
-
- if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP ||
- rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_INIT ||
- rd_kafka_cgrp_awaiting_response(rkcg))
- return;
-
- /* On max.poll.interval.ms failure, do not rejoin group until the
- * application has called poll. */
- if ((rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED) &&
- rd_kafka_max_poll_exceeded(rkcg->rkcg_rk))
- return;
-
- rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN",
- "Group \"%.*s\": join with %d subscribed topic(s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_list_cnt(rkcg->rkcg_subscribed_topics));
-
-
- /* See if we need to query metadata to continue:
- * - if subscription contains wildcards:
- * * query all topics in cluster
- *
- * - if subscription does not contain wildcards but
- * some topics are missing from the local metadata cache:
- * * query subscribed topics (all cached ones)
- *
- * - otherwise:
- * * rely on topic metadata cache
- */
- /* We need up-to-date full metadata to continue;
- * refresh metadata if necessary. */
- if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age,
- "consumer join") == 1) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER,
- "JOIN",
- "Group \"%.*s\": "
- "postponing join until up-to-date "
- "metadata is available",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-
- rd_assert(
- rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT ||
- /* Possible via rd_kafka_cgrp_modify_subscription */
- rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY);
-
- rd_kafka_cgrp_set_join_state(
- rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
-
- return; /* ^ async call */
- }
-
- if (rd_list_empty(rkcg->rkcg_subscribed_topics))
- rd_kafka_cgrp_metadata_update_check(rkcg,
- rd_false /*dont join*/);
-
- if (rd_list_empty(rkcg->rkcg_subscribed_topics)) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "JOIN",
- "Group \"%.*s\": "
- "no matching topics based on %dms old metadata: "
- "next metadata refresh in %dms",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), metadata_age,
- rkcg->rkcg_rk->rk_conf.metadata_refresh_interval_ms -
- metadata_age);
- return;
- }
-
- rd_rkb_dbg(
- rkcg->rkcg_curr_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "JOIN",
- "Joining group \"%.*s\" with %d subscribed topic(s) and "
- "member id \"%.*s\"",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_list_cnt(rkcg->rkcg_subscribed_topics),
- rkcg->rkcg_member_id ? RD_KAFKAP_STR_LEN(rkcg->rkcg_member_id) : 0,
- rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : "");
-
-
- rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN);
-
- rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_JoinGroup);
-
- rd_kafka_JoinGroupRequest(
- rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id,
- rkcg->rkcg_group_instance_id,
- rkcg->rkcg_rk->rk_conf.group_protocol_type,
- rkcg->rkcg_subscribed_topics, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_cgrp_handle_JoinGroup, rkcg);
-}
-
-/**
- * Rejoin group on update to effective subscribed topics list
- */
-static void rd_kafka_cgrp_revoke_rejoin(rd_kafka_cgrp_t *rkcg,
- const char *reason) {
- /*
- * Clean-up group leader duties, if any.
- */
- rd_kafka_cgrp_group_leader_reset(rkcg, "group (re)join");
-
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "REJOIN",
- "Group \"%.*s\" (re)joining in join-state %s "
- "with %d assigned partition(s): %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0,
- reason);
-
- rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/,
- rd_true /*initiating*/, reason);
-}
-
-/**
- * @brief Update the effective list of subscribed topics.
- *
- * Set \p tinfos to NULL to clear the list.
- *
- * @param tinfos rd_list_t(rd_kafka_topic_info_t *): new effective topic list
- *
- * @returns true on change, else false.
- *
- * @remark Takes ownership of \p tinfos
- */
-static rd_bool_t rd_kafka_cgrp_update_subscribed_topics(rd_kafka_cgrp_t *rkcg,
- rd_list_t *tinfos) {
- rd_kafka_topic_info_t *tinfo;
- int i;
-
- if (!tinfos) {
- if (!rd_list_empty(rkcg->rkcg_subscribed_topics))
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION",
- "Group \"%.*s\": "
- "clearing subscribed topics list (%d)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_list_cnt(rkcg->rkcg_subscribed_topics));
- tinfos = rd_list_new(0, (void *)rd_kafka_topic_info_destroy);
-
- } else {
- if (rd_list_cnt(tinfos) == 0)
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION",
- "Group \"%.*s\": "
- "no topics in metadata matched "
- "subscription",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
- }
-
- /* Sort for comparison */
- rd_list_sort(tinfos, rd_kafka_topic_info_cmp);
-
- /* Compare to existing to see if anything changed. */
- if (!rd_list_cmp(rkcg->rkcg_subscribed_topics, tinfos,
- rd_kafka_topic_info_cmp)) {
- /* No change */
- rd_list_destroy(tinfos);
- return rd_false;
- }
-
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA, "SUBSCRIPTION",
- "Group \"%.*s\": effective subscription list changed "
- "from %d to %d topic(s):",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_list_cnt(rkcg->rkcg_subscribed_topics), rd_list_cnt(tinfos));
-
- RD_LIST_FOREACH(tinfo, tinfos, i)
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA,
- "SUBSCRIPTION", " Topic %s with %d partition(s)",
- tinfo->topic, tinfo->partition_cnt);
-
- rd_list_destroy(rkcg->rkcg_subscribed_topics);
-
- rkcg->rkcg_subscribed_topics = tinfos;
-
- return rd_true;
-}
-
-
-/**
- * @brief Handle Heartbeat response.
- */
-void rd_kafka_cgrp_handle_Heartbeat(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode = 0;
- int actions = 0;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return;
-
- rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT);
- rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
-
- rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (err)
- goto err;
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- if (ErrorCode) {
- err = ErrorCode;
- goto err;
- }
-
- rd_kafka_cgrp_update_session_timeout(
- rkcg, rd_false /*don't update if session has expired*/);
-
- return;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- rkcg->rkcg_last_heartbeat_err = err;
-
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "HEARTBEAT",
- "Group \"%s\" heartbeat error response in "
- "state %s (join-state %s, %d partition(s) assigned): %s",
- rkcg->rkcg_group_id->str,
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0,
- rd_kafka_err2str(err));
-
- if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "HEARTBEAT",
- "Heartbeat response: discarding outdated "
- "request (now in join-state %s)",
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
- return;
- }
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__DESTROY:
- /* quick cleanup */
- return;
-
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP:
- case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT",
- "Heartbeat failed due to coordinator (%s) "
- "no longer available: %s: "
- "re-querying for coordinator",
- rkcg->rkcg_curr_coord
- ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
- : "none",
- rd_kafka_err2str(err));
- /* Remain in joined state and keep querying for coordinator */
- actions = RD_KAFKA_ERR_ACTION_REFRESH;
- break;
-
- case RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS:
- rd_kafka_cgrp_update_session_timeout(
- rkcg, rd_false /*don't update if session has expired*/);
- /* No further action if already rebalancing */
- if (RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg))
- return;
- rd_kafka_cgrp_group_is_rebalancing(rkcg);
- return;
-
- case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
- rd_kafka_cgrp_set_member_id(rkcg, "");
- rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
- rd_true /*initiating*/,
- "resetting member-id");
- return;
-
- case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
- rkcg->rkcg_generation_id = -1;
- rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
- rd_true /*initiating*/,
- "illegal generation");
- return;
-
- case RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID:
- rd_kafka_set_fatal_error(rkcg->rkcg_rk, err,
- "Fatal consumer error: %s",
- rd_kafka_err2str(err));
- rd_kafka_cgrp_revoke_all_rejoin_maybe(
- rkcg, rd_true, /*assignment lost*/
- rd_true, /*initiating*/
- "consumer fenced by "
- "newer instance");
- return;
-
- default:
- actions = rd_kafka_err_action(rkb, err, request,
- RD_KAFKA_ERR_ACTION_END);
- break;
- }
-
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- /* Re-query for coordinator */
- rd_kafka_cgrp_coord_query(rkcg, rd_kafka_err2str(err));
- }
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY &&
- rd_kafka_buf_retry(rkb, request)) {
- /* Retry */
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
- return;
- }
-}
-
-
-
-/**
- * @brief Send Heartbeat
- */
-static void rd_kafka_cgrp_heartbeat(rd_kafka_cgrp_t *rkcg) {
- /* Don't send heartbeats if max.poll.interval.ms was exceeded */
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED)
- return;
-
- /* Skip heartbeat if we have one in transit */
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT)
- return;
-
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
- rd_kafka_HeartbeatRequest(
- rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
- rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id,
- RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), rd_kafka_cgrp_handle_Heartbeat,
- NULL);
-}
-
-/**
- * Cgrp is now terminated: decommission it and signal back to application.
- */
-static void rd_kafka_cgrp_terminated(rd_kafka_cgrp_t *rkcg) {
- if (rd_atomic32_get(&rkcg->rkcg_terminated))
- return; /* terminated() may be called multiple times,
- * make sure to only terminate once. */
-
- rd_kafka_cgrp_group_assignment_set(rkcg, NULL);
-
- rd_kafka_assert(NULL, !rd_kafka_assignment_in_progress(rkcg->rkcg_rk));
- rd_kafka_assert(NULL, !rkcg->rkcg_group_assignment);
- rd_kafka_assert(NULL, rkcg->rkcg_rk->rk_consumer.wait_commit_cnt == 0);
- rd_kafka_assert(NULL, rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM);
-
- rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers,
- &rkcg->rkcg_offset_commit_tmr, 1 /*lock*/);
-
- rd_kafka_q_purge(rkcg->rkcg_wait_coord_q);
-
- /* Disable and empty ops queue since there will be no
- * (broker) thread serving it anymore after the unassign_broker
- * below.
- * This prevents hang on destroy where responses are enqueued on
- * rkcg_ops without anything serving the queue. */
- rd_kafka_q_disable(rkcg->rkcg_ops);
- rd_kafka_q_purge(rkcg->rkcg_ops);
-
- if (rkcg->rkcg_curr_coord)
- rd_kafka_cgrp_coord_clear_broker(rkcg);
-
- if (rkcg->rkcg_coord) {
- rd_kafka_broker_destroy(rkcg->rkcg_coord);
- rkcg->rkcg_coord = NULL;
- }
-
- rd_atomic32_set(&rkcg->rkcg_terminated, rd_true);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
- "Consumer group sub-system terminated%s",
- rkcg->rkcg_reply_rko ? " (will enqueue reply)" : "");
-
- if (rkcg->rkcg_reply_rko) {
- /* Signal back to application. */
- rd_kafka_replyq_enq(&rkcg->rkcg_reply_rko->rko_replyq,
- rkcg->rkcg_reply_rko, 0);
- rkcg->rkcg_reply_rko = NULL;
- }
-
- /* Remove cgrp application queue forwarding, if any. */
- rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL);
-}
-
-
-/**
- * If a cgrp is terminating and all outstanding ops are now finished
- * then progress to final termination and return 1.
- * Else returns 0.
- */
-static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg) {
-
- if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM)
- return 1;
-
- if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)))
- return 0;
-
- /* Check if wait-coord queue has timed out. */
- if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 &&
- rkcg->rkcg_ts_terminate +
- (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) <
- rd_clock()) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
- "Group \"%s\": timing out %d op(s) in "
- "wait-for-coordinator queue",
- rkcg->rkcg_group_id->str,
- rd_kafka_q_len(rkcg->rkcg_wait_coord_q));
- rd_kafka_q_disable(rkcg->rkcg_wait_coord_q);
- if (rd_kafka_q_concat(rkcg->rkcg_ops,
- rkcg->rkcg_wait_coord_q) == -1) {
- /* ops queue shut down, purge coord queue */
- rd_kafka_q_purge(rkcg->rkcg_wait_coord_q);
- }
- }
-
- if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) &&
- rd_list_empty(&rkcg->rkcg_toppars) &&
- !rd_kafka_assignment_in_progress(rkcg->rkcg_rk) &&
- rkcg->rkcg_rk->rk_consumer.wait_commit_cnt == 0 &&
- !(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)) {
- /* Since we might be deep down in a 'rko' handler
- * called from cgrp_op_serve() we can't call terminated()
- * directly since it will decommission the rkcg_ops queue
- * that might be locked by intermediate functions.
- * Instead set the TERM state and let the cgrp terminate
- * at its own discretion. */
- rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_TERM);
-
- return 1;
- } else {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "CGRPTERM",
- "Group \"%s\": "
- "waiting for %s%d toppar(s), "
- "%s"
- "%d commit(s)%s%s%s (state %s, join-state %s) "
- "before terminating",
- rkcg->rkcg_group_id->str,
- RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ? "assign call, " : "",
- rd_list_cnt(&rkcg->rkcg_toppars),
- rd_kafka_assignment_in_progress(rkcg->rkcg_rk)
- ? "assignment in progress, "
- : "",
- rkcg->rkcg_rk->rk_consumer.wait_commit_cnt,
- (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)
- ? ", wait-leave,"
- : "",
- rkcg->rkcg_rebalance_rejoin ? ", rebalance_rejoin," : "",
- (rkcg->rkcg_rebalance_incr_assignment != NULL)
- ? ", rebalance_incr_assignment,"
- : "",
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
- return 0;
- }
-}
-
-
-/**
- * @brief Add partition to this cgrp management
- *
- * @locks none
- */
-static void rd_kafka_cgrp_partition_add(rd_kafka_cgrp_t *rkcg,
- rd_kafka_toppar_t *rktp) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTADD",
- "Group \"%s\": add %s [%" PRId32 "]",
- rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition);
-
- rd_kafka_toppar_lock(rktp);
- rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_CGRP));
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_CGRP;
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_keep(rktp);
- rd_list_add(&rkcg->rkcg_toppars, rktp);
-}
-
-/**
- * @brief Remove partition from this cgrp management
- *
- * @locks none
- */
-static void rd_kafka_cgrp_partition_del(rd_kafka_cgrp_t *rkcg,
- rd_kafka_toppar_t *rktp) {
- int cnt = 0, barrier_cnt = 0, message_cnt = 0, other_cnt = 0;
- rd_kafka_op_t *rko;
- rd_kafka_q_t *rkq;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
- "Group \"%s\": delete %s [%" PRId32 "]",
- rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition);
-
- rd_kafka_toppar_lock(rktp);
- rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_CGRP);
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_CGRP;
-
- if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) {
- /* Partition is being removed from the cluster and it's stopped,
- * so rktp->rktp_fetchq->rkq_fwdq is NULL.
- * Purge remaining operations in rktp->rktp_fetchq->rkq_q,
- * while holding lock, to avoid circular references */
- rkq = rktp->rktp_fetchq;
- mtx_lock(&rkq->rkq_lock);
- rd_assert(!rkq->rkq_fwdq);
-
- rko = TAILQ_FIRST(&rkq->rkq_q);
- while (rko) {
- if (rko->rko_type != RD_KAFKA_OP_BARRIER &&
- rko->rko_type != RD_KAFKA_OP_FETCH) {
- rd_kafka_log(
- rkcg->rkcg_rk, LOG_WARNING, "PARTDEL",
- "Purging toppar fetch queue buffer op"
- "with unexpected type: %s",
- rd_kafka_op2str(rko->rko_type));
- }
-
- if (rko->rko_type == RD_KAFKA_OP_BARRIER)
- barrier_cnt++;
- else if (rko->rko_type == RD_KAFKA_OP_FETCH)
- message_cnt++;
- else
- other_cnt++;
-
- rko = TAILQ_NEXT(rko, rko_link);
- cnt++;
- }
-
- mtx_unlock(&rkq->rkq_lock);
-
- if (cnt) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
- "Purge toppar fetch queue buffer "
- "containing %d op(s) "
- "(%d barrier(s), %d message(s), %d other)"
- " to avoid "
- "circular references",
- cnt, barrier_cnt, message_cnt, other_cnt);
- rd_kafka_q_purge(rktp->rktp_fetchq);
- } else {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
- "Not purging toppar fetch queue buffer."
- " No ops present in the buffer.");
- }
- }
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_list_remove(&rkcg->rkcg_toppars, rktp);
-
- rd_kafka_toppar_destroy(rktp); /* refcnt from _add above */
-
- rd_kafka_cgrp_try_terminate(rkcg);
-}
-
-
-
-/**
- * @brief Defer offset commit (rko) until coordinator is available.
- *
- * @returns 1 if the rko was deferred, or 0 if the defer queue is disabled
- *          or the rko was already deferred.
- */
-static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg,
- rd_kafka_op_t *rko,
- const char *reason) {
-
- /* wait_coord_q is disabled session.timeout.ms after
- * group close() has been initiated. */
- if (rko->rko_u.offset_commit.ts_timeout != 0 ||
- !rd_kafka_q_ready(rkcg->rkcg_wait_coord_q))
- return 0;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
- "Group \"%s\": "
- "unable to OffsetCommit in state %s: %s: "
- "coordinator (%s) is unavailable: "
- "retrying later",
- rkcg->rkcg_group_id->str,
- rd_kafka_cgrp_state_names[rkcg->rkcg_state], reason,
- rkcg->rkcg_curr_coord
- ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
- : "none");
-
- rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS;
- rko->rko_u.offset_commit.ts_timeout =
- rd_clock() +
- (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
- rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko);
-
- return 1;
-}
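-
-/* Illustrative sketch (not part of librdkafka; disabled): the deferral
- * deadline arithmetic used above. rd_clock() is in microseconds, so the
- * millisecond config value is scaled by 1000; e.g. a 45000 ms session
- * timeout defers the commit for at most 45 s. The helper name
- * `example_defer_deadline` is hypothetical. */
-#if 0
-static rd_ts_t example_defer_deadline(const rd_kafka_cgrp_t *rkcg) {
-        return rd_clock() +
-               (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
-}
-#endif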
-
-
-/**
- * @brief Update the committed offsets for the partitions in \p offsets.
- *
- * @remark \p offsets may be NULL if \p err is set
- * @returns the number of partitions with errors encountered
- */
-static int rd_kafka_cgrp_update_committed_offsets(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets) {
- int i;
- int errcnt = 0;
-
- /* Update toppars' committed offset or global error */
- for (i = 0; offsets && i < offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
- rd_kafka_toppar_t *rktp;
-
- /* Ignore logical offsets since they were never
- * sent to the broker. */
- if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset))
- continue;
-
- /* Propagate global error to all partitions that don't have
- * explicit error set. */
- if (err && !rktpar->err)
- rktpar->err = err;
-
- if (rktpar->err) {
- rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, "OFFSET",
- "OffsetCommit failed for "
- "%s [%" PRId32
- "] at offset "
- "%" PRId64 " in join-state %s: %s",
- rktpar->topic, rktpar->partition,
- rktpar->offset,
- rd_kafka_cgrp_join_state_names
- [rkcg->rkcg_join_state],
- rd_kafka_err2str(rktpar->err));
-
- errcnt++;
- continue;
- }
-
- rktp = rd_kafka_topic_partition_get_toppar(rkcg->rkcg_rk,
- rktpar, rd_false);
- if (!rktp)
- continue;
-
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_committed_pos =
- rd_kafka_topic_partition_get_fetch_pos(rktpar);
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp); /* from get_toppar() */
- }
-
- return errcnt;
-}
-
-
-/**
- * @brief Propagate OffsetCommit results.
- *
- * @param rko_orig The original rko that triggered the commit, this is used
- * to propagate the result.
- * @param err The aggregated request-level error, or ERR_NO_ERROR.
- * @param errcnt The number of partitions in \p offsets that failed
- * offset commit.
- */
-static void rd_kafka_cgrp_propagate_commit_result(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_op_t *rko_orig,
- rd_kafka_resp_err_t err,
- int errcnt,
- rd_kafka_topic_partition_list_t *offsets) {
-
- const rd_kafka_t *rk = rkcg->rkcg_rk;
- int offset_commit_cb_served = 0;
-
- /* If no special callback is set but an offset_commit_cb has
- * been set in conf then post an event for the latter. */
- if (!rko_orig->rko_u.offset_commit.cb && rk->rk_conf.offset_commit_cb) {
- rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
-
- rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
-
- if (offsets)
- rko_reply->rko_u.offset_commit.partitions =
- rd_kafka_topic_partition_list_copy(offsets);
-
- rko_reply->rko_u.offset_commit.cb =
- rk->rk_conf.offset_commit_cb;
- rko_reply->rko_u.offset_commit.opaque = rk->rk_conf.opaque;
-
- rd_kafka_q_enq(rk->rk_rep, rko_reply);
- offset_commit_cb_served++;
- }
-
-
- /* Enqueue reply to requester's queue, if any. */
- if (rko_orig->rko_replyq.q) {
- rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
-
- rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
-
- /* Copy offset & partitions & callbacks to reply op */
- rko_reply->rko_u.offset_commit = rko_orig->rko_u.offset_commit;
- if (offsets)
- rko_reply->rko_u.offset_commit.partitions =
- rd_kafka_topic_partition_list_copy(offsets);
- if (rko_reply->rko_u.offset_commit.reason)
- rko_reply->rko_u.offset_commit.reason =
- rd_strdup(rko_reply->rko_u.offset_commit.reason);
-
- rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0);
- offset_commit_cb_served++;
- }
-
- if (!offset_commit_cb_served && offsets &&
- (errcnt > 0 || (err != RD_KAFKA_RESP_ERR_NO_ERROR &&
- err != RD_KAFKA_RESP_ERR__NO_OFFSET))) {
- /* If there is no callback or handler for this (auto)
- * commit then log an error (#1043) */
- char tmp[512];
-
- rd_kafka_topic_partition_list_str(
- offsets, tmp, sizeof(tmp),
- /* Print per-partition errors unless there was a
- * request-level error. */
- RD_KAFKA_FMT_F_OFFSET |
- (errcnt ? RD_KAFKA_FMT_F_ONLY_ERR : 0));
-
- rd_kafka_log(
- rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL",
- "Offset commit (%s) failed "
- "for %d/%d partition(s) in join-state %s: "
- "%s%s%s",
- rko_orig->rko_u.offset_commit.reason,
- errcnt ? errcnt : offsets->cnt, offsets->cnt,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- errcnt ? rd_kafka_err2str(err) : "", errcnt ? ": " : "",
- tmp);
- }
-}
-
-
-
-/**
- * @brief Handle OffsetCommitResponse
- * Takes the original 'rko' as opaque argument.
- * @remark \p rkb, rkbuf, and request may be NULL in a number of
- * error cases (e.g., _NO_OFFSET, _WAIT_COORD)
- */
-static void rd_kafka_cgrp_op_handle_OffsetCommit(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
- rd_kafka_op_t *rko_orig = opaque;
- rd_kafka_topic_partition_list_t *offsets =
- rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */
- int errcnt;
-
- RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT);
-
- err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, request,
- offsets, rd_false);
-
- /* Suppress empty commit debug logs if allowed */
- if (err != RD_KAFKA_RESP_ERR__NO_OFFSET ||
- !rko_orig->rko_u.offset_commit.silent_empty) {
- if (rkb)
- rd_rkb_dbg(rkb, CGRP, "COMMIT",
- "OffsetCommit for %d partition(s) in "
- "join-state %s: "
- "%s: returned: %s",
- offsets ? offsets->cnt : -1,
- rd_kafka_cgrp_join_state_names
- [rkcg->rkcg_join_state],
- rko_orig->rko_u.offset_commit.reason,
- rd_kafka_err2str(err));
- else
- rd_kafka_dbg(rk, CGRP, "COMMIT",
- "OffsetCommit for %d partition(s) in "
- "join-state "
- "%s: %s: "
- "returned: %s",
- offsets ? offsets->cnt : -1,
- rd_kafka_cgrp_join_state_names
- [rkcg->rkcg_join_state],
- rko_orig->rko_u.offset_commit.reason,
- rd_kafka_err2str(err));
- }
-
-
- /*
- * Error handling
- */
- switch (err) {
- case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
- /* Revoke assignment and rebalance on unknown member */
- rd_kafka_cgrp_set_member_id(rk->rk_cgrp, "");
- rd_kafka_cgrp_revoke_all_rejoin_maybe(
- rkcg, rd_true /*assignment is lost*/,
- rd_true /*this consumer is initiating*/,
- "OffsetCommit error: Unknown member");
- break;
-
- case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
- /* Revoke assignment and rebalance on illegal generation */
- rk->rk_cgrp->rkcg_generation_id = -1;
- rd_kafka_cgrp_revoke_all_rejoin_maybe(
- rkcg, rd_true /*assignment is lost*/,
- rd_true /*this consumer is initiating*/,
- "OffsetCommit error: Illegal generation");
- break;
-
- case RD_KAFKA_RESP_ERR__IN_PROGRESS:
- return; /* Retrying */
-
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
- case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- /* The coordinator is not available, defer the offset commit
- * to when the coordinator is back up again. */
-
- /* Future-proofing, see timeout_scan(). */
- rd_kafka_assert(NULL, err != RD_KAFKA_RESP_ERR__WAIT_COORD);
-
- if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko_orig,
- rd_kafka_err2str(err)))
- return;
- break;
-
- default:
- break;
- }
-
- /* Call on_commit interceptors */
- if (err != RD_KAFKA_RESP_ERR__NO_OFFSET &&
- err != RD_KAFKA_RESP_ERR__DESTROY && offsets && offsets->cnt > 0)
- rd_kafka_interceptors_on_commit(rk, offsets, err);
-
- /* Keep track of outstanding commits */
- rd_kafka_assert(NULL, rk->rk_consumer.wait_commit_cnt > 0);
- rk->rk_consumer.wait_commit_cnt--;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- rd_kafka_op_destroy(rko_orig);
- return; /* Handle is terminating, this op may be handled
- * by the op enq()ing thread rather than the
- * rdkafka main thread, it is not safe to
- * continue here. */
- }
-
- /* Update the committed offsets for each partition's rktp. */
- errcnt = rd_kafka_cgrp_update_committed_offsets(rkcg, err, offsets);
-
- if (err != RD_KAFKA_RESP_ERR__DESTROY &&
- !(err == RD_KAFKA_RESP_ERR__NO_OFFSET &&
- rko_orig->rko_u.offset_commit.silent_empty)) {
- /* Propagate commit results (success or permanent error)
- * unless we're shutting down or commit was empty. */
- rd_kafka_cgrp_propagate_commit_result(rkcg, rko_orig, err,
- errcnt, offsets);
- }
-
- rd_kafka_op_destroy(rko_orig);
-
- /* If the current state was waiting for commits to finish we'll try to
- * transition to the next state. */
- if (rk->rk_consumer.wait_commit_cnt == 0)
- rd_kafka_assignment_serve(rk);
-}
-
-
-static size_t rd_kafka_topic_partition_has_absolute_offset(
- const rd_kafka_topic_partition_t *rktpar,
- void *opaque) {
- return rktpar->offset >= 0 ? 1 : 0;
-}
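-
-/* Illustrative sketch (not part of librdkafka; disabled): this predicate
- * is intended for rd_kafka_topic_partition_list_sum(), which accumulates
- * the per-element return values, so summing 0/1 counts the partitions
- * with an absolute (>= 0) offset. The helper name
- * `example_count_committable` is hypothetical. */
-#if 0
-static int
-example_count_committable(rd_kafka_topic_partition_list_t *offsets) {
-        return (int)rd_kafka_topic_partition_list_sum(
-            offsets, rd_kafka_topic_partition_has_absolute_offset, NULL);
-}
-#endif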
-
-
-/**
- * Commit a list of offsets.
- * Reuse the originating 'rko' for the async reply.
- * 'rko->rko_payload' should either be NULL (to commit current assignment) or
- * a proper topic_partition_list_t with offsets to commit.
- * The offset list will be altered.
- *
- * \p rko...silent_empty: if there are no offsets to commit, bail out
- * silently without posting an op on the reply queue.
- * \p set_offsets: set offsets and epochs in
- * rko->rko_u.offset_commit.partitions from the rktp's
- * stored offset.
- *
- * Locality: cgrp thread
- */
-static void rd_kafka_cgrp_offsets_commit(rd_kafka_cgrp_t *rkcg,
- rd_kafka_op_t *rko,
- rd_bool_t set_offsets,
- const char *reason) {
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_resp_err_t err;
- int valid_offsets = 0;
- int r;
- rd_kafka_buf_t *rkbuf;
- rd_kafka_op_t *reply;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- if (!(rko->rko_flags & RD_KAFKA_OP_F_REPROCESS)) {
- /* wait_commit_cnt has already been increased for
- * reprocessed ops. */
- rkcg->rkcg_rk->rk_consumer.wait_commit_cnt++;
- }
-
- /* If offsets is NULL we shall use the current assignment
- * (not the group assignment). */
- if (!rko->rko_u.offset_commit.partitions &&
- rkcg->rkcg_rk->rk_consumer.assignment.all->cnt > 0) {
- if (rd_kafka_cgrp_assignment_is_lost(rkcg)) {
- /* Not committing assigned offsets: assignment lost */
- err = RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST;
- goto err;
- }
-
- rko->rko_u.offset_commit.partitions =
- rd_kafka_topic_partition_list_copy(
- rkcg->rkcg_rk->rk_consumer.assignment.all);
- }
-
- offsets = rko->rko_u.offset_commit.partitions;
-
- if (offsets) {
- /* Set the offsets to commit */
- if (set_offsets)
- rd_kafka_topic_partition_list_set_offsets(
- rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions,
- 1, RD_KAFKA_OFFSET_INVALID /* def */,
- 1 /* is commit */);
-
- /* Check the number of valid offsets to commit. */
- valid_offsets = (int)rd_kafka_topic_partition_list_sum(
- offsets, rd_kafka_topic_partition_has_absolute_offset,
- NULL);
- }
-
- if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
- /* Commits are not allowed when a fatal error has been raised */
- err = RD_KAFKA_RESP_ERR__FATAL;
- goto err;
- }
-
- if (!valid_offsets) {
- /* No valid offsets */
- err = RD_KAFKA_RESP_ERR__NO_OFFSET;
- goto err;
- }
-
- if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP) {
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
- "COMMIT",
- "Deferring \"%s\" offset commit "
- "for %d partition(s) in state %s: "
- "no coordinator available",
- reason, valid_offsets,
- rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
-
- if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason))
- return;
-
- err = RD_KAFKA_RESP_ERR__WAIT_COORD;
- goto err;
- }
-
-
- rd_rkb_dbg(rkcg->rkcg_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "COMMIT",
- "Committing offsets for %d partition(s) with "
- "generation-id %" PRId32 " in join-state %s: %s",
- valid_offsets, rkcg->rkcg_generation_id,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- reason);
-
- cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid(
- rkcg->rkcg_rk->rk_conf.group_id_str, rkcg->rkcg_generation_id,
- rkcg->rkcg_member_id->str,
- rkcg->rkcg_rk->rk_conf.group_instance_id);
-
- /* Send OffsetCommit */
- r = rd_kafka_OffsetCommitRequest(rkcg->rkcg_coord, cgmetadata, offsets,
- RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_cgrp_op_handle_OffsetCommit,
- rko, reason);
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
-
- /* Must have valid offsets to commit if we get here */
- rd_kafka_assert(NULL, r != 0);
-
- return;
-
-err:
- if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
- "COMMIT", "OffsetCommit internal error: %s",
- rd_kafka_err2str(err));
-
- /* Propagate error through dummy buffer object that will
- * call the response handler from the main loop, avoiding
- * any recursive calls from op_handle_OffsetCommit ->
- * assignment_serve() and then back to cgrp_assigned_offsets_commit() */
-
- reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
- reply->rko_rk = rkcg->rkcg_rk; /* Set rk since the rkbuf will not
- * have a rkb to reach it. */
- reply->rko_err = err;
-
- rkbuf = rd_kafka_buf_new(0, 0);
- rkbuf->rkbuf_cb = rd_kafka_cgrp_op_handle_OffsetCommit;
- rkbuf->rkbuf_opaque = rko;
- reply->rko_u.xbuf.rkbuf = rkbuf;
-
- rd_kafka_q_enq(rkcg->rkcg_ops, reply);
-}
-
-
-/**
- * @brief Commit offsets for the assigned partitions.
- *
- * If \p offsets is NULL all partitions in the current assignment will be used.
- * If \p set_offsets is true the offsets to commit will be read from the
- * rktp's stored offset rather than the .offset fields in \p offsets.
- *
- * rk_consumer.wait_commit_cnt will be increased accordingly.
- */
-void rd_kafka_cgrp_assigned_offsets_commit(
- rd_kafka_cgrp_t *rkcg,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_bool_t set_offsets,
- const char *reason) {
- rd_kafka_op_t *rko;
-
- if (rd_kafka_cgrp_assignment_is_lost(rkcg)) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "AUTOCOMMIT",
- "Group \"%s\": not committing assigned offsets: "
- "assignment lost",
- rkcg->rkcg_group_id->str);
- return;
- }
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
- rko->rko_u.offset_commit.reason = rd_strdup(reason);
- if (rkcg->rkcg_rk->rk_conf.enabled_events &
- RD_KAFKA_EVENT_OFFSET_COMMIT) {
- /* Send results to application */
- rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0);
- rko->rko_u.offset_commit.cb =
- rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/
- rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque;
- }
- /* NULL partitions means current assignment */
- if (offsets)
- rko->rko_u.offset_commit.partitions =
- rd_kafka_topic_partition_list_copy(offsets);
- rko->rko_u.offset_commit.silent_empty = 1;
- rd_kafka_cgrp_offsets_commit(rkcg, rko, set_offsets, reason);
-}
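-
-/* Illustrative sketch (not part of this file; disabled): the public API
- * that ultimately funnels into the commit path above is rd_kafka_commit();
- * passing NULL mirrors the "NULL partitions means current assignment"
- * rule. Error handling is reduced to a stderr message for brevity. */
-#if 0
-static void example_commit_current_assignment(rd_kafka_t *rk) {
-        /* Synchronous commit of the current assignment's offsets. */
-        rd_kafka_resp_err_t err = rd_kafka_commit(rk, NULL, 0 /*sync*/);
-
-        if (err)
-                fprintf(stderr, "commit failed: %s\n",
-                        rd_kafka_err2str(err));
-}
-#endif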
-
-
-/**
- * auto.commit.interval.ms commit timer callback.
- *
- * Trigger a group offset commit.
- *
- * Locality: rdkafka main thread
- */
-static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_cgrp_t *rkcg = arg;
-
- /* Don't attempt auto commit when rebalancing or initializing since
- * the rkcg_generation_id is most likely in flux. */
- if (rkcg->rkcg_subscription &&
- rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_STEADY)
- return;
-
- rd_kafka_cgrp_assigned_offsets_commit(
- rkcg, NULL, rd_true /*set offsets*/, "cgrp auto commit timer");
-}
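-
-/* Illustrative sketch (not part of librdkafka; disabled): how such an
- * intervalled timer is armed. rd_kafka_timer_start() takes microseconds,
- * hence the * 1000 on the millisecond config value. The timer field
- * `rkcg_offset_commit_tmr` and the conf field `auto_commit_interval_ms`
- * are assumed names, used here for illustration only. */
-#if 0
-static void example_start_autocommit_tmr(rd_kafka_cgrp_t *rkcg) {
-        rd_kafka_timer_start(
-            &rkcg->rkcg_rk->rk_timers, &rkcg->rkcg_offset_commit_tmr,
-            rkcg->rkcg_rk->rk_conf.auto_commit_interval_ms * 1000ll /*us*/,
-            rd_kafka_cgrp_offset_commit_tmr_cb, rkcg);
-}
-#endif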
-
-
-/**
- * @brief If rkcg_next_subscription or rkcg_next_unsubscribe are
- * set, trigger a state change so that they are applied from the
- * main dispatcher.
- *
- * @returns rd_true if a subscribe was scheduled, else rd_false.
- */
-static rd_bool_t
-rd_kafka_trigger_waiting_subscribe_maybe(rd_kafka_cgrp_t *rkcg) {
-
- if (rkcg->rkcg_next_subscription || rkcg->rkcg_next_unsubscribe) {
- /* Skip the join backoff */
- rd_interval_reset(&rkcg->rkcg_join_intvl);
- rd_kafka_cgrp_rejoin(rkcg, "Applying next subscription");
- return rd_true;
- }
-
- return rd_false;
-}
-
-
-/**
- * @brief Incrementally add to an existing partition assignment.
- * May update \p partitions but will not hold on to it.
- *
- * @returns an error object or NULL on success.
- */
-static rd_kafka_error_t *
-rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_error_t *error;
-
- error = rd_kafka_assignment_add(rkcg->rkcg_rk, partitions);
- if (error)
- return error;
-
- if (rkcg->rkcg_join_state ==
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) {
- rd_kafka_assignment_resume(rkcg->rkcg_rk,
- "incremental assign called");
- rd_kafka_cgrp_set_join_state(rkcg,
- RD_KAFKA_CGRP_JOIN_STATE_STEADY);
-
- if (rkcg->rkcg_subscription) {
- /* If using subscribe(), start a timer to enforce
- * `max.poll.interval.ms`.
- * Instead of restarting the timer on each ...poll()
- * call, which would be costly (once per message),
- * set up an intervalled timer that checks a timestamp
- * (that is updated on each ...poll()).
- * The timer fires at 2 Hz (every 500ms). */
- rd_kafka_timer_start(
- &rkcg->rkcg_rk->rk_timers,
- &rkcg->rkcg_max_poll_interval_tmr,
- 500 * 1000ll /* 500ms */,
- rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg);
- }
- }
-
- rd_kafka_cgrp_assignment_clear_lost(rkcg,
- "incremental_assign() called");
-
- return NULL;
-}
-
-
-/**
- * @brief Incrementally remove partitions from an existing partition
- * assignment. May update \p partitions but will not hold on
- * to it.
- *
- * @remark This method does not unmark the current assignment as lost
- *         (if lost). That happens following _incr_unassign_done,
- *         when a group rejoin is initiated.
- *
- * @returns An error object or NULL on success.
- */
-static rd_kafka_error_t *rd_kafka_cgrp_incremental_unassign(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_error_t *error;
-
- error = rd_kafka_assignment_subtract(rkcg->rkcg_rk, partitions);
- if (error)
- return error;
-
- if (rkcg->rkcg_join_state ==
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) {
- rd_kafka_assignment_resume(rkcg->rkcg_rk,
- "incremental unassign called");
- rd_kafka_cgrp_set_join_state(
- rkcg,
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE);
- }
-
- rd_kafka_cgrp_assignment_clear_lost(rkcg,
- "incremental_unassign() called");
-
- return NULL;
-}
-
-
-/**
- * @brief Call when all incremental unassign operations are done to transition
- * to the next state.
- */
-static void rd_kafka_cgrp_incr_unassign_done(rd_kafka_cgrp_t *rkcg) {
-
- /* If this action was underway when a terminate was initiated, it was
- * left to complete. Now that it is done, unassign all partitions. */
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
- "Group \"%s\" is terminating, initiating full "
- "unassign",
- rkcg->rkcg_group_id->str);
- rd_kafka_cgrp_unassign(rkcg);
- return;
- }
-
- if (rkcg->rkcg_rebalance_incr_assignment) {
-
- /* This incremental unassign was part of a normal rebalance
- * (in which the revoke set was not empty). Immediately
- * trigger the assign that follows this revoke. The protocol
- * dictates this should occur even if the new assignment
- * set is empty.
- *
- * Also, since this rebalance had some revoked partitions,
- * a re-join should occur following the assign.
- */
-
- rd_kafka_rebalance_op_incr(rkcg,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rkcg->rkcg_rebalance_incr_assignment,
- rd_true /*rejoin following assign*/,
- "cooperative assign after revoke");
-
- rd_kafka_topic_partition_list_destroy(
- rkcg->rkcg_rebalance_incr_assignment);
- rkcg->rkcg_rebalance_incr_assignment = NULL;
-
- /* Note: rkcg_rebalance_rejoin is actioned/reset in the
- * rd_kafka_cgrp_incremental_assign call */
-
- } else if (rkcg->rkcg_rebalance_rejoin) {
- rkcg->rkcg_rebalance_rejoin = rd_false;
-
- /* There are some cases (lost partitions) where a rejoin
- * should occur immediately following the unassign (this
- * is not the case under normal conditions), in which case
- * the rejoin flag will be set. */
-
- /* Skip the join backoff */
- rd_interval_reset(&rkcg->rkcg_join_intvl);
-
- rd_kafka_cgrp_rejoin(rkcg, "Incremental unassignment done");
-
- } else if (!rd_kafka_trigger_waiting_subscribe_maybe(rkcg)) {
- /* After this incremental unassignment we're now back in
- * a steady state. */
- rd_kafka_cgrp_set_join_state(rkcg,
- RD_KAFKA_CGRP_JOIN_STATE_STEADY);
- }
-}
-
-
-/**
- * @brief Call when all absolute (non-incremental) unassign operations are done
- * to transition to the next state.
- */
-static void rd_kafka_cgrp_unassign_done(rd_kafka_cgrp_t *rkcg) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
- "Group \"%s\": unassign done in state %s "
- "(join-state %s)",
- rkcg->rkcg_group_id->str,
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- /* Leave group, if desired. */
- rd_kafka_cgrp_leave_maybe(rkcg);
-
- if (rkcg->rkcg_join_state !=
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE)
- return;
-
- /* All partitions are unassigned. Rejoin the group. */
-
- /* Skip the join backoff */
- rd_interval_reset(&rkcg->rkcg_join_intvl);
-
- rd_kafka_cgrp_rejoin(rkcg, "Unassignment done");
-}
-
-
-
-/**
- * @brief Called from assignment code when all in progress
- * assignment/unassignment operations are done, allowing the cgrp to
- * transition to other states if needed.
- *
- * @remark This may be called spontaneously without any need for a state
- * change in the rkcg.
- */
-void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE",
- "Group \"%s\": "
- "assignment operations done in join-state %s "
- "(rebalance rejoin=%s)",
- rkcg->rkcg_group_id->str,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- RD_STR_ToF(rkcg->rkcg_rebalance_rejoin));
-
- switch (rkcg->rkcg_join_state) {
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE:
- rd_kafka_cgrp_unassign_done(rkcg);
- break;
-
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE:
- rd_kafka_cgrp_incr_unassign_done(rkcg);
- break;
-
- case RD_KAFKA_CGRP_JOIN_STATE_STEADY:
- /* If an updated/next subscription is available, schedule it. */
- if (rd_kafka_trigger_waiting_subscribe_maybe(rkcg))
- break;
-
- if (rkcg->rkcg_rebalance_rejoin) {
- rkcg->rkcg_rebalance_rejoin = rd_false;
-
- /* Skip the join backoff */
- rd_interval_reset(&rkcg->rkcg_join_intvl);
-
- rd_kafka_cgrp_rejoin(
- rkcg,
- "rejoining group to redistribute "
- "previously owned partitions to other "
- "group members");
- break;
- }
-
- /* FALLTHRU */
-
- case RD_KAFKA_CGRP_JOIN_STATE_INIT:
- /* Check if cgrp is trying to terminate, which is safe to do
- * in these two states. Otherwise we'll need to wait for
- * the current state to decommission. */
- rd_kafka_cgrp_try_terminate(rkcg);
- break;
-
- default:
- break;
- }
-}
-
-
-
-/**
- * @brief Remove existing assignment.
- */
-static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg) {
-
- rd_kafka_assignment_clear(rkcg->rkcg_rk);
-
- if (rkcg->rkcg_join_state ==
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) {
- rd_kafka_assignment_resume(rkcg->rkcg_rk, "unassign called");
- rd_kafka_cgrp_set_join_state(
- rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE);
- }
-
- rd_kafka_cgrp_assignment_clear_lost(rkcg, "unassign() called");
-
- return NULL;
-}
-
-
-/**
- * @brief Set new atomic partition assignment.
- * May update \p assignment but will not hold on to it.
- *
- * @returns NULL on success or an error if a fatal error has been raised.
- */
-static rd_kafka_error_t *
-rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *assignment) {
- rd_kafka_error_t *error;
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGN",
- "Group \"%s\": new assignment of %d partition(s) "
- "in join-state %s",
- rkcg->rkcg_group_id->str, assignment ? assignment->cnt : 0,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- /* Clear existing assignment, if any, and serve its removals. */
- if (rd_kafka_assignment_clear(rkcg->rkcg_rk))
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
-
- error = rd_kafka_assignment_add(rkcg->rkcg_rk, assignment);
- if (error)
- return error;
-
- rd_kafka_cgrp_assignment_clear_lost(rkcg, "assign() called");
-
- if (rkcg->rkcg_join_state ==
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) {
- rd_kafka_assignment_resume(rkcg->rkcg_rk, "assign called");
- rd_kafka_cgrp_set_join_state(rkcg,
- RD_KAFKA_CGRP_JOIN_STATE_STEADY);
-
- if (rkcg->rkcg_subscription) {
- /* If using subscribe(), start a timer to enforce
- * `max.poll.interval.ms`.
- * Instead of restarting the timer on each ...poll()
- * call, which would be costly (once per message),
- * set up an intervalled timer that checks a timestamp
- * (that is updated on each ...poll()).
- * The timer fires at 2 Hz (every 500ms). */
- rd_kafka_timer_start(
- &rkcg->rkcg_rk->rk_timers,
- &rkcg->rkcg_max_poll_interval_tmr,
- 500 * 1000ll /* 500ms */,
- rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg);
- }
- }
-
- return NULL;
-}
-
-
-
-/**
- * @brief Construct a typed map from list \p rktparlist with key corresponding
- * to each element in the list and value NULL.
- *
- * @remark \p rktparlist may be NULL.
- */
-static map_toppar_member_info_t *rd_kafka_toppar_list_to_toppar_member_info_map(
- rd_kafka_topic_partition_list_t *rktparlist) {
- map_toppar_member_info_t *map = rd_calloc(1, sizeof(*map));
- const rd_kafka_topic_partition_t *rktpar;
-
- RD_MAP_INIT(map, rktparlist ? rktparlist->cnt : 0,
- rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free,
- PartitionMemberInfo_free);
-
- if (!rktparlist)
- return map;
-
- RD_KAFKA_TPLIST_FOREACH(rktpar, rktparlist)
- RD_MAP_SET(map, rd_kafka_topic_partition_copy(rktpar),
- PartitionMemberInfo_new(NULL, rd_false));
-
- return map;
-}
-
-
-/**
- * @brief Construct a toppar list from map \p map with elements corresponding
- * to the keys of \p map.
- */
-static rd_kafka_topic_partition_list_t *
-rd_kafka_toppar_member_info_map_to_list(map_toppar_member_info_t *map) {
- const rd_kafka_topic_partition_t *k;
- rd_kafka_topic_partition_list_t *list =
- rd_kafka_topic_partition_list_new((int)RD_MAP_CNT(map));
-
- RD_MAP_FOREACH_KEY(k, map) {
- rd_kafka_topic_partition_list_add(list, k->topic, k->partition);
- }
-
- return list;
-}
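-
-/* Illustrative sketch (not part of librdkafka; disabled): the two helpers
- * above round-trip between a partition list and the typed map used by the
- * cooperative assignment diffing below. The helper name
- * `example_map_roundtrip` is hypothetical. */
-#if 0
-static void example_map_roundtrip(rd_kafka_topic_partition_list_t *plist) {
-        map_toppar_member_info_t *map =
-            rd_kafka_toppar_list_to_toppar_member_info_map(plist);
-        rd_kafka_topic_partition_list_t *copy =
-            rd_kafka_toppar_member_info_map_to_list(map);
-
-        /* `copy` now contains the same topic/partition keys as `plist`. */
-
-        rd_kafka_topic_partition_list_destroy(copy);
-        RD_MAP_DESTROY_AND_FREE(map);
-}
-#endif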
-
-
-/**
- * @brief Handle a rebalance-triggered partition assignment
- * (COOPERATIVE case).
- */
-static void rd_kafka_cgrp_handle_assignment_cooperative(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *assignment) {
- map_toppar_member_info_t *new_assignment_set;
- map_toppar_member_info_t *old_assignment_set;
- map_toppar_member_info_t *newly_added_set;
- map_toppar_member_info_t *revoked_set;
- rd_kafka_topic_partition_list_t *newly_added;
- rd_kafka_topic_partition_list_t *revoked;
-
- new_assignment_set =
- rd_kafka_toppar_list_to_toppar_member_info_map(assignment);
-
- old_assignment_set = rd_kafka_toppar_list_to_toppar_member_info_map(
- rkcg->rkcg_group_assignment);
-
- newly_added_set = rd_kafka_member_partitions_subtract(
- new_assignment_set, old_assignment_set);
- revoked_set = rd_kafka_member_partitions_subtract(old_assignment_set,
- new_assignment_set);
-
- newly_added = rd_kafka_toppar_member_info_map_to_list(newly_added_set);
- revoked = rd_kafka_toppar_member_info_map_to_list(revoked_set);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COOPASSIGN",
- "Group \"%s\": incremental assignment: %d newly added, "
- "%d revoked partitions based on assignment of %d "
- "partitions",
- rkcg->rkcg_group_id->str, newly_added->cnt, revoked->cnt,
- assignment->cnt);
-
- if (revoked->cnt > 0) {
- /* Setting rkcg_rebalance_incr_assignment causes a follow-on
- * incremental assign rebalance op after completion of this
- * incremental unassign op. */
-
- rkcg->rkcg_rebalance_incr_assignment = newly_added;
- newly_added = NULL;
-
- rd_kafka_rebalance_op_incr(
-     rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, revoked,
-     rd_false /*no rejoin following unassign*/,
-     "sync group revoke");
-
- } else {
- /* There are no revoked partitions - trigger the assign
- * rebalance op, and flag that the group does not need
- * to be re-joined */
-
- rd_kafka_rebalance_op_incr(
- rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, newly_added,
- rd_false /*no rejoin following assign*/,
- "sync group assign");
- }
-
- if (newly_added)
- rd_kafka_topic_partition_list_destroy(newly_added);
- rd_kafka_topic_partition_list_destroy(revoked);
- RD_MAP_DESTROY_AND_FREE(revoked_set);
- RD_MAP_DESTROY_AND_FREE(newly_added_set);
- RD_MAP_DESTROY_AND_FREE(old_assignment_set);
- RD_MAP_DESTROY_AND_FREE(new_assignment_set);
-}
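-
-/* Worked example (illustrative): with an old group assignment of
- * {A[0], A[1], B[0]} and a new SyncGroup assignment of {A[0], B[0], B[1]}:
- *   newly_added = new \ old = {B[1]}
- *   revoked     = old \ new = {A[1]}
- * Since revoked is non-empty, the incremental revoke op for {A[1]} is
- * served first and {B[1]} is parked in rkcg_rebalance_incr_assignment
- * until the unassign completes. */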
-
-
-/**
- * @brief Sets or clears the group's partition assignment for our consumer.
- *
- * Will replace the current group assignment, if any.
- */
-static void rd_kafka_cgrp_group_assignment_set(
- rd_kafka_cgrp_t *rkcg,
- const rd_kafka_topic_partition_list_t *partitions) {
-
- if (rkcg->rkcg_group_assignment)
- rd_kafka_topic_partition_list_destroy(
- rkcg->rkcg_group_assignment);
-
- if (partitions) {
- rkcg->rkcg_group_assignment =
- rd_kafka_topic_partition_list_copy(partitions);
- rd_kafka_topic_partition_list_sort_by_topic(
- rkcg->rkcg_group_assignment);
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT",
- "Group \"%s\": setting group assignment to %d "
- "partition(s)",
- rkcg->rkcg_group_id->str,
- rkcg->rkcg_group_assignment->cnt);
-
- } else {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT",
- "Group \"%s\": clearing group assignment",
- rkcg->rkcg_group_id->str);
- rkcg->rkcg_group_assignment = NULL;
- }
-
- rd_kafka_wrlock(rkcg->rkcg_rk);
- rkcg->rkcg_c.assignment_size =
- rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0;
- rd_kafka_wrunlock(rkcg->rkcg_rk);
-
- if (rkcg->rkcg_group_assignment)
- rd_kafka_topic_partition_list_log(
- rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP,
- rkcg->rkcg_group_assignment);
-}
-
-
-/**
- * @brief Adds or removes \p partitions from the current group assignment.
- *
- * @param add Whether to add or remove the partitions.
- *
- * @remark The added partitions must not already be on the group assignment,
- * and the removed partitions must be on the group assignment.
- *
- * To be used with incremental rebalancing.
- *
- */
-static void rd_kafka_cgrp_group_assignment_modify(
- rd_kafka_cgrp_t *rkcg,
- rd_bool_t add,
- const rd_kafka_topic_partition_list_t *partitions) {
- const rd_kafka_topic_partition_t *rktpar;
- int precnt;
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "ASSIGNMENT",
- "Group \"%s\": %d partition(s) being %s group assignment "
- "of %d partition(s)",
- rkcg->rkcg_group_id->str, partitions->cnt,
- add ? "added to" : "removed from",
- rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0);
-
- if (partitions == rkcg->rkcg_group_assignment) {
- /* \p partitions is the actual assignment, which
- * must mean it is all to be removed.
- * Short-cut directly to set(NULL). */
- rd_assert(!add);
- rd_kafka_cgrp_group_assignment_set(rkcg, NULL);
- return;
- }
-
- if (add && (!rkcg->rkcg_group_assignment ||
- rkcg->rkcg_group_assignment->cnt == 0)) {
- /* Adding to an empty assignment is a set operation. */
- rd_kafka_cgrp_group_assignment_set(rkcg, partitions);
- return;
- }
-
- if (!add) {
- /* Removing from an empty assignment is illegal. */
- rd_assert(rkcg->rkcg_group_assignment != NULL &&
- rkcg->rkcg_group_assignment->cnt > 0);
- }
-
-
- precnt = rkcg->rkcg_group_assignment->cnt;
- RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
- int idx;
-
- idx = rd_kafka_topic_partition_list_find_idx(
- rkcg->rkcg_group_assignment, rktpar->topic,
- rktpar->partition);
-
- if (add) {
- rd_assert(idx == -1);
-
- rd_kafka_topic_partition_list_add_copy(
- rkcg->rkcg_group_assignment, rktpar);
-
- } else {
- rd_assert(idx != -1);
-
- rd_kafka_topic_partition_list_del_by_idx(
- rkcg->rkcg_group_assignment, idx);
- }
- }
-
- if (add)
- rd_assert(precnt + partitions->cnt ==
- rkcg->rkcg_group_assignment->cnt);
- else
- rd_assert(precnt - partitions->cnt ==
- rkcg->rkcg_group_assignment->cnt);
-
- if (rkcg->rkcg_group_assignment->cnt == 0) {
- rd_kafka_topic_partition_list_destroy(
- rkcg->rkcg_group_assignment);
- rkcg->rkcg_group_assignment = NULL;
-
- } else if (add)
- rd_kafka_topic_partition_list_sort_by_topic(
- rkcg->rkcg_group_assignment);
-
- rd_kafka_wrlock(rkcg->rkcg_rk);
- rkcg->rkcg_c.assignment_size =
- rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0;
- rd_kafka_wrunlock(rkcg->rkcg_rk);
-
- if (rkcg->rkcg_group_assignment)
- rd_kafka_topic_partition_list_log(
- rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP,
- rkcg->rkcg_group_assignment);
-}
-
-
-/**
- * @brief Handle a rebalance-triggered partition assignment.
- *
- * If a rebalance_cb has been registered we enqueue an op for the app
- * and let the app perform the actual assign() call. Otherwise we
- * assign() directly from here.
- *
- * This provides the most flexibility, allowing the app to perform any
- * operation it sees fit (e.g., offset writes or reads) before actually
- * updating the assign():ment.
- */
-static void
-rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *assignment) {
-
- if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) {
- rd_kafka_cgrp_handle_assignment_cooperative(rkcg, assignment);
- } else {
-
- rd_kafka_rebalance_op(rkcg,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- assignment, "new assignment");
- }
-}
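-
-/* Illustrative sketch (not part of this file; disabled): the
- * application-side rebalance_cb that performs the assign call this
- * function enqueues an op for. Only public rdkafka.h API is used;
- * error objects returned by the incremental calls are ignored here
- * for brevity. */
-#if 0
-static void example_rebalance_cb(rd_kafka_t *rk,
-                                 rd_kafka_resp_err_t err,
-                                 rd_kafka_topic_partition_list_t *partitions,
-                                 void *opaque) {
-        if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
-                if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
-                        rd_kafka_incremental_assign(rk, partitions);
-                else /* RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS */
-                        rd_kafka_incremental_unassign(rk, partitions);
-        } else {
-                if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
-                        rd_kafka_assign(rk, partitions);
-                else
-                        rd_kafka_assign(rk, NULL);
-        }
-}
-#endif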
-
-
-/**
- * Clean up any group-leader related resources.
- *
- * Locality: cgrp thread
- */
-static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg,
- const char *reason) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPLEADER",
- "Group \"%.*s\": resetting group leader info: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
-
- if (rkcg->rkcg_group_leader.members) {
- int i;
-
- for (i = 0; i < rkcg->rkcg_group_leader.member_cnt; i++)
- rd_kafka_group_member_clear(
- &rkcg->rkcg_group_leader.members[i]);
- rkcg->rkcg_group_leader.member_cnt = 0;
- rd_free(rkcg->rkcg_group_leader.members);
- rkcg->rkcg_group_leader.members = NULL;
- }
-}
-
-
-/**
- * @brief React to a RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS broker response.
- */
-static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg) {
-
- if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_EAGER) {
- rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_false /*lost*/,
- rd_false /*initiating*/,
- "rebalance in progress");
- return;
- }
-
-
- /* In the COOPERATIVE case, simply rejoin the group
- * - partitions are unassigned on SyncGroup response,
- * not prior to JoinGroup as with the EAGER case. */
-
- if (RD_KAFKA_CGRP_REBALANCING(rkcg)) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE",
- "Group \"%.*s\": skipping "
- "COOPERATIVE rebalance in state %s "
- "(join-state %s)%s%s%s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)
- ? " (awaiting assign call)"
- : "",
- (rkcg->rkcg_rebalance_incr_assignment != NULL)
- ? " (incremental assignment pending)"
- : "",
- rkcg->rkcg_rebalance_rejoin ? " (rebalance rejoin)" : "");
- return;
- }
-
- rd_kafka_cgrp_rejoin(rkcg, "Group is rebalancing");
-}
-
-
-
-/**
- * @brief Triggers the application rebalance callback if required to
- *        revoke partitions, and transitions to INIT state for (eventual)
- *        rejoin. Does nothing if a rebalance workflow is already in
- *        progress.
- */
-static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg,
- rd_bool_t assignment_lost,
- rd_bool_t initiating,
- const char *reason) {
- if (RD_KAFKA_CGRP_REBALANCING(rkcg)) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE",
- "Group \"%.*s\": rebalance (%s) "
- "already in progress, skipping in state %s "
- "(join-state %s) with %d assigned partition(s)%s%s%s: "
- "%s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_rebalance_protocol2str(
- rd_kafka_cgrp_rebalance_protocol(rkcg)),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- rkcg->rkcg_group_assignment
- ? rkcg->rkcg_group_assignment->cnt
- : 0,
- assignment_lost ? " (lost)" : "",
- rkcg->rkcg_rebalance_incr_assignment
- ? ", incremental assignment in progress"
- : "",
- rkcg->rkcg_rebalance_rejoin ? ", rejoin on rebalance" : "",
- reason);
- return;
- }
-
- rd_kafka_cgrp_revoke_all_rejoin(rkcg, assignment_lost, initiating,
- reason);
-}
-
-
-/**
- * @brief Triggers the application rebalance callback if required to
- *        revoke partitions, and transitions to INIT state for (eventual)
- * rejoin.
- */
-static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg,
- rd_bool_t assignment_lost,
- rd_bool_t initiating,
- const char *reason) {
-
- rd_kafka_rebalance_protocol_t protocol =
- rd_kafka_cgrp_rebalance_protocol(rkcg);
-
- rd_bool_t terminating =
- unlikely(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE);
-
-
- rd_kafka_dbg(
- rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE",
- "Group \"%.*s\" %s (%s) in state %s (join-state %s) "
- "with %d assigned partition(s)%s: %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- initiating ? "initiating rebalance" : "is rebalancing",
- rd_kafka_rebalance_protocol2str(protocol),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0,
- assignment_lost ? " (lost)" : "", reason);
-
- rd_snprintf(rkcg->rkcg_c.rebalance_reason,
- sizeof(rkcg->rkcg_c.rebalance_reason), "%s", reason);
-
-
- if (protocol == RD_KAFKA_REBALANCE_PROTOCOL_EAGER ||
- protocol == RD_KAFKA_REBALANCE_PROTOCOL_NONE) {
- /* EAGER case (or initial subscribe) - revoke partitions, which
- * will be followed by a rejoin, if required. */
-
- if (assignment_lost)
- rd_kafka_cgrp_assignment_set_lost(
- rkcg, "%s: revoking assignment and rejoining",
- reason);
-
- /* Schedule application rebalance op if there is an existing
- * assignment (albeit perhaps empty) and there is no
- * outstanding rebalance op in progress. */
- if (rkcg->rkcg_group_assignment &&
- !RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)) {
- rd_kafka_rebalance_op(
- rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- rkcg->rkcg_group_assignment, reason);
- } else {
- /* Skip the join backoff */
- rd_interval_reset(&rkcg->rkcg_join_intvl);
-
- rd_kafka_cgrp_rejoin(rkcg, "%s", reason);
- }
-
- return;
- }
-
-
- /* COOPERATIVE case. */
-
- /* All partitions should never be revoked unless terminating, leaving
- * the group, or on assignment lost. Another scenario represents a
- * logic error. Fail fast in this case. */
- if (!(terminating || assignment_lost ||
- (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE))) {
- rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE",
- "Group \"%s\": unexpected instruction to revoke "
- "current assignment and rebalance "
- "(terminating=%d, assignment_lost=%d, "
- "LEAVE_ON_UNASSIGN_DONE=%d)",
- rkcg->rkcg_group_id->str, terminating,
- assignment_lost,
- (rkcg->rkcg_flags &
- RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE));
- rd_dassert(!*"BUG: unexpected instruction to revoke "
- "current assignment and rebalance");
- }
-
- if (rkcg->rkcg_group_assignment &&
- rkcg->rkcg_group_assignment->cnt > 0) {
- if (assignment_lost)
- rd_kafka_cgrp_assignment_set_lost(
- rkcg,
- "%s: revoking incremental assignment "
- "and rejoining",
- reason);
-
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
- "REBALANCE",
- "Group \"%.*s\": revoking "
- "all %d partition(s)%s%s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rkcg->rkcg_group_assignment->cnt,
- terminating ? " (terminating)" : "",
- assignment_lost ? " (assignment lost)" : "");
-
- rd_kafka_rebalance_op_incr(
- rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- rkcg->rkcg_group_assignment,
- terminating ? rd_false : rd_true /*rejoin*/, reason);
-
- return;
- }
-
- if (terminating) {
- /* If terminating, then don't rejoin group. */
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
- "REBALANCE",
- "Group \"%.*s\": consumer is "
- "terminating, skipping rejoin",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
- return;
- }
-
- rd_kafka_cgrp_rejoin(rkcg, "Current assignment is empty");
-}
-
-
-/**
- * @brief `max.poll.interval.ms` enforcement check timer.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void
-rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_cgrp_t *rkcg = arg;
- rd_kafka_t *rk = rkcg->rkcg_rk;
- int exceeded;
-
- exceeded = rd_kafka_max_poll_exceeded(rk);
-
- if (likely(!exceeded))
- return;
-
- rd_kafka_log(rk, LOG_WARNING, "MAXPOLL",
- "Application maximum poll interval (%dms) "
- "exceeded by %dms "
- "(adjust max.poll.interval.ms for "
- "long-running message processing): "
- "leaving group",
- rk->rk_conf.max_poll_interval_ms, exceeded);
-
- rd_kafka_consumer_err(rkcg->rkcg_q, RD_KAFKA_NODEID_UA,
- RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, 0, NULL,
- NULL, RD_KAFKA_OFFSET_INVALID,
- "Application maximum poll interval (%dms) "
- "exceeded by %dms",
- rk->rk_conf.max_poll_interval_ms, exceeded);
-
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED;
-
- rd_kafka_timer_stop(rkts, &rkcg->rkcg_max_poll_interval_tmr,
- 1 /*lock*/);
-
- /* Leave the group before calling rebalance since the standard leave
- * will be triggered first after the rebalance callback has been served.
- * But since the application is still blocked doing processing,
- * that leave would be further delayed.
- *
- * KIP-345: static group members should continue to respect
- * `max.poll.interval.ms` but should not send a LeaveGroupRequest.
- */
- if (!RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg))
- rd_kafka_cgrp_leave(rkcg);
-
- /* Timing out or leaving the group invalidates the member id; reset it
- * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */
- rd_kafka_cgrp_set_member_id(rkcg, "");
-
- /* Trigger rebalance */
- rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
- rd_true /*initiating*/,
- "max.poll.interval.ms exceeded");
-}
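-
-/* Illustrative sketch (not part of this file; disabled): what the
- * application must do to avoid tripping this check - call ...poll() at
- * least once per max.poll.interval.ms, even when message processing is
- * slow (or split the processing up). */
-#if 0
-void example_poll_loop(rd_kafka_t *rk) {
-        while (1) {
-                rd_kafka_message_t *rkm =
-                    rd_kafka_consumer_poll(rk, 100 /*ms*/);
-                if (!rkm)
-                        continue; /* each poll resets the interval */
-
-                /* Process the message in well under
-                 * max.poll.interval.ms. */
-                rd_kafka_message_destroy(rkm);
-        }
-}
-#endif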
-
-
-/**
- * @brief Generate consumer errors for each topic in the list.
- *
- * Also replaces the list of last reported topic errors so that repeated
- * errors are silenced.
- *
- * @param errored Errored topics.
- * @param error_prefix Error message prefix.
- *
- * @remark Assumes ownership of \p errored.
- */
-static void rd_kafka_propagate_consumer_topic_errors(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *errored,
- const char *error_prefix) {
- int i;
-
- for (i = 0; i < errored->cnt; i++) {
- rd_kafka_topic_partition_t *topic = &errored->elems[i];
- rd_kafka_topic_partition_t *prev;
-
- rd_assert(topic->err);
-
- /* Normalize error codes: unknown topic may be
- * reported by the broker, or the lack of a topic in
- * the metadata response may be detected by the client.
- * Make sure the application only sees one error code
- * for both these cases. */
- if (topic->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- topic->err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- /* Check if this topic errored previously */
- prev = rd_kafka_topic_partition_list_find(
- rkcg->rkcg_errored_topics, topic->topic,
- RD_KAFKA_PARTITION_UA);
-
- if (prev && prev->err == topic->err)
- continue; /* This topic already reported same error */
-
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_TOPIC,
- "TOPICERR", "%s: %s: %s", error_prefix,
- topic->topic, rd_kafka_err2str(topic->err));
-
- /* Send consumer error to application */
- rd_kafka_consumer_err(
- rkcg->rkcg_q, RD_KAFKA_NODEID_UA, topic->err, 0,
- topic->topic, NULL, RD_KAFKA_OFFSET_INVALID, "%s: %s: %s",
- error_prefix, topic->topic, rd_kafka_err2str(topic->err));
- }
-
- rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics);
- rkcg->rkcg_errored_topics = errored;
-}
-
-
-/**
- * @brief Work out the topics currently subscribed to that do not
- * match any pattern in \p subscription.
- */
-static rd_kafka_topic_partition_list_t *rd_kafka_cgrp_get_unsubscribing_topics(
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *subscription) {
- int i;
- rd_kafka_topic_partition_list_t *result;
-
- result = rd_kafka_topic_partition_list_new(
- rkcg->rkcg_subscribed_topics->rl_cnt);
-
- /* TODO: Something that isn't O(N*M) */
- for (i = 0; i < rkcg->rkcg_subscribed_topics->rl_cnt; i++) {
- int j;
- const char *topic =
- ((rd_kafka_topic_info_t *)
- rkcg->rkcg_subscribed_topics->rl_elems[i])
- ->topic;
-
- for (j = 0; j < subscription->cnt; j++) {
- const char *pattern = subscription->elems[j].topic;
- if (rd_kafka_topic_match(rkcg->rkcg_rk, pattern,
- topic)) {
- break;
- }
- }
-
- if (j == subscription->cnt)
- rd_kafka_topic_partition_list_add(
- result, topic, RD_KAFKA_PARTITION_UA);
- }
-
- if (result->cnt == 0) {
- rd_kafka_topic_partition_list_destroy(result);
- return NULL;
- }
-
- return result;
-}
-
-
-/**
- * @brief Determine the partitions to revoke, given the topics being
- * unassigned.
- */
-static rd_kafka_topic_partition_list_t *
-rd_kafka_cgrp_calculate_subscribe_revoking_partitions(
- rd_kafka_cgrp_t *rkcg,
- const rd_kafka_topic_partition_list_t *unsubscribing) {
- rd_kafka_topic_partition_list_t *revoking;
- const rd_kafka_topic_partition_t *rktpar;
-
- if (!unsubscribing)
- return NULL;
-
- if (!rkcg->rkcg_group_assignment ||
- rkcg->rkcg_group_assignment->cnt == 0)
- return NULL;
-
- revoking =
- rd_kafka_topic_partition_list_new(rkcg->rkcg_group_assignment->cnt);
-
- /* TODO: Something that isn't O(N*M). */
- RD_KAFKA_TPLIST_FOREACH(rktpar, unsubscribing) {
- const rd_kafka_topic_partition_t *assigned;
-
- RD_KAFKA_TPLIST_FOREACH(assigned, rkcg->rkcg_group_assignment) {
- if (!strcmp(assigned->topic, rktpar->topic)) {
- rd_kafka_topic_partition_list_add(
- revoking, assigned->topic,
- assigned->partition);
- continue;
- }
- }
- }
-
- if (revoking->cnt == 0) {
- rd_kafka_topic_partition_list_destroy(revoking);
- revoking = NULL;
- }
-
- return revoking;
-}
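-
-/* Worked example (illustrative): with a group assignment of
- * {A[0], B[0], B[1]} and `unsubscribing` = {B}, the result is
- * revoking = {B[0], B[1]}. If no assigned partition belongs to an
- * unsubscribed topic, NULL is returned instead of an empty list. */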
-
-
-/**
- * @brief Handle a new subscription that is modifying an existing subscription
- * in the COOPERATIVE case.
- *
- * @remark Assumes ownership of \p rktparlist.
- */
-static rd_kafka_resp_err_t
-rd_kafka_cgrp_modify_subscription(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *rktparlist) {
- rd_kafka_topic_partition_list_t *unsubscribing_topics;
- rd_kafka_topic_partition_list_t *revoking;
- rd_list_t *tinfos;
- rd_kafka_topic_partition_list_t *errored;
- int metadata_age;
- int old_cnt = rkcg->rkcg_subscription->cnt;
-
- rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION;
-
- if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0)
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION;
-
- /* Topics in rkcg_subscribed_topics that don't match any pattern in
- the new subscription. */
- unsubscribing_topics =
- rd_kafka_cgrp_get_unsubscribing_topics(rkcg, rktparlist);
-
- /* Currently assigned topic partitions that are no longer desired. */
- revoking = rd_kafka_cgrp_calculate_subscribe_revoking_partitions(
- rkcg, unsubscribing_topics);
-
- rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription);
- rkcg->rkcg_subscription = rktparlist;
-
- if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age,
- "modify subscription") == 1) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER,
- "MODSUB",
- "Group \"%.*s\": postponing join until "
- "up-to-date metadata is available",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
-
- rd_assert(
- rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT ||
- /* Possible via rd_kafka_cgrp_modify_subscription */
- rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY);
-
- rd_kafka_cgrp_set_join_state(
- rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
-
-
- /* Revoke/join will occur after metadata refresh completes */
- if (revoking)
- rd_kafka_topic_partition_list_destroy(revoking);
- if (unsubscribing_topics)
- rd_kafka_topic_partition_list_destroy(
- unsubscribing_topics);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE",
- "Group \"%.*s\": modifying subscription of size %d to "
- "new subscription of size %d, removing %d topic(s), "
- "revoking %d partition(s) (join-state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), old_cnt,
- rkcg->rkcg_subscription->cnt,
- unsubscribing_topics ? unsubscribing_topics->cnt : 0,
- revoking ? revoking->cnt : 0,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- if (unsubscribing_topics)
- rd_kafka_topic_partition_list_destroy(unsubscribing_topics);
-
- /* Create a list of the topics in metadata that match the new
- * subscription */
- tinfos = rd_list_new(rkcg->rkcg_subscription->cnt,
- (void *)rd_kafka_topic_info_destroy);
-
- /* Unmatched topics will be added to the errored list. */
- errored = rd_kafka_topic_partition_list_new(0);
-
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION)
- rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos,
- rkcg->rkcg_subscription, errored);
- else
- rd_kafka_metadata_topic_filter(
- rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored);
-
- /* Propagate consumer errors for any non-existent or errored topics.
- * The function takes ownership of errored. */
- rd_kafka_propagate_consumer_topic_errors(
- rkcg, errored, "Subscribed topic not available");
-
- if (rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos) && !revoking) {
- rd_kafka_cgrp_rejoin(rkcg, "Subscription modified");
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (revoking) {
- rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
- "REBALANCE",
- "Group \"%.*s\" revoking "
- "%d of %d partition(s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- revoking->cnt, rkcg->rkcg_group_assignment->cnt);
-
- rd_kafka_rebalance_op_incr(
- rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, revoking,
- rd_true /*rejoin*/, "subscribe");
-
- rd_kafka_topic_partition_list_destroy(revoking);
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Remove existing topic subscription.
- */
-static rd_kafka_resp_err_t rd_kafka_cgrp_unsubscribe(rd_kafka_cgrp_t *rkcg,
- rd_bool_t leave_group) {
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNSUBSCRIBE",
- "Group \"%.*s\": unsubscribe from current %ssubscription "
- "of size %d (leave group=%s, has joined=%s, %s, "
- "join-state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rkcg->rkcg_subscription ? "" : "unset ",
- rkcg->rkcg_subscription ? rkcg->rkcg_subscription->cnt : 0,
- RD_STR_ToF(leave_group),
- RD_STR_ToF(RD_KAFKA_CGRP_HAS_JOINED(rkcg)),
- rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : "n/a",
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers,
- &rkcg->rkcg_max_poll_interval_tmr, 1 /*lock*/);
-
- if (rkcg->rkcg_subscription) {
- rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription);
- rkcg->rkcg_subscription = NULL;
- }
-
- rd_kafka_cgrp_update_subscribed_topics(rkcg, NULL);
-
- /*
- * Clean-up group leader duties, if any.
- */
- rd_kafka_cgrp_group_leader_reset(rkcg, "unsubscribe");
-
- if (leave_group && RD_KAFKA_CGRP_HAS_JOINED(rkcg))
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE;
-
- /* FIXME: Why are we only revoking if !assignment_lost ? */
- if (!rd_kafka_cgrp_assignment_is_lost(rkcg))
- rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/,
- rd_true /*initiating*/,
- "unsubscribe");
-
- rkcg->rkcg_flags &= ~(RD_KAFKA_CGRP_F_SUBSCRIPTION |
- RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Set new atomic topic subscription.
- */
-static rd_kafka_resp_err_t
-rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg,
- rd_kafka_topic_partition_list_t *rktparlist) {
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE",
- "Group \"%.*s\": subscribe to new %ssubscription "
- "of %d topics (join-state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rktparlist ? "" : "unset ",
- rktparlist ? rktparlist->cnt : 0,
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- if (rkcg->rkcg_rk->rk_conf.enabled_assignor_cnt == 0)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- /* If the consumer has raised a fatal error, treat all subscribes as
- unsubscribes */
- if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
- if (rkcg->rkcg_subscription)
- rd_kafka_cgrp_unsubscribe(rkcg,
- rd_true /*leave group*/);
- return RD_KAFKA_RESP_ERR__FATAL;
- }
-
- /* Clear any existing postponed subscribe. */
- if (rkcg->rkcg_next_subscription)
- rd_kafka_topic_partition_list_destroy_free(
- rkcg->rkcg_next_subscription);
- rkcg->rkcg_next_subscription = NULL;
- rkcg->rkcg_next_unsubscribe = rd_false;
-
- if (RD_KAFKA_CGRP_REBALANCING(rkcg)) {
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE",
- "Group \"%.*s\": postponing "
- "subscribe until previous rebalance "
- "completes (join-state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- if (!rktparlist)
- rkcg->rkcg_next_unsubscribe = rd_true;
- else
- rkcg->rkcg_next_subscription = rktparlist;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
- rktparlist && rkcg->rkcg_subscription)
- return rd_kafka_cgrp_modify_subscription(rkcg, rktparlist);
-
- /* Remove existing subscription first */
- if (rkcg->rkcg_subscription)
- rd_kafka_cgrp_unsubscribe(
- rkcg,
- rktparlist
- ? rd_false /* don't leave group if new subscription */
- : rd_true /* leave group if no new subscription */);
-
- if (!rktparlist)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_SUBSCRIPTION;
-
- if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0)
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION;
-
- rkcg->rkcg_subscription = rktparlist;
-
- rd_kafka_cgrp_join(rkcg);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * Same as cgrp_terminate() but called from the cgrp/main thread upon receiving
- * the op 'rko' from cgrp_terminate().
- *
- * NOTE: Takes ownership of 'rko'
- *
- * Locality: main thread
- */
-void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) {
-
- rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread));
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
- "Terminating group \"%.*s\" in state %s "
- "with %d partition(s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_list_cnt(&rkcg->rkcg_toppars));
-
- if (unlikely(rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM ||
- (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) ||
- rkcg->rkcg_reply_rko != NULL)) {
- /* Already terminating or handling a previous terminate */
- if (rko) {
- rd_kafka_q_t *rkq = rko->rko_replyq.q;
- rko->rko_replyq.q = NULL;
- rd_kafka_consumer_err(
- rkq, RD_KAFKA_NODEID_UA,
- RD_KAFKA_RESP_ERR__IN_PROGRESS,
- rko->rko_replyq.version, NULL, NULL,
- RD_KAFKA_OFFSET_INVALID, "Group is %s",
- rkcg->rkcg_reply_rko ? "terminating"
- : "terminated");
- rd_kafka_q_destroy(rkq);
- rd_kafka_op_destroy(rko);
- }
- return;
- }
-
- /* Mark for stopping, the actual state transition
- * is performed when all toppars have left. */
- rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_TERMINATE;
- rkcg->rkcg_ts_terminate = rd_clock();
- rkcg->rkcg_reply_rko = rko;
-
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION)
- rd_kafka_cgrp_unsubscribe(
- rkcg,
- /* Leave group if this is a controlled shutdown */
- !rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk));
-
- /* Reset the wait-for-LeaveGroup flag if there is an outstanding
- * LeaveGroupRequest being waited on (from a prior unsubscribe), but
- * the destroy flags have NO_CONSUMER_CLOSE set, which calls
- * for immediate termination. */
- if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk))
- rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE;
-
- /* If there's an outstanding rebalance which has not yet been
- * served by the application it will be served from consumer_close().
- * If the instance is being terminated with NO_CONSUMER_CLOSE we
- * trigger unassign directly to avoid stalling on rebalance callback
- * queues that are no longer served by the application. */
- if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ||
- rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk))
- rd_kafka_cgrp_unassign(rkcg);
-
- /* Serve assignment so it can start to decommission */
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
-
- /* Try to terminate right away if all preconditions are met. */
- rd_kafka_cgrp_try_terminate(rkcg);
-}
-
-
-/**
- * Terminate and decommission a cgrp asynchronously.
- *
- * Locality: any thread
- */
-void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq) {
- rd_kafka_assert(NULL, !thrd_is_current(rkcg->rkcg_rk->rk_thread));
- rd_kafka_cgrp_op(rkcg, NULL, replyq, RD_KAFKA_OP_TERMINATE, 0);
-}
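-
-/*
- * Illustrative flow of the asynchronous terminate (all steps are in
- * this file):
- *
- *   caller thread (not the main thread, per the assert above):
- *       rd_kafka_cgrp_terminate()
- *           -> rd_kafka_cgrp_op() enqueues RD_KAFKA_OP_TERMINATE on
- *              rkcg_ops
- *   main thread:
- *       rd_kafka_cgrp_op_serve()
- *           -> case RD_KAFKA_OP_TERMINATE:
- *                  rd_kafka_cgrp_terminate0(rkcg, rko)
- */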
-
-
-struct _op_timeout_offset_commit {
- rd_ts_t now;
- rd_kafka_t *rk;
- rd_list_t expired;
-};
-
-/**
- * q_filter callback for expiring OFFSET_COMMIT timeouts.
- */
-static int rd_kafka_op_offset_commit_timeout_check(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- void *opaque) {
- struct _op_timeout_offset_commit *state =
- (struct _op_timeout_offset_commit *)opaque;
-
- if (likely(rko->rko_type != RD_KAFKA_OP_OFFSET_COMMIT ||
- rko->rko_u.offset_commit.ts_timeout == 0 ||
- rko->rko_u.offset_commit.ts_timeout > state->now)) {
- return 0;
- }
-
- rd_kafka_q_deq0(rkq, rko);
-
- /* Add to temporary list to avoid recursive
- * locking of rkcg_wait_coord_q. */
- rd_list_add(&state->expired, rko);
- return 1;
-}
-
-
-/**
- * Scan for various timeouts.
- */
-static void rd_kafka_cgrp_timeout_scan(rd_kafka_cgrp_t *rkcg, rd_ts_t now) {
- struct _op_timeout_offset_commit ofc_state;
- int i, cnt = 0;
- rd_kafka_op_t *rko;
-
- ofc_state.now = now;
- ofc_state.rk = rkcg->rkcg_rk;
- rd_list_init(&ofc_state.expired, 0, NULL);
-
- cnt += rd_kafka_q_apply(rkcg->rkcg_wait_coord_q,
- rd_kafka_op_offset_commit_timeout_check,
- &ofc_state);
-
- RD_LIST_FOREACH(rko, &ofc_state.expired, i)
- rd_kafka_cgrp_op_handle_OffsetCommit(rkcg->rkcg_rk, NULL,
- RD_KAFKA_RESP_ERR__WAIT_COORD,
- NULL, NULL, rko);
-
- rd_list_destroy(&ofc_state.expired);
-
- if (cnt > 0)
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTIMEOUT",
- "Group \"%.*s\": timed out %d op(s), %d remain",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), cnt,
- rd_kafka_q_len(rkcg->rkcg_wait_coord_q));
-}
-
-
-/**
- * @brief Handle an assign op.
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_cgrp_handle_assign_op(rd_kafka_cgrp_t *rkcg,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error = NULL;
-
- if (rd_kafka_fatal_error_code(rkcg->rkcg_rk) ||
- rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) {
- /* Treat all assignments as unassign when a fatal error is
- * raised or the cgrp is terminating. */
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER,
- "ASSIGN",
- "Group \"%s\": Consumer %s: "
- "treating assign as unassign",
- rkcg->rkcg_group_id->str,
- rd_kafka_fatal_error_code(rkcg->rkcg_rk)
- ? "has raised a fatal error"
- : "is terminating");
-
- if (rko->rko_u.assign.partitions) {
- rd_kafka_topic_partition_list_destroy(
- rko->rko_u.assign.partitions);
- rko->rko_u.assign.partitions = NULL;
- }
- rko->rko_u.assign.method = RD_KAFKA_ASSIGN_METHOD_ASSIGN;
-
- } else if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
- !(rko->rko_u.assign.method ==
- RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN ||
- rko->rko_u.assign.method ==
- RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN))
- error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE,
- "Changes to the current assignment "
- "must be made using "
- "incremental_assign() or "
- "incremental_unassign() "
- "when rebalance protocol type is "
- "COOPERATIVE");
-
- else if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_EAGER &&
- !(rko->rko_u.assign.method == RD_KAFKA_ASSIGN_METHOD_ASSIGN))
- error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE,
- "Changes to the current assignment "
- "must be made using "
- "assign() when rebalance "
- "protocol type is EAGER");
-
- if (!error) {
- switch (rko->rko_u.assign.method) {
- case RD_KAFKA_ASSIGN_METHOD_ASSIGN:
- /* New atomic assignment (partitions != NULL),
- * or unassignment (partitions == NULL) */
- if (rko->rko_u.assign.partitions)
- error = rd_kafka_cgrp_assign(
- rkcg, rko->rko_u.assign.partitions);
- else
- error = rd_kafka_cgrp_unassign(rkcg);
- break;
- case RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN:
- error = rd_kafka_cgrp_incremental_assign(
- rkcg, rko->rko_u.assign.partitions);
- break;
- case RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN:
- error = rd_kafka_cgrp_incremental_unassign(
- rkcg, rko->rko_u.assign.partitions);
- break;
- default:
- RD_NOTREACHED();
- break;
- }
-
- /* If call succeeded serve the assignment */
- if (!error)
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
- }
-
- if (error) {
- /* Log error since caller might not check
- * *assign() return value. */
- rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "ASSIGN",
- "Group \"%s\": application *assign() call "
- "failed: %s",
- rkcg->rkcg_group_id->str,
- rd_kafka_error_string(error));
- }
-
- rd_kafka_op_error_reply(rko, error);
-}
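-
-/*
- * Illustrative method/protocol matrix enforced above (normal path,
- * i.e. no fatal error and not terminating):
- *
- *   protocol      assign()            incremental_(un)assign()
- *   EAGER         allowed             rejected (__STATE)
- *   COOPERATIVE   rejected (__STATE)  allowed
- */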
-
-
-/**
- * @brief Handle cgrp queue op.
- * @locality rdkafka main thread
- * @locks none
- */
-static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = opaque;
- rd_kafka_toppar_t *rktp;
- rd_kafka_resp_err_t err;
- const int silent_op = rko->rko_type == RD_KAFKA_OP_RECV_BUF;
-
- rktp = rko->rko_rktp;
-
- if (rktp && !silent_op)
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "CGRPOP",
- "Group \"%.*s\" received op %s in state %s "
- "(join-state %s) for %.*s [%" PRId32 "]",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_op2str(rko->rko_type),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
- else if (!silent_op)
- rd_kafka_dbg(
- rkcg->rkcg_rk, CGRP, "CGRPOP",
- "Group \"%.*s\" received op %s in state %s "
- "(join-state %s)",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_op2str(rko->rko_type),
- rd_kafka_cgrp_state_names[rkcg->rkcg_state],
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- switch ((int)rko->rko_type) {
- case RD_KAFKA_OP_NAME:
- /* Return the currently assigned member id. */
- if (rkcg->rkcg_member_id)
- rko->rko_u.name.str =
- RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id);
- rd_kafka_op_reply(rko, 0);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_CG_METADATA:
- /* Return the current consumer group metadata. */
- rko->rko_u.cg_metadata =
- rkcg->rkcg_member_id
- ? rd_kafka_consumer_group_metadata_new_with_genid(
- rkcg->rkcg_rk->rk_conf.group_id_str,
- rkcg->rkcg_generation_id,
- rkcg->rkcg_member_id->str,
- rkcg->rkcg_rk->rk_conf.group_instance_id)
- : NULL;
- rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_OFFSET_FETCH:
- if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP ||
- (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) {
- rd_kafka_op_handle_OffsetFetch(
- rkcg->rkcg_rk, NULL, RD_KAFKA_RESP_ERR__WAIT_COORD,
- NULL, NULL, rko);
- rko = NULL; /* rko freed by handler */
- break;
- }
-
- rd_kafka_OffsetFetchRequest(
- rkcg->rkcg_coord, rk->rk_group_id->str,
- rko->rko_u.offset_fetch.partitions,
- rko->rko_u.offset_fetch.require_stable_offsets,
- 0, /* Timeout */
- RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
- rd_kafka_op_handle_OffsetFetch, rko);
- rko = NULL; /* rko now owned by request */
- break;
-
- case RD_KAFKA_OP_PARTITION_JOIN:
- rd_kafka_cgrp_partition_add(rkcg, rktp);
-
- /* If terminating tell the partition to leave */
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
- rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_NO_REPLYQ);
- break;
-
- case RD_KAFKA_OP_PARTITION_LEAVE:
- rd_kafka_cgrp_partition_del(rkcg, rktp);
- break;
-
- case RD_KAFKA_OP_OFFSET_COMMIT:
- /* Trigger offsets commit. */
- rd_kafka_cgrp_offsets_commit(rkcg, rko,
- /* only set offsets
- * if no partitions were
- * specified. */
- rko->rko_u.offset_commit.partitions
- ? 0
- : 1 /* set_offsets*/,
- rko->rko_u.offset_commit.reason);
- rko = NULL; /* rko now owned by request */
- break;
-
- case RD_KAFKA_OP_COORD_QUERY:
- rd_kafka_cgrp_coord_query(
- rkcg,
- rko->rko_err ? rd_kafka_err2str(rko->rko_err) : "from op");
- break;
-
- case RD_KAFKA_OP_SUBSCRIBE:
- rd_kafka_app_polled(rk);
-
- /* New atomic subscription (may be NULL) */
- err =
- rd_kafka_cgrp_subscribe(rkcg, rko->rko_u.subscribe.topics);
-
- if (!err) /* now owned by rkcg */
- rko->rko_u.subscribe.topics = NULL;
-
- rd_kafka_op_reply(rko, err);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_ASSIGN:
- rd_kafka_cgrp_handle_assign_op(rkcg, rko);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_GET_SUBSCRIPTION:
- if (rkcg->rkcg_next_subscription)
- rko->rko_u.subscribe.topics =
- rd_kafka_topic_partition_list_copy(
- rkcg->rkcg_next_subscription);
- else if (rkcg->rkcg_next_unsubscribe)
- rko->rko_u.subscribe.topics = NULL;
- else if (rkcg->rkcg_subscription)
- rko->rko_u.subscribe.topics =
- rd_kafka_topic_partition_list_copy(
- rkcg->rkcg_subscription);
- rd_kafka_op_reply(rko, 0);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_GET_ASSIGNMENT:
- /* This is the consumer assignment, not the group assignment. */
- rko->rko_u.assign.partitions =
- rd_kafka_topic_partition_list_copy(
- rkcg->rkcg_rk->rk_consumer.assignment.all);
-
- rd_kafka_op_reply(rko, 0);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_GET_REBALANCE_PROTOCOL:
- rko->rko_u.rebalance_protocol.str =
- rd_kafka_rebalance_protocol2str(
- rd_kafka_cgrp_rebalance_protocol(rkcg));
- rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
- rko = NULL;
- break;
-
- case RD_KAFKA_OP_TERMINATE:
- rd_kafka_cgrp_terminate0(rkcg, rko);
- rko = NULL; /* terminate0() takes ownership */
- break;
-
- default:
- rd_kafka_assert(rkcg->rkcg_rk, !*"unknown type");
- break;
- }
-
- if (rko)
- rd_kafka_op_destroy(rko);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @returns true if the session timeout has expired (no successful
- *          Heartbeat response within session.timeout.ms); expiry revokes
- *          the current assignment and triggers a rebalance.
- */
-static rd_bool_t rd_kafka_cgrp_session_timeout_check(rd_kafka_cgrp_t *rkcg,
- rd_ts_t now) {
- rd_ts_t delta;
- char buf[256];
-
- if (unlikely(!rkcg->rkcg_ts_session_timeout))
- return rd_true; /* Session has expired */
-
- delta = now - rkcg->rkcg_ts_session_timeout;
- if (likely(delta < 0))
- return rd_false;
-
- delta += rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000;
-
- rd_snprintf(buf, sizeof(buf),
- "Consumer group session timed out (in join-state %s) after "
- "%" PRId64
- " ms without a successful response from the "
- "group coordinator (broker %" PRId32 ", last error was %s)",
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
- delta / 1000, rkcg->rkcg_coord_id,
- rd_kafka_err2str(rkcg->rkcg_last_heartbeat_err));
-
- rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "SESSTMOUT",
- "%s: revoking assignment and rejoining group", buf);
-
- /* Prevent further rebalances */
- rkcg->rkcg_ts_session_timeout = 0;
-
- /* Timing out invalidates the member id, reset it
- * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */
- rd_kafka_cgrp_set_member_id(rkcg, "");
-
- /* Revoke and rebalance */
- rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
- rd_true /*initiating*/, buf);
-
- return rd_true;
-}
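-
-/*
- * Example (illustrative): with session.timeout.ms=45000,
- * rkcg_ts_session_timeout holds the absolute expiry time T. A call at
- * now = T + 5s gives delta = 5s >= 0, so the session has expired; after
- * delta += 45s the log reports 50000 ms, i.e. the time since the last
- * successful Heartbeat response (which armed the expiry at T).
- */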
-
-
-/**
- * @brief Apply the next waiting subscribe/unsubscribe, if any.
- */
-static void rd_kafka_cgrp_apply_next_subscribe(rd_kafka_cgrp_t *rkcg) {
- rd_assert(rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT);
-
- if (rkcg->rkcg_next_subscription) {
- rd_kafka_topic_partition_list_t *next_subscription =
- rkcg->rkcg_next_subscription;
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE",
- "Group \"%s\": invoking waiting postponed "
- "subscribe",
- rkcg->rkcg_group_id->str);
- rkcg->rkcg_next_subscription = NULL;
- rd_kafka_cgrp_subscribe(rkcg, next_subscription);
-
- } else if (rkcg->rkcg_next_unsubscribe) {
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE",
- "Group \"%s\": invoking waiting postponed "
- "unsubscribe",
- rkcg->rkcg_group_id->str);
- rkcg->rkcg_next_unsubscribe = rd_false;
- rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*Leave*/);
- }
-}
-
-/**
- * Client group's join state handling
- */
-static void rd_kafka_cgrp_join_state_serve(rd_kafka_cgrp_t *rkcg) {
- rd_ts_t now = rd_clock();
-
- if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk)))
- return;
-
- switch (rkcg->rkcg_join_state) {
- case RD_KAFKA_CGRP_JOIN_STATE_INIT:
- if (unlikely(rd_kafka_cgrp_awaiting_response(rkcg)))
- break;
-
- /* If there is a next subscription, apply it. */
- rd_kafka_cgrp_apply_next_subscribe(rkcg);
-
- /* If we have a subscription start the join process. */
- if (!rkcg->rkcg_subscription)
- break;
-
- if (rd_interval_immediate(&rkcg->rkcg_join_intvl, 1000 * 1000,
- now) > 0)
- rd_kafka_cgrp_join(rkcg);
- break;
-
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN:
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA:
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC:
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE:
- /* FIXME: I think we might have to send heartbeats
- * in WAIT_INCR_UNASSIGN, yes-no? */
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE:
- break;
-
- case RD_KAFKA_CGRP_JOIN_STATE_STEADY:
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL:
- case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL:
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION &&
- rd_interval(
- &rkcg->rkcg_heartbeat_intvl,
- rkcg->rkcg_rk->rk_conf.group_heartbeat_intvl_ms * 1000,
- now) > 0)
- rd_kafka_cgrp_heartbeat(rkcg);
- break;
- }
-}
-/**
- * Client group handling.
- * Called from main thread to serve the operational aspects of a cgrp.
- */
-void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg) {
- rd_kafka_broker_t *rkb = rkcg->rkcg_coord;
- int rkb_state = RD_KAFKA_BROKER_STATE_INIT;
- rd_ts_t now;
-
- if (rkb) {
- rd_kafka_broker_lock(rkb);
- rkb_state = rkb->rkb_state;
- rd_kafka_broker_unlock(rkb);
-
- /* Go back to querying state if we lost the current coordinator
- * connection. */
- if (rkb_state < RD_KAFKA_BROKER_STATE_UP &&
- rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP)
- rd_kafka_cgrp_set_state(
- rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
- }
-
- now = rd_clock();
-
- /* Check for cgrp termination */
- if (unlikely(rd_kafka_cgrp_try_terminate(rkcg))) {
- rd_kafka_cgrp_terminated(rkcg);
- return; /* cgrp terminated */
- }
-
- /* Bail out if we're terminating. */
- if (unlikely(rd_kafka_terminating(rkcg->rkcg_rk)))
- return;
-
- /* Check session timeout regardless of current coordinator
- * connection state (rkcg_state) */
- if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY)
- rd_kafka_cgrp_session_timeout_check(rkcg, now);
-
-retry:
- switch (rkcg->rkcg_state) {
- case RD_KAFKA_CGRP_STATE_TERM:
- break;
-
- case RD_KAFKA_CGRP_STATE_INIT:
- rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
- /* FALLTHRU */
-
- case RD_KAFKA_CGRP_STATE_QUERY_COORD:
- /* Query for coordinator. */
- if (rd_interval_immediate(&rkcg->rkcg_coord_query_intvl,
- 500 * 1000, now) > 0)
- rd_kafka_cgrp_coord_query(rkcg,
- "intervaled in "
- "state query-coord");
- break;
-
- case RD_KAFKA_CGRP_STATE_WAIT_COORD:
- /* Waiting for FindCoordinator response */
- break;
-
- case RD_KAFKA_CGRP_STATE_WAIT_BROKER:
- /* See if the group should be reassigned to another broker. */
- if (rd_kafka_cgrp_coord_update(rkcg, rkcg->rkcg_coord_id))
- goto retry; /* Coordinator changed, retry state-machine
- * to speed up next transition. */
-
- /* Coordinator query */
- if (rd_interval(&rkcg->rkcg_coord_query_intvl, 1000 * 1000,
- now) > 0)
- rd_kafka_cgrp_coord_query(rkcg,
- "intervaled in "
- "state wait-broker");
- break;
-
- case RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT:
- /* Waiting for broker transport to come up.
- * Also make sure broker supports groups. */
- if (rkb_state < RD_KAFKA_BROKER_STATE_UP || !rkb ||
- !rd_kafka_broker_supports(
- rkb, RD_KAFKA_FEATURE_BROKER_GROUP_COORD)) {
- /* Coordinator query */
- if (rd_interval(&rkcg->rkcg_coord_query_intvl,
- 1000 * 1000, now) > 0)
- rd_kafka_cgrp_coord_query(
- rkcg,
- "intervaled in state "
- "wait-broker-transport");
-
- } else {
- rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_UP);
-
- /* Serve join state to trigger (re)join */
- rd_kafka_cgrp_join_state_serve(rkcg);
-
- /* Serve any pending partitions in the assignment */
- rd_kafka_assignment_serve(rkcg->rkcg_rk);
- }
- break;
-
- case RD_KAFKA_CGRP_STATE_UP:
- /* Move any ops awaiting the coordinator to the ops queue
- * for reprocessing. */
- rd_kafka_q_concat(rkcg->rkcg_ops, rkcg->rkcg_wait_coord_q);
-
- /* Relaxed coordinator queries. */
- if (rd_interval(&rkcg->rkcg_coord_query_intvl,
- rkcg->rkcg_rk->rk_conf.coord_query_intvl_ms *
- 1000,
- now) > 0)
- rd_kafka_cgrp_coord_query(rkcg,
- "intervaled in state up");
-
- rd_kafka_cgrp_join_state_serve(rkcg);
- break;
- }
-
- if (unlikely(rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP &&
- rd_interval(&rkcg->rkcg_timeout_scan_intvl, 1000 * 1000,
- now) > 0))
- rd_kafka_cgrp_timeout_scan(rkcg, now);
-}
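-
-/*
- * Illustrative coordinator state progression served above (some of the
- * transitions are performed by response handlers elsewhere in this
- * file):
- *
- *   INIT -> QUERY_COORD -> WAIT_COORD -> WAIT_BROKER
- *        -> WAIT_BROKER_TRANSPORT -> UP
- *
- * with a fallback from UP to QUERY_COORD whenever the coordinator
- * connection is lost (checked at the top of the function).
- */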
-
-
-
-/**
- * Send an op to a cgrp.
- *
- * Locality: any thread
- */
-void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg,
- rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq,
- rd_kafka_op_type_t type,
- rd_kafka_resp_err_t err) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(type);
- rko->rko_err = err;
- rko->rko_replyq = replyq;
-
- if (rktp)
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_q_enq(rkcg->rkcg_ops, rko);
-}
-
-
-
-void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id) {
- if (rkcg->rkcg_member_id && member_id &&
- !rd_kafkap_str_cmp_str(rkcg->rkcg_member_id, member_id))
- return; /* No change */
-
- rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "MEMBERID",
- "Group \"%.*s\": updating member id \"%s\" -> \"%s\"",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str
- : "(not-set)",
- member_id ? member_id : "(not-set)");
-
- if (rkcg->rkcg_member_id) {
- rd_kafkap_str_destroy(rkcg->rkcg_member_id);
- rkcg->rkcg_member_id = NULL;
- }
-
- if (member_id)
- rkcg->rkcg_member_id = rd_kafkap_str_new(member_id, -1);
-}
-
-
-/**
- * @brief Determine owned partitions that no longer exist (partitions in
- * deleted or re-created topics).
- */
-static rd_kafka_topic_partition_list_t *
-rd_kafka_cgrp_owned_but_not_exist_partitions(rd_kafka_cgrp_t *rkcg) {
- rd_kafka_topic_partition_list_t *result = NULL;
- const rd_kafka_topic_partition_t *curr;
-
- if (!rkcg->rkcg_group_assignment)
- return NULL;
-
- RD_KAFKA_TPLIST_FOREACH(curr, rkcg->rkcg_group_assignment) {
- if (rd_list_find(rkcg->rkcg_subscribed_topics, curr->topic,
- rd_kafka_topic_info_topic_cmp))
- continue;
-
- if (!result)
- result = rd_kafka_topic_partition_list_new(
- rkcg->rkcg_group_assignment->cnt);
-
- rd_kafka_topic_partition_list_add_copy(result, curr);
- }
-
- return result;
-}
-
-
-/**
- * @brief Check if the latest metadata affects the current subscription:
- * - matched topic added
- * - matched topic removed
- * - matched topic's partition count change
- *
- * @locks none
- * @locality rdkafka main thread
- */
-void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg,
- rd_bool_t do_join) {
- rd_list_t *tinfos;
- rd_kafka_topic_partition_list_t *errored;
- rd_bool_t changed;
-
- rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread));
-
- if (!rkcg->rkcg_subscription || rkcg->rkcg_subscription->cnt == 0)
- return;
-
- /*
- * Unmatched topics will be added to the errored list.
- */
- errored = rd_kafka_topic_partition_list_new(0);
-
- /*
- * Create a list of the topics in metadata that matches our subscription
- */
- tinfos = rd_list_new(rkcg->rkcg_subscription->cnt,
- (void *)rd_kafka_topic_info_destroy);
-
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION)
- rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos,
- rkcg->rkcg_subscription, errored);
- else
- rd_kafka_metadata_topic_filter(
- rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored);
-
-
- /*
- * Propagate consumer errors for any non-existent or errored topics.
- * The function takes ownership of errored.
- */
- rd_kafka_propagate_consumer_topic_errors(
- rkcg, errored, "Subscribed topic not available");
-
- /*
- * Update effective list of topics (takes ownership of \c tinfos)
- */
- changed = rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos);
-
- if (!do_join ||
- (!changed &&
- /* If we get the same effective list of topics as last time around,
- * but the join is waiting for this metadata query to complete,
- * then we should not return here but follow through with the
- * (re)join below. */
- rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA))
- return;
-
- /* List of subscribed topics changed, trigger rejoin. */
- rd_kafka_dbg(rkcg->rkcg_rk,
- CGRP | RD_KAFKA_DBG_METADATA | RD_KAFKA_DBG_CONSUMER,
- "REJOIN",
- "Group \"%.*s\": "
- "subscription updated from metadata change: "
- "rejoining group in state %s",
- RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
- rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
-
- if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) {
-
- /* Partitions from deleted topics */
- rd_kafka_topic_partition_list_t *owned_but_not_exist =
- rd_kafka_cgrp_owned_but_not_exist_partitions(rkcg);
-
- if (owned_but_not_exist) {
- rd_kafka_cgrp_assignment_set_lost(
- rkcg, "%d subscribed topic(s) no longer exist",
- owned_but_not_exist->cnt);
-
- rd_kafka_rebalance_op_incr(
- rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- owned_but_not_exist,
- /* Rejoin group following revoke's
- * unassign if we are leader */
- rkcg->rkcg_group_leader.members != NULL,
- "topics not available");
- rd_kafka_topic_partition_list_destroy(
- owned_but_not_exist);
-
- } else {
- /* Nothing to revoke, rejoin group if we are the
- * leader.
- * The KIP says to rejoin the group on metadata
- * change only if we're the leader. But what if a
- * non-leader is subscribed to a regex that the others
- * aren't?
- * Going against the KIP and rejoining here. */
- rd_kafka_cgrp_rejoin(
- rkcg,
- "Metadata for subscribed topic(s) has "
- "changed");
- }
-
- } else {
- /* EAGER */
- rd_kafka_cgrp_revoke_rejoin(rkcg,
- "Metadata for subscribed topic(s) "
- "has changed");
- }
-
- /* We shouldn't get stuck in this state. */
- rd_dassert(rkcg->rkcg_join_state !=
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
-}
-
-
-rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata_new(const char *group_id) {
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid(
- group_id, -1, "", NULL);
-
- return cgmetadata;
-}
-
-rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id,
- int32_t generation_id,
- const char *member_id,
- const char *group_instance_id) {
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- cgmetadata = rd_calloc(1, sizeof(*cgmetadata));
- cgmetadata->group_id = rd_strdup(group_id);
- cgmetadata->generation_id = generation_id;
- cgmetadata->member_id = rd_strdup(member_id);
- if (group_instance_id)
- cgmetadata->group_instance_id = rd_strdup(group_instance_id);
-
- return cgmetadata;
-}
-
-rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata(rd_kafka_t *rk) {
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- rd_kafka_op_t *rko;
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return NULL;
-
- rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_CG_METADATA);
- if (!rko)
- return NULL;
-
- cgmetadata = rko->rko_u.cg_metadata;
- rko->rko_u.cg_metadata = NULL;
- rd_kafka_op_destroy(rko);
-
- return cgmetadata;
-}
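-
-/*
- * Illustrative use (hypothetical variable names; see rdkafka.h for the
- * public prototypes): the returned object is typically passed to a
- * transactional producer when committing consumed offsets:
- *
- *   rd_kafka_consumer_group_metadata_t *cgmd =
- *       rd_kafka_consumer_group_metadata(consumer);
- *   rd_kafka_error_t *error = rd_kafka_send_offsets_to_transaction(
- *       producer, offsets, cgmd, timeout_ms);
- *   rd_kafka_consumer_group_metadata_destroy(cgmd);
- */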
-
-void rd_kafka_consumer_group_metadata_destroy(
- rd_kafka_consumer_group_metadata_t *cgmetadata) {
- rd_free(cgmetadata->group_id);
- rd_free(cgmetadata->member_id);
- if (cgmetadata->group_instance_id)
- rd_free(cgmetadata->group_instance_id);
- rd_free(cgmetadata);
-}
-
-rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup(
- const rd_kafka_consumer_group_metadata_t *cgmetadata) {
- rd_kafka_consumer_group_metadata_t *ret;
-
- ret = rd_calloc(1, sizeof(*cgmetadata));
- ret->group_id = rd_strdup(cgmetadata->group_id);
- ret->generation_id = cgmetadata->generation_id;
- ret->member_id = rd_strdup(cgmetadata->member_id);
- if (cgmetadata->group_instance_id)
- ret->group_instance_id =
- rd_strdup(cgmetadata->group_instance_id);
-
- return ret;
-}
-
-/*
- * Consumer group metadata serialization format v2:
- * "CGMDv2:"<generation_id><group_id>"\0"<member_id>"\0" \
- * <group_instance_id_is_null>[<group_instance_id>"\0"]
- * Where <group_id> is the group_id string.
- */
-static const char rd_kafka_consumer_group_metadata_magic[7] = "CGMDv2:";
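-
-/*
- * Example (illustrative): group_id="grp", generation_id=1, member_id="m1"
- * and a NULL group_instance_id serialize, per the write function below
- * (generation_id is copied in host byte order; shown for a little-endian
- * host), to the 19 bytes:
- *
- *   "CGMDv2:"   01 00 00 00   "grp\0"    "m1\0"     01
- *    magic(7)   genid(4)      group(4)   member(3)  is_null(1)
- */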
-
-rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(
- const rd_kafka_consumer_group_metadata_t *cgmd,
- void **bufferp,
- size_t *sizep) {
- char *buf;
- size_t size;
- size_t of = 0;
- size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic);
- size_t groupid_len = strlen(cgmd->group_id) + 1;
- size_t generationid_len = sizeof(cgmd->generation_id);
- size_t member_id_len = strlen(cgmd->member_id) + 1;
- int8_t group_instance_id_is_null = cgmd->group_instance_id ? 0 : 1;
- size_t group_instance_id_is_null_len =
- sizeof(group_instance_id_is_null);
- size_t group_instance_id_len =
- cgmd->group_instance_id ? strlen(cgmd->group_instance_id) + 1 : 0;
-
- size = magic_len + groupid_len + generationid_len + member_id_len +
- group_instance_id_is_null_len + group_instance_id_len;
-
- buf = rd_malloc(size);
-
- memcpy(buf, rd_kafka_consumer_group_metadata_magic, magic_len);
- of += magic_len;
-
- memcpy(buf + of, &cgmd->generation_id, generationid_len);
- of += generationid_len;
-
- memcpy(buf + of, cgmd->group_id, groupid_len);
- of += groupid_len;
-
- memcpy(buf + of, cgmd->member_id, member_id_len);
- of += member_id_len;
-
- memcpy(buf + of, &group_instance_id_is_null,
- group_instance_id_is_null_len);
- of += group_instance_id_is_null_len;
-
- if (!group_instance_id_is_null)
- memcpy(buf + of, cgmd->group_instance_id,
- group_instance_id_len);
- of += group_instance_id_len;
-
- rd_assert(of == size);
-
- *bufferp = buf;
- *sizep = size;
-
- return NULL;
-}
-
-
-/*
- * Check that a string is printable, returning NULL if it is not
- * printable or is not NUL-terminated within the buffer, or a
- * pointer to the byte immediately following the string's NUL
- * terminator otherwise.
- */
-static const char *str_is_printable(const char *s, const char *end) {
- const char *c;
- for (c = s; c != end && *c; c++)
- if (!isprint((unsigned char)*c))
- return NULL;
- return c == end ? NULL : c + 1;
-}
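-
-/*
- * Example (illustrative): for buf = "grp\0m1\0...", str_is_printable(buf,
- * end) stops at the NUL at buf+3 and returns buf+4, i.e. a pointer to
- * 'm', which lets the reader below walk the NUL-separated fields.
- */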
-
-
-rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(
- rd_kafka_consumer_group_metadata_t **cgmdp,
- const void *buffer,
- size_t size) {
- const char *buf = (const char *)buffer;
- const char *end = buf + size;
- const char *next;
- size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic);
- int32_t generation_id;
- size_t generationid_len = sizeof(generation_id);
- const char *group_id;
- const char *member_id;
- int8_t group_instance_id_is_null;
- const char *group_instance_id = NULL;
-
- if (size < magic_len + generationid_len + 1 + 1 + 1)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
- "Input buffer is too short");
-
- if (memcmp(buffer, rd_kafka_consumer_group_metadata_magic, magic_len))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
- "Input buffer is not a serialized "
- "consumer group metadata object");
- memcpy(&generation_id, buf + magic_len, generationid_len);
-
- group_id = buf + magic_len + generationid_len;
- next = str_is_printable(group_id, end);
- if (!next)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
- "Input buffer group id is not safe");
-
- member_id = next;
- next = str_is_printable(member_id, end);
- if (!next)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
- "Input buffer member id is not "
- "safe");
-
- group_instance_id_is_null = (int8_t)*(next++);
- if (!group_instance_id_is_null) {
- group_instance_id = next;
- next = str_is_printable(group_instance_id, end);
- if (!next)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
- "Input buffer group "
- "instance id is not safe");
- }
-
- if (next != end)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
- "Input buffer bad length");
-
- *cgmdp = rd_kafka_consumer_group_metadata_new_with_genid(
- group_id, generation_id, member_id, group_instance_id);
-
- return NULL;
-}
-
-
-static int
-unittest_consumer_group_metadata_iteration(const char *group_id,
- int32_t generation_id,
- const char *member_id,
- const char *group_instance_id) {
- rd_kafka_consumer_group_metadata_t *cgmd;
- void *buffer, *buffer2;
- size_t size, size2;
- rd_kafka_error_t *error;
-
- cgmd = rd_kafka_consumer_group_metadata_new_with_genid(
- group_id, generation_id, member_id, group_instance_id);
- RD_UT_ASSERT(cgmd != NULL, "failed to create metadata");
-
- error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer, &size);
- RD_UT_ASSERT(!error, "metadata_write failed: %s",
- rd_kafka_error_string(error));
-
- rd_kafka_consumer_group_metadata_destroy(cgmd);
-
- cgmd = NULL;
- error = rd_kafka_consumer_group_metadata_read(&cgmd, buffer, size);
- RD_UT_ASSERT(!error, "metadata_read failed: %s",
- rd_kafka_error_string(error));
-
- /* Serialize again and compare buffers */
- error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer2, &size2);
- RD_UT_ASSERT(!error, "metadata_write failed: %s",
- rd_kafka_error_string(error));
-
- RD_UT_ASSERT(size == size2 && !memcmp(buffer, buffer2, size),
- "metadata_read/write size or content mismatch: "
- "size %" PRIusz ", size2 %" PRIusz,
- size, size2);
-
- rd_kafka_consumer_group_metadata_destroy(cgmd);
- rd_free(buffer);
- rd_free(buffer2);
-
- return 0;
-}
-
-
-static int unittest_consumer_group_metadata(void) {
- const char *ids[] = {
- "mY. random id:.",
- "0",
- "2222222222222222222222221111111111111111111111111111112222",
- "",
- "NULL",
- NULL,
- };
- int i, j, k, gen_id;
- int ret;
- const char *group_id;
- const char *member_id;
- const char *group_instance_id;
-
- for (i = 0; ids[i]; i++) {
- for (j = 0; ids[j]; j++) {
- for (k = 0; ids[k]; k++) {
- for (gen_id = -1; gen_id < 1; gen_id++) {
- group_id = ids[i];
- member_id = ids[j];
- group_instance_id = ids[k];
- if (strcmp(group_instance_id, "NULL") ==
- 0)
- group_instance_id = NULL;
- ret =
- unittest_consumer_group_metadata_iteration(
- group_id, gen_id, member_id,
- group_instance_id);
- if (ret)
- return ret;
- }
- }
- }
- }
-
- RD_UT_PASS();
-}
-
-
-static int unittest_set_intersect(void) {
- size_t par_cnt = 10;
- map_toppar_member_info_t *dst;
- rd_kafka_topic_partition_t *toppar;
- PartitionMemberInfo_t *v;
- char *id = "id";
- rd_kafkap_str_t id1 = RD_KAFKAP_STR_INITIALIZER;
- rd_kafkap_str_t id2 = RD_KAFKAP_STR_INITIALIZER;
- rd_kafka_group_member_t *gm1;
- rd_kafka_group_member_t *gm2;
-
- id1.len = 2;
- id1.str = id;
- id2.len = 2;
- id2.str = id;
-
- map_toppar_member_info_t a = RD_MAP_INITIALIZER(
- par_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
-
- map_toppar_member_info_t b = RD_MAP_INITIALIZER(
- par_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
-
- gm1 = rd_calloc(1, sizeof(*gm1));
- gm1->rkgm_member_id = &id1;
- gm1->rkgm_group_instance_id = &id1;
- gm2 = rd_calloc(1, sizeof(*gm2));
- gm2->rkgm_member_id = &id2;
- gm2->rkgm_group_instance_id = &id2;
-
- RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4),
- PartitionMemberInfo_new(gm1, rd_false));
- RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 4),
- PartitionMemberInfo_new(gm1, rd_false));
- RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 7),
- PartitionMemberInfo_new(gm1, rd_false));
-
- RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 7),
- PartitionMemberInfo_new(gm1, rd_false));
- RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4),
- PartitionMemberInfo_new(gm2, rd_false));
-
- dst = rd_kafka_member_partitions_intersect(&a, &b);
-
- RD_UT_ASSERT(RD_MAP_CNT(&a) == 3, "expected a cnt to be 3 not %d",
- (int)RD_MAP_CNT(&a));
- RD_UT_ASSERT(RD_MAP_CNT(&b) == 2, "expected b cnt to be 2 not %d",
- (int)RD_MAP_CNT(&b));
- RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d",
- (int)RD_MAP_CNT(dst));
-
- toppar = rd_kafka_topic_partition_new("t1", 4);
- RD_UT_ASSERT((v = RD_MAP_GET(dst, toppar)), "unexpected element");
- RD_UT_ASSERT(v->members_match, "expected members to match");
- rd_kafka_topic_partition_destroy(toppar);
-
- RD_MAP_DESTROY(&a);
- RD_MAP_DESTROY(&b);
- RD_MAP_DESTROY(dst);
- rd_free(dst);
-
- rd_free(gm1);
- rd_free(gm2);
-
- RD_UT_PASS();
-}
-
-
-static int unittest_set_subtract(void) {
- size_t par_cnt = 10;
- rd_kafka_topic_partition_t *toppar;
- map_toppar_member_info_t *dst;
-
- map_toppar_member_info_t a = RD_MAP_INITIALIZER(
- par_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
-
- map_toppar_member_info_t b = RD_MAP_INITIALIZER(
- par_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
-
- RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4),
- PartitionMemberInfo_new(NULL, rd_false));
- RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 7),
- PartitionMemberInfo_new(NULL, rd_false));
-
- RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 4),
- PartitionMemberInfo_new(NULL, rd_false));
- RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4),
- PartitionMemberInfo_new(NULL, rd_false));
- RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 7),
- PartitionMemberInfo_new(NULL, rd_false));
-
- dst = rd_kafka_member_partitions_subtract(&a, &b);
-
- RD_UT_ASSERT(RD_MAP_CNT(&a) == 2, "expected a cnt to be 2 not %d",
- (int)RD_MAP_CNT(&a));
- RD_UT_ASSERT(RD_MAP_CNT(&b) == 3, "expected b cnt to be 3 not %d",
- (int)RD_MAP_CNT(&b));
- RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d",
- (int)RD_MAP_CNT(dst));
-
- toppar = rd_kafka_topic_partition_new("t2", 7);
- RD_UT_ASSERT(RD_MAP_GET(dst, toppar), "unexpected element");
- rd_kafka_topic_partition_destroy(toppar);
-
- RD_MAP_DESTROY(&a);
- RD_MAP_DESTROY(&b);
- RD_MAP_DESTROY(dst);
- rd_free(dst);
-
- RD_UT_PASS();
-}
-
-
-static int unittest_map_to_list(void) {
- rd_kafka_topic_partition_list_t *list;
-
- map_toppar_member_info_t map = RD_MAP_INITIALIZER(
- 10, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
-
- RD_MAP_SET(&map, rd_kafka_topic_partition_new("t1", 101),
- PartitionMemberInfo_new(NULL, rd_false));
-
- list = rd_kafka_toppar_member_info_map_to_list(&map);
-
- RD_UT_ASSERT(list->cnt == 1, "expecting list size of 1 not %d.",
- list->cnt);
- RD_UT_ASSERT(list->elems[0].partition == 101,
- "expecting partition 101 not %d",
- list->elems[0].partition);
- RD_UT_ASSERT(!strcmp(list->elems[0].topic, "t1"),
- "expecting topic 't1', not %s", list->elems[0].topic);
-
- rd_kafka_topic_partition_list_destroy(list);
- RD_MAP_DESTROY(&map);
-
- RD_UT_PASS();
-}
-
-
-static int unittest_list_to_map(void) {
- rd_kafka_topic_partition_t *toppar;
- map_toppar_member_info_t *map;
- rd_kafka_topic_partition_list_t *list =
- rd_kafka_topic_partition_list_new(1);
-
- rd_kafka_topic_partition_list_add(list, "topic1", 201);
- rd_kafka_topic_partition_list_add(list, "topic2", 202);
-
- map = rd_kafka_toppar_list_to_toppar_member_info_map(list);
-
- RD_UT_ASSERT(RD_MAP_CNT(map) == 2, "expected map cnt to be 2 not %d",
- (int)RD_MAP_CNT(map));
- toppar = rd_kafka_topic_partition_new("topic1", 201);
- RD_UT_ASSERT(RD_MAP_GET(map, toppar),
- "expected topic1 [201] to exist in map");
- rd_kafka_topic_partition_destroy(toppar);
- toppar = rd_kafka_topic_partition_new("topic2", 202);
- RD_UT_ASSERT(RD_MAP_GET(map, toppar),
- "expected topic2 [202] to exist in map");
- rd_kafka_topic_partition_destroy(toppar);
-
- RD_MAP_DESTROY(map);
- rd_free(map);
- rd_kafka_topic_partition_list_destroy(list);
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Consumer group unit tests
- */
-int unittest_cgrp(void) {
- int fails = 0;
-
- fails += unittest_consumer_group_metadata();
- fails += unittest_set_intersect();
- fails += unittest_set_subtract();
- fails += unittest_map_to_list();
- fails += unittest_list_to_map();
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h
deleted file mode 100644
index 4fa51e548..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_CGRP_H_
-#define _RDKAFKA_CGRP_H_
-
-#include "rdinterval.h"
-
-#include "rdkafka_assignor.h"
-
-
-/**
- * Client groups implementation
- *
- * Client groups handling for a single cgrp is assigned to a single
- * rd_kafka_broker_t object at any given time.
- * The main thread will call cgrp_serve() to serve its cgrps.
- *
- * This means that the cgrp itself does not need to be locked since it
- * is only ever used from the main thread.
- *
- */
-
-
-extern const char *rd_kafka_cgrp_join_state_names[];
-
-/**
- * Client group
- */
-typedef struct rd_kafka_cgrp_s {
- const rd_kafkap_str_t *rkcg_group_id;
- rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */
- rd_kafkap_str_t *rkcg_group_instance_id;
- const rd_kafkap_str_t *rkcg_client_id;
-
- enum {
- /* Init state */
- RD_KAFKA_CGRP_STATE_INIT,
-
- /* Cgrp has been stopped. This is a final state */
- RD_KAFKA_CGRP_STATE_TERM,
-
- /* Query for group coordinator */
- RD_KAFKA_CGRP_STATE_QUERY_COORD,
-
- /* Outstanding query, awaiting response */
- RD_KAFKA_CGRP_STATE_WAIT_COORD,
-
- /* Wait ack from assigned cgrp manager broker thread */
- RD_KAFKA_CGRP_STATE_WAIT_BROKER,
-
- /* Wait for manager broker thread to connect to broker */
- RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT,
-
- /* Coordinator is up and manager is assigned. */
- RD_KAFKA_CGRP_STATE_UP,
- } rkcg_state;
- rd_ts_t rkcg_ts_statechange; /* Timestamp of last
- * state change. */
-
-
- enum {
- /* all: join or rejoin, possibly with an existing assignment. */
- RD_KAFKA_CGRP_JOIN_STATE_INIT,
-
- /* all: JoinGroupRequest sent, awaiting response. */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN,
-
- /* all: MetadataRequest sent, awaiting response.
- * While metadata requests may be issued at any time,
- * this state is only set upon a proper (re)join. */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA,
-
- /* Follower: SyncGroupRequest sent, awaiting response. */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC,
-
- /* all: waiting for application to call *_assign() */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL,
-
- /* all: waiting for application to call *_unassign() */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL,
-
- /* all: waiting for full assignment to decommission */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE,
-
- /* all: waiting for partial assignment to decommission */
- RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE,
-
- /* all: synchronized and assigned
- * may be an empty assignment. */
- RD_KAFKA_CGRP_JOIN_STATE_STEADY,
- } rkcg_join_state;
-
- /* State when group leader */
- struct {
- rd_kafka_group_member_t *members;
- int member_cnt;
- } rkcg_group_leader;
-
- rd_kafka_q_t *rkcg_q; /* Application poll queue */
- rd_kafka_q_t *rkcg_ops; /* Manager ops queue */
- rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */
- int rkcg_flags;
-#define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */
-#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE \
- 0x8 /* Send LeaveGroup when \
- * unassign is done */
-#define RD_KAFKA_CGRP_F_SUBSCRIPTION \
- 0x10 /* If set: \
- * subscription \
- * else: \
- * static assignment */
-#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT \
- 0x20 /* A Heartbeat request \
- * is in transit, don't \
- * send a new one. */
-#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION \
- 0x40 /* Subscription contains \
- * wildcards. */
-#define RD_KAFKA_CGRP_F_WAIT_LEAVE \
- 0x80 /* Wait for LeaveGroup \
- * to be sent. \
- * This is used to stall \
- * termination until \
- * the LeaveGroupRequest \
- * is responded to, \
- * otherwise it risks \
- * being dropped in the \
- * output queue when \
- * the broker is destroyed. \
- */
-#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED \
- 0x100 /**< max.poll.interval.ms \
- * was exceeded and we \
- * left the group. \
- * Do not rejoin until \
- * the application has \
- * polled again. */
-
- rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/
- rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */
- rd_interval_t rkcg_join_intvl; /* JoinGroup interval */
- rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */
-
- rd_ts_t rkcg_ts_session_timeout; /**< Absolute session
- * timeout enforced by
- * the consumer, this
- * value is updated on
- * Heartbeat success,
- * etc. */
- rd_kafka_resp_err_t rkcg_last_heartbeat_err; /**< Last Heartbeat error,
- * used for logging. */
-
- TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics; /* Topics subscribed to */
-
- rd_list_t rkcg_toppars; /* Toppars subscribed to*/
-
- int32_t rkcg_generation_id; /* Current generation id */
-
- rd_kafka_assignor_t *rkcg_assignor; /**< The current partition
- * assignor. used by both
- * leader and members. */
- void *rkcg_assignor_state; /**< current partition
- * assignor state */
-
- int32_t rkcg_coord_id; /**< Current coordinator id,
- * or -1 if not known. */
-
- rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator
- * broker handle, or NULL.
- * rkcg_coord's nodename is
- * updated to this broker's
- * nodename when there is a
- * coordinator change. */
- rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator
- * broker handle.
- * Will be updated when the
- * coordinator changes. */
-
- int16_t rkcg_wait_resp; /**< Awaiting response for this
- * ApiKey.
- * Makes sure only one
- * JoinGroup or SyncGroup
- * request is outstanding.
- * Unset value is -1. */
-
- /** Current subscription */
- rd_kafka_topic_partition_list_t *rkcg_subscription;
- /** The actual topics subscribed (after metadata+wildcard matching).
- * Sorted. */
- rd_list_t *rkcg_subscribed_topics; /**< (rd_kafka_topic_info_t *) */
- /** Subscribed topics that are errored/not available. */
- rd_kafka_topic_partition_list_t *rkcg_errored_topics;
- /** If a SUBSCRIBE op is received during a COOPERATIVE rebalance,
- * actioning this will be postponed until after the rebalance
- * completes. The waiting subscription is stored here.
- * Mutually exclusive with rkcg_next_unsubscribe. */
- rd_kafka_topic_partition_list_t *rkcg_next_subscription;
- /** If a (un)SUBSCRIBE op is received during a COOPERATIVE rebalance,
- * actioning this will be postponed until after the rebalance
- * completes. This flag is used to signal a waiting unsubscribe
- * operation. Mutually exclusive with rkcg_next_subscription. */
- rd_bool_t rkcg_next_unsubscribe;
-
- /** Assignment considered lost */
- rd_atomic32_t rkcg_assignment_lost;
-
- /** Current assignment of partitions from last SyncGroup response.
- * NULL means no assignment, else empty or non-empty assignment.
- *
- * This group assignment is the actual set of partitions that were
- * assigned to our consumer by the consumer group leader and should
- * not be confused with the rk_consumer.assignment which is the
- * partitions assigned by the application using assign() et al.
- *
- * The group assignment and the consumer assignment are typically
- * identical, but not necessarily since an application is free to
- * assign() any partition, not just the partitions it is handed
- * through the rebalance callback.
- *
- * Yes, this nomenclature is ambiguous but has historical reasons,
- * so for now just try to remember that:
- * - group assignment == consumer group assignment.
- * - assignment == actual used assignment, i.e., fetched partitions.
- *
- * @remark This list is always sorted.
- */
- rd_kafka_topic_partition_list_t *rkcg_group_assignment;
-
- /** The partitions to incrementally assign following a
- * currently in-progress incremental unassign. */
- rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment;
-
- /** Rejoin the group following a currently in-progress
- * incremental unassign. */
- rd_bool_t rkcg_rebalance_rejoin;
-
- rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to
- * application.
- * This is for silencing
- * same errors. */
-
- rd_kafka_timer_t rkcg_offset_commit_tmr; /* Offset commit timer */
- rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max
- * poll interval. */
-
- rd_kafka_t *rkcg_rk;
-
- rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op
- * (OP_TERMINATE)
- * to this rko's queue. */
-
- rd_ts_t rkcg_ts_terminate; /* Timestamp of when
- * cgrp termination was
- * initiated. */
-
- rd_atomic32_t rkcg_terminated; /**< Consumer has been closed */
-
- /* Protected by rd_kafka_*lock() */
- struct {
- rd_ts_t ts_rebalance; /* Timestamp of
- * last rebalance */
- int rebalance_cnt; /* Number of
- rebalances */
- char rebalance_reason[256]; /**< Last rebalance
- * reason */
- int assignment_size; /* Partition count
- * of last rebalance
- * assignment */
- } rkcg_c;
-
-} rd_kafka_cgrp_t;
-
-
-
-/* Check if broker is the coordinator */
-#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb) \
- ((rkcg)->rkcg_coord_id != -1 && \
- (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid)
-
-/**
- * @returns true if cgrp is using static group membership
- */
-#define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \
- !RD_KAFKAP_STR_IS_NULL((rkcg)->rkcg_group_instance_id)
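-
-/* (A member is static when the application has set the
- * `group.instance.id` configuration property, in which case
- * rkcg_group_instance_id is a non-NULL Kafka string.) */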
-
-extern const char *rd_kafka_cgrp_state_names[];
-extern const char *rd_kafka_cgrp_join_state_names[];
-
-void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg);
-rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
- const rd_kafkap_str_t *group_id,
- const rd_kafkap_str_t *client_id);
-void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg);
-
-void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg,
- rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq,
- rd_kafka_op_type_t type,
- rd_kafka_resp_err_t err);
-void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko);
-void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq);
-
-
-rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del(rd_kafka_cgrp_t *rkcg,
- const char *pattern);
-rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add(rd_kafka_cgrp_t *rkcg,
- const char *pattern);
-
-int rd_kafka_cgrp_topic_check(rd_kafka_cgrp_t *rkcg, const char *topic);
-
-void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id);
-
-void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state);
-
-rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg);
-void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason);
-void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg,
- rd_kafka_resp_err_t err,
- const char *reason);
-void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg,
- rd_bool_t do_join);
-#define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp)
-
-
-void rd_kafka_cgrp_assigned_offsets_commit(
- rd_kafka_cgrp_t *rkcg,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_bool_t set_offsets,
- const char *reason);
-
-void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg);
-
-rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg);
-
-
-struct rd_kafka_consumer_group_metadata_s {
- char *group_id;
- int32_t generation_id;
- char *member_id;
- char *group_instance_id; /**< Optional (NULL) */
-};
-
-rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup(
- const rd_kafka_consumer_group_metadata_t *cgmetadata);
-
-static RD_UNUSED const char *
-rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) {
- switch (protocol) {
- case RD_KAFKA_REBALANCE_PROTOCOL_EAGER:
- return "EAGER";
- case RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE:
- return "COOPERATIVE";
- default:
- return "NONE";
- }
-}
-
-#endif /* _RDKAFKA_CGRP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c
deleted file mode 100644
index e481f4dd8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c
+++ /dev/null
@@ -1,4362 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2022 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rd.h"
-#include "rdfloat.h"
-
-#include <stdlib.h>
-#include <ctype.h>
-#include <stddef.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_feature.h"
-#include "rdkafka_interceptor.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_sasl_oauthbearer.h"
-#if WITH_PLUGINS
-#include "rdkafka_plugin.h"
-#endif
-#include "rdunittest.h"
-
-#ifndef _WIN32
-#include <netinet/tcp.h>
-#else
-
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif
-#include <windows.h>
-#endif
-
-struct rd_kafka_property {
- rd_kafka_conf_scope_t scope;
- const char *name;
- enum { _RK_C_STR,
- _RK_C_INT,
- _RK_C_DBL, /* Double */
- _RK_C_S2I, /* String to Integer mapping.
- * Supports limited canonical str->int mappings
- * using s2i[] */
- _RK_C_S2F, /* CSV String to Integer flag mapping (OR:ed) */
- _RK_C_BOOL,
- _RK_C_PTR, /* Only settable through special set functions */
- _RK_C_PATLIST, /* Pattern list */
- _RK_C_KSTR, /* Kafka string */
- _RK_C_ALIAS, /* Alias: points to other property through .sdef */
- _RK_C_INTERNAL, /* Internal, don't expose to application */
- _RK_C_INVALID, /* Invalid property, used to catch known
- * but unsupported Java properties. */
- } type;
- int offset;
- const char *desc;
- int vmin;
- int vmax;
- int vdef; /* Default value (int) */
- const char *sdef; /* Default value (string) */
- void *pdef; /* Default value (pointer) */
- double ddef; /* Default value (double) */
- double dmin;
- double dmax;
- struct {
- int val;
- const char *str;
- const char *unsupported; /**< Reason for value not being
- * supported in this build. */
- } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */
-
- const char *unsupported; /**< Reason for property not being supported
- * in this build.
- * Will be included in the conf_set()
- * error string. */
-
- /* Value validator (STR) */
- int (*validate)(const struct rd_kafka_property *prop,
- const char *val,
- int ival);
-
- /* Configuration object constructors and destructor for use when
- * the property value itself is not used, or needs extra care. */
- void (*ctor)(int scope, void *pconf);
- void (*dtor)(int scope, void *pconf);
- void (*copy)(int scope,
- void *pdst,
- const void *psrc,
- void *dstptr,
- const void *srcptr,
- size_t filter_cnt,
- const char **filter);
-
- rd_kafka_conf_res_t (*set)(int scope,
- void *pconf,
- const char *name,
- const char *value,
- void *dstptr,
- rd_kafka_conf_set_mode_t set_mode,
- char *errstr,
- size_t errstr_size);
-};
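-
-/*
- * Illustrative (hypothetical) table entry using this struct; the real
- * property table is defined further below in this file:
- *
- *   {_RK_GLOBAL, "example.enable", _RK_C_BOOL, _RK(example_enable),
- *    "A hypothetical boolean property.", 0, 1, 0},
- *
- * i.e. scope, name, type, conf-struct field offset (via the _RK()
- * macro defined just below), description, vmin, vmax and the integer
- * default vdef.
- */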
-
-
-#define _RK(field) offsetof(rd_kafka_conf_t, field)
-#define _RKT(field) offsetof(rd_kafka_topic_conf_t, field)
-
-#if WITH_SSL
-#define _UNSUPPORTED_SSL .unsupported = NULL
-#else
-#define _UNSUPPORTED_SSL .unsupported = "OpenSSL not available at build time"
-#endif
-
-#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && defined(WITH_SSL) && \
- !defined(LIBRESSL_VERSION_NUMBER)
-#define _UNSUPPORTED_OPENSSL_1_0_2 .unsupported = NULL
-#else
-#define _UNSUPPORTED_OPENSSL_1_0_2 \
- .unsupported = "OpenSSL >= 1.0.2 not available at build time"
-#endif
-
-#if OPENSSL_VERSION_NUMBER >= 0x10100000 && defined(WITH_SSL) && \
- !defined(LIBRESSL_VERSION_NUMBER)
-#define _UNSUPPORTED_OPENSSL_1_1_0 .unsupported = NULL
-#else
-#define _UNSUPPORTED_OPENSSL_1_1_0 \
- .unsupported = "OpenSSL >= 1.1.0 not available at build time"
-#endif
-
-#if WITH_SSL_ENGINE
-#define _UNSUPPORTED_SSL_ENGINE .unsupported = NULL
-#else
-#define _UNSUPPORTED_SSL_ENGINE \
- .unsupported = "OpenSSL >= 1.1.x not available at build time"
-#endif
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000 && defined(WITH_SSL)
-#define _UNSUPPORTED_SSL_3 .unsupported = NULL
-#else
-#define _UNSUPPORTED_SSL_3 \
- .unsupported = "OpenSSL >= 3.0.0 not available at build time"
-#endif
-
-
-#if WITH_ZLIB
-#define _UNSUPPORTED_ZLIB .unsupported = NULL
-#else
-#define _UNSUPPORTED_ZLIB .unsupported = "zlib not available at build time"
-#endif
-
-#if WITH_SNAPPY
-#define _UNSUPPORTED_SNAPPY .unsupported = NULL
-#else
-#define _UNSUPPORTED_SNAPPY .unsupported = "snappy not enabled at build time"
-#endif
-
-#if WITH_ZSTD
-#define _UNSUPPORTED_ZSTD .unsupported = NULL
-#else
-#define _UNSUPPORTED_ZSTD .unsupported = "libzstd not available at build time"
-#endif
-
-#if WITH_CURL
-#define _UNSUPPORTED_HTTP .unsupported = NULL
-#else
-#define _UNSUPPORTED_HTTP .unsupported = "libcurl not available at build time"
-#endif
-
-#if WITH_OAUTHBEARER_OIDC
-#define _UNSUPPORTED_OIDC .unsupported = NULL
-#else
-#define _UNSUPPORTED_OIDC \
- .unsupported = \
- "OAuth/OIDC depends on libcurl and OpenSSL which were not " \
- "available at build time"
-#endif
-
-#ifdef _WIN32
-#define _UNSUPPORTED_WIN32_GSSAPI \
- .unsupported = \
- "Kerberos keytabs are not supported on Windows, " \
- "instead the logged on " \
- "user's credentials are used through native SSPI"
-#else
-#define _UNSUPPORTED_WIN32_GSSAPI .unsupported = NULL
-#endif
-
-#if defined(_WIN32) || defined(WITH_SASL_CYRUS)
-#define _UNSUPPORTED_GSSAPI .unsupported = NULL
-#else
-#define _UNSUPPORTED_GSSAPI \
- .unsupported = "cyrus-sasl/libsasl2 not available at build time"
-#endif
-
-#define _UNSUPPORTED_OAUTHBEARER _UNSUPPORTED_SSL
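-
-/* Usage sketch: each _UNSUPPORTED_* macro expands to a designated
- * initializer for an .unsupported field, either on a whole property or
- * on a single s2i value, e.g. (mirroring an entry defined below):
- *
- *   {_RK_GLOBAL | _RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, _RK(ssl.ca_pem),
- *    "CA certificate string (PEM format) for verifying the broker's key.",
- *    _UNSUPPORTED_SSL},
- *
- * In builds lacking the feature the reason string is included in the
- * conf_set() error string. */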
-
-
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_get0(const void *conf,
- const struct rd_kafka_property *prop,
- char *dest,
- size_t *dest_size);
-
-
-
-/**
- * @returns a unique index for property \p prop, using the byte position
- * of the field.
- */
-static RD_INLINE int rd_kafka_prop2idx(const struct rd_kafka_property *prop) {
- return prop->offset;
-}
-
-
-
-/**
- * @brief Set the property as modified.
- *
- * We do this by mapping the property's conf struct field byte offset
- * to a bit in a bit vector.
- * If the bit is set the property has been modified, otherwise it is
- * at its default unmodified value.
- *
- * \p is_modified 1: set as modified, 0: clear modified
- */
-static void rd_kafka_anyconf_set_modified(void *conf,
- const struct rd_kafka_property *prop,
- int is_modified) {
- int idx = rd_kafka_prop2idx(prop);
- int bkt = idx / 64;
- uint64_t bit = (uint64_t)1 << (idx % 64);
- struct rd_kafka_anyconf_hdr *confhdr = conf;
-
- rd_assert(idx < RD_KAFKA_CONF_PROPS_IDX_MAX &&
- *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX");
-
- if (is_modified)
- confhdr->modified[bkt] |= bit;
- else
- confhdr->modified[bkt] &= ~bit;
-}
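-
-/* Worked example (illustrative offset): a property at conf byte offset
- * 130 gives idx = 130, bkt = 130 / 64 = 2 and
- * bit = (uint64_t)1 << (130 % 64) = 0x4, so marking it modified sets
- * modified[2] |= 0x4 and clearing it does modified[2] &= ~0x4. */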
-
-/**
- * @brief Clear is_modified for all properties.
- * @warning Does NOT clear/reset the value.
- */
-static void rd_kafka_anyconf_clear_all_is_modified(void *conf) {
- struct rd_kafka_anyconf_hdr *confhdr = conf;
-
- memset(confhdr, 0, sizeof(*confhdr));
-}
-
-
-/**
- * @returns true if the property has been set/modified, else false.
- */
-static rd_bool_t
-rd_kafka_anyconf_is_modified(const void *conf,
- const struct rd_kafka_property *prop) {
- int idx = rd_kafka_prop2idx(prop);
- int bkt = idx / 64;
- uint64_t bit = (uint64_t)1 << (idx % 64);
- const struct rd_kafka_anyconf_hdr *confhdr = conf;
-
- return !!(confhdr->modified[bkt] & bit);
-}
-
-/**
- * @returns true if any property in \p conf has been set/modified.
- */
-static rd_bool_t rd_kafka_anyconf_is_any_modified(const void *conf) {
- const struct rd_kafka_anyconf_hdr *confhdr = conf;
- int i;
-
- for (i = 0; i < (int)RD_ARRAYSIZE(confhdr->modified); i++)
- if (confhdr->modified[i])
- return rd_true;
-
- return rd_false;
-}
-
-
-
-/**
- * @brief Validate \p broker.version.fallback property.
- */
-static int
-rd_kafka_conf_validate_broker_version(const struct rd_kafka_property *prop,
- const char *val,
- int ival) {
- struct rd_kafka_ApiVersion *apis;
- size_t api_cnt;
- return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL);
-}
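-
-/* E.g., per the `broker.version.fallback` description below, values such
- * as "0.9.0" or "0.8.2" are expected to resolve to a legacy ApiVersion
- * set, while an unrecognized string fails validation (assumption:
- * rd_kafka_get_legacy_ApiVersions() returns non-zero on success). */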
-
-/**
- * @brief Validate that the string is a single item, without delimiters (comma or space).
- */
-static RD_UNUSED int
-rd_kafka_conf_validate_single(const struct rd_kafka_property *prop,
- const char *val,
- int ival) {
- return !strchr(val, ',') && !strchr(val, ' ');
-}
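-
-/* E.g., for `sasl.mechanisms` (which uses this validator below),
- * "GSSAPI" passes while "GSSAPI,PLAIN" or "GSSAPI PLAIN" are rejected
- * since they contain a delimiter. */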
-
-/**
- * @brief Validate builtin partitioner string
- */
-static RD_UNUSED int
-rd_kafka_conf_validate_partitioner(const struct rd_kafka_property *prop,
- const char *val,
- int ival) {
- return !strcmp(val, "random") || !strcmp(val, "consistent") ||
- !strcmp(val, "consistent_random") || !strcmp(val, "murmur2") ||
- !strcmp(val, "murmur2_random") || !strcmp(val, "fnv1a") ||
- !strcmp(val, "fnv1a_random");
-}
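-
-/* E.g., "murmur2_random" passes, while an arbitrary name such as
- * "my_partitioner" (hypothetical) is rejected; custom partitioners are
- * instead installed programmatically through the partitioner_cb topic
- * property defined below. */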
-
-
-/**
- * librdkafka configuration property definitions.
- */
-static const struct rd_kafka_property rd_kafka_properties[] = {
- /* Global properties */
- {_RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features),
- "Indicates the builtin features for this build of librdkafka. "
- "An application can either query this value or attempt to set it "
- "with its list of required features to check for library support.",
- 0, 0x7fffffff, 0xffff,
- .s2i = {{0x1, "gzip", _UNSUPPORTED_ZLIB},
- {0x2, "snappy", _UNSUPPORTED_SNAPPY},
- {0x4, "ssl", _UNSUPPORTED_SSL},
- {0x8, "sasl"},
- {0x10, "regex"},
- {0x20, "lz4"},
- {0x40, "sasl_gssapi", _UNSUPPORTED_GSSAPI},
- {0x80, "sasl_plain"},
- {0x100, "sasl_scram", _UNSUPPORTED_SSL},
- {0x200, "plugins"
-#if !WITH_PLUGINS
- ,
- .unsupported = "libdl/dlopen(3) not available at "
- "build time"
-#endif
- },
- {0x400, "zstd", _UNSUPPORTED_ZSTD},
- {0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL},
- {0x1000, "http", _UNSUPPORTED_HTTP},
- {0x2000, "oidc", _UNSUPPORTED_OIDC},
- {0, NULL}}},
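- /* _RK_C_S2F parsing sketch for the property above: a CSV value such
-  * as "gzip,snappy,lz4" OR:s the matching flags together,
-  * 0x1 | 0x2 | 0x20 = 0x23 (assuming those features are available in
-  * this build). */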
- {_RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str),
- "Client identifier.", .sdef = "rdkafka"},
- {_RK_GLOBAL | _RK_HIDDEN, "client.software.name", _RK_C_STR, _RK(sw_name),
- "Client software name as reported to broker version >= v2.4.0. "
- "Broker-side character restrictions apply, as of broker version "
- "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client "
- "will replace any other character with `-` and strip leading and "
- "trailing non-alphanumeric characters before tranmission to "
- "the broker. "
- "This property should only be set by high-level language "
- "librdkafka client bindings.",
- .sdef = "librdkafka"},
- {
- _RK_GLOBAL | _RK_HIDDEN,
- "client.software.version",
- _RK_C_STR,
- _RK(sw_version),
- "Client software version as reported to broker version >= v2.4.0. "
- "Broker-side character restrictions apply, as of broker version "
- "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client "
- "will replace any other character with `-` and strip leading and "
- "trailing non-alphanumeric characters before tranmission to "
- "the broker. "
- "This property should only be set by high-level language "
- "librdkafka client bindings."
- "If changing this property it is highly recommended to append the "
- "librdkafka version.",
- },
- {_RK_GLOBAL | _RK_HIGH, "metadata.broker.list", _RK_C_STR, _RK(brokerlist),
- "Initial list of brokers as a CSV list of broker host or host:port. "
- "The application may also use `rd_kafka_brokers_add()` to add "
- "brokers during runtime."},
- {_RK_GLOBAL | _RK_HIGH, "bootstrap.servers", _RK_C_ALIAS, 0,
- "See metadata.broker.list", .sdef = "metadata.broker.list"},
- {_RK_GLOBAL | _RK_MED, "message.max.bytes", _RK_C_INT, _RK(max_msg_size),
- "Maximum Kafka protocol request message size. "
- "Due to differing framing overhead between protocol versions the "
- "producer is unable to reliably enforce a strict max message limit "
- "at produce time and may exceed the maximum size by one message in "
- "protocol ProduceRequests, the broker will enforce the the topic's "
- "`max.message.bytes` limit (see Apache Kafka documentation).",
- 1000, 1000000000, 1000000},
- {_RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT, _RK(msg_copy_max_size),
- "Maximum size for message to be copied to buffer. "
- "Messages larger than this will be passed by reference (zero-copy) "
- "at the expense of larger iovecs.",
- 0, 1000000000, 0xffff},
- {_RK_GLOBAL | _RK_MED, "receive.message.max.bytes", _RK_C_INT,
- _RK(recv_max_msg_size),
- "Maximum Kafka protocol response message size. "
- "This serves as a safety precaution to avoid memory exhaustion in "
- "case of protocol hickups. "
- "This value must be at least `fetch.max.bytes` + 512 to allow "
- "for protocol overhead; the value is adjusted automatically "
- "unless the configuration property is explicitly set.",
- 1000, INT_MAX, 100000000},
- {_RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT,
- _RK(max_inflight),
- "Maximum number of in-flight requests per broker connection. "
- "This is a generic property applied to all broker communication, "
- "however it is primarily relevant to produce requests. "
- "In particular, note that other mechanisms limit the number "
- "of outstanding consumer fetch request per broker to one.",
- 1, 1000000, 1000000},
- {_RK_GLOBAL, "max.in.flight", _RK_C_ALIAS,
- .sdef = "max.in.flight.requests.per.connection"},
- {_RK_GLOBAL | _RK_DEPRECATED | _RK_HIDDEN, "metadata.request.timeout.ms",
- _RK_C_INT, _RK(metadata_request_timeout_ms), "Not used.", 10, 900 * 1000,
- 10},
- {_RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT,
- _RK(metadata_refresh_interval_ms),
- "Period of time in milliseconds at which topic and broker "
- "metadata is refreshed in order to proactively discover any new "
- "brokers, topics, partitions or partition leader changes. "
- "Use -1 to disable the intervalled refresh (not recommended). "
- "If there are no locally referenced topics "
- "(no topic objects created, no messages produced, "
- "no subscription or no assignment) then only the broker list will "
- "be refreshed every interval but no more often than every 10s.",
- -1, 3600 * 1000, 5 * 60 * 1000},
- {_RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT, _RK(metadata_max_age_ms),
- "Metadata cache max age. "
- "Defaults to topic.metadata.refresh.interval.ms * 3",
- 1, 24 * 3600 * 1000, 5 * 60 * 1000 * 3},
- {_RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT,
- _RK(metadata_refresh_fast_interval_ms),
- "When a topic loses its leader a new metadata request will be "
- "enqueued with this initial interval, exponentially increasing "
- "until the topic metadata has been refreshed. "
- "This is used to recover quickly from transitioning leader brokers.",
- 1, 60 * 1000, 250},
- {_RK_GLOBAL | _RK_DEPRECATED, "topic.metadata.refresh.fast.cnt", _RK_C_INT,
- _RK(metadata_refresh_fast_cnt), "No longer used.", 0, 1000, 10},
- {_RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL,
- _RK(metadata_refresh_sparse),
- "Sparse metadata requests (consumes less network bandwidth)", 0, 1, 1},
- {_RK_GLOBAL, "topic.metadata.propagation.max.ms", _RK_C_INT,
- _RK(metadata_propagation_max_ms),
- "Apache Kafka topic creation is asynchronous and it takes some "
- "time for a new topic to propagate throughout the cluster to all "
- "brokers. "
- "If a client requests topic metadata after manual topic creation but "
- "before the topic has been fully propagated to the broker the "
- "client is requesting metadata from, the topic will seem to be "
- "non-existent and the client will mark the topic as such, "
- "failing queued produced messages with `ERR__UNKNOWN_TOPIC`. "
- "This setting delays marking a topic as non-existent until the "
- "configured propagation max time has passed. "
- "The maximum propagation time is calculated from the time the "
- "topic is first referenced in the client, e.g., on produce().",
- 0, 60 * 60 * 1000, 30 * 1000},
- {_RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST, _RK(topic_blacklist),
- "Topic blacklist, a comma-separated list of regular expressions "
- "for matching topic names that should be ignored in "
- "broker metadata information as if the topics did not exist."},
- {_RK_GLOBAL | _RK_MED, "debug", _RK_C_S2F, _RK(debug),
- "A comma-separated list of debug contexts to enable. "
- "Detailed Producer debugging: broker,topic,msg. "
- "Consumer: consumer,cgrp,topic,fetch",
- .s2i = {{RD_KAFKA_DBG_GENERIC, "generic"},
- {RD_KAFKA_DBG_BROKER, "broker"},
- {RD_KAFKA_DBG_TOPIC, "topic"},
- {RD_KAFKA_DBG_METADATA, "metadata"},
- {RD_KAFKA_DBG_FEATURE, "feature"},
- {RD_KAFKA_DBG_QUEUE, "queue"},
- {RD_KAFKA_DBG_MSG, "msg"},
- {RD_KAFKA_DBG_PROTOCOL, "protocol"},
- {RD_KAFKA_DBG_CGRP, "cgrp"},
- {RD_KAFKA_DBG_SECURITY, "security"},
- {RD_KAFKA_DBG_FETCH, "fetch"},
- {RD_KAFKA_DBG_INTERCEPTOR, "interceptor"},
- {RD_KAFKA_DBG_PLUGIN, "plugin"},
- {RD_KAFKA_DBG_CONSUMER, "consumer"},
- {RD_KAFKA_DBG_ADMIN, "admin"},
- {RD_KAFKA_DBG_EOS, "eos"},
- {RD_KAFKA_DBG_MOCK, "mock"},
- {RD_KAFKA_DBG_ASSIGNOR, "assignor"},
- {RD_KAFKA_DBG_CONF, "conf"},
- {RD_KAFKA_DBG_ALL, "all"}}},
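- /* Application-side usage sketch for the debug property above, using
-  * the public setter declared in rdkafka.h:
-  *
-  *   char errstr[512];
-  *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
-  *   if (rd_kafka_conf_set(conf, "debug", "broker,topic,msg",
-  *                         errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
-  *       fprintf(stderr, "%s\n", errstr);
-  */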
- {_RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms),
- "Default timeout for network requests. "
- "Producer: ProduceRequests will use the lesser value of "
- "`socket.timeout.ms` and remaining `message.timeout.ms` for the "
- "first message in the batch. "
- "Consumer: FetchRequests will use "
- "`fetch.wait.max.ms` + `socket.timeout.ms`. "
- "Admin: Admin requests will use `socket.timeout.ms` or explicitly "
- "set `rd_kafka_AdminOptions_set_operation_timeout()` value.",
- 10, 300 * 1000, 60 * 1000},
- {_RK_GLOBAL | _RK_DEPRECATED, "socket.blocking.max.ms", _RK_C_INT,
- _RK(socket_blocking_max_ms), "No longer used.", 1, 60 * 1000, 1000},
- {_RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT, _RK(socket_sndbuf_size),
- "Broker socket send buffer size. System default is used if 0.", 0,
- 100000000, 0},
- {_RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT,
- _RK(socket_rcvbuf_size),
- "Broker socket receive buffer size. System default is used if 0.", 0,
- 100000000, 0},
- {_RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL, _RK(socket_keepalive),
- "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets", 0, 1, 0
-#ifndef SO_KEEPALIVE
- ,
- .unsupported = "SO_KEEPALIVE not available at build time"
-#endif
- },
- {_RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, _RK(socket_nagle_disable),
- "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", 0, 1, 0
-#ifndef TCP_NODELAY
- ,
- .unsupported = "TCP_NODELAY not available at build time"
-#endif
- },
- {_RK_GLOBAL, "socket.max.fails", _RK_C_INT, _RK(socket_max_fails),
- "Disconnect from broker when this number of send failures "
- "(e.g., timed out requests) is reached. Disable with 0. "
- "WARNING: It is highly recommended to leave this setting at "
- "its default value of 1 to avoid the client and broker to "
- "become desynchronized in case of request timeouts. "
- "NOTE: The connection is automatically re-established.",
- 0, 1000000, 1},
- {_RK_GLOBAL, "broker.address.ttl", _RK_C_INT, _RK(broker_addr_ttl),
- "How long to cache the broker address resolving "
- "results (milliseconds).",
- 0, 86400 * 1000, 1 * 1000},
- {_RK_GLOBAL, "broker.address.family", _RK_C_S2I, _RK(broker_addr_family),
- "Allowed broker IP address families: any, v4, v6", .vdef = AF_UNSPEC,
- .s2i =
- {
- {AF_UNSPEC, "any"},
- {AF_INET, "v4"},
- {AF_INET6, "v6"},
- }},
- {_RK_GLOBAL | _RK_MED, "socket.connection.setup.timeout.ms", _RK_C_INT,
- _RK(socket_connection_setup_timeout_ms),
- "Maximum time allowed for broker connection setup "
- "(TCP connection setup as well SSL and SASL handshake). "
- "If the connection to the broker is not fully functional after this "
- "the connection will be closed and retried.",
- 1000, INT_MAX, 30 * 1000 /* 30s */},
- {_RK_GLOBAL | _RK_MED, "connections.max.idle.ms", _RK_C_INT,
- _RK(connections_max_idle_ms),
- "Close broker connections after the specified time of "
- "inactivity. "
- "Disable with 0. "
- "If this property is left at its default value some heuristics are "
- "performed to determine a suitable default value, this is currently "
- "limited to identifying brokers on Azure "
- "(see librdkafka issue #3109 for more info).",
- 0, INT_MAX, 0},
- {_RK_GLOBAL | _RK_MED | _RK_HIDDEN, "enable.sparse.connections", _RK_C_BOOL,
- _RK(sparse_connections),
- "When enabled the client will only connect to brokers "
- "it needs to communicate with. When disabled the client "
- "will maintain connections to all brokers in the cluster.",
- 0, 1, 1},
- {_RK_GLOBAL | _RK_DEPRECATED, "reconnect.backoff.jitter.ms", _RK_C_INT,
- _RK(reconnect_jitter_ms),
- "No longer used. See `reconnect.backoff.ms` and "
- "`reconnect.backoff.max.ms`.",
- 0, 60 * 60 * 1000, 0},
- {_RK_GLOBAL | _RK_MED, "reconnect.backoff.ms", _RK_C_INT,
- _RK(reconnect_backoff_ms),
- "The initial time to wait before reconnecting to a broker "
- "after the connection has been closed. "
- "The time is increased exponentially until "
- "`reconnect.backoff.max.ms` is reached. "
- "-25% to +50% jitter is applied to each reconnect backoff. "
- "A value of 0 disables the backoff and reconnects immediately.",
- 0, 60 * 60 * 1000, 100},
- {_RK_GLOBAL | _RK_MED, "reconnect.backoff.max.ms", _RK_C_INT,
- _RK(reconnect_backoff_max_ms),
- "The maximum time to wait before reconnecting to a broker "
- "after the connection has been closed.",
- 0, 60 * 60 * 1000, 10 * 1000},
- {_RK_GLOBAL | _RK_HIGH, "statistics.interval.ms", _RK_C_INT,
- _RK(stats_interval_ms),
- "librdkafka statistics emit interval. The application also needs to "
- "register a stats callback using `rd_kafka_conf_set_stats_cb()`. "
- "The granularity is 1000ms. A value of 0 disables statistics.",
- 0, 86400 * 1000, 0},
- {_RK_GLOBAL, "enabled_events", _RK_C_INT, _RK(enabled_events),
- "See `rd_kafka_conf_set_events()`", 0, 0x7fffffff, 0},
- {_RK_GLOBAL, "error_cb", _RK_C_PTR, _RK(error_cb),
- "Error callback (set with rd_kafka_conf_set_error_cb())"},
- {_RK_GLOBAL, "throttle_cb", _RK_C_PTR, _RK(throttle_cb),
- "Throttle callback (set with rd_kafka_conf_set_throttle_cb())"},
- {_RK_GLOBAL, "stats_cb", _RK_C_PTR, _RK(stats_cb),
- "Statistics callback (set with rd_kafka_conf_set_stats_cb())"},
- {_RK_GLOBAL, "log_cb", _RK_C_PTR, _RK(log_cb),
- "Log callback (set with rd_kafka_conf_set_log_cb())",
- .pdef = rd_kafka_log_print},
- {_RK_GLOBAL, "log_level", _RK_C_INT, _RK(log_level),
- "Logging level (syslog(3) levels)", 0, 7, 6},
- {_RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue),
- "Disable spontaneous log_cb from internal librdkafka "
- "threads, instead enqueue log messages on queue set with "
- "`rd_kafka_set_log_queue()` and serve log callbacks or "
- "events through the standard poll APIs. "
- "**NOTE**: Log messages will linger in a temporary queue "
- "until the log queue has been set.",
- 0, 1, 0},
- {_RK_GLOBAL, "log.thread.name", _RK_C_BOOL, _RK(log_thread_name),
- "Print internal thread name in log messages "
- "(useful for debugging librdkafka internals)",
- 0, 1, 1},
- {_RK_GLOBAL, "enable.random.seed", _RK_C_BOOL, _RK(enable_random_seed),
- "If enabled librdkafka will initialize the PRNG "
- "with srand(current_time.milliseconds) on the first invocation of "
- "rd_kafka_new() (required only if rand_r() is not available on your "
- "platform). "
- "If disabled the application must call srand() prior to calling "
- "rd_kafka_new().",
- 0, 1, 1},
- {_RK_GLOBAL, "log.connection.close", _RK_C_BOOL, _RK(log_connection_close),
- "Log broker disconnects. "
- "It might be useful to turn this off when interacting with "
- "0.9 brokers with an aggressive `connections.max.idle.ms` value.",
- 0, 1, 1},
- {_RK_GLOBAL, "background_event_cb", _RK_C_PTR, _RK(background_event_cb),
- "Background queue event callback "
- "(set with rd_kafka_conf_set_background_event_cb())"},
- {_RK_GLOBAL, "socket_cb", _RK_C_PTR, _RK(socket_cb),
- "Socket creation callback to provide race-free CLOEXEC",
- .pdef =
-#ifdef __linux__
- rd_kafka_socket_cb_linux
-#else
- rd_kafka_socket_cb_generic
-#endif
- },
- {
- _RK_GLOBAL,
- "connect_cb",
- _RK_C_PTR,
- _RK(connect_cb),
- "Socket connect callback",
- },
- {
- _RK_GLOBAL,
- "closesocket_cb",
- _RK_C_PTR,
- _RK(closesocket_cb),
- "Socket close callback",
- },
- {_RK_GLOBAL, "open_cb", _RK_C_PTR, _RK(open_cb),
- "File open callback to provide race-free CLOEXEC",
- .pdef =
-#ifdef __linux__
- rd_kafka_open_cb_linux
-#else
- rd_kafka_open_cb_generic
-#endif
- },
- {_RK_GLOBAL, "resolve_cb", _RK_C_PTR, _RK(resolve_cb),
- "Address resolution callback (set with rd_kafka_conf_set_resolve_cb())."},
- {_RK_GLOBAL, "opaque", _RK_C_PTR, _RK(opaque),
- "Application opaque (set with rd_kafka_conf_set_opaque())"},
- {_RK_GLOBAL, "default_topic_conf", _RK_C_PTR, _RK(topic_conf),
- "Default topic configuration for automatically subscribed topics"},
- {_RK_GLOBAL, "internal.termination.signal", _RK_C_INT, _RK(term_sig),
- "Signal that librdkafka will use to quickly terminate on "
- "rd_kafka_destroy(). If this signal is not set then there will be a "
- "delay before rd_kafka_wait_destroyed() returns true "
- "as internal threads are timing out their system calls. "
- "If this signal is set however the delay will be minimal. "
- "The application should mask this signal as an internal "
- "signal handler is installed.",
- 0, 128, 0},
- {_RK_GLOBAL | _RK_HIGH, "api.version.request", _RK_C_BOOL,
- _RK(api_version_request),
- "Request broker's supported API versions to adjust functionality to "
- "available protocol features. If set to false, or the "
- "ApiVersionRequest fails, the fallback version "
- "`broker.version.fallback` will be used. "
- "**NOTE**: Depends on broker version >=0.10.0. If the request is not "
- "supported by (an older) broker the `broker.version.fallback` fallback is "
- "used.",
- 0, 1, 1},
- {_RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT,
- _RK(api_version_request_timeout_ms),
- "Timeout for broker API version requests.", 1, 5 * 60 * 1000, 10 * 1000},
- {_RK_GLOBAL | _RK_MED, "api.version.fallback.ms", _RK_C_INT,
- _RK(api_version_fallback_ms),
- "Dictates how long the `broker.version.fallback` fallback is used "
- "in the case the ApiVersionRequest fails. "
- "**NOTE**: The ApiVersionRequest is only issued when a new connection "
- "to the broker is made (such as after an upgrade).",
- 0, 86400 * 7 * 1000, 0},
-
- {_RK_GLOBAL | _RK_MED, "broker.version.fallback", _RK_C_STR,
- _RK(broker_version_fallback),
- "Older broker versions (before 0.10.0) provide no way for a client to "
- "query "
- "for supported protocol features "
- "(ApiVersionRequest, see `api.version.request`) making it impossible "
- "for the client to know what features it may use. "
- "As a workaround a user may set this property to the expected broker "
- "version and the client will automatically adjust its feature set "
- "accordingly if the ApiVersionRequest fails (or is disabled). "
- "The fallback broker version will be used for `api.version.fallback.ms`. "
- "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. "
- "Any other value >= 0.10, such as 0.10.2.1, "
- "enables ApiVersionRequests.",
- .sdef = "0.10.0", .validate = rd_kafka_conf_validate_broker_version},
- {_RK_GLOBAL, "allow.auto.create.topics", _RK_C_BOOL,
- _RK(allow_auto_create_topics),
- "Allow automatic topic creation on the broker when subscribing to "
- "or assigning non-existent topics. "
- "The broker must also be configured with "
- "`auto.create.topics.enable=true` for this configuration to "
- "take effect. "
- "Note: the default value (true) for the producer is "
- "different from the default value (false) for the consumer. "
- "Further, the consumer default value is different from the Java "
- "consumer (true), and this property is not supported by the Java "
- "producer. Requires broker version >= 0.11.0.0, for older broker "
- "versions only the broker configuration applies.",
- 0, 1, 0},
-
- /* Security related global properties */
- {_RK_GLOBAL | _RK_HIGH, "security.protocol", _RK_C_S2I,
- _RK(security_protocol), "Protocol used to communicate with brokers.",
- .vdef = RD_KAFKA_PROTO_PLAINTEXT,
- .s2i = {{RD_KAFKA_PROTO_PLAINTEXT, "plaintext"},
- {RD_KAFKA_PROTO_SSL, "ssl", _UNSUPPORTED_SSL},
- {RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext"},
- {RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl", _UNSUPPORTED_SSL},
- {0, NULL}}},
-
- {_RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR, _RK(ssl.cipher_suites),
- "A cipher suite is a named combination of authentication, "
- "encryption, MAC and key exchange algorithm used to negotiate the "
- "security settings for a network connection using TLS or SSL network "
- "protocol. See manual page for `ciphers(1)` and "
- "`SSL_CTX_set_cipher_list(3).",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.curves.list", _RK_C_STR, _RK(ssl.curves_list),
- "The supported-curves extension in the TLS ClientHello message specifies "
- "the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client "
- "is willing to have the server use. See manual page for "
- "`SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.",
- _UNSUPPORTED_OPENSSL_1_0_2},
- {_RK_GLOBAL, "ssl.sigalgs.list", _RK_C_STR, _RK(ssl.sigalgs_list),
- "The client uses the TLS ClientHello signature_algorithms extension "
- "to indicate to the server which signature/hash algorithm pairs "
- "may be used in digital signatures. See manual page for "
- "`SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.",
- _UNSUPPORTED_OPENSSL_1_0_2},
- {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.location", _RK_C_STR,
- _RK(ssl.key_location),
- "Path to client's private key (PEM) used for authentication.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.password", _RK_C_STR,
- _RK(ssl.key_password),
- "Private key passphrase (for use with `ssl.key.location` "
- "and `set_ssl_cert()`)",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.pem", _RK_C_STR, _RK(ssl.key_pem),
- "Client's private key string (PEM format) used for authentication.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL | _RK_SENSITIVE, "ssl_key", _RK_C_INTERNAL, _RK(ssl.key),
- "Client's private key as set by rd_kafka_conf_set_ssl_cert()",
- .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy,
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.certificate.location", _RK_C_STR, _RK(ssl.cert_location),
- "Path to client's public key (PEM) used for authentication.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.certificate.pem", _RK_C_STR, _RK(ssl.cert_pem),
- "Client's public key string (PEM format) used for authentication.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl_certificate", _RK_C_INTERNAL, _RK(ssl.key),
- "Client's public key as set by rd_kafka_conf_set_ssl_cert()",
- .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy,
- _UNSUPPORTED_SSL},
-
- {_RK_GLOBAL, "ssl.ca.location", _RK_C_STR, _RK(ssl.ca_location),
- "File or directory path to CA certificate(s) for verifying "
- "the broker's key. "
- "Defaults: "
- "On Windows the system's CA certificates are automatically looked "
- "up in the Windows Root certificate store. "
- "On Mac OSX this configuration defaults to `probe`. "
- "It is recommended to install openssl using Homebrew, "
- "to provide CA certificates. "
- "On Linux install the distribution's ca-certificates package. "
- "If OpenSSL is statically linked or `ssl.ca.location` is set to "
- "`probe` a list of standard paths will be probed and the first one "
- "found will be used as the default CA certificate location path. "
- "If OpenSSL is dynamically linked the OpenSSL library's default "
- "path will be used (see `OPENSSLDIR` in `openssl version -a`).",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL | _RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, _RK(ssl.ca_pem),
- "CA certificate string (PEM format) for verifying the broker's key.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, _RK(ssl.ca),
- "CA certificate as set by rd_kafka_conf_set_ssl_cert()",
- .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy,
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.ca.certificate.stores", _RK_C_STR,
- _RK(ssl.ca_cert_stores),
- "Comma-separated list of Windows Certificate stores to load "
- "CA certificates from. Certificates will be loaded in the same "
- "order as stores are specified. If no certificates can be loaded "
- "from any of the specified stores an error is logged and the "
- "OpenSSL library's default CA location is used instead. "
- "Store names are typically one or more of: MY, Root, Trust, CA.",
- .sdef = "Root",
-#if !defined(_WIN32)
- .unsupported = "configuration only valid on Windows"
-#endif
- },
-
- {_RK_GLOBAL, "ssl.crl.location", _RK_C_STR, _RK(ssl.crl_location),
- "Path to CRL for verifying broker's certificate validity.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.keystore.location", _RK_C_STR, _RK(ssl.keystore_location),
- "Path to client's keystore (PKCS#12) used for authentication.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL | _RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR,
- _RK(ssl.keystore_password), "Client's keystore (PKCS#12) password.",
- _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.providers", _RK_C_STR, _RK(ssl.providers),
- "Comma-separated list of OpenSSL 3.0.x implementation providers. "
- "E.g., \"default,legacy\".",
- _UNSUPPORTED_SSL_3},
- {_RK_GLOBAL | _RK_DEPRECATED, "ssl.engine.location", _RK_C_STR,
- _RK(ssl.engine_location),
- "Path to OpenSSL engine library. OpenSSL >= 1.1.x required. "
- "DEPRECATED: OpenSSL engine support is deprecated and should be "
- "replaced by OpenSSL 3 providers.",
- _UNSUPPORTED_SSL_ENGINE},
- {_RK_GLOBAL, "ssl.engine.id", _RK_C_STR, _RK(ssl.engine_id),
- "OpenSSL engine id is the name used for loading engine.",
- .sdef = "dynamic", _UNSUPPORTED_SSL_ENGINE},
- {_RK_GLOBAL, "ssl_engine_callback_data", _RK_C_PTR,
- _RK(ssl.engine_callback_data),
- "OpenSSL engine callback data (set "
- "with rd_kafka_conf_set_engine_callback_data()).",
- _UNSUPPORTED_SSL_ENGINE},
- {_RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL,
- _RK(ssl.enable_verify),
- "Enable OpenSSL's builtin broker (server) certificate verification. "
- "This verification can be extended by the application by "
- "implementing a certificate_verify_cb.",
- 0, 1, 1, _UNSUPPORTED_SSL},
- {_RK_GLOBAL, "ssl.endpoint.identification.algorithm", _RK_C_S2I,
- _RK(ssl.endpoint_identification),
- "Endpoint identification algorithm to validate broker "
- "hostname using broker certificate. "
- "https - Server (broker) hostname verification as "
- "specified in RFC2818. "
- "none - No endpoint verification. "
- "OpenSSL >= 1.0.2 required.",
- .vdef = RD_KAFKA_SSL_ENDPOINT_ID_HTTPS,
- .s2i = {{RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none"},
- {RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https"}},
- _UNSUPPORTED_OPENSSL_1_0_2},
- {_RK_GLOBAL, "ssl.certificate.verify_cb", _RK_C_PTR,
- _RK(ssl.cert_verify_cb),
- "Callback to verify the broker certificate chain.", _UNSUPPORTED_SSL},
-
- /* Point user in the right direction if they try to apply
- * Java client SSL / JAAS properties. */
- {_RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID, _RK(dummy),
- "Java TrustStores are not supported, use `ssl.ca.location` "
- "and a certificate file instead. "
- "See "
- "https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka "
- "for more information."},
- {_RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, _RK(dummy),
- "Java JAAS configuration is not supported, see "
- "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka "
- "for more information."},
-
- {_RK_GLOBAL | _RK_HIGH, "sasl.mechanisms", _RK_C_STR, _RK(sasl.mechanisms),
- "SASL mechanism to use for authentication. "
- "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. "
- "**NOTE**: Despite the name only one mechanism must be configured.",
- .sdef = "GSSAPI", .validate = rd_kafka_conf_validate_single},
- {_RK_GLOBAL | _RK_HIGH, "sasl.mechanism", _RK_C_ALIAS,
- .sdef = "sasl.mechanisms"},
- {_RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR,
- _RK(sasl.service_name),
- "Kerberos principal name that Kafka runs as, "
- "not including /hostname@REALM",
- .sdef = "kafka"},
- {_RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR, _RK(sasl.principal),
- "This client's Kerberos principal name. "
- "(Not supported on Windows, will use the logon user's principal).",
- .sdef = "kafkaclient"},
- {_RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR, _RK(sasl.kinit_cmd),
- "Shell command to refresh or acquire the client's Kerberos ticket. "
- "This command is executed on client creation and every "
- "sasl.kerberos.min.time.before.relogin (0=disable). "
- "%{config.prop.name} is replaced by corresponding config "
- "object value.",
- .sdef =
- /* First attempt to refresh, else acquire. */
- "kinit -R -t \"%{sasl.kerberos.keytab}\" "
- "-k %{sasl.kerberos.principal} || "
- "kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}",
- _UNSUPPORTED_WIN32_GSSAPI},
- {_RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR, _RK(sasl.keytab),
- "Path to Kerberos keytab file. "
- "This configuration property is only used as a variable in "
- "`sasl.kerberos.kinit.cmd` as "
- "` ... -t \"%{sasl.kerberos.keytab}\"`.",
- _UNSUPPORTED_WIN32_GSSAPI},
- {_RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT,
- _RK(sasl.relogin_min_time),
- "Minimum time in milliseconds between key refresh attempts. "
- "Disable automatic key refresh by setting this property to 0.",
- 0, 86400 * 1000, 60 * 1000, _UNSUPPORTED_WIN32_GSSAPI},
- {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.username", _RK_C_STR,
- _RK(sasl.username),
- "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms"},
- {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.password", _RK_C_STR,
- _RK(sasl.password),
- "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism"},
- {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.config", _RK_C_STR,
- _RK(sasl.oauthbearer_config),
- "SASL/OAUTHBEARER configuration. The format is "
- "implementation-dependent and must be parsed accordingly. The "
- "default unsecured token implementation (see "
- "https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes "
- "space-separated name=value pairs with valid names including "
- "principalClaimName, principal, scopeClaimName, scope, and "
- "lifeSeconds. The default value for principalClaimName is \"sub\", "
- "the default value for scopeClaimName is \"scope\", and the default "
- "value for lifeSeconds is 3600. The scope value is CSV format with "
- "the default value being no/empty scope. For example: "
- "`principalClaimName=azp principal=admin scopeClaimName=roles "
- "scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions "
- "can be communicated to the broker via "
- "`extension_NAME=value`. For example: "
- "`principal=admin extension_traceId=123`",
- _UNSUPPORTED_OAUTHBEARER},
- {_RK_GLOBAL, "enable.sasl.oauthbearer.unsecure.jwt", _RK_C_BOOL,
- _RK(sasl.enable_oauthbearer_unsecure_jwt),
- "Enable the builtin unsecure JWT OAUTHBEARER token handler "
- "if no oauthbearer_refresh_cb has been set. "
- "This builtin handler should only be used for development "
- "or testing, and not in production.",
- 0, 1, 0, _UNSUPPORTED_OAUTHBEARER},
- {_RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR,
- _RK(sasl.oauthbearer.token_refresh_cb),
- "SASL/OAUTHBEARER token refresh callback (set with "
- "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by "
- "rd_kafka_poll(), et.al. "
- "This callback will be triggered when it is time to refresh "
- "the client's OAUTHBEARER token. "
- "Also see `rd_kafka_conf_enable_sasl_queue()`.",
- _UNSUPPORTED_OAUTHBEARER},
- {
- _RK_GLOBAL | _RK_HIDDEN,
- "enable_sasl_queue",
- _RK_C_BOOL,
- _RK(sasl.enable_callback_queue),
- "Enable the SASL callback queue "
- "(set with rd_kafka_conf_enable_sasl_queue()).",
- 0,
- 1,
- 0,
- },
- {_RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I,
- _RK(sasl.oauthbearer.method),
- "Set to \"default\" or \"oidc\" to control which login method "
- "to be used. If set to \"oidc\", the following properties must also be "
- "be specified: "
- "`sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, "
- "and `sasl.oauthbearer.token.endpoint.url`.",
- .vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT,
- .s2i = {{RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default"},
- {RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc"}},
- _UNSUPPORTED_OIDC},
- {_RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR,
- _RK(sasl.oauthbearer.client_id),
- "Public identifier for the application. "
- "Must be unique across all clients that the "
- "authorization server handles. "
- "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
- _UNSUPPORTED_OIDC},
- {_RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR,
- _RK(sasl.oauthbearer.client_secret),
- "Client secret only known to the application and the "
- "authorization server. This should be a sufficiently random string "
- "that is not guessable. "
- "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
- _UNSUPPORTED_OIDC},
- {_RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR,
- _RK(sasl.oauthbearer.scope),
- "Client use this to specify the scope of the access request to the "
- "broker. "
- "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
- _UNSUPPORTED_OIDC},
- {_RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR,
- _RK(sasl.oauthbearer.extensions_str),
- "Allow additional information to be provided to the broker. "
- "Comma-separated list of key=value pairs. "
- "E.g., \"supportFeatureX=true,organizationId=sales-emea\"."
- "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
- _UNSUPPORTED_OIDC},
- {_RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR,
- _RK(sasl.oauthbearer.token_endpoint_url),
- "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. "
- "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
- _UNSUPPORTED_OIDC},
-
- /* Plugins */
- {_RK_GLOBAL, "plugin.library.paths", _RK_C_STR, _RK(plugin_paths),
- "List of plugin libraries to load (; separated). "
- "The library search path is platform dependent (see dlopen(3) for "
- "Unix and LoadLibrary() for Windows). If no filename extension is "
- "specified the platform-specific extension (such as .dll or .so) "
- "will be appended automatically.",
-#if WITH_PLUGINS
- .set = rd_kafka_plugins_conf_set
-#else
- .unsupported = "libdl/dlopen(3) not available at build time"
-#endif
- },
-
- /* Interceptors are added through specific API and not exposed
- * as configuration properties.
- * The interceptor property must be defined after plugin.library.paths
- * so that the plugin libraries are properly loaded before
- * interceptors are configured when duplicating configuration objects.*/
- {_RK_GLOBAL, "interceptors", _RK_C_INTERNAL, _RK(interceptors),
- "Interceptors added through rd_kafka_conf_interceptor_add_..() "
- "and any configuration handled by interceptors.",
- .ctor = rd_kafka_conf_interceptor_ctor,
- .dtor = rd_kafka_conf_interceptor_dtor,
- .copy = rd_kafka_conf_interceptor_copy},
-
- /* Test mocks. */
- {_RK_GLOBAL | _RK_HIDDEN, "test.mock.num.brokers", _RK_C_INT,
- _RK(mock.broker_cnt),
- "Number of mock brokers to create. "
- "This will automatically overwrite `bootstrap.servers` with the "
- "mock broker list.",
- 0, 10000, 0},
- {_RK_GLOBAL | _RK_HIDDEN, "test.mock.broker.rtt", _RK_C_INT,
- _RK(mock.broker_rtt), "Simulated mock broker latency in milliseconds.", 0,
- 60 * 60 * 1000 /*1h*/, 0},
-
- /* Unit test interfaces.
- * These are not part of the public API and may change at any time.
- * Only to be used by the librdkafka tests. */
- {_RK_GLOBAL | _RK_HIDDEN, "ut_handle_ProduceResponse", _RK_C_PTR,
- _RK(ut.handle_ProduceResponse),
- "ProduceResponse handler: "
- "rd_kafka_resp_err_t (*cb) (rd_kafka_t *rk, "
- "int32_t brokerid, uint64_t msgid, rd_kafka_resp_err_t err)"},
-
- /* Global consumer group properties */
- {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.id", _RK_C_STR, _RK(group_id_str),
- "Client group id string. All clients sharing the same group.id "
- "belong to the same group."},
- {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.instance.id", _RK_C_STR,
- _RK(group_instance_id),
- "Enable static group membership. "
- "Static group members are able to leave and rejoin a group "
- "within the configured `session.timeout.ms` without prompting a "
- "group rebalance. This should be used in combination with a larger "
- "`session.timeout.ms` to avoid group rebalances caused by transient "
- "unavailability (e.g. process restarts). "
- "Requires broker version >= 2.3.0."},
- {_RK_GLOBAL | _RK_CGRP | _RK_MED, "partition.assignment.strategy",
- _RK_C_STR, _RK(partition_assignment_strategy),
- "The name of one or more partition assignment strategies. The "
- "elected group leader will use a strategy supported by all "
- "members of the group to assign partitions to group members. If "
- "there is more than one eligible strategy, preference is "
- "determined by the order of this list (strategies earlier in the "
- "list have higher priority). "
- "Cooperative and non-cooperative (eager) strategies must not be "
- "mixed. "
- "Available strategies: range, roundrobin, cooperative-sticky.",
- .sdef = "range,roundrobin"},
- {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "session.timeout.ms", _RK_C_INT,
- _RK(group_session_timeout_ms),
- "Client group session and failure detection timeout. "
- "The consumer sends periodic heartbeats (heartbeat.interval.ms) "
- "to indicate its liveness to the broker. If no hearts are "
- "received by the broker for a group member within the "
- "session timeout, the broker will remove the consumer from "
- "the group and trigger a rebalance. "
- "The allowed range is configured with the **broker** configuration "
- "properties `group.min.session.timeout.ms` and "
- "`group.max.session.timeout.ms`. "
- "Also see `max.poll.interval.ms`.",
- 1, 3600 * 1000, 45 * 1000},
- {_RK_GLOBAL | _RK_CGRP, "heartbeat.interval.ms", _RK_C_INT,
- _RK(group_heartbeat_intvl_ms),
- "Group session keepalive heartbeat interval.", 1, 3600 * 1000, 3 * 1000},
- {_RK_GLOBAL | _RK_CGRP, "group.protocol.type", _RK_C_KSTR,
- _RK(group_protocol_type),
- "Group protocol type. NOTE: Currently, the only supported group "
- "protocol type is `consumer`.",
- .sdef = "consumer"},
- {_RK_GLOBAL | _RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT,
- _RK(coord_query_intvl_ms),
- "How often to query for the current client group coordinator. "
- "If the currently assigned coordinator is down the configured "
- "query interval will be divided by ten to more quickly recover "
- "in case of coordinator reassignment.",
- 1, 3600 * 1000, 10 * 60 * 1000},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "max.poll.interval.ms", _RK_C_INT,
- _RK(max_poll_interval_ms),
- "Maximum allowed time between calls to consume messages "
- "(e.g., rd_kafka_consumer_poll()) for high-level consumers. "
- "If this interval is exceeded the consumer is considered failed "
- "and the group will rebalance in order to reassign the "
- "partitions to another consumer group member. "
- "Warning: Offset commits may be not possible at this point. "
- "Note: It is recommended to set `enable.auto.offset.store=false` "
- "for long-time processing applications and then explicitly store "
- "offsets (using offsets_store()) *after* message processing, to "
- "make sure offsets are not auto-committed prior to processing "
- "has finished. "
- "The interval is checked two times per second. "
- "See KIP-62 for more information.",
- 1, 86400 * 1000, 300000},
-
- /* Global consumer properties */
- {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.commit", _RK_C_BOOL,
- _RK(enable_auto_commit),
- "Automatically and periodically commit offsets in the background. "
- "Note: setting this to false does not prevent the consumer from "
- "fetching previously committed start offsets. To circumvent this "
- "behaviour set specific start offsets per partition in the call "
- "to assign().",
- 0, 1, 1},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "auto.commit.interval.ms", _RK_C_INT,
- _RK(auto_commit_interval_ms),
- "The frequency in milliseconds that the consumer offsets "
- "are committed (written) to offset storage. (0 = disable). "
- "This setting is used by the high-level consumer.",
- 0, 86400 * 1000, 5 * 1000},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.offset.store",
- _RK_C_BOOL, _RK(enable_auto_offset_store),
- "Automatically store offset of last message provided to "
- "application. "
- "The offset store is an in-memory store of the next offset to "
- "(auto-)commit for each partition.",
- 0, 1, 1},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.min.messages", _RK_C_INT,
- _RK(queued_min_msgs),
- "Minimum number of messages per topic+partition "
- "librdkafka tries to maintain in the local consumer queue.",
- 1, 10000000, 100000},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.max.messages.kbytes",
- _RK_C_INT, _RK(queued_max_msg_kbytes),
- "Maximum number of kilobytes of queued pre-fetched messages "
- "in the local consumer queue. "
- "If using the high-level consumer this setting applies to the "
- "single consumer queue, regardless of the number of partitions. "
- "When using the legacy simple consumer or when separate "
- "partition queues are used this setting applies per partition. "
- "This value may be overshot by fetch.message.max.bytes. "
- "This property has higher priority than queued.min.messages.",
- 1, INT_MAX / 1024, 0x10000 /*64MB*/},
- {_RK_GLOBAL | _RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT,
- _RK(fetch_wait_max_ms),
- "Maximum time the broker may wait to fill the Fetch response "
- "with fetch.min.bytes of messages.",
- 0, 300 * 1000, 500},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.message.max.bytes", _RK_C_INT,
- _RK(fetch_msg_max_bytes),
- "Initial maximum number of bytes per topic+partition to request when "
- "fetching messages from the broker. "
- "If the client encounters a message larger than this value "
- "it will gradually try to increase it until the "
- "entire message can be fetched.",
- 1, 1000000000, 1024 * 1024},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "max.partition.fetch.bytes",
- _RK_C_ALIAS, .sdef = "fetch.message.max.bytes"},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.max.bytes", _RK_C_INT,
- _RK(fetch_max_bytes),
- "Maximum amount of data the broker shall return for a Fetch request. "
- "Messages are fetched in batches by the consumer and if the first "
- "message batch in the first non-empty partition of the Fetch request "
- "is larger than this value, then the message batch will still be "
- "returned to ensure the consumer can make progress. "
- "The maximum message batch size accepted by the broker is defined "
- "via `message.max.bytes` (broker config) or "
- "`max.message.bytes` (broker topic config). "
- "`fetch.max.bytes` is automatically adjusted upwards to be "
- "at least `message.max.bytes` (consumer config).",
- 0, INT_MAX - 512, 50 * 1024 * 1024 /* 50MB */},
- {_RK_GLOBAL | _RK_CONSUMER, "fetch.min.bytes", _RK_C_INT,
- _RK(fetch_min_bytes),
- "Minimum number of bytes the broker responds with. "
- "If fetch.wait.max.ms expires the accumulated data will "
- "be sent to the client regardless of this setting.",
- 1, 100000000, 1},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.error.backoff.ms", _RK_C_INT,
- _RK(fetch_error_backoff_ms),
- "How long to postpone the next fetch request for a "
- "topic+partition in case of a fetch error.",
- 0, 300 * 1000, 500},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method",
- _RK_C_S2I, _RK(offset_store_method),
- "Offset commit store method: "
- "'file' - DEPRECATED: local file store (offset.store.path, et.al), "
- "'broker' - broker commit store "
- "(requires Apache Kafka 0.8.2 or later on the broker).",
- .vdef = RD_KAFKA_OFFSET_METHOD_BROKER,
- .s2i = {{RD_KAFKA_OFFSET_METHOD_NONE, "none"},
- {RD_KAFKA_OFFSET_METHOD_FILE, "file"},
- {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "isolation.level", _RK_C_S2I,
- _RK(isolation_level),
- "Controls how to read messages written transactionally: "
- "`read_committed` - only return transactional messages which have "
- "been committed. `read_uncommitted` - return all messages, even "
- "transactional messages which have been aborted.",
- .vdef = RD_KAFKA_READ_COMMITTED,
- .s2i = {{RD_KAFKA_READ_UNCOMMITTED, "read_uncommitted"},
- {RD_KAFKA_READ_COMMITTED, "read_committed"}}},
- {_RK_GLOBAL | _RK_CONSUMER, "consume_cb", _RK_C_PTR, _RK(consume_cb),
- "Message consume callback (set with rd_kafka_conf_set_consume_cb())"},
- {_RK_GLOBAL | _RK_CONSUMER, "rebalance_cb", _RK_C_PTR, _RK(rebalance_cb),
- "Called after consumer group has been rebalanced "
- "(set with rd_kafka_conf_set_rebalance_cb())"},
- {_RK_GLOBAL | _RK_CONSUMER, "offset_commit_cb", _RK_C_PTR,
- _RK(offset_commit_cb),
- "Offset commit result propagation callback. "
- "(set with rd_kafka_conf_set_offset_commit_cb())"},
- {_RK_GLOBAL | _RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL,
- _RK(enable_partition_eof),
- "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the "
- "consumer reaches the end of a partition.",
- 0, 1, 0},
- {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "check.crcs", _RK_C_BOOL,
- _RK(check_crcs),
- "Verify CRC32 of consumed messages, ensuring no on-the-wire or "
- "on-disk corruption to the messages occurred. This check comes "
- "at slightly increased CPU usage.",
- 0, 1, 0},
- {_RK_GLOBAL, "client.rack", _RK_C_KSTR, _RK(client_rack),
- "A rack identifier for this client. This can be any string value "
- "which indicates where this client is physically located. It "
- "corresponds with the broker config `broker.rack`.",
- .sdef = ""},
-
- /* Global producer properties */
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "transactional.id", _RK_C_STR,
- _RK(eos.transactional_id),
- "Enables the transactional producer. "
- "The transactional.id is used to identify the same transactional "
- "producer instance across process restarts. "
- "It allows the producer to guarantee that transactions corresponding "
- "to earlier instances of the same producer have been finalized "
- "prior to starting any new transactions, and that any "
- "zombie instances are fenced off. "
- "If no transactional.id is provided, then the producer is limited "
- "to idempotent delivery (if enable.idempotence is set). "
- "Requires broker version >= 0.11.0."},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "transaction.timeout.ms", _RK_C_INT,
- _RK(eos.transaction_timeout_ms),
- "The maximum amount of time in milliseconds that the transaction "
- "coordinator will wait for a transaction status update from the "
- "producer before proactively aborting the ongoing transaction. "
- "If this value is larger than the `transaction.max.timeout.ms` "
- "setting in the broker, the init_transactions() call will fail with "
- "ERR_INVALID_TRANSACTION_TIMEOUT. "
- "The transaction timeout automatically adjusts "
- "`message.timeout.ms` and `socket.timeout.ms`, unless explicitly "
- "configured in which case they must not exceed the "
- "transaction timeout (`socket.timeout.ms` must be at least 100ms "
- "lower than `transaction.timeout.ms`). "
- "This is also the default timeout value if no timeout (-1) is "
- "supplied to the transactional API methods.",
- 1000, INT_MAX, 60000},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "enable.idempotence", _RK_C_BOOL,
- _RK(eos.idempotence),
- "When set to `true`, the producer will ensure that messages are "
- "successfully produced exactly once and in the original produce "
- "order. "
- "The following configuration properties are adjusted automatically "
- "(if not modified by the user) when idempotence is enabled: "
- "`max.in.flight.requests.per.connection=" RD_KAFKA_IDEMP_MAX_INFLIGHT_STR
- "` (must be less than or "
- "equal to " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "), `retries=INT32_MAX` "
- "(must be greater than 0), `acks=all`, `queuing.strategy=fifo`. "
- "Producer instantation will fail if user-supplied configuration "
- "is incompatible.",
- 0, 1, 0},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_EXPERIMENTAL, "enable.gapless.guarantee",
- _RK_C_BOOL, _RK(eos.gapless),
- "When set to `true`, any error that could result in a gap "
- "in the produced message series when a batch of messages fails, "
- "will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop "
- "the producer. "
- "Messages failing due to `message.timeout.ms` are not covered "
- "by this guarantee. "
- "Requires `enable.idempotence=true`.",
- 0, 1, 0},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.messages",
- _RK_C_INT, _RK(queue_buffering_max_msgs),
- "Maximum number of messages allowed on the producer queue. "
- "This queue is shared by all topics and partitions. A value of 0 disables "
- "this limit.",
- 0, INT_MAX, 100000},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.kbytes",
- _RK_C_INT, _RK(queue_buffering_max_kbytes),
- "Maximum total message size sum allowed on the producer queue. "
- "This queue is shared by all topics and partitions. "
- "This property has higher priority than queue.buffering.max.messages.",
- 1, INT_MAX, 0x100000 /*1GB*/},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.ms", _RK_C_DBL,
- _RK(buffering_max_ms_dbl),
- "Delay in milliseconds to wait for messages in the producer queue "
- "to accumulate before constructing message batches (MessageSets) to "
- "transmit to brokers. "
- "A higher value allows larger and more effective "
- "(less overhead, improved compression) batches of messages to "
- "accumulate at the expense of increased message delivery latency.",
- .dmin = 0, .dmax = 900.0 * 1000.0, .ddef = 5.0},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "linger.ms", _RK_C_ALIAS,
- .sdef = "queue.buffering.max.ms"},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "message.send.max.retries",
- _RK_C_INT, _RK(max_retries),
- "How many times to retry sending a failing Message. "
- "**Note:** retrying may cause reordering unless "
- "`enable.idempotence` is set to true.",
- 0, INT32_MAX, INT32_MAX},
- {_RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS,
- .sdef = "message.send.max.retries"},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "retry.backoff.ms", _RK_C_INT,
- _RK(retry_backoff_ms),
- "The backoff time in milliseconds before retrying a protocol request.", 1,
- 300 * 1000, 100},
-
- {_RK_GLOBAL | _RK_PRODUCER, "queue.buffering.backpressure.threshold",
- _RK_C_INT, _RK(queue_backpressure_thres),
- "The threshold of outstanding not yet transmitted broker requests "
- "needed to backpressure the producer's message accumulator. "
- "If the number of not yet transmitted requests equals or exceeds "
- "this number, produce request creation that would have otherwise "
- "been triggered (for example, in accordance with linger.ms) will be "
- "delayed. A lower number yields larger and more effective batches. "
- "A higher value can improve latency when using compression on slow "
- "machines.",
- 1, 1000000, 1},
-
- {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.codec", _RK_C_S2I,
- _RK(compression_codec),
- "compression codec to use for compressing message sets. "
- "This is the default value for all topics, may be overridden by "
- "the topic configuration property `compression.codec`. ",
- .vdef = RD_KAFKA_COMPRESSION_NONE,
- .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"},
- {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB},
- {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY},
- {RD_KAFKA_COMPRESSION_LZ4, "lz4"},
- {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD},
- {0}}},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.type", _RK_C_ALIAS,
- .sdef = "compression.codec"},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.num.messages", _RK_C_INT,
- _RK(batch_num_messages),
- "Maximum number of messages batched in one MessageSet. "
- "The total MessageSet size is also limited by batch.size and "
- "message.max.bytes.",
- 1, 1000000, 10000},
- {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.size", _RK_C_INT,
- _RK(batch_size),
- "Maximum size (in bytes) of all messages batched in one MessageSet, "
- "including protocol framing overhead. "
- "This limit is applied after the first message has been added "
- "to the batch, regardless of the first message's size, this is to "
- "ensure that messages that exceed batch.size are produced. "
- "The total MessageSet size is also limited by batch.num.messages and "
- "message.max.bytes.",
- 1, INT_MAX, 1000000},
- {_RK_GLOBAL | _RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL,
- _RK(dr_err_only), "Only provide delivery reports for failed messages.", 0,
- 1, 0},
- {_RK_GLOBAL | _RK_PRODUCER, "dr_cb", _RK_C_PTR, _RK(dr_cb),
- "Delivery report callback (set with rd_kafka_conf_set_dr_cb())"},
- {_RK_GLOBAL | _RK_PRODUCER, "dr_msg_cb", _RK_C_PTR, _RK(dr_msg_cb),
- "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())"},
- {_RK_GLOBAL | _RK_PRODUCER, "sticky.partitioning.linger.ms", _RK_C_INT,
- _RK(sticky_partition_linger_ms),
- "Delay in milliseconds to wait to assign new sticky partitions for "
- "each topic. "
- "By default, set to double the time of linger.ms. To disable sticky "
- "behavior, set to 0. "
- "This behavior affects messages with the key NULL in all cases, and "
- "messages with key lengths of zero when the consistent_random "
- "partitioner is in use. "
- "These messages would otherwise be assigned randomly. "
- "A higher value allows for more effective batching of these "
- "messages.",
- 0, 900000, 10},
-
-
- /*
- * Topic properties
- */
-
- /* Topic producer properties */
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "request.required.acks", _RK_C_INT,
- _RKT(required_acks),
- "This field indicates the number of acknowledgements the leader "
- "broker must receive from ISR brokers before responding to the "
- "request: "
- "*0*=Broker does not send any response/ack to client, "
- "*-1* or *all*=Broker will block until message is committed by all "
- "in sync replicas (ISRs). If there are less than "
- "`min.insync.replicas` (broker configuration) in the ISR set the "
- "produce request will fail.",
- -1, 1000, -1,
- .s2i =
- {
- {-1, "all"},
- }},
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "acks", _RK_C_ALIAS,
- .sdef = "request.required.acks"},
-
- {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "request.timeout.ms", _RK_C_INT,
- _RKT(request_timeout_ms),
- "The ack timeout of the producer request in milliseconds. "
- "This value is only enforced by the broker and relies "
- "on `request.required.acks` being != 0.",
- 1, 900 * 1000, 30 * 1000},
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "message.timeout.ms", _RK_C_INT,
- _RKT(message_timeout_ms),
- "Local message timeout. "
- "This value is only enforced locally and limits the time a "
- "produced message waits for successful delivery. "
- "A time of 0 is infinite. "
- "This is the maximum time librdkafka may use to deliver a message "
- "(including retries). Delivery error occurs when either the retry "
- "count or the message timeout are exceeded. "
- "The message timeout is automatically adjusted to "
- "`transaction.timeout.ms` if `transactional.id` is configured.",
- 0, INT32_MAX, 300 * 1000},
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "delivery.timeout.ms", _RK_C_ALIAS,
- .sdef = "message.timeout.ms"},
- {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL,
- "queuing.strategy", _RK_C_S2I, _RKT(queuing_strategy),
- "Producer queuing strategy. FIFO preserves produce ordering, "
- "while LIFO prioritizes new messages.",
- .vdef = 0,
- .s2i = {{RD_KAFKA_QUEUE_FIFO, "fifo"}, {RD_KAFKA_QUEUE_LIFO, "lifo"}}},
- {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED, "produce.offset.report",
- _RK_C_BOOL, _RKT(produce_offset_report), "No longer used.", 0, 1, 0},
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "partitioner", _RK_C_STR,
- _RKT(partitioner_str),
- "Partitioner: "
- "`random` - random distribution, "
- "`consistent` - CRC32 hash of key (Empty and NULL keys are mapped to "
- "single partition), "
- "`consistent_random` - CRC32 hash of key (Empty and NULL keys are "
- "randomly partitioned), "
- "`murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are "
- "mapped to single partition), "
- "`murmur2_random` - Java Producer compatible Murmur2 hash of key "
- "(NULL keys are randomly partitioned. This is functionally equivalent "
- "to the default partitioner in the Java Producer.), "
- "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), "
- "`fnv1a_random` - FNV-1a hash of key (NULL keys are randomly "
- "partitioned).",
- .sdef = "consistent_random",
- .validate = rd_kafka_conf_validate_partitioner},
- {_RK_TOPIC | _RK_PRODUCER, "partitioner_cb", _RK_C_PTR, _RKT(partitioner),
- "Custom partitioner callback "
- "(set with rd_kafka_topic_conf_set_partitioner_cb())"},
- {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL,
- "msg_order_cmp", _RK_C_PTR, _RKT(msg_order_cmp),
- "Message queue ordering comparator "
- "(set with rd_kafka_topic_conf_set_msg_order_cmp()). "
- "Also see `queuing.strategy`."},
- {_RK_TOPIC, "opaque", _RK_C_PTR, _RKT(opaque),
- "Application opaque (set with rd_kafka_topic_conf_set_opaque())"},
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.codec", _RK_C_S2I,
- _RKT(compression_codec),
- "Compression codec to use for compressing message sets. "
- "inherit = inherit global compression.codec configuration.",
- .vdef = RD_KAFKA_COMPRESSION_INHERIT,
- .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"},
- {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB},
- {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY},
- {RD_KAFKA_COMPRESSION_LZ4, "lz4"},
- {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD},
- {RD_KAFKA_COMPRESSION_INHERIT, "inherit"},
- {0}}},
- {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.type", _RK_C_ALIAS,
- .sdef = "compression.codec"},
- {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "compression.level", _RK_C_INT,
- _RKT(compression_level),
- "Compression level parameter for algorithm selected by configuration "
- "property `compression.codec`. Higher values will result in better "
- "compression at the cost of more CPU usage. Usable range is "
- "algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; "
- "-1 = codec-dependent default compression level.",
- RD_KAFKA_COMPLEVEL_MIN, RD_KAFKA_COMPLEVEL_MAX,
- RD_KAFKA_COMPLEVEL_DEFAULT},
-
-
- /* Topic consumer properties */
- {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "auto.commit.enable",
- _RK_C_BOOL, _RKT(auto_commit),
- "[**LEGACY PROPERTY:** This property is used by the simple legacy "
- "consumer only. When using the high-level KafkaConsumer, the global "
- "`enable.auto.commit` property must be used instead]. "
- "If true, periodically commit offset of the last message handed "
- "to the application. This committed offset will be used when the "
- "process restarts to pick up where it left off. "
- "If false, the application will have to call "
- "`rd_kafka_offset_store()` to store an offset (optional). "
- "Offsets will be written to broker or local file according to "
- "offset.store.method.",
- 0, 1, 1},
- {_RK_TOPIC | _RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS,
- .sdef = "auto.commit.enable"},
- {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.commit.interval.ms", _RK_C_INT,
- _RKT(auto_commit_interval_ms),
- "[**LEGACY PROPERTY:** This setting is used by the simple legacy "
- "consumer only. When using the high-level KafkaConsumer, the "
- "global `auto.commit.interval.ms` property must be used instead]. "
- "The frequency in milliseconds that the consumer offsets "
- "are committed (written) to offset storage.",
- 10, 86400 * 1000, 60 * 1000},
- {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.offset.reset", _RK_C_S2I,
- _RKT(auto_offset_reset),
- "Action to take when there is no initial offset in offset store "
- "or the desired offset is out of range: "
- "'smallest','earliest' - automatically reset the offset to the smallest "
- "offset, "
- "'largest','latest' - automatically reset the offset to the largest "
- "offset, "
- "'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is "
- "retrieved by consuming messages and checking 'message->err'.",
- .vdef = RD_KAFKA_OFFSET_END,
- .s2i =
- {
- {RD_KAFKA_OFFSET_BEGINNING, "smallest"},
- {RD_KAFKA_OFFSET_BEGINNING, "earliest"},
- {RD_KAFKA_OFFSET_BEGINNING, "beginning"},
- {RD_KAFKA_OFFSET_END, "largest"},
- {RD_KAFKA_OFFSET_END, "latest"},
- {RD_KAFKA_OFFSET_END, "end"},
- {RD_KAFKA_OFFSET_INVALID, "error"},
- }},
- {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.path", _RK_C_STR,
- _RKT(offset_store_path),
- "Path to local file for storing offsets. If the path is a directory "
- "a filename will be automatically generated in that directory based "
- "on the topic and partition. "
- "File-based offset storage will be removed in a future version.",
- .sdef = "."},
-
- {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.sync.interval.ms",
- _RK_C_INT, _RKT(offset_store_sync_interval_ms),
- "fsync() interval for the offset file, in milliseconds. "
- "Use -1 to disable syncing, and 0 for immediate sync after "
- "each write. "
- "File-based offset storage will be removed in a future version.",
- -1, 86400 * 1000, -1},
-
- {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method",
- _RK_C_S2I, _RKT(offset_store_method),
- "Offset commit store method: "
- "'file' - DEPRECATED: local file store (offset.store.path, et.al), "
- "'broker' - broker commit store "
- "(requires \"group.id\" to be configured and "
- "Apache Kafka 0.8.2 or later on the broker.).",
- .vdef = RD_KAFKA_OFFSET_METHOD_BROKER,
- .s2i = {{RD_KAFKA_OFFSET_METHOD_FILE, "file"},
- {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}},
-
- {_RK_TOPIC | _RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT,
- _RKT(consume_callback_max_msgs),
- "Maximum number of messages to dispatch in "
- "one `rd_kafka_consume_callback*()` call (0 = unlimited)",
- 0, 1000000, 0},
-
- {0, /* End */}};
-
-/**
- * @returns the property object for \p name in \p scope, or NULL if not found.
- * @remark Does not work with interceptor configs.
- */
-const struct rd_kafka_property *rd_kafka_conf_prop_find(int scope,
- const char *name) {
- const struct rd_kafka_property *prop;
-
-restart:
- for (prop = rd_kafka_properties; prop->name; prop++) {
-
- if (!(prop->scope & scope))
- continue;
-
- if (strcmp(prop->name, name))
- continue;
-
- if (prop->type == _RK_C_ALIAS) {
- /* Caller supplied an alias, restart
- * search for real name. */
- name = prop->sdef;
- goto restart;
- }
-
- return prop;
- }
-
- return NULL;
-}
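-
-/* Illustrative sketch: looking up the "acks" alias transparently restarts
- * the search for the aliased property, so the returned entry is always
- * the real one.
- *
- *   const struct rd_kafka_property *prop =
- *       rd_kafka_conf_prop_find(_RK_TOPIC, "acks");
- *   rd_assert(!strcmp(prop->name, "request.required.acks"));
- */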
-
-/**
- * @returns rd_true if property has been set/modified, else rd_false.
- *
- * @warning Asserts if the property does not exist.
- */
-rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
- const char *name) {
- const struct rd_kafka_property *prop;
-
- if (!(prop = rd_kafka_conf_prop_find(_RK_GLOBAL, name)))
- RD_BUG("Configuration property \"%s\" does not exist", name);
-
- return rd_kafka_anyconf_is_modified(conf, prop);
-}
-
-
-/**
- * @returns rd_true if property has been set/modified, else rd_false.
- *
- * @warning Asserts if the property does not exist.
- */
-static rd_bool_t
-rd_kafka_topic_conf_is_modified(const rd_kafka_topic_conf_t *conf,
- const char *name) {
- const struct rd_kafka_property *prop;
-
- if (!(prop = rd_kafka_conf_prop_find(_RK_TOPIC, name)))
- RD_BUG("Topic configuration property \"%s\" does not exist",
- name);
-
- return rd_kafka_anyconf_is_modified(conf, prop);
-}
-
-
-
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_set_prop0(int scope,
- void *conf,
- const struct rd_kafka_property *prop,
- const char *istr,
- int ival,
- rd_kafka_conf_set_mode_t set_mode,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_conf_res_t res;
-
-#define _RK_PTR(TYPE, BASE, OFFSET) (TYPE)(void *)(((char *)(BASE)) + (OFFSET))
-
- /* Try interceptors first (only for GLOBAL config) */
- if (scope & _RK_GLOBAL) {
- if (prop->type == _RK_C_PTR || prop->type == _RK_C_INTERNAL)
- res = RD_KAFKA_CONF_UNKNOWN;
- else
- res = rd_kafka_interceptors_on_conf_set(
- conf, prop->name, istr, errstr, errstr_size);
- if (res != RD_KAFKA_CONF_UNKNOWN)
- return res;
- }
-
-
- if (prop->set) {
- /* Custom setter */
-
- res = prop->set(scope, conf, prop->name, istr,
- _RK_PTR(void *, conf, prop->offset), set_mode,
- errstr, errstr_size);
-
- if (res != RD_KAFKA_CONF_OK)
- return res;
-
- /* FALLTHRU so that property value is set. */
- }
-
- switch (prop->type) {
- case _RK_C_STR: {
- char **str = _RK_PTR(char **, conf, prop->offset);
- if (*str)
- rd_free(*str);
- if (istr)
- *str = rd_strdup(istr);
- else
- *str = prop->sdef ? rd_strdup(prop->sdef) : NULL;
- break;
- }
- case _RK_C_KSTR: {
- rd_kafkap_str_t **kstr =
- _RK_PTR(rd_kafkap_str_t **, conf, prop->offset);
- if (*kstr)
- rd_kafkap_str_destroy(*kstr);
- if (istr)
- *kstr = rd_kafkap_str_new(istr, -1);
- else
- *kstr = prop->sdef ? rd_kafkap_str_new(prop->sdef, -1)
- : NULL;
- break;
- }
- case _RK_C_PTR:
- *_RK_PTR(const void **, conf, prop->offset) = istr;
- break;
- case _RK_C_BOOL:
- case _RK_C_INT:
- case _RK_C_S2I:
- case _RK_C_S2F: {
- int *val = _RK_PTR(int *, conf, prop->offset);
-
- if (prop->type == _RK_C_S2F) {
- switch (set_mode) {
- case _RK_CONF_PROP_SET_REPLACE:
- *val = ival;
- break;
- case _RK_CONF_PROP_SET_ADD:
- *val |= ival;
- break;
- case _RK_CONF_PROP_SET_DEL:
- *val &= ~ival;
- break;
- }
- } else {
- /* Single assignment */
- *val = ival;
- }
- break;
- }
- case _RK_C_DBL: {
- double *val = _RK_PTR(double *, conf, prop->offset);
- if (istr) {
- char *endptr;
- double new_val = strtod(istr, &endptr);
- /* This is verified in set_prop() */
- rd_assert(endptr != istr);
- *val = new_val;
- } else
- *val = prop->ddef;
- break;
- }
-
- case _RK_C_PATLIST: {
- /* Split the comma-separated list into individual regex expressions
- * that are verified and then appended to the provided list. */
- rd_kafka_pattern_list_t **plist;
-
- plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset);
-
- if (*plist)
- rd_kafka_pattern_list_destroy(*plist);
-
- if (istr) {
- if (!(*plist = rd_kafka_pattern_list_new(
- istr, errstr, (int)errstr_size)))
- return RD_KAFKA_CONF_INVALID;
- } else
- *plist = NULL;
-
- break;
- }
-
- case _RK_C_INTERNAL:
- /* Probably handled by setter */
- break;
-
- default:
- rd_kafka_assert(NULL, !*"unknown conf type");
- }
-
-
- rd_kafka_anyconf_set_modified(conf, prop, 1 /*modified*/);
- return RD_KAFKA_CONF_OK;
-}
-
-
-/**
- * @brief Find s2i (string-to-int mapping) entry and return its array index,
- * or -1 on miss.
- */
-static int rd_kafka_conf_s2i_find(const struct rd_kafka_property *prop,
- const char *value) {
- int j;
-
- for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
- if (prop->s2i[j].str && !rd_strcasecmp(prop->s2i[j].str, value))
- return j;
- }
-
- return -1;
-}
-
-
-/**
- * @brief Set configuration property.
- *
- * @param allow_specific Allow properties that may only be set through
- * dedicated rd_kafka_*conf_set_..() functions, such as
- * rd_kafka_conf_set_log_cb(), to be set here.
- * Should not be allowed from the conf_set() string interface.
- */
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_set_prop(int scope,
- void *conf,
- const struct rd_kafka_property *prop,
- const char *value,
- int allow_specific,
- char *errstr,
- size_t errstr_size) {
- int ival;
-
- if (prop->unsupported) {
- rd_snprintf(errstr, errstr_size,
- "Configuration property \"%s\" not supported "
- "in this build: %s",
- prop->name, prop->unsupported);
- return RD_KAFKA_CONF_INVALID;
- }
-
- switch (prop->type) {
- case _RK_C_STR:
- /* Left-trim string (and string-like) values */
- if (value)
- while (isspace((int)*value))
- value++;
-
- /* FALLTHRU */
- case _RK_C_KSTR:
- if (prop->s2i[0].str) {
- int match;
-
- if (!value || (match = rd_kafka_conf_s2i_find(
- prop, value)) == -1) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value for "
- "configuration property \"%s\": "
- "%s",
- prop->name, value);
- return RD_KAFKA_CONF_INVALID;
- }
-
- /* Replace value string with canonical form */
- value = prop->s2i[match].str;
- }
- /* FALLTHRU */
- case _RK_C_PATLIST:
- if (prop->validate &&
- (!value || !prop->validate(prop, value, -1))) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value for "
- "configuration property \"%s\": %s",
- prop->name, value);
- return RD_KAFKA_CONF_INVALID;
- }
-
- return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
- _RK_CONF_PROP_SET_REPLACE,
- errstr, errstr_size);
-
- case _RK_C_PTR:
- /* Allow hidden internal unit test properties to
- * be set from generic conf_set() interface. */
- if (!allow_specific && !(prop->scope & _RK_HIDDEN)) {
- rd_snprintf(errstr, errstr_size,
- "Property \"%s\" must be set through "
- "dedicated .._set_..() function",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
- return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
- _RK_CONF_PROP_SET_REPLACE,
- errstr, errstr_size);
-
- case _RK_C_BOOL:
- if (!value) {
- rd_snprintf(errstr, errstr_size,
- "Bool configuration property \"%s\" cannot "
- "be set to empty value",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
-
- if (!rd_strcasecmp(value, "true") ||
- !rd_strcasecmp(value, "t") || !strcmp(value, "1"))
- ival = 1;
- else if (!rd_strcasecmp(value, "false") ||
- !rd_strcasecmp(value, "f") || !strcmp(value, "0"))
- ival = 0;
- else {
- rd_snprintf(errstr, errstr_size,
- "Expected bool value for \"%s\": "
- "true or false",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
- rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival,
- _RK_CONF_PROP_SET_REPLACE, errstr,
- errstr_size);
- return RD_KAFKA_CONF_OK;
-
- case _RK_C_INT: {
- const char *end;
-
- if (!value) {
- rd_snprintf(errstr, errstr_size,
- "Integer configuration "
- "property \"%s\" cannot be set "
- "to empty value",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
- ival = (int)strtol(value, (char **)&end, 0);
- if (end == value) {
- /* Non numeric, check s2i for string mapping */
- int match = rd_kafka_conf_s2i_find(prop, value);
-
- if (match == -1) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value for "
- "configuration property \"%s\"",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
- if (prop->s2i[match].unsupported) {
- rd_snprintf(errstr, errstr_size,
- "Unsupported value \"%s\" for "
- "configuration property \"%s\": %s",
- value, prop->name,
- prop->s2i[match].unsupported);
- return RD_KAFKA_CONF_INVALID;
- }
-
- ival = prop->s2i[match].val;
- }
-
- if (ival < prop->vmin || ival > prop->vmax) {
- rd_snprintf(errstr, errstr_size,
- "Configuration property \"%s\" value "
- "%i is outside allowed range %i..%i\n",
- prop->name, ival, prop->vmin, prop->vmax);
- return RD_KAFKA_CONF_INVALID;
- }
-
- rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival,
- _RK_CONF_PROP_SET_REPLACE, errstr,
- errstr_size);
- return RD_KAFKA_CONF_OK;
- }
-
- case _RK_C_DBL: {
- const char *end;
- double dval;
-
- if (!value) {
- rd_snprintf(errstr, errstr_size,
- "Float configuration "
- "property \"%s\" cannot be set "
- "to empty value",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
- dval = strtod(value, (char **)&end);
- if (end == value) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value for "
- "configuration property \"%s\"",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
- if (dval < prop->dmin || dval > prop->dmax) {
- rd_snprintf(errstr, errstr_size,
- "Configuration property \"%s\" value "
- "%g is outside allowed range %g..%g\n",
- prop->name, dval, prop->dmin, prop->dmax);
- return RD_KAFKA_CONF_INVALID;
- }
-
- rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
- _RK_CONF_PROP_SET_REPLACE, errstr,
- errstr_size);
- return RD_KAFKA_CONF_OK;
- }
-
- case _RK_C_S2I:
- case _RK_C_S2F: {
- int j;
- const char *next;
-
- if (!value) {
- rd_snprintf(errstr, errstr_size,
- "Configuration "
- "property \"%s\" cannot be set "
- "to empty value",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
-
- next = value;
- while (next && *next) {
- const char *s, *t;
- rd_kafka_conf_set_mode_t set_mode =
- _RK_CONF_PROP_SET_ADD; /* S2F */
-
- s = next;
-
- if (prop->type == _RK_C_S2F && (t = strchr(s, ','))) {
- /* CSV flag field */
- next = t + 1;
- } else {
- /* Single string */
- t = s + strlen(s);
- next = NULL;
- }
-
-
- /* Left trim */
- while (s < t && isspace((int)*s))
- s++;
-
- /* Right trim */
- while (t > s && isspace((int)*t))
- t--;
-
- /* S2F: +/- prefix */
- if (prop->type == _RK_C_S2F) {
- if (*s == '+') {
- set_mode = _RK_CONF_PROP_SET_ADD;
- s++;
- } else if (*s == '-') {
- set_mode = _RK_CONF_PROP_SET_DEL;
- s++;
- }
- }
-
- /* Empty string? */
- if (s == t)
- continue;
-
- /* Match string to s2i table entry */
- for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
- int new_val;
-
- if (!prop->s2i[j].str)
- continue;
-
- if (strlen(prop->s2i[j].str) ==
- (size_t)(t - s) &&
- !rd_strncasecmp(prop->s2i[j].str, s,
- (int)(t - s)))
- new_val = prop->s2i[j].val;
- else
- continue;
-
- if (prop->s2i[j].unsupported) {
- rd_snprintf(
- errstr, errstr_size,
- "Unsupported value \"%.*s\" "
- "for configuration property "
- "\"%s\": %s",
- (int)(t - s), s, prop->name,
- prop->s2i[j].unsupported);
- return RD_KAFKA_CONF_INVALID;
- }
-
- rd_kafka_anyconf_set_prop0(
- scope, conf, prop, value, new_val, set_mode,
- errstr, errstr_size);
-
- if (prop->type == _RK_C_S2F) {
- /* Flags: OR it in: do next */
- break;
- } else {
- /* Single assignment */
- return RD_KAFKA_CONF_OK;
- }
- }
-
- /* S2F: Good match: continue with next */
- if (j < (int)RD_ARRAYSIZE(prop->s2i))
- continue;
-
- /* No match */
- rd_snprintf(errstr, errstr_size,
- "Invalid value \"%.*s\" for "
- "configuration property \"%s\"",
- (int)(t - s), s, prop->name);
- return RD_KAFKA_CONF_INVALID;
- }
- return RD_KAFKA_CONF_OK;
- }
-
- case _RK_C_INTERNAL:
- rd_snprintf(errstr, errstr_size,
- "Internal property \"%s\" not settable",
- prop->name);
- return RD_KAFKA_CONF_INVALID;
-
- case _RK_C_INVALID:
- rd_snprintf(errstr, errstr_size, "%s", prop->desc);
- return RD_KAFKA_CONF_INVALID;
-
- default:
- rd_kafka_assert(NULL, !*"unknown conf type");
- }
-
- /* not reachable */
- return RD_KAFKA_CONF_INVALID;
-}
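-
-/* Illustrative sketch of the S2F (string-to-flags) handling above: a CSV
- * value is tokenized, each token may carry a '+' (add) or '-' (del)
- * prefix, and matching s2i entries are OR:ed into, or masked out of, the
- * flags value. E.g., assuming \p conf and \p errstr as usual, for the
- * global "debug" property:
- *
- *   rd_kafka_conf_set(conf, "debug", "broker,topic", errstr,
- *                     sizeof(errstr));
- */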
-
-
-
-static void rd_kafka_defaultconf_set(int scope, void *conf) {
- const struct rd_kafka_property *prop;
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
- if (!(prop->scope & scope))
- continue;
-
- if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
- continue;
-
- if (prop->ctor)
- prop->ctor(scope, conf);
-
- if (prop->sdef || prop->vdef || prop->pdef ||
- !rd_dbl_zero(prop->ddef))
- rd_kafka_anyconf_set_prop0(
- scope, conf, prop,
- prop->sdef ? prop->sdef : prop->pdef, prop->vdef,
- _RK_CONF_PROP_SET_REPLACE, NULL, 0);
- }
-}
-
-rd_kafka_conf_t *rd_kafka_conf_new(void) {
- rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf));
- rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*conf) &&
- *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX");
- rd_kafka_defaultconf_set(_RK_GLOBAL, conf);
- rd_kafka_anyconf_clear_all_is_modified(conf);
- return conf;
-}
-
-rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void) {
- rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf));
- rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*tconf) &&
- *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX");
- rd_kafka_defaultconf_set(_RK_TOPIC, tconf);
- rd_kafka_anyconf_clear_all_is_modified(tconf);
- return tconf;
-}
-
-
-static int rd_kafka_anyconf_set(int scope,
- void *conf,
- const char *name,
- const char *value,
- char *errstr,
- size_t errstr_size) {
- char estmp[1];
- const struct rd_kafka_property *prop;
- rd_kafka_conf_res_t res;
-
- if (!errstr) {
- errstr = estmp;
- errstr_size = 0;
- }
-
- if (value && !*value)
- value = NULL;
-
- /* Try interceptors first (only for GLOBAL config for now) */
- if (scope & _RK_GLOBAL) {
- res = rd_kafka_interceptors_on_conf_set(
- (rd_kafka_conf_t *)conf, name, value, errstr, errstr_size);
- /* Handled (successfully or not) by interceptor. */
- if (res != RD_KAFKA_CONF_UNKNOWN)
- return res;
- }
-
- /* Then global config */
-
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
-
- if (!(prop->scope & scope))
- continue;
-
- if (strcmp(prop->name, name))
- continue;
-
- if (prop->type == _RK_C_ALIAS)
- return rd_kafka_anyconf_set(scope, conf, prop->sdef,
- value, errstr, errstr_size);
-
- return rd_kafka_anyconf_set_prop(scope, conf, prop, value,
- 0 /*don't allow specifics*/,
- errstr, errstr_size);
- }
-
- rd_snprintf(errstr, errstr_size,
- "No such configuration property: \"%s\"", name);
-
- return RD_KAFKA_CONF_UNKNOWN;
-}
-
-
-/**
- * @brief Set a rd_kafka_*_conf_set_...() specific property, such as
- * rd_kafka_conf_set_error_cb().
- *
- * @warning Will not call interceptor's on_conf_set.
- * @warning Asserts if \p name is not known or value is incorrect.
- *
- * Implemented as a macro to have rd_assert() print the original function.
- */
-
-#define rd_kafka_anyconf_set_internal(SCOPE, CONF, NAME, VALUE) \
- do { \
- const struct rd_kafka_property *_prop; \
- rd_kafka_conf_res_t _res; \
- _prop = rd_kafka_conf_prop_find(SCOPE, NAME); \
- rd_assert(_prop && * "invalid property name"); \
- _res = rd_kafka_anyconf_set_prop( \
- SCOPE, CONF, _prop, (const void *)VALUE, \
- 1 /*allow-specifics*/, NULL, 0); \
- rd_assert(_res == RD_KAFKA_CONF_OK); \
- } while (0)
-
-
-rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
- const char *name,
- const char *value,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_conf_res_t res;
-
- res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value, errstr,
- errstr_size);
- if (res != RD_KAFKA_CONF_UNKNOWN)
- return res;
-
- /* Fallthru:
- * If the global property was unknown, try setting it on the
- * default topic config. */
- if (!conf->topic_conf) {
- /* Create topic config, might be over-written by application
- * later. */
- rd_kafka_conf_set_default_topic_conf(conf,
- rd_kafka_topic_conf_new());
- }
-
- return rd_kafka_topic_conf_set(conf->topic_conf, name, value, errstr,
- errstr_size);
-}
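-
-/* Usage sketch (error handling abbreviated): setting the topic-level
- * "acks" property through the global rd_kafka_conf_set() falls through to
- * the default topic config created on demand above.
- *
- *   char errstr[512];
- *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
- *   if (rd_kafka_conf_set(conf, "acks", "all", errstr, sizeof(errstr)) !=
- *       RD_KAFKA_CONF_OK)
- *           fprintf(stderr, "%s\n", errstr);
- */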
-
-
-rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf,
- const char *name,
- const char *value,
- char *errstr,
- size_t errstr_size) {
- if (!strncmp(name, "topic.", strlen("topic.")))
- name += strlen("topic.");
-
- return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value, errstr,
- errstr_size);
-}
-
-
-/**
- * @brief Overwrites the contents of \p str up until but not including
- * the nul-term.
- */
-void rd_kafka_desensitize_str(char *str) {
- size_t len;
- static const char redacted[] = "(REDACTED)";
-
-#ifdef _WIN32
- len = strlen(str);
- SecureZeroMemory(str, len);
-#else
- volatile char *volatile s;
-
- for (s = str; *s; s++)
- *s = '\0';
-
- len = (size_t)(s - str);
-#endif
-
- if (len > sizeof(redacted))
- memcpy(str, redacted, sizeof(redacted));
-}
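-
-/* Illustrative sketch: the buffer is zeroed in full first and the
- * "(REDACTED)" marker is only written if it fits, so short secrets end up
- * as an empty string.
- *
- *   char secret[32] = "my-long-api-secret";
- *   rd_kafka_desensitize_str(secret);
- *
- * after which \p secret reads "(REDACTED)". */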
-
-
-
-/**
- * @brief Overwrite the value of \p prop, if sensitive.
- */
-static RD_INLINE void
-rd_kafka_anyconf_prop_desensitize(int scope,
- void *conf,
- const struct rd_kafka_property *prop) {
- if (likely(!(prop->scope & _RK_SENSITIVE)))
- return;
-
- switch (prop->type) {
- case _RK_C_STR: {
- char **str = _RK_PTR(char **, conf, prop->offset);
- if (*str)
- rd_kafka_desensitize_str(*str);
- break;
- }
-
- case _RK_C_INTERNAL:
- /* This is typically a pointer to something, the
- * _RK_SENSITIVE flag is set to get it redacted in
- * ..dump_dbg(), but we don't have to desensitize
- * anything here. */
- break;
-
- default:
- rd_assert(!*"BUG: Don't know how to desensitize prop type");
- break;
- }
-}
-
-
-/**
- * @brief Desensitize all sensitive properties in \p conf
- */
-static void rd_kafka_anyconf_desensitize(int scope, void *conf) {
- const struct rd_kafka_property *prop;
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
- if (!(prop->scope & scope))
- continue;
-
- rd_kafka_anyconf_prop_desensitize(scope, conf, prop);
- }
-}
-
-/**
- * @brief Overwrite the values of sensitive properties
- */
-void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf) {
- if (conf->topic_conf)
- rd_kafka_anyconf_desensitize(_RK_TOPIC, conf->topic_conf);
- rd_kafka_anyconf_desensitize(_RK_GLOBAL, conf);
-}
-
-/**
- * @brief Overwrite the values of sensitive properties
- */
-void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf) {
- rd_kafka_anyconf_desensitize(_RK_TOPIC, tconf);
-}
-
-
-static void rd_kafka_anyconf_clear(int scope,
- void *conf,
- const struct rd_kafka_property *prop) {
-
- rd_kafka_anyconf_prop_desensitize(scope, conf, prop);
-
- switch (prop->type) {
- case _RK_C_STR: {
- char **str = _RK_PTR(char **, conf, prop->offset);
-
- if (*str) {
- if (prop->set) {
- prop->set(scope, conf, prop->name, NULL, *str,
- _RK_CONF_PROP_SET_DEL, NULL, 0);
- /* FALLTHRU */
- }
- rd_free(*str);
- *str = NULL;
- }
- } break;
-
- case _RK_C_KSTR: {
- rd_kafkap_str_t **kstr =
- _RK_PTR(rd_kafkap_str_t **, conf, prop->offset);
- if (*kstr) {
- rd_kafkap_str_destroy(*kstr);
- *kstr = NULL;
- }
- } break;
-
- case _RK_C_PATLIST: {
- rd_kafka_pattern_list_t **plist;
- plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset);
- if (*plist) {
- rd_kafka_pattern_list_destroy(*plist);
- *plist = NULL;
- }
- } break;
-
- case _RK_C_PTR:
- if (_RK_PTR(void *, conf, prop->offset) != NULL) {
- if (!strcmp(prop->name, "default_topic_conf")) {
- rd_kafka_topic_conf_t **tconf;
-
- tconf = _RK_PTR(rd_kafka_topic_conf_t **, conf,
- prop->offset);
- if (*tconf) {
- rd_kafka_topic_conf_destroy(*tconf);
- *tconf = NULL;
- }
- }
- }
- break;
-
- default:
- break;
- }
-
- if (prop->dtor)
- prop->dtor(scope, conf);
-}
-
-void rd_kafka_anyconf_destroy(int scope, void *conf) {
- const struct rd_kafka_property *prop;
-
- /* Call on_conf_destroy() interceptors */
- if (scope == _RK_GLOBAL)
- rd_kafka_interceptors_on_conf_destroy(conf);
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
- if (!(prop->scope & scope))
- continue;
-
- rd_kafka_anyconf_clear(scope, conf, prop);
- }
-}
-
-
-void rd_kafka_conf_destroy(rd_kafka_conf_t *conf) {
- rd_kafka_anyconf_destroy(_RK_GLOBAL, conf);
- // FIXME: partition_assignors
- rd_free(conf);
-}
-
-void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf) {
- rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf);
- rd_free(topic_conf);
-}
-
-
-
-static void rd_kafka_anyconf_copy(int scope,
- void *dst,
- const void *src,
- size_t filter_cnt,
- const char **filter) {
- const struct rd_kafka_property *prop;
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
- const char *val = NULL;
- int ival = 0;
- char *valstr;
- size_t valsz;
- size_t fi;
- size_t nlen;
-
- if (!(prop->scope & scope))
- continue;
-
- if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
- continue;
-
- /* Skip properties that have not been set,
- * unless they are internal ones which require
- * extra logic, such as the interceptors. */
- if (!rd_kafka_anyconf_is_modified(src, prop) &&
- prop->type != _RK_C_INTERNAL)
- continue;
-
- /* Apply filter, if any. */
- nlen = strlen(prop->name);
- for (fi = 0; fi < filter_cnt; fi++) {
- size_t flen = strlen(filter[fi]);
- if (nlen >= flen &&
- !strncmp(filter[fi], prop->name, flen))
- break;
- }
- if (fi < filter_cnt)
- continue; /* Filter matched */
-
- switch (prop->type) {
- case _RK_C_STR:
- case _RK_C_PTR:
- val = *_RK_PTR(const char **, src, prop->offset);
-
- if (!strcmp(prop->name, "default_topic_conf") && val)
- val = (void *)rd_kafka_topic_conf_dup(
- (const rd_kafka_topic_conf_t *)(void *)val);
- break;
- case _RK_C_KSTR: {
- rd_kafkap_str_t **kstr =
- _RK_PTR(rd_kafkap_str_t **, src, prop->offset);
- if (*kstr)
- val = (*kstr)->str;
- break;
- }
-
- case _RK_C_BOOL:
- case _RK_C_INT:
- case _RK_C_S2I:
- case _RK_C_S2F:
- ival = *_RK_PTR(const int *, src, prop->offset);
-
- /* Get string representation of configuration value. */
- valsz = 0;
- rd_kafka_anyconf_get0(src, prop, NULL, &valsz);
- valstr = rd_alloca(valsz);
- rd_kafka_anyconf_get0(src, prop, valstr, &valsz);
- val = valstr;
- break;
- case _RK_C_DBL:
- /* Get string representation of configuration value. */
- valsz = 0;
- rd_kafka_anyconf_get0(src, prop, NULL, &valsz);
- valstr = rd_alloca(valsz);
- rd_kafka_anyconf_get0(src, prop, valstr, &valsz);
- val = valstr;
- break;
- case _RK_C_PATLIST: {
- const rd_kafka_pattern_list_t **plist;
- plist = _RK_PTR(const rd_kafka_pattern_list_t **, src,
- prop->offset);
- if (*plist)
- val = (*plist)->rkpl_orig;
- break;
- }
- case _RK_C_INTERNAL:
- /* Handled by ->copy() below. */
- break;
- default:
- continue;
- }
-
- if (prop->copy)
- prop->copy(scope, dst, src,
- _RK_PTR(void *, dst, prop->offset),
- _RK_PTR(const void *, src, prop->offset),
- filter_cnt, filter);
-
- rd_kafka_anyconf_set_prop0(scope, dst, prop, val, ival,
- _RK_CONF_PROP_SET_REPLACE, NULL, 0);
- }
-}
-
-
-rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf) {
- rd_kafka_conf_t *new = rd_kafka_conf_new();
-
- rd_kafka_interceptors_on_conf_dup(new, conf, 0, NULL);
-
- rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, 0, NULL);
-
- return new;
-}
-
-rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf,
- size_t filter_cnt,
- const char **filter) {
- rd_kafka_conf_t *new = rd_kafka_conf_new();
-
- rd_kafka_interceptors_on_conf_dup(new, conf, filter_cnt, filter);
-
- rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, filter_cnt, filter);
-
- return new;
-}
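-
-/* Illustrative sketch: duplicate a configuration while excluding
- * properties by name prefix (per the filter loop in
- * rd_kafka_anyconf_copy()), e.g. to drop credentials from the copy:
- *
- *   const char *filter[] = {"ssl.", "sasl."};
- *   rd_kafka_conf_t *copy = rd_kafka_conf_dup_filter(conf, 2, filter);
- */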
-
-
-rd_kafka_topic_conf_t *
-rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf) {
- rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new();
-
- rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL);
-
- return new;
-}
-
-rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk) {
- if (rk->rk_conf.topic_conf)
- return rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf);
- else
- return rd_kafka_topic_conf_new();
-}
-
-void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events) {
- char tmp[32];
- rd_snprintf(tmp, sizeof(tmp), "%d", events);
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enabled_events", tmp);
-}
-
-void rd_kafka_conf_set_background_event_cb(
- rd_kafka_conf_t *conf,
- void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "background_event_cb",
- event_cb);
-}
-
-
-void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf,
- void (*dr_cb)(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_cb", dr_cb);
-}
-
-
-void rd_kafka_conf_set_dr_msg_cb(
- rd_kafka_conf_t *conf,
- void (*dr_msg_cb)(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_msg_cb", dr_msg_cb);
-}
-
-
-void rd_kafka_conf_set_consume_cb(
- rd_kafka_conf_t *conf,
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "consume_cb",
- consume_cb);
-}
-
-void rd_kafka_conf_set_rebalance_cb(
- rd_kafka_conf_t *conf,
- void (*rebalance_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "rebalance_cb",
- rebalance_cb);
-}
-
-void rd_kafka_conf_set_offset_commit_cb(
- rd_kafka_conf_t *conf,
- void (*offset_commit_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "offset_commit_cb",
- offset_commit_cb);
-}
-
-
-
-void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,
- void (*error_cb)(rd_kafka_t *rk,
- int err,
- const char *reason,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "error_cb", error_cb);
-}
-
-
-void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf,
- void (*throttle_cb)(rd_kafka_t *rk,
- const char *broker_name,
- int32_t broker_id,
- int throttle_time_ms,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "throttle_cb",
- throttle_cb);
-}
-
-
-void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf,
- void (*log_cb)(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf)) {
-#if !WITH_SYSLOG
- if (log_cb == rd_kafka_log_syslog)
- rd_assert(!*"syslog support not enabled in this build");
-#endif
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "log_cb", log_cb);
-}
-
-
-void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf,
- int (*stats_cb)(rd_kafka_t *rk,
- char *json,
- size_t json_len,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "stats_cb", stats_cb);
-}
-
-void rd_kafka_conf_set_oauthbearer_token_refresh_cb(
- rd_kafka_conf_t *conf,
- void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque)) {
-#if WITH_SASL_OAUTHBEARER
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf,
- "oauthbearer_token_refresh_cb",
- oauthbearer_token_refresh_cb);
-#endif
-}
-
-void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enable_sasl_queue",
- (enable ? "true" : "false"));
-}
-
-void rd_kafka_conf_set_socket_cb(
- rd_kafka_conf_t *conf,
- int (*socket_cb)(int domain, int type, int protocol, void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "socket_cb", socket_cb);
-}
-
-void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf,
- int (*connect_cb)(int sockfd,
- const struct sockaddr *addr,
- int addrlen,
- const char *id,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "connect_cb",
- connect_cb);
-}
-
-void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf,
- int (*closesocket_cb)(int sockfd,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "closesocket_cb",
- closesocket_cb);
-}
-
-
-
-#ifndef _WIN32
-void rd_kafka_conf_set_open_cb(rd_kafka_conf_t *conf,
- int (*open_cb)(const char *pathname,
- int flags,
- mode_t mode,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "open_cb", open_cb);
-}
-#endif
-
-void rd_kafka_conf_set_resolve_cb(
- rd_kafka_conf_t *conf,
- int (*resolve_cb)(const char *node,
- const char *service,
- const struct addrinfo *hints,
- struct addrinfo **res,
- void *opaque)) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "resolve_cb",
- resolve_cb);
-}
-
-rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(
- rd_kafka_conf_t *conf,
- int (*ssl_cert_verify_cb)(rd_kafka_t *rk,
- const char *broker_name,
- int32_t broker_id,
- int *x509_set_error,
- int depth,
- const char *buf,
- size_t size,
- char *errstr,
- size_t errstr_size,
- void *opaque)) {
-#if !WITH_SSL
- return RD_KAFKA_CONF_INVALID;
-#else
- rd_kafka_anyconf_set_internal(
- _RK_GLOBAL, conf, "ssl.certificate.verify_cb", ssl_cert_verify_cb);
- return RD_KAFKA_CONF_OK;
-#endif
-}
-
-
-void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque) {
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "opaque", opaque);
-}
-
-
-void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf,
- void *callback_data) {
- rd_kafka_anyconf_set_internal(
- _RK_GLOBAL, conf, "ssl_engine_callback_data", callback_data);
-}
-
-
-void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *tconf) {
- if (conf->topic_conf) {
- if (rd_kafka_anyconf_is_any_modified(conf->topic_conf))
- conf->warn.default_topic_conf_overwritten = rd_true;
- rd_kafka_topic_conf_destroy(conf->topic_conf);
- }
-
- rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "default_topic_conf",
- tconf);
-}
-
-rd_kafka_topic_conf_t *
-rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf) {
- return conf->topic_conf;
-}
-
-
-void rd_kafka_topic_conf_set_partitioner_cb(
- rd_kafka_topic_conf_t *topic_conf,
- int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
- const void *keydata,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque)) {
- rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "partitioner_cb",
- partitioner);
-}
-
-void rd_kafka_topic_conf_set_msg_order_cmp(
- rd_kafka_topic_conf_t *topic_conf,
- int (*msg_order_cmp)(const rd_kafka_message_t *a,
- const rd_kafka_message_t *b)) {
- rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "msg_order_cmp",
- msg_order_cmp);
-}
-
-void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *topic_conf,
- void *opaque) {
- rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "opaque", opaque);
-}
-
-
-
-/**
- * @brief Convert flags \p ival to a CSV string using S2F property \p prop.
- *
- * This function has two modes: size query and write.
- * To query for the needed size, call with dest==NULL;
- * to write to a buffer of size dest_size, call with dest!=NULL.
- *
- * An \p ival of -1 means all.
- *
- * @param include_unsupported Include flag values that are unsupported
- * due to missing dependencies at build time.
- *
- * @returns the number of bytes written to \p dest (if not NULL), else the
- * total number of bytes needed.
- *
- */
-static size_t rd_kafka_conf_flags2str(char *dest,
- size_t dest_size,
- const char *delim,
- const struct rd_kafka_property *prop,
- int ival,
- rd_bool_t include_unsupported) {
- size_t of = 0;
- int j;
-
- if (dest && dest_size > 0)
- *dest = '\0';
-
- /* Phase 1: scan for set flags, accumulate needed size.
- * Phase 2: write to dest */
- for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i) && prop->s2i[j].str; j++) {
- if (prop->type == _RK_C_S2F && ival != -1 &&
- (ival & prop->s2i[j].val) != prop->s2i[j].val)
- continue;
- else if (prop->type == _RK_C_S2I && ival != -1 &&
- prop->s2i[j].val != ival)
- continue;
- else if (prop->s2i[j].unsupported && !include_unsupported)
- continue;
-
- if (!dest)
- of += strlen(prop->s2i[j].str) + (of > 0 ? 1 : 0);
- else {
- size_t r;
- r = rd_snprintf(dest + of, dest_size - of, "%s%s",
- of > 0 ? delim : "", prop->s2i[j].str);
- if (r > dest_size - of) {
- r = dest_size - of;
- break;
- }
- of += r;
- }
- }
-
- return of + 1 /*nul*/;
-}
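-
-/* The two-phase pattern above, as a hypothetical caller would use it:
- * query the needed size with dest==NULL, then write.
- *
- *   size_t sz = rd_kafka_conf_flags2str(NULL, 0, ",", prop, ival,
- *                                       rd_false);
- *   char *buf = rd_alloca(sz);
- *   rd_kafka_conf_flags2str(buf, sz, ",", prop, ival, rd_false);
- */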
-
-
-/**
- * Return "original"(re-created) configuration value string
- */
-static rd_kafka_conf_res_t
-rd_kafka_anyconf_get0(const void *conf,
- const struct rd_kafka_property *prop,
- char *dest,
- size_t *dest_size) {
- char tmp[22];
- const char *val = NULL;
- size_t val_len = 0;
- int j;
-
- switch (prop->type) {
- case _RK_C_STR:
- val = *_RK_PTR(const char **, conf, prop->offset);
- break;
-
- case _RK_C_KSTR: {
- const rd_kafkap_str_t **kstr =
- _RK_PTR(const rd_kafkap_str_t **, conf, prop->offset);
- if (*kstr)
- val = (*kstr)->str;
- break;
- }
-
- case _RK_C_PTR:
- val = *_RK_PTR(const void **, conf, prop->offset);
- if (val) {
- rd_snprintf(tmp, sizeof(tmp), "%p", (void *)val);
- val = tmp;
- }
- break;
-
- case _RK_C_BOOL:
- val = (*_RK_PTR(int *, conf, prop->offset) ? "true" : "false");
- break;
-
- case _RK_C_INT:
- rd_snprintf(tmp, sizeof(tmp), "%i",
- *_RK_PTR(int *, conf, prop->offset));
- val = tmp;
- break;
-
- case _RK_C_DBL:
- rd_snprintf(tmp, sizeof(tmp), "%g",
- *_RK_PTR(double *, conf, prop->offset));
- val = tmp;
- break;
-
- case _RK_C_S2I:
- for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
- if (prop->s2i[j].val ==
- *_RK_PTR(int *, conf, prop->offset)) {
- val = prop->s2i[j].str;
- break;
- }
- }
- break;
-
- case _RK_C_S2F: {
- const int ival = *_RK_PTR(const int *, conf, prop->offset);
-
- val_len = rd_kafka_conf_flags2str(dest, dest ? *dest_size : 0,
- ",", prop, ival,
- rd_false /*only supported*/);
- if (dest) {
- val_len = 0;
- val = dest;
- dest = NULL;
- }
- break;
- }
-
- case _RK_C_PATLIST: {
- const rd_kafka_pattern_list_t **plist;
- plist = _RK_PTR(const rd_kafka_pattern_list_t **, conf,
- prop->offset);
- if (*plist)
- val = (*plist)->rkpl_orig;
- break;
- }
-
- default:
- break;
- }
-
- if (val_len) {
- *dest_size = val_len + 1;
- return RD_KAFKA_CONF_OK;
- }
-
- if (!val)
- return RD_KAFKA_CONF_INVALID;
-
- val_len = strlen(val);
-
- if (dest) {
- size_t use_len = RD_MIN(val_len, (*dest_size) - 1);
- memcpy(dest, val, use_len);
- dest[use_len] = '\0';
- }
-
- /* Return needed size */
- *dest_size = val_len + 1;
-
- return RD_KAFKA_CONF_OK;
-}
-
-
-static rd_kafka_conf_res_t rd_kafka_anyconf_get(int scope,
- const void *conf,
- const char *name,
- char *dest,
- size_t *dest_size) {
- const struct rd_kafka_property *prop;
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
-
- if (!(prop->scope & scope) || strcmp(prop->name, name))
- continue;
-
- if (prop->type == _RK_C_ALIAS)
- return rd_kafka_anyconf_get(scope, conf, prop->sdef,
- dest, dest_size);
-
- if (rd_kafka_anyconf_get0(conf, prop, dest, dest_size) ==
- RD_KAFKA_CONF_OK)
- return RD_KAFKA_CONF_OK;
- }
-
- return RD_KAFKA_CONF_UNKNOWN;
-}
-
-rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf,
- const char *name,
- char *dest,
- size_t *dest_size) {
- return rd_kafka_anyconf_get(_RK_TOPIC, conf, name, dest, dest_size);
-}
-
-rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf,
- const char *name,
- char *dest,
- size_t *dest_size) {
- rd_kafka_conf_res_t res;
- res = rd_kafka_anyconf_get(_RK_GLOBAL, conf, name, dest, dest_size);
- if (res != RD_KAFKA_CONF_UNKNOWN || !conf->topic_conf)
- return res;
-
- /* Fallthru:
- * If the global property was unknown, try getting it from the
- * default topic config, if any. */
- return rd_kafka_topic_conf_get(conf->topic_conf, name, dest, dest_size);
-}
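-
-/* Usage sketch for the public getter: call once with a NULL dest to learn
- * the required size, then again to fetch the value.
- *
- *   size_t size;
- *   if (rd_kafka_conf_get(conf, "linger.ms", NULL, &size) ==
- *       RD_KAFKA_CONF_OK) {
- *           char *val = rd_malloc(size);
- *           rd_kafka_conf_get(conf, "linger.ms", val, &size);
- *           rd_free(val);
- *   }
- */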
-
-
-static const char **rd_kafka_anyconf_dump(int scope,
- const void *conf,
- size_t *cntp,
- rd_bool_t only_modified,
- rd_bool_t redact_sensitive) {
- const struct rd_kafka_property *prop;
- char **arr;
- int cnt = 0;
-
- arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties) * 2);
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
- char *val = NULL;
- size_t val_size;
-
- if (!(prop->scope & scope))
- continue;
-
- if (only_modified && !rd_kafka_anyconf_is_modified(conf, prop))
- continue;
-
- /* Skip aliases, show original property instead.
- * Skip invalids. */
- if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
- continue;
-
- if (redact_sensitive && (prop->scope & _RK_SENSITIVE)) {
- val = rd_strdup("[redacted]");
- } else {
- /* Query value size */
- if (rd_kafka_anyconf_get0(conf, prop, NULL,
- &val_size) !=
- RD_KAFKA_CONF_OK)
- continue;
-
- /* Get value */
- val = rd_malloc(val_size);
- rd_kafka_anyconf_get0(conf, prop, val, &val_size);
- }
-
- arr[cnt++] = rd_strdup(prop->name);
- arr[cnt++] = val;
- }
-
- *cntp = cnt;
-
- return (const char **)arr;
-}
-
-
-const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp) {
- return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp, rd_false /*all*/,
- rd_false /*don't redact*/);
-}
-
-const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf,
- size_t *cntp) {
- return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp, rd_false /*all*/,
- rd_false /*don't redact*/);
-}
-
-void rd_kafka_conf_dump_free(const char **arr, size_t cnt) {
- char **_arr = (char **)arr;
- unsigned int i;
-
- for (i = 0; i < cnt; i++)
- if (_arr[i])
- rd_free(_arr[i]);
-
- rd_free(_arr);
-}
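-
-/* Usage sketch: the dump is a flat array of \p cnt elements holding
- * cnt/2 name/value pairs, freed as a whole:
- *
- *   size_t cnt, i;
- *   const char **arr = rd_kafka_conf_dump(conf, &cnt);
- *   for (i = 0; i < cnt; i += 2)
- *           printf("%s = %s\n", arr[i], arr[i + 1]);
- *   rd_kafka_conf_dump_free(arr, cnt);
- */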
-
-
-
-/**
- * @brief Dump configured properties to debug log.
- */
-void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk,
- int scope,
- const void *conf,
- const char *description) {
- const char **arr;
- size_t cnt;
- size_t i;
-
- arr =
- rd_kafka_anyconf_dump(scope, conf, &cnt, rd_true /*modified only*/,
- rd_true /*redact sensitive*/);
- if (cnt > 0)
- rd_kafka_dbg(rk, CONF, "CONF", "%s:", description);
- for (i = 0; i < cnt; i += 2)
- rd_kafka_dbg(rk, CONF, "CONF", " %s = %s", arr[i], arr[i + 1]);
-
- rd_kafka_conf_dump_free(arr, cnt);
-}
-
-void rd_kafka_conf_properties_show(FILE *fp) {
- const struct rd_kafka_property *prop0;
- int last = 0;
- int j;
- char tmp[512];
- const char *dash80 =
- "----------------------------------------"
- "----------------------------------------";
-
- for (prop0 = rd_kafka_properties; prop0->name; prop0++) {
- const char *typeinfo = "";
- const char *importance;
- const struct rd_kafka_property *prop = prop0;
-
- /* Skip hidden properties. */
- if (prop->scope & _RK_HIDDEN)
- continue;
-
- /* Skip invalid properties. */
- if (prop->type == _RK_C_INVALID)
- continue;
-
- if (!(prop->scope & last)) {
- fprintf(fp, "%s## %s configuration properties\n\n",
- last ? "\n\n" : "",
- prop->scope == _RK_GLOBAL ? "Global" : "Topic");
-
- fprintf(fp,
- "%-40s | %3s | %-15s | %13s | %-10s | %-25s\n"
- "%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s-| -%.*s\n",
- "Property", "C/P", "Range", "Default",
- "Importance", "Description", 40, dash80, 3,
- dash80, 15, dash80, 13, dash80, 10, dash80, 25,
- dash80);
-
- last = prop->scope & (_RK_GLOBAL | _RK_TOPIC);
- }
-
- fprintf(fp, "%-40s | ", prop->name);
-
- /* For aliases, use the aliased property from here on
- * so that the alias property shows up with proper
- * ranges, defaults, etc. */
- if (prop->type == _RK_C_ALIAS) {
- prop = rd_kafka_conf_prop_find(prop->scope, prop->sdef);
- rd_assert(prop && *"BUG: "
- "alias points to unknown config property");
- }
-
- fprintf(fp, "%3s | ",
- (!(prop->scope & _RK_PRODUCER) ==
- !(prop->scope & _RK_CONSUMER)
- ? " * "
- : ((prop->scope & _RK_PRODUCER) ? " P " : " C ")));
-
- switch (prop->type) {
- case _RK_C_STR:
- case _RK_C_KSTR:
- typeinfo = "string";
- case _RK_C_PATLIST:
- if (prop->type == _RK_C_PATLIST)
- typeinfo = "pattern list";
- if (prop->s2i[0].str) {
- rd_kafka_conf_flags2str(
- tmp, sizeof(tmp), ", ", prop, -1,
- rd_true /*include unsupported*/);
- fprintf(fp, "%-15s | %13s", tmp,
- prop->sdef ? prop->sdef : "");
- } else {
- fprintf(fp, "%-15s | %13s", "",
- prop->sdef ? prop->sdef : "");
- }
- break;
- case _RK_C_BOOL:
- typeinfo = "boolean";
- fprintf(fp, "%-15s | %13s", "true, false",
- prop->vdef ? "true" : "false");
- break;
- case _RK_C_INT:
- typeinfo = "integer";
- rd_snprintf(tmp, sizeof(tmp), "%d .. %d", prop->vmin,
- prop->vmax);
- fprintf(fp, "%-15s | %13i", tmp, prop->vdef);
- break;
- case _RK_C_DBL:
- typeinfo = "float"; /* more user-friendly than double */
- rd_snprintf(tmp, sizeof(tmp), "%g .. %g", prop->dmin,
- prop->dmax);
- fprintf(fp, "%-15s | %13g", tmp, prop->ddef);
- break;
- case _RK_C_S2I:
- typeinfo = "enum value";
- rd_kafka_conf_flags2str(
- tmp, sizeof(tmp), ", ", prop, -1,
- rd_true /*include unsupported*/);
- fprintf(fp, "%-15s | ", tmp);
-
- for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
- if (prop->s2i[j].val == prop->vdef) {
- fprintf(fp, "%13s", prop->s2i[j].str);
- break;
- }
- }
- if (j == RD_ARRAYSIZE(prop->s2i))
- fprintf(fp, "%13s", " ");
- break;
-
- case _RK_C_S2F:
- typeinfo = "CSV flags";
- /* Don't duplicate the builtin.features value in
- * both Range and Default */
- if (!strcmp(prop->name, "builtin.features"))
- *tmp = '\0';
- else
- rd_kafka_conf_flags2str(
- tmp, sizeof(tmp), ", ", prop, -1,
- rd_true /*include unsupported*/);
- fprintf(fp, "%-15s | ", tmp);
- rd_kafka_conf_flags2str(
- tmp, sizeof(tmp), ", ", prop, prop->vdef,
- rd_true /*include unsupported*/);
- fprintf(fp, "%13s", tmp);
-
- break;
- case _RK_C_PTR:
- case _RK_C_INTERNAL:
- typeinfo = "see dedicated API";
- /* FALLTHRU */
- default:
- fprintf(fp, "%-15s | %-13s", "", " ");
- break;
- }
-
- if (prop->scope & _RK_HIGH)
- importance = "high";
- else if (prop->scope & _RK_MED)
- importance = "medium";
- else
- importance = "low";
-
- fprintf(fp, " | %-10s | ", importance);
-
- if (prop->scope & _RK_EXPERIMENTAL)
- fprintf(fp,
- "**EXPERIMENTAL**: "
- "subject to change or removal. ");
-
- if (prop->scope & _RK_DEPRECATED)
- fprintf(fp, "**DEPRECATED** ");
-
- /* If the original property is an alias, prefix the
- * description saying so. */
- if (prop0->type == _RK_C_ALIAS)
- fprintf(fp, "Alias for `%s`: ", prop0->sdef);
-
- fprintf(fp, "%s <br>*Type: %s*\n", prop->desc, typeinfo);
- }
- fprintf(fp, "\n");
- fprintf(fp, "### C/P legend: C = Consumer, P = Producer, * = both\n");
-}
-
-
-
-/**
- * @name Configuration value methods
- *
- * @remark This generic interface will eventually replace the config
- * property interface used above.
- * @{
- */
-
-
-/**
- * @brief Set up an INT confval.
- *
- * @param name Property name, must be a const static string (will not be copied)
- */
-void rd_kafka_confval_init_int(rd_kafka_confval_t *confval,
- const char *name,
- int vmin,
- int vmax,
- int vdef) {
- confval->name = name;
- confval->is_enabled = 1;
- confval->valuetype = RD_KAFKA_CONFVAL_INT;
- confval->u.INT.vmin = vmin;
- confval->u.INT.vmax = vmax;
- confval->u.INT.vdef = vdef;
- confval->u.INT.v = vdef;
-}
-
-/**
- * @brief Set up a PTR confval.
- *
- * @param name Property name, must be a const static string (will not be copied)
- */
-void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name) {
- confval->name = name;
- confval->is_enabled = 1;
- confval->valuetype = RD_KAFKA_CONFVAL_PTR;
- confval->u.PTR = NULL;
-}
-
-/**
- * @brief Set up but disable a confval; attempts to set it will fail.
- *
- * @param name Property name, must be a const static string (will not be copied)
- */
-void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name) {
- confval->name = name;
- confval->is_enabled = 0;
-}
-
-/**
- * @brief Set confval's value to \p valuep, verifying the passed
- * \p valuetype matches (or can be cast to) \p confval's type.
- *
- * @param valuetype is the type of \p valuep (error strings include the
- * confval's name).
- * @param valuep is a pointer to the value, or NULL to revert to default.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the new value was set, or
- * RD_KAFKA_RESP_ERR__INVALID_ARG if the value was of incorrect type,
- * out of range, or otherwise not a valid value.
- */
-rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval,
- rd_kafka_confval_type_t valuetype,
- const void *valuep,
- char *errstr,
- size_t errstr_size) {
-
- if (!confval->is_enabled) {
- rd_snprintf(errstr, errstr_size,
- "\"%s\" is not supported for this operation",
- confval->name);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- switch (confval->valuetype) {
- case RD_KAFKA_CONFVAL_INT: {
- int v;
- const char *end;
-
- if (!valuep) {
- /* Revert to default */
- confval->u.INT.v = confval->u.INT.vdef;
- confval->is_set = 0;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- switch (valuetype) {
- case RD_KAFKA_CONFVAL_INT:
- v = *(const int *)valuep;
- break;
- case RD_KAFKA_CONFVAL_STR:
- v = (int)strtol((const char *)valuep, (char **)&end, 0);
- if (end == (const char *)valuep) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value type for \"%s\": "
- "expecting integer",
- confval->name);
- return RD_KAFKA_RESP_ERR__INVALID_TYPE;
- }
- break;
- default:
- rd_snprintf(errstr, errstr_size,
- "Invalid value type for \"%s\": "
- "expecting integer",
- confval->name);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
-
- if ((confval->u.INT.vmin || confval->u.INT.vmax) &&
- (v < confval->u.INT.vmin || v > confval->u.INT.vmax)) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value type for \"%s\": "
- "expecting integer in range %d..%d",
- confval->name, confval->u.INT.vmin,
- confval->u.INT.vmax);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- confval->u.INT.v = v;
- confval->is_set = 1;
- } break;
-
- case RD_KAFKA_CONFVAL_STR: {
- size_t vlen;
- const char *v = (const char *)valuep;
-
- if (!valuep) {
- /* Revert to default; return here since falling
- * through would call strlen() on a NULL value. */
- confval->is_set = 0;
- if (confval->u.STR.v)
- rd_free(confval->u.STR.v);
- if (confval->u.STR.vdef)
- confval->u.STR.v =
- rd_strdup(confval->u.STR.vdef);
- else
- confval->u.STR.v = NULL;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (valuetype != RD_KAFKA_CONFVAL_STR) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value type for \"%s\": "
- "expecting string",
- confval->name);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- vlen = strlen(v);
- if ((confval->u.STR.minlen || confval->u.STR.maxlen) &&
- (vlen < confval->u.STR.minlen ||
- vlen > confval->u.STR.maxlen)) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value for \"%s\": "
- "expecting string with length "
- "%" PRIusz "..%" PRIusz,
- confval->name, confval->u.STR.minlen,
- confval->u.STR.maxlen);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- if (confval->u.STR.v)
- rd_free(confval->u.STR.v);
-
- confval->u.STR.v = rd_strdup(v);
- } break;
-
- case RD_KAFKA_CONFVAL_PTR:
- confval->u.PTR = (void *)valuep;
- break;
-
- default:
- RD_NOTREACHED();
- return RD_KAFKA_RESP_ERR__NOENT;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval) {
- rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_INT);
- return confval->u.INT.v;
-}
-
-
-const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval) {
- rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_STR);
- return confval->u.STR.v;
-}
-
-void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval) {
- rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_PTR);
- return confval->u.PTR;
-}
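-
-/* Confval lifecycle sketch ("timeout_ms" is a hypothetical name):
- * initialize an INT confval, set it from a string, read it back.
- *
- *   rd_kafka_confval_t cv;
- *   char errstr[128];
- *   rd_kafka_confval_init_int(&cv, "timeout_ms", 0, 60000, 5000);
- *   rd_kafka_confval_set_type(&cv, RD_KAFKA_CONFVAL_STR, "1000",
- *                             errstr, sizeof(errstr));
- *   rd_assert(rd_kafka_confval_get_int(&cv) == 1000);
- */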
-
-
-#define _is_alphanum(C) \
- (((C) >= 'a' && (C) <= 'z') || ((C) >= 'A' && (C) <= 'Z') || \
- ((C) >= '0' && (C) <= '9'))
-
-/**
- * @returns true if the string is KIP-511 safe, else false.
- */
-static rd_bool_t rd_kafka_sw_str_is_safe(const char *str) {
- const char *s;
-
- if (!*str)
- return rd_true;
-
- for (s = str; *s; s++) {
- int c = (int)*s;
-
- if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.')))
- return rd_false;
- }
-
- /* Verify that the string begins and ends with a-zA-Z0-9 */
- if (!_is_alphanum(*str))
- return rd_false;
- if (!_is_alphanum(*(s - 1)))
- return rd_false;
-
- return rd_true;
-}
-
-
-/**
- * @brief Sanitize KIP-511 software name/version strings in-place,
- * replacing unaccepted characters with "-".
- *
- * @warning The \p str is modified in-place.
- */
-static void rd_kafka_sw_str_sanitize_inplace(char *str) {
- char *s = str, *d = str;
-
- /* Strip any leading non-alphanums, stopping at the nul
- * terminator in case the string contains none at all. */
- while (*s && !_is_alphanum(*s))
- s++;
-
- for (; *s; s++) {
- int c = (int)*s;
-
- if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.')))
- *d = '-';
- else
- *d = *s;
- d++;
- }
-
- *d = '\0';
-
- /* Strip any trailing non-alphanums */
- for (d = d - 1; d >= str && !_is_alphanum(*d); d--)
- *d = '\0';
-}
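-
-/* Illustrative sketch:
- *
- *   char sw[] = "_my client/v1.0_";
- *   rd_kafka_sw_str_sanitize_inplace(sw);
- *
- * after which \p sw reads "my-client-v1.0": leading/trailing
- * non-alphanumerics are stripped and interior ones replaced with '-'. */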
-
-#undef _is_alphanum
-
-
-/**
- * @brief Create a staggered array of key-value pairs from
- * an array of "key=value" strings (typically from rd_string_split()).
- *
- * The output array will have element 0 being key0 and element 1 being
- * value0. Element 2 being key1 and element 3 being value1, and so on.
- * E.g.:
- * input { "key0=value0", "key1=value1" } incnt=2
- * returns { "key0", "value0", "key1", "value1" } cntp=4
- *
- * @returns NULL on error (no '=' separator), or a newly allocated array
- * on success. The array count is returned in \p cntp.
- * The returned pointer must be freed with rd_free().
- */
-char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp) {
- size_t i;
- char **out, *p;
- size_t lens = 0;
- size_t outcnt = 0;
-
- /* First calculate total length needed for key-value strings. */
- for (i = 0; i < incnt; i++) {
- const char *t = strchr(input[i], '=');
-
- /* No "=", or "=" at beginning of string. */
- if (!t || t == input[i])
- return NULL;
-
- /* Length of key, '=' (will be \0), value, and \0 */
- lens += strlen(input[i]) + 1;
- }
-
- /* Allocate array along with elements in one go */
- out = rd_malloc((sizeof(*out) * incnt * 2) + lens);
- p = (char *)(&out[incnt * 2]);
-
- for (i = 0; i < incnt; i++) {
- const char *t = strchr(input[i], '=');
- size_t namelen = (size_t)(t - input[i]);
- size_t valuelen = strlen(t + 1);
-
- /* Copy name */
- out[outcnt++] = p;
- memcpy(p, input[i], namelen);
- p += namelen;
- *(p++) = '\0';
-
- /* Copy value */
- out[outcnt++] = p;
- memcpy(p, t + 1, valuelen + 1);
- p += valuelen;
- *(p++) = '\0';
- }
-
-
- *cntp = outcnt;
- return out;
-}
-
-
-/**
- * @brief Verify configuration \p conf is
- * correct/non-conflicting and finalize the configuration
- * settings for use.
- *
- * @returns an error string if configuration is incorrect, else NULL.
- */
-const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
- rd_kafka_conf_t *conf) {
- const char *errstr;
-
- if (!conf->sw_name)
- rd_kafka_conf_set(conf, "client.software.name", "librdkafka",
- NULL, 0);
- if (!conf->sw_version)
- rd_kafka_conf_set(conf, "client.software.version",
- rd_kafka_version_str(), NULL, 0);
-
- /* The client.software.name and .version are sent to the broker
- * with the ApiVersionRequest starting with AK 2.4.0 (KIP-511).
- * These strings need to be sanitized or the broker will reject them,
- * so modify them in-place here. */
- rd_assert(conf->sw_name && conf->sw_version);
- rd_kafka_sw_str_sanitize_inplace(conf->sw_name);
- rd_kafka_sw_str_sanitize_inplace(conf->sw_version);
-
- /* Verify mandatory configuration */
- if (!conf->socket_cb)
- return "Mandatory config property `socket_cb` not set";
-
- if (!conf->open_cb)
- return "Mandatory config property `open_cb` not set";
-
-#if WITH_SSL
- if (conf->ssl.keystore_location && !conf->ssl.keystore_password)
- return "`ssl.keystore.password` is mandatory when "
- "`ssl.keystore.location` is set";
- if (conf->ssl.ca && (conf->ssl.ca_location || conf->ssl.ca_pem))
- return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based "
- "set_ssl_cert(CERT_CA) are mutually exclusive.";
-#ifdef __APPLE__
- else if (!conf->ssl.ca && !conf->ssl.ca_location && !conf->ssl.ca_pem)
- /* Default ssl.ca.location to 'probe' on OSX */
- rd_kafka_conf_set(conf, "ssl.ca.location", "probe", NULL, 0);
-#endif
-#endif
-
-#if WITH_SASL_OAUTHBEARER
- if (!rd_strcasecmp(conf->sasl.mechanisms, "OAUTHBEARER")) {
- if (conf->sasl.enable_oauthbearer_unsecure_jwt &&
- conf->sasl.oauthbearer.token_refresh_cb)
- return "`enable.sasl.oauthbearer.unsecure.jwt` and "
- "`oauthbearer_token_refresh_cb` are "
- "mutually exclusive";
-
- if (conf->sasl.enable_oauthbearer_unsecure_jwt &&
- conf->sasl.oauthbearer.method ==
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC)
- return "`enable.sasl.oauthbearer.unsecure.jwt` and "
- "`sasl.oauthbearer.method=oidc` are "
- "mutually exclusive";
-
- if (conf->sasl.oauthbearer.method ==
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) {
- if (!conf->sasl.oauthbearer.client_id)
- return "`sasl.oauthbearer.client.id` is "
- "mandatory when "
- "`sasl.oauthbearer.method=oidc` is set";
-
- if (!conf->sasl.oauthbearer.client_secret) {
- return "`sasl.oauthbearer.client.secret` is "
- "mandatory when "
- "`sasl.oauthbearer.method=oidc` is set";
- }
-
- if (!conf->sasl.oauthbearer.token_endpoint_url) {
- return "`sasl.oauthbearer.token.endpoint.url` "
- "is mandatory when "
- "`sasl.oauthbearer.method=oidc` is set";
- }
- }
-
- /* Enable background thread for the builtin OIDC handler,
- * unless a refresh callback has been set. */
- if (conf->sasl.oauthbearer.method ==
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
- !conf->sasl.oauthbearer.token_refresh_cb) {
- conf->enabled_events |= RD_KAFKA_EVENT_BACKGROUND;
- conf->sasl.enable_callback_queue = 1;
- }
- }
-
-#endif
-
- if (cltype == RD_KAFKA_CONSUMER) {
-
- /* Automatically adjust `fetch.max.bytes` to be >=
- * `message.max.bytes` and <= `queued.max.message.kbytes`
- * unless set by user. */
- if (rd_kafka_conf_is_modified(conf, "fetch.max.bytes")) {
- if (conf->fetch_max_bytes < conf->max_msg_size)
- return "`fetch.max.bytes` must be >= "
- "`message.max.bytes`";
- } else {
- conf->fetch_max_bytes =
- RD_MAX(RD_MIN(conf->fetch_max_bytes,
- conf->queued_max_msg_kbytes * 1024),
- conf->max_msg_size);
- }
-
- /* Automatically adjust 'receive.message.max.bytes' to
- * be 512 bytes larger than 'fetch.max.bytes' to have enough
- * room for protocol framing (including topic name), unless
- * set by user. */
- if (rd_kafka_conf_is_modified(conf,
- "receive.message.max.bytes")) {
- if (conf->fetch_max_bytes + 512 >
- conf->recv_max_msg_size)
- return "`receive.message.max.bytes` must be >= "
- "`fetch.max.bytes` + 512";
- } else {
- conf->recv_max_msg_size =
- RD_MAX(conf->recv_max_msg_size,
- conf->fetch_max_bytes + 512);
- }
-
- if (conf->max_poll_interval_ms < conf->group_session_timeout_ms)
-                        return "`max.poll.interval.ms` must be >= "
- "`session.timeout.ms`";
-
- /* Simplifies rd_kafka_is_idempotent() which is producer-only */
- conf->eos.idempotence = 0;
-
- } else if (cltype == RD_KAFKA_PRODUCER) {
- if (conf->eos.transactional_id) {
- if (!conf->eos.idempotence) {
- /* Auto enable idempotence unless
- * explicitly disabled */
- if (rd_kafka_conf_is_modified(
- conf, "enable.idempotence"))
- return "`transactional.id` requires "
- "`enable.idempotence=true`";
-
- conf->eos.idempotence = rd_true;
- }
-
- /* Make sure at least one request can be sent
- * before the transaction times out. */
- if (!rd_kafka_conf_is_modified(conf,
- "socket.timeout.ms"))
- conf->socket_timeout_ms = RD_MAX(
- conf->eos.transaction_timeout_ms - 100,
- 900);
- else if (conf->eos.transaction_timeout_ms + 100 <
- conf->socket_timeout_ms)
- return "`socket.timeout.ms` must be set <= "
- "`transaction.timeout.ms` + 100";
- }
-
- if (conf->eos.idempotence) {
-                        /* Adjust configuration values for idempotent producer */
-
- if (rd_kafka_conf_is_modified(conf, "max.in.flight")) {
- if (conf->max_inflight >
- RD_KAFKA_IDEMP_MAX_INFLIGHT)
- return "`max.in.flight` must be "
- "set "
- "<="
- " " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR
- " when `enable.idempotence` "
- "is true";
- } else {
- conf->max_inflight =
- RD_MIN(conf->max_inflight,
- RD_KAFKA_IDEMP_MAX_INFLIGHT);
- }
-
-
- if (rd_kafka_conf_is_modified(conf, "retries")) {
- if (conf->max_retries < 1)
- return "`retries` must be set >= 1 "
- "when `enable.idempotence` is "
- "true";
- } else {
- conf->max_retries = INT32_MAX;
- }
-
-
- if (rd_kafka_conf_is_modified(
- conf,
- "queue.buffering.backpressure.threshold") &&
- conf->queue_backpressure_thres > 1)
- return "`queue.buffering.backpressure."
- "threshold` "
- "must be set to 1 when "
- "`enable.idempotence` is true";
- else
- conf->queue_backpressure_thres = 1;
-
- /* acks=all and queuing.strategy are set
- * in topic_conf_finalize() */
-
- } else {
- if (conf->eos.gapless &&
- rd_kafka_conf_is_modified(
- conf, "enable.gapless.guarantee"))
- return "`enable.gapless.guarantee` requires "
- "`enable.idempotence` to be enabled";
- }
-
- if (!rd_kafka_conf_is_modified(conf,
- "sticky.partitioning.linger.ms"))
- conf->sticky_partition_linger_ms = (int)RD_MIN(
- 900000, (rd_ts_t)(2 * conf->buffering_max_ms_dbl));
- }
-
-
- if (!rd_kafka_conf_is_modified(conf, "metadata.max.age.ms") &&
- conf->metadata_refresh_interval_ms > 0)
- conf->metadata_max_age_ms =
- conf->metadata_refresh_interval_ms * 3;
-
- if (conf->reconnect_backoff_max_ms < conf->reconnect_backoff_ms)
- return "`reconnect.backoff.max.ms` must be >= "
-                       "`reconnect.backoff.ms`";
-
- if (conf->sparse_connections) {
-                /* Set the sparse connection random selection interval to
-                 * reconnect.backoff.ms / 2, clamped to the range 11..1000 ms. */
- conf->sparse_connect_intvl =
- RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms / 2, 1000));
- }
-
- if (!rd_kafka_conf_is_modified(conf, "connections.max.idle.ms") &&
- conf->brokerlist && rd_strcasestr(conf->brokerlist, "azure")) {
- /* Issue #3109:
- * Default connections.max.idle.ms to <4 minutes on Azure. */
- conf->connections_max_idle_ms = (4 * 60 - 10) * 1000;
- }
-
- if (!rd_kafka_conf_is_modified(conf, "allow.auto.create.topics")) {
- /* Consumer: Do not allow auto create by default.
- * Producer: Allow auto create by default. */
- if (cltype == RD_KAFKA_CONSUMER)
- conf->allow_auto_create_topics = rd_false;
- else if (cltype == RD_KAFKA_PRODUCER)
- conf->allow_auto_create_topics = rd_true;
- }
-
- /* Finalize and verify the default.topic.config */
- if (conf->topic_conf) {
-
- if (cltype == RD_KAFKA_PRODUCER) {
- rd_kafka_topic_conf_t *tconf = conf->topic_conf;
-
- if (tconf->message_timeout_ms != 0 &&
- (double)tconf->message_timeout_ms <=
- conf->buffering_max_ms_dbl) {
- if (rd_kafka_conf_is_modified(conf,
- "linger.ms"))
- return "`message.timeout.ms` must be "
- "greater than `linger.ms`";
- else /* Auto adjust linger.ms to be lower
- * than message.timeout.ms */
- conf->buffering_max_ms_dbl =
- (double)tconf->message_timeout_ms -
- 0.1;
- }
- }
-
- errstr = rd_kafka_topic_conf_finalize(cltype, conf,
- conf->topic_conf);
- if (errstr)
- return errstr;
- }
-
- /* Convert double linger.ms to internal int microseconds after
- * finalizing default_topic_conf since it may
- * update buffering_max_ms_dbl. */
- conf->buffering_max_us = (rd_ts_t)(conf->buffering_max_ms_dbl * 1000);
-
-
- return NULL;
-}
-
-
-/**
- * @brief Verify topic configuration \p tconf is
- * correct/non-conflicting and finalize the configuration
- * settings for use.
- *
- * @returns an error string if configuration is incorrect, else NULL.
- */
-const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype,
- const rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *tconf) {
-
- if (cltype != RD_KAFKA_PRODUCER)
- return NULL;
-
- if (conf->eos.idempotence) {
- /* Ensure acks=all */
- if (rd_kafka_topic_conf_is_modified(tconf, "acks")) {
- if (tconf->required_acks != -1)
- return "`acks` must be set to `all` when "
- "`enable.idempotence` is true";
- } else {
- tconf->required_acks = -1; /* all */
- }
-
- /* Ensure FIFO queueing */
- if (rd_kafka_topic_conf_is_modified(tconf,
- "queuing.strategy")) {
- if (tconf->queuing_strategy != RD_KAFKA_QUEUE_FIFO)
- return "`queuing.strategy` must be set to "
- "`fifo` when `enable.idempotence` is "
- "true";
- } else {
- tconf->queuing_strategy = RD_KAFKA_QUEUE_FIFO;
- }
-
- /* Ensure message.timeout.ms <= transaction.timeout.ms */
- if (conf->eos.transactional_id) {
- if (!rd_kafka_topic_conf_is_modified(
- tconf, "message.timeout.ms"))
- tconf->message_timeout_ms =
- conf->eos.transaction_timeout_ms;
- else if (tconf->message_timeout_ms >
- conf->eos.transaction_timeout_ms)
- return "`message.timeout.ms` must be set <= "
- "`transaction.timeout.ms`";
- }
- }
-
- if (tconf->message_timeout_ms != 0 &&
- (double)tconf->message_timeout_ms <= conf->buffering_max_ms_dbl &&
- rd_kafka_conf_is_modified(conf, "linger.ms"))
- return "`message.timeout.ms` must be greater than `linger.ms`";
-
- return NULL;
-}
-
-
-/**
- * @brief Log warnings for set deprecated or experimental
- * configuration properties.
- * @returns the number of warnings logged.
- */
-static int rd_kafka_anyconf_warn_deprecated(rd_kafka_t *rk,
- rd_kafka_conf_scope_t scope,
- const void *conf) {
- const struct rd_kafka_property *prop;
- int warn_type =
- rk->rk_type == RD_KAFKA_PRODUCER ? _RK_CONSUMER : _RK_PRODUCER;
- int warn_on = _RK_DEPRECATED | _RK_EXPERIMENTAL | warn_type;
-
- int cnt = 0;
-
- for (prop = rd_kafka_properties; prop->name; prop++) {
- int match = prop->scope & warn_on;
-
- if (likely(!(prop->scope & scope) || !match))
- continue;
-
- if (likely(!rd_kafka_anyconf_is_modified(conf, prop)))
- continue;
-
- if (match != warn_type)
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Configuration property %s is %s%s%s: %s",
- prop->name,
- match & _RK_DEPRECATED ? "deprecated" : "",
- match == warn_on ? " and " : "",
- match & _RK_EXPERIMENTAL ? "experimental"
- : "",
- prop->desc);
-
- if (match & warn_type)
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Configuration property %s "
- "is a %s property and will be ignored by "
- "this %s instance",
- prop->name,
- warn_type == _RK_PRODUCER ? "producer"
- : "consumer",
- warn_type == _RK_PRODUCER ? "consumer"
- : "producer");
-
- cnt++;
- }
-
- return cnt;
-}
-
-
-/**
- * @brief Log configuration warnings (deprecated configuration properties,
- * unrecommended combinations, etc).
- *
- * @returns the number of warnings logged.
- *
- * @locality any
- * @locks none
- */
-int rd_kafka_conf_warn(rd_kafka_t *rk) {
- int cnt = 0;
-
- cnt = rd_kafka_anyconf_warn_deprecated(rk, _RK_GLOBAL, &rk->rk_conf);
- if (rk->rk_conf.topic_conf)
- cnt += rd_kafka_anyconf_warn_deprecated(rk, _RK_TOPIC,
- rk->rk_conf.topic_conf);
-
- if (rk->rk_conf.warn.default_topic_conf_overwritten)
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Topic configuration properties set in the "
- "global configuration were overwritten by "
- "explicitly setting a default_topic_conf: "
- "recommend not using set_default_topic_conf");
-
- /* Additional warnings */
- if (rk->rk_type == RD_KAFKA_CONSUMER) {
- if (rk->rk_conf.fetch_wait_max_ms + 1000 >
- rk->rk_conf.socket_timeout_ms)
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Configuration property "
- "`fetch.wait.max.ms` (%d) should be "
- "set lower than `socket.timeout.ms` (%d) "
- "by at least 1000ms to avoid blocking "
-                                     "and timing out subsequent requests",
- rk->rk_conf.fetch_wait_max_ms,
- rk->rk_conf.socket_timeout_ms);
- }
-
- if (rd_kafka_conf_is_modified(&rk->rk_conf, "sasl.mechanisms") &&
- !(rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL ||
- rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT)) {
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Configuration property `sasl.mechanism` set to "
- "`%s` but `security.protocol` is not configured "
- "for SASL: recommend setting "
- "`security.protocol` to SASL_SSL or "
- "SASL_PLAINTEXT",
- rk->rk_conf.sasl.mechanisms);
- }
-
- if (rd_kafka_conf_is_modified(&rk->rk_conf, "sasl.username") &&
- !(!strncmp(rk->rk_conf.sasl.mechanisms, "SCRAM", 5) ||
- !strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")))
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Configuration property `sasl.username` only "
- "applies when `sasl.mechanism` is set to "
- "PLAIN or SCRAM-SHA-..");
-
- if (rd_kafka_conf_is_modified(&rk->rk_conf, "client.software.name") &&
- !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_name))
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
- "Configuration property `client.software.name` "
- "may only contain 'a-zA-Z0-9.-', other characters "
- "will be replaced with '-'");
-
- if (rd_kafka_conf_is_modified(&rk->rk_conf,
- "client.software.version") &&
- !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_version))
- rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
-                            "Configuration property `client.software.version` "
- "may only contain 'a-zA-Z0-9.-', other characters "
- "will be replaced with '-'");
-
- if (rd_atomic32_get(&rk->rk_broker_cnt) == 0)
- rd_kafka_log(rk, LOG_NOTICE, "CONFWARN",
- "No `bootstrap.servers` configured: "
- "client will not be able to connect "
- "to Kafka cluster");
-
- return cnt;
-}
-
-
-const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk) {
- return &rk->rk_conf;
-}
-
-
-/**
- * @brief Unittests
- */
-int unittest_conf(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_conf_res_t res, res2;
- char errstr[128];
- int iteration;
- const struct rd_kafka_property *prop;
- char readval[512];
- size_t readlen;
- const char *errstr2;
-
- conf = rd_kafka_conf_new();
- tconf = rd_kafka_topic_conf_new();
-
- res = rd_kafka_conf_set(conf, "unknown.thing", "foo", errstr,
- sizeof(errstr));
- RD_UT_ASSERT(res == RD_KAFKA_CONF_UNKNOWN, "fail");
- RD_UT_ASSERT(*errstr, "fail");
-
- for (iteration = 0; iteration < 5; iteration++) {
- int cnt;
-
-
- /* Iterations:
- * 0 - Check is_modified
- * 1 - Set every other config property, read back and verify.
- * 2 - Check is_modified.
- * 3 - Set all config properties, read back and verify.
- * 4 - Check is_modified. */
- for (prop = rd_kafka_properties, cnt = 0; prop->name;
- prop++, cnt++) {
- const char *val;
- char tmp[64];
- int odd = cnt & 1;
- int do_set = iteration == 3 || (iteration == 1 && odd);
- rd_bool_t is_modified;
- int exp_is_modified =
- !prop->unsupported &&
- (iteration >= 3 ||
- (iteration > 0 && (do_set || odd)));
-
- readlen = sizeof(readval);
-
- /* Avoid some special configs */
- if (!strcmp(prop->name, "plugin.library.paths") ||
- !strcmp(prop->name, "builtin.features"))
- continue;
-
- switch (prop->type) {
- case _RK_C_STR:
- case _RK_C_KSTR:
- case _RK_C_PATLIST:
- if (prop->sdef)
- val = prop->sdef;
- else
- val = "test";
- break;
-
- case _RK_C_BOOL:
- val = "true";
- break;
-
- case _RK_C_INT:
- rd_snprintf(tmp, sizeof(tmp), "%d", prop->vdef);
- val = tmp;
- break;
-
- case _RK_C_DBL:
- rd_snprintf(tmp, sizeof(tmp), "%g", prop->ddef);
- val = tmp;
- break;
-
- case _RK_C_S2F:
- case _RK_C_S2I:
- val = prop->s2i[0].str;
- break;
-
- case _RK_C_PTR:
- case _RK_C_ALIAS:
- case _RK_C_INVALID:
- case _RK_C_INTERNAL:
- default:
- continue;
- }
-
-
- if (prop->scope & _RK_GLOBAL) {
- if (do_set)
- res = rd_kafka_conf_set(
- conf, prop->name, val, errstr,
- sizeof(errstr));
-
- res2 = rd_kafka_conf_get(conf, prop->name,
- readval, &readlen);
-
- is_modified =
- rd_kafka_conf_is_modified(conf, prop->name);
-
-
- } else if (prop->scope & _RK_TOPIC) {
- if (do_set)
- res = rd_kafka_topic_conf_set(
- tconf, prop->name, val, errstr,
- sizeof(errstr));
-
- res2 = rd_kafka_topic_conf_get(
- tconf, prop->name, readval, &readlen);
-
- is_modified = rd_kafka_topic_conf_is_modified(
- tconf, prop->name);
-
- } else {
- RD_NOTREACHED();
- }
-
-
-
- if (do_set && prop->unsupported) {
- RD_UT_ASSERT(res == RD_KAFKA_CONF_INVALID,
- "conf_set %s should've failed "
- "with CONF_INVALID, not %d: %s",
- prop->name, res, errstr);
-
- } else if (do_set) {
- RD_UT_ASSERT(res == RD_KAFKA_CONF_OK,
- "conf_set %s failed: %d: %s",
- prop->name, res, errstr);
- RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK,
- "conf_get %s failed: %d",
- prop->name, res2);
-
- RD_UT_ASSERT(!strcmp(readval, val),
- "conf_get %s "
- "returned \"%s\": "
- "expected \"%s\"",
- prop->name, readval, val);
-
- RD_UT_ASSERT(is_modified,
- "Property %s was set but "
- "is_modified=%d",
- prop->name, is_modified);
- }
-
- assert(is_modified == exp_is_modified);
- RD_UT_ASSERT(is_modified == exp_is_modified,
- "Property %s is_modified=%d, "
- "exp_is_modified=%d "
- "(iter %d, odd %d, do_set %d)",
- prop->name, is_modified, exp_is_modified,
- iteration, odd, do_set);
- }
- }
-
- /* Set an alias and make sure is_modified() works for it. */
- res = rd_kafka_conf_set(conf, "max.in.flight", "19", NULL, 0);
- RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res);
-
- RD_UT_ASSERT(rd_kafka_conf_is_modified(conf, "max.in.flight") ==
- rd_true,
- "fail");
- RD_UT_ASSERT(rd_kafka_conf_is_modified(
- conf, "max.in.flight.requests.per.connection") ==
- rd_true,
- "fail");
-
- rd_kafka_conf_destroy(conf);
- rd_kafka_topic_conf_destroy(tconf);
-
-
- /* Verify that software.client.* string-safing works */
- conf = rd_kafka_conf_new();
- res = rd_kafka_conf_set(conf, "client.software.name",
- " .~aba. va! !.~~", NULL, 0);
- RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res);
- res = rd_kafka_conf_set(conf, "client.software.version",
- "!1.2.3.4.5!!! a", NULL, 0);
- RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res);
-
- errstr2 = rd_kafka_conf_finalize(RD_KAFKA_PRODUCER, conf);
- RD_UT_ASSERT(!errstr2, "conf_finalize() failed: %s", errstr2);
-
- readlen = sizeof(readval);
- res2 =
- rd_kafka_conf_get(conf, "client.software.name", readval, &readlen);
- RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2);
- RD_UT_ASSERT(!strcmp(readval, "aba.-va"),
- "client.software.* safification failed: \"%s\"", readval);
- RD_UT_SAY("Safified client.software.name=\"%s\"", readval);
-
- readlen = sizeof(readval);
- res2 = rd_kafka_conf_get(conf, "client.software.version", readval,
- &readlen);
- RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2);
- RD_UT_ASSERT(!strcmp(readval, "1.2.3.4.5----a"),
- "client.software.* safification failed: \"%s\"", readval);
- RD_UT_SAY("Safified client.software.version=\"%s\"", readval);
-
- rd_kafka_conf_destroy(conf);
-
- RD_UT_PASS();
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h
deleted file mode 100644
index 161d6e469..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h
+++ /dev/null
@@ -1,650 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2014-2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_CONF_H_
-#define _RDKAFKA_CONF_H_
-
-#include "rdlist.h"
-#include "rdkafka_cert.h"
-
-#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 && \
- !defined(OPENSSL_IS_BORINGSSL)
-#define WITH_SSL_ENGINE 1
-/* Deprecated in OpenSSL 3 */
-#include <openssl/engine.h>
-#endif /* WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 */
-
-/**
- * Forward declarations
- */
-struct rd_kafka_transport_s;
-
-
-/**
- * MessageSet compression codecs
- */
-typedef enum {
- RD_KAFKA_COMPRESSION_NONE,
- RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP,
- RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY,
- RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4,
- RD_KAFKA_COMPRESSION_ZSTD = RD_KAFKA_MSG_ATTR_ZSTD,
- RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */
- RD_KAFKA_COMPRESSION_NUM
-} rd_kafka_compression_t;
-
-static RD_INLINE RD_UNUSED const char *
-rd_kafka_compression2str(rd_kafka_compression_t compr) {
- static const char *names[RD_KAFKA_COMPRESSION_NUM] = {
- [RD_KAFKA_COMPRESSION_NONE] = "none",
- [RD_KAFKA_COMPRESSION_GZIP] = "gzip",
- [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy",
- [RD_KAFKA_COMPRESSION_LZ4] = "lz4",
- [RD_KAFKA_COMPRESSION_ZSTD] = "zstd",
- [RD_KAFKA_COMPRESSION_INHERIT] = "inherit"};
- static RD_TLS char ret[32];
-
- if ((int)compr < 0 || compr >= RD_KAFKA_COMPRESSION_NUM) {
- rd_snprintf(ret, sizeof(ret), "codec0x%x?", (int)compr);
- return ret;
- }
-
- return names[compr];
-}
-
-/**
- * MessageSet compression levels
- */
-typedef enum {
- RD_KAFKA_COMPLEVEL_DEFAULT = -1,
- RD_KAFKA_COMPLEVEL_MIN = -1,
- RD_KAFKA_COMPLEVEL_GZIP_MAX = 9,
- RD_KAFKA_COMPLEVEL_LZ4_MAX = 12,
- RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0,
- RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22,
- RD_KAFKA_COMPLEVEL_MAX = 12
-} rd_kafka_complevel_t;
-
-typedef enum {
- RD_KAFKA_PROTO_PLAINTEXT,
- RD_KAFKA_PROTO_SSL,
- RD_KAFKA_PROTO_SASL_PLAINTEXT,
- RD_KAFKA_PROTO_SASL_SSL,
- RD_KAFKA_PROTO_NUM,
-} rd_kafka_secproto_t;
-
-
-typedef enum {
- RD_KAFKA_CONFIGURED,
- RD_KAFKA_LEARNED,
- RD_KAFKA_INTERNAL,
- RD_KAFKA_LOGICAL
-} rd_kafka_confsource_t;
-
-static RD_INLINE RD_UNUSED const char *
-rd_kafka_confsource2str(rd_kafka_confsource_t source) {
- static const char *names[] = {"configured", "learned", "internal",
- "logical"};
-
- return names[source];
-}
-
-
-typedef enum {
- _RK_GLOBAL = 0x1,
- _RK_PRODUCER = 0x2,
- _RK_CONSUMER = 0x4,
- _RK_TOPIC = 0x8,
- _RK_CGRP = 0x10,
- _RK_DEPRECATED = 0x20,
- _RK_HIDDEN = 0x40,
- _RK_HIGH = 0x80, /* High Importance */
- _RK_MED = 0x100, /* Medium Importance */
- _RK_EXPERIMENTAL = 0x200, /* Experimental (unsupported) property */
- _RK_SENSITIVE = 0x400 /* The configuration property's value
- * might contain sensitive information. */
-} rd_kafka_conf_scope_t;
-
-/**< While client groups are a generic concept, they are currently
- * only implemented for consumers in librdkafka. */
-#define _RK_CGRP _RK_CONSUMER
-
-typedef enum {
- _RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */
- _RK_CONF_PROP_SET_ADD, /* Add value (S2F) */
- _RK_CONF_PROP_SET_DEL /* Remove value (S2F) */
-} rd_kafka_conf_set_mode_t;
-
-
-
-typedef enum {
- RD_KAFKA_OFFSET_METHOD_NONE,
- RD_KAFKA_OFFSET_METHOD_FILE,
- RD_KAFKA_OFFSET_METHOD_BROKER
-} rd_kafka_offset_method_t;
-
-typedef enum {
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT,
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC
-} rd_kafka_oauthbearer_method_t;
-
-typedef enum {
- RD_KAFKA_SSL_ENDPOINT_ID_NONE,
- RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */
-} rd_kafka_ssl_endpoint_id_t;
-
-/* Increase in steps of 64 as needed.
- * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */
-#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 33)
-
-/**
- * @struct rd_kafka_anyconf_t
- * @brief The anyconf header must be the first field in the
- * rd_kafka_conf_t and rd_kafka_topic_conf_t structs.
- * It provides a way to track which property has been modified.
- */
-struct rd_kafka_anyconf_hdr {
- uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX / 64];
-};
-
-
-/**
- * Optional configuration struct passed to rd_kafka_new*().
- *
- * The struct is populated through string properties
- * by calling rd_kafka_conf_set().
- *
- */
-struct rd_kafka_conf_s {
- struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
-
- /*
- * Generic configuration
- */
- int enabled_events;
- int max_msg_size;
- int msg_copy_max_size;
- int recv_max_msg_size;
- int max_inflight;
- int metadata_request_timeout_ms;
- int metadata_refresh_interval_ms;
- int metadata_refresh_fast_cnt;
- int metadata_refresh_fast_interval_ms;
- int metadata_refresh_sparse;
- int metadata_max_age_ms;
- int metadata_propagation_max_ms;
- int debug;
- int broker_addr_ttl;
- int broker_addr_family;
- int socket_timeout_ms;
- int socket_blocking_max_ms;
- int socket_sndbuf_size;
- int socket_rcvbuf_size;
- int socket_keepalive;
- int socket_nagle_disable;
- int socket_max_fails;
- char *client_id_str;
- char *brokerlist;
- int stats_interval_ms;
- int term_sig;
- int reconnect_backoff_ms;
- int reconnect_backoff_max_ms;
- int reconnect_jitter_ms;
- int socket_connection_setup_timeout_ms;
- int connections_max_idle_ms;
- int sparse_connections;
- int sparse_connect_intvl;
- int api_version_request;
- int api_version_request_timeout_ms;
- int api_version_fallback_ms;
- char *broker_version_fallback;
- rd_kafka_secproto_t security_protocol;
-
- struct {
-#if WITH_SSL
- SSL_CTX *ctx;
-#endif
- char *cipher_suites;
- char *curves_list;
- char *sigalgs_list;
- char *key_location;
- char *key_pem;
- rd_kafka_cert_t *key;
- char *key_password;
- char *cert_location;
- char *cert_pem;
- rd_kafka_cert_t *cert;
- char *ca_location;
- char *ca_pem;
- rd_kafka_cert_t *ca;
- /** CSV list of Windows certificate stores */
- char *ca_cert_stores;
- char *crl_location;
-#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000
- ENGINE *engine;
-#endif
- char *engine_location;
- char *engine_id;
- void *engine_callback_data;
- char *providers;
- rd_list_t loaded_providers; /**< (SSL_PROVIDER*) */
- char *keystore_location;
- char *keystore_password;
- int endpoint_identification;
- int enable_verify;
- int (*cert_verify_cb)(rd_kafka_t *rk,
- const char *broker_name,
- int32_t broker_id,
- int *x509_error,
- int depth,
- const char *buf,
- size_t size,
- char *errstr,
- size_t errstr_size,
- void *opaque);
- } ssl;
-
- struct {
- const struct rd_kafka_sasl_provider *provider;
- char *principal;
- char *mechanisms;
- char *service_name;
- char *kinit_cmd;
- char *keytab;
- int relogin_min_time;
- /** Protects .username and .password access after client
- * instance has been created (see sasl_set_credentials()). */
- mtx_t lock;
- char *username;
- char *password;
-#if WITH_SASL_SCRAM
- /* SCRAM EVP-wrapped hash function
- * (return value from EVP_shaX()) */
- const void /*EVP_MD*/ *scram_evp;
- /* SCRAM direct hash function (e.g., SHA256()) */
- unsigned char *(*scram_H)(const unsigned char *d,
- size_t n,
- unsigned char *md);
- /* Hash size */
- size_t scram_H_size;
-#endif
- char *oauthbearer_config;
- int enable_oauthbearer_unsecure_jwt;
- int enable_callback_queue;
- struct {
- rd_kafka_oauthbearer_method_t method;
- char *token_endpoint_url;
- char *client_id;
- char *client_secret;
- char *scope;
- char *extensions_str;
- /* SASL/OAUTHBEARER token refresh event callback */
- void (*token_refresh_cb)(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque);
- } oauthbearer;
- } sasl;
-
- char *plugin_paths;
-#if WITH_PLUGINS
- rd_list_t plugins;
-#endif
-
- /* Interceptors */
- struct {
- /* rd_kafka_interceptor_method_t lists */
- rd_list_t on_conf_set; /* on_conf_set interceptors
- * (not copied on conf_dup()) */
- rd_list_t on_conf_dup; /* .. (not copied) */
- rd_list_t on_conf_destroy; /* .. (not copied) */
- rd_list_t on_new; /* .. (copied) */
- rd_list_t on_destroy; /* .. (copied) */
- rd_list_t on_send; /* .. (copied) */
- rd_list_t on_acknowledgement; /* .. (copied) */
- rd_list_t on_consume; /* .. (copied) */
- rd_list_t on_commit; /* .. (copied) */
- rd_list_t on_request_sent; /* .. (copied) */
- rd_list_t on_response_received; /* .. (copied) */
- rd_list_t on_thread_start; /* .. (copied) */
- rd_list_t on_thread_exit; /* .. (copied) */
- rd_list_t on_broker_state_change; /* .. (copied) */
-
- /* rd_strtup_t list */
- rd_list_t config; /* Configuration name=val's
- * handled by interceptors. */
- } interceptors;
-
- /* Client group configuration */
- int coord_query_intvl_ms;
- int max_poll_interval_ms;
-
- int builtin_features;
- /*
- * Consumer configuration
- */
- int check_crcs;
- int queued_min_msgs;
- int queued_max_msg_kbytes;
- int64_t queued_max_msg_bytes;
- int fetch_wait_max_ms;
- int fetch_msg_max_bytes;
- int fetch_max_bytes;
- int fetch_min_bytes;
- int fetch_error_backoff_ms;
- char *group_id_str;
- char *group_instance_id;
- int allow_auto_create_topics;
-
- rd_kafka_pattern_list_t *topic_blacklist;
- struct rd_kafka_topic_conf_s *topic_conf; /* Default topic config
- * for automatically
- * subscribed topics. */
- int enable_auto_commit;
- int enable_auto_offset_store;
- int auto_commit_interval_ms;
- int group_session_timeout_ms;
- int group_heartbeat_intvl_ms;
- rd_kafkap_str_t *group_protocol_type;
- char *partition_assignment_strategy;
- rd_list_t partition_assignors;
- int enabled_assignor_cnt;
-
- void (*rebalance_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque);
-
- void (*offset_commit_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque);
-
- rd_kafka_offset_method_t offset_store_method;
-
- rd_kafka_isolation_level_t isolation_level;
-
- int enable_partition_eof;
-
- rd_kafkap_str_t *client_rack;
-
- /*
- * Producer configuration
- */
- struct {
- /*
- * Idempotence
- */
- int idempotence; /**< Enable Idempotent Producer */
- rd_bool_t gapless; /**< Raise fatal error if
- * gapless guarantee can't be
- * satisfied. */
- /*
- * Transactions
- */
- char *transactional_id; /**< Transactional Id */
- int transaction_timeout_ms; /**< Transaction timeout */
- } eos;
- int queue_buffering_max_msgs;
- int queue_buffering_max_kbytes;
- double buffering_max_ms_dbl; /**< This is the configured value */
- rd_ts_t buffering_max_us; /**< This is the value used in the code */
- int queue_backpressure_thres;
- int max_retries;
- int retry_backoff_ms;
- int batch_num_messages;
- int batch_size;
- rd_kafka_compression_t compression_codec;
- int dr_err_only;
- int sticky_partition_linger_ms;
-
- /* Message delivery report callback.
-         * Called once for each produced message: on successful and
-         * acknowledged delivery to the broker 'err' is 0; if the
-         * message could not be delivered, 'err' is non-zero (use
-         * rd_kafka_err2str() to obtain a human-readable error reason).
- *
-         * If the message was produced with neither RD_KAFKA_MSG_F_FREE
-         * nor RD_KAFKA_MSG_F_COPY set, then 'payload' is the original
-         * pointer provided to rd_kafka_produce().
-         * rdkafka will not perform any further actions on 'payload'
-         * and the application may rd_free the payload data
-         * at this point.
- *
- * 'opaque' is 'conf.opaque', while 'msg_opaque' is
- * the opaque pointer provided in the rd_kafka_produce() call.
- */
- void (*dr_cb)(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque);
-
- void (*dr_msg_cb)(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque);
-
- /* Consume callback */
- void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque);
-
- /* Log callback */
- void (*log_cb)(const rd_kafka_t *rk,
- int level,
- const char *fac,
- const char *buf);
- int log_level;
- int log_queue;
- int log_thread_name;
- int log_connection_close;
-
- /* PRNG seeding */
- int enable_random_seed;
-
- /* Error callback */
- void (*error_cb)(rd_kafka_t *rk,
- int err,
- const char *reason,
- void *opaque);
-
- /* Throttle callback */
- void (*throttle_cb)(rd_kafka_t *rk,
- const char *broker_name,
- int32_t broker_id,
- int throttle_time_ms,
- void *opaque);
-
- /* Stats callback */
- int (*stats_cb)(rd_kafka_t *rk,
- char *json,
- size_t json_len,
- void *opaque);
-
- /* Socket creation callback */
- int (*socket_cb)(int domain, int type, int protocol, void *opaque);
-
- /* Connect callback */
- int (*connect_cb)(int sockfd,
- const struct sockaddr *addr,
- int addrlen,
- const char *id,
- void *opaque);
-
- /* Close socket callback */
- int (*closesocket_cb)(int sockfd, void *opaque);
-
- /* File open callback */
- int (*open_cb)(const char *pathname,
- int flags,
- mode_t mode,
- void *opaque);
-
- /* Address resolution callback */
- int (*resolve_cb)(const char *node,
- const char *service,
- const struct addrinfo *hints,
- struct addrinfo **res,
- void *opaque);
-
- /* Background queue event callback */
- void (*background_event_cb)(rd_kafka_t *rk,
- rd_kafka_event_t *rkev,
- void *opaque);
-
-
- /* Opaque passed to callbacks. */
- void *opaque;
-
- /* For use with value-less properties. */
- int dummy;
-
-
- /* Admin client defaults */
- struct {
- int request_timeout_ms; /* AdminOptions.request_timeout */
- } admin;
-
-
- /*
- * Test mocks
- */
- struct {
- int broker_cnt; /**< Number of mock brokers */
- int broker_rtt; /**< Broker RTT */
- } mock;
-
- /*
- * Unit test pluggable interfaces
- */
- struct {
- /**< Inject errors in ProduceResponse handler */
- rd_kafka_resp_err_t (*handle_ProduceResponse)(
- rd_kafka_t *rk,
- int32_t brokerid,
- uint64_t msgid,
- rd_kafka_resp_err_t err);
- } ut;
-
- char *sw_name; /**< Software/client name */
- char *sw_version; /**< Software/client version */
-
- struct {
- /** Properties on (implicit pass-thru) default_topic_conf were
- * overwritten by passing an explicit default_topic_conf. */
- rd_bool_t default_topic_conf_overwritten;
- } warn;
-};
-
-int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque);
-int rd_kafka_socket_cb_generic(int domain,
- int type,
- int protocol,
- void *opaque);
-#ifndef _WIN32
-int rd_kafka_open_cb_linux(const char *pathname,
- int flags,
- mode_t mode,
- void *opaque);
-#endif
-int rd_kafka_open_cb_generic(const char *pathname,
- int flags,
- mode_t mode,
- void *opaque);
-
-
-
-struct rd_kafka_topic_conf_s {
- struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
-
- int required_acks;
- int32_t request_timeout_ms;
- int message_timeout_ms;
-
- int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
- const void *keydata,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque);
- char *partitioner_str;
-
- rd_bool_t random_partitioner; /**< rd_true - random
- * rd_false - sticky */
-
- int queuing_strategy; /* RD_KAFKA_QUEUE_FIFO|LIFO */
- int (*msg_order_cmp)(const void *a, const void *b);
-
- rd_kafka_compression_t compression_codec;
- rd_kafka_complevel_t compression_level;
- int produce_offset_report;
-
- int consume_callback_max_msgs;
- int auto_commit;
- int auto_commit_interval_ms;
- int auto_offset_reset;
- char *offset_store_path;
- int offset_store_sync_interval_ms;
-
- rd_kafka_offset_method_t offset_store_method;
-
- /* Application provided opaque pointer (this is rkt_opaque) */
- void *opaque;
-};
-
-
-char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp);
-
-void rd_kafka_anyconf_destroy(int scope, void *conf);
-
-rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
- const char *name);
-
-void rd_kafka_desensitize_str(char *str);
-
-void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf);
-void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf);
-
-const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
- rd_kafka_conf_t *conf);
-const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype,
- const rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *tconf);
-
-
-int rd_kafka_conf_warn(rd_kafka_t *rk);
-
-void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk,
- int scope,
- const void *conf,
- const char *description);
-
-#include "rdkafka_confval.h"
-
-int unittest_conf(void);
-
-#endif /* _RDKAFKA_CONF_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h
deleted file mode 100644
index 3f2bad549..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2014-2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_CONFVAL_H_
-#define _RDKAFKA_CONFVAL_H_
-/**
- * @name Next generation configuration values
- * @{
- *
- */
-
-/**
- * @brief Configuration value type
- */
-typedef enum rd_kafka_confval_type_t {
- RD_KAFKA_CONFVAL_INT,
- RD_KAFKA_CONFVAL_STR,
- RD_KAFKA_CONFVAL_PTR,
-} rd_kafka_confval_type_t;
-
-/**
- * @brief Configuration value (used by AdminOption).
- * Comes with a type, backed by a union, and a flag to indicate
- * if the value has been set or not.
- */
-typedef struct rd_kafka_confval_s {
- const char *name; /**< Property name */
- rd_kafka_confval_type_t valuetype; /**< Value type, maps to union.*/
- int is_set; /**< Value has been set. */
- int is_enabled; /**< Confval is enabled. */
- union {
- struct {
- int v; /**< Current value */
- int vmin; /**< Minimum value (inclusive) */
- int vmax; /**< Maximum value (inclusive) */
- int vdef; /**< Default value */
- } INT;
- struct {
- char *v; /**< Current value */
- int allowempty; /**< Allow empty string as value */
- size_t minlen; /**< Minimum string length excl \0 */
- size_t maxlen; /**< Maximum string length excl \0 */
- const char *vdef; /**< Default value */
- } STR;
- void *PTR; /**< Pointer */
- } u;
-} rd_kafka_confval_t;
-
-
-
-void rd_kafka_confval_init_int(rd_kafka_confval_t *confval,
- const char *name,
- int vmin,
- int vmax,
- int vdef);
-void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name);
-void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name);
-
-rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval,
- rd_kafka_confval_type_t valuetype,
- const void *valuep,
- char *errstr,
- size_t errstr_size);
-
-int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval);
-const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval);
-void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval);
-
-/**@}*/
-
-
-#endif /* _RDKAFKA_CONFVAL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c
deleted file mode 100644
index 9e41bab72..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_request.h"
-#include "rdkafka_coord.h"
-
-
-/**
- * @name Coordinator cache
- * @{
- *
- */
-void rd_kafka_coord_cache_entry_destroy(rd_kafka_coord_cache_t *cc,
- rd_kafka_coord_cache_entry_t *cce) {
- rd_assert(cc->cc_cnt > 0);
- rd_free(cce->cce_coordkey);
- rd_kafka_broker_destroy(cce->cce_rkb);
- TAILQ_REMOVE(&cc->cc_entries, cce, cce_link);
- cc->cc_cnt--;
- rd_free(cce);
-}
-
-
-/**
- * @brief Delete any expired cache entries
- *
- * @locality rdkafka main thread
- */
-void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc) {
- rd_kafka_coord_cache_entry_t *cce, *next;
- rd_ts_t expire = rd_clock() - cc->cc_expire_thres;
-
- next = TAILQ_LAST(&cc->cc_entries, rd_kafka_coord_cache_head_s);
- while (next) {
- cce = next;
-
- if (cce->cce_ts_used > expire)
- break;
-
- next = TAILQ_PREV(cce, rd_kafka_coord_cache_head_s, cce_link);
- rd_kafka_coord_cache_entry_destroy(cc, cce);
- }
-}
-
-
-static rd_kafka_coord_cache_entry_t *
-rd_kafka_coord_cache_find(rd_kafka_coord_cache_t *cc,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey) {
- rd_kafka_coord_cache_entry_t *cce;
-
- TAILQ_FOREACH(cce, &cc->cc_entries, cce_link) {
- if (cce->cce_coordtype == coordtype &&
- !strcmp(cce->cce_coordkey, coordkey)) {
- /* Match */
- cce->cce_ts_used = rd_clock();
- if (TAILQ_FIRST(&cc->cc_entries) != cce) {
- /* Move to head of list */
- TAILQ_REMOVE(&cc->cc_entries, cce, cce_link);
- TAILQ_INSERT_HEAD(&cc->cc_entries, cce,
- cce_link);
- }
- return cce;
- }
- }
-
- return NULL;
-}
-
-
-rd_kafka_broker_t *rd_kafka_coord_cache_get(rd_kafka_coord_cache_t *cc,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey) {
- rd_kafka_coord_cache_entry_t *cce;
-
- cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey);
- if (!cce)
- return NULL;
-
- rd_kafka_broker_keep(cce->cce_rkb);
- return cce->cce_rkb;
-}
-
-
-
-static void rd_kafka_coord_cache_add(rd_kafka_coord_cache_t *cc,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey,
- rd_kafka_broker_t *rkb) {
- rd_kafka_coord_cache_entry_t *cce;
-
- if (!(cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey))) {
- if (cc->cc_cnt > 10) {
- /* Not enough room in cache, remove least used entry */
- rd_kafka_coord_cache_entry_t *rem = TAILQ_LAST(
- &cc->cc_entries, rd_kafka_coord_cache_head_s);
- rd_kafka_coord_cache_entry_destroy(cc, rem);
- }
-
- cce = rd_calloc(1, sizeof(*cce));
- cce->cce_coordtype = coordtype;
- cce->cce_coordkey = rd_strdup(coordkey);
- cce->cce_ts_used = rd_clock();
-
- TAILQ_INSERT_HEAD(&cc->cc_entries, cce, cce_link);
- cc->cc_cnt++;
- }
-
- if (cce->cce_rkb != rkb) {
- if (cce->cce_rkb)
- rd_kafka_broker_destroy(cce->cce_rkb);
- cce->cce_rkb = rkb;
- rd_kafka_broker_keep(rkb);
- }
-}
-
-
-/**
- * @brief Evict any cache entries for broker \p rkb.
- *
- * Use this when a request returns ERR_NOT_COORDINATOR_FOR...
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc,
- rd_kafka_broker_t *rkb) {
- rd_kafka_coord_cache_entry_t *cce, *tmp;
-
- TAILQ_FOREACH_SAFE(cce, &cc->cc_entries, cce_link, tmp) {
- if (cce->cce_rkb == rkb)
- rd_kafka_coord_cache_entry_destroy(cc, cce);
- }
-}
-
-/**
- * @brief Destroy all coord cache entries.
- */
-void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc) {
- rd_kafka_coord_cache_entry_t *cce;
-
- while ((cce = TAILQ_FIRST(&cc->cc_entries)))
- rd_kafka_coord_cache_entry_destroy(cc, cce);
-}
-
-
-/**
- * @brief Initialize the coord cache.
- *
- * Locking of the coord-cache is up to the owner.
- */
-void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc,
- int expire_thres_ms) {
- TAILQ_INIT(&cc->cc_entries);
- cc->cc_cnt = 0;
- cc->cc_expire_thres = expire_thres_ms * 1000;
-}
-
-/**@}*/
-
-
-/**
- * @name Asynchronous coordinator requests
- * @{
- *
- */
-
-
-
-static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq);
-
-/**
- * @brief Timer callback for delayed coord requests.
- */
-static void rd_kafka_coord_req_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_coord_req_t *creq = arg;
-
- rd_kafka_coord_req_fsm(rkts->rkts_rk, creq);
-}
-
-
-/**
- * @brief Look up coordinator for \p coordtype and \p coordkey
- * (either from cache or by FindCoordinator), make sure there is
- * a connection to the coordinator, and then call \p send_req_cb,
- * passing the coordinator broker instance and \p rko
- * to send the request.
- * These steps may be performed by this function, or asynchronously
- * at a later time.
- *
- * @param delay_ms If non-zero, delay scheduling of the coord request
- * for this long. The passed \p timeout_ms is automatically
- * adjusted to + \p delay_ms.
- *
- * Response, or error, is sent on \p replyq with callback \p rkbuf_cb.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_coord_req(rd_kafka_t *rk,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey,
- rd_kafka_send_req_cb_t *send_req_cb,
- rd_kafka_op_t *rko,
- int delay_ms,
- int timeout_ms,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *reply_opaque) {
- rd_kafka_coord_req_t *creq;
-
- creq = rd_calloc(1, sizeof(*creq));
- creq->creq_coordtype = coordtype;
- creq->creq_coordkey = rd_strdup(coordkey);
- creq->creq_ts_timeout = rd_timeout_init(delay_ms + timeout_ms);
- creq->creq_send_req_cb = send_req_cb;
- creq->creq_rko = rko;
- creq->creq_replyq = replyq;
- creq->creq_resp_cb = resp_cb;
- creq->creq_reply_opaque = reply_opaque;
- creq->creq_refcnt = 1;
- creq->creq_done = rd_false;
- rd_interval_init(&creq->creq_query_intvl);
-
- TAILQ_INSERT_TAIL(&rk->rk_coord_reqs, creq, creq_link);
-
- if (delay_ms)
- rd_kafka_timer_start_oneshot(&rk->rk_timers, &creq->creq_tmr,
- rd_true, (rd_ts_t)delay_ms * 1000,
- rd_kafka_coord_req_tmr_cb, creq);
- else
- rd_kafka_coord_req_fsm(rk, creq);
-}
-
-
-/**
- * @brief Decrease refcount of creq and free it if no more references.
- *
- * @param done Mark creq as done, having performed its duties. There may still
- * be lingering references.
- *
- * @returns true if creq was destroyed, else false.
- */
-static rd_bool_t rd_kafka_coord_req_destroy(rd_kafka_t *rk,
- rd_kafka_coord_req_t *creq,
- rd_bool_t done) {
-
- rd_assert(creq->creq_refcnt > 0);
-
- if (done) {
- /* Request has been performed, remove from rk_coord_reqs
- * list so creq won't be triggered again by state broadcasts,
- * etc. */
- rd_dassert(!creq->creq_done);
- TAILQ_REMOVE(&rk->rk_coord_reqs, creq, creq_link);
- creq->creq_done = rd_true;
-
- rd_kafka_timer_stop(&rk->rk_timers, &creq->creq_tmr,
- RD_DO_LOCK);
- }
-
- if (--creq->creq_refcnt > 0)
- return rd_false;
-
- rd_dassert(creq->creq_done);
-
- /* Clear out coordinator we were waiting for. */
- if (creq->creq_rkb) {
- rd_kafka_broker_persistent_connection_del(
- creq->creq_rkb, &creq->creq_rkb->rkb_persistconn.coord);
- rd_kafka_broker_destroy(creq->creq_rkb);
- creq->creq_rkb = NULL;
- }
-
- rd_kafka_replyq_destroy(&creq->creq_replyq);
- rd_free(creq->creq_coordkey);
- rd_free(creq);
-
- return rd_true;
-}
-
-static void rd_kafka_coord_req_keep(rd_kafka_coord_req_t *creq) {
- creq->creq_refcnt++;
-}
-
-static void rd_kafka_coord_req_fail(rd_kafka_t *rk,
- rd_kafka_coord_req_t *creq,
- rd_kafka_resp_err_t err) {
- rd_kafka_op_t *reply;
- rd_kafka_buf_t *rkbuf;
-
- reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
- reply->rko_rk = rk; /* Set rk since the rkbuf will not have a rkb
- * to reach it. */
- reply->rko_err = err;
-
- /* Need a dummy rkbuf to pass state to the buf resp_cb */
- rkbuf = rd_kafka_buf_new(0, 0);
- rkbuf->rkbuf_cb = creq->creq_resp_cb;
- rkbuf->rkbuf_opaque = creq->creq_reply_opaque;
- reply->rko_u.xbuf.rkbuf = rkbuf;
-
- rd_kafka_replyq_enq(&creq->creq_replyq, reply, 0);
-
- rd_kafka_coord_req_destroy(rk, creq, rd_true /*done*/);
-}
-
-
-static void rd_kafka_coord_req_handle_FindCoordinator(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_coord_req_t *creq = opaque;
- int16_t ErrorCode;
- rd_kafkap_str_t Host;
- int32_t NodeId, Port;
- char errstr[256] = "";
- int actions;
- rd_kafka_broker_t *coord;
- rd_kafka_metadata_broker_t mdb = RD_ZERO_INIT;
-
- /* If creq has finished (possibly because of an earlier FindCoordinator
-         * response or a broker state broadcast) we simply ignore the
- * response. */
- if (creq->creq_done)
- err = RD_KAFKA_RESP_ERR__DESTROY;
-
- if (err)
- goto err;
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1) {
- rd_kafkap_str_t ErrorMsg;
- rd_kafka_buf_read_str(rkbuf, &ErrorMsg);
- if (ErrorCode)
- rd_snprintf(errstr, sizeof(errstr), "%.*s",
- RD_KAFKAP_STR_PR(&ErrorMsg));
- }
-
- if ((err = ErrorCode))
- goto err;
-
- rd_kafka_buf_read_i32(rkbuf, &NodeId);
- rd_kafka_buf_read_str(rkbuf, &Host);
- rd_kafka_buf_read_i32(rkbuf, &Port);
-
- mdb.id = NodeId;
- RD_KAFKAP_STR_DUPA(&mdb.host, &Host);
- mdb.port = Port;
-
- /* Find, update or add broker */
- rd_kafka_broker_update(rk, rkb->rkb_proto, &mdb, &coord);
-
- if (!coord) {
- err = RD_KAFKA_RESP_ERR__FAIL;
- rd_snprintf(errstr, sizeof(errstr),
- "Failed to add broker: "
- "instance is probably terminating");
- goto err;
- }
-
-
- rd_kafka_coord_cache_add(&rk->rk_coord_cache, creq->creq_coordtype,
- creq->creq_coordkey, coord);
- rd_kafka_broker_destroy(coord); /* refcnt from broker_update() */
-
- rd_kafka_coord_req_fsm(rk, creq);
-
- /* Drop refcount from req_fsm() */
- rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/);
-
- return;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- actions = rd_kafka_err_action(
- rkb, err, request,
-
- RD_KAFKA_ERR_ACTION_SPECIAL, RD_KAFKA_RESP_ERR__DESTROY,
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED,
-
- RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR__TRANSPORT,
-
- RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
-
- RD_KAFKA_ERR_ACTION_END);
-
- if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
- rd_kafka_coord_req_fail(rk, creq, err);
- return;
-
- } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- rd_kafka_buf_retry(rkb, request);
- return; /* Keep refcnt from req_fsm() and retry */
- }
-
- /* Rely on state broadcast to trigger retry */
-
- /* Drop refcount from req_fsm() */
- rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/);
-}
-
-
-
-/**
- * @brief State machine for async coordinator requests.
- *
- * @remark May destroy the \p creq.
- *
- * @locality any
- * @locks none
- */
-static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq) {
- rd_kafka_broker_t *rkb;
- rd_kafka_resp_err_t err;
-
- if (creq->creq_done)
-                /* creq has already performed its actions; this is a
- * lingering reference, e.g., a late FindCoordinator response.
- * Just ignore. */
- return;
-
- if (unlikely(rd_kafka_terminating(rk))) {
- rd_kafka_coord_req_fail(rk, creq, RD_KAFKA_RESP_ERR__DESTROY);
- return;
- }
-
- /* Do nothing if creq is delayed and the delay time hasn't expired yet.
-         * We will be called again by the timer once it expires. */
- if (rd_kafka_timer_next(&rk->rk_timers, &creq->creq_tmr, RD_DO_LOCK) >
- 0)
- return;
-
- /* Check cache first */
- rkb = rd_kafka_coord_cache_get(
- &rk->rk_coord_cache, creq->creq_coordtype, creq->creq_coordkey);
-
- if (rkb) {
- if (rd_kafka_broker_is_up(rkb)) {
- /* Cached coordinator is up, send request */
- rd_kafka_replyq_t replyq;
-
- /* Clear out previous coordinator we waited for. */
- if (creq->creq_rkb) {
- rd_kafka_broker_persistent_connection_del(
- creq->creq_rkb,
- &creq->creq_rkb->rkb_persistconn.coord);
- rd_kafka_broker_destroy(creq->creq_rkb);
- creq->creq_rkb = NULL;
- }
-
- rd_kafka_replyq_copy(&replyq, &creq->creq_replyq);
- err = creq->creq_send_req_cb(rkb, creq->creq_rko,
- replyq, creq->creq_resp_cb,
- creq->creq_reply_opaque);
-
- if (err) {
- /* Permanent error, e.g., request not
- * supported by broker. */
- rd_kafka_replyq_destroy(&replyq);
- rd_kafka_coord_req_fail(rk, creq, err);
- } else {
- rd_kafka_coord_req_destroy(rk, creq,
- rd_true /*done*/);
- }
-
- } else if (creq->creq_rkb == rkb) {
- /* No change in coordinator, but it is still not up.
- * Query for coordinator if at least a second has
- * passed since this coord_req was created or the
- * last time we queried. */
- if (rd_interval(&creq->creq_query_intvl,
- 1000 * 1000 /* 1s */, 0) > 0) {
- rd_rkb_dbg(rkb, BROKER, "COORD",
- "Coordinator connection is "
- "still down: "
- "querying for new coordinator");
- rd_kafka_broker_destroy(rkb);
- goto query_coord;
- }
-
- } else {
- /* No connection yet.
- * Let broker thread know we need a connection.
- * We'll be re-triggered on broker state broadcast. */
-
- if (creq->creq_rkb) {
- /* Clear previous */
- rd_kafka_broker_persistent_connection_del(
- creq->creq_rkb,
- &creq->creq_rkb->rkb_persistconn.coord);
- rd_kafka_broker_destroy(creq->creq_rkb);
- }
-
- rd_kafka_broker_keep(rkb);
- creq->creq_rkb = rkb;
- rd_kafka_broker_persistent_connection_add(
- rkb, &rkb->rkb_persistconn.coord);
- }
-
- rd_kafka_broker_destroy(rkb);
- return;
-
- } else if (creq->creq_rkb) {
- /* No coordinator information, clear out the previous
- * coordinator we waited for. */
- rd_kafka_broker_persistent_connection_del(
- creq->creq_rkb, &creq->creq_rkb->rkb_persistconn.coord);
- rd_kafka_broker_destroy(creq->creq_rkb);
- creq->creq_rkb = NULL;
- }
-
-query_coord:
- /* Get any usable broker to look up the coordinator */
- rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, RD_DO_LOCK,
- RD_KAFKA_FEATURE_BROKER_GROUP_COORD,
- "broker to look up coordinator");
-
- if (!rkb) {
- /* No available brokers yet, we'll be re-triggered on
- * broker state broadcast. */
- return;
- }
-
-
- /* Send FindCoordinator request, the handler will continue
- * the state machine. */
- rd_kafka_coord_req_keep(creq);
- err = rd_kafka_FindCoordinatorRequest(
- rkb, creq->creq_coordtype, creq->creq_coordkey,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_coord_req_handle_FindCoordinator, creq);
-
- rd_kafka_broker_destroy(rkb);
-
- if (err) {
- rd_kafka_coord_req_fail(rk, creq, err);
- /* from keep() above */
- rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/);
- }
-}
-
-
-
-/**
- * @brief Callback called from rdkafka main thread on each
- * broker state change from or to UP.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_coord_req_t *creq, *tmp;
-
- /* Run through all coord_req fsms */
- TAILQ_FOREACH_SAFE(creq, &rk->rk_coord_reqs, creq_link, tmp) {
- rd_kafka_coord_req_fsm(rk, creq);
- }
-}
-
-
-
-/**
- * @brief Instance is terminating: destroy all coord reqs
- */
-void rd_kafka_coord_reqs_term(rd_kafka_t *rk) {
- rd_kafka_coord_req_t *creq;
-
- while ((creq = TAILQ_FIRST(&rk->rk_coord_reqs)))
- rd_kafka_coord_req_fail(rk, creq, RD_KAFKA_RESP_ERR__DESTROY);
-}
-
-
-/**
- * @brief Initialize coord reqs list.
- */
-void rd_kafka_coord_reqs_init(rd_kafka_t *rk) {
- TAILQ_INIT(&rk->rk_coord_reqs);
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h
deleted file mode 100644
index 4e00a552b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_COORD_H_
-#define _RDKAFKA_COORD_H_
-
-
-typedef TAILQ_HEAD(rd_kafka_coord_cache_head_s,
- rd_kafka_coord_cache_entry_s) rd_kafka_coord_cache_head_t;
-
-/**
- * @brief Coordinator cache entry
- */
-typedef struct rd_kafka_coord_cache_entry_s {
- TAILQ_ENTRY(rd_kafka_coord_cache_entry_s) cce_link;
- rd_kafka_coordtype_t cce_coordtype; /**< Coordinator type */
-        char *cce_coordkey;          /**< Coordinator type key,
-                                      *   e.g. the group id */
- rd_ts_t cce_ts_used; /**< Last used timestamp */
- rd_kafka_broker_t *cce_rkb; /**< The cached coordinator */
-
-} rd_kafka_coord_cache_entry_t;
-
-/**
- * @brief Coordinator cache
- */
-typedef struct rd_kafka_coord_cache_s {
- rd_kafka_coord_cache_head_t cc_entries; /**< Cache entries */
- int cc_cnt; /**< Number of entries */
- rd_ts_t cc_expire_thres; /**< Entries not used in
- * this long will be
- * expired */
-} rd_kafka_coord_cache_t;
-
-
-void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc);
-void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc,
- rd_kafka_broker_t *rkb);
-void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc);
-void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc, int expire_thres_ms);
-
-
-
-/**
- * @name Coordinator requests
- */
-
-/**
- * @brief Request to be sent to coordinator.
- *        Includes looking up, caching, and connecting to the coordinator.
- */
-typedef struct rd_kafka_coord_req_s {
- TAILQ_ENTRY(rd_kafka_coord_req_s) creq_link; /**< rk_coord_reqs */
- rd_kafka_coordtype_t creq_coordtype; /**< Coordinator type */
- char *creq_coordkey; /**< Coordinator key */
-
- rd_kafka_op_t *creq_rko; /**< Requester's rko that is
- * provided to creq_send_req_cb
- * (optional). */
- rd_kafka_timer_t creq_tmr; /**< Delay timer. */
- rd_ts_t creq_ts_timeout; /**< Absolute timeout.
- * Will fail with an error
- * code pertaining to the
- * current state */
- rd_interval_t creq_query_intvl; /**< Coord query interval (1s) */
-
- rd_kafka_send_req_cb_t *creq_send_req_cb; /**< Sender callback */
-
- rd_kafka_replyq_t creq_replyq; /**< Reply queue */
- rd_kafka_resp_cb_t *creq_resp_cb; /**< Reply queue response
- * parsing callback for the
- * request sent by
- * send_req_cb */
- void *creq_reply_opaque; /**< Opaque passed to
- * creq_send_req_cb and
- * creq_resp_cb. */
-
-        int creq_refcnt;           /**< Reference count, used to allow
-                                    *   destroying the creq even when
-                                    *   there are outstanding
-                                    *   FindCoordinator requests (whose
-                                    *   replies are forwarded to the
-                                    *   rk_ops queue). */
- rd_bool_t creq_done; /**< True if request was sent */
-
- rd_kafka_broker_t *creq_rkb; /**< creq is waiting for this broker to
- * come up. */
-} rd_kafka_coord_req_t;
-
-
-void rd_kafka_coord_req(rd_kafka_t *rk,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey,
- rd_kafka_send_req_cb_t *send_req_cb,
- rd_kafka_op_t *rko,
- int delay_ms,
- int timeout_ms,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *reply_opaque);
-
-void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb);
-
-void rd_kafka_coord_reqs_term(rd_kafka_t *rk);
-void rd_kafka_coord_reqs_init(rd_kafka_t *rk);
-#endif /* _RDKAFKA_COORD_H_ */
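
A hedged usage sketch of the entry point declared above: the sender-callback signature is inferred from the creq_send_req_cb invocation in rd_kafka_coord_req_fsm(), the my_* names are hypothetical, and RD_KAFKA_COORD_GROUP is assumed to be the group coordinator type enumerator.

    /* Invoked by the FSM once a coordinator connection is up. */
    static rd_kafka_resp_err_t
    my_send_req(rd_kafka_broker_t *rkb, rd_kafka_op_t *rko,
                rd_kafka_replyq_t replyq, rd_kafka_resp_cb_t *resp_cb,
                void *reply_opaque) {
            /* Build and enqueue the actual protocol request on rkb,
             * routing the broker's response to resp_cb via replyq. */
            return RD_KAFKA_RESP_ERR_NO_ERROR;
    }

    /* Look up (or reuse the cached) group coordinator for "mygroup",
     * connect to it, then let the FSM invoke my_send_req() on it;
     * my_resp_cb (not shown) parses the response. */
    rd_kafka_coord_req(rk, RD_KAFKA_COORD_GROUP, "mygroup", my_send_req,
                       NULL /*rko*/, 0 /*no delay*/, 5000 /*timeout_ms*/,
                       RD_KAFKA_REPLYQ(rk->rk_ops, 0), my_resp_cb, NULL);
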
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c
deleted file mode 100644
index 4a218daff..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name Public API complex error type implementation.
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_error.h"
-
-#include <stdarg.h>
-
-
-void rd_kafka_error_destroy(rd_kafka_error_t *error) {
- if (error)
- rd_free(error);
-}
-
-
-/**
- * @brief Creates a new error object using the optional va-args format list.
- */
-rd_kafka_error_t *
-rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap) {
- rd_kafka_error_t *error;
- ssize_t strsz = 0;
-
- if (fmt && *fmt) {
- va_list ap2;
- va_copy(ap2, ap);
- strsz = rd_vsnprintf(NULL, 0, fmt, ap2) + 1;
- va_end(ap2);
- }
-
- error = rd_malloc(sizeof(*error) + strsz);
- error->code = code;
- error->fatal = rd_false;
- error->retriable = rd_false;
- error->txn_requires_abort = rd_false;
-
- if (strsz > 0) {
- error->errstr = (char *)(error + 1);
- rd_vsnprintf(error->errstr, strsz, fmt, ap);
- } else {
- error->errstr = NULL;
- }
-
- return error;
-}
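
The constructor above uses a single-allocation layout: when a format string is given, the rendered message is stored in the same rd_malloc() block immediately after the struct, so the one rd_free() in rd_kafka_error_destroy() releases both. A generic standalone illustration of the trick (plain libc, hypothetical my_err_* names):

    #include <stdlib.h>
    #include <string.h>

    typedef struct my_err_s {
            int code;
            char *msg; /* points into the same allocation */
    } my_err_t;

    static my_err_t *my_err_new(int code, const char *msg) {
            size_t len  = strlen(msg) + 1;
            my_err_t *e = malloc(sizeof(*e) + len);
            e->code     = code;
            e->msg      = (char *)(e + 1); /* just past the struct */
            memcpy(e->msg, msg, len);
            return e; /* a single free(e) releases the string too */
    }
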
-
-rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src) {
- rd_kafka_error_t *error;
- ssize_t strsz = 0;
-
- if (src->errstr) {
- strsz = strlen(src->errstr) + 1;
- }
-
- error = rd_malloc(sizeof(*error) + strsz);
- error->code = src->code;
- error->fatal = src->fatal;
- error->retriable = src->retriable;
- error->txn_requires_abort = src->txn_requires_abort;
-
- if (strsz > 0) {
- error->errstr = (char *)(error + 1);
- rd_strlcpy(error->errstr, src->errstr, strsz);
- } else {
- error->errstr = NULL;
- }
-
- return error;
-}
-
-/**
- * @brief Same as rd_kafka_error_copy() but suitable for
- * rd_list_copy(). The \p opaque is ignored.
- */
-void *rd_kafka_error_copy_opaque(const void *error, void *opaque) {
- return rd_kafka_error_copy(error);
-}
-
-
-rd_kafka_error_t *
-rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...) {
- rd_kafka_error_t *error;
- va_list ap;
-
- va_start(ap, fmt);
- error = rd_kafka_error_new_v(code, fmt, ap);
- va_end(ap);
-
- return error;
-}
-
-rd_kafka_error_t *
-rd_kafka_error_new_fatal(rd_kafka_resp_err_t code, const char *fmt, ...) {
- rd_kafka_error_t *error;
- va_list ap;
-
- va_start(ap, fmt);
- error = rd_kafka_error_new_v(code, fmt, ap);
- va_end(ap);
-
- rd_kafka_error_set_fatal(error);
-
- return error;
-}
-
-rd_kafka_error_t *
-rd_kafka_error_new_retriable(rd_kafka_resp_err_t code, const char *fmt, ...) {
- rd_kafka_error_t *error;
- va_list ap;
-
- va_start(ap, fmt);
- error = rd_kafka_error_new_v(code, fmt, ap);
- va_end(ap);
-
- rd_kafka_error_set_retriable(error);
-
- return error;
-}
-
-rd_kafka_error_t *
-rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code,
- const char *fmt,
- ...) {
- rd_kafka_error_t *error;
- va_list ap;
-
- va_start(ap, fmt);
- error = rd_kafka_error_new_v(code, fmt, ap);
- va_end(ap);
-
- rd_kafka_error_set_txn_requires_abort(error);
-
- return error;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error) {
- return error ? error->code : RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-const char *rd_kafka_error_name(const rd_kafka_error_t *error) {
- return error ? rd_kafka_err2name(error->code) : "";
-}
-
-const char *rd_kafka_error_string(const rd_kafka_error_t *error) {
- if (!error)
- return "";
- return error->errstr ? error->errstr : rd_kafka_err2str(error->code);
-}
-
-int rd_kafka_error_is_fatal(const rd_kafka_error_t *error) {
- return error && error->fatal ? 1 : 0;
-}
-
-int rd_kafka_error_is_retriable(const rd_kafka_error_t *error) {
- return error && error->retriable ? 1 : 0;
-}
-
-int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error) {
- return error && error->txn_requires_abort ? 1 : 0;
-}
-
-
-
-void rd_kafka_error_set_fatal(rd_kafka_error_t *error) {
- error->fatal = rd_true;
-}
-
-void rd_kafka_error_set_retriable(rd_kafka_error_t *error) {
- error->retriable = rd_true;
-}
-
-void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error) {
- error->txn_requires_abort = rd_true;
-}
-
-
-/**
- * @brief Converts a new style error_t error to the legacy style
- *        resp_err_t code and separate error string, then
- *        destroys the error object.
- *
- * @remark The \p error object is destroyed.
- */
-rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_resp_err_t err = error->code;
-
- rd_snprintf(errstr, errstr_size, "%s", rd_kafka_error_string(error));
-
- rd_kafka_error_destroy(error);
-
- return err;
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h
deleted file mode 100644
index 79984f5ef..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDKAFKA_ERROR_H_
-#define _RDKAFKA_ERROR_H_
-
-#include <stdarg.h>
-
-/**
- * @name Public API complex error type implementation.
- *
- */
-
-struct rd_kafka_error_s {
- rd_kafka_resp_err_t code; /**< Error code. */
- char *errstr; /**< Human readable error string, allocated
- * with the rd_kafka_error_s struct
- * after the struct.
- * Possibly NULL. */
- rd_bool_t fatal; /**< This error is a fatal error. */
- rd_bool_t retriable; /**< Operation is retriable. */
- rd_bool_t
- txn_requires_abort; /**< This is an abortable transaction error.*/
-};
-
-
-rd_kafka_error_t *
-rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap);
-
-rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src);
-
-void *rd_kafka_error_copy_opaque(const void *error, void *opaque);
-
-void rd_kafka_error_set_fatal(rd_kafka_error_t *error);
-void rd_kafka_error_set_retriable(rd_kafka_error_t *error);
-void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error);
-
-
-rd_kafka_error_t *rd_kafka_error_new_fatal(rd_kafka_resp_err_t code,
- const char *fmt,
- ...) RD_FORMAT(printf, 2, 3);
-rd_kafka_error_t *rd_kafka_error_new_retriable(rd_kafka_resp_err_t code,
- const char *fmt,
- ...) RD_FORMAT(printf, 2, 3);
-rd_kafka_error_t *
-rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code,
- const char *fmt,
- ...) RD_FORMAT(printf, 2, 3);
-
-
-rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error,
- char *errstr,
- size_t errstr_size);
-#endif /* _RDKAFKA_ERROR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c
deleted file mode 100644
index ffd1a1780..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_event.h"
-#include "rd.h"
-
-rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev) {
- return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE;
-}
-
-const char *rd_kafka_event_name(const rd_kafka_event_t *rkev) {
- switch (rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE) {
- case RD_KAFKA_EVENT_NONE:
- return "(NONE)";
- case RD_KAFKA_EVENT_DR:
- return "DeliveryReport";
- case RD_KAFKA_EVENT_FETCH:
- return "Fetch";
- case RD_KAFKA_EVENT_LOG:
- return "Log";
- case RD_KAFKA_EVENT_ERROR:
- return "Error";
- case RD_KAFKA_EVENT_REBALANCE:
- return "Rebalance";
- case RD_KAFKA_EVENT_OFFSET_COMMIT:
- return "OffsetCommit";
- case RD_KAFKA_EVENT_STATS:
- return "Stats";
- case RD_KAFKA_EVENT_CREATETOPICS_RESULT:
- return "CreateTopicsResult";
- case RD_KAFKA_EVENT_DELETETOPICS_RESULT:
- return "DeleteTopicsResult";
- case RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT:
- return "CreatePartitionsResult";
- case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT:
- return "AlterConfigsResult";
- case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT:
- return "DescribeConfigsResult";
- case RD_KAFKA_EVENT_DELETERECORDS_RESULT:
- return "DeleteRecordsResult";
- case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT:
- return "ListConsumerGroupsResult";
- case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT:
- return "DescribeConsumerGroupsResult";
- case RD_KAFKA_EVENT_DELETEGROUPS_RESULT:
- return "DeleteGroupsResult";
- case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT:
- return "DeleteConsumerGroupOffsetsResult";
- case RD_KAFKA_EVENT_CREATEACLS_RESULT:
- return "CreateAclsResult";
- case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
- return "DescribeAclsResult";
- case RD_KAFKA_EVENT_DELETEACLS_RESULT:
- return "DeleteAclsResult";
- case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT:
- return "AlterConsumerGroupOffsetsResult";
- case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT:
- return "ListConsumerGroupOffsetsResult";
- case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
- return "SaslOAuthBearerTokenRefresh";
- default:
- return "?unknown?";
- }
-}
-
-
-
-void rd_kafka_event_destroy(rd_kafka_event_t *rkev) {
- if (unlikely(!rkev))
- return;
- rd_kafka_op_destroy(rkev);
-}
-
-
-/**
- * @returns the next message from the event's message queue.
- * @remark Messages will be freed automatically when the event is destroyed;
- *         the application MUST NOT call rd_kafka_message_destroy().
- */
-const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev) {
- rd_kafka_op_t *rko = rkev;
- rd_kafka_msg_t *rkm;
- rd_kafka_msgq_t *rkmq, *rkmq2;
- rd_kafka_message_t *rkmessage;
-
- switch (rkev->rko_type) {
- case RD_KAFKA_OP_DR:
- rkmq = &rko->rko_u.dr.msgq;
- rkmq2 = &rko->rko_u.dr.msgq2;
- break;
-
- case RD_KAFKA_OP_FETCH:
- /* Just one message */
- if (rko->rko_u.fetch.evidx++ > 0)
- return NULL;
-
- rkmessage = rd_kafka_message_get(rko);
- if (unlikely(!rkmessage))
- return NULL;
-
- /* Store offset, etc. */
- rd_kafka_fetch_op_app_prepare(NULL, rko);
-
- return rkmessage;
-
-
- default:
- return NULL;
- }
-
- if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))))
- return NULL;
-
- rd_kafka_msgq_deq(rkmq, rkm, 1);
-
- /* Put rkm on secondary message queue which will be purged later. */
- rd_kafka_msgq_enq(rkmq2, rkm);
-
- return rd_kafka_message_get_from_rkm(rko, rkm);
-}
-
-
-size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev,
- const rd_kafka_message_t **rkmessages,
- size_t size) {
- size_t cnt = 0;
- const rd_kafka_message_t *rkmessage;
-
- while (cnt < size && (rkmessage = rd_kafka_event_message_next(rkev)))
- rkmessages[cnt++] = rkmessage;
-
- return cnt;
-}
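
A sketch of the consuming side of this iterator; all calls shown are public librdkafka event APIs, and `queue` is assumed to have been obtained earlier (e.g. via rd_kafka_queue_get_consumer()).

    rd_kafka_event_t *rkev = rd_kafka_queue_poll(queue, 1000 /*ms*/);

    if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_FETCH) {
            const rd_kafka_message_t *rkmessage;
            /* Messages are owned by the event; do NOT call
             * rd_kafka_message_destroy() on them. */
            while ((rkmessage = rd_kafka_event_message_next(rkev)))
                    printf("offset %lld\n", (long long)rkmessage->offset);
    }

    rd_kafka_event_destroy(rkev); /* also frees the returned messages */

Both rd_kafka_event_type() and rd_kafka_event_destroy() tolerate a NULL event (see the guards above), so a timed-out poll needs no special casing.
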
-
-
-size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev) {
- switch (rkev->rko_evtype) {
- case RD_KAFKA_EVENT_DR:
- return (size_t)rkev->rko_u.dr.msgq.rkmq_msg_cnt;
- case RD_KAFKA_EVENT_FETCH:
- return 1;
- default:
- return 0;
- }
-}
-
-
-const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev) {
- switch (rkev->rko_evtype) {
-#if WITH_SASL_OAUTHBEARER
- case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
- return rkev->rko_rk->rk_conf.sasl.oauthbearer_config;
-#endif
- default:
- return NULL;
- }
-}
-
-rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev) {
- return rkev->rko_err;
-}
-
-const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev) {
- switch (rkev->rko_type) {
- case RD_KAFKA_OP_ERR:
- case RD_KAFKA_OP_CONSUMER_ERR:
- if (rkev->rko_u.err.errstr)
- return rkev->rko_u.err.errstr;
- break;
- case RD_KAFKA_OP_ADMIN_RESULT:
- if (rkev->rko_u.admin_result.errstr)
- return rkev->rko_u.admin_result.errstr;
- break;
- default:
- break;
- }
-
- return rd_kafka_err2str(rkev->rko_err);
-}
-
-int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev) {
- return rkev->rko_u.err.fatal;
-}
-
-
-void *rd_kafka_event_opaque(rd_kafka_event_t *rkev) {
- switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK) {
- case RD_KAFKA_OP_OFFSET_COMMIT:
- return rkev->rko_u.offset_commit.opaque;
- case RD_KAFKA_OP_ADMIN_RESULT:
- return rkev->rko_u.admin_result.opaque;
- default:
- return NULL;
- }
-}
-
-
-int rd_kafka_event_log(rd_kafka_event_t *rkev,
- const char **fac,
- const char **str,
- int *level) {
- if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG))
- return -1;
-
- if (likely(fac != NULL))
- *fac = rkev->rko_u.log.fac;
- if (likely(str != NULL))
- *str = rkev->rko_u.log.str;
- if (likely(level != NULL))
- *level = rkev->rko_u.log.level;
-
- return 0;
-}
-
-int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev,
- char *dst,
- size_t dstsize) {
- static const char *names[] = {
- "generic", "broker", "topic", "metadata", "feature",
- "queue", "msg", "protocol", "cgrp", "security",
- "fetch", "interceptor", "plugin", "consumer", "admin",
- "eos", "mock", NULL};
- if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG))
- return -1;
- rd_flags2str(dst, dstsize, names, rkev->rko_u.log.ctx);
- return 0;
-}
-
-const char *rd_kafka_event_stats(rd_kafka_event_t *rkev) {
- return rkev->rko_u.stats.json;
-}
-
-rd_kafka_topic_partition_list_t *
-rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev) {
- switch (rkev->rko_evtype) {
- case RD_KAFKA_EVENT_REBALANCE:
- return rkev->rko_u.rebalance.partitions;
- case RD_KAFKA_EVENT_OFFSET_COMMIT:
- return rkev->rko_u.offset_commit.partitions;
- default:
- return NULL;
- }
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_event_topic_partition(rd_kafka_event_t *rkev) {
- rd_kafka_topic_partition_t *rktpar;
-
- if (unlikely(!rkev->rko_rktp))
- return NULL;
-
- rktpar = rd_kafka_topic_partition_new_from_rktp(rkev->rko_rktp);
-
- switch (rkev->rko_type) {
- case RD_KAFKA_OP_ERR:
- case RD_KAFKA_OP_CONSUMER_ERR:
- rktpar->offset = rkev->rko_u.err.offset;
- break;
- default:
- break;
- }
-
- rktpar->err = rkev->rko_err;
-
- return rktpar;
-}
-
-
-
-const rd_kafka_CreateTopics_result_t *
-rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATETOPICS_RESULT)
- return NULL;
- else
- return (const rd_kafka_CreateTopics_result_t *)rkev;
-}
-
-
-const rd_kafka_DeleteTopics_result_t *
-rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETETOPICS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DeleteTopics_result_t *)rkev;
-}
-
-
-const rd_kafka_CreatePartitions_result_t *
-rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT)
- return NULL;
- else
- return (const rd_kafka_CreatePartitions_result_t *)rkev;
-}
-
-
-const rd_kafka_AlterConfigs_result_t *
-rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONFIGS_RESULT)
- return NULL;
- else
- return (const rd_kafka_AlterConfigs_result_t *)rkev;
-}
-
-
-const rd_kafka_DescribeConfigs_result_t *
-rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DescribeConfigs_result_t *)rkev;
-}
-
-const rd_kafka_DeleteRecords_result_t *
-rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETERECORDS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DeleteRecords_result_t *)rkev;
-}
-
-const rd_kafka_ListConsumerGroups_result_t *
-rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev) {
- if (!rkev ||
- rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT)
- return NULL;
- else
- return (const rd_kafka_ListConsumerGroups_result_t *)rkev;
-}
-
-const rd_kafka_DescribeConsumerGroups_result_t *
-rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev) {
- if (!rkev ||
- rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DescribeConsumerGroups_result_t *)rkev;
-}
-
-const rd_kafka_DeleteGroups_result_t *
-rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEGROUPS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DeleteGroups_result_t *)rkev;
-}
-
-const rd_kafka_DeleteConsumerGroupOffsets_result_t *
-rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype !=
- RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT)
- return NULL;
- else
- return (
- const rd_kafka_DeleteConsumerGroupOffsets_result_t *)rkev;
-}
-
-const rd_kafka_CreateAcls_result_t *
-rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEACLS_RESULT)
- return NULL;
- else
- return (const rd_kafka_CreateAcls_result_t *)rkev;
-}
-
-const rd_kafka_DescribeAcls_result_t *
-rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBEACLS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DescribeAcls_result_t *)rkev;
-}
-
-const rd_kafka_DeleteAcls_result_t *
-rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev) {
- if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEACLS_RESULT)
- return NULL;
- else
- return (const rd_kafka_DeleteAcls_result_t *)rkev;
-}
-
-const rd_kafka_AlterConsumerGroupOffsets_result_t *
-rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
- if (!rkev ||
- rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
- return NULL;
- else
- return (
- const rd_kafka_AlterConsumerGroupOffsets_result_t *)rkev;
-}
-
-const rd_kafka_ListConsumerGroupOffsets_result_t *
-rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
- if (!rkev ||
- rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)
- return NULL;
- else
- return (const rd_kafka_ListConsumerGroupOffsets_result_t *)rkev;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h
deleted file mode 100644
index 3f9c22e34..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @brief Converts op type to event type.
- * @returns the event type, or 0 if the op cannot be mapped to an event.
- */
-static RD_UNUSED RD_INLINE rd_kafka_event_type_t
-rd_kafka_op2event(rd_kafka_op_type_t optype) {
- static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = {
- [RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR,
- [RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH,
- [RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR,
- [RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR,
- [RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE,
- [RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT,
- [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG,
- [RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS,
- [RD_KAFKA_OP_OAUTHBEARER_REFRESH] =
- RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH};
-
- return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK];
-}
-
-
-/**
- * @brief Attempt to set up an event based on rko.
- * @returns 1 if the op could be mapped to an event and was set up, else 0.
- */
-static RD_UNUSED RD_INLINE int rd_kafka_event_setup(rd_kafka_t *rk,
- rd_kafka_op_t *rko) {
-
- if (unlikely(rko->rko_flags & RD_KAFKA_OP_F_FORCE_CB))
- return 0;
-
- if (!rko->rko_evtype)
- rko->rko_evtype = rd_kafka_op2event(rko->rko_type);
-
- switch (rko->rko_evtype) {
- case RD_KAFKA_EVENT_NONE:
- return 0;
-
- case RD_KAFKA_EVENT_DR:
- rko->rko_rk = rk;
- rd_dassert(!rko->rko_u.dr.do_purge2);
- rd_kafka_msgq_init(&rko->rko_u.dr.msgq2);
- rko->rko_u.dr.do_purge2 = 1;
- return 1;
-
- case RD_KAFKA_EVENT_ERROR:
- if (rko->rko_err == RD_KAFKA_RESP_ERR__FATAL) {
- /* Translate ERR__FATAL to the underlying fatal error
- * code and string */
- rd_kafka_resp_err_t ferr;
- char errstr[512];
- ferr = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
- if (likely(ferr)) {
- rko->rko_err = ferr;
- if (rko->rko_u.err.errstr)
- rd_free(rko->rko_u.err.errstr);
- rko->rko_u.err.errstr = rd_strdup(errstr);
- rko->rko_u.err.fatal = 1;
- }
- }
- return 1;
-
- case RD_KAFKA_EVENT_REBALANCE:
- case RD_KAFKA_EVENT_LOG:
- case RD_KAFKA_EVENT_OFFSET_COMMIT:
- case RD_KAFKA_EVENT_STATS:
- case RD_KAFKA_EVENT_CREATETOPICS_RESULT:
- case RD_KAFKA_EVENT_DELETETOPICS_RESULT:
- case RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT:
- case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT:
- case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT:
- case RD_KAFKA_EVENT_DELETERECORDS_RESULT:
- case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT:
- case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT:
- case RD_KAFKA_EVENT_DELETEGROUPS_RESULT:
- case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT:
- case RD_KAFKA_EVENT_CREATEACLS_RESULT:
- case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
- case RD_KAFKA_EVENT_DELETEACLS_RESULT:
- case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT:
- case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT:
- case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
- return 1;
-
- default:
- return 0;
- }
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c
deleted file mode 100644
index a2fc085c5..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_feature.h"
-
-#include <stdlib.h>
-
-static const char *rd_kafka_feature_names[] = {"MsgVer1",
- "ApiVersion",
- "BrokerBalancedConsumer",
- "ThrottleTime",
- "Sasl",
- "SaslHandshake",
- "BrokerGroupCoordinator",
- "LZ4",
- "OffsetTime",
- "MsgVer2",
- "IdempotentProducer",
- "ZSTD",
- "SaslAuthReq",
- "UnitTest",
- NULL};
-
-
-static const struct rd_kafka_feature_map {
- /* RD_KAFKA_FEATURE_... */
- int feature;
-
- /* Depends on the following ApiVersions overlapping with
- * what the broker supports: */
- struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM];
-
-} rd_kafka_feature_map[] = {
- /**
- * @brief List of features and the ApiVersions they depend on.
- *
- * The dependency list consists of the ApiKey followed by this
- * client's supported minimum and maximum API versions.
- * As long as this list and its versions overlaps with the
- * broker supported API versions the feature will be enabled.
- */
- {
-
- /* @brief >=0.10.0: Message.MagicByte version 1:
- * Relative offsets (KIP-31) and message timestamps (KIP-32). */
- .feature = RD_KAFKA_FEATURE_MSGVER1,
- .depends =
- {
- {RD_KAFKAP_Produce, 2, 2},
- {RD_KAFKAP_Fetch, 2, 2},
- {-1},
- },
- },
- {
- /* @brief >=0.11.0: Message.MagicByte version 2 */
- .feature = RD_KAFKA_FEATURE_MSGVER2,
- .depends =
- {
- {RD_KAFKAP_Produce, 3, 3},
- {RD_KAFKAP_Fetch, 4, 4},
- {-1},
- },
- },
- {
- /* @brief >=0.10.0: ApiVersionQuery support.
-     * @remark This is a bit of a chicken-and-egg problem but needs to be
-     *         set by feature_check() to avoid the feature being cleared
-     *         even when the broker supports it. */
- .feature = RD_KAFKA_FEATURE_APIVERSION,
- .depends =
- {
- {RD_KAFKAP_ApiVersion, 0, 0},
- {-1},
- },
- },
- {
- /* @brief >=0.8.2.0: Broker-based Group coordinator */
- .feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD,
- .depends =
- {
- {RD_KAFKAP_FindCoordinator, 0, 0},
- {-1},
- },
- },
- {
- /* @brief >=0.9.0: Broker-based balanced consumer groups. */
- .feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER,
- .depends =
- {
- {RD_KAFKAP_FindCoordinator, 0, 0},
- {RD_KAFKAP_OffsetCommit, 1, 2},
- {RD_KAFKAP_OffsetFetch, 1, 1},
- {RD_KAFKAP_JoinGroup, 0, 0},
- {RD_KAFKAP_SyncGroup, 0, 0},
- {RD_KAFKAP_Heartbeat, 0, 0},
- {RD_KAFKAP_LeaveGroup, 0, 0},
- {-1},
- },
- },
- {
- /* @brief >=0.9.0: ThrottleTime */
- .feature = RD_KAFKA_FEATURE_THROTTLETIME,
- .depends =
- {
- {RD_KAFKAP_Produce, 1, 2},
- {RD_KAFKAP_Fetch, 1, 2},
- {-1},
- },
-
- },
- {
- /* @brief >=0.9.0: SASL (GSSAPI) authentication.
-     * Since SASL does not use the Kafka protocol
- * we must use something else to map us to the
- * proper broker version support:
- * JoinGroup was released along with SASL in 0.9.0. */
- .feature = RD_KAFKA_FEATURE_SASL_GSSAPI,
- .depends =
- {
- {RD_KAFKAP_JoinGroup, 0, 0},
- {-1},
- },
- },
- {
- /* @brief >=0.10.0: SASL mechanism handshake (KIP-43)
- * to automatically support other mechanisms
- * than GSSAPI, such as PLAIN. */
- .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE,
- .depends =
- {
- {RD_KAFKAP_SaslHandshake, 0, 0},
- {-1},
- },
- },
- {
- /* @brief >=0.8.2: LZ4 compression.
- * Since LZ4 initially did not rely on a specific API
- * type or version (it does in >=0.10.0)
- * we must use something else to map us to the
- * proper broker version support:
-     *        GroupCoordinator was released in 0.8.2 */
- .feature = RD_KAFKA_FEATURE_LZ4,
- .depends =
- {
- {RD_KAFKAP_FindCoordinator, 0, 0},
- {-1},
- },
- },
- {/* @brief >=0.10.1.0: Offset v1 (KIP-79)
- * Time-based offset requests */
- .feature = RD_KAFKA_FEATURE_OFFSET_TIME,
- .depends =
- {
- {RD_KAFKAP_ListOffsets, 1, 1},
- {-1},
- }},
-    {/* @brief >=0.11.0.0: Idempotent Producer */
- .feature = RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER,
- .depends =
- {
- {RD_KAFKAP_InitProducerId, 0, 0},
- {-1},
- }},
- {
- /* @brief >=2.1.0-IV2: Support ZStandard Compression Codec (KIP-110) */
- .feature = RD_KAFKA_FEATURE_ZSTD,
- .depends =
- {
- {RD_KAFKAP_Produce, 7, 7},
- {RD_KAFKAP_Fetch, 10, 10},
- {-1},
- },
- },
- {
- /* @brief >=1.0.0: SaslAuthenticateRequest */
- .feature = RD_KAFKA_FEATURE_SASL_AUTH_REQ,
- .depends =
- {
- {RD_KAFKAP_SaslHandshake, 1, 1},
- {RD_KAFKAP_SaslAuthenticate, 0, 0},
- {-1},
- },
- },
- {.feature = 0}, /* sentinel */
-};
-
-
-
-/**
- * @brief In the absence of KIP-35 support in earlier broker versions we
- *        provide hardcoded lists that correspond to older broker versions.
- */
-
-/* >= 0.10.0.0: dummy for all future versions that support ApiVersionRequest */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_Queryable[] = {
- {RD_KAFKAP_ApiVersion, 0, 0}};
-
-
-/* =~ 0.9.0 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_9_0[] = {
- {RD_KAFKAP_Produce, 0, 1}, {RD_KAFKAP_Fetch, 0, 1},
- {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0},
- {RD_KAFKAP_OffsetCommit, 0, 2}, {RD_KAFKAP_OffsetFetch, 0, 1},
- {RD_KAFKAP_FindCoordinator, 0, 0}, {RD_KAFKAP_JoinGroup, 0, 0},
- {RD_KAFKAP_Heartbeat, 0, 0}, {RD_KAFKAP_LeaveGroup, 0, 0},
- {RD_KAFKAP_SyncGroup, 0, 0}, {RD_KAFKAP_DescribeGroups, 0, 0},
- {RD_KAFKAP_ListGroups, 0, 0}};
-
-/* =~ 0.8.2 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_2[] = {
- {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0},
- {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0},
- {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 1},
- {RD_KAFKAP_FindCoordinator, 0, 0}};
-
-/* =~ 0.8.1 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_1[] = {
- {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0},
- {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0},
- {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 0}};
-
-/* =~ 0.8.0 */
-static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = {
- {RD_KAFKAP_Produce, 0, 0},
- {RD_KAFKAP_Fetch, 0, 0},
- {RD_KAFKAP_ListOffsets, 0, 0},
- {RD_KAFKAP_Metadata, 0, 0}};
-
-
-/**
- * @brief Returns the ApiVersion list for legacy broker versions that do not
- * support the ApiVersionQuery request. E.g., brokers <0.10.0.
- *
- * @param broker_version Broker version to match (longest prefix matching).
- * @param fallback Broker version whose API list is used as a fallback if
- *        \p broker_version is not recognized (the return value is still 0
- *        in that case).
- *
- * @returns 1 if \p broker_version was recognized: \p *apisp will point to
- *          the ApiVersion list and \p *api_cntp will be set to its element
- *          count.
- *          0 if \p broker_version was not recognized: \p *apisp is only set
- *          if a \p fallback was provided.
- *
- */
-int rd_kafka_get_legacy_ApiVersions(const char *broker_version,
- struct rd_kafka_ApiVersion **apisp,
- size_t *api_cntp,
- const char *fallback) {
- static const struct {
- const char *pfx;
- struct rd_kafka_ApiVersion *apis;
- size_t api_cnt;
- } vermap[] = {
-#define _VERMAP(PFX, APIS) {PFX, APIS, RD_ARRAYSIZE(APIS)}
- _VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0),
- _VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2),
- _VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1),
- _VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0),
- {"0.7.", NULL}, /* Unsupported */
- {"0.6.", NULL}, /* Unsupported */
- _VERMAP("", rd_kafka_ApiVersion_Queryable),
- {NULL}};
- int i;
- int fallback_i = -1;
- int ret = 0;
-
- *apisp = NULL;
- *api_cntp = 0;
-
- for (i = 0; vermap[i].pfx; i++) {
- if (!strncmp(vermap[i].pfx, broker_version,
- strlen(vermap[i].pfx))) {
- if (!vermap[i].apis)
- return 0;
- *apisp = vermap[i].apis;
- *api_cntp = vermap[i].api_cnt;
- ret = 1;
- break;
- } else if (fallback && !strcmp(vermap[i].pfx, fallback))
- fallback_i = i;
- }
-
- if (!*apisp && fallback) {
- rd_kafka_assert(NULL, fallback_i != -1);
- *apisp = vermap[fallback_i].apis;
- *api_cntp = vermap[fallback_i].api_cnt;
- }
-
- return ret;
-}
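
For example (illustrative call; the values follow from the tables above), a broker.version.fallback of "0.9.0.1" prefix-matches the 0.9.0 entry:

    struct rd_kafka_ApiVersion *apis;
    size_t api_cnt;

    if (rd_kafka_get_legacy_ApiVersions("0.9.0.1", &apis, &api_cnt, NULL)) {
            /* apis now points at rd_kafka_ApiVersion_0_9_0,
             * api_cnt == 13 */
    }
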
-
-
-/**
- * @returns 1 if the provided broker version (probably)
- * supports api.version.request.
- */
-int rd_kafka_ApiVersion_is_queryable(const char *broker_version) {
- struct rd_kafka_ApiVersion *apis;
- size_t api_cnt;
-
-
- if (!rd_kafka_get_legacy_ApiVersions(broker_version, &apis, &api_cnt,
- 0))
- return 0;
-
- return apis == rd_kafka_ApiVersion_Queryable;
-}
-
-
-
-/**
- * @brief Check if \p match's versions overlap with \p apis.
- *
- * @returns 1 if true, else 0.
- * @remark \p apis must be sorted using rd_kafka_ApiVersion_key_cmp()
- */
-static RD_INLINE int
-rd_kafka_ApiVersion_check(const struct rd_kafka_ApiVersion *apis,
- size_t api_cnt,
- const struct rd_kafka_ApiVersion *match) {
- const struct rd_kafka_ApiVersion *api;
-
- api = bsearch(match, apis, api_cnt, sizeof(*apis),
- rd_kafka_ApiVersion_key_cmp);
- if (unlikely(!api))
- return 0;
-
- return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer;
-}
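
The return expression above is the standard closed-interval intersection test: two version ranges overlap iff each starts no later than the other ends. Worked through with values from the feature map above (the broker range is illustrative):

    /* Client needs Fetch v4..4 (the MSGVER2 dependency); assume the
     * broker announces Fetch v0..11. */
    struct rd_kafka_ApiVersion need = {RD_KAFKAP_Fetch, 4, 4};
    struct rd_kafka_ApiVersion have = {RD_KAFKAP_Fetch, 0, 11};

    int ok = need.MinVer <= have.MaxVer && have.MinVer <= need.MaxVer;
    /* 4 <= 11 && 0 <= 4  ->  1: the dependency is satisfied */
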
-
-
-/**
- * @brief Compare broker's supported API versions to our feature request map
- * and enable/disable features accordingly.
- *
- * @param broker_apis Broker's supported APIs. If NULL the
- * \p broker.version.fallback configuration property will specify a
- * default legacy version to use.
- * @param broker_api_cnt Number of elements in \p broker_apis
- *
- * @returns the supported features (bitmask) to enable.
- */
-int rd_kafka_features_check(rd_kafka_broker_t *rkb,
- struct rd_kafka_ApiVersion *broker_apis,
- size_t broker_api_cnt) {
- int features = 0;
- int i;
-
- /* Scan through features. */
- for (i = 0; rd_kafka_feature_map[i].feature != 0; i++) {
- const struct rd_kafka_ApiVersion *match;
- int fails = 0;
-
-                /* For each feature check that all its API dependencies
-                 * can be fulfilled. */
-
- for (match = &rd_kafka_feature_map[i].depends[0];
- match->ApiKey != -1; match++) {
- int r;
-
- r = rd_kafka_ApiVersion_check(broker_apis,
- broker_api_cnt, match);
-
- rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
- " Feature %s: %s (%hd..%hd) "
- "%ssupported by broker",
- rd_kafka_features2str(
- rd_kafka_feature_map[i].feature),
- rd_kafka_ApiKey2str(match->ApiKey),
- match->MinVer, match->MaxVer,
- r ? "" : "NOT ");
-
- fails += !r;
- }
-
- rd_rkb_dbg(
- rkb, FEATURE, "APIVERSION", "%s feature %s",
- fails ? "Disabling" : "Enabling",
- rd_kafka_features2str(rd_kafka_feature_map[i].feature));
-
-
- if (!fails)
- features |= rd_kafka_feature_map[i].feature;
- }
-
- return features;
-}
-
-
-
-/**
- * @brief Make an allocated and sorted copy of \p src.
- */
-void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src,
- size_t src_cnt,
- struct rd_kafka_ApiVersion **dstp,
- size_t *dst_cntp) {
- *dstp = rd_memdup(src, sizeof(*src) * src_cnt);
- *dst_cntp = src_cnt;
- qsort(*dstp, *dst_cntp, sizeof(**dstp), rd_kafka_ApiVersion_key_cmp);
-}
-
-
-
-/**
- * @returns a human-readable feature flag string.
- */
-const char *rd_kafka_features2str(int features) {
- static RD_TLS char ret[4][256];
- size_t of = 0;
- static RD_TLS int reti = 0;
- int i;
-
- reti = (reti + 1) % 4;
-
- *ret[reti] = '\0';
- for (i = 0; rd_kafka_feature_names[i]; i++) {
- int r;
- if (!(features & (1 << i)))
- continue;
-
- r = rd_snprintf(ret[reti] + of, sizeof(ret[reti]) - of, "%s%s",
- of == 0 ? "" : ",", rd_kafka_feature_names[i]);
-                if ((size_t)r >= sizeof(ret[reti]) - of) {
- /* Out of space */
- memcpy(&ret[reti][sizeof(ret[reti]) - 3], "..", 3);
- break;
- }
-
- of += r;
- }
-
- return ret[reti];
-}
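
The four rotating thread-local buffers above let up to four results coexist, so the function can safely appear more than once in a single log or printf statement, e.g.:

    /* Each call rotates to a different TLS buffer, so both strings
     * remain valid for the duration of the printf. */
    printf("enabled: %s, missing: %s\n", rd_kafka_features2str(features),
           rd_kafka_features2str(RD_KAFKA_FEATURE_ALL & ~features));
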
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h
deleted file mode 100644
index a651a07df..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_FEATURE_H_
-#define _RDKAFKA_FEATURE_H_
-
-
-/**
- * @brief Kafka protocol features
- */
-
-/* Message version 1 (MagicByte=1):
- * + relative offsets (KIP-31)
- * + timestamps (KIP-32) */
-#define RD_KAFKA_FEATURE_MSGVER1 0x1
-
-/* ApiVersionQuery support (KIP-35) */
-#define RD_KAFKA_FEATURE_APIVERSION 0x2
-
-/* >= 0.9: Broker-based Balanced Consumer */
-#define RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER 0x4
-
-/* >= 0.9: Produce/Fetch ThrottleTime reporting */
-#define RD_KAFKA_FEATURE_THROTTLETIME 0x8
-
-/* >= 0.9: SASL GSSAPI support */
-#define RD_KAFKA_FEATURE_SASL_GSSAPI 0x10
-
-/* >= 0.10: SaslMechanismRequest (KIP-43) */
-#define RD_KAFKA_FEATURE_SASL_HANDSHAKE 0x20
-
-/* >= 0.8.2.0: Broker-based Group coordinator */
-#define RD_KAFKA_FEATURE_BROKER_GROUP_COORD 0x40
-
-/* >= 0.8.2.0: LZ4 compression (with bad and proper HC checksums) */
-#define RD_KAFKA_FEATURE_LZ4 0x80
-
-/* >= 0.10.1.0: Time-based Offset fetch (KIP-79) */
-#define RD_KAFKA_FEATURE_OFFSET_TIME 0x100
-
-/* >= 0.11.0.0: Message version 2 (MagicByte=2):
- * + EOS message format KIP-98 */
-#define RD_KAFKA_FEATURE_MSGVER2 0x200
-
-/* >= 0.11.0.0: Idempotent Producer support */
-#define RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER 0x400
-
-/* >= 2.1.0-IV2: ZSTD compression */
-#define RD_KAFKA_FEATURE_ZSTD 0x800
-
-/* >= 1.0.0: SaslAuthenticateRequest */
-#define RD_KAFKA_FEATURE_SASL_AUTH_REQ 0x1000
-
-/* Unit-test mock broker: broker supports everything.
- * Should be used with RD_KAFKA_FEATURE_ALL, but is not included in the bitmask */
-#define RD_KAFKA_FEATURE_UNITTEST 0x4000
-
-/* All features (except UNITTEST) */
-#define RD_KAFKA_FEATURE_ALL (0xffff & ~RD_KAFKA_FEATURE_UNITTEST)
-
-
-
-int rd_kafka_get_legacy_ApiVersions(const char *broker_version,
- struct rd_kafka_ApiVersion **apisp,
- size_t *api_cntp,
- const char *fallback);
-int rd_kafka_ApiVersion_is_queryable(const char *broker_version);
-void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src,
- size_t src_cnt,
- struct rd_kafka_ApiVersion **dstp,
- size_t *dst_cntp);
-int rd_kafka_features_check(rd_kafka_broker_t *rkb,
- struct rd_kafka_ApiVersion *broker_apis,
- size_t broker_api_cnt);
-
-const char *rd_kafka_features2str(int features);
-
-#endif /* _RDKAFKA_FEATURE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c
deleted file mode 100644
index 8ee67a420..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c
+++ /dev/null
@@ -1,1145 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2022 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name Fetcher
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_fetcher.h"
-
-
-/**
- * Backoff the next Fetch request (due to error).
- */
-static void rd_kafka_broker_fetch_backoff(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err) {
- int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms;
- rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000);
- rd_rkb_dbg(rkb, FETCH, "BACKOFF", "Fetch backoff for %dms: %s",
- backoff_ms, rd_kafka_err2str(err));
-}
-
-/**
- * @brief Backoff the next Fetch for specific partition
- */
-static void rd_kafka_toppar_fetch_backoff(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err) {
- int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms;
-
- /* Don't back off on reaching end of partition */
- if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
- return;
-
- /* Certain errors that may require manual intervention should have
- * a longer backoff time. */
- if (err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
- backoff_ms = RD_MAX(1000, backoff_ms * 10);
-
- rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000);
-
- rd_rkb_dbg(rkb, FETCH, "BACKOFF",
- "%s [%" PRId32 "]: Fetch backoff for %dms%s%s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- backoff_ms, err ? ": " : "",
- err ? rd_kafka_err2str(err) : "");
-}
-
-
-/**
- * @brief Handle preferred replica in fetch response.
- *
- * @locks rd_kafka_toppar_lock(rktp) and
- * rd_kafka_rdlock(rk) must NOT be held.
- *
- * @locality broker thread
- */
-static void rd_kafka_fetch_preferred_replica_handle(rd_kafka_toppar_t *rktp,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_broker_t *rkb,
- int32_t preferred_id) {
- const rd_ts_t one_minute = 60 * 1000 * 1000;
- const rd_ts_t five_seconds = 5 * 1000 * 1000;
- rd_kafka_broker_t *preferred_rkb;
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- rd_ts_t new_intvl =
- rd_interval_immediate(&rktp->rktp_new_lease_intvl, one_minute, 0);
-
- if (new_intvl < 0) {
- /* In lieu of KIP-320, the toppar is delegated back to
- * the leader in the event of an offset out-of-range
- * error (KIP-392 error case #4) because this scenario
- * implies the preferred replica is out-of-sync.
- *
- * If program execution reaches here, the leader has
- * relatively quickly instructed the client back to
- * a preferred replica, quite possibly the same one
- * as before (possibly resulting from stale metadata),
- * so we back off the toppar to slow down potential
- * back-and-forth.
- */
-
- if (rd_interval_immediate(&rktp->rktp_new_lease_log_intvl,
- one_minute, 0) > 0)
- rd_rkb_log(rkb, LOG_NOTICE, "FETCH",
- "%.*s [%" PRId32
- "]: preferred replica "
- "(%" PRId32
- ") lease changing too quickly "
- "(%" PRId64
- "s < 60s): possibly due to "
- "unavailable replica or stale cluster "
- "state: backing off next fetch",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, preferred_id,
- (one_minute - -new_intvl) / (1000 * 1000));
-
- rd_kafka_toppar_fetch_backoff(rkb, rktp,
- RD_KAFKA_RESP_ERR_NO_ERROR);
- }
-
- rd_kafka_rdlock(rk);
- preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, preferred_id);
- rd_kafka_rdunlock(rk);
-
- if (preferred_rkb) {
- rd_interval_reset_to_now(&rktp->rktp_lease_intvl, 0);
- rd_kafka_toppar_lock(rktp);
- rd_kafka_toppar_broker_update(rktp, preferred_id, preferred_rkb,
- "preferred replica updated");
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_broker_destroy(preferred_rkb);
- return;
- }
-
- if (rd_interval_immediate(&rktp->rktp_metadata_intvl, five_seconds, 0) >
- 0) {
- rd_rkb_log(rkb, LOG_NOTICE, "FETCH",
- "%.*s [%" PRId32 "]: preferred replica (%" PRId32
- ") "
- "is unknown: refreshing metadata",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, preferred_id);
-
- rd_kafka_metadata_refresh_brokers(
- rktp->rktp_rkt->rkt_rk, NULL,
- "preferred replica unavailable");
- }
-
- rd_kafka_toppar_fetch_backoff(rkb, rktp,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE);
-}
-
-
-/**
- * @brief Handle partition-specific Fetch error.
- */
-static void rd_kafka_fetch_reply_handle_partition_error(
- rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const struct rd_kafka_toppar_ver *tver,
- rd_kafka_resp_err_t err,
- int64_t HighwaterMarkOffset) {
-
- rd_rkb_dbg(rkb, FETCH, "FETCHERR",
- "%.*s [%" PRId32 "]: Fetch failed at %s: %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos),
- rd_kafka_err2name(err));
-
-        /* Some errors should be passed to the
-         * application while others are handled by rdkafka */
- switch (err) {
- /* Errors handled by rdkafka */
- case RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
- case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER:
- case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR:
- case RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH:
- case RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH:
- if (err == RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) {
-                        /* Occurs when:
-                         *  - the message exists on the broker but
-                         *    offset > HWM, or
-                         *  - HWM >= offset but the message is not
-                         *    yet available at that offset
-                         *    (replica is out of sync), or
-                         *  - the partition leader is out of sync.
-                         *
-                         * Handle by requesting a metadata update,
-                         * delegating back to the leader, and then
-                         * retrying the Fetch (with backoff).
-                         */
- rd_rkb_dbg(rkb, MSG, "FETCH",
- "Topic %s [%" PRId32
- "]: %s not "
- "available on broker %" PRId32
- " (leader %" PRId32
- "): updating metadata and retrying",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(
- rktp->rktp_offsets.fetch_pos),
- rktp->rktp_broker_id, rktp->rktp_leader_id);
- }
-
- if (err == RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) {
- rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_CONSUMER, "FETCH",
- "Topic %s [%" PRId32
- "]: Fetch failed at %s: %s: broker %" PRId32
-                                   " has not yet caught up on latest metadata: "
- "retrying",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(
- rktp->rktp_offsets.fetch_pos),
- rd_kafka_err2str(err), rktp->rktp_broker_id);
- }
-
- if (rktp->rktp_broker_id != rktp->rktp_leader_id) {
- rd_kafka_toppar_delegate_to_leader(rktp);
- }
-                /* Request metadata information update */
- rd_kafka_toppar_leader_unavailable(rktp, "fetch", err);
- break;
-
- case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: {
- rd_kafka_fetch_pos_t err_pos;
-
- if (rktp->rktp_broker_id != rktp->rktp_leader_id &&
- rktp->rktp_offsets.fetch_pos.offset > HighwaterMarkOffset) {
- rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "FETCH",
- "Topic %s [%" PRId32
-                                    "]: %s out of range "
-                                    "(HighwaterMark %" PRId64
-                                    ") while fetching from "
-                                    "broker %" PRId32 " (leader %" PRId32
-                                    "): reverting to leader",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(
- rktp->rktp_offsets.fetch_pos),
- HighwaterMarkOffset, rktp->rktp_broker_id,
- rktp->rktp_leader_id);
-
-                        /* An out-of-range error cannot be taken as
-                         * definitive when fetching from a follower:
-                         * revert to the leader in the absence of
-                         * KIP-320 support.
-                         */
- rd_kafka_toppar_delegate_to_leader(rktp);
- break;
- }
-
- /* Application error */
- err_pos = rktp->rktp_offsets.fetch_pos;
- rktp->rktp_offsets.fetch_pos.offset = RD_KAFKA_OFFSET_INVALID;
- rktp->rktp_offsets.fetch_pos.leader_epoch = -1;
- rd_kafka_offset_reset(rktp, rd_kafka_broker_id(rkb), err_pos,
- err,
- "fetch failed due to requested offset "
- "not available on the broker");
- } break;
-
- case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
- /* If we're not authorized to access the
- * topic mark it as errored to deny
- * further Fetch requests. */
- if (rktp->rktp_last_error != err) {
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err,
- tver->version, NULL, rktp,
- rktp->rktp_offsets.fetch_pos.offset,
- "Fetch from broker %" PRId32 " failed: %s",
- rd_kafka_broker_id(rkb), rd_kafka_err2str(err));
- rktp->rktp_last_error = err;
- }
- break;
-
-
- /* Application errors */
- case RD_KAFKA_RESP_ERR__PARTITION_EOF:
- if (rkb->rkb_rk->rk_conf.enable_partition_eof)
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err,
- tver->version, NULL, rktp,
- rktp->rktp_offsets.fetch_pos.offset,
- "Fetch from broker %" PRId32
- " reached end of "
- "partition at offset %" PRId64
- " (HighwaterMark %" PRId64 ")",
- rd_kafka_broker_id(rkb),
- rktp->rktp_offsets.fetch_pos.offset,
- HighwaterMarkOffset);
- break;
-
- case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE:
- default: /* and all other errors */
- rd_dassert(tver->version > 0);
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err,
- tver->version, NULL, rktp,
- rktp->rktp_offsets.fetch_pos.offset,
- "Fetch from broker %" PRId32 " failed at %s: %s",
- rd_kafka_broker_id(rkb),
- rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos),
- rd_kafka_err2str(err));
- break;
- }
-
- /* Back off the next fetch for this partition */
- rd_kafka_toppar_fetch_backoff(rkb, rktp, err);
-}
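
When enable.partition.eof is configured, the __PARTITION_EOF case above is
delivered to the application as a message-level event. A minimal consumer-side
sketch (public API; assumes a consumer `rk` already set up):

    rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 1000);
    if (rkm) {
            if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
                    fprintf(stderr, "EOF on %s [%d] at offset %lld\n",
                            rd_kafka_topic_name(rkm->rkt),
                            (int)rkm->partition, (long long)rkm->offset);
            rd_kafka_message_destroy(rkm);
    }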
-
-
-
-/**
- * @brief Per-partition FetchResponse parsing and handling.
- *
- * @returns an error on buffer parse failure, else RD_KAFKA_RESP_ERR_NO_ERROR.
- */
-static rd_kafka_resp_err_t
-rd_kafka_fetch_reply_handle_partition(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *topic,
- rd_kafka_topic_t *rkt /*possibly NULL*/,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- int16_t ErrorCode) {
- const int log_decode_errors = LOG_ERR;
- struct rd_kafka_toppar_ver *tver, tver_skel;
- rd_kafka_toppar_t *rktp = NULL;
- rd_kafka_aborted_txns_t *aborted_txns = NULL;
- rd_slice_t save_slice;
- int32_t fetch_version;
- struct {
- int32_t Partition;
- int16_t ErrorCode;
- int64_t HighwaterMarkOffset;
- int64_t LastStableOffset; /* v4 */
- int64_t LogStartOffset; /* v5 */
- int32_t MessageSetSize;
- int32_t PreferredReadReplica; /* v11 */
- } hdr;
- rd_kafka_resp_err_t err;
- int64_t end_offset;
-
- rd_kafka_buf_read_i32(rkbuf, &hdr.Partition);
- rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode);
- if (ErrorCode)
- hdr.ErrorCode = ErrorCode;
- rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset);
-
- end_offset = hdr.HighwaterMarkOffset;
-
- hdr.LastStableOffset = RD_KAFKA_OFFSET_INVALID;
- hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID;
- if (rd_kafka_buf_ApiVersion(request) >= 4) {
- int32_t AbortedTxnCnt;
- rd_kafka_buf_read_i64(rkbuf, &hdr.LastStableOffset);
- if (rd_kafka_buf_ApiVersion(request) >= 5)
- rd_kafka_buf_read_i64(rkbuf, &hdr.LogStartOffset);
-
- rd_kafka_buf_read_i32(rkbuf, &AbortedTxnCnt);
-
- if (rkb->rkb_rk->rk_conf.isolation_level ==
- RD_KAFKA_READ_UNCOMMITTED) {
-
- if (unlikely(AbortedTxnCnt > 0)) {
- rd_rkb_log(rkb, LOG_ERR, "FETCH",
- "%.*s [%" PRId32
- "]: "
- "%" PRId32
- " aborted transaction(s) "
- "encountered in READ_UNCOMMITTED "
- "fetch response: ignoring.",
- RD_KAFKAP_STR_PR(topic),
- hdr.Partition, AbortedTxnCnt);
-
- rd_kafka_buf_skip(rkbuf,
- AbortedTxnCnt * (8 + 8));
- }
- } else {
- /* Older brokers may return LSO -1,
- * in which case we use the HWM. */
- if (hdr.LastStableOffset >= 0)
- end_offset = hdr.LastStableOffset;
-
- if (AbortedTxnCnt > 0) {
- int k;
-
- if (unlikely(AbortedTxnCnt > 1000000))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "%.*s [%" PRId32
- "]: "
- "invalid AbortedTxnCnt %" PRId32,
- RD_KAFKAP_STR_PR(topic),
- hdr.Partition, AbortedTxnCnt);
-
- aborted_txns =
- rd_kafka_aborted_txns_new(AbortedTxnCnt);
- for (k = 0; k < AbortedTxnCnt; k++) {
- int64_t PID;
- int64_t FirstOffset;
- rd_kafka_buf_read_i64(rkbuf, &PID);
- rd_kafka_buf_read_i64(rkbuf,
- &FirstOffset);
- rd_kafka_aborted_txns_add(
- aborted_txns, PID, FirstOffset);
- }
- rd_kafka_aborted_txns_sort(aborted_txns);
- }
- }
- }
-
- if (rd_kafka_buf_ApiVersion(request) >= 11)
- rd_kafka_buf_read_i32(rkbuf, &hdr.PreferredReadReplica);
- else
- hdr.PreferredReadReplica = -1;
-
- rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSetSize);
-
- if (unlikely(hdr.MessageSetSize < 0))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "%.*s [%" PRId32 "]: invalid MessageSetSize %" PRId32,
- RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize);
-
- /* Look up topic+partition */
- if (likely(rkt != NULL)) {
- rd_kafka_topic_rdlock(rkt);
- rktp = rd_kafka_toppar_get(rkt, hdr.Partition,
- 0 /*no ua-on-miss*/);
- rd_kafka_topic_rdunlock(rkt);
- }
-
- if (unlikely(!rkt || !rktp)) {
- rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC",
- "Received Fetch response (error %hu) for unknown "
- "topic %.*s [%" PRId32 "]: ignoring",
- hdr.ErrorCode, RD_KAFKAP_STR_PR(topic),
- hdr.Partition);
- rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_lo_offset = hdr.LogStartOffset;
- rktp->rktp_hi_offset = hdr.HighwaterMarkOffset;
- /* Let the LastStable offset be the effective
- * end_offset based on protocol version, that is:
- * if connected to a broker that does not support
- * LastStableOffset we use the HighwaterMarkOffset. */
- rktp->rktp_ls_offset = end_offset;
- rd_kafka_toppar_unlock(rktp);
-
- if (hdr.PreferredReadReplica != -1) {
-
- rd_kafka_fetch_preferred_replica_handle(
- rktp, rkbuf, rkb, hdr.PreferredReadReplica);
-
- if (unlikely(hdr.MessageSetSize != 0)) {
- rd_rkb_log(rkb, LOG_WARNING, "FETCH",
- "%.*s [%" PRId32
- "]: Fetch response has both preferred read "
- "replica and non-zero message set size: "
- "%" PRId32 ": skipping messages",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, hdr.MessageSetSize);
- rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
- }
-
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
- rd_kafka_toppar_destroy(rktp); /* from get */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_kafka_toppar_lock(rktp);
-
- /* Make sure toppar hasn't moved to another broker
- * during the lifetime of the request. */
- if (unlikely(rktp->rktp_broker != rkb)) {
- rd_kafka_toppar_unlock(rktp);
- rd_rkb_dbg(rkb, MSG, "FETCH",
- "%.*s [%" PRId32
- "]: partition broker has changed: "
- "discarding fetch response",
- RD_KAFKAP_STR_PR(topic), hdr.Partition);
- rd_kafka_toppar_destroy(rktp); /* from get */
- rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- fetch_version = rktp->rktp_fetch_version;
- rd_kafka_toppar_unlock(rktp);
-
- /* Check if this Fetch is for an outdated fetch version,
- * or the original rktp was removed and a new one
- * created (due to partition count decreasing and
- * then increasing again, which can happen in
- * desynchronized clusters): if so ignore it. */
- tver_skel.rktp = rktp;
- tver = rd_list_find(request->rkbuf_rktp_vers, &tver_skel,
- rd_kafka_toppar_ver_cmp);
- rd_kafka_assert(NULL, tver);
- if (tver->rktp != rktp || tver->version < fetch_version) {
- rd_rkb_dbg(rkb, MSG, "DROP",
- "%s [%" PRId32
- "]: dropping outdated fetch response "
- "(v%d < %d or old rktp)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- tver->version, fetch_version);
- rd_atomic64_add(&rktp->rktp_c.rx_ver_drops, 1);
- rd_kafka_toppar_destroy(rktp); /* from get */
- rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_rkb_dbg(rkb, MSG, "FETCH",
- "Topic %.*s [%" PRId32 "] MessageSet size %" PRId32
- ", error \"%s\", MaxOffset %" PRId64 ", LSO %" PRId64
- ", Ver %" PRId32 "/%" PRId32,
- RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize,
- rd_kafka_err2str(hdr.ErrorCode), hdr.HighwaterMarkOffset,
- hdr.LastStableOffset, tver->version, fetch_version);
-
- /* If this is the last message of the queue,
- * signal EOF back to the application. */
- if (end_offset == rktp->rktp_offsets.fetch_pos.offset &&
- rktp->rktp_offsets.eof_offset != end_offset) {
- hdr.ErrorCode = RD_KAFKA_RESP_ERR__PARTITION_EOF;
- rktp->rktp_offsets.eof_offset = end_offset;
- }
-
- if (unlikely(hdr.ErrorCode != RD_KAFKA_RESP_ERR_NO_ERROR)) {
- /* Handle partition-level errors. */
- rd_kafka_fetch_reply_handle_partition_error(
- rkb, rktp, tver, hdr.ErrorCode, hdr.HighwaterMarkOffset);
-
- rd_kafka_toppar_destroy(rktp); /* from get()*/
-
- rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
-
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- /* No error, clear any previous fetch error. */
- rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (unlikely(hdr.MessageSetSize <= 0)) {
- rd_kafka_toppar_destroy(rktp); /*from get()*/
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- /**
- * Parse MessageSet
- */
- if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice,
- (size_t)hdr.MessageSetSize))
- rd_kafka_buf_check_len(rkbuf, hdr.MessageSetSize);
-
- /* Parse messages */
- err = rd_kafka_msgset_parse(rkbuf, request, rktp, aborted_txns, tver);
-
- if (aborted_txns)
- rd_kafka_aborted_txns_destroy(aborted_txns);
-
- rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice);
- /* Continue with next partition regardless of
- * parse errors (which are partition-specific) */
-
- /* On error: back off the fetcher for this partition */
- if (unlikely(err))
- rd_kafka_toppar_fetch_backoff(rkb, rktp, err);
-
- rd_kafka_toppar_destroy(rktp); /*from get()*/
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- if (rktp)
- rd_kafka_toppar_destroy(rktp); /*from get()*/
-
- return rkbuf->rkbuf_err;
-}
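
The MessageSet parse above relies on a narrow/widen idiom on the buffer's read
slice; a sketch of the pattern (fragment, using this file's internal helpers):

    rd_slice_t save_slice;
    /* Limit the reader to exactly `len` bytes so the message-set
     * parser cannot read past this partition's data: */
    if (rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice, len)) {
            /* ... parse within the narrowed window ... */
            /* Restore the full slice for the next partition: */
            rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice);
    }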
-
-/**
- * Parses and handles a Fetch reply.
- * Returns 0 on success or an error code on failure.
- */
-static rd_kafka_resp_err_t
-rd_kafka_fetch_reply_handle(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request) {
- int32_t TopicArrayCnt;
- int i;
- const int log_decode_errors = LOG_ERR;
- rd_kafka_topic_t *rkt = NULL;
- int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (rd_kafka_buf_ApiVersion(request) >= 1) {
- int32_t Throttle_Time;
- rd_kafka_buf_read_i32(rkbuf, &Throttle_Time);
-
- rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
- Throttle_Time);
- }
-
- if (rd_kafka_buf_ApiVersion(request) >= 7) {
- int32_t SessionId;
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- rd_kafka_buf_read_i32(rkbuf, &SessionId);
- }
-
- rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
- /* Verify that TopicArrayCnt seems to be in line with remaining size */
- rd_kafka_buf_check_len(rkbuf,
- TopicArrayCnt * (3 /*topic min size*/ +
- 4 /*PartitionArrayCnt*/ + 4 +
- 2 + 8 + 4 /*inner header*/));
-
- for (i = 0; i < TopicArrayCnt; i++) {
- rd_kafkap_str_t topic;
- int32_t PartitionArrayCnt;
- int j;
-
- rd_kafka_buf_read_str(rkbuf, &topic);
- rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt);
-
- rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic);
-
- for (j = 0; j < PartitionArrayCnt; j++) {
- if (rd_kafka_fetch_reply_handle_partition(
- rkb, &topic, rkt, rkbuf, request, ErrorCode))
- goto err_parse;
- }
-
- if (rkt) {
- rd_kafka_topic_destroy0(rkt);
- rkt = NULL;
- }
- }
-
- if (rd_kafka_buf_read_remain(rkbuf) != 0) {
- rd_kafka_buf_parse_fail(rkbuf,
- "Remaining data after message set "
- "parse: %" PRIusz " bytes",
- rd_kafka_buf_read_remain(rkbuf));
- RD_NOTREACHED();
- }
-
- return 0;
-
-err_parse:
- if (rkt)
- rd_kafka_topic_destroy0(rkt);
- rd_rkb_dbg(rkb, MSG, "BADMSG",
- "Bad message (Fetch v%d): "
- "is broker.version.fallback incorrectly set?",
- (int)request->rkbuf_reqhdr.ApiVersion);
- return rkbuf->rkbuf_err;
-}
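
For orientation, the wire layout consumed by the handlers above, as parsed by
this file (Fetch v11; earlier versions omit the marked fields):

    /* ThrottleTimeMs       int32   (v1+)
     * ErrorCode            int16   (v7+)
     * SessionId            int32   (v7+)
     * TopicArrayCnt        int32, then per topic:
     *   Topic              string
     *   PartitionArrayCnt  int32, then per partition:
     *     Partition int32, ErrorCode int16, HighwaterMarkOffset int64,
     *     LastStableOffset int64 (v4+), LogStartOffset int64 (v5+),
     *     AbortedTxns array (v4+), PreferredReadReplica int32 (v11),
     *     MessageSetSize int32 followed by the records. */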
-
-
-
-/**
- * @brief FetchResponse handling.
- *
- * @locality broker thread (or any thread if err == __DESTROY).
- */
-static void rd_kafka_broker_fetch_reply(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque) {
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return; /* Terminating */
-
- rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0);
- rkb->rkb_fetching = 0;
-
- /* Parse and handle the messages (unless the request errored) */
- if (!err && reply)
- err = rd_kafka_fetch_reply_handle(rkb, reply, request);
-
- if (unlikely(err)) {
- char tmp[128];
-
- rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s",
- rd_kafka_err2str(err));
- switch (err) {
- case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
- case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION:
- case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
- /* Request metadata information update */
- rd_snprintf(tmp, sizeof(tmp), "FetchRequest failed: %s",
- rd_kafka_err2str(err));
- rd_kafka_metadata_refresh_known_topics(
- rkb->rkb_rk, NULL, rd_true /*force*/, tmp);
- /* FALLTHRU */
-
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
- case RD_KAFKA_RESP_ERR__MSG_TIMED_OUT:
-                        /* The fetch is already interval-scheduled by
-                         * consumer_serve(), so don't retry. */
- break;
-
- default:
- break;
- }
-
- rd_kafka_broker_fetch_backoff(rkb, err);
- /* FALLTHRU */
- }
-}
-
-
-
-/**
- * @brief Build and send a Fetch request message for all underflowed toppars
- * for a specific broker.
- *
- * @returns the number of partitions included in the FetchRequest, if any.
- *
- * @locality broker thread
- */
-int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_buf_t *rkbuf;
- int cnt = 0;
- size_t of_TopicArrayCnt = 0;
- int TopicArrayCnt = 0;
- size_t of_PartitionArrayCnt = 0;
- int PartitionArrayCnt = 0;
- rd_kafka_topic_t *rkt_last = NULL;
- int16_t ApiVersion = 0;
-
- /* Create buffer and segments:
- * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt
- * N x topic name
- * N x PartitionArrayCnt Partition FetchOffset MaxBytes
- * where N = number of toppars.
-         * Since we don't keep track of the number of topics served by
-         * this broker, only the partition count, we do a worst-case
-         * calculation when allocating and assume each partition is on
-         * its own topic.
- */
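        /* Worked example (hypothetical numbers): with 100 active
         * partitions the worst case reserved here is
         *   29 (fixed header) + 100 * 72 (per partition, including a
         *   40-byte topic-name allowance) + 4 (ForgottenTopicsCnt)
         * = 7233 bytes. */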
-
- if (unlikely(rkb->rkb_active_toppar_cnt == 0))
- return 0;
-
- rkbuf = rd_kafka_buf_new_request(
- rkb, RD_KAFKAP_Fetch, 1,
- /* ReplicaId+MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+
- * SessionId+Epoch+TopicCnt */
- 4 + 4 + 4 + 4 + 1 + 4 + 4 + 4 +
- /* N x PartCnt+Partition+CurrentLeaderEpoch+FetchOffset+
- * LogStartOffset+MaxBytes+?TopicNameLen?*/
- (rkb->rkb_active_toppar_cnt * (4 + 4 + 4 + 8 + 8 + 4 + 40)) +
- /* ForgottenTopicsCnt */
- 4 +
- /* N x ForgottenTopicsData */
- 0);
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_Fetch,
- 0, 11, NULL);
-
- if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion,
- RD_KAFKA_FEATURE_MSGVER2);
- else if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion,
- RD_KAFKA_FEATURE_MSGVER1);
- else if (rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion,
- RD_KAFKA_FEATURE_THROTTLETIME);
-
-
- /* FetchRequest header */
- /* ReplicaId */
- rd_kafka_buf_write_i32(rkbuf, -1);
- /* MaxWaitTime */
- rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms);
- /* MinBytes */
- rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes);
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 3)
- /* MaxBytes */
- rd_kafka_buf_write_i32(rkbuf,
- rkb->rkb_rk->rk_conf.fetch_max_bytes);
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 4)
- /* IsolationLevel */
- rd_kafka_buf_write_i8(rkbuf,
- rkb->rkb_rk->rk_conf.isolation_level);
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) {
- /* SessionId */
- rd_kafka_buf_write_i32(rkbuf, 0);
- /* Epoch */
- rd_kafka_buf_write_i32(rkbuf, -1);
- }
-
- /* Write zero TopicArrayCnt but store pointer for later update */
- of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-
-        /* Prepare map for storing the fetch version for each partition;
-         * this will later be checked in the Fetch response to purge
-         * outdated responses (e.g., after a seek). */
- rkbuf->rkbuf_rktp_vers =
- rd_list_new(0, (void *)rd_kafka_toppar_ver_destroy);
- rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers,
- sizeof(struct rd_kafka_toppar_ver),
- rkb->rkb_active_toppar_cnt, 0);
-
- /* Round-robin start of the list. */
- rktp = rkb->rkb_active_toppar_next;
- do {
- struct rd_kafka_toppar_ver *tver;
-
- if (rkt_last != rktp->rktp_rkt) {
- if (rkt_last != NULL) {
- /* Update PartitionArrayCnt */
- rd_kafka_buf_update_i32(rkbuf,
- of_PartitionArrayCnt,
- PartitionArrayCnt);
- }
-
- /* Topic name */
- rd_kafka_buf_write_kstr(rkbuf,
- rktp->rktp_rkt->rkt_topic);
- TopicArrayCnt++;
- rkt_last = rktp->rktp_rkt;
- /* Partition count */
- of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
- PartitionArrayCnt = 0;
- }
-
- PartitionArrayCnt++;
-
- /* Partition */
- rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition);
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 9) {
- /* CurrentLeaderEpoch */
- if (rktp->rktp_leader_epoch < 0 &&
- rd_kafka_has_reliable_leader_epochs(rkb)) {
- /* If current leader epoch is set to -1 and
- * the broker has reliable leader epochs,
- * send 0 instead, so that epoch is checked
- * and optionally metadata is refreshed.
- * This can happen if metadata is read initially
- * without an existing topic (see
- * rd_kafka_topic_metadata_update2).
- * TODO: have a private metadata struct that
- * stores leader epochs before topic creation.
- */
- rd_kafka_buf_write_i32(rkbuf, 0);
- } else {
- rd_kafka_buf_write_i32(rkbuf,
- rktp->rktp_leader_epoch);
- }
- }
-
- /* FetchOffset */
- rd_kafka_buf_write_i64(rkbuf,
- rktp->rktp_offsets.fetch_pos.offset);
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 5)
- /* LogStartOffset - only used by follower replica */
- rd_kafka_buf_write_i64(rkbuf, -1);
-
- /* MaxBytes */
- rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes);
-
- rd_rkb_dbg(rkb, FETCH, "FETCH",
- "Fetch topic %.*s [%" PRId32 "] at offset %" PRId64
- " (leader epoch %" PRId32
- ", current leader epoch %" PRId32 ", v%d)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rktp->rktp_offsets.fetch_pos.offset,
- rktp->rktp_offsets.fetch_pos.leader_epoch,
- rktp->rktp_leader_epoch, rktp->rktp_fetch_version);
-
- /* We must have a valid fetch offset when we get here */
- rd_dassert(rktp->rktp_offsets.fetch_pos.offset >= 0);
-
- /* Add toppar + op version mapping. */
- tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL);
- tver->rktp = rd_kafka_toppar_keep(rktp);
- tver->version = rktp->rktp_fetch_version;
-
- cnt++;
- } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
- rktp_activelink)) !=
- rkb->rkb_active_toppar_next);
-
- /* Update next toppar to fetch in round-robin list. */
- rd_kafka_broker_active_toppar_next(
- rkb, rktp ? CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
- rktp_activelink)
- : NULL);
-
- rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", cnt,
- rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt);
- if (!cnt) {
- rd_kafka_buf_destroy(rkbuf);
- return cnt;
- }
-
- if (rkt_last != NULL) {
- /* Update last topic's PartitionArrayCnt */
- rd_kafka_buf_update_i32(rkbuf, of_PartitionArrayCnt,
- PartitionArrayCnt);
- }
-
- /* Update TopicArrayCnt */
- rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt);
-
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 7)
- /* Length of the ForgottenTopics list (KIP-227). Broker
- * use only - not used by the consumer. */
- rd_kafka_buf_write_i32(rkbuf, 0);
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 11)
- /* RackId */
- rd_kafka_buf_write_kstr(rkbuf,
- rkb->rkb_rk->rk_conf.client_rack);
-
- /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */
- if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000)
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
-
- /* Use configured timeout */
- rd_kafka_buf_set_timeout(rkbuf,
- rkb->rkb_rk->rk_conf.socket_timeout_ms +
- rkb->rkb_rk->rk_conf.fetch_wait_max_ms,
- now);
-
- /* Sort toppar versions for quicker lookups in Fetch response. */
- rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp);
-
- rkb->rkb_fetching = 1;
- rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL);
-
- return cnt;
-}
-
-
-
-/**
- * @brief Decide whether this toppar should be on the fetch list or not.
- *
- * Also:
- * - update toppar's op version (for broker thread's copy)
- * - finalize statistics (move rktp_offsets to rktp_offsets_fin)
- *
- * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
- *
- * @locality broker thread
- * @locks none
- */
-rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp,
- rd_kafka_broker_t *rkb,
- int force_remove) {
- int should_fetch = 1;
- const char *reason = "";
- int32_t version;
- rd_ts_t ts_backoff = 0;
- rd_bool_t lease_expired = rd_false;
-
- rd_kafka_toppar_lock(rktp);
-
- /* Check for preferred replica lease expiry */
- lease_expired = rktp->rktp_leader_id != rktp->rktp_broker_id &&
- rd_interval(&rktp->rktp_lease_intvl,
- 5 * 60 * 1000 * 1000 /*5 minutes*/, 0) > 0;
- if (lease_expired) {
-                /* delegate_to_leader() requires no locks to be held */
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_delegate_to_leader(rktp);
- rd_kafka_toppar_lock(rktp);
-
- reason = "preferred replica lease expired";
- should_fetch = 0;
- goto done;
- }
-
- /* Forced removal from fetch list */
- if (unlikely(force_remove)) {
- reason = "forced removal";
- should_fetch = 0;
- goto done;
- }
-
- if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) {
- reason = "partition removed";
- should_fetch = 0;
- goto done;
- }
-
- /* Skip toppars not in active fetch state */
- if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) {
- reason = "not in active fetch state";
- should_fetch = 0;
- goto done;
- }
-
- /* Update broker thread's fetch op version */
- version = rktp->rktp_op_version;
- if (version > rktp->rktp_fetch_version ||
- rd_kafka_fetch_pos_cmp(&rktp->rktp_next_fetch_start,
- &rktp->rktp_last_next_fetch_start) ||
- rktp->rktp_offsets.fetch_pos.offset == RD_KAFKA_OFFSET_INVALID) {
- /* New version barrier, something was modified from the
- * control plane. Reset and start over.
- * Alternatively only the next_offset changed but not the
- * barrier, which is the case when automatically triggering
- * offset.reset (such as on PARTITION_EOF or
- * OFFSET_OUT_OF_RANGE). */
-
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC",
- "Topic %s [%" PRId32
- "]: fetch decide: "
- "updating to version %d (was %d) at %s "
- "(was %s)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- version, rktp->rktp_fetch_version,
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
- rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos));
-
- rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
-
- /* New start offset */
- rktp->rktp_offsets.fetch_pos = rktp->rktp_next_fetch_start;
- rktp->rktp_last_next_fetch_start = rktp->rktp_next_fetch_start;
-
- rktp->rktp_fetch_version = version;
-
- /* Clear last error to propagate new fetch
- * errors if encountered. */
- rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
- version);
- }
-
-
- if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) {
- should_fetch = 0;
- reason = "paused";
-
- } else if (RD_KAFKA_OFFSET_IS_LOGICAL(
- rktp->rktp_next_fetch_start.offset)) {
- should_fetch = 0;
- reason = "no concrete offset";
-
- } else if (rd_kafka_q_len(rktp->rktp_fetchq) >=
- rkb->rkb_rk->rk_conf.queued_min_msgs) {
-                /* Skip toppars whose local message queue is already
-                 * above the lower threshold. */
- reason = "queued.min.messages exceeded";
- should_fetch = 0;
-
- } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >=
- rkb->rkb_rk->rk_conf.queued_max_msg_bytes) {
- reason = "queued.max.messages.kbytes exceeded";
- should_fetch = 0;
-
- } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) {
- reason = "fetch backed off";
- ts_backoff = rktp->rktp_ts_fetch_backoff;
- should_fetch = 0;
- }
-
-done:
- /* Copy offset stats to finalized place holder. */
- rktp->rktp_offsets_fin = rktp->rktp_offsets;
-
- if (rktp->rktp_fetch != should_fetch) {
- rd_rkb_dbg(
- rkb, FETCH, "FETCH",
- "Topic %s [%" PRId32
- "] in state %s at %s "
- "(%d/%d msgs, %" PRId64
- "/%d kb queued, "
- "opv %" PRId32 ") is %s%s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
- rd_kafka_q_len(rktp->rktp_fetchq),
- rkb->rkb_rk->rk_conf.queued_min_msgs,
- rd_kafka_q_size(rktp->rktp_fetchq) / 1024,
- rkb->rkb_rk->rk_conf.queued_max_msg_kbytes,
- rktp->rktp_fetch_version,
- should_fetch ? "fetchable" : "not fetchable: ", reason);
-
- if (should_fetch) {
- rd_dassert(rktp->rktp_fetch_version > 0);
- rd_kafka_broker_active_toppar_add(
- rkb, rktp, *reason ? reason : "fetchable");
- } else {
- rd_kafka_broker_active_toppar_del(rkb, rktp, reason);
- }
- }
-
- rd_kafka_toppar_unlock(rktp);
-
-        /* Non-fetching partitions will have an
-         * indefinite backoff, unless explicitly specified. */
- if (!should_fetch && !ts_backoff)
- ts_backoff = RD_TS_MAX;
-
- return ts_backoff;
-}
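
The returned timestamp feeds the broker thread's fetch scheduling. A sketch of
the assumed caller pattern (the list and link names follow this file's
conventions and may differ in the actual caller):

    rd_ts_t min_backoff = RD_TS_MAX;
    rd_kafka_toppar_t *rktp;
    /* Decide per partition and keep the earliest wakeup time: */
    TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
            rd_ts_t ts = rd_kafka_toppar_fetch_decide(rktp, rkb, 0);
            if (ts < min_backoff)
                    min_backoff = ts;
    }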
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h
deleted file mode 100644
index 0e3af82bb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2022 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDKAFKA_FETCHER_H_
-#define _RDKAFKA_FETCHER_H_
-
-
-int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now);
-
-rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp,
- rd_kafka_broker_t *rkb,
- int force_remove);
-
-
-#endif /* _RDKAFKA_FETCHER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c
deleted file mode 100644
index 98359b424..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_header.h"
-
-
-
-#define rd_kafka_header_destroy rd_free
-
-void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs) {
- rd_list_destroy(&hdrs->rkhdrs_list);
- rd_free(hdrs);
-}
-
-rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count) {
- rd_kafka_headers_t *hdrs;
-
- hdrs = rd_malloc(sizeof(*hdrs));
- rd_list_init(&hdrs->rkhdrs_list, (int)initial_count,
- rd_kafka_header_destroy);
- hdrs->rkhdrs_ser_size = 0;
-
- return hdrs;
-}
-
-static void *rd_kafka_header_copy(const void *_src, void *opaque) {
- rd_kafka_headers_t *hdrs = opaque;
- const rd_kafka_header_t *src = (const rd_kafka_header_t *)_src;
-
- return (void *)rd_kafka_header_add(
- hdrs, src->rkhdr_name, src->rkhdr_name_size, src->rkhdr_value,
- src->rkhdr_value_size);
-}
-
-rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src) {
- rd_kafka_headers_t *dst;
-
- dst = rd_malloc(sizeof(*dst));
- rd_list_init(&dst->rkhdrs_list, rd_list_cnt(&src->rkhdrs_list),
- rd_kafka_header_destroy);
- dst->rkhdrs_ser_size = 0; /* Updated by header_copy() */
- rd_list_copy_to(&dst->rkhdrs_list, &src->rkhdrs_list,
- rd_kafka_header_copy, dst);
-
- return dst;
-}
-
-
-
-rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs,
- const char *name,
- ssize_t name_size,
- const void *value,
- ssize_t value_size) {
- rd_kafka_header_t *hdr;
- char varint_NameLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
- char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
-
- if (name_size == -1)
- name_size = strlen(name);
-
- if (value_size == -1)
- value_size = value ? strlen(value) : 0;
- else if (!value)
- value_size = 0;
-
- hdr = rd_malloc(sizeof(*hdr) + name_size + 1 + value_size + 1);
- hdr->rkhdr_name_size = name_size;
- memcpy((void *)hdr->rkhdr_name, name, name_size);
- hdr->rkhdr_name[name_size] = '\0';
-
- if (likely(value != NULL)) {
- hdr->rkhdr_value = hdr->rkhdr_name + name_size + 1;
- memcpy((void *)hdr->rkhdr_value, value, value_size);
- hdr->rkhdr_value[value_size] = '\0';
- hdr->rkhdr_value_size = value_size;
- } else {
- hdr->rkhdr_value = NULL;
- hdr->rkhdr_value_size = 0;
- }
-
- rd_list_add(&hdrs->rkhdrs_list, hdr);
-
- /* Calculate serialized size of header */
- hdr->rkhdr_ser_size = name_size + value_size;
- hdr->rkhdr_ser_size += rd_uvarint_enc_i64(
- varint_NameLen, sizeof(varint_NameLen), name_size);
- hdr->rkhdr_ser_size += rd_uvarint_enc_i64(
- varint_ValueLen, sizeof(varint_ValueLen), value_size);
- hdrs->rkhdrs_ser_size += hdr->rkhdr_ser_size;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
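
A usage sketch of the public headers API built on the function above (on
success, rd_kafka_producev() takes ownership of the header list):

    rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
    rd_kafka_header_add(hdrs, "trace-id", -1, "abc-123", -1); /* string value */
    rd_kafka_header_add(hdrs, "flag", -1, NULL, 0);           /* null value */
    /* e.g. rd_kafka_producev(rk, ..., RD_KAFKA_V_HEADERS(hdrs),
     *                        RD_KAFKA_V_END);
     * if producev() fails, the application must call
     * rd_kafka_headers_destroy(hdrs) itself. */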
-
-
-/**
- * @brief header_t(name) to char * comparator
- */
-static int rd_kafka_header_cmp_str(void *_a, void *_b) {
- const rd_kafka_header_t *a = _a;
- const char *b = _b;
-
- return strcmp(a->rkhdr_name, b);
-}
-
-rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs,
- const char *name) {
- size_t ser_size = 0;
- rd_kafka_header_t *hdr;
- int i;
-
- RD_LIST_FOREACH_REVERSE(hdr, &hdrs->rkhdrs_list, i) {
- if (rd_kafka_header_cmp_str(hdr, (void *)name))
- continue;
-
- ser_size += hdr->rkhdr_ser_size;
- rd_list_remove_elem(&hdrs->rkhdrs_list, i);
- rd_kafka_header_destroy(hdr);
- }
-
- if (ser_size == 0)
- return RD_KAFKA_RESP_ERR__NOENT;
-
- rd_dassert(hdrs->rkhdrs_ser_size >= ser_size);
- hdrs->rkhdrs_ser_size -= ser_size;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs,
- const char *name,
- const void **valuep,
- size_t *sizep) {
- const rd_kafka_header_t *hdr;
- int i;
- size_t name_size = strlen(name);
-
- RD_LIST_FOREACH_REVERSE(hdr, &hdrs->rkhdrs_list, i) {
- if (hdr->rkhdr_name_size == name_size &&
- !strcmp(hdr->rkhdr_name, name)) {
- *valuep = hdr->rkhdr_value;
- *sizep = hdr->rkhdr_value_size;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- }
-
- return RD_KAFKA_RESP_ERR__NOENT;
-}
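
And the matching lookup, returning the value of the last header with the
given name:

    const void *val;
    size_t size;
    if (!rd_kafka_header_get_last(hdrs, "trace-id", &val, &size))
            printf("trace-id = %.*s\n", (int)size, (const char *)val);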
-
-
-rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs,
- size_t idx,
- const char *name,
- const void **valuep,
- size_t *sizep) {
- const rd_kafka_header_t *hdr;
- int i;
- size_t mi = 0; /* index for matching names */
- size_t name_size = strlen(name);
-
- RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) {
- if (hdr->rkhdr_name_size == name_size &&
- !strcmp(hdr->rkhdr_name, name) && mi++ == idx) {
- *valuep = hdr->rkhdr_value;
- *sizep = hdr->rkhdr_value_size;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- }
-
- return RD_KAFKA_RESP_ERR__NOENT;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs,
- size_t idx,
- const char **namep,
- const void **valuep,
- size_t *sizep) {
- const rd_kafka_header_t *hdr;
-
- hdr = rd_list_elem(&hdrs->rkhdrs_list, (int)idx);
- if (unlikely(!hdr))
- return RD_KAFKA_RESP_ERR__NOENT;
-
- *namep = hdr->rkhdr_name;
- *valuep = hdr->rkhdr_value;
- *sizep = hdr->rkhdr_value_size;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs) {
- return (size_t)rd_list_cnt(&hdrs->rkhdrs_list);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h
deleted file mode 100644
index bd6b0e959..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_HEADER_H
-#define _RDKAFKA_HEADER_H
-
-
-
-/**
- * @brief The header list (rd_kafka_headers_t) wraps the generic rd_list_t
- * with additional fields to keep track of the total on-wire size.
- */
-struct rd_kafka_headers_s {
- rd_list_t rkhdrs_list; /**< List of (rd_kafka_header_t *) */
- size_t rkhdrs_ser_size; /**< Total serialized size of headers */
-};
-
-
-/**
- * @brief The header item itself is a single-allocation immutable structure
- * (rd_kafka_header_t) containing the header name, value and value
- * length.
- * Both the header name and header value are nul-terminated for
- * API convenience.
- * The header value is a tri-state:
- * - proper value (considered binary) with length > 0
- * - empty value with length = 0 (pointer is non-NULL and nul-termd)
- * - null value with length = 0 (pointer is NULL)
- */
-typedef struct rd_kafka_header_s {
- size_t rkhdr_ser_size; /**< Serialized size */
- size_t rkhdr_value_size; /**< Value length (without nul-term) */
- size_t rkhdr_name_size; /**< Header name size (w/o nul-term) */
- char *rkhdr_value; /**< Header value (nul-terminated string but
- * considered binary).
- * Will be NULL for null values, else
- * points to rkhdr_name+.. */
- char rkhdr_name[1]; /**< Header name (nul-terminated string).
- * Followed by allocation for value+nul */
-} rd_kafka_header_t;
-
-
-/**
- * @returns the serialized size for the headers
- */
-static RD_INLINE RD_UNUSED size_t
-rd_kafka_headers_serialized_size(const rd_kafka_headers_t *hdrs) {
- return hdrs->rkhdrs_ser_size;
-}
-
-#endif /* _RDKAFKA_HEADER_H */
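
A worked example of the rkhdrs_ser_size accounting (see rd_kafka_header_add()
above): for an 8-byte header name and a 7-byte value, both length varints fit
in a single byte, so the header serializes to 1 + 8 + 1 + 7 = 17 bytes.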
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c
deleted file mode 100644
index 3245e856e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_txnmgr.h"
-#include "rdkafka_request.h"
-#include "rdunittest.h"
-
-#include <stdarg.h>
-
-/**
- * @name Idempotent Producer logic
- *
- *
- * Unrecoverable idempotent producer errors that could jeopardize the
- * idempotency guarantees if the producer were to continue operating
- * are treated as fatal errors. If the producer is transactional the
- * current transaction fails instead (a so-called abortable error)
- * and no fatal error is raised.
- *
- */
-
-static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk,
- rd_bool_t immediate,
- const char *reason);
-
-
-/**
- * @brief Set the producer's idempotence state.
- * @locks rd_kafka_wrlock() MUST be held
- */
-void rd_kafka_idemp_set_state(rd_kafka_t *rk,
- rd_kafka_idemp_state_t new_state) {
-
- if (rk->rk_eos.idemp_state == new_state)
- return;
-
- if (rd_kafka_fatal_error_code(rk) &&
- new_state != RD_KAFKA_IDEMP_STATE_FATAL_ERROR &&
- new_state != RD_KAFKA_IDEMP_STATE_TERM &&
- new_state != RD_KAFKA_IDEMP_STATE_DRAIN_RESET &&
- new_state != RD_KAFKA_IDEMP_STATE_DRAIN_BUMP) {
- rd_kafka_dbg(rk, EOS, "IDEMPSTATE",
- "Denying state change %s -> %s since a "
- "fatal error has been raised",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
- rd_kafka_idemp_state2str(new_state));
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR);
- return;
- }
-
- rd_kafka_dbg(rk, EOS, "IDEMPSTATE",
- "Idempotent producer state change %s -> %s",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
- rd_kafka_idemp_state2str(new_state));
-
- rk->rk_eos.idemp_state = new_state;
- rk->rk_eos.ts_idemp_state = rd_clock();
-
- /* Inform transaction manager of state change */
- if (rd_kafka_is_transactional(rk))
- rd_kafka_txn_idemp_state_change(rk, new_state);
-}
-
-
-
-/**
- * @brief Find a usable broker for acquiring a PID or for a
- *        coordinator query.
- *
- * @locks rd_kafka_wrlock() MUST be held
- *
- * @returns a broker with increased refcount, or NULL on error.
- */
-rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk,
- rd_kafka_resp_err_t *errp,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_broker_t *rkb;
- int up_cnt;
-
- rkb = rd_kafka_broker_any_up(rk, &up_cnt,
- rd_kafka_broker_filter_non_idempotent,
- NULL, "acquire ProducerID");
- if (rkb)
- return rkb;
-
- if (up_cnt > 0) {
- *errp = RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- rd_snprintf(errstr, errstr_size,
- "%s not supported by "
- "any of the %d connected broker(s): requires "
- "Apache Kafka broker version >= 0.11.0",
- rd_kafka_is_transactional(rk)
- ? "Transactions"
- : "Idempotent producer",
- up_cnt);
- } else {
- *errp = RD_KAFKA_RESP_ERR__TRANSPORT;
- rd_snprintf(errstr, errstr_size,
- "No brokers available for %s (%d broker(s) known)",
- rd_kafka_is_transactional(rk)
- ? "Transactions"
- : "Idempotent producer",
- rd_atomic32_get(&rk->rk_broker_cnt));
- }
-
- rd_kafka_dbg(rk, EOS, "PIDBROKER", "%s", errstr);
-
- return NULL;
-}
-
-
-
-/**
- * @brief Check if an error needs special attention, possibly
- * raising a fatal error.
- *
- * @param is_fatal if true, force fatal error regardless of error code.
- *
- * @returns rd_true if a fatal error was triggered, else rd_false.
- *
- * @locks rd_kafka_wrlock() MUST be held
- * @locality rdkafka main thread
- */
-rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *errstr,
- rd_bool_t is_fatal) {
- const char *preface = "";
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE:
- case RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT:
- case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
- is_fatal = rd_true;
- break;
-
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
- case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
- is_fatal = rd_true;
- /* Normalize error */
- err = RD_KAFKA_RESP_ERR__FENCED;
- preface = "Producer fenced by newer instance: ";
- break;
-
- default:
- break;
- }
-
- if (!is_fatal)
- return rd_false;
-
- if (rd_kafka_is_transactional(rk))
- rd_kafka_txn_set_fatal_error(rk, RD_DONT_LOCK, err, "%s%s",
- preface, errstr);
- else
- rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s%s",
- preface, errstr);
-
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR);
-
- return rd_true;
-}
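
Once the code above raises a fatal error, the application can retrieve it
through the public API:

    char errstr[512];
    rd_kafka_resp_err_t err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
    if (err)
            fprintf(stderr, "Fatal: %s: %s\n",
                    rd_kafka_err2name(err), errstr);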
-
-
-
-/**
- * @brief State machine for PID acquisition for the idempotent
- * and transactional producers.
- *
- * @locality rdkafka main thread
- * @locks rd_kafka_wrlock() MUST be held.
- */
-void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_broker_t *rkb;
- rd_bool_t is_fatal = rd_false;
-
- /* If a fatal error has been raised we do not
- * attempt to acquire a PID. */
- if (unlikely(rd_kafka_fatal_error_code(rk)))
- return;
-
-redo:
- switch (rk->rk_eos.idemp_state) {
- case RD_KAFKA_IDEMP_STATE_INIT:
- case RD_KAFKA_IDEMP_STATE_TERM:
- case RD_KAFKA_IDEMP_STATE_FATAL_ERROR:
- break;
-
- case RD_KAFKA_IDEMP_STATE_REQ_PID:
- /* Request (new) PID */
-
- /* The idempotent producer may ask any broker for a PID,
- * while the transactional producer needs to ask its
- * transaction coordinator for a PID. */
- if (!rd_kafka_is_transactional(rk) ||
- rk->rk_eos.txn_curr_coord) {
- rd_kafka_idemp_set_state(
- rk, RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT);
- goto redo;
- }
-
-
- /*
- * Look up transaction coordinator.
- * When the coordinator is known this FSM will be called again.
- */
- if (rd_kafka_txn_coord_query(rk, "Acquire PID"))
- return; /* Fatal error */
- break;
-
- case RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT:
- /* Waiting for broker/coordinator to become available */
- if (rd_kafka_is_transactional(rk)) {
- /* Check that a proper coordinator broker has
- * been assigned by inspecting txn_curr_coord
- * (the real broker) rather than txn_coord
- * (the logical broker). */
- if (!rk->rk_eos.txn_curr_coord) {
- /*
- * Can happen if the coordinator wasn't set or
- * wasn't up initially and has been set to NULL
- * after a COORDINATOR_NOT_AVAILABLE error in
- * FindCoordinatorResponse. When the coordinator
- * is known this FSM will be called again.
- */
- rd_kafka_txn_coord_query(
- rk, "Awaiting coordinator");
- return;
- }
- rkb = rk->rk_eos.txn_coord;
- rd_kafka_broker_keep(rkb);
-
- } else {
- rkb = rd_kafka_idemp_broker_any(rk, &err, errstr,
- sizeof(errstr));
-
- if (!rkb && rd_kafka_idemp_check_error(rk, err, errstr,
- rd_false))
- return; /* Fatal error */
- }
-
- if (!rkb || !rd_kafka_broker_is_up(rkb)) {
- /* The coordinator broker monitor will re-trigger
- * the fsm sooner if txn_coord has a state change,
- * else rely on the timer to retry. */
- rd_kafka_idemp_pid_timer_restart(
- rk, rd_false,
- rkb ? "No broker available" : "Coordinator not up");
-
- if (rkb)
- rd_kafka_broker_destroy(rkb);
- return;
- }
-
- if (rd_kafka_is_transactional(rk)) {
- int err_of = 0;
-
- /* If this is a transactional producer and the
- * PID-epoch needs to be bumped we'll require KIP-360
- * support on the broker, else raise a fatal error. */
-
- if (rd_kafka_pid_valid(rk->rk_eos.pid)) {
- rd_rkb_dbg(rkb, EOS, "GETPID",
- "Requesting ProducerId bump for %s",
- rd_kafka_pid2str(rk->rk_eos.pid));
- err_of = rd_snprintf(errstr, sizeof(errstr),
- "Failed to request "
- "ProducerId bump: ");
- rd_assert(err_of < 0 ||
- err_of < (int)sizeof(errstr));
- } else {
- rd_rkb_dbg(rkb, EOS, "GETPID",
- "Acquiring ProducerId");
- }
-
- err = rd_kafka_InitProducerIdRequest(
- rkb, rk->rk_conf.eos.transactional_id,
- rk->rk_conf.eos.transaction_timeout_ms,
- rd_kafka_pid_valid(rk->rk_eos.pid) ? &rk->rk_eos.pid
- : NULL,
- errstr + err_of, sizeof(errstr) - err_of,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_handle_InitProducerId, NULL);
-
- if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE &&
- rd_kafka_pid_valid(rk->rk_eos.pid))
- is_fatal = rd_true;
- } else {
- rd_rkb_dbg(rkb, EOS, "GETPID", "Acquiring ProducerId");
-
- err = rd_kafka_InitProducerIdRequest(
- rkb, NULL, -1, NULL, errstr, sizeof(errstr),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_handle_InitProducerId, NULL);
- }
-
- if (err) {
- rd_rkb_dbg(rkb, EOS, "GETPID",
- "Can't acquire ProducerId from "
- "this broker: %s",
- errstr);
- }
-
- rd_kafka_broker_destroy(rkb);
-
- if (err) {
- if (rd_kafka_idemp_check_error(rk, err, errstr,
- is_fatal))
- return; /* Fatal error */
-
- /* The coordinator broker monitor will re-trigger
- * the fsm sooner if txn_coord has a state change,
- * else rely on the timer to retry. */
- rd_kafka_idemp_pid_timer_restart(rk, rd_false, errstr);
- return;
- }
-
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_WAIT_PID);
- break;
-
- case RD_KAFKA_IDEMP_STATE_WAIT_PID:
- /* PID requested, waiting for reply */
- break;
-
- case RD_KAFKA_IDEMP_STATE_ASSIGNED:
- /* New PID assigned */
- break;
-
- case RD_KAFKA_IDEMP_STATE_DRAIN_RESET:
- /* Wait for outstanding ProduceRequests to finish
- * before resetting and re-requesting a new PID. */
- break;
-
- case RD_KAFKA_IDEMP_STATE_DRAIN_BUMP:
- /* Wait for outstanding ProduceRequests to finish
- * before bumping the current epoch. */
- break;
-
- case RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT:
- /* Wait for txnmgr to abort its current transaction
- * and then trigger a drain & reset or bump. */
- break;
- }
-}
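
In summary, the transitions driven by the FSM above are:

    REQ_PID -> WAIT_TRANSPORT -> WAIT_PID -> ASSIGNED
    ASSIGNED -> DRAIN_RESET -> REQ_PID                   (PID reset)
    ASSIGNED -> DRAIN_BUMP  -> ASSIGNED or REQ_PID       (epoch bump)
    WAIT_TXN_ABORT, then drain & reset or bump           (transactional only)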
-
-
-/**
- * @brief Timed PID retrieval timer callback.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_idemp_pid_timer_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_t *rk = arg;
-
- rd_kafka_wrlock(rk);
- rd_kafka_idemp_pid_fsm(rk);
- rd_kafka_wrunlock(rk);
-}
-
-
-/**
- * @brief Restart the pid retrieval timer.
- *
- * @param immediate If true, request a pid as soon as possible,
- * else use the default interval (500ms).
- * @locality any
- * @locks none
- */
-static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk,
- rd_bool_t immediate,
- const char *reason) {
- rd_kafka_dbg(rk, EOS, "TXN", "Starting PID FSM timer%s: %s",
- immediate ? " (fire immediately)" : "", reason);
- rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.pid_tmr,
- rd_true,
- 1000 * (immediate ? 1 : 500 /*500ms*/),
- rd_kafka_idemp_pid_timer_cb, rk);
-}
-
-
-/**
- * @brief Handle failure to acquire a PID from broker.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err) {
- rd_kafka_t *rk = rkb->rkb_rk;
- char errstr[512];
-
- rd_rkb_dbg(rkb, EOS, "GETPID", "Failed to acquire PID: %s",
- rd_kafka_err2str(err));
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return; /* Ignore */
-
- rd_assert(thrd_is_current(rk->rk_thread));
-
- rd_snprintf(errstr, sizeof(errstr),
- "Failed to acquire %s PID from broker %s: %s",
- rd_kafka_is_transactional(rk) ? "transactional"
- : "idempotence",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err));
-
- rd_kafka_wrlock(rk);
-
- if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) {
- rd_kafka_wrunlock(rk);
- return; /* Fatal error */
- }
-
- RD_UT_COVERAGE(0);
-
- if (rd_kafka_is_transactional(rk) &&
- (err == RD_KAFKA_RESP_ERR_NOT_COORDINATOR ||
- err == RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE))
- rd_kafka_txn_coord_set(rk, NULL, "%s", errstr);
-
- /* This error code is read by init_transactions() for propagation
- * to the application. */
- rk->rk_eos.txn_init_err = err;
-
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID);
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_log(rk, LOG_WARNING, "GETPID", "%s: retrying", errstr);
-
- /* Restart acquisition after a short wait */
- rd_kafka_idemp_pid_timer_restart(rk, rd_false, errstr);
-}
-
-
-/**
- * @brief Update Producer ID from InitProducerId response.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb,
- const rd_kafka_pid_t pid) {
- rd_kafka_t *rk = rkb->rkb_rk;
-
- rd_kafka_wrlock(rk);
- if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_PID) {
- rd_rkb_dbg(rkb, EOS, "GETPID",
-                           "Ignoring InitProducerId response (%s) "
- "in state %s",
- rd_kafka_pid2str(pid),
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
- rd_kafka_wrunlock(rk);
- return;
- }
-
- if (!rd_kafka_pid_valid(pid)) {
- rd_kafka_wrunlock(rk);
- rd_rkb_log(rkb, LOG_WARNING, "GETPID",
- "Acquired invalid PID{%" PRId64 ",%hd}: ignoring",
- pid.id, pid.epoch);
- rd_kafka_idemp_request_pid_failed(rkb,
- RD_KAFKA_RESP_ERR__BAD_MSG);
- return;
- }
-
- if (rd_kafka_pid_valid(rk->rk_eos.pid))
- rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s (previous %s)",
- rd_kafka_pid2str(pid),
- rd_kafka_pid2str(rk->rk_eos.pid));
- else
- rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s",
- rd_kafka_pid2str(pid));
- rk->rk_eos.pid = pid;
- rk->rk_eos.epoch_cnt++;
-
- /* The idempotence state change will trigger the transaction manager,
- * see rd_kafka_txn_idemp_state_change(). */
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_ASSIGNED);
-
- rd_kafka_wrunlock(rk);
-
- /* Wake up all broker threads (that may have messages to send
- * that were waiting for a Producer ID). */
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
- "PID updated");
-}
-
-
-/**
- * @brief Call when all partition request queues
- * are drained to reset and re-request a new PID.
- *
- * @locality any
- * @locks none
- */
-static void rd_kafka_idemp_drain_done(rd_kafka_t *rk) {
- rd_bool_t restart_tmr = rd_false;
- rd_bool_t wakeup_brokers = rd_false;
-
- rd_kafka_wrlock(rk);
- if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_DRAIN_RESET) {
- rd_kafka_dbg(rk, EOS, "DRAIN", "All partitions drained");
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID);
- restart_tmr = rd_true;
-
- } else if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_DRAIN_BUMP &&
- rd_kafka_pid_valid(rk->rk_eos.pid)) {
-
- if (rd_kafka_is_transactional(rk)) {
- /* The epoch bump needs to be performed by the
- * coordinator by sending it an InitPid request. */
- rd_kafka_dbg(rk, EOS, "DRAIN",
- "All partitions drained, asking "
- "coordinator to bump epoch (currently %s)",
- rd_kafka_pid2str(rk->rk_eos.pid));
- rd_kafka_idemp_set_state(rk,
- RD_KAFKA_IDEMP_STATE_REQ_PID);
- restart_tmr = rd_true;
-
- } else {
- /* The idempotent producer can bump its own epoch */
- rk->rk_eos.pid = rd_kafka_pid_bump(rk->rk_eos.pid);
- rd_kafka_dbg(rk, EOS, "DRAIN",
- "All partitions drained, bumped "
- "epoch to %s",
- rd_kafka_pid2str(rk->rk_eos.pid));
- rd_kafka_idemp_set_state(rk,
- RD_KAFKA_IDEMP_STATE_ASSIGNED);
- wakeup_brokers = rd_true;
- }
- }
- rd_kafka_wrunlock(rk);
-
- /* Restart timer to eventually trigger a re-request */
- if (restart_tmr)
- rd_kafka_idemp_pid_timer_restart(rk, rd_true, "Drain done");
-
- /* Wake up all broker threads (that may have messages to send
- * that were waiting for a Producer ID). */
- if (wakeup_brokers)
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
- "message drain done");
-}
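-
-/* Editor's sketch (illustrative, not part of the original source):
- * the local epoch bump semantics assumed above for the
- * non-transactional idempotent producer. The Producer ID is kept
- * while the epoch is incremented, wrapping at INT16_MAX, which
- * invalidates sequences produced under the previous epoch.
- * example_pid_t/example_pid_bump() are hypothetical stand-ins for
- * rd_kafka_pid_t/rd_kafka_pid_bump(). */
-#include <stdint.h>
-
-typedef struct example_pid_s {
-        int64_t id;    /* Producer ID assigned by the (txn) coordinator */
-        int16_t epoch; /* Producer epoch, bumped on reset or recovery */
-} example_pid_t;
-
-static RD_UNUSED example_pid_t example_pid_bump(example_pid_t pid) {
-        pid.epoch = (int16_t)(((int)pid.epoch + 1) & (int)INT16_MAX);
-        return pid;
-}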
-
-/**
- * @brief Check if in-flight toppars drain is done, if so transition to
- * next state.
- *
- * @locality any
- * @locks none
- */
-static RD_INLINE void rd_kafka_idemp_check_drain_done(rd_kafka_t *rk) {
- if (rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt) == 0)
- rd_kafka_idemp_drain_done(rk);
-}
-
-
-/**
- * @brief Schedule a reset and re-request of PID when the
- * local ProduceRequest queues have been fully drained.
- *
- * The PID is not reset until the queues are fully drained.
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason) {
- rd_kafka_wrlock(rk);
- rd_kafka_dbg(rk, EOS, "DRAIN",
- "Beginning partition drain for %s reset "
- "for %d partition(s) with in-flight requests: %s",
- rd_kafka_pid2str(rk->rk_eos.pid),
- rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), reason);
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_RESET);
- rd_kafka_wrunlock(rk);
-
- /* Check right away if the drain could be done. */
- rd_kafka_idemp_check_drain_done(rk);
-}
-
-
-/**
- * @brief Schedule an epoch bump when the local ProduceRequest queues
- * have been fully drained.
- *
- * The PID is not bumped until the queues are fully drained and the current
- * transaction is aborted (if any).
- *
- * @param allow_txn_abort If this is a transactional producer and this flag is
- * true then we trigger an abortable txn error to abort
- * the current transaction first. The txnmgr will later
- * call us back with this flag set to false to go ahead
- * with the epoch bump.
- * @param fmt is a human-readable reason for the bump
- *
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk,
- rd_bool_t allow_txn_abort,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- va_list ap;
- char buf[256];
- rd_bool_t requires_txn_abort =
- allow_txn_abort && rd_kafka_is_transactional(rk);
-
- va_start(ap, fmt);
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- rd_kafka_wrlock(rk);
-
-
- if (requires_txn_abort) {
- rd_kafka_dbg(rk, EOS, "DRAIN",
- "Need transaction abort before beginning "
- "partition drain in state %s for %s epoch bump "
- "for %d partition(s) with in-flight requests: %s",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
- rd_kafka_pid2str(rk->rk_eos.pid),
- rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt),
- buf);
- rd_kafka_idemp_set_state(rk,
- RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT);
-
- } else {
- rd_kafka_dbg(rk, EOS, "DRAIN",
- "Beginning partition drain in state %s "
- "for %s epoch bump "
- "for %d partition(s) with in-flight requests: %s",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
- rd_kafka_pid2str(rk->rk_eos.pid),
- rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt),
- buf);
-
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP);
- }
-
- rd_kafka_wrunlock(rk);
-
- if (requires_txn_abort) {
- /* Transactions: bumping the epoch requires the current
- * transaction to be aborted first. */
- rd_kafka_txn_set_abortable_error_with_bump(rk, err, "%s", buf);
-
- } else {
- /* Idempotent producer: check right away if the drain could
- * be done. */
- rd_kafka_idemp_check_drain_done(rk);
- }
-}
-
-/**
- * @brief Mark partition as waiting-to-drain.
- *
- * @locks toppar_lock MUST be held
- * @locality broker thread (leader or not)
- */
-void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason) {
- if (rktp->rktp_eos.wait_drain)
- return;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "DRAIN",
- "%.*s [%" PRId32 "] beginning partition drain: %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, reason);
- rktp->rktp_eos.wait_drain = rd_true;
-}
-
-
-/**
- * @brief Mark partition as no longer having a ProduceRequest in-flight.
- *
- * @locality any
- * @locks none
- */
-void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp) {
- int r = rd_atomic32_sub(&rk->rk_eos.inflight_toppar_cnt, 1);
-
- if (r == 0) {
- /* Check if we're waiting for the partitions to drain
- * before resetting the PID, and if so trigger a reset
- * since this was the last drained one. */
- rd_kafka_idemp_drain_done(rk);
- } else {
- rd_assert(r >= 0);
- }
-}
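-
-/* Editor's sketch (illustrative, not part of the original source):
- * the "last decrementer completes the drain" idiom used above, shown
- * with C11 atomics. Whichever thread brings the in-flight count to
- * zero runs the drain-done step, so no lock is needed around the
- * counter itself. The example_* names are hypothetical. */
-#include <stdatomic.h>
-
-static atomic_int example_inflight_cnt;
-
-static RD_UNUSED void example_request_done(void (*drain_done_cb)(void)) {
-        int r = atomic_fetch_sub(&example_inflight_cnt, 1) - 1;
-
-        rd_assert(r >= 0);
-        if (r == 0)
-                drain_done_cb(); /* last in-flight request: drain is done */
-}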
-
-
-/**
- * @brief Mark partition as having a ProduceRequest in-flight.
- *
- * @locality toppar handler thread
- * @locks none
- */
-void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp) {
- rd_atomic32_add(&rk->rk_eos.inflight_toppar_cnt, 1);
-}
-
-
-
-/**
- * @brief Start idempotent producer (asynchronously).
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate) {
-
- if (rd_kafka_terminating(rk))
- return;
-
- rd_kafka_wrlock(rk);
- /* Don't restart PID acquisition if there's already an outstanding
- * request. */
- if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_PID)
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID);
- rd_kafka_wrunlock(rk);
-
- /* Schedule request timer */
- rd_kafka_idemp_pid_timer_restart(rk, immediate,
- "Starting idempotent producer");
-}
-
-
-/**
- * @brief Initialize the idempotent producer.
- *
- * @remark Must be called from rd_kafka_new() and only once.
- * @locality rdkafka main thread
- * @locks none / not needed from rd_kafka_new()
- */
-void rd_kafka_idemp_init(rd_kafka_t *rk) {
- rd_assert(thrd_is_current(rk->rk_thread));
-
- rd_atomic32_init(&rk->rk_eos.inflight_toppar_cnt, 0);
- rd_kafka_pid_reset(&rk->rk_eos.pid);
-
- /* The transactional producer acquires the PID
- * from init_transactions(), for non-transactional producers
- * the PID can be acquired right away. */
- if (rd_kafka_is_transactional(rk))
- rd_kafka_txns_init(rk);
- else
- /* There are no available brokers this early,
- * so just set the state to indicate that we want to
- * acquire a PID as soon as possible and start
- * the timer. */
- rd_kafka_idemp_start(rk, rd_false /*non-immediate*/);
-}
-
-
-/**
- * @brief Terminate and clean up idempotent producer
- *
- * @locality rdkafka main thread
- * @locks rd_kafka_wrlock() MUST be held
- */
-void rd_kafka_idemp_term(rd_kafka_t *rk) {
- rd_assert(thrd_is_current(rk->rk_thread));
-
- rd_kafka_wrlock(rk);
- if (rd_kafka_is_transactional(rk))
- rd_kafka_txns_term(rk);
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_TERM);
- rd_kafka_wrunlock(rk);
- rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.pid_tmr, 1);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h
deleted file mode 100644
index 5be8d606d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RD_KAFKA_IDEMPOTENCE_H_
-#define _RD_KAFKA_IDEMPOTENCE_H_
-
-
-/**
- * @define The broker maintains a window of the last 5 Produce requests
- *         for a partition to be able to deduplicate resends.
- */
-#define RD_KAFKA_IDEMP_MAX_INFLIGHT 5
-#define RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "5" /* For printouts */
-
-/**
- * @brief Get the current PID if state permits.
- *
- * @param bumpable If true, return PID even if it may only be used for
- * bumping the Epoch.
- *
- * @returns If there is no valid PID or the state
- * does not permit further PID usage (such as when draining)
- * then an invalid PID is returned.
- *
- * @locality any
- * @locks none
- */
-static RD_UNUSED RD_INLINE rd_kafka_pid_t
-rd_kafka_idemp_get_pid0(rd_kafka_t *rk,
- rd_dolock_t do_lock,
- rd_bool_t bumpable) {
- rd_kafka_pid_t pid;
-
- if (do_lock)
- rd_kafka_rdlock(rk);
- if (likely(rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED))
- pid = rk->rk_eos.pid;
- else if (unlikely(bumpable && rk->rk_eos.idemp_state ==
- RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT))
- pid = rk->rk_eos.pid;
- else
- rd_kafka_pid_reset(&pid);
- if (do_lock)
- rd_kafka_rdunlock(rk);
-
- return pid;
-}
-
-#define rd_kafka_idemp_get_pid(rk) \
- rd_kafka_idemp_get_pid0(rk, RD_DO_LOCK, rd_false)
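-
-/* Editor's sketch (illustrative usage, assumed rather than taken from
- * the original source): a broker thread fetches the current PID before
- * building a ProduceRequest and backs off while the PID is unusable,
- * e.g. during a drain. */
-static RD_UNUSED void example_maybe_produce(rd_kafka_t *rk) {
-        rd_kafka_pid_t pid = rd_kafka_idemp_get_pid(rk);
-
-        if (!rd_kafka_pid_valid(pid))
-                return; /* wait for RD_KAFKA_IDEMP_STATE_ASSIGNED */
-
-        /* ... attach pid and per-partition sequence numbers to the
-         *     ProduceRequest being built ... */
-}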
-
-void rd_kafka_idemp_set_state(rd_kafka_t *rk, rd_kafka_idemp_state_t new_state);
-void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err);
-void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb,
- const rd_kafka_pid_t pid);
-void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk);
-void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason);
-void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk,
- rd_bool_t allow_txn_abort,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) RD_FORMAT(printf, 4, 5);
-#define rd_kafka_idemp_drain_epoch_bump(rk, err, ...) \
- rd_kafka_idemp_drain_epoch_bump0(rk, rd_true, err, __VA_ARGS__)
-
-void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason);
-void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp);
-void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp);
-
-rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk,
- rd_kafka_resp_err_t *errp,
- char *errstr,
- size_t errstr_size);
-
-rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *errstr,
- rd_bool_t is_fatal);
-
-
-/**
- * @brief Call when a fatal idempotence error has occurred, when the producer
- * can't continue without risking the idempotency guarantees.
- *
- * If the producer is transactional this error is non-fatal and will just
- * cause the current transaction to transition into the ABORTABLE_ERROR state.
- * If the producer is not transactional the client instance fatal error
- * is set and the producer instance is no longer usable.
- *
- * @warning Until KIP-360 has been fully implemented any fatal idempotent
- * producer error will also raise a fatal transactional producer error.
- * This is to guarantee that there is no silent data loss.
- *
- * @param RK rd_kafka_t instance
- * @param ERR error to raise
- * @param ... format string with error message
- *
- * @locality any thread
- * @locks none
- */
-#define rd_kafka_idemp_set_fatal_error(RK, ERR, ...) \
- do { \
- if (rd_kafka_is_transactional(RK)) \
-			rd_kafka_txn_set_fatal_error(RK, RD_DO_LOCK, ERR,      \
- __VA_ARGS__); \
- else \
- rd_kafka_set_fatal_error(RK, ERR, __VA_ARGS__); \
- } while (0)
-
-void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate);
-void rd_kafka_idemp_init(rd_kafka_t *rk);
-void rd_kafka_idemp_term(rd_kafka_t *rk);
-
-
-#endif /* _RD_KAFKA_IDEMPOTENCE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h
deleted file mode 100644
index 584ff3c96..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h
+++ /dev/null
@@ -1,1054 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_INT_H_
-#define _RDKAFKA_INT_H_
-
-#ifndef _WIN32
-#define _GNU_SOURCE /* for strndup() */
-#endif
-
-#ifdef _MSC_VER
-typedef int mode_t;
-#endif
-
-#include <fcntl.h>
-
-
-#include "rdsysqueue.h"
-
-#include "rdkafka.h"
-#include "rd.h"
-#include "rdlog.h"
-#include "rdtime.h"
-#include "rdaddr.h"
-#include "rdinterval.h"
-#include "rdavg.h"
-#include "rdlist.h"
-
-#if WITH_SSL
-#include <openssl/ssl.h>
-#endif
-
-
-
-#define rd_kafka_assert(rk, cond) \
- do { \
- if (unlikely(!(cond))) \
- rd_kafka_crash(__FILE__, __LINE__, __FUNCTION__, (rk), \
- "assert: " #cond); \
- } while (0)
-
-
-void RD_NORETURN rd_kafka_crash(const char *file,
- int line,
- const char *function,
- rd_kafka_t *rk,
- const char *reason);
-
-
-/* Forward declarations */
-struct rd_kafka_s;
-struct rd_kafka_topic_s;
-struct rd_kafka_msg_s;
-struct rd_kafka_broker_s;
-struct rd_kafka_toppar_s;
-
-typedef struct rd_kafka_lwtopic_s rd_kafka_lwtopic_t;
-
-
-/**
- * Protocol level sanity
- */
-#define RD_KAFKAP_BROKERS_MAX 10000
-#define RD_KAFKAP_TOPICS_MAX 1000000
-#define RD_KAFKAP_PARTITIONS_MAX 100000
-
-
-#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0)
-
-
-/**
- * @struct Represents a fetch position:
- *         an offset and a partition leader epoch (if known, else -1).
- */
-typedef struct rd_kafka_fetch_pos_s {
- int64_t offset;
- int32_t leader_epoch;
- rd_bool_t validated;
-} rd_kafka_fetch_pos_t;
-
-
-
-#include "rdkafka_op.h"
-#include "rdkafka_queue.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_proto.h"
-#include "rdkafka_buf.h"
-#include "rdkafka_pattern.h"
-#include "rdkafka_conf.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_timer.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_metadata.h"
-#include "rdkafka_mock.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_assignment.h"
-#include "rdkafka_coord.h"
-#include "rdkafka_mock.h"
-
-/**
- * Protocol level sanity
- */
-#define RD_KAFKAP_BROKERS_MAX 10000
-#define RD_KAFKAP_TOPICS_MAX 1000000
-#define RD_KAFKAP_PARTITIONS_MAX 100000
-#define RD_KAFKAP_GROUPS_MAX 100000
-
-
-#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0)
-
-
-
-/**
- * @enum Idempotent Producer state
- */
-typedef enum {
- RD_KAFKA_IDEMP_STATE_INIT, /**< Initial state */
- RD_KAFKA_IDEMP_STATE_TERM, /**< Instance is terminating */
- RD_KAFKA_IDEMP_STATE_FATAL_ERROR, /**< A fatal error has been raised */
- RD_KAFKA_IDEMP_STATE_REQ_PID, /**< Request new PID */
- RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT, /**< Waiting for coordinator to
- * become available. */
- RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */
- RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */
- RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding
- * ProduceRequests to finish
- * before resetting and
- * re-requesting a new PID. */
- RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding
- * ProduceRequests to finish
- * before bumping the current
- * epoch. */
- RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT, /**< Wait for transaction abort
- * to finish and trigger a
- * drain and reset or bump. */
-} rd_kafka_idemp_state_t;
-
-/**
- * @returns the idemp_state_t string representation
- */
-static RD_UNUSED const char *
-rd_kafka_idemp_state2str(rd_kafka_idemp_state_t state) {
- static const char *names[] = {
- "Init", "Terminate", "FatalError", "RequestPID", "WaitTransport",
- "WaitPID", "Assigned", "DrainReset", "DrainBump", "WaitTxnAbort"};
- return names[state];
-}
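-
-/* Editor's sketch (illustrative, not part of the original source):
- * table-driven enum-to-string mappings like the one above rely on the
- * name table staying in sync with the enum. With a trailing __CNT
- * enumerator, a C11 static assertion can enforce this at compile time;
- * the ex_* names below are hypothetical. */
-#include <assert.h>
-
-typedef enum { EX_STATE_INIT, EX_STATE_TERM, EX_STATE__CNT } ex_state_t;
-
-static const char *ex_state_names[] = {"Init", "Terminate"};
-
-static_assert(sizeof(ex_state_names) / sizeof(ex_state_names[0]) ==
-                  EX_STATE__CNT,
-              "ex_state_names out of sync with ex_state_t");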
-
-
-
-/**
- * @enum Transactional Producer state
- */
-typedef enum {
- /**< Initial state */
- RD_KAFKA_TXN_STATE_INIT,
- /**< Awaiting PID to be acquired by rdkafka_idempotence.c */
- RD_KAFKA_TXN_STATE_WAIT_PID,
- /**< PID acquired, but application has not made a successful
- * init_transactions() call. */
- RD_KAFKA_TXN_STATE_READY_NOT_ACKED,
- /**< PID acquired, no active transaction. */
- RD_KAFKA_TXN_STATE_READY,
- /**< begin_transaction() has been called. */
- RD_KAFKA_TXN_STATE_IN_TRANSACTION,
- /**< commit_transaction() has been called. */
- RD_KAFKA_TXN_STATE_BEGIN_COMMIT,
- /**< commit_transaction() has been called and all outstanding
- * messages, partitions, and offsets have been sent. */
- RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION,
- /**< Transaction successfully committed but application has not made
- * a successful commit_transaction() call yet. */
- RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED,
-	/**< abort_transaction() has been called. */
-	RD_KAFKA_TXN_STATE_BEGIN_ABORT,
-	/**< abort_transaction() has been called and all outstanding
-	 *   messages, partitions, and offsets have been sent. */
-	RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
- /**< Transaction successfully aborted but application has not made
- * a successful abort_transaction() call yet. */
- RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED,
- /**< An abortable error has occurred. */
- RD_KAFKA_TXN_STATE_ABORTABLE_ERROR,
-	/**< A fatal error has occurred. */
- RD_KAFKA_TXN_STATE_FATAL_ERROR
-} rd_kafka_txn_state_t;
-
-
-/**
- * @returns the txn_state_t string representation
- */
-static RD_UNUSED const char *
-rd_kafka_txn_state2str(rd_kafka_txn_state_t state) {
- static const char *names[] = {"Init",
- "WaitPID",
- "ReadyNotAcked",
- "Ready",
- "InTransaction",
- "BeginCommit",
- "CommittingTransaction",
- "CommitNotAcked",
- "BeginAbort",
- "AbortingTransaction",
- "AbortedNotAcked",
- "AbortableError",
- "FatalError"};
- return names[state];
-}
-
-
-
-/**
- * Kafka handle, internal representation of the application's rd_kafka_t.
- */
-
-struct rd_kafka_s {
- rd_kafka_q_t *rk_rep; /* kafka -> application reply queue */
- rd_kafka_q_t *rk_ops; /* any -> rdkafka main thread ops */
-
- TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers;
- rd_list_t rk_broker_by_id; /* Fast id lookups. */
- rd_atomic32_t rk_broker_cnt;
- /**< Number of brokers in state >= UP */
- rd_atomic32_t rk_broker_up_cnt;
-	/**< Number of logical brokers in state >= UP; this is a subset
-	 *   of rk_broker_up_cnt. */
- rd_atomic32_t rk_logical_broker_up_cnt;
- /**< Number of brokers that are down, only includes brokers
- * that have had at least one connection attempt. */
- rd_atomic32_t rk_broker_down_cnt;
- /**< Logical brokers currently without an address.
- * Used for calculating ERR__ALL_BROKERS_DOWN. */
- rd_atomic32_t rk_broker_addrless_cnt;
-
- mtx_t rk_internal_rkb_lock;
- rd_kafka_broker_t *rk_internal_rkb;
-
- /* Broadcasting of broker state changes to wake up
- * functions waiting for a state change. */
- cnd_t rk_broker_state_change_cnd;
- mtx_t rk_broker_state_change_lock;
- int rk_broker_state_change_version;
- /* List of (rd_kafka_enq_once_t*) objects waiting for broker
- * state changes. Protected by rk_broker_state_change_lock. */
- rd_list_t rk_broker_state_change_waiters; /**< (rd_kafka_enq_once_t*) */
-
- TAILQ_HEAD(, rd_kafka_topic_s) rk_topics;
- int rk_topic_cnt;
-
- struct rd_kafka_cgrp_s *rk_cgrp;
-
- rd_kafka_conf_t rk_conf;
- rd_kafka_q_t *rk_logq; /* Log queue if `log.queue` set */
- char rk_name[128];
- rd_kafkap_str_t *rk_client_id;
- rd_kafkap_str_t *rk_group_id; /* Consumer group id */
-
- rd_atomic32_t rk_terminate; /**< Set to RD_KAFKA_DESTROY_F_..
-	                             *   flags when the instance
-	                             *   is being destroyed.
- * The value set is the
- * destroy flags from
- * rd_kafka_destroy*() and
- * the two internal flags shown
- * below.
- *
- * Order:
- * 1. user_flags | .._F_DESTROY_CALLED
- * is set in rd_kafka_destroy*().
- * 2. consumer_close() is called
- * for consumers.
- * 3. .._F_TERMINATE is set to
- * signal all background threads
- * to terminate.
- */
-
-#define RD_KAFKA_DESTROY_F_TERMINATE \
- 0x1 /**< Internal flag to make sure \
- * rk_terminate is set to non-zero \
- * value even if user passed \
- * no destroy flags. */
-#define RD_KAFKA_DESTROY_F_DESTROY_CALLED \
- 0x2 /**< Application has called \
- * ..destroy*() and we've \
- * begun the termination \
- * process. \
- * This flag is needed to avoid \
- * rk_terminate from being \
- * 0 when destroy_flags() \
- * is called with flags=0 \
- * and prior to _F_TERMINATE \
- * has been set. */
-#define RD_KAFKA_DESTROY_F_IMMEDIATE \
- 0x4 /**< Immediate non-blocking \
- * destruction without waiting \
- * for all resources \
- * to be cleaned up. \
- * WARNING: Memory and resource \
- * leaks possible. \
- * This flag automatically sets \
- * .._NO_CONSUMER_CLOSE. */
-
-
- rwlock_t rk_lock;
- rd_kafka_type_t rk_type;
- struct timeval rk_tv_state_change;
-
- rd_atomic64_t rk_ts_last_poll; /**< Timestamp of last application
- * consumer_poll() call
- * (or equivalent).
- * Used to enforce
- * max.poll.interval.ms.
- * Only relevant for consumer. */
- /* First fatal error. */
- struct {
- rd_atomic32_t err; /**< rd_kafka_resp_err_t */
- char *errstr; /**< Protected by rk_lock */
- int cnt; /**< Number of errors raised, only
- * the first one is stored. */
- } rk_fatal;
-
- rd_atomic32_t rk_last_throttle; /* Last throttle_time_ms value
- * from broker. */
-
- /* Locks: rd_kafka_*lock() */
- rd_ts_t rk_ts_metadata; /* Timestamp of most recent
- * metadata. */
-
- struct rd_kafka_metadata *rk_full_metadata; /* Last full metadata. */
-	rd_ts_t rk_ts_full_metadata; /* Timestamp of most recent full metadata. */
- struct rd_kafka_metadata_cache rk_metadata_cache; /* Metadata cache */
-
- char *rk_clusterid; /* ClusterId from metadata */
- int32_t rk_controllerid; /* ControllerId from metadata */
-
- /**< Producer: Delivery report mode */
- enum { RD_KAFKA_DR_MODE_NONE, /**< No delivery reports */
- RD_KAFKA_DR_MODE_CB, /**< Delivery reports through callback */
- RD_KAFKA_DR_MODE_EVENT, /**< Delivery reports through event API*/
- } rk_drmode;
-
- /* Simple consumer count:
- * >0: Running in legacy / Simple Consumer mode,
- * 0: No consumers running
- * <0: Running in High level consumer mode */
- rd_atomic32_t rk_simple_cnt;
-
- /**
- * Exactly Once Semantics and Idempotent Producer
- *
- * @locks rk_lock
- */
- struct {
- /*
- * Idempotence
- */
- rd_kafka_idemp_state_t idemp_state; /**< Idempotent Producer
- * state */
- rd_ts_t ts_idemp_state; /**< Last state change */
- rd_kafka_pid_t pid; /**< Current Producer ID and Epoch */
- int epoch_cnt; /**< Number of times pid/epoch changed */
- rd_atomic32_t inflight_toppar_cnt; /**< Current number of
- * toppars with inflight
- * requests. */
- rd_kafka_timer_t pid_tmr; /**< PID FSM timer */
-
- /*
- * Transactions
- *
- * All field access is from the rdkafka main thread,
- * unless a specific lock is mentioned in the doc string.
- *
- */
- rd_atomic32_t txn_may_enq; /**< Transaction state allows
- * application to enqueue
- * (produce) messages. */
-
- rd_kafkap_str_t *transactional_id; /**< transactional.id */
- rd_kafka_txn_state_t txn_state; /**< Transactional state.
- * @locks rk_lock */
- rd_ts_t ts_txn_state; /**< Last state change.
- * @locks rk_lock */
- rd_kafka_broker_t *txn_coord; /**< Transaction coordinator,
- * this is a logical broker.*/
- rd_kafka_broker_t *txn_curr_coord; /**< Current actual coord
- * broker.
- * This is only used to
- * check if the coord
- * changes. */
- rd_kafka_broker_monitor_t txn_coord_mon; /**< Monitor for
- * coordinator to
- * take action when
- * the broker state
- * changes. */
- rd_bool_t txn_requires_epoch_bump; /**< Coordinator epoch bump
- * required to recover from
- * idempotent producer
- * fatal error. */
-
- /**< Blocking transactional API application call
- * currently being handled, its state, reply queue and how
- * to handle timeout.
- * Only one transactional API call is allowed at any time.
- * Protected by the rk_lock. */
- struct {
- char name[64]; /**< API name, e.g.,
- * send_offsets_to_transaction.
- * This is used to make sure
- * conflicting APIs are not
- * called simultaneously. */
- rd_bool_t calling; /**< API is being actively called.
- * I.e., application is blocking
- * on a txn API call.
- * This is used to make sure
- * no concurrent API calls are
- * being made. */
- rd_kafka_error_t *error; /**< Last error from background
- * processing. This is only
- * set if the application's
- * API call timed out.
- * It will be returned on
- * the next call. */
- rd_bool_t has_result; /**< Indicates whether an API
- * result (possibly
- * intermediate) has been set.
- */
- cnd_t cnd; /**< Application thread will
- * block on this cnd waiting
- * for a result to be set. */
- mtx_t lock; /**< Protects all fields of
- * txn_curr_api. */
- } txn_curr_api;
-
-
- int txn_req_cnt; /**< Number of transaction
- * requests sent.
- * This is incremented when a
- * AddPartitionsToTxn or
- * AddOffsetsToTxn request
- * has been sent for the
- * current transaction,
- * to keep track of
- * whether the broker is
- * aware of the current
- * transaction and thus
- * requires an EndTxn request
- * on abort or not. */
-
- /**< Timer to trigger registration of pending partitions */
- rd_kafka_timer_t txn_register_parts_tmr;
-
- /**< Lock for txn_pending_rktps and txn_waitresp_rktps */
- mtx_t txn_pending_lock;
-
- /**< Partitions pending being added to transaction. */
- rd_kafka_toppar_tqhead_t txn_pending_rktps;
-
- /**< Partitions in-flight added to transaction. */
- rd_kafka_toppar_tqhead_t txn_waitresp_rktps;
-
- /**< Partitions added and registered to transaction. */
- rd_kafka_toppar_tqhead_t txn_rktps;
-
- /**< Number of messages that failed delivery.
- * If this number is >0 on transaction_commit then an
- * abortable transaction error will be raised.
- * Is reset to zero on each begin_transaction(). */
- rd_atomic64_t txn_dr_fails;
-
- /**< Current transaction error. */
- rd_kafka_resp_err_t txn_err;
-
- /**< Current transaction error string, if any. */
- char *txn_errstr;
-
- /**< Last InitProducerIdRequest error. */
- rd_kafka_resp_err_t txn_init_err;
-
- /**< Waiting for transaction coordinator query response */
- rd_bool_t txn_wait_coord;
-
- /**< Transaction coordinator query timer */
- rd_kafka_timer_t txn_coord_tmr;
- } rk_eos;
-
- rd_atomic32_t rk_flushing; /**< Application is calling flush(). */
-
- /**
- * Consumer state
- *
- * @locality rdkafka main thread
- * @locks_required none
- */
- struct {
- /** Application consumer queue for messages, events and errors.
- * (typically points to rkcg_q) */
- rd_kafka_q_t *q;
- /** Current assigned partitions through assign() et.al. */
- rd_kafka_assignment_t assignment;
- /** Waiting for this number of commits to finish. */
- int wait_commit_cnt;
- } rk_consumer;
-
- /**<
- * Coordinator cache.
- *
- * @locks none
- * @locality rdkafka main thread
- */
- rd_kafka_coord_cache_t rk_coord_cache; /**< Coordinator cache */
-
- TAILQ_HEAD(, rd_kafka_coord_req_s)
- rk_coord_reqs; /**< Coordinator
- * requests */
-
-
- struct {
-		mtx_t lock;       /* Protects access to this struct */
- cnd_t cnd; /* For waking up blocking injectors */
- unsigned int cnt; /* Current message count */
- size_t size; /* Current message size sum */
- unsigned int max_cnt; /* Max limit */
- size_t max_size; /* Max limit */
- } rk_curr_msgs;
-
- rd_kafka_timers_t rk_timers;
- thrd_t rk_thread;
-
- int rk_initialized; /**< Will be > 0 when the rd_kafka_t
- * instance has been fully initialized. */
-
- int rk_init_wait_cnt; /**< Number of background threads that
- * need to finish initialization. */
- cnd_t rk_init_cnd; /**< Cond-var used to wait for main thread
-	                      *   to finish its initialization
-	                      *   before rd_kafka_new() returns. */
- mtx_t rk_init_lock; /**< Lock for rk_init_wait and _cmd */
-
- rd_ts_t rk_ts_created; /**< Timestamp (monotonic clock) of
- * rd_kafka_t creation. */
-
- /**
- * Background thread and queue,
- * enabled by setting `background_event_cb()`.
- */
- struct {
- rd_kafka_q_t *q; /**< Queue served by background thread. */
- thrd_t thread; /**< Background thread. */
- int calling; /**< Indicates whether the event callback
- * is being called, reset back to 0
- * when the callback returns.
- * This can be used for troubleshooting
- * purposes. */
- } rk_background;
-
-
- /*
- * Logs, events or actions to rate limit / suppress
- */
- struct {
- /**< Log: No brokers support Idempotent Producer */
- rd_interval_t no_idemp_brokers;
-
- /**< Sparse connections: randomly select broker
- * to bring up. This interval should allow
- * for a previous connection to be established,
- * which varies between different environments:
- * Use 10 < reconnect.backoff.jitter.ms / 2 < 1000.
- */
- rd_interval_t sparse_connect_random;
- /**< Lock for sparse_connect_random */
- mtx_t sparse_connect_lock;
-
- /**< Broker metadata refresh interval:
- * this is rate-limiting the number of topic-less
- * broker/cluster metadata refreshes when there are no
- * topics to refresh.
- * Will be refreshed every topic.metadata.refresh.interval.ms
- * but no more often than every 10s.
- * No locks: only accessed by rdkafka main thread. */
- rd_interval_t broker_metadata_refresh;
-
- /**< Suppression for allow.auto.create.topics=false not being
- * supported by the broker. */
- rd_interval_t allow_auto_create_topics;
- } rk_suppress;
-
- struct {
- void *handle; /**< Provider-specific handle struct pointer.
- * Typically assigned in provider's .init() */
- rd_kafka_q_t *callback_q; /**< SASL callback queue, if any. */
- } rk_sasl;
-
- /* Test mocks */
- struct {
- rd_kafka_mock_cluster_t *cluster; /**< Mock cluster, created
- * by test.mock.num.brokers
- */
- rd_atomic32_t cluster_cnt; /**< Total number of mock
- * clusters, created either
- * through
- * test.mock.num.brokers
- * or mock_cluster_new().
- */
-
- } rk_mock;
-};
-
-#define rd_kafka_wrlock(rk) rwlock_wrlock(&(rk)->rk_lock)
-#define rd_kafka_rdlock(rk) rwlock_rdlock(&(rk)->rk_lock)
-#define rd_kafka_rdunlock(rk) rwlock_rdunlock(&(rk)->rk_lock)
-#define rd_kafka_wrunlock(rk) rwlock_wrunlock(&(rk)->rk_lock)
-
-
-/**
- * @brief Add \p cnt messages of total size \p size bytes to the
- *        internal bookkeeping of current message counts.
- *        If the total message count or size after the add would exceed
- *        the configured limits \c queue.buffering.max.messages and
- *        \c queue.buffering.max.kbytes, the function either blocks
- *        until enough space is available (if \p block is 1) or
- *        immediately returns RD_KAFKA_RESP_ERR__QUEUE_FULL.
- *
- * @param rdlock If non-null and \p block is set and blocking is to ensue,
- *               then unlock this rwlock for the duration of the blocking
- *               and then reacquire it with a read-lock.
- */
-static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
-rd_kafka_curr_msgs_add(rd_kafka_t *rk,
- unsigned int cnt,
- size_t size,
- int block,
- rwlock_t *rdlock) {
-
- if (rk->rk_type != RD_KAFKA_PRODUCER)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- mtx_lock(&rk->rk_curr_msgs.lock);
- while (
- unlikely((rk->rk_curr_msgs.max_cnt > 0 &&
- rk->rk_curr_msgs.cnt + cnt > rk->rk_curr_msgs.max_cnt) ||
- (unsigned long long)(rk->rk_curr_msgs.size + size) >
- (unsigned long long)rk->rk_curr_msgs.max_size)) {
- if (!block) {
- mtx_unlock(&rk->rk_curr_msgs.lock);
- return RD_KAFKA_RESP_ERR__QUEUE_FULL;
- }
-
- if (rdlock)
- rwlock_rdunlock(rdlock);
-
- cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock);
-
- if (rdlock)
- rwlock_rdlock(rdlock);
- }
-
- rk->rk_curr_msgs.cnt += cnt;
- rk->rk_curr_msgs.size += size;
- mtx_unlock(&rk->rk_curr_msgs.lock);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Subtract \p cnt messages of total size \p size from the
- * current bookkeeping and broadcast a wakeup on the condvar
- * for any waiting & blocking threads.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_curr_msgs_sub(rd_kafka_t *rk, unsigned int cnt, size_t size) {
- int broadcast = 0;
-
- if (rk->rk_type != RD_KAFKA_PRODUCER)
- return;
-
- mtx_lock(&rk->rk_curr_msgs.lock);
- rd_kafka_assert(NULL, rk->rk_curr_msgs.cnt >= cnt &&
- rk->rk_curr_msgs.size >= size);
-
- /* If the subtraction would pass one of the thresholds
- * broadcast a wake-up to any waiting listeners. */
- if ((rk->rk_curr_msgs.cnt - cnt == 0) ||
- (rk->rk_curr_msgs.cnt >= rk->rk_curr_msgs.max_cnt &&
- rk->rk_curr_msgs.cnt - cnt < rk->rk_curr_msgs.max_cnt) ||
- (rk->rk_curr_msgs.size >= rk->rk_curr_msgs.max_size &&
- rk->rk_curr_msgs.size - size < rk->rk_curr_msgs.max_size))
- broadcast = 1;
-
- rk->rk_curr_msgs.cnt -= cnt;
- rk->rk_curr_msgs.size -= size;
-
- if (unlikely(broadcast))
- cnd_broadcast(&rk->rk_curr_msgs.cnd);
-
- mtx_unlock(&rk->rk_curr_msgs.lock);
-}
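-
-/* Editor's sketch (illustrative usage, assumed): a produce() path
- * reserves queue space with rd_kafka_curr_msgs_add() before enqueuing
- * a message, and the delivery path releases it again with
- * rd_kafka_curr_msgs_sub(), waking any thread blocked on the limits. */
-static RD_UNUSED rd_kafka_resp_err_t example_enqueue(rd_kafka_t *rk,
-                                                     size_t msg_size) {
-        rd_kafka_resp_err_t err;
-
-        /* With block=1 this waits until the queue.buffering.max.*
-         * limits permit one more message of msg_size bytes;
-         * with block=0 it would return RD_KAFKA_RESP_ERR__QUEUE_FULL. */
-        err = rd_kafka_curr_msgs_add(rk, 1, msg_size, 1 /*block*/, NULL);
-        if (err)
-                return err;
-
-        /* ... enqueue the message; on delivery (or failure) the
-         * accounting is reversed:
-         *     rd_kafka_curr_msgs_sub(rk, 1, msg_size);
-         */
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}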
-
-static RD_INLINE RD_UNUSED void
-rd_kafka_curr_msgs_get(rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) {
- if (rk->rk_type != RD_KAFKA_PRODUCER) {
- *cntp = 0;
- *sizep = 0;
- return;
- }
-
- mtx_lock(&rk->rk_curr_msgs.lock);
- *cntp = rk->rk_curr_msgs.cnt;
- *sizep = rk->rk_curr_msgs.size;
- mtx_unlock(&rk->rk_curr_msgs.lock);
-}
-
-static RD_INLINE RD_UNUSED int rd_kafka_curr_msgs_cnt(rd_kafka_t *rk) {
- int cnt;
- if (rk->rk_type != RD_KAFKA_PRODUCER)
- return 0;
-
- mtx_lock(&rk->rk_curr_msgs.lock);
- cnt = rk->rk_curr_msgs.cnt;
- mtx_unlock(&rk->rk_curr_msgs.lock);
-
- return cnt;
-}
-
-/**
- * @brief Wait at most \p timeout_ms for curr_msgs to reach 0.
- *
- * @returns rd_true if zero was reached, or rd_false on timeout.
- *          The remaining message count is returned in \p *curr_msgsp.
- */
-static RD_INLINE RD_UNUSED rd_bool_t
-rd_kafka_curr_msgs_wait_zero(rd_kafka_t *rk,
- int timeout_ms,
- unsigned int *curr_msgsp) {
- unsigned int cnt;
- struct timespec tspec;
-
- rd_timeout_init_timespec(&tspec, timeout_ms);
-
- mtx_lock(&rk->rk_curr_msgs.lock);
- while ((cnt = rk->rk_curr_msgs.cnt) > 0) {
- if (cnd_timedwait_abs(&rk->rk_curr_msgs.cnd,
- &rk->rk_curr_msgs.lock,
- &tspec) == thrd_timedout)
- break;
- }
- mtx_unlock(&rk->rk_curr_msgs.lock);
-
- *curr_msgsp = cnt;
- return cnt == 0;
-}
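-
-/* Editor's sketch (illustrative usage, assumed): a flush()-style call
- * could use the helper above to wait for all queued and in-flight
- * messages to be accounted for before returning. */
-static RD_UNUSED rd_kafka_resp_err_t example_flush(rd_kafka_t *rk,
-                                                   int timeout_ms) {
-        unsigned int remaining;
-
-        if (!rd_kafka_curr_msgs_wait_zero(rk, timeout_ms, &remaining))
-                return RD_KAFKA_RESP_ERR__TIMED_OUT; /* remaining > 0 */
-
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}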
-
-void rd_kafka_destroy_final(rd_kafka_t *rk);
-
-void rd_kafka_global_init(void);
-
-/**
- * @returns true if \p rk handle is terminating.
- *
- * @remark If consumer_close() is called from destroy*() it will be
- * called prior to _F_TERMINATE being set and will thus not
- * be able to use rd_kafka_terminating() to know it is shutting down.
- * That code should instead just check that rk_terminate is non-zero
- * (the _F_DESTROY_CALLED flag will be set).
- */
-#define rd_kafka_terminating(rk) \
- (rd_atomic32_get(&(rk)->rk_terminate) & RD_KAFKA_DESTROY_F_TERMINATE)
-
-/**
- * @returns the destroy flags set matching \p flags, which might be
- * a subset of the flags.
- */
-#define rd_kafka_destroy_flags_check(rk, flags) \
- (rd_atomic32_get(&(rk)->rk_terminate) & (flags))
-
-/**
- * @returns true if no consumer callbacks, or standard consumer_close
- * behaviour, should be triggered. */
-#define rd_kafka_destroy_flags_no_consumer_close(rk) \
- rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE)
-
-#define rd_kafka_is_simple_consumer(rk) \
- (rd_atomic32_get(&(rk)->rk_simple_cnt) > 0)
-int rd_kafka_simple_consumer_add(rd_kafka_t *rk);
-
-
-/**
- * @returns true if idempotency is enabled (producer only).
- */
-#define rd_kafka_is_idempotent(rk) ((rk)->rk_conf.eos.idempotence)
-
-/**
- * @returns true if the producer is transactional (producer only).
- */
-#define rd_kafka_is_transactional(rk) \
- ((rk)->rk_conf.eos.transactional_id != NULL)
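-
-/* Editor's sketch (illustrative, assumed): the configuration that
- * makes the two predicates above true, set through the public conf
- * API. Setting transactional.id implies idempotence. */
-static RD_UNUSED rd_kafka_conf_t *example_txn_conf(void) {
-        rd_kafka_conf_t *conf = rd_kafka_conf_new();
-        char errstr[512];
-
-        /* Makes rd_kafka_is_idempotent() true: */
-        rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr,
-                          sizeof(errstr));
-        /* Makes rd_kafka_is_transactional() true as well: */
-        rd_kafka_conf_set(conf, "transactional.id", "example-txn-id",
-                          errstr, sizeof(errstr));
-
-        return conf;
-}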
-
-
-#define RD_KAFKA_PURGE_F_ABORT_TXN \
- 0x100 /**< Internal flag used when \
- * aborting transaction */
-#define RD_KAFKA_PURGE_F_MASK 0x107
-const char *rd_kafka_purge_flags2str(int flags);
-
-
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-
-
-
-/**
- * Debug contexts
- */
-#define RD_KAFKA_DBG_GENERIC 0x1
-#define RD_KAFKA_DBG_BROKER 0x2
-#define RD_KAFKA_DBG_TOPIC 0x4
-#define RD_KAFKA_DBG_METADATA 0x8
-#define RD_KAFKA_DBG_FEATURE 0x10
-#define RD_KAFKA_DBG_QUEUE 0x20
-#define RD_KAFKA_DBG_MSG 0x40
-#define RD_KAFKA_DBG_PROTOCOL 0x80
-#define RD_KAFKA_DBG_CGRP 0x100
-#define RD_KAFKA_DBG_SECURITY 0x200
-#define RD_KAFKA_DBG_FETCH 0x400
-#define RD_KAFKA_DBG_INTERCEPTOR 0x800
-#define RD_KAFKA_DBG_PLUGIN 0x1000
-#define RD_KAFKA_DBG_CONSUMER 0x2000
-#define RD_KAFKA_DBG_ADMIN 0x4000
-#define RD_KAFKA_DBG_EOS 0x8000
-#define RD_KAFKA_DBG_MOCK 0x10000
-#define RD_KAFKA_DBG_ASSIGNOR 0x20000
-#define RD_KAFKA_DBG_CONF 0x40000
-#define RD_KAFKA_DBG_ALL 0xfffff
-#define RD_KAFKA_DBG_NONE 0x0
-
-
-void rd_kafka_log0(const rd_kafka_conf_t *conf,
- const rd_kafka_t *rk,
- const char *extra,
- int level,
- int ctx,
- const char *fac,
- const char *fmt,
- ...) RD_FORMAT(printf, 7, 8);
-
-#define rd_kafka_log(rk, level, fac, ...) \
- rd_kafka_log0(&rk->rk_conf, rk, NULL, level, RD_KAFKA_DBG_NONE, fac, \
- __VA_ARGS__)
-
-#define rd_kafka_dbg(rk, ctx, fac, ...) \
- do { \
- if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_##ctx))) \
- rd_kafka_log0(&rk->rk_conf, rk, NULL, LOG_DEBUG, \
- (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \
- } while (0)
-
-/* dbg() not requiring an rk, just the conf object, for early logging */
-#define rd_kafka_dbg0(conf, ctx, fac, ...) \
- do { \
- if (unlikely((conf)->debug & (RD_KAFKA_DBG_##ctx))) \
- rd_kafka_log0(conf, NULL, NULL, LOG_DEBUG, \
- (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \
- } while (0)
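-
-/* Editor's sketch (illustrative usage, assumed): a debug line in the
- * EOS context, only emitted when "eos" (or "all") is present in the
- * `debug` configuration property. */
-static RD_UNUSED void example_dbg(rd_kafka_t *rk) {
-        rd_kafka_dbg(rk, EOS, "EXAMPLE", "idempotence state is now %s",
-                     rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
-}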
-
-/* NOTE: The local copy of _logname is needed due to rkb_logname_lock
- * lock-ordering when logging another broker's name in the message. */
-#define rd_rkb_log0(rkb, level, ctx, fac, ...) \
- do { \
- char _logname[RD_KAFKA_NODENAME_SIZE]; \
- mtx_lock(&(rkb)->rkb_logname_lock); \
- rd_strlcpy(_logname, rkb->rkb_logname, sizeof(_logname)); \
- mtx_unlock(&(rkb)->rkb_logname_lock); \
- rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, (rkb)->rkb_rk, \
- _logname, level, ctx, fac, __VA_ARGS__); \
- } while (0)
-
-#define rd_rkb_log(rkb, level, fac, ...) \
- rd_rkb_log0(rkb, level, RD_KAFKA_DBG_NONE, fac, __VA_ARGS__)
-
-#define rd_rkb_dbg(rkb, ctx, fac, ...) \
- do { \
- if (unlikely((rkb)->rkb_rk->rk_conf.debug & \
- (RD_KAFKA_DBG_##ctx))) { \
- rd_rkb_log0(rkb, LOG_DEBUG, (RD_KAFKA_DBG_##ctx), fac, \
- __VA_ARGS__); \
- } \
- } while (0)
-
-
-
-extern rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code;
-
-static RD_UNUSED RD_INLINE rd_kafka_resp_err_t
-rd_kafka_set_last_error(rd_kafka_resp_err_t err, int errnox) {
- if (errnox) {
- /* MSVC:
- * This is the correct way to set errno on Windows,
-		 * but it is still pointless due to different errnos in
-		 * different runtimes:
- * https://social.msdn.microsoft.com/Forums/vstudio/en-US/b4500c0d-1b69-40c7-9ef5-08da1025b5bf/setting-errno-from-within-a-dll?forum=vclanguage/
- * errno is thus highly deprecated, and buggy, on Windows
- * when using librdkafka as a dynamically loaded DLL. */
- rd_set_errno(errnox);
- }
- rd_kafka_last_error_code = err;
- return err;
-}
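-
-/* Editor's sketch (illustrative usage, assumed): legacy public APIs
- * that report errors by return value typically both return the error
- * and record it for rd_kafka_last_error(), optionally mapping it to
- * an errno value. */
-#include <errno.h>
-
-static RD_UNUSED rd_kafka_resp_err_t example_api_fail(void) {
-        return rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
-                                       EINVAL);
-}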
-
-
-int rd_kafka_set_fatal_error0(rd_kafka_t *rk,
- rd_dolock_t do_lock,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) RD_FORMAT(printf, 4, 5);
-#define rd_kafka_set_fatal_error(rk, err, fmt, ...) \
- rd_kafka_set_fatal_error0(rk, RD_DO_LOCK, err, fmt, __VA_ARGS__)
-
-rd_kafka_error_t *rd_kafka_get_fatal_error(rd_kafka_t *rk);
-
-static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
-rd_kafka_fatal_error_code(rd_kafka_t *rk) {
-	/* This is an optimization to avoid an atomic read, which is costly
-	 * on some platforms:
- * Fatal errors are currently only raised by the idempotent producer
- * and static consumers (group.instance.id). */
- if ((rk->rk_type == RD_KAFKA_PRODUCER && rk->rk_conf.eos.idempotence) ||
- (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_conf.group_instance_id))
- return rd_atomic32_get(&rk->rk_fatal.err);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-extern rd_atomic32_t rd_kafka_thread_cnt_curr;
-extern char RD_TLS rd_kafka_thread_name[64];
-
-void rd_kafka_set_thread_name(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
-void rd_kafka_set_thread_sysname(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
-
-int rd_kafka_path_is_dir(const char *path);
-rd_bool_t rd_kafka_dir_is_empty(const char *path);
-
-rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_subscribe_rkt(rd_kafka_topic_t *rkt);
-
-
-/**
- * @returns the number of milliseconds the maximum poll interval
- * was exceeded, or 0 if not exceeded.
- *
- * @remark Only relevant for high-level consumer.
- *
- * @locality any
- * @locks none
- */
-static RD_INLINE RD_UNUSED int rd_kafka_max_poll_exceeded(rd_kafka_t *rk) {
- rd_ts_t last_poll;
- int exceeded;
-
- if (rk->rk_type != RD_KAFKA_CONSUMER)
- return 0;
-
- last_poll = rd_atomic64_get(&rk->rk_ts_last_poll);
-
- /* Application is blocked in librdkafka function, see
- * rd_kafka_app_poll_blocking(). */
- if (last_poll == INT64_MAX)
- return 0;
-
- exceeded = (int)((rd_clock() - last_poll) / 1000ll) -
- rk->rk_conf.max_poll_interval_ms;
-
- if (unlikely(exceeded > 0))
- return exceeded;
-
- return 0;
-}
-
-/**
- * @brief Call on entry to blocking polling function to indicate
- * that the application is blocked waiting for librdkafka
- * and that max.poll.interval.ms should not be enforced.
- *
- *        Call app_polled() upon return from the function calling
- *        this function to register the application's last poll time.
- *
- * @remark Only relevant for high-level consumer.
- *
- * @locality any
- * @locks none
- */
-static RD_INLINE RD_UNUSED void rd_kafka_app_poll_blocking(rd_kafka_t *rk) {
- if (rk->rk_type == RD_KAFKA_CONSUMER)
- rd_atomic64_set(&rk->rk_ts_last_poll, INT64_MAX);
-}
-
-/**
- * @brief Set the last application poll time to now.
- *
- * @remark Only relevant for high-level consumer.
- *
- * @locality any
- * @locks none
- */
-static RD_INLINE RD_UNUSED void rd_kafka_app_polled(rd_kafka_t *rk) {
- if (rk->rk_type == RD_KAFKA_CONSUMER)
- rd_atomic64_set(&rk->rk_ts_last_poll, rd_clock());
-}
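-
-/* Editor's sketch (illustrative call pattern, assumed): a blocking
- * consumer call suspends max.poll.interval.ms enforcement for its own
- * duration and registers a fresh poll timestamp on return.
- * example_blocking_wait is a hypothetical stand-in for the actual
- * blocking queue pop. */
-static RD_UNUSED void example_poll(rd_kafka_t *rk,
-                                   int timeout_ms,
-                                   void (*example_blocking_wait)(int)) {
-        rd_kafka_app_poll_blocking(rk); /* rk_ts_last_poll = INT64_MAX */
-        example_blocking_wait(timeout_ms);
-        rd_kafka_app_polled(rk);        /* rk_ts_last_poll = now */
-}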
-
-
-
-void rd_kafka_term_sig_handler(int sig);
-
-/**
- * rdkafka_background.c
- */
-int rd_kafka_background_thread_main(void *arg);
-rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size);
-
-
-#endif /* _RDKAFKA_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c
deleted file mode 100644
index c962d2d99..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_interceptor.h"
-#include "rdstring.h"
-
-/**
- * @brief Interceptor method reference
- */
-typedef struct rd_kafka_interceptor_method_s {
- union {
- rd_kafka_interceptor_f_on_conf_set_t *on_conf_set;
- rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup;
- rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy;
- rd_kafka_interceptor_f_on_new_t *on_new;
- rd_kafka_interceptor_f_on_destroy_t *on_destroy;
- rd_kafka_interceptor_f_on_send_t *on_send;
- rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement;
- rd_kafka_interceptor_f_on_consume_t *on_consume;
- rd_kafka_interceptor_f_on_commit_t *on_commit;
- rd_kafka_interceptor_f_on_request_sent_t *on_request_sent;
- rd_kafka_interceptor_f_on_response_received_t
- *on_response_received;
- rd_kafka_interceptor_f_on_thread_start_t *on_thread_start;
- rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit;
- rd_kafka_interceptor_f_on_broker_state_change_t
- *on_broker_state_change;
- void *generic; /* For easy assignment */
-
- } u;
- char *ic_name;
- void *ic_opaque;
-} rd_kafka_interceptor_method_t;
-
-/**
- * @brief Destroy interceptor method reference
- */
-static void rd_kafka_interceptor_method_destroy(void *ptr) {
- rd_kafka_interceptor_method_t *method = ptr;
- rd_free(method->ic_name);
- rd_free(method);
-}
-
-
-
-/**
- * @brief Handle an interceptor on_..() method call failure.
- */
-static RD_INLINE void
-rd_kafka_interceptor_failed(rd_kafka_t *rk,
- const rd_kafka_interceptor_method_t *method,
- const char *method_name,
- rd_kafka_resp_err_t err,
- const rd_kafka_message_t *rkmessage,
- const char *errstr) {
-
- /* FIXME: Suppress log messages, eventually */
- if (rkmessage)
- rd_kafka_log(
- rk, LOG_WARNING, "ICFAIL",
- "Interceptor %s failed %s for "
- "message on %s [%" PRId32 "] @ %" PRId64 ": %s%s%s",
- method->ic_name, method_name,
- rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition,
- rkmessage->offset, rd_kafka_err2str(err),
- errstr ? ": " : "", errstr ? errstr : "");
- else
- rd_kafka_log(rk, LOG_WARNING, "ICFAIL",
- "Interceptor %s failed %s: %s%s%s",
- method->ic_name, method_name,
- rd_kafka_err2str(err), errstr ? ": " : "",
- errstr ? errstr : "");
-}
-
-
-
-/**
- * @brief Create an interceptor method reference.
- *        (Duplicate rejection is handled by rd_kafka_interceptor_method_add().)
- */
-static rd_kafka_interceptor_method_t *
-rd_kafka_interceptor_method_new(const char *ic_name,
- void *func,
- void *ic_opaque) {
- rd_kafka_interceptor_method_t *method;
-
- method = rd_calloc(1, sizeof(*method));
- method->ic_name = rd_strdup(ic_name);
- method->ic_opaque = ic_opaque;
- method->u.generic = func;
-
- return method;
-}
-
-
-/**
- * @brief Method comparator to be used for finding, not sorting.
- */
-static int rd_kafka_interceptor_method_cmp(const void *_a, const void *_b) {
- const rd_kafka_interceptor_method_t *a = _a, *b = _b;
-
- if (a->u.generic != b->u.generic)
- return -1;
-
- return strcmp(a->ic_name, b->ic_name);
-}
-
-/**
- * @brief Add interceptor method reference
- */
-static rd_kafka_resp_err_t rd_kafka_interceptor_method_add(rd_list_t *list,
- const char *ic_name,
- void *func,
- void *ic_opaque) {
- rd_kafka_interceptor_method_t *method;
- const rd_kafka_interceptor_method_t skel = {.ic_name = (char *)ic_name,
- .u = {.generic = func}};
-
- /* Reject same method from same interceptor.
- * This is needed to avoid duplicate interceptors when configuration
- * objects are duplicated.
- * An exception is made for lists with _F_UNIQUE, which is currently
- * only on_conf_destroy() to allow interceptor cleanup. */
- if ((list->rl_flags & RD_LIST_F_UNIQUE) &&
- rd_list_find(list, &skel, rd_kafka_interceptor_method_cmp))
- return RD_KAFKA_RESP_ERR__CONFLICT;
-
- method = rd_kafka_interceptor_method_new(ic_name, func, ic_opaque);
- rd_list_add(list, method);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
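-
-/* Editor's sketch (illustrative, assumed): how an interceptor ends up
- * in one of these method lists, registered through the public
- * rd_kafka_interceptor_add_on_send() API. The duplicate check above is
- * what keeps a re-registered (ic_name, func) pair from being added
- * twice when configuration objects are duplicated. */
-static RD_UNUSED rd_kafka_resp_err_t
-example_on_send(rd_kafka_t *rk,
-                rd_kafka_message_t *rkmessage,
-                void *ic_opaque) {
-        /* Inspect or modify the message before partitioning/enqueue. */
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/* Registration, e.g. from a plugin's on_new() callback:
- *   rd_kafka_interceptor_add_on_send(rk, "example_ic",
- *                                    example_on_send, NULL);
- */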
-
-/**
- * @brief Destroy all interceptors
- * @locality application thread calling rd_kafka_conf_destroy() or
- * rd_kafka_destroy()
- */
-void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf) {
- rd_list_destroy(&conf->interceptors.on_conf_set);
- rd_list_destroy(&conf->interceptors.on_conf_dup);
- rd_list_destroy(&conf->interceptors.on_conf_destroy);
- rd_list_destroy(&conf->interceptors.on_new);
- rd_list_destroy(&conf->interceptors.on_destroy);
- rd_list_destroy(&conf->interceptors.on_send);
- rd_list_destroy(&conf->interceptors.on_acknowledgement);
- rd_list_destroy(&conf->interceptors.on_consume);
- rd_list_destroy(&conf->interceptors.on_commit);
- rd_list_destroy(&conf->interceptors.on_request_sent);
- rd_list_destroy(&conf->interceptors.on_response_received);
- rd_list_destroy(&conf->interceptors.on_thread_start);
- rd_list_destroy(&conf->interceptors.on_thread_exit);
- rd_list_destroy(&conf->interceptors.on_broker_state_change);
-
- /* Interceptor config */
- rd_list_destroy(&conf->interceptors.config);
-}
-
-
-/**
- * @brief Initialize interceptor sub-system for config object.
- * @locality application thread
- */
-static void rd_kafka_interceptors_init(rd_kafka_conf_t *conf) {
- rd_list_init(&conf->interceptors.on_conf_set, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_conf_dup, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
-	/* conf_destroy() allows duplicate entries. */
- rd_list_init(&conf->interceptors.on_conf_destroy, 0,
- rd_kafka_interceptor_method_destroy);
- rd_list_init(&conf->interceptors.on_new, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_destroy, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_send, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_acknowledgement, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_consume, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_commit, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_request_sent, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_response_received, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_thread_start, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_thread_exit, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
- rd_list_init(&conf->interceptors.on_broker_state_change, 0,
- rd_kafka_interceptor_method_destroy)
- ->rl_flags |= RD_LIST_F_UNIQUE;
-
- /* Interceptor config */
- rd_list_init(&conf->interceptors.config, 0,
- (void (*)(void *))rd_strtup_destroy);
-}
-
-
-
-/**
- * @name Configuration backend
- */
-
-
-/**
- * @brief Constructor called when configuration object is created.
- */
-void rd_kafka_conf_interceptor_ctor(int scope, void *pconf) {
- rd_kafka_conf_t *conf = pconf;
- assert(scope == _RK_GLOBAL);
- rd_kafka_interceptors_init(conf);
-}
-
-/**
- * @brief Destructor called when configuration object is destroyed.
- */
-void rd_kafka_conf_interceptor_dtor(int scope, void *pconf) {
- rd_kafka_conf_t *conf = pconf;
- assert(scope == _RK_GLOBAL);
- rd_kafka_interceptors_destroy(conf);
-}
-
-/**
- * @brief Copy-constructor called when configuration object \p psrcp is
- * duplicated to \p dstp.
- * @remark Interceptors are NOT copied, but interceptor config is.
- *
- */
-void rd_kafka_conf_interceptor_copy(int scope,
- void *pdst,
- const void *psrc,
- void *dstptr,
- const void *srcptr,
- size_t filter_cnt,
- const char **filter) {
- rd_kafka_conf_t *dconf = pdst;
- const rd_kafka_conf_t *sconf = psrc;
- int i;
- const rd_strtup_t *confval;
-
- assert(scope == _RK_GLOBAL);
-
- /* Apply interceptor configuration values.
- * on_conf_dup() has already been called for dconf so
- * on_conf_set() interceptors are already in place and we can
- * apply the configuration through the standard conf_set() API. */
- RD_LIST_FOREACH(confval, &sconf->interceptors.config, i) {
- size_t fi;
- size_t nlen = strlen(confval->name);
-
- /* Apply filter */
- for (fi = 0; fi < filter_cnt; fi++) {
- size_t flen = strlen(filter[fi]);
- if (nlen >= flen &&
- !strncmp(filter[fi], confval->name, flen))
- break;
- }
-
- if (fi < filter_cnt)
- continue; /* Filter matched: ignore property. */
-
- /* Ignore errors for now */
- rd_kafka_conf_set(dconf, confval->name, confval->value, NULL,
- 0);
- }
-}
-
-
-
-/**
- * @brief Call interceptor on_conf_set methods.
- * @locality application thread calling rd_kafka_conf_set() and
- * rd_kafka_conf_dup()
- */
-rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf,
- const char *name,
- const char *val,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &conf->interceptors.on_conf_set, i) {
- rd_kafka_conf_res_t res;
-
- res = method->u.on_conf_set(conf, name, val, errstr,
- errstr_size, method->ic_opaque);
- if (res == RD_KAFKA_CONF_UNKNOWN)
- continue;
-
- /* Add successfully handled properties to list of
- * interceptor config properties so conf_t objects
- * can be copied. */
- if (res == RD_KAFKA_CONF_OK)
- rd_list_add(&conf->interceptors.config,
- rd_strtup_new(name, val));
- return res;
- }
-
- return RD_KAFKA_CONF_UNKNOWN;
-}
-
-/**
- * @brief Call interceptor on_conf_dup methods.
- * @locality application thread calling rd_kafka_conf_dup()
- */
-void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf,
- const rd_kafka_conf_t *old_conf,
- size_t filter_cnt,
- const char **filter) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &old_conf->interceptors.on_conf_dup, i) {
- /* FIXME: Ignore error for now */
- method->u.on_conf_dup(new_conf, old_conf, filter_cnt, filter,
- method->ic_opaque);
- }
-}
-
-
-/**
- * @brief Call interceptor on_conf_destroy methods.
- * @locality application thread calling rd_kafka_conf_destroy(), rd_kafka_new(),
- * rd_kafka_destroy()
- */
-void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &conf->interceptors.on_conf_destroy, i) {
- /* FIXME: Ignore error for now */
- method->u.on_conf_destroy(method->ic_opaque);
- }
-}
-
-
-/**
- * @brief Call interceptor on_new methods.
- * @locality application thread calling rd_kafka_new()
- */
-void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf) {
- rd_kafka_interceptor_method_t *method;
- int i;
- char errstr[512];
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_new, i) {
- rd_kafka_resp_err_t err;
-
- err = method->u.on_new(rk, conf, method->ic_opaque, errstr,
- sizeof(errstr));
- if (unlikely(err))
- rd_kafka_interceptor_failed(rk, method, "on_new", err,
- NULL, errstr);
- }
-}
-
-
-
-/**
- * @brief Call interceptor on_destroy methods.
- * @locality application thread calling rd_kafka_new() or rd_kafka_destroy()
- */
-void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_destroy, i) {
- rd_kafka_resp_err_t err;
-
- err = method->u.on_destroy(rk, method->ic_opaque);
- if (unlikely(err))
- rd_kafka_interceptor_failed(rk, method, "on_destroy",
- err, NULL, NULL);
- }
-}
-
-
-
-/**
- * @brief Call interceptor on_send methods.
- * @locality application thread calling produce()
- */
-void rd_kafka_interceptors_on_send(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_send, i) {
- rd_kafka_resp_err_t err;
-
- err = method->u.on_send(rk, rkmessage, method->ic_opaque);
- if (unlikely(err))
- rd_kafka_interceptor_failed(rk, method, "on_send", err,
- rkmessage, NULL);
- }
-}
-
-
-
-/**
- * @brief Call interceptor on_acknowledgement methods.
- * @locality application thread calling poll(), or the broker thread if
- *           a dr callback has been set.
- */
-void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_acknowledgement,
- i) {
- rd_kafka_resp_err_t err;
-
- err = method->u.on_acknowledgement(rk, rkmessage,
- method->ic_opaque);
- if (unlikely(err))
- rd_kafka_interceptor_failed(rk, method,
- "on_acknowledgement", err,
- rkmessage, NULL);
- }
-}
-
-
-/**
- * @brief Call on_acknowledgement methods for all messages in queue.
- *
- * @param force_err If non-zero, sets this error on each message.
- *
- * @locality broker thread
- */
-void rd_kafka_interceptors_on_acknowledgement_queue(
- rd_kafka_t *rk,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_resp_err_t force_err) {
- rd_kafka_msg_t *rkm;
-
- RD_KAFKA_MSGQ_FOREACH(rkm, rkmq) {
- if (force_err)
- rkm->rkm_err = force_err;
- rd_kafka_interceptors_on_acknowledgement(rk,
- &rkm->rkm_rkmessage);
- }
-}
-
-
-/**
- * @brief Call interceptor on_consume methods.
- * @locality application thread calling poll(), consume() or similar prior to
- * passing the message to the application.
- */
-void rd_kafka_interceptors_on_consume(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_consume, i) {
- rd_kafka_resp_err_t err;
-
- err = method->u.on_consume(rk, rkmessage, method->ic_opaque);
- if (unlikely(err))
- rd_kafka_interceptor_failed(rk, method, "on_consume",
- err, rkmessage, NULL);
- }
-}
-
-
-/**
- * @brief Call interceptor on_commit methods.
- * @locality application thread calling poll(), consume() or similar,
- * or rdkafka main thread if no commit_cb or handler registered.
- */
-void rd_kafka_interceptors_on_commit(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_resp_err_t err) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_commit, i) {
- rd_kafka_resp_err_t ic_err;
-
- ic_err =
- method->u.on_commit(rk, offsets, err, method->ic_opaque);
- if (unlikely(ic_err))
- rd_kafka_interceptor_failed(rk, method, "on_commit",
- ic_err, NULL, NULL);
- }
-}
-
-
-/**
- * @brief Call interceptor on_request_sent methods
- * @locality internal broker thread
- */
-void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_request_sent, i) {
- rd_kafka_resp_err_t ic_err;
-
- ic_err = method->u.on_request_sent(
- rk, sockfd, brokername, brokerid, ApiKey, ApiVersion,
- CorrId, size, method->ic_opaque);
- if (unlikely(ic_err))
- rd_kafka_interceptor_failed(
- rk, method, "on_request_sent", ic_err, NULL, NULL);
- }
-}
-
-
-/**
- * @brief Call interceptor on_response_received methods
- * @locality internal broker thread
- */
-void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_response_received,
- i) {
- rd_kafka_resp_err_t ic_err;
-
- ic_err = method->u.on_response_received(
- rk, sockfd, brokername, brokerid, ApiKey, ApiVersion,
- CorrId, size, rtt, err, method->ic_opaque);
- if (unlikely(ic_err))
- rd_kafka_interceptor_failed(rk, method,
- "on_response_received",
- ic_err, NULL, NULL);
- }
-}
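-
-/* Example (illustrative sketch, not part of this file; assumes <stdio.h>):
- * an on_response_received interceptor that flags slow broker round-trips.
- * The signature mirrors the dispatch above; per the public header \p rtt
- * is the request round-trip time in microseconds. */
-static rd_kafka_resp_err_t my_on_response_received(rd_kafka_t *rk,
-                                                   int sockfd,
-                                                   const char *brokername,
-                                                   int32_t brokerid,
-                                                   int16_t ApiKey,
-                                                   int16_t ApiVersion,
-                                                   int32_t CorrId,
-                                                   size_t size,
-                                                   int64_t rtt,
-                                                   rd_kafka_resp_err_t err,
-                                                   void *ic_opaque) {
-        if (rtt > 100 * 1000) /* slower than 100ms */
-                fprintf(stderr, "slow response from %s (broker %" PRId32 ")\n",
-                        brokername, brokerid);
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}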
-
-
-void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_start, i) {
- rd_kafka_resp_err_t ic_err;
-
- ic_err = method->u.on_thread_start(
- rk, thread_type, rd_kafka_thread_name, method->ic_opaque);
- if (unlikely(ic_err))
- rd_kafka_interceptor_failed(
- rk, method, "on_thread_start", ic_err, NULL, NULL);
- }
-}
-
-
-void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_exit, i) {
- rd_kafka_resp_err_t ic_err;
-
- ic_err = method->u.on_thread_exit(
- rk, thread_type, rd_kafka_thread_name, method->ic_opaque);
- if (unlikely(ic_err))
- rd_kafka_interceptor_failed(
- rk, method, "on_thread_exit", ic_err, NULL, NULL);
- }
-}
-
-
-/**
- * @brief Call interceptor on_broker_state_change methods.
- * @locality any.
- */
-void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk,
- int32_t broker_id,
- const char *secproto,
- const char *name,
- int port,
- const char *state) {
- rd_kafka_interceptor_method_t *method;
- int i;
-
- RD_LIST_FOREACH(method,
- &rk->rk_conf.interceptors.on_broker_state_change, i) {
- rd_kafka_resp_err_t ic_err;
-
- ic_err = method->u.on_broker_state_change(
- rk, broker_id, secproto, name, port, state,
- method->ic_opaque);
- if (unlikely(ic_err))
- rd_kafka_interceptor_failed(rk, method,
- "on_broker_state_change",
- ic_err, NULL, NULL);
- }
-}
-
-
-
-/**
- * @name Public API (backend)
- * @{
- */
-
-
-rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(
- rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
- void *ic_opaque) {
- return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_set,
- ic_name, (void *)on_conf_set,
- ic_opaque);
-}
-
-rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(
- rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
- void *ic_opaque) {
- return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_dup,
- ic_name, (void *)on_conf_dup,
- ic_opaque);
-}
-
-rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(
- rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
- void *ic_opaque) {
- return rd_kafka_interceptor_method_add(
- &conf->interceptors.on_conf_destroy, ic_name,
- (void *)on_conf_destroy, ic_opaque);
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf,
- const char *ic_name,
- rd_kafka_interceptor_f_on_new_t *on_new,
- void *ic_opaque) {
- return rd_kafka_interceptor_method_add(
- &conf->interceptors.on_new, ic_name, (void *)on_new, ic_opaque);
-}
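-
-/* Typical wiring (illustrative sketch, not part of this file; all my_-
- * prefixed names are hypothetical): a conf-level on_new interceptor,
- * registered with rd_kafka_conf_interceptor_add_on_new() above, installs
- * the instance-level interceptors once the rd_kafka_t handle exists. */
-static rd_kafka_resp_err_t my_on_send(rd_kafka_t *rk,
-                                      rd_kafka_message_t *rkmessage,
-                                      void *ic_opaque) {
-        /* Inspect or annotate the message before it is enqueued. */
-        return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
-                                     const rd_kafka_conf_t *conf,
-                                     void *ic_opaque,
-                                     char *errstr,
-                                     size_t errstr_size) {
-        return rd_kafka_interceptor_add_on_send(rk, "my_ic", my_on_send,
-                                                ic_opaque);
-}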
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_destroy_t *on_destroy,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_destroy, ic_name, (void *)on_destroy,
- ic_opaque);
-}
-
-rd_kafka_resp_err_t
-rd_kafka_interceptor_add_on_send(rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_send_t *on_send,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_send, ic_name, (void *)on_send,
- ic_opaque);
-}
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_acknowledgement, ic_name,
- (void *)on_acknowledgement, ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_consume_t *on_consume,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_consume, ic_name, (void *)on_consume,
- ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_commit_t *on_commit,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_commit, ic_name, (void *)on_commit,
- ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_request_sent_t *on_request_sent,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_request_sent, ic_name,
- (void *)on_request_sent, ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_response_received_t *on_response_received,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_response_received, ic_name,
- (void *)on_response_received, ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_thread_start_t *on_thread_start,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_thread_start, ic_name,
- (void *)on_thread_start, ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_thread_exit, ic_name,
- (void *)on_thread_exit, ic_opaque);
-}
-
-
-rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(
- rd_kafka_t *rk,
- const char *ic_name,
- rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change,
- void *ic_opaque) {
- assert(!rk->rk_initialized);
- return rd_kafka_interceptor_method_add(
- &rk->rk_conf.interceptors.on_broker_state_change, ic_name,
- (void *)on_broker_state_change, ic_opaque);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h
deleted file mode 100644
index 85f061ba9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_INTERCEPTOR_H
-#define _RDKAFKA_INTERCEPTOR_H
-
-rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf,
- const char *name,
- const char *val,
- char *errstr,
- size_t errstr_size);
-void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf,
- const rd_kafka_conf_t *old_conf,
- size_t filter_cnt,
- const char **filter);
-void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf);
-void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf);
-void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk);
-void rd_kafka_interceptors_on_send(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage);
-void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage);
-void rd_kafka_interceptors_on_acknowledgement_queue(
- rd_kafka_t *rk,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_resp_err_t force_err);
-
-void rd_kafka_interceptors_on_consume(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage);
-void rd_kafka_interceptors_on_commit(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_resp_err_t err);
-
-void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size);
-
-void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err);
-
-void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type);
-void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type);
-
-void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk,
- int32_t broker_id,
- const char *secproto,
- const char *name,
- int port,
- const char *state);
-
-void rd_kafka_conf_interceptor_ctor(int scope, void *pconf);
-void rd_kafka_conf_interceptor_dtor(int scope, void *pconf);
-void rd_kafka_conf_interceptor_copy(int scope,
- void *pdst,
- const void *psrc,
- void *dstptr,
- const void *srcptr,
- size_t filter_cnt,
- const char **filter);
-
-void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf);
-
-#endif /* _RDKAFKA_INTERCEPTOR_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c
deleted file mode 100644
index b52108bb1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_lz4.h"
-
-#if WITH_LZ4_EXT
-#include <lz4frame.h>
-#else
-#include "lz4frame.h"
-#endif
-#include "rdxxhash.h"
-
-#include "rdbuf.h"
-
-/**
- * Fix-up bad LZ4 framing caused by buggy Kafka client / broker.
- * The LZ4F framing format is described in detail here:
- * https://github.com/lz4/lz4/blob/master/doc/lz4_Frame_format.md
- *
- * NOTE: This modifies 'inbuf'.
- *
- * Returns an error on failure to fix (nothing modified), else NO_ERROR.
- */
-static rd_kafka_resp_err_t
-rd_kafka_lz4_decompress_fixup_bad_framing(rd_kafka_broker_t *rkb,
- char *inbuf,
- size_t inlen) {
- static const char magic[4] = {0x04, 0x22, 0x4d, 0x18};
- uint8_t FLG, HC, correct_HC;
- size_t of = 4;
-
- /* Format is:
- * int32_t magic;
- * int8_t FLG;
- * int8_t BD;
- * [ int64_t contentSize; ]
- * int8_t HC;
- */
- if (inlen < 4 + 3 || memcmp(inbuf, magic, 4)) {
- rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP",
- "Unable to fix-up legacy LZ4 framing "
- "(%" PRIusz " bytes): invalid length or magic value",
- inlen);
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- }
-
- of = 4; /* past magic */
- FLG = inbuf[of++];
- of++; /* BD */
-
- if ((FLG >> 3) & 1) /* contentSize */
- of += 8;
-
- if (of >= inlen) {
- rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP",
- "Unable to fix-up legacy LZ4 framing "
- "(%" PRIusz " bytes): requires %" PRIusz " bytes",
- inlen, of);
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- }
-
- /* Header hash code */
- HC = inbuf[of];
-
- /* Calculate correct header hash code */
- correct_HC = (XXH32(inbuf + 4, of - 4, 0) >> 8) & 0xff;
-
- if (HC != correct_HC)
- inbuf[of] = correct_HC;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
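-
-/* Illustrative sketch (not part of this file): the proper and the legacy
- * header checksums differ only in whether the 4 magic bytes are included
- * in the XXH32 input; \p desc_len is the frame-descriptor length
- * (FLG up to, but not including, HC). */
-static uint8_t my_lz4f_hc(const char *frame, size_t desc_len, int legacy) {
-        if (legacy) /* buggy Kafka framing: hash includes the magic bytes */
-                return (XXH32(frame, 4 + desc_len, 0) >> 8) & 0xff;
-        /* proper LZ4F: hash starts after the magic bytes */
-        return (XXH32(frame + 4, desc_len, 0) >> 8) & 0xff;
-}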
-
-
-/**
- * Reverse of fix-up: break LZ4 framing caused to be compatbile with with
- * buggy Kafka client / broker.
- *
- * NOTE: This modifies 'outbuf'.
- *
- * Returns an error on failure to recognize format (nothing modified),
- * else NO_ERROR.
- */
-static rd_kafka_resp_err_t
-rd_kafka_lz4_compress_break_framing(rd_kafka_broker_t *rkb,
- char *outbuf,
- size_t outlen) {
- static const char magic[4] = {0x04, 0x22, 0x4d, 0x18};
- uint8_t FLG, HC, bad_HC;
- size_t of = 4;
-
- /* Format is:
- * int32_t magic;
- * int8_t FLG;
- * int8_t BD;
- * [ int64_t contentSize; ]
- * int8_t HC;
- */
- if (outlen < 4 + 3 || memcmp(outbuf, magic, 4)) {
- rd_rkb_dbg(rkb, BROKER, "LZ4FIXDOWN",
- "Unable to break legacy LZ4 framing "
- "(%" PRIusz " bytes): invalid length or magic value",
- outlen);
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- }
-
- of = 4; /* past magic */
- FLG = outbuf[of++];
- of++; /* BD */
-
- if ((FLG >> 3) & 1) /* contentSize */
- of += 8;
-
- if (of >= outlen) {
- rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP",
- "Unable to break legacy LZ4 framing "
- "(%" PRIusz " bytes): requires %" PRIusz " bytes",
- outlen, of);
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- }
-
- /* Header hash code */
- HC = outbuf[of];
-
- /* Calculate bad header hash code (include magic) */
- bad_HC = (XXH32(outbuf, of, 0) >> 8) & 0xff;
-
- if (HC != bad_HC)
- outbuf[of] = bad_HC;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief Decompress LZ4F (framed) data.
- * Kafka broker versions <0.10.0.0 (MsgVersion 0) breaks LZ4 framing
- * checksum, if \p proper_hc we assume the checksum is okay
- * (broker version >=0.10.0, MsgVersion >= 1) else we fix it up.
- *
- * @remark May modify \p inbuf (if not \p proper_hc)
- */
-rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb,
- int proper_hc,
- int64_t Offset,
- char *inbuf,
- size_t inlen,
- void **outbuf,
- size_t *outlenp) {
- LZ4F_errorCode_t code;
- LZ4F_decompressionContext_t dctx;
- LZ4F_frameInfo_t fi;
- size_t in_sz, out_sz;
- size_t in_of, out_of;
- size_t r;
- size_t estimated_uncompressed_size;
- size_t outlen;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- char *out = NULL;
-
- *outbuf = NULL;
-
- code = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
- if (LZ4F_isError(code)) {
- rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
- "Unable to create LZ4 decompression context: %s",
- LZ4F_getErrorName(code));
- return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- }
-
- if (!proper_hc) {
- /* The original/legacy LZ4 framing in Kafka was buggy and
- * calculated the LZ4 framing header hash code (HC) incorrectly.
- * We do a fix-up of it here. */
- if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb, inbuf,
- inlen)))
- goto done;
- }
-
- in_sz = inlen;
- r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz);
- if (LZ4F_isError(r)) {
- rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
- "Failed to gather LZ4 frame info: %s",
- LZ4F_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- /* If uncompressed size is unknown or out of bounds, use a sane
- * default (4x compression) and reallocate if needed.
- * More info on max size: http://stackoverflow.com/a/25751871/1821055
- * More info on lz4 compression ratios seen for different data sets:
- * http://dev.ti.com/tirex/content/simplelink_msp432p4_sdk_1_50_00_12/docs/lz4/users_guide/docguide.llQpgm/benchmarking.html
- */
- if (fi.contentSize == 0 || fi.contentSize > inlen * 255) {
- estimated_uncompressed_size = RD_MIN(
- inlen * 4, (size_t)(rkb->rkb_rk->rk_conf.max_msg_size));
- } else {
- estimated_uncompressed_size = (size_t)fi.contentSize;
- }
-
- /* Allocate output buffer, we increase this later if needed,
- * but hopefully not. */
- out = rd_malloc(estimated_uncompressed_size);
- if (!out) {
- rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC",
- "Unable to allocate decompression "
- "buffer of %" PRIusz " bytes: %s",
- estimated_uncompressed_size, rd_strerror(errno));
- err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- goto done;
- }
-
-
- /* Decompress input buffer to output buffer until input is exhausted. */
- outlen = estimated_uncompressed_size;
- in_of = in_sz;
- out_of = 0;
- while (in_of < inlen) {
- out_sz = outlen - out_of;
- in_sz = inlen - in_of;
- r = LZ4F_decompress(dctx, out + out_of, &out_sz, inbuf + in_of,
- &in_sz, NULL);
- if (unlikely(LZ4F_isError(r))) {
- rd_rkb_dbg(rkb, MSG, "LZ4DEC",
- "Failed to LZ4 (%s HC) decompress message "
- "(offset %" PRId64
- ") at "
- "payload offset %" PRIusz "/%" PRIusz ": %s",
- proper_hc ? "proper" : "legacy", Offset,
- in_of, inlen, LZ4F_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- rd_kafka_assert(NULL, out_of + out_sz <= outlen &&
- in_of + in_sz <= inlen);
- out_of += out_sz;
- in_of += in_sz;
- if (r == 0)
- break;
-
- /* Need to grow output buffer, this shouldn't happen if
- * contentSize was properly set. */
- if (unlikely(out_of == outlen)) {
- char *tmp;
- /* Grow exponentially with some factor > 1 (using 1.75)
- * for amortized O(1) copying */
- size_t extra = RD_MAX(outlen * 3 / 4, 1024);
-
- rd_atomic64_add(&rkb->rkb_c.zbuf_grow, 1);
-
- if (!(tmp = rd_realloc(out, outlen + extra))) {
- rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC",
- "Unable to grow decompression "
- "buffer to %" PRIusz "+%" PRIusz
- " bytes: %s",
- outlen, extra, rd_strerror(errno));
- err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- goto done;
- }
- out = tmp;
- outlen += extra;
- }
- }
-
-
- if (in_of < inlen) {
- rd_rkb_dbg(rkb, MSG, "LZ4DEC",
- "Failed to LZ4 (%s HC) decompress message "
- "(offset %" PRId64
- "): "
- "%" PRIusz " (out of %" PRIusz ") bytes remaining",
- proper_hc ? "proper" : "legacy", Offset,
- inlen - in_of, inlen);
- err = RD_KAFKA_RESP_ERR__BAD_MSG;
- goto done;
- }
-
- *outbuf = out;
- *outlenp = out_of;
-
-done:
- code = LZ4F_freeDecompressionContext(dctx);
- if (LZ4F_isError(code)) {
- rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
- "Failed to close LZ4 compression context: %s",
- LZ4F_getErrorName(code));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- }
-
- if (err && out)
- rd_free(out);
-
- return err;
-}
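-
-/* The LZ4F_decompress() calling convention used above, in isolation
- * (illustrative sketch, not part of this file): both size arguments are
- * value-result parameters and a return value of 0 means the frame is
- * complete. Assumes \p dst is large enough; the real code above grows
- * the buffer instead. */
-static int my_lz4f_drain(LZ4F_decompressionContext_t dctx,
-                         char *dst,
-                         size_t dst_len,
-                         const char *src,
-                         size_t src_len) {
-        size_t in_of = 0, out_of = 0;
-
-        while (in_of < src_len) {
-                size_t out_sz = dst_len - out_of; /* in: room, out: written */
-                size_t in_sz  = src_len - in_of;  /* in: avail, out: consumed */
-                size_t r      = LZ4F_decompress(dctx, dst + out_of, &out_sz,
-                                                src + in_of, &in_sz, NULL);
-                if (LZ4F_isError(r))
-                        return -1;
-                out_of += out_sz;
-                in_of += in_sz;
-                if (r == 0)
-                        break; /* frame complete */
-        }
-        return (int)out_of;
-}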
-
-
-/**
- * Allocate space for \p *outbuf and compress all data remaining in \p slice.
- * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0,
- *                  MsgVersion >= 1)
- *
- * @returns allocated buffer in \p *outbuf, length in \p *outlenp.
- */
-rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb,
- int proper_hc,
- int comp_level,
- rd_slice_t *slice,
- void **outbuf,
- size_t *outlenp) {
- LZ4F_compressionContext_t cctx;
- LZ4F_errorCode_t r;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- size_t len = rd_slice_remains(slice);
- size_t out_sz;
- size_t out_of = 0;
- char *out;
- const void *p;
- size_t rlen;
-
- /* Required by Kafka */
- const LZ4F_preferences_t prefs = {
- .frameInfo = {.blockMode = LZ4F_blockIndependent},
- .compressionLevel = comp_level};
-
- *outbuf = NULL;
-
- out_sz = LZ4F_compressBound(len, NULL) + 1000;
- if (LZ4F_isError(out_sz)) {
- rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
- "Unable to query LZ4 compressed size "
- "(for %" PRIusz " uncompressed bytes): %s",
- len, LZ4F_getErrorName(out_sz));
- return RD_KAFKA_RESP_ERR__BAD_MSG;
- }
-
- out = rd_malloc(out_sz);
- if (!out) {
- rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
- "Unable to allocate output buffer "
- "(%" PRIusz " bytes): %s",
- out_sz, rd_strerror(errno));
- return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- }
-
- r = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
- if (LZ4F_isError(r)) {
- rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
- "Unable to create LZ4 compression context: %s",
- LZ4F_getErrorName(r));
- rd_free(out);
- return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- }
-
- r = LZ4F_compressBegin(cctx, out, out_sz, &prefs);
- if (LZ4F_isError(r)) {
- rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
- "Unable to begin LZ4 compression "
- "(out buffer is %" PRIusz " bytes): %s",
- out_sz, LZ4F_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- out_of += r;
-
- while ((rlen = rd_slice_reader(slice, &p))) {
- rd_assert(out_of < out_sz);
- r = LZ4F_compressUpdate(cctx, out + out_of, out_sz - out_of, p,
- rlen, NULL);
- if (unlikely(LZ4F_isError(r))) {
- rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
- "LZ4 compression failed "
- "(at of %" PRIusz
- " bytes, with "
- "%" PRIusz
- " bytes remaining in out buffer): "
- "%s",
- rlen, out_sz - out_of, LZ4F_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- out_of += r;
- }
-
- rd_assert(rd_slice_remains(slice) == 0);
-
- r = LZ4F_compressEnd(cctx, out + out_of, out_sz - out_of, NULL);
- if (unlikely(LZ4F_isError(r))) {
- rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
- "Failed to finalize LZ4 compression "
- "of %" PRIusz " bytes: %s",
- len, LZ4F_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- out_of += r;
-
- /* For the broken legacy framing we need to mess up the header checksum
- * so that the Kafka client / broker code accepts it. */
- if (!proper_hc)
- if ((err =
- rd_kafka_lz4_compress_break_framing(rkb, out, out_of)))
- goto done;
-
-
- *outbuf = out;
- *outlenp = out_of;
-
-done:
- LZ4F_freeCompressionContext(cctx);
-
- if (err)
- rd_free(out);
-
- return err;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h
deleted file mode 100644
index eb0ef9883..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDKAFKA_LZ4_H_
-#define _RDKAFKA_LZ4_H_
-
-
-rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb,
- int proper_hc,
- int64_t Offset,
- char *inbuf,
- size_t inlen,
- void **outbuf,
- size_t *outlenp);
-
-rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb,
- int proper_hc,
- int comp_level,
- rd_slice_t *slice,
- void **outbuf,
- size_t *outlenp);
-
-#endif /* _RDKAFKA_LZ4_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c
deleted file mode 100644
index 4e32e5d58..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c
+++ /dev/null
@@ -1,1468 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_metadata.h"
-
-#include <string.h>
-#include <stdarg.h>
-
-
-rd_kafka_resp_err_t
-rd_kafka_metadata(rd_kafka_t *rk,
- int all_topics,
- rd_kafka_topic_t *only_rkt,
- const struct rd_kafka_metadata **metadatap,
- int timeout_ms) {
- rd_kafka_q_t *rkq;
- rd_kafka_broker_t *rkb;
- rd_kafka_op_t *rko;
- rd_ts_t ts_end = rd_timeout_init(timeout_ms);
- rd_list_t topics;
- rd_bool_t allow_auto_create_topics =
- rk->rk_conf.allow_auto_create_topics;
-
- /* Query any broker that is up, and if none are up pick the first one,
- * if we're lucky it will be up before the timeout */
- rkb = rd_kafka_broker_any_usable(rk, timeout_ms, RD_DO_LOCK, 0,
- "application metadata request");
- if (!rkb)
- return RD_KAFKA_RESP_ERR__TRANSPORT;
-
- rkq = rd_kafka_q_new(rk);
-
- rd_list_init(&topics, 0, rd_free);
- if (!all_topics) {
- if (only_rkt)
- rd_list_add(&topics,
- rd_strdup(rd_kafka_topic_name(only_rkt)));
- else {
- int cache_cnt;
- rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics,
- &cache_cnt);
- /* Don't trigger auto-create for cached topics */
- if (rd_list_cnt(&topics) == cache_cnt)
- allow_auto_create_topics = rd_true;
- }
- }
-
- /* Async: request metadata */
- rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA);
- rd_kafka_op_set_replyq(rko, rkq, 0);
- rko->rko_u.metadata.force = 1; /* Force metadata request regardless
- * of outstanding metadata requests. */
- rd_kafka_MetadataRequest(rkb, &topics, "application requested",
- allow_auto_create_topics,
- /* cgrp_update:
- * Only update consumer group state
- * on response if this lists all
- * topics in the cluster, since a
- * partial request may make it seem
- * like some subscribed topics are missing. */
- all_topics ? rd_true : rd_false, rko);
-
- rd_list_destroy(&topics);
- rd_kafka_broker_destroy(rkb);
-
- /* Wait for reply (or timeout) */
- rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(ts_end), 0);
-
- rd_kafka_q_destroy_owner(rkq);
-
- /* Timeout */
- if (!rko)
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- /* Error */
- if (rko->rko_err) {
- rd_kafka_resp_err_t err = rko->rko_err;
- rd_kafka_op_destroy(rko);
- return err;
- }
-
- /* Reply: pass metadata pointer to the application, which now owns it */
- rd_kafka_assert(rk, rko->rko_u.metadata.md);
- *metadatap = rko->rko_u.metadata.md;
- rko->rko_u.metadata.md = NULL;
- rd_kafka_op_destroy(rko);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
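-
-/* Application-side usage of the public API above (illustrative sketch,
- * not part of this file; assumes an already-created \p rk handle and
- * <stdio.h>): */
-static void my_dump_topics(rd_kafka_t *rk) {
-        const struct rd_kafka_metadata *md;
-        rd_kafka_resp_err_t err;
-        int i;
-
-        err = rd_kafka_metadata(rk, 1 /*all_topics*/, NULL, &md,
-                                5000 /*timeout_ms*/);
-        if (err) {
-                fprintf(stderr, "metadata failed: %s\n",
-                        rd_kafka_err2str(err));
-                return;
-        }
-
-        for (i = 0; i < md->topic_cnt; i++)
-                printf("%s: %d partition(s)\n", md->topics[i].topic,
-                       md->topics[i].partition_cnt);
-
-        rd_kafka_metadata_destroy(md); /* the application owns the copy */
-}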
-
-
-
-void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata) {
- rd_free((void *)metadata);
-}
-
-
-/**
- * @returns a newly allocated copy of metadata \p src of size \p size
- */
-struct rd_kafka_metadata *
-rd_kafka_metadata_copy(const struct rd_kafka_metadata *src, size_t size) {
- struct rd_kafka_metadata *md;
- rd_tmpabuf_t tbuf;
- int i;
-
- /* Metadata is stored in one contiguous buffer where structs and
- * pointed-to fields are laid out in a memory-aligned fashion.
- * rd_tmpabuf_t provides the infrastructure to do this.
- * Because of this we copy all the structs verbatim but
- * any pointer fields need to be copied explicitly to update
- * the pointer address. */
- rd_tmpabuf_new(&tbuf, size, 1 /*assert on fail*/);
- md = rd_tmpabuf_write(&tbuf, src, sizeof(*md));
-
- rd_tmpabuf_write_str(&tbuf, src->orig_broker_name);
-
-
- /* Copy Brokers */
- md->brokers = rd_tmpabuf_write(&tbuf, src->brokers,
- md->broker_cnt * sizeof(*md->brokers));
-
- for (i = 0; i < md->broker_cnt; i++)
- md->brokers[i].host =
- rd_tmpabuf_write_str(&tbuf, src->brokers[i].host);
-
-
- /* Copy TopicMetadata */
- md->topics = rd_tmpabuf_write(&tbuf, src->topics,
- md->topic_cnt * sizeof(*md->topics));
-
- for (i = 0; i < md->topic_cnt; i++) {
- int j;
-
- md->topics[i].topic =
- rd_tmpabuf_write_str(&tbuf, src->topics[i].topic);
-
-
- /* Copy partitions */
- md->topics[i].partitions =
- rd_tmpabuf_write(&tbuf, src->topics[i].partitions,
- md->topics[i].partition_cnt *
- sizeof(*md->topics[i].partitions));
-
- for (j = 0; j < md->topics[i].partition_cnt; j++) {
- /* Copy replicas and ISRs */
- md->topics[i].partitions[j].replicas = rd_tmpabuf_write(
- &tbuf, src->topics[i].partitions[j].replicas,
- md->topics[i].partitions[j].replica_cnt *
- sizeof(*md->topics[i].partitions[j].replicas));
-
- md->topics[i].partitions[j].isrs = rd_tmpabuf_write(
- &tbuf, src->topics[i].partitions[j].isrs,
- md->topics[i].partitions[j].isr_cnt *
- sizeof(*md->topics[i].partitions[j].isrs));
- }
- }
-
- /* Check for tmpabuf errors */
- if (rd_tmpabuf_failed(&tbuf))
- rd_kafka_assert(NULL, !*"metadata copy failed");
-
- /* Deliberately not destroying the tmpabuf since we return
- * its allocated memory. */
-
- return md;
-}
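-
-/* The copy pattern above in miniature (illustrative sketch, not part of
- * this file; assumes <stdlib.h> and <string.h>): size everything up front,
- * lay the struct and its strings out in one allocation, and fix up the
- * pointers so a single free releases the whole copy. */
-struct my_flat {
-        char *name;
-};
-
-static struct my_flat *my_flat_copy(const struct my_flat *src) {
-        size_t nlen         = strlen(src->name) + 1;
-        struct my_flat *dst = malloc(sizeof(*dst) + nlen);
-
-        if (!dst)
-                return NULL;
-        *dst      = *src;              /* copy struct verbatim */
-        dst->name = (char *)(dst + 1); /* re-point into this buffer */
-        memcpy(dst->name, src->name, nlen);
-        return dst; /* free(dst) releases struct and string alike */
-}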
-
-
-
-/**
- * @brief Partition (id) comparator for partition_id_leader_epoch struct.
- */
-static int rd_kafka_metadata_partition_leader_epoch_cmp(const void *_a,
- const void *_b) {
- const rd_kafka_partition_leader_epoch_t *a = _a, *b = _b;
- return RD_CMP(a->partition_id, b->partition_id);
-}
-
-
-
-/**
- * @brief Update topic state and information based on topic metadata.
- *
- * @param mdt Topic metadata.
- * @param leader_epochs Per-partition leader epoch array, or NULL if not known.
- *
- * @locality rdkafka main thread
- * @locks_acquired rd_kafka_wrlock(rk)
- */
-static void rd_kafka_parse_Metadata_update_topic(
- rd_kafka_broker_t *rkb,
- const rd_kafka_metadata_topic_t *mdt,
- const rd_kafka_partition_leader_epoch_t *leader_epochs) {
-
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- /* The indent below is intentional */
- " Topic %s with %i partitions%s%s", mdt->topic,
- mdt->partition_cnt, mdt->err ? ": " : "",
- mdt->err ? rd_kafka_err2str(mdt->err) : "");
-
- /* Ignore metadata completely for temporary errors. (issue #513)
- * LEADER_NOT_AVAILABLE: Broker is rebalancing
- */
- if (mdt->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE &&
- mdt->partition_cnt == 0) {
- rd_rkb_dbg(rkb, TOPIC, "METADATA",
- "Temporary error in metadata reply for "
- "topic %s (PartCnt %i): %s: ignoring",
- mdt->topic, mdt->partition_cnt,
- rd_kafka_err2str(mdt->err));
- } else {
- /* Update local topic & partition state based
- * on metadata */
- rd_kafka_topic_metadata_update2(rkb, mdt, leader_epochs);
- }
-}
-
-/**
- * @brief Only brokers with Metadata version >= 9 have reliable leader
- * epochs. Before that version, leader epoch must be treated
- * as missing (-1).
- *
- * @param rkb The broker
- * @return Is this a broker version with reliable leader epochs?
- *
- * @locality rdkafka main thread
- */
-rd_bool_t rd_kafka_has_reliable_leader_epochs(rd_kafka_broker_t *rkb) {
- int features;
- int16_t ApiVersion = 0;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_Metadata, 0, 9, &features);
-
- return ApiVersion >= 9;
-}
-
-
-/**
- * @brief Handle a Metadata response message.
- *
- * @param topics are the requested topics (may be NULL)
- *
- * The metadata will be marshalled into 'struct rd_kafka_metadata*' structs.
- *
- * The marshalled metadata is returned in \p *mdp (NULL on error).
- *
- * @returns an error code on parse failure, else NO_ERROR.
- *
- * @locality rdkafka main thread
- */
-rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *request,
- rd_kafka_buf_t *rkbuf,
- struct rd_kafka_metadata **mdp) {
- rd_kafka_t *rk = rkb->rkb_rk;
- int i, j, k;
- rd_tmpabuf_t tbuf;
- struct rd_kafka_metadata *md = NULL;
- size_t rkb_namelen;
- const int log_decode_errors = LOG_ERR;
- rd_list_t *missing_topics = NULL;
- const rd_list_t *requested_topics = request->rkbuf_u.Metadata.topics;
- rd_bool_t all_topics = request->rkbuf_u.Metadata.all_topics;
- rd_bool_t cgrp_update =
- request->rkbuf_u.Metadata.cgrp_update && rk->rk_cgrp;
- const char *reason = request->rkbuf_u.Metadata.reason
- ? request->rkbuf_u.Metadata.reason
- : "(no reason)";
- int ApiVersion = request->rkbuf_reqhdr.ApiVersion;
- rd_kafkap_str_t cluster_id = RD_ZERO_INIT;
- int32_t controller_id = -1;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int broker_changes = 0;
- int cache_changes = 0;
- /** This array is reused and resized as necessary to hold per-partition
- * leader epochs (ApiVersion >= 7). */
- rd_kafka_partition_leader_epoch_t *leader_epochs = NULL;
- /** Number of allocated elements in leader_epochs. */
- size_t leader_epochs_size = 0;
- rd_ts_t ts_start = rd_clock();
-
- /* Ignore metadata updates when terminating */
- if (rd_kafka_terminating(rkb->rkb_rk)) {
- err = RD_KAFKA_RESP_ERR__DESTROY;
- goto done;
- }
-
- rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread));
-
- /* Remove topics from missing_topics as they are seen in Metadata. */
- if (requested_topics)
- missing_topics =
- rd_list_copy(requested_topics, rd_list_string_copy, NULL);
-
- rd_kafka_broker_lock(rkb);
- rkb_namelen = strlen(rkb->rkb_name) + 1;
- /* We assume that the marshalled representation is
- * no more than 4 times larger than the wire representation. */
- rd_tmpabuf_new(&tbuf,
- sizeof(*md) + rkb_namelen + (rkbuf->rkbuf_totlen * 4),
- 0 /*dont assert on fail*/);
-
- if (!(md = rd_tmpabuf_alloc(&tbuf, sizeof(*md)))) {
- rd_kafka_broker_unlock(rkb);
- err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- goto err;
- }
-
- md->orig_broker_id = rkb->rkb_nodeid;
- md->orig_broker_name =
- rd_tmpabuf_write(&tbuf, rkb->rkb_name, rkb_namelen);
- rd_kafka_broker_unlock(rkb);
-
- if (ApiVersion >= 3)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- /* Read Brokers */
- rd_kafka_buf_read_arraycnt(rkbuf, &md->broker_cnt,
- RD_KAFKAP_BROKERS_MAX);
-
- if (!(md->brokers = rd_tmpabuf_alloc(&tbuf, md->broker_cnt *
- sizeof(*md->brokers))))
- rd_kafka_buf_parse_fail(rkbuf,
- "%d brokers: tmpabuf memory shortage",
- md->broker_cnt);
-
- for (i = 0; i < md->broker_cnt; i++) {
- rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].id);
- rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf,
- md->brokers[i].host);
- rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].port);
-
- if (ApiVersion >= 1) {
- rd_kafkap_str_t rack;
- rd_kafka_buf_read_str(rkbuf, &rack);
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- if (ApiVersion >= 2)
- rd_kafka_buf_read_str(rkbuf, &cluster_id);
-
- if (ApiVersion >= 1) {
- rd_kafka_buf_read_i32(rkbuf, &controller_id);
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "ClusterId: %.*s, ControllerId: %" PRId32,
- RD_KAFKAP_STR_PR(&cluster_id), controller_id);
- }
-
-
-
- /* Read TopicMetadata */
- rd_kafka_buf_read_arraycnt(rkbuf, &md->topic_cnt, RD_KAFKAP_TOPICS_MAX);
- rd_rkb_dbg(rkb, METADATA, "METADATA", "%i brokers, %i topics",
- md->broker_cnt, md->topic_cnt);
-
- if (!(md->topics =
- rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics))))
- rd_kafka_buf_parse_fail(
- rkbuf, "%d topics: tmpabuf memory shortage", md->topic_cnt);
-
- for (i = 0; i < md->topic_cnt; i++) {
- rd_kafka_buf_read_i16a(rkbuf, md->topics[i].err);
- rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf,
- md->topics[i].topic);
- if (ApiVersion >= 1) {
- int8_t is_internal;
- rd_kafka_buf_read_i8(rkbuf, &is_internal);
- }
-
- /* PartitionMetadata */
- rd_kafka_buf_read_arraycnt(rkbuf, &md->topics[i].partition_cnt,
- RD_KAFKAP_PARTITIONS_MAX);
-
- if (!(md->topics[i].partitions = rd_tmpabuf_alloc(
- &tbuf, md->topics[i].partition_cnt *
- sizeof(*md->topics[i].partitions))))
- rd_kafka_buf_parse_fail(rkbuf,
- "%s: %d partitions: "
- "tmpabuf memory shortage",
- md->topics[i].topic,
- md->topics[i].partition_cnt);
-
- /* Resize reused leader_epochs array to fit this partition's
- * leader epochs. */
- if (ApiVersion >= 7 && md->topics[i].partition_cnt > 0 &&
- (size_t)md->topics[i].partition_cnt > leader_epochs_size) {
- leader_epochs_size =
- RD_MAX(32, md->topics[i].partition_cnt);
- leader_epochs =
- rd_realloc(leader_epochs, sizeof(*leader_epochs) *
- leader_epochs_size);
- }
-
- for (j = 0; j < md->topics[i].partition_cnt; j++) {
- rd_kafka_buf_read_i16a(rkbuf,
- md->topics[i].partitions[j].err);
- rd_kafka_buf_read_i32a(rkbuf,
- md->topics[i].partitions[j].id);
- rd_kafka_buf_read_i32a(
- rkbuf, md->topics[i].partitions[j].leader);
- if (ApiVersion >= 7) {
- leader_epochs[j].partition_id =
- md->topics[i].partitions[j].id;
- rd_kafka_buf_read_i32(
- rkbuf, &leader_epochs[j].leader_epoch);
- }
-
- /* Replicas */
- rd_kafka_buf_read_arraycnt(
- rkbuf, &md->topics[i].partitions[j].replica_cnt,
- RD_KAFKAP_BROKERS_MAX);
-
- if (!(md->topics[i].partitions[j].replicas =
- rd_tmpabuf_alloc(
- &tbuf,
- md->topics[i].partitions[j].replica_cnt *
- sizeof(*md->topics[i]
- .partitions[j]
- .replicas))))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "%s [%" PRId32
- "]: %d replicas: "
- "tmpabuf memory shortage",
- md->topics[i].topic,
- md->topics[i].partitions[j].id,
- md->topics[i].partitions[j].replica_cnt);
-
-
- for (k = 0; k < md->topics[i].partitions[j].replica_cnt;
- k++)
- rd_kafka_buf_read_i32a(
- rkbuf,
- md->topics[i].partitions[j].replicas[k]);
-
- /* Isrs */
- rd_kafka_buf_read_arraycnt(
- rkbuf, &md->topics[i].partitions[j].isr_cnt,
- RD_KAFKAP_BROKERS_MAX);
-
- if (!(md->topics[i]
- .partitions[j]
- .isrs = rd_tmpabuf_alloc(
- &tbuf,
- md->topics[i].partitions[j].isr_cnt *
- sizeof(
- *md->topics[i].partitions[j].isrs))))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "%s [%" PRId32
- "]: %d isrs: "
- "tmpabuf memory shortage",
- md->topics[i].topic,
- md->topics[i].partitions[j].id,
- md->topics[i].partitions[j].isr_cnt);
-
-
- for (k = 0; k < md->topics[i].partitions[j].isr_cnt;
- k++)
- rd_kafka_buf_read_i32a(
- rkbuf, md->topics[i].partitions[j].isrs[k]);
-
- if (ApiVersion >= 5) {
- /* OfflineReplicas int32 array (ignored) */
- int32_t offline_replicas_cnt;
-
- /* #OfflineReplicas */
- rd_kafka_buf_read_arraycnt(
- rkbuf, &offline_replicas_cnt,
- RD_KAFKAP_BROKERS_MAX);
- rd_kafka_buf_skip(rkbuf, offline_replicas_cnt *
- sizeof(int32_t));
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- if (ApiVersion >= 8) {
- int32_t TopicAuthorizedOperations;
- /* TopicAuthorizedOperations */
- rd_kafka_buf_read_i32(rkbuf,
- &TopicAuthorizedOperations);
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
-
- /* Ignore topics in blacklist */
- if (rkb->rkb_rk->rk_conf.topic_blacklist &&
- rd_kafka_pattern_match(rkb->rkb_rk->rk_conf.topic_blacklist,
- md->topics[i].topic)) {
- rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_METADATA,
- "BLACKLIST",
- "Ignoring blacklisted topic \"%s\" "
- "in metadata",
- md->topics[i].topic);
- continue;
- }
-
- if (leader_epochs_size > 0 &&
- !rd_kafka_has_reliable_leader_epochs(rkb)) {
- /* Prior to Kafka version 2.4 (which coincides with
- * Metadata version 9), the broker does not propagate
- * leader epoch information accurately while a
- * reassignment is in progress. Relying on a stale
- * epoch can lead to FENCED_LEADER_EPOCH errors which
- * can prevent consumption throughout the course of
- * a reassignment. It is safer in this case to revert
- * to the behavior in previous protocol versions
- * which checks leader status only. */
- leader_epochs_size = 0;
- rd_free(leader_epochs);
- leader_epochs = NULL;
- }
-
-
- /* Sort partitions by partition id */
- qsort(md->topics[i].partitions, md->topics[i].partition_cnt,
- sizeof(*md->topics[i].partitions),
- rd_kafka_metadata_partition_id_cmp);
- if (leader_epochs_size > 0) {
- /* And sort leader_epochs by partition id */
- qsort(leader_epochs, md->topics[i].partition_cnt,
- sizeof(*leader_epochs),
- rd_kafka_metadata_partition_leader_epoch_cmp);
- }
-
- /* Update topic state based on the topic metadata */
- rd_kafka_parse_Metadata_update_topic(rkb, &md->topics[i],
- leader_epochs);
-
-
- if (requested_topics) {
- rd_list_free_cb(missing_topics,
- rd_list_remove_cmp(missing_topics,
- md->topics[i].topic,
- (void *)strcmp));
- if (!all_topics) {
- /* Only update cache when not asking
- * for all topics. */
-
- rd_kafka_wrlock(rk);
- rd_kafka_metadata_cache_topic_update(
- rk, &md->topics[i],
- rd_false /*propagate later*/);
- cache_changes++;
- rd_kafka_wrunlock(rk);
- }
- }
- }
-
- if (ApiVersion >= 8 && ApiVersion <= 10) {
- int32_t ClusterAuthorizedOperations;
- /* ClusterAuthorizedOperations */
- rd_kafka_buf_read_i32(rkbuf, &ClusterAuthorizedOperations);
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
-
- /* Entire Metadata response now parsed without errors:
- * update our internal state according to the response. */
-
- if (md->broker_cnt == 0 && md->topic_cnt == 0) {
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "No brokers or topics in metadata: should retry");
- err = RD_KAFKA_RESP_ERR__PARTIAL;
- goto err;
- }
-
- /* Update our list of brokers. */
- for (i = 0; i < md->broker_cnt; i++) {
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- " Broker #%i/%i: %s:%i NodeId %" PRId32, i,
- md->broker_cnt, md->brokers[i].host,
- md->brokers[i].port, md->brokers[i].id);
- rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto,
- &md->brokers[i], NULL);
- }
-
- /* Requested topics not seen in metadata? Propagate to topic code. */
- if (missing_topics) {
- char *topic;
- rd_rkb_dbg(rkb, TOPIC, "METADATA",
- "%d/%d requested topic(s) seen in metadata",
- rd_list_cnt(requested_topics) -
- rd_list_cnt(missing_topics),
- rd_list_cnt(requested_topics));
- for (i = 0; i < rd_list_cnt(missing_topics); i++)
- rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s",
- (char *)(missing_topics->rl_elems[i]));
- RD_LIST_FOREACH(topic, missing_topics, i) {
- rd_kafka_topic_t *rkt;
-
- rkt =
- rd_kafka_topic_find(rkb->rkb_rk, topic, 1 /*lock*/);
- if (rkt) {
- /* Received metadata response contained no
- * information about topic 'rkt' and thus
- * indicates the topic is not available in the
- * cluster.
- * Mark the topic as non-existent */
- rd_kafka_topic_wrlock(rkt);
- rd_kafka_topic_set_notexists(
- rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
- rd_kafka_topic_wrunlock(rkt);
-
- rd_kafka_topic_destroy0(rkt);
- }
- }
- }
-
-
- rd_kafka_wrlock(rkb->rkb_rk);
-
- rkb->rkb_rk->rk_ts_metadata = rd_clock();
-
- /* Update cached cluster id. */
- if (RD_KAFKAP_STR_LEN(&cluster_id) > 0 &&
- (!rk->rk_clusterid ||
- rd_kafkap_str_cmp_str(&cluster_id, rk->rk_clusterid))) {
- rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CLUSTERID",
- "ClusterId update \"%s\" -> \"%.*s\"",
- rk->rk_clusterid ? rk->rk_clusterid : "",
- RD_KAFKAP_STR_PR(&cluster_id));
- if (rk->rk_clusterid) {
- rd_kafka_log(rk, LOG_WARNING, "CLUSTERID",
- "Broker %s reports different ClusterId "
- "\"%.*s\" than previously known \"%s\": "
- "a client must not be simultaneously "
- "connected to multiple clusters",
- rd_kafka_broker_name(rkb),
- RD_KAFKAP_STR_PR(&cluster_id),
- rk->rk_clusterid);
- rd_free(rk->rk_clusterid);
- }
-
- rk->rk_clusterid = RD_KAFKAP_STR_DUP(&cluster_id);
- /* rd_kafka_clusterid() waits for a cache update even though
- * the clusterid is not in the cache itself. (#3620) */
- cache_changes++;
- }
-
- /* Update controller id. */
- if (rkb->rkb_rk->rk_controllerid != controller_id) {
- rd_rkb_dbg(rkb, BROKER, "CONTROLLERID",
- "ControllerId update %" PRId32 " -> %" PRId32,
- rkb->rkb_rk->rk_controllerid, controller_id);
- rkb->rkb_rk->rk_controllerid = controller_id;
- broker_changes++;
- }
-
- if (all_topics) {
- /* Expire all cache entries that were not updated. */
- rd_kafka_metadata_cache_evict_by_age(rkb->rkb_rk, ts_start);
-
- if (rkb->rkb_rk->rk_full_metadata)
- rd_kafka_metadata_destroy(
- rkb->rkb_rk->rk_full_metadata);
- rkb->rkb_rk->rk_full_metadata =
- rd_kafka_metadata_copy(md, tbuf.of);
- rkb->rkb_rk->rk_ts_full_metadata = rkb->rkb_rk->rk_ts_metadata;
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "Caching full metadata with "
- "%d broker(s) and %d topic(s): %s",
- md->broker_cnt, md->topic_cnt, reason);
- } else {
- if (cache_changes)
- rd_kafka_metadata_cache_propagate_changes(rk);
- rd_kafka_metadata_cache_expiry_start(rk);
- }
-
- /* Remove cache hints for the originally requested topics. */
- if (requested_topics)
- rd_kafka_metadata_cache_purge_hints(rk, requested_topics);
-
- rd_kafka_wrunlock(rkb->rkb_rk);
-
- if (broker_changes) {
- /* Broadcast broker metadata changes to listeners. */
- rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
- }
-
- /* Check if cgrp effective subscription is affected by
- * new topic metadata.
- * Ignore if this was a broker-only refresh (no topics), or
- * the request was from the partition assignor (!cgrp_update)
- * which may contain only a sub-set of the subscribed topics (namely
- * the effective subscription of available topics) as to not
- * propagate non-included topics as non-existent. */
- if (cgrp_update && (requested_topics || all_topics))
- rd_kafka_cgrp_metadata_update_check(rkb->rkb_rk->rk_cgrp,
- rd_true /*do join*/);
-
- /* Try to acquire a Producer ID from this broker if we
- * don't have one. */
- if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
- rd_kafka_wrlock(rkb->rkb_rk);
- rd_kafka_idemp_pid_fsm(rkb->rkb_rk);
- rd_kafka_wrunlock(rkb->rkb_rk);
- }
-
-done:
- if (missing_topics)
- rd_list_destroy(missing_topics);
-
- if (leader_epochs)
- rd_free(leader_epochs);
-
- /* This metadata request was triggered by someone wanting
- * the metadata information back as a reply, so send that reply now.
- * In this case we must not rd_free the metadata memory here;
- * the requester will free it.
- * The tbuf is explicitly not destroyed as we return its memory
- * to the caller. */
- *mdp = md;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- if (requested_topics) {
- /* Failed requests shall purge cache hints for
- * the requested topics. */
- rd_kafka_wrlock(rkb->rkb_rk);
- rd_kafka_metadata_cache_purge_hints(rk, requested_topics);
- rd_kafka_wrunlock(rkb->rkb_rk);
- }
-
- if (missing_topics)
- rd_list_destroy(missing_topics);
-
- if (leader_epochs)
- rd_free(leader_epochs);
-
- rd_tmpabuf_destroy(&tbuf);
-
- return err;
-}
-
-
-/**
- * @brief Add all topics in current cached full metadata
- * that matches the topics in \p match
- * to \p tinfos (rd_kafka_topic_info_t *).
- *
- * @param errored Any topic or wildcard pattern that did not match
- * an available topic will be added to this list with
- * the appropriate error set.
- *
- * @returns the number of topics matched and added to \p list
- *
- * @locks none
- * @locality any
- */
-size_t
-rd_kafka_metadata_topic_match(rd_kafka_t *rk,
- rd_list_t *tinfos,
- const rd_kafka_topic_partition_list_t *match,
- rd_kafka_topic_partition_list_t *errored) {
- int ti, i;
- size_t cnt = 0;
- const struct rd_kafka_metadata *metadata;
- rd_kafka_topic_partition_list_t *unmatched;
-
- rd_kafka_rdlock(rk);
- metadata = rk->rk_full_metadata;
- if (!metadata) {
- rd_kafka_rdunlock(rk);
- return 0;
- }
-
- /* To keep track of which patterns and topics in `match` that
- * did not match any topic (or matched an errored topic), we
- * create a set of all topics to match in `unmatched` and then
- * remove from this set as a match is found.
- * Whatever remains in `unmatched` after all matching is performed
- * are the topics and patterns that did not match a topic. */
- unmatched = rd_kafka_topic_partition_list_copy(match);
-
- /* For each topic in the cluster, scan through the match list
- * to find matching topic. */
- for (ti = 0; ti < metadata->topic_cnt; ti++) {
- const char *topic = metadata->topics[ti].topic;
-
- /* Ignore topics in blacklist */
- if (rk->rk_conf.topic_blacklist &&
- rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic))
- continue;
-
- /* Scan for matches */
- for (i = 0; i < match->cnt; i++) {
- if (!rd_kafka_topic_match(rk, match->elems[i].topic,
- topic))
- continue;
-
- /* Remove from unmatched */
- rd_kafka_topic_partition_list_del(
- unmatched, match->elems[i].topic,
- RD_KAFKA_PARTITION_UA);
-
- if (metadata->topics[ti].err) {
- rd_kafka_topic_partition_list_add(
- errored, topic, RD_KAFKA_PARTITION_UA)
- ->err = metadata->topics[ti].err;
- continue; /* Skip errored topics */
- }
-
- rd_list_add(
- tinfos,
- rd_kafka_topic_info_new(
- topic, metadata->topics[ti].partition_cnt));
-
- cnt++;
- }
- }
- rd_kafka_rdunlock(rk);
-
- /* Any topics/patterns still in unmatched did not match any
- * existing topics, add them to `errored`. */
- for (i = 0; i < unmatched->cnt; i++) {
- rd_kafka_topic_partition_t *elem = &unmatched->elems[i];
-
- rd_kafka_topic_partition_list_add(errored, elem->topic,
- RD_KAFKA_PARTITION_UA)
- ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- }
-
- rd_kafka_topic_partition_list_destroy(unmatched);
-
- return cnt;
-}
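-
-/* The bookkeeping above is plain set subtraction (illustrative sketch,
- * not part of this file, generic C): every requested name starts out
- * "unmatched" and is struck off when it matches; whatever survives is
- * reported as unknown. */
-static size_t my_count_unknown(const char **want,
-                               size_t want_cnt,
-                               const char **have,
-                               size_t have_cnt) {
-        size_t w, h, unknown = 0;
-
-        for (w = 0; w < want_cnt; w++) {
-                int matched = 0;
-                for (h = 0; h < have_cnt; h++)
-                        if (!strcmp(want[w], have[h]))
-                                matched = 1;
-                if (!matched)
-                        unknown++; /* would be added to \p errored above */
-        }
-        return unknown;
-}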
-
-
-/**
- * @brief Add all topics in \p match that matches cached metadata.
- * @remark MUST NOT be used with wildcard topics,
- * see rd_kafka_metadata_topic_match() for that.
- *
- * @param errored Non-existent and unauthorized topics are added to this
- * list with the appropriate error code.
- *
- * @returns the number of topics matched and added to \p tinfos
- * @locks none
- */
-size_t
-rd_kafka_metadata_topic_filter(rd_kafka_t *rk,
- rd_list_t *tinfos,
- const rd_kafka_topic_partition_list_t *match,
- rd_kafka_topic_partition_list_t *errored) {
- int i;
- size_t cnt = 0;
-
- rd_kafka_rdlock(rk);
- /* For each topic in match, look up the topic in the cache. */
- for (i = 0; i < match->cnt; i++) {
- const char *topic = match->elems[i].topic;
- const rd_kafka_metadata_topic_t *mtopic;
-
- /* Ignore topics in blacklist */
- if (rk->rk_conf.topic_blacklist &&
- rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic))
- continue;
-
- mtopic =
- rd_kafka_metadata_cache_topic_get(rk, topic, 1 /*valid*/);
-
- if (!mtopic)
- rd_kafka_topic_partition_list_add(errored, topic,
- RD_KAFKA_PARTITION_UA)
- ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- else if (mtopic->err)
- rd_kafka_topic_partition_list_add(errored, topic,
- RD_KAFKA_PARTITION_UA)
- ->err = mtopic->err;
- else {
- rd_list_add(tinfos, rd_kafka_topic_info_new(
- topic, mtopic->partition_cnt));
-
- cnt++;
- }
- }
- rd_kafka_rdunlock(rk);
-
- return cnt;
-}
-
-
-void rd_kafka_metadata_log(rd_kafka_t *rk,
- const char *fac,
- const struct rd_kafka_metadata *md) {
- int i;
-
- rd_kafka_dbg(rk, METADATA, fac,
- "Metadata with %d broker(s) and %d topic(s):",
- md->broker_cnt, md->topic_cnt);
-
- for (i = 0; i < md->broker_cnt; i++) {
- rd_kafka_dbg(rk, METADATA, fac,
- " Broker #%i/%i: %s:%i NodeId %" PRId32, i,
- md->broker_cnt, md->brokers[i].host,
- md->brokers[i].port, md->brokers[i].id);
- }
-
- for (i = 0; i < md->topic_cnt; i++) {
- rd_kafka_dbg(
- rk, METADATA, fac,
- " Topic #%i/%i: %s with %i partitions%s%s", i,
- md->topic_cnt, md->topics[i].topic,
- md->topics[i].partition_cnt, md->topics[i].err ? ": " : "",
- md->topics[i].err ? rd_kafka_err2str(md->topics[i].err)
- : "");
- }
-}
-
-
-
-/**
- * @brief Refresh metadata for \p topics
- *
- * @param rk: used to look up usable broker if \p rkb is NULL.
- * @param rkb: use this broker, unless NULL then any usable broker from \p rk
- * @param force: force refresh even if topics are up-to-date in cache
- * @param allow_auto_create: Enable/disable auto creation of topics
- * (through MetadataRequest). Requires a modern
- * broker version.
- * Takes precedence over allow.auto.create.topics.
- * @param cgrp_update: Allow consumer group state update on response.
- *
- * @returns an error code
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_topics(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const rd_list_t *topics,
- rd_bool_t force,
- rd_bool_t allow_auto_create,
- rd_bool_t cgrp_update,
- const char *reason) {
- rd_list_t q_topics;
- int destroy_rkb = 0;
-
- if (!rk) {
- rd_assert(rkb);
- rk = rkb->rkb_rk;
- }
-
- rd_kafka_wrlock(rk);
-
- if (!rkb) {
- if (!(rkb = rd_kafka_broker_any_usable(
- rk, RD_POLL_NOWAIT, RD_DONT_LOCK, 0, reason))) {
- /* Hint the cache that something is interested in
- * these topics so that they will be included in
- * a future query of all known topics. */
- rd_kafka_metadata_cache_hint(rk, topics, NULL,
- RD_KAFKA_RESP_ERR__NOENT,
- 0 /*dont replace*/);
-
- rd_kafka_wrunlock(rk);
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Skipping metadata refresh of %d topic(s):"
- " %s: no usable brokers",
- rd_list_cnt(topics), reason);
-
- return RD_KAFKA_RESP_ERR__TRANSPORT;
- }
- destroy_rkb = 1;
- }
-
- rd_list_init(&q_topics, rd_list_cnt(topics), rd_free);
-
- if (!force) {
-
- /* Hint cache of upcoming MetadataRequest and filter
- * out any topics that are already being requested.
- * q_topics will contain remaining topics to query. */
- rd_kafka_metadata_cache_hint(rk, topics, &q_topics,
- RD_KAFKA_RESP_ERR__WAIT_CACHE,
- rd_false /*dont replace*/);
- rd_kafka_wrunlock(rk);
-
- if (rd_list_cnt(&q_topics) == 0) {
- /* No topics need new query. */
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Skipping metadata refresh of "
- "%d topic(s): %s: "
- "already being requested",
- rd_list_cnt(topics), reason);
- rd_list_destroy(&q_topics);
- if (destroy_rkb)
- rd_kafka_broker_destroy(rkb);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- } else {
- rd_kafka_wrunlock(rk);
- rd_list_copy_to(&q_topics, topics, rd_list_string_copy, NULL);
- }
-
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Requesting metadata for %d/%d topics: %s",
- rd_list_cnt(&q_topics), rd_list_cnt(topics), reason);
-
- rd_kafka_MetadataRequest(rkb, &q_topics, reason, allow_auto_create,
- cgrp_update, NULL);
-
- rd_list_destroy(&q_topics);
-
- if (destroy_rkb)
- rd_kafka_broker_destroy(rkb);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
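-
-/* Illustrative sketch (not part of the original file) of driving
- * rd_kafka_metadata_refresh_topics() from internal code; the topic
- * names and calling context are hypothetical. The list must own heap
- * copies of the names since rd_free is its free callback. */
-#if 0 /* example only, not compiled */
-static void example_refresh_two_topics(rd_kafka_t *rk) {
- rd_list_t topics;
-
- rd_list_init(&topics, 2, rd_free);
- rd_list_add(&topics, rd_strdup("orders"));
- rd_list_add(&topics, rd_strdup("payments"));
-
- /* NULL broker: any usable broker is picked; !force: topics already
- * being queried are filtered out through the cache hints. */
- rd_kafka_metadata_refresh_topics(rk, NULL, &topics, rd_false /*!force*/,
- rd_false /*!allow_auto_create*/,
- rd_false /*!cgrp_update*/,
- "example refresh");
-
- rd_list_destroy(&topics);
-}
-#endif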
-
-
-/**
- * @brief Refresh metadata for known topics
- *
- * @param rk: used to look up usable broker if \p rkb is NULL.
- * @param rkb: use this broker, unless NULL then any usable broker from \p rk
- * @param force: refresh even if cache is up-to-date
- *
- * @returns an error code (__UNKNOWN_TOPIC if there are no local topics)
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_bool_t force,
- const char *reason) {
- rd_list_t topics;
- rd_kafka_resp_err_t err;
- int cache_cnt = 0;
- rd_bool_t allow_auto_create_topics;
-
- if (!rk)
- rk = rkb->rkb_rk;
-
- rd_list_init(&topics, 8, rd_free);
- rd_kafka_local_topics_to_list(rk, &topics, &cache_cnt);
-
- /* Allow topic auto creation if there are locally known topics (rkt)
- * and not just cached (to be queried) topics. */
- allow_auto_create_topics = rk->rk_conf.allow_auto_create_topics &&
- rd_list_cnt(&topics) > cache_cnt;
-
- if (rd_list_cnt(&topics) == 0)
- err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- else
- err = rd_kafka_metadata_refresh_topics(
- rk, rkb, &topics, force, allow_auto_create_topics,
- rd_false /*!cgrp_update*/, reason);
-
- rd_list_destroy(&topics);
-
- return err;
-}
-
-
-/**
- * @brief Refresh metadata for known and subscribed topics.
- *
- * @param rk used to look up usable broker if \p rkb is NULL.
- * @param rkb use this broker, unless NULL then any usable broker from \p rk.
- * @param reason reason of refresh, used in debug logs.
- *
- * @returns an error code (ERR__UNKNOWN_TOPIC if no topics are desired).
- *
- * @locality rdkafka main thread
- * @locks_required none
- * @locks_acquired rk(read)
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *reason) {
- rd_list_t topics;
- rd_kafka_resp_err_t err;
- rd_kafka_cgrp_t *rkcg;
- rd_bool_t allow_auto_create_topics =
- rk->rk_conf.allow_auto_create_topics;
- int cache_cnt = 0;
-
- if (!rk) {
- rd_assert(rkb);
- rk = rkb->rkb_rk;
- }
-
- rkcg = rk->rk_cgrp;
- rd_assert(rkcg != NULL);
-
- if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) {
- /* If there is a wildcard subscription we need to request
- * all topics in the cluster so that we can perform
- * regexp matching. */
- return rd_kafka_metadata_refresh_all(rk, rkb, reason);
- }
-
- rd_list_init(&topics, 8, rd_free);
-
- /* Add locally known topics, i.e., those that are currently
- * being consumed or otherwise referenced through topic_t objects. */
- rd_kafka_local_topics_to_list(rk, &topics, &cache_cnt);
- if (rd_list_cnt(&topics) == cache_cnt)
- allow_auto_create_topics = rd_false;
-
- /* Add subscribed (non-wildcard) topics, if any. */
- if (rkcg->rkcg_subscription)
- rd_kafka_topic_partition_list_get_topic_names(
- rkcg->rkcg_subscription, &topics,
- rd_false /*no wildcards*/);
-
- if (rd_list_cnt(&topics) == 0)
- err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- else
- err = rd_kafka_metadata_refresh_topics(
- rk, rkb, &topics, rd_true /*force*/,
- allow_auto_create_topics, rd_true /*cgrp_update*/, reason);
-
- rd_list_destroy(&topics);
-
- return err;
-}
-
-
-/**
- * @brief Refresh broker list by metadata.
- *
- * Attempts to use sparse metadata request if possible, else falls back
- * on a full metadata request. (NOTE: sparse not implemented, KIP-4)
- *
- * @param rk: used to look up usable broker if \p rkb is NULL.
- * @param rkb: use this broker, unless NULL then any usable broker from \p rk
- *
- * @returns an error code
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *reason) {
- return rd_kafka_metadata_request(rk, rkb, NULL /*brokers only*/,
- rd_false /*!allow auto create topics*/,
- rd_false /*no cgrp update */, reason,
- NULL);
-}
-
-
-
-/**
- * @brief Refresh metadata for all topics in cluster.
- * This is a full metadata request which might be taxing on the
- * broker if the cluster has many topics.
- *
- * @locality any
- * @locks none
- */
-rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *reason) {
- int destroy_rkb = 0;
- rd_list_t topics;
-
- if (!rk) {
- rd_assert(rkb);
- rk = rkb->rkb_rk;
- }
-
- if (!rkb) {
- if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT,
- RD_DO_LOCK, 0, reason)))
- return RD_KAFKA_RESP_ERR__TRANSPORT;
- destroy_rkb = 1;
- }
-
- rd_list_init(&topics, 0, NULL); /* empty list = all topics */
- rd_kafka_MetadataRequest(rkb, &topics, reason,
- rd_false /*no auto create*/,
- rd_true /*cgrp update*/, NULL);
- rd_list_destroy(&topics);
-
- if (destroy_rkb)
- rd_kafka_broker_destroy(rkb);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Lower-level Metadata request that takes a callback (with replyq set)
- * which will be triggered after parsing is complete.
- *
- * @param cgrp_update Allow consumer group updates from the response.
- *
- * @locks none
- * @locality any
- */
-rd_kafka_resp_err_t
-rd_kafka_metadata_request(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const rd_list_t *topics,
- rd_bool_t allow_auto_create_topics,
- rd_bool_t cgrp_update,
- const char *reason,
- rd_kafka_op_t *rko) {
- int destroy_rkb = 0;
-
- if (!rkb) {
- if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT,
- RD_DO_LOCK, 0, reason)))
- return RD_KAFKA_RESP_ERR__TRANSPORT;
- destroy_rkb = 1;
- }
-
- rd_kafka_MetadataRequest(rkb, topics, reason, allow_auto_create_topics,
- cgrp_update, rko);
-
- if (destroy_rkb)
- rd_kafka_broker_destroy(rkb);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Query timer callback to trigger refresh for topics
- * that have partitions missing their leaders.
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static void rd_kafka_metadata_leader_query_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_t *rk = rkts->rkts_rk;
- rd_kafka_timer_t *rtmr = &rk->rk_metadata_cache.rkmc_query_tmr;
- rd_kafka_topic_t *rkt;
- rd_list_t topics;
-
- rd_kafka_wrlock(rk);
- rd_list_init(&topics, rk->rk_topic_cnt, rd_free);
-
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- int i, require_metadata;
- rd_kafka_topic_rdlock(rkt);
-
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) {
- /* Skip topics that are known to not exist. */
- rd_kafka_topic_rdunlock(rkt);
- continue;
- }
-
- require_metadata =
- rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-
- /* Check if any partitions are missing brokers. */
- for (i = 0; !require_metadata && i < rkt->rkt_partition_cnt;
- i++) {
- rd_kafka_toppar_t *rktp = rkt->rkt_p[i];
- rd_kafka_toppar_lock(rktp);
- require_metadata =
- !rktp->rktp_broker && !rktp->rktp_next_broker;
- rd_kafka_toppar_unlock(rktp);
- }
-
- if (require_metadata || rkt->rkt_partition_cnt == 0)
- rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str));
-
- rd_kafka_topic_rdunlock(rkt);
- }
-
- rd_kafka_wrunlock(rk);
-
- if (rd_list_cnt(&topics) == 0) {
- /* No leader-less topics+partitions, stop the timer. */
- rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/);
- } else {
- rd_kafka_metadata_refresh_topics(
- rk, NULL, &topics, rd_true /*force*/,
- rk->rk_conf.allow_auto_create_topics,
- rd_false /*!cgrp_update*/, "partition leader query");
- /* Back off next query exponentially until we reach
- * the standard query interval - then stop the timer
- * since the intervalled querier will do the job for us. */
- if (rk->rk_conf.metadata_refresh_interval_ms > 0 &&
- rtmr->rtmr_interval * 2 / 1000 >=
- rk->rk_conf.metadata_refresh_interval_ms)
- rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/);
- else
- rd_kafka_timer_exp_backoff(rkts, rtmr);
- }
-
- rd_list_destroy(&topics);
-}
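-
-/* Standalone sketch (not upstream code) of the backoff schedule the timer
- * callback above produces: each fired query roughly doubles the interval
- * (rd_kafka_timer_exp_backoff()) until it would reach the regular
- * metadata.refresh.interval.ms, at which point the timer is stopped and
- * the regular intervalled refresh takes over. The 250ms fast interval
- * and 30s refresh interval below are assumed example values. */
-#if 0 /* example only, not compiled */
-#include <stdio.h>
-
-int main(void) {
- int refresh_ms = 30000;
- int interval_ms = 250; /* metadata.refresh.fast.interval.ms */
-
- while (interval_ms * 2 < refresh_ms) {
- printf("leader query, next in ~%dms\n", interval_ms);
- interval_ms *= 2; /* exponential backoff */
- }
- printf("final leader query, timer stopped (regular %dms refresh "
- "continues)\n", refresh_ms);
- return 0;
-}
-#endif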
-
-
-
-/**
- * @brief Trigger fast leader query to quickly pick up on leader changes.
- * The fast leader query is a quick query followed by later queries at
- * exponentially increased intervals until no topics are missing
- * leaders.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk) {
- rd_ts_t next;
-
- /* Restart the timer if it will speed things up. */
- next = rd_kafka_timer_next(
- &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/);
- if (next == -1 /* not started */ ||
- next >
- (rd_ts_t)rk->rk_conf.metadata_refresh_fast_interval_ms * 1000) {
- rd_kafka_dbg(rk, METADATA | RD_KAFKA_DBG_TOPIC, "FASTQUERY",
- "Starting fast leader query");
- rd_kafka_timer_start(
- &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr,
- rk->rk_conf.metadata_refresh_fast_interval_ms * 1000,
- rd_kafka_metadata_leader_query_tmr_cb, NULL);
- }
-}
-
-
-
-/**
- * @brief Create mock Metadata (for testing) based on the provided topics.
- *
- * @param topics elements are checked for .topic and .partition_cnt
- * @param topic_cnt is the number of topic elements in \p topics.
- *
- * @returns a newly allocated metadata object that must be freed with
- * rd_kafka_metadata_destroy().
- *
- * @sa rd_kafka_metadata_copy()
- */
-rd_kafka_metadata_t *
-rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics,
- size_t topic_cnt) {
- rd_kafka_metadata_t *md;
- rd_tmpabuf_t tbuf;
- size_t topic_names_size = 0;
- int total_partition_cnt = 0;
- size_t i;
-
- /* Calculate total partition count and topic names size before
- * allocating memory. */
- for (i = 0; i < topic_cnt; i++) {
- topic_names_size += 1 + strlen(topics[i].topic);
- total_partition_cnt += topics[i].partition_cnt;
- }
-
-
- /* Allocate contiguous buffer which will back all the memory
- * needed by the final metadata_t object */
- rd_tmpabuf_new(
- &tbuf,
- sizeof(*md) + (sizeof(*md->topics) * topic_cnt) + topic_names_size +
- (64 /*topic name size..*/ * topic_cnt) +
- (sizeof(*md->topics[0].partitions) * total_partition_cnt),
- 1 /*assert on fail*/);
-
- md = rd_tmpabuf_alloc(&tbuf, sizeof(*md));
- memset(md, 0, sizeof(*md));
-
- md->topic_cnt = (int)topic_cnt;
- md->topics =
- rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics));
-
- for (i = 0; i < (size_t)md->topic_cnt; i++) {
- int j;
-
- md->topics[i].topic =
- rd_tmpabuf_write_str(&tbuf, topics[i].topic);
- md->topics[i].partition_cnt = topics[i].partition_cnt;
- md->topics[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- md->topics[i].partitions = rd_tmpabuf_alloc(
- &tbuf, md->topics[i].partition_cnt *
- sizeof(*md->topics[i].partitions));
-
- for (j = 0; j < md->topics[i].partition_cnt; j++) {
- memset(&md->topics[i].partitions[j], 0,
- sizeof(md->topics[i].partitions[j]));
- md->topics[i].partitions[j].id = j;
- }
- }
-
- /* Check for tmpabuf errors */
- if (rd_tmpabuf_failed(&tbuf))
- rd_assert(!*"metadata mock failed");
-
- /* Not destroying the tmpabuf since we return
- * its allocated memory. */
- return md;
-}
-
-
-/**
- * @brief Create mock Metadata (for testing) based on the
- * var-arg tuples of (const char *topic, int partition_cnt).
- *
- * @param topic_cnt is the number of topic,partition_cnt tuples.
- *
- * @returns a newly allocated metadata object that must be freed with
- * rd_kafka_metadata_destroy().
- *
- * @sa rd_kafka_metadata_new_topic_mock()
- */
-rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...) {
- rd_kafka_metadata_topic_t *topics;
- va_list ap;
- size_t i;
-
- topics = rd_alloca(sizeof(*topics) * topic_cnt);
-
- va_start(ap, topic_cnt);
- for (i = 0; i < topic_cnt; i++) {
- topics[i].topic = va_arg(ap, char *);
- topics[i].partition_cnt = va_arg(ap, int);
- }
- va_end(ap);
-
- return rd_kafka_metadata_new_topic_mock(topics, topic_cnt);
-}
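-
-/* Illustrative usage (not part of the original file): build mock metadata
- * for two hypothetical topics with 3 and 1 partitions respectively. */
-#if 0 /* example only, not compiled */
-static void example_mock_metadata(void) {
- rd_kafka_metadata_t *md;
-
- md = rd_kafka_metadata_new_topic_mockv(2, "topicA", 3, "topicB", 1);
-
- /* ... use md in tests ... */
-
- rd_kafka_metadata_destroy(md);
-}
-#endif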
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h
deleted file mode 100644
index 53a959b8e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_METADATA_H_
-#define _RDKAFKA_METADATA_H_
-
-#include "rdavl.h"
-
-rd_bool_t rd_kafka_has_reliable_leader_epochs(rd_kafka_broker_t *rkb);
-
-rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *request,
- rd_kafka_buf_t *rkbuf,
- struct rd_kafka_metadata **mdp);
-
-struct rd_kafka_metadata *
-rd_kafka_metadata_copy(const struct rd_kafka_metadata *md, size_t size);
-
-size_t
-rd_kafka_metadata_topic_match(rd_kafka_t *rk,
- rd_list_t *tinfos,
- const rd_kafka_topic_partition_list_t *match,
- rd_kafka_topic_partition_list_t *errored);
-size_t
-rd_kafka_metadata_topic_filter(rd_kafka_t *rk,
- rd_list_t *tinfos,
- const rd_kafka_topic_partition_list_t *match,
- rd_kafka_topic_partition_list_t *errored);
-
-void rd_kafka_metadata_log(rd_kafka_t *rk,
- const char *fac,
- const struct rd_kafka_metadata *md);
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_topics(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const rd_list_t *topics,
- rd_bool_t force,
- rd_bool_t allow_auto_create,
- rd_bool_t cgrp_update,
- const char *reason);
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_bool_t force,
- const char *reason);
-rd_kafka_resp_err_t
-rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *reason);
-rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *reason);
-rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *reason);
-
-rd_kafka_resp_err_t
-rd_kafka_metadata_request(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const rd_list_t *topics,
- rd_bool_t allow_auto_create_topics,
- rd_bool_t cgrp_update,
- const char *reason,
- rd_kafka_op_t *rko);
-
-
-
-int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b);
-
-rd_kafka_metadata_t *
-rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics,
- size_t topic_cnt);
-rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...);
-
-
-/**
- * @{
- *
- * @brief Metadata cache
- */
-
-struct rd_kafka_metadata_cache_entry {
- rd_avl_node_t rkmce_avlnode; /* rkmc_avl */
- TAILQ_ENTRY(rd_kafka_metadata_cache_entry) rkmce_link; /* rkmc_expiry */
- rd_ts_t rkmce_ts_expires; /* Expire time */
- rd_ts_t rkmce_ts_insert; /* Insert time */
- rd_kafka_metadata_topic_t rkmce_mtopic; /* Cached topic metadata */
- /* rkmce_mtopic.partitions memory points here. */
-};
-
-
-#define RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(ERR) \
- ((ERR) == RD_KAFKA_RESP_ERR__WAIT_CACHE || \
- (ERR) == RD_KAFKA_RESP_ERR__NOENT)
-
-#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \
- !RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY((rkmce)->rkmce_mtopic.err)
-
-
-
-struct rd_kafka_metadata_cache {
- rd_avl_t rkmc_avl;
- TAILQ_HEAD(, rd_kafka_metadata_cache_entry) rkmc_expiry;
- rd_kafka_timer_t rkmc_expiry_tmr;
- int rkmc_cnt;
-
- /* Protected by rk_lock */
- rd_list_t rkmc_observers; /**< (rd_kafka_enq_once_t*) */
-
- /* Protected by full_lock: */
- mtx_t rkmc_full_lock;
- int rkmc_full_topics_sent; /* Full MetadataRequest for
- * all topics has been sent,
- * awaiting response. */
- int rkmc_full_brokers_sent; /* Full MetadataRequest for
- * all brokers (but not topics)
- * has been sent,
- * awaiting response. */
-
- rd_kafka_timer_t rkmc_query_tmr; /* Query timer for topics without
- * leaders. */
- cnd_t rkmc_cnd; /* cache_wait_change() cond. */
- mtx_t rkmc_cnd_lock; /* lock for rkmc_cnd */
-};
-
-
-
-void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk);
-int rd_kafka_metadata_cache_evict_by_age(rd_kafka_t *rk, rd_ts_t ts);
-void rd_kafka_metadata_cache_topic_update(rd_kafka_t *rk,
- const rd_kafka_metadata_topic_t *mdt,
- rd_bool_t propagate);
-void rd_kafka_metadata_cache_update(rd_kafka_t *rk,
- const rd_kafka_metadata_t *md,
- int abs_update);
-void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk);
-struct rd_kafka_metadata_cache_entry *
-rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid);
-void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk,
- const rd_list_t *topics);
-int rd_kafka_metadata_cache_hint(rd_kafka_t *rk,
- const rd_list_t *topics,
- rd_list_t *dst,
- rd_kafka_resp_err_t err,
- rd_bool_t replace);
-
-int rd_kafka_metadata_cache_hint_rktparlist(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *dst,
- int replace);
-
-const rd_kafka_metadata_topic_t *
-rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk, const char *topic, int valid);
-int rd_kafka_metadata_cache_topic_partition_get(
- rd_kafka_t *rk,
- const rd_kafka_metadata_topic_t **mtopicp,
- const rd_kafka_metadata_partition_t **mpartp,
- const char *topic,
- int32_t partition,
- int valid);
-
-int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk,
- const rd_list_t *topics,
- int *metadata_agep);
-
-void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk);
-
-void rd_kafka_metadata_cache_init(rd_kafka_t *rk);
-void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk);
-void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers);
-int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms);
-void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk);
-
-int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics);
-
-void rd_kafka_metadata_cache_wait_state_change_async(
- rd_kafka_t *rk,
- rd_kafka_enq_once_t *eonce);
-
-/**@}*/
-#endif /* _RDKAFKA_METADATA_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c
deleted file mode 100644
index 514d391a8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c
+++ /dev/null
@@ -1,836 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_metadata.h"
-
-#include <string.h>
-/**
- * @{
- *
- * @brief Metadata cache
- *
- * The metadata cache consists of cached topic metadata as
- * retrieved from the cluster using MetadataRequest.
- *
- * The topic cache entries are made up of \c struct rd_kafka_metadata_cache_entry
- * objects, each containing the topic name, a copy of the topic's metadata
- * and a cache expiry time.
- *
- * On update, any previous entry for the topic is removed and replaced
- * with a new entry.
- *
- * The cache is also populated when topic metadata is requested for
- * specific topics. This will not interfere with existing cache entries
- * for those topics, but for any topic not currently in the cache a new
- * entry is added in a non-valid state (see RD_KAFKA_METADATA_CACHE_VALID(rkmce))
- * indicating that the entry is waiting to be populated by the MetadataResponse.
- * Two special error codes are used for this purpose:
- * RD_KAFKA_RESP_ERR__NOENT - to indicate that a topic needs to be queried,
- * RD_KAFKA_RESP_ERR__WAIT_CACHE - to indicate that a topic is being queried
- * and there is no need to re-query it prior
- * to the current query finishing.
- *
- * The cache is locked in its entirety with rd_kafka_wr/rdlock() by the caller
- * and the returned cache entry must only be accessed during the duration
- * of the lock.
- *
- */
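-
-/* Illustrative sketch (not upstream code) of the locking contract just
- * described: the caller holds the rk lock across both the lookup and any
- * access to the returned entry. "mytopic" is a hypothetical topic name. */
-#if 0 /* example only, not compiled */
-static int example_cached_partition_cnt(rd_kafka_t *rk) {
- const rd_kafka_metadata_topic_t *mtopic;
- int partition_cnt = -1;
-
- rd_kafka_rdlock(rk);
- mtopic = rd_kafka_metadata_cache_topic_get(rk, "mytopic", 1 /*valid*/);
- if (mtopic)
- partition_cnt = mtopic->partition_cnt; /* only valid while
- * the lock is held */
- rd_kafka_rdunlock(rk);
-
- return partition_cnt;
-}
-#endif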
-
-
-
-/**
- * @brief Remove and free cache entry.
- *
- * @remark The expiry timer is not updated, for simplicity.
- * @locks rd_kafka_wrlock()
- */
-static RD_INLINE void
-rd_kafka_metadata_cache_delete(rd_kafka_t *rk,
- struct rd_kafka_metadata_cache_entry *rkmce,
- int unlink_avl) {
- if (unlink_avl)
- RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, rkmce);
- TAILQ_REMOVE(&rk->rk_metadata_cache.rkmc_expiry, rkmce, rkmce_link);
- rd_kafka_assert(NULL, rk->rk_metadata_cache.rkmc_cnt > 0);
- rk->rk_metadata_cache.rkmc_cnt--;
-
- rd_free(rkmce);
-}
-
-/**
- * @brief Delete cache entry by topic name
- * @locks rd_kafka_wrlock()
- * @returns 1 if entry was found and removed, else 0.
- */
-static int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk,
- const char *topic) {
- struct rd_kafka_metadata_cache_entry *rkmce;
-
- rkmce = rd_kafka_metadata_cache_find(rk, topic, 1);
- if (rkmce)
- rd_kafka_metadata_cache_delete(rk, rkmce, 1);
- return rkmce ? 1 : 0;
-}
-
-static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk);
-
-/**
- * @brief Cache eviction timer callback.
- * @locality rdkafka main thread
- * @locks NOT rd_kafka_*lock()
- */
-static void rd_kafka_metadata_cache_evict_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_t *rk = arg;
-
- rd_kafka_wrlock(rk);
- rd_kafka_metadata_cache_evict(rk);
- rd_kafka_wrunlock(rk);
-}
-
-
-/**
- * @brief Evict timed out entries from cache and rearm timer for
- * next expiry.
- *
- * @returns the number of entries evicted.
- *
- * @locks_required rd_kafka_wrlock()
- */
-static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk) {
- int cnt = 0;
- rd_ts_t now = rd_clock();
- struct rd_kafka_metadata_cache_entry *rkmce;
-
- while ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)) &&
- rkmce->rkmce_ts_expires <= now) {
- rd_kafka_metadata_cache_delete(rk, rkmce, 1);
- cnt++;
- }
-
- if (rkmce)
- rd_kafka_timer_start(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr,
- rkmce->rkmce_ts_expires - now,
- rd_kafka_metadata_cache_evict_tmr_cb, rk);
- else
- rd_kafka_timer_stop(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
-
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Expired %d entries from metadata cache "
- "(%d entries remain)",
- cnt, rk->rk_metadata_cache.rkmc_cnt);
-
- if (cnt)
- rd_kafka_metadata_cache_propagate_changes(rk);
-
- return cnt;
-}
-
-
-/**
- * @brief Evict entries from the cache based on their insert/update time
- * rather than their expiry time: any entry older than \p ts is evicted.
- *
- * @returns the number of entries evicted.
- *
- * @locks_required rd_kafka_wrlock()
- */
-int rd_kafka_metadata_cache_evict_by_age(rd_kafka_t *rk, rd_ts_t ts) {
- int cnt = 0;
- struct rd_kafka_metadata_cache_entry *rkmce, *tmp;
-
- TAILQ_FOREACH_SAFE(rkmce, &rk->rk_metadata_cache.rkmc_expiry,
- rkmce_link, tmp) {
- if (rkmce->rkmce_ts_insert <= ts) {
- rd_kafka_metadata_cache_delete(rk, rkmce, 1);
- cnt++;
- }
- }
-
- /* Update expiry timer */
- rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry);
- if (rkmce)
- rd_kafka_timer_start(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr,
- rkmce->rkmce_ts_expires - rd_clock(),
- rd_kafka_metadata_cache_evict_tmr_cb, rk);
- else
- rd_kafka_timer_stop(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
-
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Expired %d entries older than %dms from metadata cache "
- "(%d entries remain)",
- cnt, (int)((rd_clock() - ts) / 1000),
- rk->rk_metadata_cache.rkmc_cnt);
-
- if (cnt)
- rd_kafka_metadata_cache_propagate_changes(rk);
-
- return cnt;
-}
-
-
-/**
- * @brief Find cache entry by topic name
- *
- * @param valid: entry must be valid (not hint)
- *
- * @locks rd_kafka_*lock()
- */
-struct rd_kafka_metadata_cache_entry *
-rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid) {
- struct rd_kafka_metadata_cache_entry skel, *rkmce;
- skel.rkmce_mtopic.topic = (char *)topic;
- rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl, &skel);
- if (rkmce && (!valid || RD_KAFKA_METADATA_CACHE_VALID(rkmce)))
- return rkmce;
- return NULL;
-}
-
-
-/**
- * @brief Partition (id) comparator
- */
-int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b) {
- const rd_kafka_metadata_partition_t *a = _a, *b = _b;
- return RD_CMP(a->id, b->id);
-}
-
-
-/**
- * @brief Add (and replace) cache entry for topic.
- *
- * This makes a copy of \p topic
- *
- * @locks_required rd_kafka_wrlock()
- */
-static struct rd_kafka_metadata_cache_entry *
-rd_kafka_metadata_cache_insert(rd_kafka_t *rk,
- const rd_kafka_metadata_topic_t *mtopic,
- rd_ts_t now,
- rd_ts_t ts_expires) {
- struct rd_kafka_metadata_cache_entry *rkmce, *old;
- size_t topic_len;
- rd_tmpabuf_t tbuf;
- int i;
-
- /* Metadata is stored in one contiguous buffer where structs and
- * pointed-to fields are laid out in a memory-aligned fashion.
- * rd_tmpabuf_t provides the infrastructure to do this.
- * Because of this we copy all the structs verbatim but
- * any pointer fields need to be copied explicitly to update
- * the pointer address. */
- topic_len = strlen(mtopic->topic) + 1;
- rd_tmpabuf_new(&tbuf,
- RD_ROUNDUP(sizeof(*rkmce), 8) +
- RD_ROUNDUP(topic_len, 8) +
- (mtopic->partition_cnt *
- RD_ROUNDUP(sizeof(*mtopic->partitions), 8)),
- 1 /*assert on fail*/);
-
- rkmce = rd_tmpabuf_alloc(&tbuf, sizeof(*rkmce));
-
- rkmce->rkmce_mtopic = *mtopic;
-
- /* Copy topic name and update pointer */
- rkmce->rkmce_mtopic.topic = rd_tmpabuf_write_str(&tbuf, mtopic->topic);
-
- /* Copy partition array and update pointer */
- rkmce->rkmce_mtopic.partitions = rd_tmpabuf_write(
- &tbuf, mtopic->partitions,
- mtopic->partition_cnt * sizeof(*mtopic->partitions));
-
- /* Clear uncached fields. */
- for (i = 0; i < mtopic->partition_cnt; i++) {
- rkmce->rkmce_mtopic.partitions[i].replicas = NULL;
- rkmce->rkmce_mtopic.partitions[i].replica_cnt = 0;
- rkmce->rkmce_mtopic.partitions[i].isrs = NULL;
- rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0;
- }
-
- /* Sort partitions for future bsearch() lookups. */
- qsort(rkmce->rkmce_mtopic.partitions, rkmce->rkmce_mtopic.partition_cnt,
- sizeof(*rkmce->rkmce_mtopic.partitions),
- rd_kafka_metadata_partition_id_cmp);
-
- TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry, rkmce,
- rkmce_link);
- rk->rk_metadata_cache.rkmc_cnt++;
- rkmce->rkmce_ts_expires = ts_expires;
- rkmce->rkmce_ts_insert = now;
-
- /* Insert (and replace existing) entry. */
- old = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl, rkmce,
- rkmce_avlnode);
- if (old)
- rd_kafka_metadata_cache_delete(rk, old, 0);
-
- /* Explicitly not freeing the tmpabuf since rkmce points to its
- * memory. */
- return rkmce;
-}
-
-
-/**
- * @brief Purge the metadata cache
- *
- * @locks_required rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers) {
- struct rd_kafka_metadata_cache_entry *rkmce;
- int was_empty = TAILQ_EMPTY(&rk->rk_metadata_cache.rkmc_expiry);
-
- while ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
- rd_kafka_metadata_cache_delete(rk, rkmce, 1);
-
- rd_kafka_timer_stop(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
-
- if (!was_empty)
- rd_kafka_metadata_cache_propagate_changes(rk);
-
- if (purge_observers)
- rd_list_clear(&rk->rk_metadata_cache.rkmc_observers);
-}
-
-
-/**
- * @brief Start or update the cache expiry timer.
- * Typically done after a series of cache_topic_update()
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk) {
- struct rd_kafka_metadata_cache_entry *rkmce;
-
- if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
- rd_kafka_timer_start(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr,
- rkmce->rkmce_ts_expires - rd_clock(),
- rd_kafka_metadata_cache_evict_tmr_cb, rk);
-}
-
-/**
- * @brief Update the metadata cache for a single topic
- * with the provided metadata.
- *
- * If the topic has a temporary error the existing entry is removed
- * and no new entry is added, which avoids the topic being
- * suppressed in upcoming metadata requests just because it is in the cache.
- * In other words: we want to re-query errored topics.
- * If the broker reports ERR_UNKNOWN_TOPIC_OR_PART we add a negative cache
- * entry with a low expiry time, so that client code (cgrp) knows
- * the topic has been queried but did not exist; otherwise it would wait
- * forever for the unknown topic to surface.
- *
- * For permanent errors (authorization failures), we keep
- * the entry cached for metadata.max.age.ms.
- *
- * @remark The cache expiry timer will not be updated/started,
- * call rd_kafka_metadata_cache_expiry_start() instead.
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_topic_update(rd_kafka_t *rk,
- const rd_kafka_metadata_topic_t *mdt,
- rd_bool_t propagate) {
- rd_ts_t now = rd_clock();
- rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000);
- int changed = 1;
-
- /* Cache unknown topics for a short while (100ms) to allow the cgrp
- * logic to find negative cache hits. */
- if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
- ts_expires = RD_MIN(ts_expires, now + (100 * 1000));
-
- if (!mdt->err ||
- mdt->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED ||
- mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
- rd_kafka_metadata_cache_insert(rk, mdt, now, ts_expires);
- else
- changed =
- rd_kafka_metadata_cache_delete_by_name(rk, mdt->topic);
-
- if (changed && propagate)
- rd_kafka_metadata_cache_propagate_changes(rk);
-}
-
-
-/**
- * @brief Update the metadata cache with the provided metadata.
- *
- * @param abs_update int: absolute update: purge cache before updating.
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_update(rd_kafka_t *rk,
- const rd_kafka_metadata_t *md,
- int abs_update) {
- struct rd_kafka_metadata_cache_entry *rkmce;
- rd_ts_t now = rd_clock();
- rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000);
- int i;
-
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "%s of metadata cache with %d topic(s)",
- abs_update ? "Absolute update" : "Update", md->topic_cnt);
-
- if (abs_update)
- rd_kafka_metadata_cache_purge(rk, rd_false /*not observers*/);
-
-
- for (i = 0; i < md->topic_cnt; i++)
- rd_kafka_metadata_cache_insert(rk, &md->topics[i], now,
- ts_expires);
-
- /* Update expiry timer */
- if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
- rd_kafka_timer_start(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_expiry_tmr,
- rkmce->rkmce_ts_expires - now,
- rd_kafka_metadata_cache_evict_tmr_cb, rk);
-
- if (md->topic_cnt > 0 || abs_update)
- rd_kafka_metadata_cache_propagate_changes(rk);
-}
-
-
-/**
- * @brief Remove cache hints for topics in \p topics.
- * This is done when the Metadata response has been parsed and
- * hints have been replaced with actual topic information, so this will
- * only remove unmatched topics from the cache.
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk,
- const rd_list_t *topics) {
- const char *topic;
- int i;
- int cnt = 0;
-
- RD_LIST_FOREACH(topic, topics, i) {
- struct rd_kafka_metadata_cache_entry *rkmce;
-
- if (!(rkmce =
- rd_kafka_metadata_cache_find(rk, topic, 0 /*any*/)) ||
- RD_KAFKA_METADATA_CACHE_VALID(rkmce))
- continue;
-
- rd_kafka_metadata_cache_delete(rk, rkmce, 1 /*unlink avl*/);
- cnt++;
- }
-
- if (cnt > 0) {
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Purged %d/%d cached topic hint(s)", cnt,
- rd_list_cnt(topics));
- rd_kafka_metadata_cache_propagate_changes(rk);
- }
-}
-
-
-/**
- * @brief Inserts a non-valid entry for topics in \p topics indicating
- * that a MetadataRequest is in progress.
- * This avoids sending multiple MetadataRequests for the same topics
- * if there are already outstanding requests, see
- * \c rd_kafka_metadata_refresh_topics().
- *
- * @remark These non-valid cache entries' expire time is set to the
- * MetadataRequest timeout.
- *
- * @param dst rd_list_t(char *topicname): if not NULL: populated with
- * topics that were added as hints to cache, e.g., topics to query.
- * @param err is the error to set on hint cache entries,
- * typically ERR__WAIT_CACHE.
- * @param replace replace existing valid entries
- *
- * @returns the number of topic hints inserted.
- *
- * @locks_required rd_kafka_wrlock()
- */
-int rd_kafka_metadata_cache_hint(rd_kafka_t *rk,
- const rd_list_t *topics,
- rd_list_t *dst,
- rd_kafka_resp_err_t err,
- rd_bool_t replace) {
- const char *topic;
- rd_ts_t now = rd_clock();
- rd_ts_t ts_expires = now + (rk->rk_conf.socket_timeout_ms * 1000);
- int i;
- int cnt = 0;
-
- RD_LIST_FOREACH(topic, topics, i) {
- rd_kafka_metadata_topic_t mtopic = {.topic = (char *)topic,
- .err = err};
- /*const*/ struct rd_kafka_metadata_cache_entry *rkmce;
-
- /* !replace: Don't overwrite valid entries */
- if (!replace && (rkmce = rd_kafka_metadata_cache_find(
- rk, topic, 0 /*any*/))) {
- if (RD_KAFKA_METADATA_CACHE_VALID(rkmce) ||
- (dst && rkmce->rkmce_mtopic.err !=
- RD_KAFKA_RESP_ERR__NOENT))
- continue;
- rkmce->rkmce_mtopic.err = err;
- /* FALLTHRU */
- }
-
- rd_kafka_metadata_cache_insert(rk, &mtopic, now, ts_expires);
- cnt++;
-
- if (dst)
- rd_list_add(dst, rd_strdup(topic));
- }
-
- if (cnt > 0)
- rd_kafka_dbg(rk, METADATA, "METADATA",
- "Hinted cache of %d/%d topic(s) being queried",
- cnt, rd_list_cnt(topics));
-
- return cnt;
-}
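-
-/* Illustrative sketch (not upstream code) of the hint flow described
- * above: before sending a MetadataRequest the caller hints the cache,
- * and only the topics returned in q_topics (those not already being
- * queried) need to go into the request. */
-#if 0 /* example only, not compiled */
-static void example_hint_before_request(rd_kafka_t *rk,
- const rd_list_t *wanted) {
- rd_list_t q_topics;
-
- rd_list_init(&q_topics, rd_list_cnt(wanted), rd_free);
-
- rd_kafka_wrlock(rk);
- rd_kafka_metadata_cache_hint(rk, wanted, &q_topics,
- RD_KAFKA_RESP_ERR__WAIT_CACHE,
- rd_false /*dont replace*/);
- rd_kafka_wrunlock(rk);
-
- /* ... send MetadataRequest for q_topics, if non-empty ... */
-
- rd_list_destroy(&q_topics);
-}
-#endif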
-
-
-/**
- * @brief Same as rd_kafka_metadata_cache_hint() but takes
- * a topic+partition list as input instead.
- *
- * @locks_acquired rd_kafka_wrlock()
- */
-int rd_kafka_metadata_cache_hint_rktparlist(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *dst,
- int replace) {
- rd_list_t topics;
- int r;
-
- rd_list_init(&topics, rktparlist->cnt, rd_free);
- rd_kafka_topic_partition_list_get_topic_names(rktparlist, &topics,
- 0 /*dont include regex*/);
- rd_kafka_wrlock(rk);
- r = rd_kafka_metadata_cache_hint(
- rk, &topics, dst, RD_KAFKA_RESP_ERR__WAIT_CACHE, replace);
- rd_kafka_wrunlock(rk);
-
- rd_list_destroy(&topics);
- return r;
-}
-
-
-/**
- * @brief Cache entry comparator (on topic name)
- */
-static int rd_kafka_metadata_cache_entry_cmp(const void *_a, const void *_b) {
- const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b;
- return strcmp(a->rkmce_mtopic.topic, b->rkmce_mtopic.topic);
-}
-
-
-/**
- * @brief Initialize the metadata cache
- *
- * @locks rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_init(rd_kafka_t *rk) {
- rd_avl_init(&rk->rk_metadata_cache.rkmc_avl,
- rd_kafka_metadata_cache_entry_cmp, 0);
- TAILQ_INIT(&rk->rk_metadata_cache.rkmc_expiry);
- mtx_init(&rk->rk_metadata_cache.rkmc_full_lock, mtx_plain);
- mtx_init(&rk->rk_metadata_cache.rkmc_cnd_lock, mtx_plain);
- cnd_init(&rk->rk_metadata_cache.rkmc_cnd);
- rd_list_init(&rk->rk_metadata_cache.rkmc_observers, 8,
- rd_kafka_enq_once_trigger_destroy);
-}
-
-/**
- * @brief Purge and destroy metadata cache.
- *
- * @locks_required rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk) {
- rd_list_destroy(&rk->rk_metadata_cache.rkmc_observers);
- rd_kafka_timer_stop(&rk->rk_timers,
- &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/);
- rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/);
- mtx_destroy(&rk->rk_metadata_cache.rkmc_full_lock);
- mtx_destroy(&rk->rk_metadata_cache.rkmc_cnd_lock);
- cnd_destroy(&rk->rk_metadata_cache.rkmc_cnd);
- rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl);
-}
-
-
-
-/**
- * @brief Add eonce to list of async cache observers.
- *
- * @locks_required rd_kafka_wrlock()
- */
-void rd_kafka_metadata_cache_wait_state_change_async(
- rd_kafka_t *rk,
- rd_kafka_enq_once_t *eonce) {
- rd_kafka_enq_once_add_source(eonce, "wait metadata cache change");
- rd_list_add(&rk->rk_metadata_cache.rkmc_observers, eonce);
-}
-
-
-/**
- * @brief Wait for cache update, or timeout.
- *
- * @returns 1 on cache update or 0 on timeout.
- * @locks none
- * @locality any
- */
-int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms) {
- int r;
-#if ENABLE_DEVEL
- rd_ts_t ts_start = rd_clock();
-#endif
- mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock);
- r = cnd_timedwait_ms(&rk->rk_metadata_cache.rkmc_cnd,
- &rk->rk_metadata_cache.rkmc_cnd_lock, timeout_ms);
- mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock);
-
-#if ENABLE_DEVEL
- rd_kafka_dbg(rk, METADATA, "CACHEWAIT", "%s wait took %dms: %s",
- __FUNCTION__, (int)((rd_clock() - ts_start) / 1000),
- r == thrd_success ? "succeeded" : "timed out");
-#endif
- return r == thrd_success;
-}
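-
-/* Illustrative sketch (not upstream code): poll the cache for a
- * hypothetical topic to appear, blocking up to timeout_ms per iteration
- * for any cache change. */
-#if 0 /* example only, not compiled */
-static rd_bool_t example_wait_for_topic(rd_kafka_t *rk, const char *topic,
- int timeout_ms) {
- const rd_kafka_metadata_topic_t *mtopic;
-
- while (1) {
- rd_kafka_rdlock(rk);
- mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, 1 /*valid*/);
- rd_kafka_rdunlock(rk);
-
- if (mtopic)
- return rd_true;
-
- if (!rd_kafka_metadata_cache_wait_change(rk, timeout_ms))
- return rd_false; /* timed out */
- }
-}
-#endif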
-
-
-/**
- * @brief eonce trigger callback for rd_list_apply() call in
- * rd_kafka_metadata_cache_propagate_changes()
- */
-static int
-rd_kafka_metadata_cache_propagate_changes_trigger_eonce(void *elem,
- void *opaque) {
- rd_kafka_enq_once_t *eonce = elem;
- rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
- "wait metadata cache change");
- return 0; /* remove eonce from list */
-}
-
-
-/**
- * @brief Propagate that the cache changed (but not what changed) to
- * any cnd listeners and eonce observers.
- * @locks_required rd_kafka_wrlock(rk)
- * @locks_acquired rkmc_cnd_lock
- * @locality any
- */
-void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk) {
- mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock);
- cnd_broadcast(&rk->rk_metadata_cache.rkmc_cnd);
- mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock);
-
- /* Trigger observers */
- rd_list_apply(&rk->rk_metadata_cache.rkmc_observers,
- rd_kafka_metadata_cache_propagate_changes_trigger_eonce,
- NULL);
-}
-
-/**
- * @returns the shared metadata for a topic, or NULL if not found in
- * cache.
- *
- * @locks rd_kafka_*lock()
- */
-const rd_kafka_metadata_topic_t *
-rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk,
- const char *topic,
- int valid) {
- struct rd_kafka_metadata_cache_entry *rkmce;
-
- if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, valid)))
- return NULL;
-
- return &rkmce->rkmce_mtopic;
-}
-
-
-
-/**
- * @brief Looks up the shared metadata for a partition along with its topic.
- *
- * Cache entries with errors (such as auth errors) will not be returned unless
- * \p valid is set to false.
- *
- * @param mtopicp: pointer to topic metadata
- * @param mpartp: pointer to partition metadata
- * @param valid: only return valid entries (no hints)
- *
- * @returns -1 if topic was not found in cache, 0 if topic was found
- * but not the partition, 1 if both topic and partition were found.
- *
- * @locks rd_kafka_*lock()
- */
-int rd_kafka_metadata_cache_topic_partition_get(
- rd_kafka_t *rk,
- const rd_kafka_metadata_topic_t **mtopicp,
- const rd_kafka_metadata_partition_t **mpartp,
- const char *topic,
- int32_t partition,
- int valid) {
-
- const rd_kafka_metadata_topic_t *mtopic;
- const rd_kafka_metadata_partition_t *mpart;
- rd_kafka_metadata_partition_t skel = {.id = partition};
-
- *mtopicp = NULL;
- *mpartp = NULL;
-
- if (!(mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, valid)))
- return -1;
-
- *mtopicp = mtopic;
-
- if (mtopic->err)
- return -1;
-
- /* Partitions array may be sparse so use bsearch lookup. */
- mpart = bsearch(&skel, mtopic->partitions, mtopic->partition_cnt,
- sizeof(*mtopic->partitions),
- rd_kafka_metadata_partition_id_cmp);
-
- if (!mpart)
- return 0;
-
- *mpartp = mpart;
-
- return 1;
-}
-
-
-/**
- * @returns the number of topics in \p topics that are in the cache.
- *
- * @param topics rd_list(const char *): topic names
- * @param metadata_agep: the age of the oldest entry is returned here.
- *
- * @locks rd_kafka_*lock()
- */
-int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk,
- const rd_list_t *topics,
- int *metadata_agep) {
- const char *topic;
- int i;
- int cnt = 0;
- int max_age = -1;
-
- RD_LIST_FOREACH(topic, topics, i) {
- const struct rd_kafka_metadata_cache_entry *rkmce;
- int age;
-
- if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic,
- 1 /*valid only*/)))
- continue;
-
- age = (int)((rd_clock() - rkmce->rkmce_ts_insert) / 1000);
- if (age > max_age)
- max_age = age;
- cnt++;
- }
-
- *metadata_agep = max_age;
-
- return cnt;
-}
-
-
-/**
- * @brief Add all topics in the metadata cache that lack current metadata
- *        (i.e., hint entries) to \p topics, avoiding duplicates.
- *
- * Element type is (char *topic_name).
- *
- * @returns the number of elements added to \p topics
- *
- * @locks_required rd_kafka_*lock()
- */
-int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics) {
- const struct rd_kafka_metadata_cache_entry *rkmce;
- int precnt = rd_list_cnt(topics);
-
- TAILQ_FOREACH(rkmce, &rk->rk_metadata_cache.rkmc_expiry, rkmce_link) {
- /* Ignore topics that have up-to-date metadata info */
- if (RD_KAFKA_METADATA_CACHE_VALID(rkmce))
- continue;
-
- if (rd_list_find(topics, rkmce->rkmce_mtopic.topic,
- rd_list_cmp_str))
- continue;
-
- rd_list_add(topics, rd_strdup(rkmce->rkmce_mtopic.topic));
- }
-
- return rd_list_cnt(topics) - precnt;
-}
-
-
-/**
- * @brief Dump cache to \p fp
- *
- * @locks rd_kafka_*lock()
- */
-void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk) {
- const struct rd_kafka_metadata_cache *rkmc = &rk->rk_metadata_cache;
- const struct rd_kafka_metadata_cache_entry *rkmce;
- rd_ts_t now = rd_clock();
-
- fprintf(fp, "Metadata cache with %d entries:\n", rkmc->rkmc_cnt);
- TAILQ_FOREACH(rkmce, &rkmc->rkmc_expiry, rkmce_link) {
- fprintf(fp,
- " %s (inserted %dms ago, expires in %dms, "
- "%d partition(s), %s)%s%s\n",
- rkmce->rkmce_mtopic.topic,
- (int)((now - rkmce->rkmce_ts_insert) / 1000),
- (int)((rkmce->rkmce_ts_expires - now) / 1000),
- rkmce->rkmce_mtopic.partition_cnt,
- RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? "valid" : "hint",
- rkmce->rkmce_mtopic.err ? " error: " : "",
- rkmce->rkmce_mtopic.err
- ? rd_kafka_err2str(rkmce->rkmce_mtopic.err)
- : "");
- }
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c
deleted file mode 100644
index ae7940533..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c
+++ /dev/null
@@ -1,2585 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Mocks
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdbuf.h"
-#include "rdrand.h"
-#include "rdkafka_interceptor.h"
-#include "rdkafka_mock_int.h"
-#include "rdkafka_transport_int.h"
-
-#include <stdarg.h>
-
-static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster);
-
-
-static rd_kafka_mock_broker_t *
-rd_kafka_mock_broker_find(const rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id) {
- const rd_kafka_mock_broker_t *mrkb;
-
- TAILQ_FOREACH(mrkb, &mcluster->brokers, link)
- if (mrkb->id == broker_id)
- return (rd_kafka_mock_broker_t *)mrkb;
-
- return NULL;
-}
-
-
-
-/**
- * @brief Unlink and free message set.
- */
-static void rd_kafka_mock_msgset_destroy(rd_kafka_mock_partition_t *mpart,
- rd_kafka_mock_msgset_t *mset) {
- const rd_kafka_mock_msgset_t *next = TAILQ_NEXT(mset, link);
-
- /* Removing last messageset */
- if (!next)
- mpart->start_offset = mpart->end_offset;
- else if (mset == TAILQ_FIRST(&mpart->msgsets))
- /* Removing first messageset */
- mpart->start_offset = next->first_offset;
-
- if (mpart->update_follower_start_offset)
- mpart->follower_start_offset = mpart->start_offset;
-
- rd_assert(mpart->cnt > 0);
- mpart->cnt--;
- mpart->size -= RD_KAFKAP_BYTES_LEN(&mset->bytes);
- TAILQ_REMOVE(&mpart->msgsets, mset, link);
- rd_free(mset);
-}
-
-
-/**
- * @brief Create a new msgset object with a copy of \p bytes
- * and appends it to the partition log.
- */
-static rd_kafka_mock_msgset_t *
-rd_kafka_mock_msgset_new(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_bytes_t *bytes,
- size_t msgcnt) {
- rd_kafka_mock_msgset_t *mset;
- size_t totsize = sizeof(*mset) + RD_KAFKAP_BYTES_LEN(bytes);
- int64_t BaseOffset;
- int32_t PartitionLeaderEpoch;
- int64_t orig_start_offset = mpart->start_offset;
-
- rd_assert(!RD_KAFKAP_BYTES_IS_NULL(bytes));
-
- mset = rd_malloc(totsize);
- rd_assert(mset != NULL);
-
- mset->first_offset = mpart->end_offset;
- mset->last_offset = mset->first_offset + msgcnt - 1;
- mpart->end_offset = mset->last_offset + 1;
- if (mpart->update_follower_end_offset)
- mpart->follower_end_offset = mpart->end_offset;
- mpart->cnt++;
-
- mset->bytes.len = bytes->len;
- mset->leader_epoch = mpart->leader_epoch;
-
-
- mset->bytes.data = (void *)(mset + 1);
- memcpy((void *)mset->bytes.data, bytes->data, mset->bytes.len);
- mpart->size += mset->bytes.len;
-
- /* Update the base Offset in the MessageSet with the
- * actual absolute log offset. */
- BaseOffset = htobe64(mset->first_offset);
- memcpy((void *)mset->bytes.data, &BaseOffset, sizeof(BaseOffset));
- /* Update the base PartitionLeaderEpoch in the MessageSet with the
- * actual partition leader epoch. */
- PartitionLeaderEpoch = htobe32(mset->leader_epoch);
- memcpy(((char *)mset->bytes.data) + 12, &PartitionLeaderEpoch,
- sizeof(PartitionLeaderEpoch));
-
- /* Remove old msgsets until within limits */
- while (mpart->cnt > 1 &&
- (mpart->cnt > mpart->max_cnt || mpart->size > mpart->max_size))
- rd_kafka_mock_msgset_destroy(mpart,
- TAILQ_FIRST(&mpart->msgsets));
-
- TAILQ_INSERT_TAIL(&mpart->msgsets, mset, link);
-
- rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK",
- "Broker %" PRId32 ": Log append %s [%" PRId32
- "] "
- "%" PRIusz " messages, %" PRId32
- " bytes at offset %" PRId64 " (log now %" PRId64
- "..%" PRId64
- ", "
- "original start %" PRId64 ")",
- mpart->leader->id, mpart->topic->name, mpart->id, msgcnt,
- RD_KAFKAP_BYTES_LEN(&mset->bytes), mset->first_offset,
- mpart->start_offset, mpart->end_offset, orig_start_offset);
-
- return mset;
-}
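-
-/* For reference (not upstream code): the MessageSet (RecordBatch) v2
- * header fields patched above sit at fixed offsets from the start of
- * the batch, which is what makes the in-place memcpy()s safe:
- * offset 0: BaseOffset (int64, big-endian)
- * offset 8: Length (int32)
- * offset 12: PartitionLeaderEpoch (int32, big-endian)
- * offset 16: MagicByte (int8, == 2 here)
- * A minimal sketch of the same patching on a raw batch buffer: */
-#if 0 /* example only, not compiled */
-static void example_patch_batch_header(void *batch, int64_t base_offset,
- int32_t leader_epoch) {
- int64_t BaseOffset = htobe64(base_offset);
- int32_t PartitionLeaderEpoch = htobe32(leader_epoch);
-
- memcpy(batch, &BaseOffset, sizeof(BaseOffset));
- memcpy((char *)batch + 12, &PartitionLeaderEpoch,
- sizeof(PartitionLeaderEpoch));
-}
-#endif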
-
-/**
- * @brief Find message set containing \p offset
- */
-const rd_kafka_mock_msgset_t *
-rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart,
- int64_t offset,
- rd_bool_t on_follower) {
- const rd_kafka_mock_msgset_t *mset;
-
- if (!on_follower &&
- (offset < mpart->start_offset || offset > mpart->end_offset))
- return NULL;
-
- if (on_follower && (offset < mpart->follower_start_offset ||
- offset > mpart->follower_end_offset))
- return NULL;
-
- /* FIXME: Maintain an index */
-
- TAILQ_FOREACH(mset, &mpart->msgsets, link) {
- if (mset->first_offset <= offset && offset <= mset->last_offset)
- return mset;
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Looks up or creates a new pidstate for the given partition and PID.
- *
- * The pidstate is used to verify per-partition per-producer BaseSequences
- * for the idempotent/txn producer.
- */
-static rd_kafka_mock_pid_t *
-rd_kafka_mock_partition_pidstate_get(rd_kafka_mock_partition_t *mpart,
- const rd_kafka_mock_pid_t *mpid) {
- rd_kafka_mock_pid_t *pidstate;
- size_t tidlen;
-
- pidstate = rd_list_find(&mpart->pidstates, mpid, rd_kafka_mock_pid_cmp);
- if (pidstate)
- return pidstate;
-
- tidlen = strlen(mpid->TransactionalId);
- pidstate = rd_malloc(sizeof(*pidstate) + tidlen);
- pidstate->pid = mpid->pid;
- memcpy(pidstate->TransactionalId, mpid->TransactionalId, tidlen);
- pidstate->TransactionalId[tidlen] = '\0';
-
- pidstate->lo = pidstate->hi = pidstate->window = 0;
- memset(pidstate->seq, 0, sizeof(pidstate->seq));
-
- rd_list_add(&mpart->pidstates, pidstate);
-
- return pidstate;
-}
-
-
-/**
- * @brief Validate ProduceRequest records in \p rkbuf.
- *
- * @warning The \p rkbuf must not be read, just peek()ed.
- *
- * This is a very selective validation, currently only:
- * - verify idempotency TransactionalId,PID,Epoch,Seq
- */
-static rd_kafka_resp_err_t
-rd_kafka_mock_validate_records(rd_kafka_mock_partition_t *mpart,
- rd_kafka_buf_t *rkbuf,
- size_t RecordCount,
- const rd_kafkap_str_t *TransactionalId,
- rd_bool_t *is_dupd) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster;
- rd_kafka_mock_pid_t *mpid;
- rd_kafka_mock_pid_t *mpidstate = NULL;
- rd_kafka_pid_t pid;
- int32_t expected_BaseSequence = -1, BaseSequence = -1;
- rd_kafka_resp_err_t err;
-
- *is_dupd = rd_false;
-
- if (!TransactionalId || RD_KAFKAP_STR_LEN(TransactionalId) < 1)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rd_kafka_buf_peek_i64(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerId,
- &pid.id);
- rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch,
- &pid.epoch);
- rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_BaseSequence,
- &BaseSequence);
-
- mtx_lock(&mcluster->lock);
- err = rd_kafka_mock_pid_find(mcluster, TransactionalId, pid, &mpid);
- mtx_unlock(&mcluster->lock);
-
- if (likely(!err)) {
-
- if (mpid->pid.epoch != pid.epoch)
- err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
-
- /* Each partition tracks the 5 last Produce requests per PID. */
- mpidstate = rd_kafka_mock_partition_pidstate_get(mpart, mpid);
-
- expected_BaseSequence = mpidstate->seq[mpidstate->hi];
-
- /* A BaseSequence within the range of the last 5 requests is
- * considered a legal duplicate and will be successfully acked
- * but not written to the log. */
- if (BaseSequence < mpidstate->seq[mpidstate->lo])
- err = RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER;
- else if (BaseSequence > mpidstate->seq[mpidstate->hi])
- err = RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER;
- else if (BaseSequence != expected_BaseSequence)
- *is_dupd = rd_true;
- }
-
- if (unlikely(err)) {
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Broker %" PRId32 ": Log append %s [%" PRId32
- "] failed: PID mismatch: TransactionalId=%.*s "
- "expected %s BaseSeq %" PRId32
- ", not %s BaseSeq %" PRId32 ": %s",
- mpart->leader->id, mpart->topic->name, mpart->id,
- RD_KAFKAP_STR_PR(TransactionalId),
- mpid ? rd_kafka_pid2str(mpid->pid) : "n/a",
- expected_BaseSequence, rd_kafka_pid2str(pid),
- BaseSequence, rd_kafka_err2name(err));
- return err;
- }
-
- /* Update BaseSequence window */
- if (unlikely(mpidstate->window < 5))
- mpidstate->window++;
- else
- mpidstate->lo = (mpidstate->lo + 1) % mpidstate->window;
- mpidstate->hi = (mpidstate->hi + 1) % mpidstate->window;
- mpidstate->seq[mpidstate->hi] = (int32_t)(BaseSequence + RecordCount);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- return rkbuf->rkbuf_err;
-}
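-
-/* Illustration only (hypothetical, standalone): the 5-deep BaseSequence
- * window maintained above, extracted into a minimal form. seq_window_t
- * and seq_window_check() do not exist in librdkafka; they merely mirror
- * the classification and window update performed by
- * rd_kafka_mock_validate_records(). */
-#include <stdint.h>
-
-typedef struct seq_window_s {
-        int32_t seq[5]; /* Last acked BaseSequence+RecordCount values */
-        int lo, hi, window;
-} seq_window_t; /* Zero-initialize, like a freshly created pidstate */
-
-typedef enum {
-        SEQ_OK,              /* Accepted and written to the log */
-        SEQ_OK_DUP,          /* Legal duplicate: acked but not logged */
-        SEQ_ERR_DUP,         /* Older than the tracked window */
-        SEQ_ERR_OUT_OF_ORDER /* Gap after the last acked sequence */
-} seq_res_t;
-
-static seq_res_t seq_window_check(seq_window_t *w,
-                                  int32_t BaseSequence,
-                                  int32_t RecordCount) {
-        seq_res_t res = SEQ_OK;
-
-        if (BaseSequence < w->seq[w->lo])
-                return SEQ_ERR_DUP;
-        if (BaseSequence > w->seq[w->hi])
-                return SEQ_ERR_OUT_OF_ORDER;
-        if (BaseSequence != w->seq[w->hi])
-                res = SEQ_OK_DUP;
-
-        /* Slide the window, recording the next expected sequence */
-        if (w->window < 5)
-                w->window++;
-        else
-                w->lo = (w->lo + 1) % w->window;
-        w->hi = (w->hi + 1) % w->window;
-        w->seq[w->hi] = BaseSequence + RecordCount;
-
-        return res;
-}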
-
-/**
- * @brief Append the MessageSets in \p records to the \p mpart partition log.
- *
- * @param BaseOffset will contain the first assigned offset of the message set.
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_bytes_t *records,
- const rd_kafkap_str_t *TransactionalId,
- int64_t *BaseOffset) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_buf_t *rkbuf;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int8_t MagicByte;
- int32_t RecordCount;
- int16_t Attributes;
- rd_kafka_mock_msgset_t *mset;
- rd_bool_t is_dup = rd_false;
-
-        /* Partially parse the MessageSet in \p records to get
-         * the message count. */
- rkbuf = rd_kafka_buf_new_shadow(records->data,
- RD_KAFKAP_BYTES_LEN(records), NULL);
-
- rd_kafka_buf_peek_i8(rkbuf, RD_KAFKAP_MSGSET_V2_OF_MagicByte,
- &MagicByte);
- if (MagicByte != 2) {
- /* We only support MsgVersion 2 for now */
- err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION;
- goto err;
- }
-
- rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_RecordCount,
- &RecordCount);
- rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_Attributes,
- &Attributes);
-
- if (RecordCount < 1 ||
- (!(Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
- (size_t)RecordCount > RD_KAFKAP_BYTES_LEN(records) /
- RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD)) {
- err = RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE;
- goto err;
- }
-
- if ((err = rd_kafka_mock_validate_records(
- mpart, rkbuf, (size_t)RecordCount, TransactionalId, &is_dup)))
- goto err;
-
- /* If this is a legit duplicate, don't write it to the log. */
- if (is_dup)
- goto err;
-
- rd_kafka_buf_destroy(rkbuf);
-
- mset = rd_kafka_mock_msgset_new(mpart, records, (size_t)RecordCount);
-
- *BaseOffset = mset->first_offset;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- rd_kafka_buf_destroy(rkbuf);
- return err;
-}
-
-
-/**
- * @brief Set the partition leader, or NULL for leader-less.
- */
-static void
-rd_kafka_mock_partition_set_leader0(rd_kafka_mock_partition_t *mpart,
- rd_kafka_mock_broker_t *mrkb) {
- mpart->leader = mrkb;
- mpart->leader_epoch++;
-}
-
-
-/**
- * @brief Verifies that the client-provided leader_epoch matches that of the
- * partition, else returns the appropriate error.
- */
-rd_kafka_resp_err_t rd_kafka_mock_partition_leader_epoch_check(
- const rd_kafka_mock_partition_t *mpart,
- int32_t leader_epoch) {
- if (likely(leader_epoch == -1 || mpart->leader_epoch == leader_epoch))
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- else if (mpart->leader_epoch < leader_epoch)
- return RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH;
- else if (mpart->leader_epoch > leader_epoch)
- return RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH;
-
- /* NOTREACHED, but avoids warning */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Returns the end offset (last offset + 1)
- * for the passed leader epoch in the mock partition.
- *
- * @param mpart The mock partition
- * @param leader_epoch The leader epoch
- *
- * @return The end offset for the passed \p leader_epoch in \p mpart
- */
-int64_t rd_kafka_mock_partition_offset_for_leader_epoch(
- const rd_kafka_mock_partition_t *mpart,
- int32_t leader_epoch) {
- const rd_kafka_mock_msgset_t *mset = NULL;
-
- if (leader_epoch < 0)
- return -1;
-
- TAILQ_FOREACH_REVERSE(mset, &mpart->msgsets,
- rd_kafka_mock_msgset_tailq_s, link) {
- if (mset->leader_epoch == leader_epoch)
- return mset->last_offset + 1;
- }
-
- return -1;
-}
-
-
-/**
- * @brief Automatically assign replicas for partition
- */
-static void
-rd_kafka_mock_partition_assign_replicas(rd_kafka_mock_partition_t *mpart) {
- rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster;
- int replica_cnt =
- RD_MIN(mcluster->defaults.replication_factor, mcluster->broker_cnt);
- rd_kafka_mock_broker_t *mrkb;
- int i = 0;
-
- if (mpart->replicas)
- rd_free(mpart->replicas);
-
- mpart->replicas = rd_calloc(replica_cnt, sizeof(*mpart->replicas));
- mpart->replica_cnt = replica_cnt;
-
-        /* FIXME: perhaps randomize this using reservoir sampling */
- TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
- if (i == mpart->replica_cnt)
- break;
- mpart->replicas[i++] = mrkb;
- }
-
- /* Select a random leader */
- rd_kafka_mock_partition_set_leader0(
- mpart, mpart->replicas[rd_jitter(0, replica_cnt - 1)]);
-}
-
-
-
-/**
- * @brief Unlink and destroy committed offset
- */
-static void
-rd_kafka_mock_committed_offset_destroy(rd_kafka_mock_partition_t *mpart,
- rd_kafka_mock_committed_offset_t *coff) {
- rd_kafkap_str_destroy(coff->metadata);
- TAILQ_REMOVE(&mpart->committed_offsets, coff, link);
- rd_free(coff);
-}
-
-
-/**
- * @brief Find previously committed offset for group.
- */
-rd_kafka_mock_committed_offset_t *
-rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_str_t *group) {
- const rd_kafka_mock_committed_offset_t *coff;
-
- TAILQ_FOREACH(coff, &mpart->committed_offsets, link) {
- if (!rd_kafkap_str_cmp_str(group, coff->group))
- return (rd_kafka_mock_committed_offset_t *)coff;
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Commit offset for group
- */
-rd_kafka_mock_committed_offset_t *
-rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_str_t *group,
- int64_t offset,
- const rd_kafkap_str_t *metadata) {
- rd_kafka_mock_committed_offset_t *coff;
-
- if (!(coff = rd_kafka_mock_committed_offset_find(mpart, group))) {
- size_t slen = (size_t)RD_KAFKAP_STR_LEN(group);
-
- coff = rd_malloc(sizeof(*coff) + slen + 1);
-
- coff->group = (char *)(coff + 1);
- memcpy(coff->group, group->str, slen);
- coff->group[slen] = '\0';
-
- coff->metadata = NULL;
-
- TAILQ_INSERT_HEAD(&mpart->committed_offsets, coff, link);
- }
-
- if (coff->metadata)
- rd_kafkap_str_destroy(coff->metadata);
-
- coff->metadata = rd_kafkap_str_copy(metadata);
-
- coff->offset = offset;
-
- rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK",
- "Topic %s [%" PRId32 "] committing offset %" PRId64
- " for group %.*s",
- mpart->topic->name, mpart->id, offset,
- RD_KAFKAP_STR_PR(group));
-
- return coff;
-}
-
-/**
- * @brief Destroy the partition's resources; \p mpart itself is not freed.
- */
-static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) {
- rd_kafka_mock_msgset_t *mset, *tmp;
- rd_kafka_mock_committed_offset_t *coff, *tmpcoff;
-
- TAILQ_FOREACH_SAFE(mset, &mpart->msgsets, link, tmp)
- rd_kafka_mock_msgset_destroy(mpart, mset);
-
- TAILQ_FOREACH_SAFE(coff, &mpart->committed_offsets, link, tmpcoff)
- rd_kafka_mock_committed_offset_destroy(mpart, coff);
-
- rd_list_destroy(&mpart->pidstates);
-
- rd_free(mpart->replicas);
-}
-
-
-static void rd_kafka_mock_partition_init(rd_kafka_mock_topic_t *mtopic,
- rd_kafka_mock_partition_t *mpart,
- int id,
- int replication_factor) {
- mpart->topic = mtopic;
- mpart->id = id;
-
- mpart->follower_id = -1;
- mpart->leader_epoch = -1; /* Start at -1 since assign_replicas() will
- * bump it right away to 0. */
-
- TAILQ_INIT(&mpart->msgsets);
-
- mpart->max_size = 1024 * 1024 * 5;
- mpart->max_cnt = 100000;
-
- mpart->update_follower_start_offset = rd_true;
- mpart->update_follower_end_offset = rd_true;
-
- TAILQ_INIT(&mpart->committed_offsets);
-
- rd_list_init(&mpart->pidstates, 0, rd_free);
-
- rd_kafka_mock_partition_assign_replicas(mpart);
-}
-
-rd_kafka_mock_partition_t *
-rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic,
- int32_t partition) {
- if (!mtopic || partition < 0 || partition >= mtopic->partition_cnt)
- return NULL;
-
- return (rd_kafka_mock_partition_t *)&mtopic->partitions[partition];
-}
-
-
-static void rd_kafka_mock_topic_destroy(rd_kafka_mock_topic_t *mtopic) {
- int i;
-
- for (i = 0; i < mtopic->partition_cnt; i++)
- rd_kafka_mock_partition_destroy(&mtopic->partitions[i]);
-
- TAILQ_REMOVE(&mtopic->cluster->topics, mtopic, link);
- mtopic->cluster->topic_cnt--;
-
- rd_free(mtopic->partitions);
- rd_free(mtopic->name);
- rd_free(mtopic);
-}
-
-
-static rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_new(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int partition_cnt,
- int replication_factor) {
- rd_kafka_mock_topic_t *mtopic;
- int i;
-
- mtopic = rd_calloc(1, sizeof(*mtopic));
- mtopic->name = rd_strdup(topic);
- mtopic->cluster = mcluster;
-
- mtopic->partition_cnt = partition_cnt;
- mtopic->partitions =
- rd_calloc(partition_cnt, sizeof(*mtopic->partitions));
-
- for (i = 0; i < partition_cnt; i++)
- rd_kafka_mock_partition_init(mtopic, &mtopic->partitions[i], i,
- replication_factor);
-
- TAILQ_INSERT_TAIL(&mcluster->topics, mtopic, link);
- mcluster->topic_cnt++;
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Created topic \"%s\" with %d partition(s) and "
- "replication-factor %d",
- mtopic->name, mtopic->partition_cnt, replication_factor);
-
- return mtopic;
-}
-
-
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster,
- const char *name) {
- const rd_kafka_mock_topic_t *mtopic;
-
- TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
- if (!strcmp(mtopic->name, name))
- return (rd_kafka_mock_topic_t *)mtopic;
- }
-
- return NULL;
-}
-
-
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *kname) {
- const rd_kafka_mock_topic_t *mtopic;
-
- TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
- if (!strncmp(mtopic->name, kname->str,
- RD_KAFKAP_STR_LEN(kname)) &&
- mtopic->name[RD_KAFKAP_STR_LEN(kname)] == '\0')
- return (rd_kafka_mock_topic_t *)mtopic;
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Create a topic using default settings.
- * The topic must not already exist.
- *
- * @param errp is set to the error code that a real cluster would return
- *             for a newly auto-created topic.
- */
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int partition_cnt,
- rd_kafka_resp_err_t *errp) {
- rd_assert(!rd_kafka_mock_topic_find(mcluster, topic));
- *errp = 0; // FIXME? RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE;
- return rd_kafka_mock_topic_new(mcluster, topic,
- partition_cnt == -1
- ? mcluster->defaults.partition_cnt
- : partition_cnt,
- mcluster->defaults.replication_factor);
-}
-
-
-/**
- * @brief Find or create topic.
- *
- * @param partition_cnt If not -1 and the topic does not exist, the automatic
- *                      topic creation will create this number of partitions.
- *                      Otherwise use the default.
- */
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_get(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int partition_cnt) {
- rd_kafka_mock_topic_t *mtopic;
- rd_kafka_resp_err_t err;
-
- if ((mtopic = rd_kafka_mock_topic_find(mcluster, topic)))
- return mtopic;
-
- return rd_kafka_mock_topic_auto_create(mcluster, topic, partition_cnt,
- &err);
-}
-
-/**
- * @brief Find or create a partition.
- *
- * @returns NULL if the topic already exists and \p partition is out of range.
- */
-static rd_kafka_mock_partition_t *
-rd_kafka_mock_partition_get(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition) {
- rd_kafka_mock_topic_t *mtopic;
- rd_kafka_resp_err_t err;
-
- if (!(mtopic = rd_kafka_mock_topic_find(mcluster, topic)))
- mtopic = rd_kafka_mock_topic_auto_create(mcluster, topic,
- partition + 1, &err);
-
- if (partition >= mtopic->partition_cnt)
- return NULL;
-
- return &mtopic->partitions[partition];
-}
-
-
-/**
- * @brief Set IO events for fd
- */
-static void
-rd_kafka_mock_cluster_io_set_events(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- int events) {
- int i;
-
- for (i = 0; i < mcluster->fd_cnt; i++) {
- if (mcluster->fds[i].fd == fd) {
- mcluster->fds[i].events |= events;
- return;
- }
- }
-
- rd_assert(!*"mock_cluster_io_set_events: fd not found");
-}
-
-/**
- * @brief Set or clear single IO events for fd
- */
-static void
-rd_kafka_mock_cluster_io_set_event(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- rd_bool_t set,
- int event) {
- int i;
-
- for (i = 0; i < mcluster->fd_cnt; i++) {
- if (mcluster->fds[i].fd == fd) {
- if (set)
- mcluster->fds[i].events |= event;
- else
- mcluster->fds[i].events &= ~event;
- return;
- }
- }
-
- rd_assert(!*"mock_cluster_io_set_event: fd not found");
-}
-
-
-/**
- * @brief Clear IO events for fd
- */
-static void
-rd_kafka_mock_cluster_io_clear_events(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- int events) {
- int i;
-
- for (i = 0; i < mcluster->fd_cnt; i++) {
- if (mcluster->fds[i].fd == fd) {
- mcluster->fds[i].events &= ~events;
- return;
- }
- }
-
-        rd_assert(!*"mock_cluster_io_clear_events: fd not found");
-}
-
-
-static void rd_kafka_mock_cluster_io_del(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd) {
- int i;
-
- for (i = 0; i < mcluster->fd_cnt; i++) {
- if (mcluster->fds[i].fd == fd) {
-                        if (i + 1 < mcluster->fd_cnt) {
-                                /* Shift the remaining fd_cnt-i-1 entries
-                                 * down over the removed one. */
-                                memmove(&mcluster->fds[i],
-                                        &mcluster->fds[i + 1],
-                                        sizeof(*mcluster->fds) *
-                                            (mcluster->fd_cnt - i - 1));
-                                memmove(&mcluster->handlers[i],
-                                        &mcluster->handlers[i + 1],
-                                        sizeof(*mcluster->handlers) *
-                                            (mcluster->fd_cnt - i - 1));
-                        }
-
- mcluster->fd_cnt--;
- return;
- }
- }
-
- rd_assert(!*"mock_cluster_io_del: fd not found");
-}
-
-
-/**
- * @brief Add \p fd to IO poll with initial desired events (POLLIN, et al.).
- */
-static void rd_kafka_mock_cluster_io_add(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- int events,
- rd_kafka_mock_io_handler_t handler,
- void *opaque) {
-
- if (mcluster->fd_cnt + 1 >= mcluster->fd_size) {
- mcluster->fd_size += 8;
-
- mcluster->fds = rd_realloc(
- mcluster->fds, sizeof(*mcluster->fds) * mcluster->fd_size);
- mcluster->handlers =
- rd_realloc(mcluster->handlers,
- sizeof(*mcluster->handlers) * mcluster->fd_size);
- }
-
- memset(&mcluster->fds[mcluster->fd_cnt], 0,
- sizeof(mcluster->fds[mcluster->fd_cnt]));
- mcluster->fds[mcluster->fd_cnt].fd = fd;
- mcluster->fds[mcluster->fd_cnt].events = events;
- mcluster->fds[mcluster->fd_cnt].revents = 0;
- mcluster->handlers[mcluster->fd_cnt].cb = handler;
- mcluster->handlers[mcluster->fd_cnt].opaque = opaque;
- mcluster->fd_cnt++;
-}
-
-
-static void rd_kafka_mock_connection_close(rd_kafka_mock_connection_t *mconn,
- const char *reason) {
- rd_kafka_buf_t *rkbuf;
-
- rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK",
- "Broker %" PRId32 ": Connection from %s closed: %s",
- mconn->broker->id,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT),
- reason);
-
- rd_kafka_mock_cgrps_connection_closed(mconn->broker->cluster, mconn);
-
- rd_kafka_timer_stop(&mconn->broker->cluster->timers, &mconn->write_tmr,
- rd_true);
-
- while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) {
- rd_kafka_bufq_deq(&mconn->outbufs, rkbuf);
- rd_kafka_buf_destroy(rkbuf);
- }
-
- if (mconn->rxbuf)
- rd_kafka_buf_destroy(mconn->rxbuf);
-
- rd_kafka_mock_cluster_io_del(mconn->broker->cluster,
- mconn->transport->rktrans_s);
- TAILQ_REMOVE(&mconn->broker->connections, mconn, link);
- rd_kafka_transport_close(mconn->transport);
- rd_free(mconn);
-}
-
-
-void rd_kafka_mock_connection_send_response(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp) {
-
- if (resp->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
- /* Empty struct tags */
- rd_kafka_buf_write_i8(resp, 0);
- }
-
- /* rkbuf_ts_sent might be initialized with a RTT delay, else 0. */
- resp->rkbuf_ts_sent += rd_clock();
-
- resp->rkbuf_reshdr.Size =
- (int32_t)(rd_buf_write_pos(&resp->rkbuf_buf) - 4);
-
- rd_kafka_buf_update_i32(resp, 0, resp->rkbuf_reshdr.Size);
-
- rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK",
- "Broker %" PRId32 ": Sending %sResponseV%hd to %s",
- mconn->broker->id,
- rd_kafka_ApiKey2str(resp->rkbuf_reqhdr.ApiKey),
- resp->rkbuf_reqhdr.ApiVersion,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
-
- /* Set up a buffer reader for sending the buffer. */
- rd_slice_init_full(&resp->rkbuf_reader, &resp->rkbuf_buf);
-
- rd_kafka_bufq_enq(&mconn->outbufs, resp);
-
- rd_kafka_mock_cluster_io_set_events(
- mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT);
-}
-
-
-/**
- * @returns 1 if a complete request is available, in which case \p rkbufp
- *          is set to the received request buffer,
- * 0 if a complete request is not yet available,
- * -1 on error.
- */
-static int
-rd_kafka_mock_connection_read_request(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t **rkbufp) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_t *rk = mcluster->rk;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *rkbuf;
- char errstr[128];
- ssize_t r;
-
- if (!(rkbuf = mconn->rxbuf)) {
- /* Initial read for a protocol request.
- * Allocate enough room for the protocol header
- * (where the total size is located). */
- rkbuf = mconn->rxbuf =
- rd_kafka_buf_new(2, RD_KAFKAP_REQHDR_SIZE);
-
- /* Protocol parsing code needs the rkb for logging */
- rkbuf->rkbuf_rkb = mconn->broker->cluster->dummy_rkb;
- rd_kafka_broker_keep(rkbuf->rkbuf_rkb);
-
- /* Make room for request header */
- rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_REQHDR_SIZE,
- RD_KAFKAP_REQHDR_SIZE);
- }
-
- /* Read as much data as possible from the socket into the
- * connection receive buffer. */
- r = rd_kafka_transport_recv(mconn->transport, &rkbuf->rkbuf_buf, errstr,
- sizeof(errstr));
- if (r == -1) {
- rd_kafka_dbg(
- rk, MOCK, "MOCK",
- "Broker %" PRId32
- ": Connection %s: "
- "receive failed: %s",
- mconn->broker->id,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT),
- errstr);
- return -1;
- } else if (r == 0) {
- return 0; /* Need more data */
- }
-
- if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == RD_KAFKAP_REQHDR_SIZE) {
- /* Received the full header, now check full request
- * size and allocate the buffer accordingly. */
-
- /* Initialize reader */
- rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0,
- RD_KAFKAP_REQHDR_SIZE);
-
- rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.Size);
- rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiKey);
- rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiVersion);
-
- if (rkbuf->rkbuf_reqhdr.ApiKey < 0 ||
- rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM) {
- rd_kafka_buf_parse_fail(
- rkbuf, "Invalid ApiKey %hd from %s",
- rkbuf->rkbuf_reqhdr.ApiKey,
- rd_sockaddr2str(&mconn->peer,
- RD_SOCKADDR2STR_F_PORT));
- RD_NOTREACHED();
- }
-
- /* Check if request version has flexible fields (KIP-482) */
- if (mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey]
- .FlexVersion != -1 &&
- rkbuf->rkbuf_reqhdr.ApiVersion >=
- mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey]
- .FlexVersion)
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;
-
-
- rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.CorrId);
-
- rkbuf->rkbuf_totlen = rkbuf->rkbuf_reqhdr.Size + 4;
-
- if (rkbuf->rkbuf_totlen < RD_KAFKAP_REQHDR_SIZE + 2 ||
- rkbuf->rkbuf_totlen >
- (size_t)rk->rk_conf.recv_max_msg_size) {
- rd_kafka_buf_parse_fail(
- rkbuf, "Invalid request size %" PRId32 " from %s",
- rkbuf->rkbuf_reqhdr.Size,
- rd_sockaddr2str(&mconn->peer,
- RD_SOCKADDR2STR_F_PORT));
- RD_NOTREACHED();
- }
-
- /* Now adjust totlen to skip the header */
- rkbuf->rkbuf_totlen -= RD_KAFKAP_REQHDR_SIZE;
-
- if (!rkbuf->rkbuf_totlen) {
- /* Empty request (valid) */
- *rkbufp = rkbuf;
- mconn->rxbuf = NULL;
- return 1;
- }
-
- /* Allocate space for the request payload */
- rd_buf_write_ensure(&rkbuf->rkbuf_buf, rkbuf->rkbuf_totlen,
- rkbuf->rkbuf_totlen);
-
- } else if (rd_buf_write_pos(&rkbuf->rkbuf_buf) -
- RD_KAFKAP_REQHDR_SIZE ==
- rkbuf->rkbuf_totlen) {
- /* The full request is now read into the buffer. */
-
- /* Set up response reader slice starting past the
- * request header */
- rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf,
- RD_KAFKAP_REQHDR_SIZE,
- rd_buf_len(&rkbuf->rkbuf_buf) -
- RD_KAFKAP_REQHDR_SIZE);
-
- /* For convenience, shave off the ClientId */
- rd_kafka_buf_skip_str(rkbuf);
-
- /* And the flexible versions header tags, if any */
- rd_kafka_buf_skip_tags(rkbuf);
-
- /* Return the buffer to the caller */
- *rkbufp = rkbuf;
- mconn->rxbuf = NULL;
- return 1;
- }
-
- return 0;
-
-
-err_parse:
- return -1;
-}
-
-rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request) {
- rd_kafka_buf_t *rkbuf = rd_kafka_buf_new(1, 100);
-
- /* Copy request header so the ApiVersion remains known */
- rkbuf->rkbuf_reqhdr = request->rkbuf_reqhdr;
-
- /* Size, updated later */
- rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* CorrId */
- rd_kafka_buf_write_i32(rkbuf, request->rkbuf_reqhdr.CorrId);
-
- if (request->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;
- /* Write empty response header tags, unless this is the
- * ApiVersionResponse which needs to be backwards compatible. */
- if (request->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion)
- rd_kafka_buf_write_i8(rkbuf, 0);
- }
-
- return rkbuf;
-}
-
-
-
-/**
- * @brief Parse protocol request.
- *
- * @returns 0 on success, -1 on parse error.
- */
-static int
-rd_kafka_mock_connection_parse_request(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_t *rk = mcluster->rk;
-
- if (rkbuf->rkbuf_reqhdr.ApiKey < 0 ||
- rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM ||
- !mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb) {
- rd_kafka_log(
- rk, LOG_ERR, "MOCK",
- "Broker %" PRId32
- ": unsupported %sRequestV%hd "
- "from %s",
- mconn->broker->id,
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
- return -1;
- }
-
- /* ApiVersionRequest handles future versions, for everything else
- * make sure the ApiVersion is supported. */
- if (rkbuf->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion &&
- !rd_kafka_mock_cluster_ApiVersion_check(
- mcluster, rkbuf->rkbuf_reqhdr.ApiKey,
- rkbuf->rkbuf_reqhdr.ApiVersion)) {
- rd_kafka_log(
- rk, LOG_ERR, "MOCK",
- "Broker %" PRId32
- ": unsupported %sRequest "
- "version %hd from %s",
- mconn->broker->id,
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
- return -1;
- }
-
- rd_kafka_dbg(rk, MOCK, "MOCK",
- "Broker %" PRId32 ": Received %sRequestV%hd from %s",
- mconn->broker->id,
- rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
- rkbuf->rkbuf_reqhdr.ApiVersion,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
-
- return mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb(mconn,
- rkbuf);
-}
-
-
-/**
- * @brief Timer callback to set the POLLOUT flag for a connection after
- * the delay has expired.
- */
-static void rd_kafka_mock_connection_write_out_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_mock_connection_t *mconn = arg;
-
- rd_kafka_mock_cluster_io_set_events(
- mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT);
-}
-
-
-/**
- * @brief Send as many bytes as possible from the output buffer.
- *
- * @returns 1 if all buffers were sent, 0 if more buffers need to be sent, or
- * -1 on error.
- */
-static ssize_t
-rd_kafka_mock_connection_write_out(rd_kafka_mock_connection_t *mconn) {
- rd_kafka_buf_t *rkbuf;
- rd_ts_t now = rd_clock();
- rd_ts_t rtt = mconn->broker->rtt;
-
- while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) {
- ssize_t r;
- char errstr[128];
- rd_ts_t ts_delay = 0;
-
- /* Connection delay/rtt is set. */
- if (rkbuf->rkbuf_ts_sent + rtt > now)
- ts_delay = rkbuf->rkbuf_ts_sent + rtt;
-
- /* Response is being delayed */
- if (rkbuf->rkbuf_ts_retry && rkbuf->rkbuf_ts_retry > now)
- ts_delay = rkbuf->rkbuf_ts_retry + rtt;
-
- if (ts_delay) {
- /* Delay response */
- rd_kafka_timer_start_oneshot(
- &mconn->broker->cluster->timers, &mconn->write_tmr,
- rd_false, ts_delay - now,
- rd_kafka_mock_connection_write_out_tmr_cb, mconn);
- break;
- }
-
- if ((r = rd_kafka_transport_send(mconn->transport,
- &rkbuf->rkbuf_reader, errstr,
- sizeof(errstr))) == -1)
- return -1;
-
- if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0)
- return 0; /* Partial send, continue next time */
-
- /* Entire buffer sent, unlink and free */
- rd_kafka_bufq_deq(&mconn->outbufs, rkbuf);
-
- rd_kafka_buf_destroy(rkbuf);
- }
-
- rd_kafka_mock_cluster_io_clear_events(
- mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT);
-
- return 1;
-}
-
-
-/**
- * @brief Call connection_write_out() for all the broker's connections.
- *
- * Used to check whether any responses should be sent after the RTT has changed.
- */
-static void
-rd_kafka_mock_broker_connections_write_out(rd_kafka_mock_broker_t *mrkb) {
- rd_kafka_mock_connection_t *mconn, *tmp;
-
- /* Need a safe loop since connections may be removed on send error */
- TAILQ_FOREACH_SAFE(mconn, &mrkb->connections, link, tmp) {
- rd_kafka_mock_connection_write_out(mconn);
- }
-}
-
-
-/**
- * @brief Per-Connection IO handler
- */
-static void rd_kafka_mock_connection_io(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- int events,
- void *opaque) {
- rd_kafka_mock_connection_t *mconn = opaque;
-
- if (events & POLLIN) {
- rd_kafka_buf_t *rkbuf;
- int r;
-
- while (1) {
- /* Read full request */
- r = rd_kafka_mock_connection_read_request(mconn,
- &rkbuf);
- if (r == 0)
- break; /* Need more data */
- else if (r == -1) {
- rd_kafka_mock_connection_close(mconn,
- "Read error");
- return;
- }
-
- /* Parse and handle request */
- r = rd_kafka_mock_connection_parse_request(mconn,
- rkbuf);
- rd_kafka_buf_destroy(rkbuf);
- if (r == -1) {
- rd_kafka_mock_connection_close(mconn,
- "Parse error");
- return;
- }
- }
- }
-
- if (events & (POLLERR | POLLHUP)) {
- rd_kafka_mock_connection_close(mconn, "Disconnected");
- return;
- }
-
- if (events & POLLOUT) {
- if (rd_kafka_mock_connection_write_out(mconn) == -1) {
- rd_kafka_mock_connection_close(mconn, "Write error");
- return;
- }
- }
-}
-
-
-/**
- * @brief Set connection as blocking; POLLIN will not be served.
- */
-void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn,
- rd_bool_t blocking) {
- rd_kafka_mock_cluster_io_set_event(mconn->broker->cluster,
- mconn->transport->rktrans_s,
- !blocking, POLLIN);
-}
-
-
-static rd_kafka_mock_connection_t *
-rd_kafka_mock_connection_new(rd_kafka_mock_broker_t *mrkb,
- rd_socket_t fd,
- const struct sockaddr_in *peer) {
- rd_kafka_mock_connection_t *mconn;
- rd_kafka_transport_t *rktrans;
- char errstr[128];
-
- if (!mrkb->up) {
- rd_socket_close(fd);
- return NULL;
- }
-
- rktrans = rd_kafka_transport_new(mrkb->cluster->dummy_rkb, fd, errstr,
- sizeof(errstr));
- if (!rktrans) {
- rd_kafka_log(mrkb->cluster->rk, LOG_ERR, "MOCK",
- "Failed to create transport for new "
- "mock connection: %s",
- errstr);
- rd_socket_close(fd);
- return NULL;
- }
-
- rd_kafka_transport_post_connect_setup(rktrans);
-
- mconn = rd_calloc(1, sizeof(*mconn));
- mconn->broker = mrkb;
- mconn->transport = rktrans;
- mconn->peer = *peer;
- rd_kafka_bufq_init(&mconn->outbufs);
-
- TAILQ_INSERT_TAIL(&mrkb->connections, mconn, link);
-
- rd_kafka_mock_cluster_io_add(mrkb->cluster, mconn->transport->rktrans_s,
- POLLIN, rd_kafka_mock_connection_io,
- mconn);
-
- rd_kafka_dbg(mrkb->cluster->rk, MOCK, "MOCK",
- "Broker %" PRId32 ": New connection from %s", mrkb->id,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
-
- return mconn;
-}
-
-
-
-static void rd_kafka_mock_cluster_op_io(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- int events,
- void *opaque) {
-        /* Read and discard wake-up fd data; it is only used for wake-ups. */
- char buf[1024];
- while (rd_socket_read(fd, buf, sizeof(buf)) > 0)
- ; /* Read all buffered signalling bytes */
-}
-
-
-static int rd_kafka_mock_cluster_io_poll(rd_kafka_mock_cluster_t *mcluster,
- int timeout_ms) {
- int r;
- int i;
-
- r = rd_socket_poll(mcluster->fds, mcluster->fd_cnt, timeout_ms);
- if (r == RD_SOCKET_ERROR) {
- rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
- "Mock cluster failed to poll %d fds: %d: %s",
- mcluster->fd_cnt, r,
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
-
- /* Serve ops, if any */
- rd_kafka_q_serve(mcluster->ops, RD_POLL_NOWAIT, 0,
- RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
-
- /* Handle IO events, if any, and if not terminating */
- for (i = 0; mcluster->run && r > 0 && i < mcluster->fd_cnt; i++) {
- if (!mcluster->fds[i].revents)
- continue;
-
- /* Call IO handler */
- mcluster->handlers[i].cb(mcluster, mcluster->fds[i].fd,
- mcluster->fds[i].revents,
- mcluster->handlers[i].opaque);
- r--;
- }
-
- return 0;
-}
-
-
-static int rd_kafka_mock_cluster_thread_main(void *arg) {
- rd_kafka_mock_cluster_t *mcluster = arg;
-
- rd_kafka_set_thread_name("mock");
- rd_kafka_set_thread_sysname("rdk:mock");
- rd_kafka_interceptors_on_thread_start(mcluster->rk,
- RD_KAFKA_THREAD_BACKGROUND);
- rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
-
- /* Op wakeup fd */
- rd_kafka_mock_cluster_io_add(mcluster, mcluster->wakeup_fds[0], POLLIN,
- rd_kafka_mock_cluster_op_io, NULL);
-
- mcluster->run = rd_true;
-
- while (mcluster->run) {
- int sleeptime = (int)((rd_kafka_timers_next(&mcluster->timers,
- 1000 * 1000 /*1s*/,
- 1 /*lock*/) +
- 999) /
- 1000);
-
- if (rd_kafka_mock_cluster_io_poll(mcluster, sleeptime) == -1)
- break;
-
- rd_kafka_timers_run(&mcluster->timers, RD_POLL_NOWAIT);
- }
-
- rd_kafka_mock_cluster_io_del(mcluster, mcluster->wakeup_fds[0]);
-
-
- rd_kafka_interceptors_on_thread_exit(mcluster->rk,
- RD_KAFKA_THREAD_BACKGROUND);
- rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
-
- rd_kafka_mock_cluster_destroy0(mcluster);
-
- return 0;
-}
-
-
-
-static void rd_kafka_mock_broker_listen_io(rd_kafka_mock_cluster_t *mcluster,
- rd_socket_t fd,
- int events,
- void *opaque) {
- rd_kafka_mock_broker_t *mrkb = opaque;
-
- if (events & (POLLERR | POLLHUP))
- rd_assert(!*"Mock broker listen socket error");
-
- if (events & POLLIN) {
- rd_socket_t new_s;
- struct sockaddr_in peer;
- socklen_t peer_size = sizeof(peer);
-
- new_s = accept(mrkb->listen_s, (struct sockaddr *)&peer,
- &peer_size);
- if (new_s == RD_SOCKET_ERROR) {
- rd_kafka_log(mcluster->rk, LOG_ERR, "MOCK",
- "Failed to accept mock broker socket: %s",
- rd_socket_strerror(rd_socket_errno));
- return;
- }
-
- rd_kafka_mock_connection_new(mrkb, new_s, &peer);
- }
-}
-
-
-/**
- * @brief Close all connections to broker.
- */
-static void rd_kafka_mock_broker_close_all(rd_kafka_mock_broker_t *mrkb,
- const char *reason) {
- rd_kafka_mock_connection_t *mconn;
-
- while ((mconn = TAILQ_FIRST(&mrkb->connections)))
- rd_kafka_mock_connection_close(mconn, reason);
-}
-
-/**
- * @brief Destroy error stack; it must already be unlinked.
- */
-static void
-rd_kafka_mock_error_stack_destroy(rd_kafka_mock_error_stack_t *errstack) {
- if (errstack->errs)
- rd_free(errstack->errs);
- rd_free(errstack);
-}
-
-
-static void rd_kafka_mock_broker_destroy(rd_kafka_mock_broker_t *mrkb) {
- rd_kafka_mock_error_stack_t *errstack;
-
- rd_kafka_mock_broker_close_all(mrkb, "Destroying broker");
-
- if (mrkb->listen_s != -1) {
- if (mrkb->up)
- rd_kafka_mock_cluster_io_del(mrkb->cluster,
- mrkb->listen_s);
- rd_socket_close(mrkb->listen_s);
- }
-
- while ((errstack = TAILQ_FIRST(&mrkb->errstacks))) {
- TAILQ_REMOVE(&mrkb->errstacks, errstack, link);
- rd_kafka_mock_error_stack_destroy(errstack);
- }
-
- TAILQ_REMOVE(&mrkb->cluster->brokers, mrkb, link);
- mrkb->cluster->broker_cnt--;
-
- rd_free(mrkb);
-}
-
-
-/**
- * @brief Starts listening on the mock broker socket.
- *
- * @returns 0 on success or -1 on error (logged).
- */
-static int rd_kafka_mock_broker_start_listener(rd_kafka_mock_broker_t *mrkb) {
- rd_assert(mrkb->listen_s != -1);
-
- if (listen(mrkb->listen_s, 5) == RD_SOCKET_ERROR) {
- rd_kafka_log(mrkb->cluster->rk, LOG_CRIT, "MOCK",
- "Failed to listen on mock broker socket: %s",
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
-
- rd_kafka_mock_cluster_io_add(mrkb->cluster, mrkb->listen_s, POLLIN,
- rd_kafka_mock_broker_listen_io, mrkb);
-
- return 0;
-}
-
-
-/**
- * @brief Creates a new listener socket for a mock broker, but does NOT
- *        start listening.
- *
- * @param sinp is the address and port to bind to. If the port is zero a random
- * port will be assigned (by the kernel) and the address and port
- * will be returned in this pointer.
- *
- * @returns listener socket on success or -1 on error (errors are logged).
- */
-static int rd_kafka_mock_broker_new_listener(rd_kafka_mock_cluster_t *mcluster,
- struct sockaddr_in *sinp) {
- struct sockaddr_in sin = *sinp;
- socklen_t sin_len = sizeof(sin);
- int listen_s;
- int on = 1;
-
- if (!sin.sin_family)
- sin.sin_family = AF_INET;
-
- /*
- * Create and bind socket to any loopback port
- */
- listen_s =
- rd_kafka_socket_cb_linux(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL);
- if (listen_s == RD_SOCKET_ERROR) {
- rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
- "Unable to create mock broker listen socket: %s",
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
-
- if (setsockopt(listen_s, SOL_SOCKET, SO_REUSEADDR, (void *)&on,
- sizeof(on)) == -1) {
- rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
- "Failed to set SO_REUSEADDR on mock broker "
- "listen socket: %s",
- rd_socket_strerror(rd_socket_errno));
- rd_socket_close(listen_s);
- return -1;
- }
-
- if (bind(listen_s, (struct sockaddr *)&sin, sizeof(sin)) ==
- RD_SOCKET_ERROR) {
- rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
- "Failed to bind mock broker socket to %s: %s",
- rd_socket_strerror(rd_socket_errno),
- rd_sockaddr2str(&sin, RD_SOCKADDR2STR_F_PORT));
- rd_socket_close(listen_s);
- return -1;
- }
-
- if (getsockname(listen_s, (struct sockaddr *)&sin, &sin_len) ==
- RD_SOCKET_ERROR) {
- rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
- "Failed to get mock broker socket name: %s",
- rd_socket_strerror(rd_socket_errno));
- rd_socket_close(listen_s);
- return -1;
- }
- rd_assert(sin.sin_family == AF_INET);
- /* If a filled in sinp was passed make sure nothing changed. */
- rd_assert(!sinp->sin_port || !memcmp(sinp, &sin, sizeof(sin)));
-
- *sinp = sin;
-
- return listen_s;
-}
-
-
-static rd_kafka_mock_broker_t *
-rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) {
- rd_kafka_mock_broker_t *mrkb;
- rd_socket_t listen_s;
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- .sin_addr = {.s_addr = htonl(INADDR_LOOPBACK)}};
-
- listen_s = rd_kafka_mock_broker_new_listener(mcluster, &sin);
- if (listen_s == -1)
- return NULL;
-
- /*
- * Create mock broker object
- */
- mrkb = rd_calloc(1, sizeof(*mrkb));
-
- mrkb->id = broker_id;
- mrkb->cluster = mcluster;
- mrkb->up = rd_true;
- mrkb->listen_s = listen_s;
- mrkb->sin = sin;
- mrkb->port = ntohs(sin.sin_port);
- rd_snprintf(mrkb->advertised_listener,
- sizeof(mrkb->advertised_listener), "%s",
- rd_sockaddr2str(&sin, 0));
-
- TAILQ_INIT(&mrkb->connections);
- TAILQ_INIT(&mrkb->errstacks);
-
- TAILQ_INSERT_TAIL(&mcluster->brokers, mrkb, link);
- mcluster->broker_cnt++;
-
- if (rd_kafka_mock_broker_start_listener(mrkb) == -1) {
- rd_kafka_mock_broker_destroy(mrkb);
- return NULL;
- }
-
- return mrkb;
-}
-
-
-/**
- * @returns the coordtype_t for a coord type string, or -1 on error.
- */
-static rd_kafka_coordtype_t rd_kafka_mock_coord_str2type(const char *str) {
- if (!strcmp(str, "transaction"))
- return RD_KAFKA_COORD_TXN;
- else if (!strcmp(str, "group"))
- return RD_KAFKA_COORD_GROUP;
- else
- return (rd_kafka_coordtype_t)-1;
-}
-
-
-/**
- * @brief Unlink and destroy coordinator.
- */
-static void rd_kafka_mock_coord_destroy(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_mock_coord_t *mcoord) {
- TAILQ_REMOVE(&mcluster->coords, mcoord, link);
- rd_free(mcoord->key);
- rd_free(mcoord);
-}
-
-/**
- * @brief Find coordinator by type and key.
- */
-static rd_kafka_mock_coord_t *
-rd_kafka_mock_coord_find(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_coordtype_t type,
- const char *key) {
- rd_kafka_mock_coord_t *mcoord;
-
- TAILQ_FOREACH(mcoord, &mcluster->coords, link) {
- if (mcoord->type == type && !strcmp(mcoord->key, key))
- return mcoord;
- }
-
- return NULL;
-}
-
-
-/**
- * @returns the coordinator for KeyType,Key (e.g., GROUP,mygroup).
- */
-rd_kafka_mock_broker_t *
-rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_coordtype_t KeyType,
- const rd_kafkap_str_t *Key) {
- rd_kafka_mock_broker_t *mrkb;
- rd_kafka_mock_coord_t *mcoord;
- char *key;
- rd_crc32_t hash;
- int idx;
-
- /* Try the explicit coord list first */
- RD_KAFKAP_STR_DUPA(&key, Key);
- if ((mcoord = rd_kafka_mock_coord_find(mcluster, KeyType, key)))
- return rd_kafka_mock_broker_find(mcluster, mcoord->broker_id);
-
- /* Else hash the key to select an available broker. */
- hash = rd_crc32(Key->str, RD_KAFKAP_STR_LEN(Key));
- idx = (int)(hash % mcluster->broker_cnt);
-
- /* Use the broker index in the list */
- TAILQ_FOREACH(mrkb, &mcluster->brokers, link)
- if (idx-- == 0)
- return mrkb;
-
- RD_NOTREACHED();
- return NULL;
-}
-
-
-/**
- * @brief Explicitly set coordinator for \p key_type ("transaction", "group")
- * and \p key.
- */
-static rd_kafka_mock_coord_t *
-rd_kafka_mock_coord_set(rd_kafka_mock_cluster_t *mcluster,
- const char *key_type,
- const char *key,
- int32_t broker_id) {
- rd_kafka_mock_coord_t *mcoord;
- rd_kafka_coordtype_t type;
-
- if ((int)(type = rd_kafka_mock_coord_str2type(key_type)) == -1)
- return NULL;
-
- if ((mcoord = rd_kafka_mock_coord_find(mcluster, type, key)))
- rd_kafka_mock_coord_destroy(mcluster, mcoord);
-
- mcoord = rd_calloc(1, sizeof(*mcoord));
- mcoord->type = type;
- mcoord->key = rd_strdup(key);
- mcoord->broker_id = broker_id;
-
- TAILQ_INSERT_TAIL(&mcluster->coords, mcoord, link);
-
- return mcoord;
-}
-
-
-/**
- * @brief Remove and return the next error, or RD_KAFKA_RESP_ERR_NO_ERROR
- * if no error.
- */
-static rd_kafka_mock_error_rtt_t
-rd_kafka_mock_error_stack_next(rd_kafka_mock_error_stack_t *errstack) {
- rd_kafka_mock_error_rtt_t err_rtt = {RD_KAFKA_RESP_ERR_NO_ERROR, 0};
-
- if (likely(errstack->cnt == 0))
- return err_rtt;
-
- err_rtt = errstack->errs[0];
- errstack->cnt--;
- if (errstack->cnt > 0)
- memmove(errstack->errs, &errstack->errs[1],
- sizeof(*errstack->errs) * errstack->cnt);
-
- return err_rtt;
-}
-
-
-/**
- * @brief Find an error stack based on \p ApiKey
- */
-static rd_kafka_mock_error_stack_t *
-rd_kafka_mock_error_stack_find(const rd_kafka_mock_error_stack_head_t *shead,
- int16_t ApiKey) {
- const rd_kafka_mock_error_stack_t *errstack;
-
- TAILQ_FOREACH(errstack, shead, link)
- if (errstack->ApiKey == ApiKey)
- return (rd_kafka_mock_error_stack_t *)errstack;
-
- return NULL;
-}
-
-
-
-/**
- * @brief Find or create an error stack based on \p ApiKey
- */
-static rd_kafka_mock_error_stack_t *
-rd_kafka_mock_error_stack_get(rd_kafka_mock_error_stack_head_t *shead,
- int16_t ApiKey) {
- rd_kafka_mock_error_stack_t *errstack;
-
- if ((errstack = rd_kafka_mock_error_stack_find(shead, ApiKey)))
- return errstack;
-
- errstack = rd_calloc(1, sizeof(*errstack));
-
- errstack->ApiKey = ApiKey;
- TAILQ_INSERT_TAIL(shead, errstack, link);
-
- return errstack;
-}
-
-
-
-/**
- * @brief Removes and returns the next request error for the response's ApiKey.
- *
- * If the error stack has a corresponding rtt/delay it is set on the
- * provided response \p resp buffer.
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_mock_error_stack_t *errstack;
- rd_kafka_mock_error_rtt_t err_rtt;
-
- mtx_lock(&mcluster->lock);
-
- errstack = rd_kafka_mock_error_stack_find(&mconn->broker->errstacks,
- resp->rkbuf_reqhdr.ApiKey);
- if (likely(!errstack)) {
- errstack = rd_kafka_mock_error_stack_find(
- &mcluster->errstacks, resp->rkbuf_reqhdr.ApiKey);
- if (likely(!errstack)) {
- mtx_unlock(&mcluster->lock);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- }
-
- err_rtt = rd_kafka_mock_error_stack_next(errstack);
- resp->rkbuf_ts_sent = err_rtt.rtt;
-
- mtx_unlock(&mcluster->lock);
-
- /* If the error is ERR__TRANSPORT (a librdkafka-specific error code
- * that will never be returned by a broker), we close the connection.
- * This allows closing the connection as soon as a certain
- * request is seen.
- * The handler code in rdkafka_mock_handlers.c does not need to
- * handle this case specifically and will generate a response and
- * enqueue it, but the connection will be down by the time it would
- * have been sent.
- * Note: Delayed disconnects (rtt-based) are not supported. */
- if (err_rtt.err == RD_KAFKA_RESP_ERR__TRANSPORT) {
- rd_kafka_dbg(
- mcluster->rk, MOCK, "MOCK",
- "Broker %" PRId32
- ": Forcing close of connection "
- "from %s",
- mconn->broker->id,
- rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
- rd_kafka_transport_shutdown(mconn->transport);
- }
-
-
- return err_rtt.err;
-}
-
-
-void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey) {
- rd_kafka_mock_error_stack_t *errstack;
-
- mtx_lock(&mcluster->lock);
-
- errstack = rd_kafka_mock_error_stack_find(&mcluster->errstacks, ApiKey);
- if (errstack)
- errstack->cnt = 0;
-
- mtx_unlock(&mcluster->lock);
-}
-
-
-void rd_kafka_mock_push_request_errors_array(
- rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- size_t cnt,
- const rd_kafka_resp_err_t *errors) {
- rd_kafka_mock_error_stack_t *errstack;
- size_t totcnt;
- size_t i;
-
- mtx_lock(&mcluster->lock);
-
- errstack = rd_kafka_mock_error_stack_get(&mcluster->errstacks, ApiKey);
-
- totcnt = errstack->cnt + cnt;
-
- if (totcnt > errstack->size) {
- errstack->size = totcnt + 4;
- errstack->errs = rd_realloc(
- errstack->errs, errstack->size * sizeof(*errstack->errs));
- }
-
- for (i = 0; i < cnt; i++) {
- errstack->errs[errstack->cnt].err = errors[i];
- errstack->errs[errstack->cnt++].rtt = 0;
- }
-
- mtx_unlock(&mcluster->lock);
-}
-
-void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- size_t cnt,
- ...) {
- va_list ap;
- rd_kafka_resp_err_t *errors = rd_alloca(sizeof(*errors) * cnt);
- size_t i;
-
- va_start(ap, cnt);
- for (i = 0; i < cnt; i++)
- errors[i] = va_arg(ap, rd_kafka_resp_err_t);
- va_end(ap);
-
- rd_kafka_mock_push_request_errors_array(mcluster, ApiKey, cnt, errors);
-}
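-
-/* Usage sketch (hypothetical test code, not part of this file): fail the
- * next two Produce requests cluster-wide, exercising the producer's retry
- * path. ApiKey 0 is Produce; librdkafka's own tests use RD_KAFKAP_Produce. */
-#include <librdkafka/rdkafka.h>
-#include <librdkafka/rdkafka_mock.h>
-
-static void example_force_produce_errors(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_push_request_errors(
-            mcluster, 0 /* Produce */, 2 /* cnt */,
-            RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
-            RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT);
-}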
-
-
-rd_kafka_resp_err_t
-rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- int16_t ApiKey,
- size_t cnt,
- ...) {
- rd_kafka_mock_broker_t *mrkb;
- va_list ap;
- rd_kafka_mock_error_stack_t *errstack;
- size_t totcnt;
-
- mtx_lock(&mcluster->lock);
-
- if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) {
- mtx_unlock(&mcluster->lock);
- return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER;
- }
-
- errstack = rd_kafka_mock_error_stack_get(&mrkb->errstacks, ApiKey);
-
- totcnt = errstack->cnt + cnt;
-
- if (totcnt > errstack->size) {
- errstack->size = totcnt + 4;
- errstack->errs = rd_realloc(
- errstack->errs, errstack->size * sizeof(*errstack->errs));
- }
-
- va_start(ap, cnt);
- while (cnt-- > 0) {
- errstack->errs[errstack->cnt].err =
- va_arg(ap, rd_kafka_resp_err_t);
- errstack->errs[errstack->cnt++].rtt =
- ((rd_ts_t)va_arg(ap, int)) * 1000;
- }
- va_end(ap);
-
- mtx_unlock(&mcluster->lock);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
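-
-/* Usage sketch (hypothetical test code): have broker 1 answer the next
- * Fetch request (ApiKey 1) successfully but delayed by 3000 ms, simulating
- * a slow broker. The variadic arguments come in (err, rtt_ms) pairs. */
-#include <librdkafka/rdkafka.h>
-#include <librdkafka/rdkafka_mock.h>
-
-static void example_slow_fetch(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_broker_push_request_error_rtts(
-            mcluster, 1 /* broker_id */, 1 /* Fetch */, 1 /* cnt */,
-            RD_KAFKA_RESP_ERR_NO_ERROR, 3000 /* rtt_ms */);
-}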
-
-
-rd_kafka_resp_err_t
-rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- int16_t ApiKey,
- size_t *cntp) {
- rd_kafka_mock_broker_t *mrkb;
- rd_kafka_mock_error_stack_t *errstack;
-
- if (!mcluster || !cntp)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- mtx_lock(&mcluster->lock);
-
- if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) {
- mtx_unlock(&mcluster->lock);
- return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER;
- }
-
- if ((errstack =
- rd_kafka_mock_error_stack_find(&mrkb->errstacks, ApiKey)))
- *cntp = errstack->cnt;
-
- mtx_unlock(&mcluster->lock);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- rd_kafka_resp_err_t err) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.name = rd_strdup(topic);
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR;
- rko->rko_u.mock.err = err;
-
- rko = rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE);
- if (rko)
- rd_kafka_op_destroy(rko);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int partition_cnt,
- int replication_factor) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.name = rd_strdup(topic);
- rko->rko_u.mock.lo = partition_cnt;
- rko->rko_u.mock.hi = replication_factor;
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_CREATE;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
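-
-/* Usage sketch (hypothetical test code): stand up a three-broker mock
- * cluster and create a topic on it; clients are then pointed at
- * rd_kafka_mock_cluster_bootstraps(mcluster). All calls below are part of
- * the public mock API (rdkafka_mock.h); error handling is elided. */
-#include <librdkafka/rdkafka.h>
-#include <librdkafka/rdkafka_mock.h>
-
-static rd_kafka_mock_cluster_t *example_start_cluster(rd_kafka_t *rk) {
-        rd_kafka_mock_cluster_t *mcluster =
-            rd_kafka_mock_cluster_new(rk, 3 /* broker_cnt */);
-
-        rd_kafka_mock_topic_create(mcluster, "mytopic", 4 /* partitions */,
-                                   3 /* replication_factor */);
-        return mcluster;
-}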
-
-rd_kafka_resp_err_t
-rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition,
- int32_t broker_id) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.name = rd_strdup(topic);
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_LEADER;
- rko->rko_u.mock.partition = partition;
- rko->rko_u.mock.broker_id = broker_id;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
-
-rd_kafka_resp_err_t
-rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition,
- int32_t broker_id) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.name = rd_strdup(topic);
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER;
- rko->rko_u.mock.partition = partition;
- rko->rko_u.mock.broker_id = broker_id;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
-
-rd_kafka_resp_err_t
-rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition,
- int64_t lo,
- int64_t hi) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.name = rd_strdup(topic);
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS;
- rko->rko_u.mock.partition = partition;
- rko->rko_u.mock.lo = lo;
- rko->rko_u.mock.hi = hi;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
-
-rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.broker_id = broker_id;
- rko->rko_u.mock.lo = rd_false;
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
-
-rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.broker_id = broker_id;
- rko->rko_u.mock.lo = rd_true;
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
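-
-/* Usage sketch (hypothetical test code): bounce broker 1 to simulate an
- * outage. The listener is re-created on set_down so the broker keeps its
- * port when brought back up. */
-#include <librdkafka/rdkafka.h>
-#include <librdkafka/rdkafka_mock.h>
-
-static void example_bounce_broker(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_broker_set_down(mcluster, 1 /* broker_id */);
-        /* ... let the client observe the disconnect ... */
-        rd_kafka_mock_broker_set_up(mcluster, 1 /* broker_id */);
-}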
-
-rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- int rtt_ms) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.broker_id = broker_id;
- rko->rko_u.mock.lo = rtt_ms;
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RTT;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
-
-rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- const char *rack) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.broker_id = broker_id;
- rko->rko_u.mock.name = rd_strdup(rack);
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RACK;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
-
-rd_kafka_resp_err_t
-rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster,
- const char *key_type,
- const char *key,
- int32_t broker_id) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.name = rd_strdup(key_type);
- rko->rko_u.mock.str = rd_strdup(key);
- rko->rko_u.mock.broker_id = broker_id;
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_COORD_SET;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
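-
-/* Usage sketch (hypothetical test code): pin the group coordinator for
- * "mygroup" to broker 2, overriding the default hash-based selection.
- * Valid key types are "group" and "transaction". */
-#include <librdkafka/rdkafka.h>
-#include <librdkafka/rdkafka_mock.h>
-
-static void example_pin_coordinator(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_coordinator_set(mcluster, "group", "mygroup",
-                                      2 /* broker_id */);
-}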
-
-rd_kafka_resp_err_t
-rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- int16_t MinVersion,
- int16_t MaxVersion) {
- rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
-
- rko->rko_u.mock.partition = ApiKey;
- rko->rko_u.mock.lo = MinVersion;
- rko->rko_u.mock.hi = MaxVersion;
- rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_APIVERSION_SET;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
-}
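-
-/* Usage sketch (hypothetical test code): pretend the cluster only supports
- * ApiVersionRequest (ApiKey 18) version 0, forcing the client through its
- * ApiVersion fallback path. */
-#include <librdkafka/rdkafka.h>
-#include <librdkafka/rdkafka_mock.h>
-
-static void example_pin_apiversion(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_set_apiversion(mcluster, 18 /* ApiVersionRequest */,
-                                     0 /* MinVersion */, 0 /* MaxVersion */);
-}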
-
-
-/**
- * @brief Apply command to specific broker.
- *
- * @locality mcluster thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_mock_broker_cmd(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_mock_broker_t *mrkb,
- rd_kafka_op_t *rko) {
- switch (rko->rko_u.mock.cmd) {
- case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
- if ((rd_bool_t)rko->rko_u.mock.lo == mrkb->up)
- break;
-
- mrkb->up = (rd_bool_t)rko->rko_u.mock.lo;
-
- if (!mrkb->up) {
- rd_kafka_mock_cluster_io_del(mcluster, mrkb->listen_s);
- rd_socket_close(mrkb->listen_s);
- /* Re-create the listener right away so we retain the
- * same port. The listener is not started until
- * the broker is set up (below). */
- mrkb->listen_s = rd_kafka_mock_broker_new_listener(
- mcluster, &mrkb->sin);
- rd_assert(mrkb->listen_s != -1 ||
-                                  !*"Failed to re-create mock broker listener");
-
- rd_kafka_mock_broker_close_all(mrkb, "Broker down");
-
- } else {
- int r;
- rd_assert(mrkb->listen_s != -1);
- r = rd_kafka_mock_broker_start_listener(mrkb);
- rd_assert(r == 0 || !*"broker_start_listener() failed");
- }
- break;
-
- case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
- mrkb->rtt = (rd_ts_t)rko->rko_u.mock.lo * 1000;
-
- /* Check if there is anything to send now that the RTT
- * has changed or if a timer is to be started. */
- rd_kafka_mock_broker_connections_write_out(mrkb);
- break;
-
- case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
- if (mrkb->rack)
- rd_free(mrkb->rack);
-
- if (rko->rko_u.mock.name)
- mrkb->rack = rd_strdup(rko->rko_u.mock.name);
- else
- mrkb->rack = NULL;
- break;
-
- default:
- RD_BUG("Unhandled mock cmd %d", rko->rko_u.mock.cmd);
- break;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Apply command to one or all brokers, depending on the value of
- *        broker_id: -1 means all brokers, any other value a specific broker.
- *
- * @locality mcluster thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_mock_brokers_cmd(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_op_t *rko) {
- rd_kafka_mock_broker_t *mrkb;
-
- if (rko->rko_u.mock.broker_id != -1) {
- /* Specific broker */
- mrkb = rd_kafka_mock_broker_find(mcluster,
- rko->rko_u.mock.broker_id);
- if (!mrkb)
- return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
-
- return rd_kafka_mock_broker_cmd(mcluster, mrkb, rko);
- }
-
- /* All brokers */
- TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
- rd_kafka_resp_err_t err;
-
- if ((err = rd_kafka_mock_broker_cmd(mcluster, mrkb, rko)))
- return err;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Handle command op
- *
- * @locality mcluster thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_op_t *rko) {
- rd_kafka_mock_topic_t *mtopic;
- rd_kafka_mock_partition_t *mpart;
- rd_kafka_mock_broker_t *mrkb;
-
- switch (rko->rko_u.mock.cmd) {
- case RD_KAFKA_MOCK_CMD_TOPIC_CREATE:
- if (rd_kafka_mock_topic_find(mcluster, rko->rko_u.mock.name))
- return RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS;
-
- if (!rd_kafka_mock_topic_new(mcluster, rko->rko_u.mock.name,
- /* partition_cnt */
- (int)rko->rko_u.mock.lo,
- /* replication_factor */
- (int)rko->rko_u.mock.hi))
- return RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION;
- break;
-
- case RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR:
- mtopic =
- rd_kafka_mock_topic_get(mcluster, rko->rko_u.mock.name, -1);
- mtopic->err = rko->rko_u.mock.err;
- break;
-
- case RD_KAFKA_MOCK_CMD_PART_SET_LEADER:
- mpart = rd_kafka_mock_partition_get(
- mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
- if (!mpart)
- return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- if (rko->rko_u.mock.broker_id != -1) {
- mrkb = rd_kafka_mock_broker_find(
- mcluster, rko->rko_u.mock.broker_id);
- if (!mrkb)
- return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
- } else {
- mrkb = NULL;
- }
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Set %s [%" PRId32 "] leader to %" PRId32,
- rko->rko_u.mock.name, rko->rko_u.mock.partition,
- rko->rko_u.mock.broker_id);
-
- rd_kafka_mock_partition_set_leader0(mpart, mrkb);
- break;
-
- case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER:
- mpart = rd_kafka_mock_partition_get(
- mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
- if (!mpart)
- return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Set %s [%" PRId32
- "] preferred follower "
- "to %" PRId32,
- rko->rko_u.mock.name, rko->rko_u.mock.partition,
- rko->rko_u.mock.broker_id);
-
- mpart->follower_id = rko->rko_u.mock.broker_id;
- break;
-
- case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS:
- mpart = rd_kafka_mock_partition_get(
- mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
- if (!mpart)
- return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Set %s [%" PRId32
- "] follower "
- "watermark offsets to %" PRId64 "..%" PRId64,
- rko->rko_u.mock.name, rko->rko_u.mock.partition,
- rko->rko_u.mock.lo, rko->rko_u.mock.hi);
-
- if (rko->rko_u.mock.lo == -1) {
- mpart->follower_start_offset = mpart->start_offset;
- mpart->update_follower_start_offset = rd_true;
- } else {
- mpart->follower_start_offset = rko->rko_u.mock.lo;
- mpart->update_follower_start_offset = rd_false;
- }
-
- if (rko->rko_u.mock.hi == -1) {
- mpart->follower_end_offset = mpart->end_offset;
- mpart->update_follower_end_offset = rd_true;
- } else {
- mpart->follower_end_offset = rko->rko_u.mock.hi;
- mpart->update_follower_end_offset = rd_false;
- }
- break;
-
- /* Broker commands */
- case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
- case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
- case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
- return rd_kafka_mock_brokers_cmd(mcluster, rko);
-
- case RD_KAFKA_MOCK_CMD_COORD_SET:
- if (!rd_kafka_mock_coord_set(mcluster, rko->rko_u.mock.name,
- rko->rko_u.mock.str,
- rko->rko_u.mock.broker_id))
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- break;
-
- case RD_KAFKA_MOCK_CMD_APIVERSION_SET:
- if (rko->rko_u.mock.partition < 0 ||
- rko->rko_u.mock.partition >= RD_KAFKAP__NUM)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- mcluster->api_handlers[(int)rko->rko_u.mock.partition]
- .MinVersion = (int16_t)rko->rko_u.mock.lo;
- mcluster->api_handlers[(int)rko->rko_u.mock.partition]
- .MaxVersion = (int16_t)rko->rko_u.mock.hi;
- break;
-
- default:
- rd_assert(!*"unknown mock cmd");
- break;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static rd_kafka_op_res_t
-rd_kafka_mock_cluster_op_serve(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
- rd_kafka_mock_cluster_t *mcluster = opaque;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- switch ((int)rko->rko_type) {
- case RD_KAFKA_OP_TERMINATE:
- mcluster->run = rd_false;
- break;
-
- case RD_KAFKA_OP_MOCK:
- err = rd_kafka_mock_cluster_cmd(mcluster, rko);
- break;
-
- default:
-                rd_assert(!*"unhandled op");
- break;
- }
-
- rd_kafka_op_reply(rko, err);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Destroy cluster (internal)
- */
-static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) {
- rd_kafka_mock_topic_t *mtopic;
- rd_kafka_mock_broker_t *mrkb;
- rd_kafka_mock_cgrp_t *mcgrp;
- rd_kafka_mock_coord_t *mcoord;
- rd_kafka_mock_error_stack_t *errstack;
- thrd_t dummy_rkb_thread;
- int ret;
-
- while ((mtopic = TAILQ_FIRST(&mcluster->topics)))
- rd_kafka_mock_topic_destroy(mtopic);
-
- while ((mrkb = TAILQ_FIRST(&mcluster->brokers)))
- rd_kafka_mock_broker_destroy(mrkb);
-
- while ((mcgrp = TAILQ_FIRST(&mcluster->cgrps)))
- rd_kafka_mock_cgrp_destroy(mcgrp);
-
- while ((mcoord = TAILQ_FIRST(&mcluster->coords)))
- rd_kafka_mock_coord_destroy(mcluster, mcoord);
-
- rd_list_destroy(&mcluster->pids);
-
- while ((errstack = TAILQ_FIRST(&mcluster->errstacks))) {
- TAILQ_REMOVE(&mcluster->errstacks, errstack, link);
- rd_kafka_mock_error_stack_destroy(errstack);
- }
-
- /*
- * Destroy dummy broker
- */
- rd_kafka_q_enq(mcluster->dummy_rkb->rkb_ops,
- rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
-
- dummy_rkb_thread = mcluster->dummy_rkb->rkb_thread;
-
- rd_kafka_broker_destroy(mcluster->dummy_rkb);
-
- if (thrd_join(dummy_rkb_thread, &ret) != thrd_success)
- rd_assert(!*"failed to join mock dummy broker thread");
-
-
- rd_kafka_q_destroy_owner(mcluster->ops);
-
- rd_kafka_timers_destroy(&mcluster->timers);
-
- if (mcluster->fd_size > 0) {
- rd_free(mcluster->fds);
- rd_free(mcluster->handlers);
- }
-
- mtx_destroy(&mcluster->lock);
-
- rd_free(mcluster->bootstraps);
-
- rd_socket_close(mcluster->wakeup_fds[0]);
- rd_socket_close(mcluster->wakeup_fds[1]);
-}
-
-
-
-void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) {
- int res;
- rd_kafka_op_t *rko;
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Destroying cluster");
-
- rd_assert(rd_atomic32_get(&mcluster->rk->rk_mock.cluster_cnt) > 0);
- rd_atomic32_sub(&mcluster->rk->rk_mock.cluster_cnt, 1);
-
- rko = rd_kafka_op_req2(mcluster->ops, RD_KAFKA_OP_TERMINATE);
-
- if (rko)
- rd_kafka_op_destroy(rko);
-
- if (thrd_join(mcluster->thread, &res) != thrd_success)
- rd_assert(!*"failed to join mock thread");
-
- rd_free(mcluster);
-}
-
-
-
-rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk,
- int broker_cnt) {
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_mock_broker_t *mrkb;
- int i, r;
- size_t bootstraps_len = 0;
- size_t of;
-
- mcluster = rd_calloc(1, sizeof(*mcluster));
- mcluster->rk = rk;
-
- mcluster->dummy_rkb =
- rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT,
- "mock", 0, RD_KAFKA_NODEID_UA);
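-        /* Derive a reasonably unique cluster id from the object address. */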
- rd_snprintf(mcluster->id, sizeof(mcluster->id), "mockCluster%lx",
- (intptr_t)mcluster >> 2);
-
- TAILQ_INIT(&mcluster->brokers);
-
- for (i = 1; i <= broker_cnt; i++) {
- if (!(mrkb = rd_kafka_mock_broker_new(mcluster, i))) {
- rd_kafka_mock_cluster_destroy(mcluster);
- return NULL;
- }
-
- /* advertised listener + ":port" + "," */
- bootstraps_len += strlen(mrkb->advertised_listener) + 6 + 1;
- }
-
- mtx_init(&mcluster->lock, mtx_plain);
-
- TAILQ_INIT(&mcluster->topics);
- mcluster->defaults.partition_cnt = 4;
- mcluster->defaults.replication_factor = RD_MIN(3, broker_cnt);
-
- TAILQ_INIT(&mcluster->cgrps);
-
- TAILQ_INIT(&mcluster->coords);
-
- rd_list_init(&mcluster->pids, 16, rd_free);
-
- TAILQ_INIT(&mcluster->errstacks);
-
- memcpy(mcluster->api_handlers, rd_kafka_mock_api_handlers,
- sizeof(mcluster->api_handlers));
-
- /* Use an op queue for controlling the cluster in
- * a thread-safe manner without locking. */
- mcluster->ops = rd_kafka_q_new(rk);
- mcluster->ops->rkq_serve = rd_kafka_mock_cluster_op_serve;
- mcluster->ops->rkq_opaque = mcluster;
-
- rd_kafka_timers_init(&mcluster->timers, rk, mcluster->ops);
-
- if ((r = rd_pipe_nonblocking(mcluster->wakeup_fds)) == -1) {
- rd_kafka_log(rk, LOG_ERR, "MOCK",
- "Failed to setup mock cluster wake-up fds: %s",
- rd_socket_strerror(r));
- } else {
- const char onebyte = 1;
- rd_kafka_q_io_event_enable(mcluster->ops,
- mcluster->wakeup_fds[1], &onebyte,
- sizeof(onebyte));
- }
-
-
- if (thrd_create(&mcluster->thread, rd_kafka_mock_cluster_thread_main,
- mcluster) != thrd_success) {
- rd_kafka_log(rk, LOG_CRIT, "MOCK",
- "Failed to create mock cluster thread: %s",
- rd_strerror(errno));
- rd_kafka_mock_cluster_destroy(mcluster);
- return NULL;
- }
-
-
- /* Construct bootstrap.servers list */
- mcluster->bootstraps = rd_malloc(bootstraps_len + 1);
- of = 0;
- TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
- r = rd_snprintf(&mcluster->bootstraps[of], bootstraps_len - of,
- "%s%s:%hu", of > 0 ? "," : "",
- mrkb->advertised_listener, mrkb->port);
- of += r;
- rd_assert(of < bootstraps_len);
- }
- mcluster->bootstraps[of] = '\0';
-
- rd_kafka_dbg(rk, MOCK, "MOCK", "Mock cluster %s bootstrap.servers=%s",
- mcluster->id, mcluster->bootstraps);
-
- rd_atomic32_add(&rk->rk_mock.cluster_cnt, 1);
-
- return mcluster;
-}
-
-
-rd_kafka_t *
-rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster) {
- return (rd_kafka_t *)mcluster->rk;
-}
-
-rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk) {
- return (rd_kafka_mock_cluster_t *)rk->rk_mock.cluster;
-}
-
-
-const char *
-rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster) {
- return mcluster->bootstraps;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h
deleted file mode 100644
index f06efe8fd..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019-2022 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_MOCK_H_
-#define _RDKAFKA_MOCK_H_
-
-#ifndef _RDKAFKA_H_
-#error "rdkafka_mock.h must be included after rdkafka.h"
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#if 0
-} /* Restore indent */
-#endif
-#endif
-
-
-/**
- * @name Mock cluster
- *
- * Provides a mock Kafka cluster with a configurable number of brokers
- * that support a reasonable subset of Kafka protocol operations,
- * error injection, etc.
- *
- * There are two ways to use the mock clusters; the simplest approach
- * is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
- * in an existing application, which will replace the configured
- * `bootstrap.servers` with the mock cluster brokers.
- * This approach makes it convenient to test existing applications.
- *
- * The second approach is to explicitly create a mock cluster on an
- * rd_kafka_t instance by using rd_kafka_mock_cluster_new().
- *
- * Mock clusters provide localhost listeners that can be used as the bootstrap
- * servers by multiple rd_kafka_t instances.
- *
- * Currently supported functionality:
- * - Producer
- * - Idempotent Producer
- * - Transactional Producer
- * - Low-level consumer
- * - High-level balanced consumer groups with offset commits
- * - Topic Metadata and auto creation
- *
- * @remark This is an experimental public API that is NOT covered by the
- * librdkafka API or ABI stability guarantees.
- *
- *
- * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
- *
- * @{
- */
-
-typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
-
-
-/**
- * @brief Create new mock cluster with \p broker_cnt brokers.
- *
- * The broker ids will start at 1 up to and including \p broker_cnt.
- *
- * The \p rk instance is required for internal bookkeeping but continues
- * to operate as usual.
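- *
- * A minimal usage sketch (error handling omitted; \p rk is assumed to
- * be an existing rd_kafka_t instance):
- * @code
- *   rd_kafka_mock_cluster_t *mcluster = rd_kafka_mock_cluster_new(rk, 3);
- *   rd_kafka_conf_t *conf = rd_kafka_conf_new();
- *   char errstr[256];
- *   rd_kafka_conf_set(conf, "bootstrap.servers",
- *                     rd_kafka_mock_cluster_bootstraps(mcluster),
- *                     errstr, sizeof(errstr));
- *   // ... create a client with conf and run the test ...
- *   rd_kafka_mock_cluster_destroy(mcluster);
- * @endcode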
- */
-RD_EXPORT
-rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk,
- int broker_cnt);
-
-
-/**
- * @brief Destroy mock cluster.
- */
-RD_EXPORT
-void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
-
-
-
-/**
- * @returns the rd_kafka_t instance for a cluster as passed to
- * rd_kafka_mock_cluster_new().
- */
-RD_EXPORT rd_kafka_t *
-rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
-
-
-/**
- * @returns the rd_kafka_mock_cluster_t instance as created by
- * setting the `test.mock.num.brokers` configuration property,
- * or NULL if no such instance.
- */
-RD_EXPORT rd_kafka_mock_cluster_t *
-rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
-
-
-
-/**
- * @returns the mock cluster's bootstrap.servers list
- */
-RD_EXPORT const char *
-rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
-
-
-/**
- * @brief Clear the cluster's error state for the given \p ApiKey.
- */
-RD_EXPORT
-void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey);
-
-
-/**
- * @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
- * error stack for the given \p ApiKey.
- *
- * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
- *
- * The following \p cnt protocol requests matching \p ApiKey will fail with the
- * provided error code and be removed from the stack, starting with
- * the first error code, then the second, etc.
- *
- * Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
- * disconnect the client, which can be useful to trigger a disconnect on
- * certain requests.
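- *
- * E.g., an illustrative sketch that fails the next two ProduceRequests,
- * the second one with a forced disconnect (\c RD_KAFKAP_Produce is the
- * ApiKey constant from rdkafka_protocol.h):
- * @code
- *   rd_kafka_mock_push_request_errors(
- *       mcluster, RD_KAFKAP_Produce, 2,
- *       RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
- *       RD_KAFKA_RESP_ERR__TRANSPORT);
- * @endcode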
- */
-RD_EXPORT
-void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- size_t cnt,
- ...);
-
-
-/**
- * @brief Same as rd_kafka_mock_push_request_errors() but takes
- * an array of errors.
- */
-RD_EXPORT void
-rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- size_t cnt,
- const rd_kafka_resp_err_t *errors);
-
-
-/**
- * @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
- * the broker's error stack for the given \p ApiKey.
- *
- * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
- *
- * Each entry is a tuple of:
- * rd_kafka_resp_err_t err - error to return (or 0)
- * int rtt_ms - response RTT/delay in milliseconds (or 0)
- *
- * The following \p cnt protocol requests matching \p ApiKey will fail with the
- * provided error code and be removed from the stack, starting with
- * the first error code, then the second, etc.
- *
- * @remark The broker errors take precedence over the cluster errors.
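- *
- * E.g., a sketch that delays broker 1's next Fetch response by 500 ms
- * and fails the one after it without any extra delay:
- * @code
- *   rd_kafka_mock_broker_push_request_error_rtts(
- *       mcluster, 1, RD_KAFKAP_Fetch, 2,
- *       RD_KAFKA_RESP_ERR_NO_ERROR, 500,
- *       RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, 0);
- * @endcode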
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- int16_t ApiKey,
- size_t cnt,
- ...);
-
-
-
-/**
- * @brief Get the count of errors in the broker's error stack for
- * the given \p ApiKey.
- *
- * @param mcluster the mock cluster.
- * @param broker_id id of the broker in the cluster.
- * @param ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
- * @param cntp pointer for receiving the count.
- *
- * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR if the count was retrieved,
- * \c RD_KAFKA_RESP_ERR__UNKNOWN_BROKER if there was no broker with this id,
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if some of the parameters are not valid.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- int16_t ApiKey,
- size_t *cntp);
-
-
-/**
- * @brief Set the topic error to return in protocol requests.
- *
- * Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
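- *
- * E.g., a sketch (the topic name is illustrative):
- * @code
- *   rd_kafka_mock_topic_set_error(
- *       mcluster, "mytopic", RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
- * @endcode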
- */
-RD_EXPORT
-void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- rd_kafka_resp_err_t err);
-
-
-/**
- * @brief Creates a topic.
- *
- * This is an alternative to automatic topic creation as performed by
- * the client itself.
- *
- * @remark The Topic Admin API (CreateTopics) is not supported by the
- * mock broker.
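- *
- * E.g., a sketch that pre-creates a topic with 4 partitions and
- * replication factor 3 (requires at least 3 brokers in the cluster):
- * @code
- *   rd_kafka_mock_topic_create(mcluster, "mytopic", 4, 3);
- * @endcode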
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int partition_cnt,
- int replication_factor);
-
-
-/**
- * @brief Sets the partition leader.
- *
- * The topic will be created if it does not exist.
- *
- * \p broker_id needs to be an existing broker, or -1 to make the
- * partition leader-less.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition,
- int32_t broker_id);
-
-/**
- * @brief Sets the partition's preferred replica / follower.
- *
- * The topic will be created if it does not exist.
- *
- * \p broker_id does not need to point to an existing broker.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition,
- int32_t broker_id);
-
-/**
- * @brief Sets the partition's preferred replica / follower low and high
- * watermarks.
- *
- * The topic will be created if it does not exist.
- *
- * Setting an offset to -1 will revert to the leader's corresponding
- * watermark.
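- *
- * A combined sketch (topic and broker ids illustrative): make broker 1
- * the leader and broker 2 the preferred follower of mytopic [0], and cap
- * the follower's high watermark at offset 100:
- * @code
- *   rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, 1);
- *   rd_kafka_mock_partition_set_follower(mcluster, "mytopic", 0, 2);
- *   rd_kafka_mock_partition_set_follower_wmarks(mcluster, "mytopic", 0,
- *                                               -1, 100);
- * @endcode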
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int32_t partition,
- int64_t lo,
- int64_t hi);
-
-
-/**
- * @brief Disconnects the broker and disallows any new connections.
- * This does NOT trigger leader change.
- *
- * @param mcluster Mock cluster instance.
- * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id);
-
-/**
- * @brief Makes the broker accept connections again.
- * This does NOT trigger leader change.
- *
- * @param mcluster Mock cluster instance.
- * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id);
-
-
-/**
- * @brief Set broker round-trip-time delay in milliseconds.
- *
- * @param mcluster Mock cluster instance.
- * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
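- *
- * A sketch exercising failover and latency handling together with
- * rd_kafka_mock_broker_set_down()/_set_up():
- * @code
- *   rd_kafka_mock_broker_set_down(mcluster, 2); // refuse new connections
- *   // ... assert that the client fails over to another broker ...
- *   rd_kafka_mock_broker_set_up(mcluster, 2);   // accept connections again
- *   rd_kafka_mock_broker_set_rtt(mcluster, 2, 100); // add 100 ms delay
- * @endcode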
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- int rtt_ms);
-
-/**
- * @brief Sets the broker's rack as reported in Metadata to the client.
- *
- * @param mcluster Mock cluster instance.
- * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster,
- int32_t broker_id,
- const char *rack);
-
-
-
-/**
- * @brief Explicitly sets the coordinator. If this API is not used,
- *        a standard hashing scheme will be used instead.
- *
- * @param key_type "transaction" or "group"
- * @param key The transactional.id or group.id
- * @param broker_id The new coordinator, does not have to be a valid broker.
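- *
- * E.g., a sketch pinning the group coordinator for group "mygroup" to
- * broker 3:
- * @code
- *   rd_kafka_mock_coordinator_set(mcluster, "group", "mygroup", 3);
- * @endcode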
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster,
- const char *key_type,
- const char *key,
- int32_t broker_id);
-
-
-
-/**
- * @brief Set the allowed ApiVersion range for \p ApiKey.
- *
- * Set \p MinVersion and \p MaxVersion to -1 to disable the API
- * completely.
- *
- * \p MaxVersion MUST not exceed the maximum implemented value,
- * see rdkafka_mock_handlers.c.
- *
- * @param ApiKey Protocol request type/key
- * @param MinVersion Minimum version supported (or -1 to disable).
- * @param MaxVersion Maximum version supported (or -1 to disable).
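- *
- * E.g., a sketch limiting FetchRequests to versions 0..4
- * (\c RD_KAFKAP_Fetch is the ApiKey constant from rdkafka_protocol.h):
- * @code
- *   rd_kafka_mock_set_apiversion(mcluster, RD_KAFKAP_Fetch, 0, 4);
- * @endcode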
- */
-RD_EXPORT rd_kafka_resp_err_t
-rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- int16_t MinVersion,
- int16_t MaxVersion);
-
-
-/**@}*/
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* _RDKAFKA_MOCK_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c
deleted file mode 100644
index 8f71fb48c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c
+++ /dev/null
@@ -1,687 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Mocks - consumer group implementation
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdbuf.h"
-#include "rdkafka_mock_int.h"
-
-
-static const char *rd_kafka_mock_cgrp_state_names[] = {
- "Empty", "Joining", "Syncing", "Rebalancing", "Up"};
-
-
-static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp,
- const char *reason);
-static void
-rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member);
-
-static void rd_kafka_mock_cgrp_set_state(rd_kafka_mock_cgrp_t *mcgrp,
- unsigned int new_state,
- const char *reason) {
- if (mcgrp->state == new_state)
- return;
-
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Mock consumer group %s with %d member(s) "
- "changing state %s -> %s: %s",
- mcgrp->id, mcgrp->member_cnt,
- rd_kafka_mock_cgrp_state_names[mcgrp->state],
- rd_kafka_mock_cgrp_state_names[new_state], reason);
-
- mcgrp->state = new_state;
-}
-
-
-/**
- * @brief Mark member as active (restart session timer)
- */
-void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member) {
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Marking mock consumer group member %s as active",
- member->id);
- member->ts_last_activity = rd_clock();
-}
-
-
-/**
- * @brief Verify that the protocol request is valid in the current state.
- *
- * @param member may be NULL.
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member,
- const rd_kafka_buf_t *request,
- int32_t generation_id) {
- int16_t ApiKey = request->rkbuf_reqhdr.ApiKey;
- rd_bool_t has_generation_id = ApiKey == RD_KAFKAP_SyncGroup ||
- ApiKey == RD_KAFKAP_Heartbeat ||
- ApiKey == RD_KAFKAP_OffsetCommit;
-
- if (has_generation_id && generation_id != mcgrp->generation_id)
- return RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION;
-
- if (ApiKey == RD_KAFKAP_OffsetCommit && !member)
- return RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
-
- switch (mcgrp->state) {
- case RD_KAFKA_MOCK_CGRP_STATE_EMPTY:
- if (ApiKey == RD_KAFKAP_JoinGroup)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- break;
-
- case RD_KAFKA_MOCK_CGRP_STATE_JOINING:
- if (ApiKey == RD_KAFKAP_JoinGroup ||
- ApiKey == RD_KAFKAP_LeaveGroup)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- else
- return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS;
-
- case RD_KAFKA_MOCK_CGRP_STATE_SYNCING:
- if (ApiKey == RD_KAFKAP_SyncGroup ||
- ApiKey == RD_KAFKAP_JoinGroup ||
- ApiKey == RD_KAFKAP_LeaveGroup)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- else
- return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS;
-
- case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING:
- if (ApiKey == RD_KAFKAP_JoinGroup ||
- ApiKey == RD_KAFKAP_LeaveGroup ||
- ApiKey == RD_KAFKAP_OffsetCommit)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- else
- return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS;
-
- case RD_KAFKA_MOCK_CGRP_STATE_UP:
- if (ApiKey == RD_KAFKAP_JoinGroup ||
- ApiKey == RD_KAFKAP_LeaveGroup ||
- ApiKey == RD_KAFKAP_Heartbeat ||
- ApiKey == RD_KAFKAP_OffsetCommit)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- break;
- }
-
- return RD_KAFKA_RESP_ERR_INVALID_REQUEST;
-}
-
-
-/**
- * @brief Set a member's assignment (from leader's SyncGroupRequest)
- */
-void rd_kafka_mock_cgrp_member_assignment_set(
- rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member,
- const rd_kafkap_bytes_t *Metadata) {
- if (member->assignment) {
- rd_assert(mcgrp->assignment_cnt > 0);
- mcgrp->assignment_cnt--;
- rd_kafkap_bytes_destroy(member->assignment);
- member->assignment = NULL;
- }
-
- if (Metadata) {
- mcgrp->assignment_cnt++;
- member->assignment = rd_kafkap_bytes_copy(Metadata);
- }
-}
-
-
-/**
- * @brief Sync is done (successfully or with an error); send responses
- *        back to the members.
- */
-static void rd_kafka_mock_cgrp_sync_done(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_resp_err_t err) {
- rd_kafka_mock_cgrp_member_t *member;
-
- TAILQ_FOREACH(member, &mcgrp->members, link) {
- rd_kafka_buf_t *resp;
-
- if ((resp = member->resp)) {
- member->resp = NULL;
- rd_assert(resp->rkbuf_reqhdr.ApiKey ==
- RD_KAFKAP_SyncGroup);
-
- rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
- /* MemberState */
- rd_kafka_buf_write_kbytes(
- resp, !err ? member->assignment : NULL);
- }
-
- rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL);
-
- if (member->conn) {
- rd_kafka_mock_connection_set_blocking(member->conn,
- rd_false);
- if (resp)
- rd_kafka_mock_connection_send_response(
- member->conn, resp);
- } else if (resp) {
- /* Member has disconnected. */
- rd_kafka_buf_destroy(resp);
- }
- }
-}
-
-
-/**
- * @brief Check if all members have sent SyncGroupRequests; if so, propagate
- *        the assignments to the members.
- */
-static void rd_kafka_mock_cgrp_sync_check(rd_kafka_mock_cgrp_t *mcgrp) {
-
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Mock consumer group %s: awaiting %d/%d syncing members "
- "in state %s",
- mcgrp->id, mcgrp->assignment_cnt, mcgrp->member_cnt,
- rd_kafka_mock_cgrp_state_names[mcgrp->state]);
-
- if (mcgrp->assignment_cnt < mcgrp->member_cnt)
- return;
-
- rd_kafka_mock_cgrp_sync_done(mcgrp, RD_KAFKA_RESP_ERR_NO_ERROR);
- rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_UP,
- "all members synced");
-}
-
-
-/**
- * @brief Member has sent SyncGroupRequest and is waiting for a response,
- *        which will be sent when all group members' SyncGroupRequests
- *        have been received.
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member,
- rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp) {
-
- if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_SYNCING)
- return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; /* FIXME */
-
- rd_kafka_mock_cgrp_member_active(mcgrp, member);
-
- rd_assert(!member->resp);
-
- member->resp = resp;
- member->conn = mconn;
- rd_kafka_mock_connection_set_blocking(member->conn, rd_true);
-
- /* Check if all members now have an assignment, if so, send responses */
- rd_kafka_mock_cgrp_sync_check(mcgrp);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Member is explicitly leaving the group (through LeaveGroupRequest)
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member) {
-
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Member %s is leaving group %s", member->id, mcgrp->id);
-
- rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
-
- rd_kafka_mock_cgrp_rebalance(mcgrp, "explicit member leave");
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Destroys/frees an array of protocols, including the array itself.
- */
-void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos,
- int proto_cnt) {
- int i;
-
- for (i = 0; i < proto_cnt; i++) {
- rd_free(protos[i].name);
- if (protos[i].metadata)
- rd_free(protos[i].metadata);
- }
-
- rd_free(protos);
-}
-
-static void
-rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp,
- int timeout_ms);
-
-/**
- * @brief Elect consumer group leader and send JoinGroup responses
- */
-static void rd_kafka_mock_cgrp_elect_leader(rd_kafka_mock_cgrp_t *mcgrp) {
- rd_kafka_mock_cgrp_member_t *member;
-
- rd_assert(mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING);
- rd_assert(!TAILQ_EMPTY(&mcgrp->members));
-
- mcgrp->generation_id++;
-
- /* Elect a leader.
- * FIXME: For now we'll use the first member */
- mcgrp->leader = TAILQ_FIRST(&mcgrp->members);
-
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Consumer group %s with %d member(s) is rebalancing: "
- "elected leader is %s, generation id %d",
- mcgrp->id, mcgrp->member_cnt, mcgrp->leader->id,
- mcgrp->generation_id);
-
- /* Find the most commonly supported protocol name among the members.
- * FIXME: For now we'll blindly use the first protocol of the leader. */
- if (mcgrp->protocol_name)
- rd_free(mcgrp->protocol_name);
- mcgrp->protocol_name = RD_KAFKAP_STR_DUP(mcgrp->leader->protos[0].name);
-
- /* Send JoinGroupResponses to all members */
- TAILQ_FOREACH(member, &mcgrp->members, link) {
- rd_bool_t is_leader = member == mcgrp->leader;
- int member_cnt = is_leader ? mcgrp->member_cnt : 0;
- rd_kafka_buf_t *resp;
- rd_kafka_mock_cgrp_member_t *member2;
- rd_kafka_mock_connection_t *mconn;
-
-                /* Member connection has been closed; it will eventually
-                 * reconnect or time out from the group. */
- if (!member->conn || !member->resp)
- continue;
- mconn = member->conn;
- member->conn = NULL;
- resp = member->resp;
- member->resp = NULL;
-
- rd_assert(resp->rkbuf_reqhdr.ApiKey == RD_KAFKAP_JoinGroup);
-
- rd_kafka_buf_write_i16(resp, 0); /* ErrorCode */
- rd_kafka_buf_write_i32(resp, mcgrp->generation_id);
- rd_kafka_buf_write_str(resp, mcgrp->protocol_name, -1);
- rd_kafka_buf_write_str(resp, mcgrp->leader->id, -1);
- rd_kafka_buf_write_str(resp, member->id, -1);
- rd_kafka_buf_write_i32(resp, member_cnt);
-
- /* Send full member list to leader */
- if (member_cnt > 0) {
- TAILQ_FOREACH(member2, &mcgrp->members, link) {
- rd_kafka_buf_write_str(resp, member2->id, -1);
- if (resp->rkbuf_reqhdr.ApiVersion >= 5)
- rd_kafka_buf_write_str(
- resp, member2->group_instance_id,
- -1);
- /* FIXME: look up correct protocol name */
- rd_assert(!rd_kafkap_str_cmp_str(
- member2->protos[0].name,
- mcgrp->protocol_name));
-
- rd_kafka_buf_write_kbytes(
- resp, member2->protos[0].metadata);
- }
- }
-
- /* Mark each member as active to avoid them timing out
- * at the same time as a JoinGroup handler that blocks
- * session.timeout.ms to elect a leader. */
- rd_kafka_mock_cgrp_member_active(mcgrp, member);
-
- rd_kafka_mock_connection_set_blocking(mconn, rd_false);
- rd_kafka_mock_connection_send_response(mconn, resp);
- }
-
- mcgrp->last_member_cnt = mcgrp->member_cnt;
-
- rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_SYNCING,
- "leader elected, waiting for all "
- "members to sync");
-
- rd_kafka_mock_cgrp_rebalance_timer_restart(mcgrp,
- mcgrp->session_timeout_ms);
-}
-
-
-/**
- * @brief Trigger group rebalance.
- */
-static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp,
- const char *reason) {
- int timeout_ms;
-
- if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING)
- return; /* Do nothing, group is already rebalancing. */
- else if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_EMPTY)
- timeout_ms = 3000; /* First join, low timeout.
- * Same as group.initial.rebalance.delay.ms
- * on the broker. */
- else if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_REBALANCING &&
- mcgrp->member_cnt == mcgrp->last_member_cnt)
- timeout_ms = 100; /* All members rejoined, quickly transition
- * to election. */
- else /* Let the rebalance delay be a bit shorter than the
-                * session timeout so that we don't time out members we are
-                * waiting for, who are also subject to the session timeout. */
- timeout_ms = mcgrp->session_timeout_ms > 1000
- ? mcgrp->session_timeout_ms - 1000
- : mcgrp->session_timeout_ms;
-
- if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_SYNCING)
- /* Abort current Syncing state */
- rd_kafka_mock_cgrp_sync_done(
- mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS);
-
- rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_JOINING,
- reason);
- rd_kafka_mock_cgrp_rebalance_timer_restart(mcgrp, timeout_ms);
-}
-
-/**
- * @brief Consumer group state machine triggered by timer events.
- */
-static void rd_kafka_mock_cgrp_fsm_timeout(rd_kafka_mock_cgrp_t *mcgrp) {
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Mock consumer group %s FSM timeout in state %s",
- mcgrp->id, rd_kafka_mock_cgrp_state_names[mcgrp->state]);
-
- switch (mcgrp->state) {
- case RD_KAFKA_MOCK_CGRP_STATE_EMPTY:
- /* No members, do nothing */
- break;
- case RD_KAFKA_MOCK_CGRP_STATE_JOINING:
- /* Timed out waiting for more members, elect a leader */
- if (mcgrp->member_cnt > 0)
- rd_kafka_mock_cgrp_elect_leader(mcgrp);
- else
- rd_kafka_mock_cgrp_set_state(
- mcgrp, RD_KAFKA_MOCK_CGRP_STATE_EMPTY,
- "no members joined");
- break;
-
- case RD_KAFKA_MOCK_CGRP_STATE_SYNCING:
- /* Timed out waiting for all members to sync */
-
- /* Send error response to all waiting members */
- rd_kafka_mock_cgrp_sync_done(
- mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS /* FIXME */);
-
- rd_kafka_mock_cgrp_set_state(
- mcgrp, RD_KAFKA_MOCK_CGRP_STATE_REBALANCING,
- "timed out waiting for all members to synchronize");
- break;
-
- case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING:
- /* Timed out waiting for all members to Leave or re-Join */
- rd_kafka_mock_cgrp_set_state(mcgrp,
- RD_KAFKA_MOCK_CGRP_STATE_JOINING,
- "timed out waiting for all "
- "members to re-Join or Leave");
- break;
-
- case RD_KAFKA_MOCK_CGRP_STATE_UP:
- /* No fsm timers triggered in this state, see
- * the session_tmr instead */
- break;
- }
-}
-
-static void rd_kafka_mcgrp_rebalance_timer_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_mock_cgrp_t *mcgrp = arg;
-
- rd_kafka_mock_cgrp_fsm_timeout(mcgrp);
-}
-
-
-/**
- * @brief Restart the rebalance timer, postponing leader election.
- */
-static void
-rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp,
- int timeout_ms) {
- rd_kafka_timer_start_oneshot(
- &mcgrp->cluster->timers, &mcgrp->rebalance_tmr, rd_true,
- timeout_ms * 1000, rd_kafka_mcgrp_rebalance_timer_cb, mcgrp);
-}
-
-
-static void
-rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member) {
- rd_assert(mcgrp->member_cnt > 0);
- TAILQ_REMOVE(&mcgrp->members, member, link);
- mcgrp->member_cnt--;
-
- rd_free(member->id);
-
- if (member->resp)
- rd_kafka_buf_destroy(member->resp);
-
- if (member->group_instance_id)
- rd_free(member->group_instance_id);
-
- rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL);
-
- rd_kafka_mock_cgrp_protos_destroy(member->protos, member->proto_cnt);
-
- rd_free(member);
-}
-
-
-/**
- * @brief Find member in group.
- */
-rd_kafka_mock_cgrp_member_t *
-rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp,
- const rd_kafkap_str_t *MemberId) {
- const rd_kafka_mock_cgrp_member_t *member;
- TAILQ_FOREACH(member, &mcgrp->members, link) {
- if (!rd_kafkap_str_cmp_str(MemberId, member->id))
- return (rd_kafka_mock_cgrp_member_t *)member;
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Update or add member to consumer group
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp,
- const rd_kafkap_str_t *MemberId,
- const rd_kafkap_str_t *ProtocolType,
- rd_kafka_mock_cgrp_proto_t *protos,
- int proto_cnt,
- int session_timeout_ms) {
- rd_kafka_mock_cgrp_member_t *member;
- rd_kafka_resp_err_t err;
-
- err = rd_kafka_mock_cgrp_check_state(mcgrp, NULL, resp, -1);
- if (err)
- return err;
-
- /* Find member */
- member = rd_kafka_mock_cgrp_member_find(mcgrp, MemberId);
- if (!member) {
- /* Not found, add member */
- member = rd_calloc(1, sizeof(*member));
-
- if (!RD_KAFKAP_STR_LEN(MemberId)) {
- /* Generate a member id */
- char memberid[32];
- rd_snprintf(memberid, sizeof(memberid), "%p", member);
- member->id = rd_strdup(memberid);
- } else
- member->id = RD_KAFKAP_STR_DUP(MemberId);
-
- TAILQ_INSERT_TAIL(&mcgrp->members, member, link);
- mcgrp->member_cnt++;
- }
-
- if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_JOINING)
- rd_kafka_mock_cgrp_rebalance(mcgrp, "member join");
-
- mcgrp->session_timeout_ms = session_timeout_ms;
-
- if (member->protos)
- rd_kafka_mock_cgrp_protos_destroy(member->protos,
- member->proto_cnt);
- member->protos = protos;
- member->proto_cnt = proto_cnt;
-
- rd_assert(!member->resp);
- member->resp = resp;
- member->conn = mconn;
- rd_kafka_mock_cgrp_member_active(mcgrp, member);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Check if any members have exceeded the session timeout.
- */
-static void rd_kafka_mock_cgrp_session_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_mock_cgrp_t *mcgrp = arg;
- rd_kafka_mock_cgrp_member_t *member, *tmp;
- rd_ts_t now = rd_clock();
- int timeout_cnt = 0;
-
- TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) {
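-                /* ts_last_activity and rd_clock() are in microseconds while
-                 * session_timeout_ms is in milliseconds, hence the * 1000. */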
- if (member->ts_last_activity +
- (mcgrp->session_timeout_ms * 1000) >
- now)
- continue;
-
- rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
- "Member %s session timed out for group %s",
- member->id, mcgrp->id);
-
- rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
- timeout_cnt++;
- }
-
- if (timeout_cnt)
- rd_kafka_mock_cgrp_rebalance(mcgrp, "member timeout");
-}
-
-
-void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp) {
- rd_kafka_mock_cgrp_member_t *member;
-
- TAILQ_REMOVE(&mcgrp->cluster->cgrps, mcgrp, link);
-
- rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->rebalance_tmr,
- rd_true);
- rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->session_tmr,
- rd_true);
- rd_free(mcgrp->id);
- rd_free(mcgrp->protocol_type);
- if (mcgrp->protocol_name)
- rd_free(mcgrp->protocol_name);
- while ((member = TAILQ_FIRST(&mcgrp->members)))
- rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
- rd_free(mcgrp);
-}
-
-
-rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *GroupId) {
- rd_kafka_mock_cgrp_t *mcgrp;
- TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) {
- if (!rd_kafkap_str_cmp_str(GroupId, mcgrp->id))
- return mcgrp;
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Find or create a consumer group
- */
-rd_kafka_mock_cgrp_t *
-rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *GroupId,
- const rd_kafkap_str_t *ProtocolType) {
- rd_kafka_mock_cgrp_t *mcgrp;
-
- mcgrp = rd_kafka_mock_cgrp_find(mcluster, GroupId);
- if (mcgrp)
- return mcgrp;
-
- /* FIXME: What to do with mismatching ProtocolTypes? */
-
- mcgrp = rd_calloc(1, sizeof(*mcgrp));
-
- mcgrp->cluster = mcluster;
- mcgrp->id = RD_KAFKAP_STR_DUP(GroupId);
- mcgrp->protocol_type = RD_KAFKAP_STR_DUP(ProtocolType);
- mcgrp->generation_id = 1;
- TAILQ_INIT(&mcgrp->members);
- rd_kafka_timer_start(&mcluster->timers, &mcgrp->session_tmr,
- 1000 * 1000 /*1s*/,
- rd_kafka_mock_cgrp_session_tmr_cb, mcgrp);
-
- TAILQ_INSERT_TAIL(&mcluster->cgrps, mcgrp, link);
-
- return mcgrp;
-}
-
-
-/**
- * @brief A client connection was closed; check if any cgrp holds state
- *        for this connection that needs to be cleared.
- */
-void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_mock_connection_t *mconn) {
- rd_kafka_mock_cgrp_t *mcgrp;
-
- TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) {
- rd_kafka_mock_cgrp_member_t *member, *tmp;
- TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) {
- if (member->conn == mconn) {
- member->conn = NULL;
- if (member->resp) {
- rd_kafka_buf_destroy(member->resp);
- member->resp = NULL;
- }
- }
- }
- }
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c
deleted file mode 100644
index 3a004d41d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Mocks - protocol request handlers
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdbuf.h"
-#include "rdrand.h"
-#include "rdkafka_interceptor.h"
-#include "rdkafka_mock_int.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_offset.h"
-
-
-
-/**
- * @brief Handle ProduceRequest
- */
-static int rd_kafka_mock_handle_Produce(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- int32_t TopicsCnt;
- rd_kafkap_str_t TransactionalId = RD_KAFKAP_STR_INITIALIZER;
- int16_t Acks;
- int32_t TimeoutMs;
- rd_kafka_resp_err_t all_err;
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
- rd_kafka_buf_read_str(rkbuf, &TransactionalId);
-
- rd_kafka_buf_read_i16(rkbuf, &Acks);
- rd_kafka_buf_read_i32(rkbuf, &TimeoutMs);
- rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
-
- /* Response: #Topics */
- rd_kafka_buf_write_i32(resp, TopicsCnt);
-
- /* Inject error, if any */
- all_err = rd_kafka_mock_next_request_error(mconn, resp);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartitionCnt;
- rd_kafka_mock_topic_t *mtopic;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
- /* Response: #Partitions */
- rd_kafka_buf_write_i32(resp, PartitionCnt);
-
- while (PartitionCnt-- > 0) {
- int32_t Partition;
- rd_kafka_mock_partition_t *mpart = NULL;
- rd_kafkap_bytes_t records;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int64_t BaseOffset = -1;
-
- rd_kafka_buf_read_i32(rkbuf, &Partition);
-
- if (mtopic)
- mpart = rd_kafka_mock_partition_find(mtopic,
- Partition);
-
- rd_kafka_buf_read_bytes(rkbuf, &records);
-
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- if (all_err)
- err = all_err;
- else if (!mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else if (mpart->leader != mconn->broker)
- err =
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
-
- /* Append to partition log */
- if (!err)
- err = rd_kafka_mock_partition_log_append(
- mpart, &records, &TransactionalId,
- &BaseOffset);
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- if (err) {
- /* Response: BaseOffset */
- rd_kafka_buf_write_i64(resp, BaseOffset);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* Response: LogAppendTimeMs */
- rd_kafka_buf_write_i64(resp, -1);
- }
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) {
- /* Response: LogStartOffset */
- rd_kafka_buf_write_i64(resp, -1);
- }
-
- } else {
- /* Response: BaseOffset */
- rd_kafka_buf_write_i64(resp, BaseOffset);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* Response: LogAppendTimeMs */
- rd_kafka_buf_write_i64(resp, 1234);
- }
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) {
- /* Response: LogStartOffset */
- rd_kafka_buf_write_i64(
- resp, mpart->start_offset);
- }
- }
- }
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: ThrottleTime */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-
-/**
- * @brief Handle FetchRequest
- */
-static int rd_kafka_mock_handle_Fetch(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t all_err;
- int32_t ReplicaId, MaxWait, MinBytes, MaxBytes = -1, SessionId = -1,
- Epoch, TopicsCnt;
- int8_t IsolationLevel;
- size_t totsize = 0;
-
- rd_kafka_buf_read_i32(rkbuf, &ReplicaId);
- rd_kafka_buf_read_i32(rkbuf, &MaxWait);
- rd_kafka_buf_read_i32(rkbuf, &MinBytes);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
- rd_kafka_buf_read_i32(rkbuf, &MaxBytes);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4)
- rd_kafka_buf_read_i8(rkbuf, &IsolationLevel);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) {
- rd_kafka_buf_read_i32(rkbuf, &SessionId);
- rd_kafka_buf_read_i32(rkbuf, &Epoch);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: ThrottleTime */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
-
- /* Inject error, if any */
- all_err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) {
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, all_err);
-
- /* Response: SessionId */
- rd_kafka_buf_write_i32(resp, SessionId);
- }
-
- rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
-
- /* Response: #Topics */
- rd_kafka_buf_write_i32(resp, TopicsCnt);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartitionCnt;
- rd_kafka_mock_topic_t *mtopic;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
- /* Response: #Partitions */
- rd_kafka_buf_write_i32(resp, PartitionCnt);
-
- while (PartitionCnt-- > 0) {
- int32_t Partition, CurrentLeaderEpoch = -1,
- PartMaxBytes;
- int64_t FetchOffset, LogStartOffset;
- rd_kafka_mock_partition_t *mpart = NULL;
- rd_kafka_resp_err_t err = all_err;
- rd_bool_t on_follower;
- size_t partsize = 0;
- const rd_kafka_mock_msgset_t *mset = NULL;
-
- rd_kafka_buf_read_i32(rkbuf, &Partition);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 9)
- rd_kafka_buf_read_i32(rkbuf,
- &CurrentLeaderEpoch);
-
- rd_kafka_buf_read_i64(rkbuf, &FetchOffset);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5)
- rd_kafka_buf_read_i64(rkbuf, &LogStartOffset);
-
- rd_kafka_buf_read_i32(rkbuf, &PartMaxBytes);
-
- if (mtopic)
- mpart = rd_kafka_mock_partition_find(mtopic,
- Partition);
-
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- /* Fetch is directed at follower and this is
- * the follower broker. */
- on_follower =
- mpart && mpart->follower_id == mconn->broker->id;
-
- if (!all_err && !mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else if (!all_err && mpart->leader != mconn->broker &&
- !on_follower)
- err =
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
-
- if (!err && mpart)
- err =
- rd_kafka_mock_partition_leader_epoch_check(
- mpart, CurrentLeaderEpoch);
-
- /* Find MessageSet for FetchOffset */
- if (!err && FetchOffset != mpart->end_offset) {
- /* Kafka currently only returns
- * OFFSET_NOT_AVAILABLE
- * in ListOffsets calls */
- if (!(mset = rd_kafka_mock_msgset_find(
- mpart, FetchOffset, on_follower)))
- err =
- RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE;
- rd_kafka_dbg(
- mcluster->rk, MOCK, "MOCK",
- "Topic %.*s [%" PRId32
- "] fetch err %s for offset %" PRId64
- " mset %p, on_follower %d, "
- "start %" PRId64 ", end_offset %" PRId64
- ", current epoch %" PRId32,
- RD_KAFKAP_STR_PR(&Topic), Partition,
- rd_kafka_err2name(err), FetchOffset, mset,
- on_follower, mpart->start_offset,
- mpart->end_offset, mpart->leader_epoch);
- }
-
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- /* Response: Highwatermark */
- rd_kafka_buf_write_i64(
- resp,
- mpart ? (on_follower ? mpart->follower_end_offset
- : mpart->end_offset)
- : -1);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) {
- /* Response: LastStableOffset */
- rd_kafka_buf_write_i64(
- resp, mpart ? mpart->end_offset : -1);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) {
- /* Response: LogStartOffset */
- rd_kafka_buf_write_i64(
- resp,
- !mpart ? -1
- : (on_follower
- ? mpart->follower_start_offset
- : mpart->start_offset));
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) {
- /* Response: #Aborted */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) {
- int32_t PreferredReadReplica =
- mpart && mpart->leader == mconn->broker &&
- mpart->follower_id != -1
- ? mpart->follower_id
- : -1;
-
- /* Response: PreferredReplica */
- rd_kafka_buf_write_i32(resp,
- PreferredReadReplica);
-
- if (PreferredReadReplica != -1) {
- /* Don't return any data when
- * PreferredReadReplica is set */
- mset = NULL;
- MaxWait = 0;
- }
- }
-
-
- if (mset && partsize < (size_t)PartMaxBytes &&
- totsize < (size_t)MaxBytes) {
- /* Response: Records */
- rd_kafka_buf_write_kbytes(resp, &mset->bytes);
- partsize += RD_KAFKAP_BYTES_SIZE(&mset->bytes);
- totsize += RD_KAFKAP_BYTES_SIZE(&mset->bytes);
-
- /* FIXME: Multiple messageSets ? */
- } else {
- /* Empty Response: Records: Null */
- rd_kafka_buf_write_i32(resp, 0);
- }
- }
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) {
- int32_t ForgottenTopicCnt;
- rd_kafka_buf_read_i32(rkbuf, &ForgottenTopicCnt);
- while (ForgottenTopicCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t ForgPartCnt;
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_i32(rkbuf, &ForgPartCnt);
- while (ForgPartCnt-- > 0) {
- int32_t Partition;
- rd_kafka_buf_read_i32(rkbuf, &Partition);
- }
- }
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) {
- rd_kafkap_str_t RackId;
- char *rack;
- rd_kafka_buf_read_str(rkbuf, &RackId);
- RD_KAFKAP_STR_DUPA(&rack, &RackId);
-                /* RackId is parsed but currently not used by the mock
-                 * broker. */
- }
-
- /* If there was no data, delay up to MaxWait.
- * This isn't strictly correct since we should cut the wait short
- * and feed newly produced data if a producer writes to the
- * partitions, but that is too much of a hassle here since we
- * can't block the thread. */
- if (!totsize && MaxWait > 0)
- resp->rkbuf_ts_retry = rd_clock() + (MaxWait * 1000);
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-
-/**
- * @brief Handle ListOffsets
- */
-static int rd_kafka_mock_handle_ListOffsets(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t all_err;
- int32_t ReplicaId, TopicsCnt;
- int8_t IsolationLevel;
-
- rd_kafka_buf_read_i32(rkbuf, &ReplicaId);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2)
- rd_kafka_buf_read_i8(rkbuf, &IsolationLevel);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* Response: ThrottleTime */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
-
- /* Inject error, if any */
- all_err = rd_kafka_mock_next_request_error(mconn, resp);
-
- rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
-
- /* Response: #Topics */
- rd_kafka_buf_write_i32(resp, TopicsCnt);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartitionCnt;
- rd_kafka_mock_topic_t *mtopic;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
- /* Response: #Partitions */
- rd_kafka_buf_write_i32(resp, PartitionCnt);
-
- while (PartitionCnt-- > 0) {
- int32_t Partition, CurrentLeaderEpoch = -1;
- int64_t Timestamp, Offset = -1;
- int32_t MaxNumOffsets;
- rd_kafka_mock_partition_t *mpart = NULL;
- rd_kafka_resp_err_t err = all_err;
-
- rd_kafka_buf_read_i32(rkbuf, &Partition);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4)
- rd_kafka_buf_read_i32(rkbuf,
- &CurrentLeaderEpoch);
-
- rd_kafka_buf_read_i64(rkbuf, &Timestamp);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion == 0)
- rd_kafka_buf_read_i32(rkbuf, &MaxNumOffsets);
-
- if (mtopic)
- mpart = rd_kafka_mock_partition_find(mtopic,
- Partition);
-
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- if (!all_err && !mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else if (!all_err && mpart->leader != mconn->broker)
- err =
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
-
- if (!err && mpart)
- err =
- rd_kafka_mock_partition_leader_epoch_check(
- mpart, CurrentLeaderEpoch);
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- if (!err && mpart) {
- if (Timestamp == RD_KAFKA_OFFSET_BEGINNING)
- Offset = mpart->start_offset;
- else if (Timestamp == RD_KAFKA_OFFSET_END)
- Offset = mpart->end_offset;
- else if (Timestamp < 0)
- Offset = -1;
- else /* FIXME: by timestamp */
- Offset = -1;
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion == 0) {
- /* Response: #OldStyleOffsets */
- rd_kafka_buf_write_i32(resp,
- Offset != -1 ? 1 : 0);
- /* Response: OldStyleOffsets[0] */
- if (Offset != -1)
- rd_kafka_buf_write_i64(resp, Offset);
- } else {
- /* Response: Timestamp (FIXME) */
- rd_kafka_buf_write_i64(resp, -1);
-
- /* Response: Offset */
- rd_kafka_buf_write_i64(resp, Offset);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) {
- /* Response: LeaderEpoch */
- rd_kafka_buf_write_i32(
- resp, mpart ? mpart->leader_epoch : -1);
- }
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Topic %.*s [%" PRId32
- "] returning "
- "offset %" PRId64 " (leader epoch %" PRId32
- ") for %s: %s",
- RD_KAFKAP_STR_PR(&Topic), Partition,
- Offset, mpart ? mpart->leader_epoch : -1,
- rd_kafka_offset2str(Timestamp),
- rd_kafka_err2str(err));
- }
- }
-
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-/**
- * @brief Handle OffsetFetch (fetch committed offsets)
- */
-static int rd_kafka_mock_handle_OffsetFetch(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_mock_broker_t *mrkb;
- rd_kafka_resp_err_t all_err;
- int32_t TopicsCnt;
- rd_kafkap_str_t GroupId;
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
- /* Response: ThrottleTime */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- rd_kafka_buf_read_str(rkbuf, &GroupId);
-
- /* Inject error, if any */
- all_err = rd_kafka_mock_next_request_error(mconn, resp);
-
- mrkb = rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP,
- &GroupId);
- if (!mrkb && !all_err)
- all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; // FIXME? check if
-                                                             // it's this mrkb?
-
-
- rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, 100000);
-
- /* Response: #Topics */
- rd_kafka_buf_write_arraycnt(resp, TopicsCnt);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartitionCnt;
- rd_kafka_mock_topic_t *mtopic;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, 100000);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
- /* Response: #Partitions */
- rd_kafka_buf_write_arraycnt(resp, PartitionCnt);
-
- while (PartitionCnt-- > 0) {
- int32_t Partition;
- rd_kafka_mock_partition_t *mpart = NULL;
- const rd_kafka_mock_committed_offset_t *coff = NULL;
- rd_kafka_resp_err_t err = all_err;
-
- rd_kafka_buf_read_i32(rkbuf, &Partition);
-
- if (mtopic)
- mpart = rd_kafka_mock_partition_find(mtopic,
- Partition);
-
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- if (!all_err && !mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- if (!err)
- coff = rd_kafka_mock_committed_offset_find(
- mpart, &GroupId);
-
- /* Response: CommittedOffset */
- rd_kafka_buf_write_i64(resp, coff ? coff->offset : -1);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) {
- /* Response: CommittedLeaderEpoch */
- rd_kafka_buf_write_i32(
- resp, mpart ? mpart->leader_epoch : -1);
- }
-
- /* Response: Metadata */
- rd_kafka_buf_write_kstr(resp,
- coff ? coff->metadata : NULL);
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- /* Response: Struct tags */
- rd_kafka_buf_write_tags(resp);
-
- if (coff)
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Topic %s [%" PRId32
- "] returning "
- "committed offset %" PRId64
- " for group %s",
- mtopic->name, mpart->id,
- coff->offset, coff->group);
- else
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "Topic %.*s [%" PRId32
- "] has no "
- "committed offset for group %.*s: "
- "%s",
- RD_KAFKAP_STR_PR(&Topic),
- Partition,
- RD_KAFKAP_STR_PR(&GroupId),
- rd_kafka_err2str(err));
- }
-
- /* Request: Skip struct tags */
- rd_kafka_buf_skip_tags(rkbuf);
-
- /* Response: Struct tags */
- rd_kafka_buf_write_tags(resp);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* Response: Outer ErrorCode */
- rd_kafka_buf_write_i16(resp, all_err);
- }
-
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-
-/**
- * @brief Handle OffsetCommit
- */
-static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_mock_broker_t *mrkb;
- rd_kafka_resp_err_t all_err;
- int32_t GenerationId = -1, TopicsCnt;
- rd_kafkap_str_t GroupId, MemberId, GroupInstanceId;
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
- /* Response: ThrottleTime */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- rd_kafka_buf_read_str(rkbuf, &GroupId);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- rd_kafka_buf_read_i32(rkbuf, &GenerationId);
- rd_kafka_buf_read_str(rkbuf, &MemberId);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7)
- rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2 &&
- rkbuf->rkbuf_reqhdr.ApiVersion <= 4) {
- int64_t RetentionTimeMs;
- rd_kafka_buf_read_i64(rkbuf, &RetentionTimeMs);
- }
-
-
- /* Inject error, if any */
- all_err = rd_kafka_mock_next_request_error(mconn, resp);
-
- mrkb = rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP,
- &GroupId);
- if (!mrkb && !all_err)
- all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
-
-
- if (!all_err) {
- rd_kafka_mock_cgrp_t *mcgrp;
-
- mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
- if (mcgrp) {
- rd_kafka_mock_cgrp_member_t *member = NULL;
-
- if (!RD_KAFKAP_STR_IS_NULL(&MemberId))
- member = rd_kafka_mock_cgrp_member_find(
- mcgrp, &MemberId);
-
- if (!member)
- all_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
- else
- all_err = rd_kafka_mock_cgrp_check_state(
- mcgrp, member, rkbuf, GenerationId);
- }
-
- /* FIXME: also check that partitions are assigned to member */
- }
-
- rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
-
- /* Response: #Topics */
- rd_kafka_buf_write_i32(resp, TopicsCnt);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartitionCnt;
- rd_kafka_mock_topic_t *mtopic;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
- /* Response: #Partitions */
- rd_kafka_buf_write_i32(resp, PartitionCnt);
-
- while (PartitionCnt-- > 0) {
- int32_t Partition;
- rd_kafka_mock_partition_t *mpart = NULL;
- rd_kafka_resp_err_t err = all_err;
- int64_t CommittedOffset;
- rd_kafkap_str_t Metadata;
-
- rd_kafka_buf_read_i32(rkbuf, &Partition);
-
- if (mtopic)
- mpart = rd_kafka_mock_partition_find(mtopic,
- Partition);
-
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- if (!all_err && !mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- rd_kafka_buf_read_i64(rkbuf, &CommittedOffset);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) {
- int32_t CommittedLeaderEpoch;
- rd_kafka_buf_read_i32(rkbuf,
- &CommittedLeaderEpoch);
-
- if (!err && mpart)
- err =
- rd_kafka_mock_partition_leader_epoch_check(
- mpart, CommittedLeaderEpoch);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion == 1) {
- int64_t CommitTimestamp;
- rd_kafka_buf_read_i64(rkbuf, &CommitTimestamp);
- }
-
- rd_kafka_buf_read_str(rkbuf, &Metadata);
-
- if (!err)
- rd_kafka_mock_commit_offset(mpart, &GroupId,
- CommittedOffset,
- &Metadata);
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
- }
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
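-/* Illustrative sketch: injecting a one-shot error for the handler above via
- * the public mock API (rd_kafka_mock_push_request_errors() from
- * rdkafka_mock.h), so the client's next commit fails with NOT_COORDINATOR
- * and exercises its coordinator re-query path. */
-#if 0 /* example only */
-static void example_fail_next_commit(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_push_request_errors(
-            mcluster, RD_KAFKAP_OffsetCommit, 1 /* one request */,
-            RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
-}
-#endif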
-
-/**
- * @brief Handle ApiVersionRequest (forward declaration, defined below)
- */
-static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf);
-
-
-/**
- * @brief Write a MetadataResponse.Topics entry to \p resp.
- *
- * @param mtopic may be NULL
- */
-static void
-rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp,
- int16_t ApiVersion,
- const char *topic,
- const rd_kafka_mock_topic_t *mtopic,
- rd_kafka_resp_err_t err) {
- int i;
- int partition_cnt =
- (!mtopic || err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
- ? 0
- : mtopic->partition_cnt;
-
- /* Response: Topics.ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
- /* Response: Topics.Name */
- rd_kafka_buf_write_str(resp, topic, -1);
- if (ApiVersion >= 1) {
- /* Response: Topics.IsInternal */
- rd_kafka_buf_write_bool(resp, rd_false);
- }
- /* Response: Topics.#Partitions */
- rd_kafka_buf_write_arraycnt(resp, partition_cnt);
-
- for (i = 0; mtopic && i < partition_cnt; i++) {
- const rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i];
- int r;
-
- /* Response: ..Partitions.ErrorCode */
- rd_kafka_buf_write_i16(resp, 0);
- /* Response: ..Partitions.PartitionIndex */
- rd_kafka_buf_write_i32(resp, mpart->id);
- /* Response: ..Partitions.Leader */
- rd_kafka_buf_write_i32(resp,
- mpart->leader ? mpart->leader->id : -1);
-
- if (ApiVersion >= 7) {
- /* Response: ..Partitions.LeaderEpoch */
- rd_kafka_buf_write_i32(resp, mpart->leader_epoch);
- }
-
- /* Response: ..Partitions.#ReplicaNodes */
- rd_kafka_buf_write_arraycnt(resp, mpart->replica_cnt);
- for (r = 0; r < mpart->replica_cnt; r++)
- rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id);
-
- /* Response: ..Partitions.#IsrNodes */
- /* Let Replicas == ISRs for now */
- rd_kafka_buf_write_arraycnt(resp, mpart->replica_cnt);
- for (r = 0; r < mpart->replica_cnt; r++)
- rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id);
-
- if (ApiVersion >= 5) {
- /* Response: ...OfflineReplicas */
- rd_kafka_buf_write_arraycnt(resp, 0);
- }
-
- rd_kafka_buf_write_tags(resp);
- }
-
- if (ApiVersion >= 8) {
- /* Response: Topics.TopicAuthorizedOperations */
- rd_kafka_buf_write_i32(resp, INT32_MIN);
- }
-
- rd_kafka_buf_write_tags(resp);
-}
-
-
-/**
- * @brief Handle MetadataRequest
- */
-static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_bool_t AllowAutoTopicCreation = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- const rd_kafka_mock_broker_t *mrkb;
- rd_kafka_topic_partition_list_t *requested_topics = NULL;
- rd_bool_t list_all_topics = rd_false;
- int32_t TopicsCnt;
- int i;
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
- /* Response: ThrottleTime */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- /* Response: #Brokers */
- rd_kafka_buf_write_arraycnt(resp, mcluster->broker_cnt);
-
- TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
- /* Response: Brokers.Nodeid */
- rd_kafka_buf_write_i32(resp, mrkb->id);
- /* Response: Brokers.Host */
- rd_kafka_buf_write_str(resp, mrkb->advertised_listener, -1);
- /* Response: Brokers.Port */
- rd_kafka_buf_write_i32(resp, (int32_t)mrkb->port);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
-                        /* Response: Brokers.Rack */
- rd_kafka_buf_write_str(resp, mrkb->rack, -1);
- }
- rd_kafka_buf_write_tags(resp);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* Response: ClusterId */
- rd_kafka_buf_write_str(resp, mcluster->id, -1);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: ControllerId */
- rd_kafka_buf_write_i32(resp, mcluster->controller_id);
- }
-
- /* #Topics */
- rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX);
-
- if (TopicsCnt > 0)
- requested_topics = rd_kafka_topic_partition_list_new(TopicsCnt);
- else if (rkbuf->rkbuf_reqhdr.ApiVersion == 0 || TopicsCnt == -1)
- list_all_topics = rd_true;
-
- for (i = 0; i < TopicsCnt; i++) {
- rd_kafkap_str_t Topic;
- char *topic;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- RD_KAFKAP_STR_DUPA(&topic, &Topic);
-
- rd_kafka_topic_partition_list_add(requested_topics, topic,
- RD_KAFKA_PARTITION_UA);
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4)
- rd_kafka_buf_read_bool(rkbuf, &AllowAutoTopicCreation);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8) {
- rd_bool_t IncludeClusterAuthorizedOperations;
- rd_bool_t IncludeTopicAuthorizedOperations;
- rd_kafka_buf_read_bool(rkbuf,
- &IncludeClusterAuthorizedOperations);
- rd_kafka_buf_read_bool(rkbuf,
- &IncludeTopicAuthorizedOperations);
- }
-
- if (list_all_topics) {
- rd_kafka_mock_topic_t *mtopic;
- /* Response: #Topics */
- rd_kafka_buf_write_arraycnt(resp, mcluster->topic_cnt);
-
- TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
- rd_kafka_mock_buf_write_Metadata_Topic(
- resp, rkbuf->rkbuf_reqhdr.ApiVersion, mtopic->name,
- mtopic, mtopic->err);
- }
-
- } else if (requested_topics) {
- /* Response: #Topics */
- rd_kafka_buf_write_arraycnt(resp, requested_topics->cnt);
-
- for (i = 0; i < requested_topics->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &requested_topics->elems[i];
- rd_kafka_mock_topic_t *mtopic;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- mtopic =
- rd_kafka_mock_topic_find(mcluster, rktpar->topic);
- if (!mtopic && AllowAutoTopicCreation)
- mtopic = rd_kafka_mock_topic_auto_create(
- mcluster, rktpar->topic, -1, &err);
- else if (!mtopic)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- rd_kafka_mock_buf_write_Metadata_Topic(
- resp, rkbuf->rkbuf_reqhdr.ApiVersion, rktpar->topic,
- mtopic, err ? err : mtopic->err);
- }
-
- } else {
-                /* Response: #Topics (empty: brokers-only response) */
- rd_kafka_buf_write_arraycnt(resp, 0);
- }
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8 &&
- rkbuf->rkbuf_reqhdr.ApiVersion <= 10) {
- /* ClusterAuthorizedOperations */
- rd_kafka_buf_write_i32(resp, INT32_MIN);
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
- rd_kafka_buf_write_tags(resp);
-
- if (requested_topics)
- rd_kafka_topic_partition_list_destroy(requested_topics);
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- if (requested_topics)
- rd_kafka_topic_partition_list_destroy(requested_topics);
-
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
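-/* Illustrative sketch: pre-creating a topic with the public mock API
- * (rd_kafka_mock_topic_create() from rdkafka_mock.h) so the Metadata handler
- * above serves it without relying on the auto-create path. Topic name and
- * sizing are placeholders. */
-#if 0 /* example only */
-static void example_create_topic(rd_kafka_mock_cluster_t *mcluster) {
-        /* 4 partitions, replication factor 3 (must not exceed broker_cnt) */
-        rd_kafka_mock_topic_create(mcluster, "mytopic", 4, 3);
-}
-#endif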
-
-/**
- * @brief Handle FindCoordinatorRequest
- */
-static int
-rd_kafka_mock_handle_FindCoordinator(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafkap_str_t Key;
- int8_t KeyType = RD_KAFKA_COORD_GROUP;
- const rd_kafka_mock_broker_t *mrkb = NULL;
- rd_kafka_resp_err_t err;
-
- /* Key */
- rd_kafka_buf_read_str(rkbuf, &Key);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* KeyType */
- rd_kafka_buf_read_i8(rkbuf, &KeyType);
- }
-
-
- /*
- * Construct response
- */
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: Throttle */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- /* Inject error, if any */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err && RD_KAFKAP_STR_LEN(&Key) > 0) {
- mrkb = rd_kafka_mock_cluster_get_coord(mcluster, KeyType, &Key);
- rd_assert(mrkb);
- }
-
- if (!mrkb && !err)
- err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
-
- if (err) {
- /* Response: ErrorCode and ErrorMessage */
- rd_kafka_buf_write_i16(resp, err);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_write_str(resp, rd_kafka_err2str(err), -1);
-
- /* Response: NodeId, Host, Port */
- rd_kafka_buf_write_i32(resp, -1);
- rd_kafka_buf_write_str(resp, NULL, -1);
- rd_kafka_buf_write_i32(resp, -1);
- } else {
- /* Response: ErrorCode and ErrorMessage */
- rd_kafka_buf_write_i16(resp, 0);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_write_str(resp, NULL, -1);
-
- /* Response: NodeId, Host, Port */
- rd_kafka_buf_write_i32(resp, mrkb->id);
- rd_kafka_buf_write_str(resp, mrkb->advertised_listener, -1);
- rd_kafka_buf_write_i32(resp, (int32_t)mrkb->port);
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
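-/* Illustrative sketch: pinning the group coordinator with the public mock
- * API (rd_kafka_mock_coordinator_set() from rdkafka_mock.h) so the handler
- * above returns a specific broker instead of the default mapping. */
-#if 0 /* example only */
-static void example_pin_coordinator(rd_kafka_mock_cluster_t *mcluster) {
-        /* All FindCoordinator("mygroup") requests now point to broker 2. */
-        rd_kafka_mock_coordinator_set(mcluster, "group", "mygroup", 2);
-}
-#endif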
-
-
-/**
- * @brief Handle JoinGroupRequest
- */
-static int rd_kafka_mock_handle_JoinGroup(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_mock_broker_t *mrkb;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafkap_str_t GroupId, MemberId, ProtocolType;
- rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
- int32_t SessionTimeoutMs;
- int32_t MaxPollIntervalMs = -1;
- int32_t ProtocolCnt = 0;
- int32_t i;
- rd_kafka_resp_err_t err;
- rd_kafka_mock_cgrp_t *mcgrp;
- rd_kafka_mock_cgrp_proto_t *protos = NULL;
-
- rd_kafka_buf_read_str(rkbuf, &GroupId);
- rd_kafka_buf_read_i32(rkbuf, &SessionTimeoutMs);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_i32(rkbuf, &MaxPollIntervalMs);
- rd_kafka_buf_read_str(rkbuf, &MemberId);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5)
- rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
- rd_kafka_buf_read_str(rkbuf, &ProtocolType);
- rd_kafka_buf_read_i32(rkbuf, &ProtocolCnt);
-
- if (ProtocolCnt > 1000) {
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "JoinGroupRequest: ProtocolCnt %" PRId32
- " > max allowed 1000",
- ProtocolCnt);
- rd_kafka_buf_destroy(resp);
- return -1;
- }
-
- protos = rd_malloc(sizeof(*protos) * ProtocolCnt);
- for (i = 0; i < ProtocolCnt; i++) {
- rd_kafkap_str_t ProtocolName;
- rd_kafkap_bytes_t Metadata;
- rd_kafka_buf_read_str(rkbuf, &ProtocolName);
- rd_kafka_buf_read_bytes(rkbuf, &Metadata);
- protos[i].name = rd_kafkap_str_copy(&ProtocolName);
- protos[i].metadata = rd_kafkap_bytes_copy(&Metadata);
- }
-
- /*
- * Construct response
- */
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* Response: Throttle */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- /* Inject error, if any */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err) {
- mrkb = rd_kafka_mock_cluster_get_coord(
- mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
-
- if (!mrkb)
- err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
- else if (mrkb != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
- }
-
- if (!err) {
- mcgrp =
- rd_kafka_mock_cgrp_get(mcluster, &GroupId, &ProtocolType);
- rd_assert(mcgrp);
-
- /* This triggers an async rebalance, the response will be
- * sent later. */
- err = rd_kafka_mock_cgrp_member_add(
- mcgrp, mconn, resp, &MemberId, &ProtocolType, protos,
- ProtocolCnt, SessionTimeoutMs);
- if (!err) {
- /* .._add() assumes ownership of resp and protos */
- protos = NULL;
- rd_kafka_mock_connection_set_blocking(mconn, rd_true);
- return 0;
- }
- }
-
- rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt);
-
- /* Error case */
- rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
- rd_kafka_buf_write_i32(resp, -1); /* GenerationId */
- rd_kafka_buf_write_str(resp, NULL, -1); /* ProtocolName */
- rd_kafka_buf_write_str(resp, NULL, -1); /* LeaderId */
- rd_kafka_buf_write_kstr(resp, NULL); /* MemberId */
- rd_kafka_buf_write_i32(resp, 0); /* MemberCnt */
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- if (protos)
- rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt);
- return -1;
-}
-
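-/* Illustrative sketch: the group handshake implemented by this and the
- * following handlers is driven from the client by a single subscribe call;
- * JoinGroup, SyncGroup and periodic Heartbeat requests all follow from it.
- * Assumes a consumer `rk` with a group.id configured; "mytopic" is a
- * placeholder. */
-#if 0 /* example only */
-static void example_join_group(rd_kafka_t *rk) {
-        rd_kafka_topic_partition_list_t *topics =
-            rd_kafka_topic_partition_list_new(1);
-
-        rd_kafka_topic_partition_list_add(topics, "mytopic",
-                                          RD_KAFKA_PARTITION_UA);
-        rd_kafka_subscribe(rk, topics); /* triggers JoinGroupRequest */
-        rd_kafka_topic_partition_list_destroy(topics);
-}
-#endif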
-
-/**
- * @brief Handle HeartbeatRequest
- */
-static int rd_kafka_mock_handle_Heartbeat(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_mock_broker_t *mrkb;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafkap_str_t GroupId, MemberId;
- rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
- int32_t GenerationId;
- rd_kafka_resp_err_t err;
- rd_kafka_mock_cgrp_t *mcgrp;
- rd_kafka_mock_cgrp_member_t *member = NULL;
-
- rd_kafka_buf_read_str(rkbuf, &GroupId);
- rd_kafka_buf_read_i32(rkbuf, &GenerationId);
- rd_kafka_buf_read_str(rkbuf, &MemberId);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
- rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
-
- /*
- * Construct response
- */
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: Throttle */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- /* Inject error, if any */
- err = rd_kafka_mock_next_request_error(mconn, resp);
- if (!err) {
- mrkb = rd_kafka_mock_cluster_get_coord(
- mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
-
- if (!mrkb)
- err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
- else if (mrkb != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
- }
-
- if (!err) {
- mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
- if (!mcgrp)
- err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
- }
-
- if (!err) {
- member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId);
- if (!member)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
- }
-
- if (!err)
- err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf,
- GenerationId);
-
- if (!err)
- rd_kafka_mock_cgrp_member_active(mcgrp, member);
-
- rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-/**
- * @brief Handle LeaveGroupRequest
- */
-static int rd_kafka_mock_handle_LeaveGroup(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_mock_broker_t *mrkb;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafkap_str_t GroupId, MemberId;
- rd_kafka_resp_err_t err;
- rd_kafka_mock_cgrp_t *mcgrp;
- rd_kafka_mock_cgrp_member_t *member = NULL;
-
- rd_kafka_buf_read_str(rkbuf, &GroupId);
- rd_kafka_buf_read_str(rkbuf, &MemberId);
-
- /*
- * Construct response
- */
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: Throttle */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- /* Inject error, if any */
- err = rd_kafka_mock_next_request_error(mconn, resp);
- if (!err) {
- mrkb = rd_kafka_mock_cluster_get_coord(
- mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
-
- if (!mrkb)
- err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
- else if (mrkb != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
- }
-
- if (!err) {
- mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
- if (!mcgrp)
- err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
- }
-
- if (!err) {
- member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId);
- if (!member)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
- }
-
- if (!err)
- err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf, -1);
-
- if (!err)
- rd_kafka_mock_cgrp_member_leave(mcgrp, member);
-
- rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-
-/**
- * @brief Handle SyncGroupRequest
- */
-static int rd_kafka_mock_handle_SyncGroup(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_mock_broker_t *mrkb;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafkap_str_t GroupId, MemberId;
- rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
- int32_t GenerationId, AssignmentCnt;
- int32_t i;
- rd_kafka_resp_err_t err;
- rd_kafka_mock_cgrp_t *mcgrp = NULL;
- rd_kafka_mock_cgrp_member_t *member = NULL;
-
- rd_kafka_buf_read_str(rkbuf, &GroupId);
- rd_kafka_buf_read_i32(rkbuf, &GenerationId);
- rd_kafka_buf_read_str(rkbuf, &MemberId);
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
- rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
- rd_kafka_buf_read_i32(rkbuf, &AssignmentCnt);
-
- /*
- * Construct response
- */
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* Response: Throttle */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- /* Inject error, if any */
- err = rd_kafka_mock_next_request_error(mconn, resp);
- if (!err) {
- mrkb = rd_kafka_mock_cluster_get_coord(
- mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
-
- if (!mrkb)
- err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
- else if (mrkb != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
- }
-
- if (!err) {
- mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
- if (!mcgrp)
- err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
- }
-
- if (!err) {
- member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId);
- if (!member)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
- }
-
- if (!err)
- err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf,
- GenerationId);
-
- if (!err)
- rd_kafka_mock_cgrp_member_active(mcgrp, member);
-
- if (!err) {
- rd_bool_t is_leader = mcgrp->leader && mcgrp->leader == member;
-
- if (AssignmentCnt > 0 && !is_leader)
-                        /* FIXME */
-                        err = RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
- else if (AssignmentCnt == 0 && is_leader)
- err = RD_KAFKA_RESP_ERR_INVALID_PARTITIONS; /* FIXME */
- }
-
- for (i = 0; i < AssignmentCnt; i++) {
- rd_kafkap_str_t MemberId2;
- rd_kafkap_bytes_t Metadata;
- rd_kafka_mock_cgrp_member_t *member2;
-
- rd_kafka_buf_read_str(rkbuf, &MemberId2);
- rd_kafka_buf_read_bytes(rkbuf, &Metadata);
-
- if (err)
- continue;
-
- /* Find member */
- member2 = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId2);
- if (!member2)
- continue;
-
- rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member2,
- &Metadata);
- }
-
- if (!err) {
- err = rd_kafka_mock_cgrp_member_sync_set(mcgrp, member, mconn,
- resp);
- /* .._sync_set() assumes ownership of resp */
- if (!err)
- return 0; /* Response will be sent when all members
- * are synchronized */
- }
-
- /* Error case */
- rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
- rd_kafka_buf_write_bytes(resp, NULL, -1); /* MemberState */
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-
-/**
- * @brief Generate a unique ProducerID
- */
-static rd_kafka_pid_t
-rd_kafka_mock_pid_new(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *TransactionalId) {
- size_t tidlen =
- TransactionalId ? RD_KAFKAP_STR_LEN(TransactionalId) : 0;
- rd_kafka_mock_pid_t *mpid = rd_malloc(sizeof(*mpid) + tidlen);
- rd_kafka_pid_t ret;
-
- mpid->pid.id = rd_jitter(1, 900000) * 1000;
- mpid->pid.epoch = 0;
-
- if (tidlen > 0)
- memcpy(mpid->TransactionalId, TransactionalId->str, tidlen);
- mpid->TransactionalId[tidlen] = '\0';
-
- mtx_lock(&mcluster->lock);
- rd_list_add(&mcluster->pids, mpid);
- ret = mpid->pid;
- mtx_unlock(&mcluster->lock);
-
- return ret;
-}
-
-
-/**
- * @brief Finds a matching mcluster mock PID for the given \p pid.
- *
- * @locks_required mcluster->lock
- */
-rd_kafka_resp_err_t
-rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *TransactionalId,
- const rd_kafka_pid_t pid,
- rd_kafka_mock_pid_t **mpidp) {
- rd_kafka_mock_pid_t *mpid;
- rd_kafka_mock_pid_t skel = {pid};
-
- *mpidp = NULL;
- mpid = rd_list_find(&mcluster->pids, &skel, rd_kafka_mock_pid_cmp_pid);
-
- if (!mpid)
- return RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
- else if (((TransactionalId != NULL) !=
- (*mpid->TransactionalId != '\0')) ||
- (TransactionalId &&
- rd_kafkap_str_cmp_str(TransactionalId,
- mpid->TransactionalId)))
- return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING;
-
- *mpidp = mpid;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Checks if the given pid is known, else returns an error.
- */
-static rd_kafka_resp_err_t
-rd_kafka_mock_pid_check(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *TransactionalId,
- const rd_kafka_pid_t check_pid) {
- rd_kafka_mock_pid_t *mpid;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- mtx_lock(&mcluster->lock);
- err =
- rd_kafka_mock_pid_find(mcluster, TransactionalId, check_pid, &mpid);
- if (!err && check_pid.epoch != mpid->pid.epoch)
- err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
- mtx_unlock(&mcluster->lock);
-
- if (unlikely(err))
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
- "PID check failed for TransactionalId=%.*s: "
- "expected %s, not %s: %s",
- RD_KAFKAP_STR_PR(TransactionalId),
- mpid ? rd_kafka_pid2str(mpid->pid) : "none",
- rd_kafka_pid2str(check_pid),
- rd_kafka_err2name(err));
- return err;
-}
-
-
-/**
- * @brief Bump the epoch for an existing pid, or return an error
- * if the current_pid does not match an existing pid.
- */
-static rd_kafka_resp_err_t
-rd_kafka_mock_pid_bump(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *TransactionalId,
- rd_kafka_pid_t *current_pid) {
- rd_kafka_mock_pid_t *mpid;
- rd_kafka_resp_err_t err;
-
- mtx_lock(&mcluster->lock);
- err = rd_kafka_mock_pid_find(mcluster, TransactionalId, *current_pid,
- &mpid);
- if (err) {
- mtx_unlock(&mcluster->lock);
- return err;
- }
-
- if (current_pid->epoch != mpid->pid.epoch) {
- mtx_unlock(&mcluster->lock);
- return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
- }
-
- mpid->pid.epoch++;
- *current_pid = mpid->pid;
- mtx_unlock(&mcluster->lock);
-
- rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Bumped PID %s",
- rd_kafka_pid2str(*current_pid));
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Handle InitProducerId
- */
-static int
-rd_kafka_mock_handle_InitProducerId(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafkap_str_t TransactionalId;
- rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER;
- rd_kafka_pid_t current_pid = RD_KAFKA_PID_INITIALIZER;
- int32_t TxnTimeoutMs;
- rd_kafka_resp_err_t err;
-
- /* TransactionalId */
- rd_kafka_buf_read_str(rkbuf, &TransactionalId);
- /* TransactionTimeoutMs */
- rd_kafka_buf_read_i32(rkbuf, &TxnTimeoutMs);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
- /* ProducerId */
- rd_kafka_buf_read_i64(rkbuf, &current_pid.id);
- /* ProducerEpoch */
- rd_kafka_buf_read_i16(rkbuf, &current_pid.epoch);
- }
-
- /*
- * Construct response
- */
-
- /* ThrottleTimeMs */
- rd_kafka_buf_write_i32(resp, 0);
-
- /* Inject error */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err && !RD_KAFKAP_STR_IS_NULL(&TransactionalId)) {
- if (RD_KAFKAP_STR_LEN(&TransactionalId) == 0)
- err = RD_KAFKA_RESP_ERR_INVALID_REQUEST;
- else if (rd_kafka_mock_cluster_get_coord(
- mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) !=
- mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
- }
-
- if (!err) {
- if (rd_kafka_pid_valid(current_pid)) {
- /* Producer is asking for the transactional coordinator
- * to bump the epoch (KIP-360).
- * Verify that current_pid matches and then
- * bump the epoch. */
- err = rd_kafka_mock_pid_bump(mcluster, &TransactionalId,
- &current_pid);
- if (!err)
- pid = current_pid;
-
- } else {
- /* Generate a new pid */
- pid = rd_kafka_mock_pid_new(mcluster, &TransactionalId);
- }
- }
-
- /* ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- /* ProducerId */
- rd_kafka_buf_write_i64(resp, pid.id);
- /* ProducerEpoch */
- rd_kafka_buf_write_i16(resp, pid.epoch);
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
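-/* Illustrative sketch: an idempotent producer configuration that drives the
- * InitProducerId handler above. test.mock.num.brokers is librdkafka's
- * built-in toggle for an internal mock cluster; error handling is elided
- * for brevity. */
-#if 0 /* example only */
-static rd_kafka_t *example_idempotent_producer(void) {
-        rd_kafka_conf_t *conf = rd_kafka_conf_new();
-        char errstr[256];
-
-        rd_kafka_conf_set(conf, "test.mock.num.brokers", "3", errstr,
-                          sizeof(errstr));
-        rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr,
-                          sizeof(errstr));
-
-        /* InitProducerIdRequest is sent as part of producer startup. */
-        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
-}
-#endif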
-
-
-/**
- * @brief Handle AddPartitionsToTxn
- */
-static int
-rd_kafka_mock_handle_AddPartitionsToTxn(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t all_err;
- rd_kafkap_str_t TransactionalId;
- rd_kafka_pid_t pid;
- int32_t TopicsCnt;
-
- /* Response: ThrottleTimeMs */
- rd_kafka_buf_write_i32(resp, 0);
-
- /* TransactionalId */
- rd_kafka_buf_read_str(rkbuf, &TransactionalId);
- /* ProducerId */
- rd_kafka_buf_read_i64(rkbuf, &pid.id);
- /* Epoch */
- rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
- /* #Topics */
- rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
-
- /* Response: #Results */
- rd_kafka_buf_write_i32(resp, TopicsCnt);
-
- /* Inject error */
- all_err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!all_err &&
- rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN,
- &TransactionalId) != mconn->broker)
- all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
-
- if (!all_err)
- all_err =
- rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartsCnt;
- const rd_kafka_mock_topic_t *mtopic;
-
- /* Topic */
- rd_kafka_buf_read_str(rkbuf, &Topic);
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
-
- /* #Partitions */
- rd_kafka_buf_read_i32(rkbuf, &PartsCnt);
- /* Response: #Partitions */
- rd_kafka_buf_write_i32(resp, PartsCnt);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- while (PartsCnt--) {
- int32_t Partition;
- rd_kafka_resp_err_t err = all_err;
-
- /* Partition */
- rd_kafka_buf_read_i32(rkbuf, &Partition);
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- if (!mtopic || Partition < 0 ||
- Partition >= mtopic->partition_cnt)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else if (mtopic && mtopic->err)
- err = mtopic->err;
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
- }
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-/**
- * @brief Handle AddOffsetsToTxn
- */
-static int
-rd_kafka_mock_handle_AddOffsetsToTxn(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t err;
- rd_kafkap_str_t TransactionalId, GroupId;
- rd_kafka_pid_t pid;
-
- /* TransactionalId */
- rd_kafka_buf_read_str(rkbuf, &TransactionalId);
- /* ProducerId */
- rd_kafka_buf_read_i64(rkbuf, &pid.id);
- /* Epoch */
- rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
-        /* GroupId */
- rd_kafka_buf_read_str(rkbuf, &GroupId);
-
- /* Response: ThrottleTimeMs */
- rd_kafka_buf_write_i32(resp, 0);
-
- /* Inject error */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err &&
- rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN,
- &TransactionalId) != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
-
- if (!err)
- err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-/**
- * @brief Handle TxnOffsetCommit
- */
-static int
-rd_kafka_mock_handle_TxnOffsetCommit(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t err;
- rd_kafkap_str_t TransactionalId, GroupId;
- rd_kafka_pid_t pid;
- int32_t TopicsCnt;
-
- /* Response: ThrottleTimeMs */
- rd_kafka_buf_write_i32(resp, 0);
-
- /* TransactionalId */
- rd_kafka_buf_read_str(rkbuf, &TransactionalId);
- /* GroupId */
- rd_kafka_buf_read_str(rkbuf, &GroupId);
- /* ProducerId */
- rd_kafka_buf_read_i64(rkbuf, &pid.id);
- /* Epoch */
- rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
- int32_t GenerationId;
- rd_kafkap_str_t kMemberId, kGroupInstanceId;
-
- /* GenerationId */
- rd_kafka_buf_read_i32(rkbuf, &GenerationId);
- /* MemberId */
- rd_kafka_buf_read_str(rkbuf, &kMemberId);
- /* GroupInstanceId */
- rd_kafka_buf_read_str(rkbuf, &kGroupInstanceId);
- }
-
- /* #Topics */
- rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, 100000);
-
- /* Response: #Results */
- rd_kafka_buf_write_arraycnt(resp, TopicsCnt);
-
- /* Inject error */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err &&
- rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP,
- &GroupId) != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
-
- if (!err)
- err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
-
- while (TopicsCnt-- > 0) {
- rd_kafkap_str_t Topic;
- int32_t PartsCnt;
- rd_kafka_mock_topic_t *mtopic;
-
- /* Topic */
- rd_kafka_buf_read_str(rkbuf, &Topic);
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* #Partitions */
- rd_kafka_buf_read_arraycnt(rkbuf, &PartsCnt, 100000);
-
- /* Response: #Partitions */
- rd_kafka_buf_write_arraycnt(resp, PartsCnt);
-
- while (PartsCnt-- > 0) {
- int32_t Partition;
- int64_t Offset;
- rd_kafkap_str_t Metadata;
- rd_kafka_mock_partition_t *mpart;
-
- /* Partition */
- rd_kafka_buf_read_i32(rkbuf, &Partition);
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
-
- mpart = rd_kafka_mock_partition_find(mtopic, Partition);
- if (!err && !mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- /* CommittedOffset */
- rd_kafka_buf_read_i64(rkbuf, &Offset);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
- /* CommittedLeaderEpoch */
- int32_t CommittedLeaderEpoch;
- rd_kafka_buf_read_i32(rkbuf,
- &CommittedLeaderEpoch);
- if (!err && mpart)
- err =
- rd_kafka_mock_partition_leader_epoch_check(
- mpart, CommittedLeaderEpoch);
- }
-
- /* CommittedMetadata */
- rd_kafka_buf_read_str(rkbuf, &Metadata);
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- /* Request: Struct tags */
- rd_kafka_buf_skip_tags(rkbuf);
-
- /* Response: Struct tags */
- rd_kafka_buf_write_tags(resp);
- }
-
- /* Request: Struct tags */
- rd_kafka_buf_skip_tags(rkbuf);
-
- /* Response: Struct tags */
- rd_kafka_buf_write_tags(resp);
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-/**
- * @brief Handle EndTxn
- */
-static int rd_kafka_mock_handle_EndTxn(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t err;
- rd_kafkap_str_t TransactionalId;
- rd_kafka_pid_t pid;
- rd_bool_t committed;
-
- /* TransactionalId */
- rd_kafka_buf_read_str(rkbuf, &TransactionalId);
- /* ProducerId */
- rd_kafka_buf_read_i64(rkbuf, &pid.id);
- /* ProducerEpoch */
- rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
- /* Committed */
- rd_kafka_buf_read_bool(rkbuf, &committed);
-
- /*
- * Construct response
- */
-
- /* ThrottleTimeMs */
- rd_kafka_buf_write_i32(resp, 0);
-
- /* Inject error */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err &&
- rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN,
- &TransactionalId) != mconn->broker)
- err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
-
- if (!err)
- err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
-
- /* ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
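-/* Illustrative sketch: the transactional API sequence whose requests the
- * handlers above serve. Assumes `rk` was created with a transactional.id;
- * the rd_kafka_error_t return values are ignored here for brevity. */
-#if 0 /* example only */
-static void example_txn_roundtrip(rd_kafka_t *rk) {
-        rd_kafka_init_transactions(rk, 5000); /* InitProducerId */
-        rd_kafka_begin_transaction(rk);
-        /* produce()d partitions trigger AddPartitionsToTxn as a side effect */
-        rd_kafka_commit_transaction(rk, 5000); /* EndTxn(Committed=true) */
-}
-#endif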
-static int
-rd_kafka_mock_handle_OffsetForLeaderEpoch(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- const rd_bool_t log_decode_errors = rd_true;
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- rd_kafka_resp_err_t err;
- int32_t TopicsCnt, i;
-
- /* Response: ThrottleTimeMs */
- rd_kafka_buf_write_i32(resp, 0);
-
- /* #Topics */
- rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX);
-
- /* Response: #Topics */
- rd_kafka_buf_write_arraycnt(resp, TopicsCnt);
-
- /* Inject error */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- for (i = 0; i < TopicsCnt; i++) {
- rd_kafkap_str_t Topic;
- int32_t PartitionsCnt, j;
- rd_kafka_mock_topic_t *mtopic;
-
- /* Topic */
- rd_kafka_buf_read_str(rkbuf, &Topic);
-
- mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
-
- /* Response: Topic */
- rd_kafka_buf_write_kstr(resp, &Topic);
-
- /* #Partitions */
- rd_kafka_buf_read_arraycnt(rkbuf, &PartitionsCnt,
- RD_KAFKAP_PARTITIONS_MAX);
-
- /* Response: #Partitions */
- rd_kafka_buf_write_arraycnt(resp, PartitionsCnt);
-
- for (j = 0; j < PartitionsCnt; j++) {
- rd_kafka_mock_partition_t *mpart;
- int32_t Partition, CurrentLeaderEpoch, LeaderEpoch;
- int64_t EndOffset = -1;
-
- /* Partition */
- rd_kafka_buf_read_i32(rkbuf, &Partition);
- /* CurrentLeaderEpoch */
- rd_kafka_buf_read_i32(rkbuf, &CurrentLeaderEpoch);
- /* LeaderEpoch */
- rd_kafka_buf_read_i32(rkbuf, &LeaderEpoch);
-
- mpart = rd_kafka_mock_partition_find(mtopic, Partition);
- if (!err && !mpart)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
-
- if (!err && mpart)
- err =
- rd_kafka_mock_partition_leader_epoch_check(
- mpart, CurrentLeaderEpoch);
-
- if (!err && mpart) {
- EndOffset =
- rd_kafka_mock_partition_offset_for_leader_epoch(
- mpart, LeaderEpoch);
- }
-
- /* Response: ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
- /* Response: Partition */
- rd_kafka_buf_write_i32(resp, Partition);
- /* Response: LeaderEpoch */
- rd_kafka_buf_write_i32(resp, LeaderEpoch);
-                        /* Response: EndOffset */
- rd_kafka_buf_write_i64(resp, EndOffset);
- }
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-
-err_parse:
- rd_kafka_buf_destroy(resp);
- return -1;
-}
-
-
-/**
- * @brief Default request handlers
- */
-const struct rd_kafka_mock_api_handler
- rd_kafka_mock_api_handlers[RD_KAFKAP__NUM] = {
- /* [request-type] = { MinVersion, MaxVersion, FlexVersion, callback } */
- [RD_KAFKAP_Produce] = {0, 7, -1, rd_kafka_mock_handle_Produce},
- [RD_KAFKAP_Fetch] = {0, 11, -1, rd_kafka_mock_handle_Fetch},
- [RD_KAFKAP_ListOffsets] = {0, 5, -1, rd_kafka_mock_handle_ListOffsets},
- [RD_KAFKAP_OffsetFetch] = {0, 6, 6, rd_kafka_mock_handle_OffsetFetch},
- [RD_KAFKAP_OffsetCommit] = {0, 8, 8, rd_kafka_mock_handle_OffsetCommit},
- [RD_KAFKAP_ApiVersion] = {0, 2, 3, rd_kafka_mock_handle_ApiVersion},
- [RD_KAFKAP_Metadata] = {0, 9, 9, rd_kafka_mock_handle_Metadata},
- [RD_KAFKAP_FindCoordinator] = {0, 3, 3,
- rd_kafka_mock_handle_FindCoordinator},
- [RD_KAFKAP_InitProducerId] = {0, 4, 2,
- rd_kafka_mock_handle_InitProducerId},
- [RD_KAFKAP_JoinGroup] = {0, 6, 6, rd_kafka_mock_handle_JoinGroup},
- [RD_KAFKAP_Heartbeat] = {0, 5, 4, rd_kafka_mock_handle_Heartbeat},
- [RD_KAFKAP_LeaveGroup] = {0, 4, 4, rd_kafka_mock_handle_LeaveGroup},
- [RD_KAFKAP_SyncGroup] = {0, 4, 4, rd_kafka_mock_handle_SyncGroup},
- [RD_KAFKAP_AddPartitionsToTxn] =
- {0, 1, -1, rd_kafka_mock_handle_AddPartitionsToTxn},
- [RD_KAFKAP_AddOffsetsToTxn] = {0, 1, -1,
- rd_kafka_mock_handle_AddOffsetsToTxn},
- [RD_KAFKAP_TxnOffsetCommit] = {0, 3, 3,
- rd_kafka_mock_handle_TxnOffsetCommit},
- [RD_KAFKAP_EndTxn] = {0, 1, -1, rd_kafka_mock_handle_EndTxn},
- [RD_KAFKAP_OffsetForLeaderEpoch] =
- {2, 2, -1, rd_kafka_mock_handle_OffsetForLeaderEpoch},
-};
-
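-/* Illustrative sketch: narrowing the version range advertised by the table
- * above with the public mock API (rd_kafka_mock_set_apiversion() from
- * rdkafka_mock.h), e.g. to test client version-downgrade paths. */
-#if 0 /* example only */
-static void example_limit_versions(rd_kafka_mock_cluster_t *mcluster) {
-        /* Advertise only Fetch v0..v4 in ApiVersionResponse. */
-        rd_kafka_mock_set_apiversion(mcluster, RD_KAFKAP_Fetch, 0, 4);
-}
-#endif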
-
-
-/**
- * @brief Handle ApiVersionRequest.
- *
- * @remark This is the only handler that needs to handle unsupported
- * ApiVersions.
- */
-static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
- rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
- size_t of_ApiKeysCnt;
- int cnt = 0;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int i;
-
- /* Inject error */
- err = rd_kafka_mock_next_request_error(mconn, resp);
-
- if (!err && !rd_kafka_mock_cluster_ApiVersion_check(
- mcluster, rkbuf->rkbuf_reqhdr.ApiKey,
- rkbuf->rkbuf_reqhdr.ApiVersion))
- err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION;
-
- /* ApiVersionRequest/Response with flexver (>=v3) has a mix
- * of flexver and standard fields for backwards compatibility reasons,
- * so we handcraft the response instead. */
- resp->rkbuf_flags &= ~RD_KAFKA_OP_F_FLEXVER;
-
- /* ErrorCode */
- rd_kafka_buf_write_i16(resp, err);
-
- /* #ApiKeys (updated later) */
- /* FIXME: FLEXVER: This is a uvarint and will require more than 1 byte
- * if the array count exceeds 126. */
- if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)
- of_ApiKeysCnt = rd_kafka_buf_write_i8(resp, 0);
- else
- of_ApiKeysCnt = rd_kafka_buf_write_i32(resp, 0);
-
- for (i = 0; i < RD_KAFKAP__NUM; i++) {
- if (!mcluster->api_handlers[i].cb ||
- mcluster->api_handlers[i].MaxVersion == -1)
- continue;
-
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
- if (err && i != RD_KAFKAP_ApiVersion)
- continue;
- }
-
- /* ApiKey */
- rd_kafka_buf_write_i16(resp, (int16_t)i);
- /* MinVersion */
- rd_kafka_buf_write_i16(resp,
- mcluster->api_handlers[i].MinVersion);
- /* MaxVersion */
- rd_kafka_buf_write_i16(resp,
- mcluster->api_handlers[i].MaxVersion);
-
- cnt++;
- }
-
- /* FIXME: uvarint */
- if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
- rd_assert(cnt <= 126);
- rd_kafka_buf_update_i8(resp, of_ApiKeysCnt, cnt);
- } else
- rd_kafka_buf_update_i32(resp, of_ApiKeysCnt, cnt);
-
- if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
- /* ThrottletimeMs */
- rd_kafka_buf_write_i32(resp, 0);
- }
-
- rd_kafka_mock_connection_send_response(mconn, resp);
-
- return 0;
-}
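-
-/* Illustrative sketch: why the FIXMEs above cap the one-byte compact array
- * count at 126. Flexible versions (KIP-482) encode array length + 1 as an
- * unsigned varint with 7 payload bits per byte, so a single byte covers
- * encoded values 0..127, i.e. at most 126 elements. */
-#if 0 /* example only */
-static size_t example_uvarint_size(uint64_t v) {
-        size_t bytes = 1;
-        while (v >= 0x80) { /* needs a continuation byte */
-                v >>= 7;
-                bytes++;
-        }
-        return bytes; /* example_uvarint_size(126 + 1) == 1 */
-}
-#endif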
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h
deleted file mode 100644
index ea3b6cab4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_MOCK_INT_H_
-#define _RDKAFKA_MOCK_INT_H_
-
-/**
- * @name Mock cluster - internal data types
- *
- */
-
-
-/**
- * @struct Response error and/or RTT-delay to return to client.
- */
-typedef struct rd_kafka_mock_error_rtt_s {
- rd_kafka_resp_err_t err; /**< Error response (or 0) */
- rd_ts_t rtt; /**< RTT/delay in microseconds (or 0) */
-} rd_kafka_mock_error_rtt_t;
-
-/**
- * @struct A stack of errors or rtt latencies to return to the client,
- * one by one until the stack is depleted.
- */
-typedef struct rd_kafka_mock_error_stack_s {
- TAILQ_ENTRY(rd_kafka_mock_error_stack_s) link;
- int16_t ApiKey; /**< Optional ApiKey for which this stack
- * applies to, else -1. */
- size_t cnt; /**< Current number of errors in .errs */
- size_t size; /**< Current allocated size for .errs (in elements) */
- rd_kafka_mock_error_rtt_t *errs; /**< Array of errors/rtts */
-} rd_kafka_mock_error_stack_t;
-
-typedef TAILQ_HEAD(rd_kafka_mock_error_stack_head_s,
- rd_kafka_mock_error_stack_s)
- rd_kafka_mock_error_stack_head_t;
-
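-/* Illustrative sketch: how entries land on this stack from a test, assuming
- * the broker-level error/RTT API from rdkafka_mock.h
- * (rd_kafka_mock_broker_push_request_error_rtts()); each pushed pair becomes
- * one rd_kafka_mock_error_rtt_t, consumed one request at a time. */
-#if 0 /* example only */
-static void example_push_error_rtt(rd_kafka_mock_cluster_t *mcluster) {
-        /* Next Produce handled by broker 1 succeeds after a 1500 ms delay. */
-        rd_kafka_mock_broker_push_request_error_rtts(
-            mcluster, 1 /* broker id */, RD_KAFKAP_Produce, 1 /* count */,
-            RD_KAFKA_RESP_ERR_NO_ERROR, 1500 /* rtt ms */);
-}
-#endif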
-
-/**
- * @struct Consumer group protocol name and metadata.
- */
-typedef struct rd_kafka_mock_cgrp_proto_s {
- rd_kafkap_str_t *name;
- rd_kafkap_bytes_t *metadata;
-} rd_kafka_mock_cgrp_proto_t;
-
-/**
- * @struct Consumer group member
- */
-typedef struct rd_kafka_mock_cgrp_member_s {
- TAILQ_ENTRY(rd_kafka_mock_cgrp_member_s) link;
- char *id; /**< MemberId */
- char *group_instance_id; /**< Group instance id */
- rd_ts_t ts_last_activity; /**< Last activity, e.g., Heartbeat */
- rd_kafka_mock_cgrp_proto_t *protos; /**< Protocol names */
- int proto_cnt; /**< Number of protocols */
- rd_kafkap_bytes_t *assignment; /**< Current assignment */
- rd_kafka_buf_t *resp; /**< Current response buffer */
- struct rd_kafka_mock_connection_s *conn; /**< Connection, may be NULL
- * if there is no ongoing
- * request. */
-} rd_kafka_mock_cgrp_member_t;
-
-/**
- * @struct Consumer group.
- */
-typedef struct rd_kafka_mock_cgrp_s {
- TAILQ_ENTRY(rd_kafka_mock_cgrp_s) link;
- struct rd_kafka_mock_cluster_s *cluster; /**< Cluster */
- struct rd_kafka_mock_connection_s *conn; /**< Connection */
- char *id; /**< Group Id */
- char *protocol_type; /**< Protocol type */
- char *protocol_name; /**< Elected protocol name */
- int32_t generation_id; /**< Generation Id */
- int session_timeout_ms; /**< Session timeout */
- enum { RD_KAFKA_MOCK_CGRP_STATE_EMPTY, /* No members */
- RD_KAFKA_MOCK_CGRP_STATE_JOINING, /* Members are joining */
- RD_KAFKA_MOCK_CGRP_STATE_SYNCING, /* Syncing assignments */
- RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, /* Rebalance triggered */
- RD_KAFKA_MOCK_CGRP_STATE_UP, /* Group is operational */
- } state; /**< Consumer group state */
- rd_kafka_timer_t session_tmr; /**< Session timeout timer */
- rd_kafka_timer_t rebalance_tmr; /**< Rebalance state timer */
- TAILQ_HEAD(, rd_kafka_mock_cgrp_member_s) members; /**< Group members */
- int member_cnt; /**< Number of group members */
-        int last_member_cnt; /**< Number of group members at last election */
- int assignment_cnt; /**< Number of member assignments in last Sync */
- rd_kafka_mock_cgrp_member_t *leader; /**< Elected leader */
-} rd_kafka_mock_cgrp_t;
-
-
-/**
- * @struct TransactionalId + PID (+ optional sequence state)
- */
-typedef struct rd_kafka_mock_pid_s {
- rd_kafka_pid_t pid;
-
- /* BaseSequence tracking (partition) */
- int8_t window; /**< increases up to 5 */
- int8_t lo; /**< Window low bucket: oldest */
- int8_t hi; /**< Window high bucket: most recent */
- int32_t seq[5]; /**< Next expected BaseSequence for each bucket */
-
- char TransactionalId[1]; /**< Allocated after this structure */
-} rd_kafka_mock_pid_t;
-
-/**
- * @brief rd_kafka_mock_pid_t.pid Pid (not epoch) comparator
- */
-static RD_UNUSED int rd_kafka_mock_pid_cmp_pid(const void *_a, const void *_b) {
- const rd_kafka_mock_pid_t *a = _a, *b = _b;
-
- if (a->pid.id < b->pid.id)
- return -1;
- else if (a->pid.id > b->pid.id)
- return 1;
-
- return 0;
-}
-
-/**
- * @brief rd_kafka_mock_pid_t.pid TransactionalId,Pid,epoch comparator
- */
-static RD_UNUSED int rd_kafka_mock_pid_cmp(const void *_a, const void *_b) {
- const rd_kafka_mock_pid_t *a = _a, *b = _b;
- int r;
-
- r = strcmp(a->TransactionalId, b->TransactionalId);
- if (r)
- return r;
-
- if (a->pid.id < b->pid.id)
- return -1;
- else if (a->pid.id > b->pid.id)
- return 1;
-
- if (a->pid.epoch < b->pid.epoch)
- return -1;
- if (a->pid.epoch > b->pid.epoch)
- return 1;
-
- return 0;
-}
-
-
-
-/**
- * @struct A real TCP connection from the client to a mock broker.
- */
-typedef struct rd_kafka_mock_connection_s {
- TAILQ_ENTRY(rd_kafka_mock_connection_s) link;
- rd_kafka_transport_t *transport; /**< Socket transport */
- rd_kafka_buf_t *rxbuf; /**< Receive buffer */
- rd_kafka_bufq_t outbufs; /**< Send buffers */
- short *poll_events; /**< Events to poll, points to
- * the broker's pfd array */
- struct sockaddr_in peer; /**< Peer address */
- struct rd_kafka_mock_broker_s *broker;
- rd_kafka_timer_t write_tmr; /**< Socket write delay timer */
-} rd_kafka_mock_connection_t;
-
-
-/**
- * @struct Mock broker
- */
-typedef struct rd_kafka_mock_broker_s {
- TAILQ_ENTRY(rd_kafka_mock_broker_s) link;
- int32_t id;
- char advertised_listener[128];
- struct sockaddr_in sin; /**< Bound address:port */
- uint16_t port;
- char *rack;
- rd_bool_t up;
- rd_ts_t rtt; /**< RTT in microseconds */
-
- rd_socket_t listen_s; /**< listen() socket */
-
- TAILQ_HEAD(, rd_kafka_mock_connection_s) connections;
-
- /**< Per-protocol request error stack.
- * @locks mcluster->lock */
- rd_kafka_mock_error_stack_head_t errstacks;
-
- struct rd_kafka_mock_cluster_s *cluster;
-} rd_kafka_mock_broker_t;
-
-
-/**
- * @struct A Kafka-serialized MessageSet
- */
-typedef struct rd_kafka_mock_msgset_s {
- TAILQ_ENTRY(rd_kafka_mock_msgset_s) link;
- int64_t first_offset; /**< First offset in batch */
- int64_t last_offset; /**< Last offset in batch */
- int32_t leader_epoch; /**< Msgset leader epoch */
- rd_kafkap_bytes_t bytes;
- /* Space for bytes.data is allocated after the msgset_t */
-} rd_kafka_mock_msgset_t;
-
-
-/**
- * @struct Committed offset for a group and partition.
- */
-typedef struct rd_kafka_mock_committed_offset_s {
- /**< mpart.committed_offsets */
- TAILQ_ENTRY(rd_kafka_mock_committed_offset_s) link;
- char *group; /**< Allocated along with the struct */
- int64_t offset; /**< Committed offset */
- rd_kafkap_str_t *metadata; /**< Metadata, allocated separately */
-} rd_kafka_mock_committed_offset_t;
-
-
-TAILQ_HEAD(rd_kafka_mock_msgset_tailq_s, rd_kafka_mock_msgset_s);
-
-/**
- * @struct Mock partition
- */
-typedef struct rd_kafka_mock_partition_s {
- TAILQ_ENTRY(rd_kafka_mock_partition_s) leader_link;
- int32_t id;
-
- int32_t leader_epoch; /**< Leader epoch, bumped on each
- * partition leader change. */
- int64_t start_offset; /**< Actual/leader start offset */
- int64_t end_offset; /**< Actual/leader end offset */
- int64_t follower_start_offset; /**< Follower's start offset */
- int64_t follower_end_offset; /**< Follower's end offset */
-        rd_bool_t update_follower_start_offset; /**< Keep follower_start_offset
-                                                 *   in sync with start_offset */
-        rd_bool_t update_follower_end_offset;   /**< Keep follower_end_offset
-                                                 *   in sync with end_offset */
-
- struct rd_kafka_mock_msgset_tailq_s msgsets;
- size_t size; /**< Total size of all .msgsets */
- size_t cnt; /**< Total count of .msgsets */
- size_t max_size; /**< Maximum size of all .msgsets, may be overshot. */
- size_t max_cnt; /**< Maximum number of .msgsets */
-
- /**< Committed offsets */
- TAILQ_HEAD(, rd_kafka_mock_committed_offset_s) committed_offsets;
-
- rd_kafka_mock_broker_t *leader;
- rd_kafka_mock_broker_t **replicas;
- int replica_cnt;
-
- rd_list_t pidstates; /**< PID states */
-
- int32_t follower_id; /**< Preferred replica/follower */
-
- struct rd_kafka_mock_topic_s *topic;
-} rd_kafka_mock_partition_t;
-
-
-/**
- * @struct Mock topic
- */
-typedef struct rd_kafka_mock_topic_s {
- TAILQ_ENTRY(rd_kafka_mock_topic_s) link;
- char *name;
-
- rd_kafka_mock_partition_t *partitions;
- int partition_cnt;
-
- rd_kafka_resp_err_t err; /**< Error to return in protocol requests
- * for this topic. */
-
- struct rd_kafka_mock_cluster_s *cluster;
-} rd_kafka_mock_topic_t;
-
-/**
- * @struct Explicitly set coordinator.
- */
-typedef struct rd_kafka_mock_coord_s {
- TAILQ_ENTRY(rd_kafka_mock_coord_s) link;
- rd_kafka_coordtype_t type;
- char *key;
- int32_t broker_id;
-} rd_kafka_mock_coord_t;
-
-
-typedef void(rd_kafka_mock_io_handler_t)(
- struct rd_kafka_mock_cluster_s *mcluster,
- rd_socket_t fd,
- int events,
- void *opaque);
-
-struct rd_kafka_mock_api_handler {
- int16_t MinVersion;
- int16_t MaxVersion;
- int16_t FlexVersion; /**< First Flexible version */
- int (*cb)(rd_kafka_mock_connection_t *mconn, rd_kafka_buf_t *rkbuf);
-};
-
-extern const struct rd_kafka_mock_api_handler
- rd_kafka_mock_api_handlers[RD_KAFKAP__NUM];
-
-
-
-/**
- * @struct Mock cluster.
- *
- * The cluster IO loop runs in a separate thread where all
- * broker IO is handled.
- *
- * No locking is needed for data owned by the cluster thread; the few
- * fields shared with other threads are protected by the .lock mutex below.
- */
-struct rd_kafka_mock_cluster_s {
- char id[32]; /**< Generated cluster id */
-
- rd_kafka_t *rk;
-
- int32_t controller_id; /**< Current controller */
-
- TAILQ_HEAD(, rd_kafka_mock_broker_s) brokers;
- int broker_cnt;
-
- TAILQ_HEAD(, rd_kafka_mock_topic_s) topics;
- int topic_cnt;
-
- TAILQ_HEAD(, rd_kafka_mock_cgrp_s) cgrps;
-
- /** Explicit coordinators (set with mock_set_coordinator()) */
- TAILQ_HEAD(, rd_kafka_mock_coord_s) coords;
-
- /** Current transactional producer PIDs.
- * Element type is a malloced rd_kafka_mock_pid_t*. */
- rd_list_t pids;
-
- char *bootstraps; /**< bootstrap.servers */
-
- thrd_t thread; /**< Mock thread */
-
- rd_kafka_q_t *ops; /**< Control ops queue for interacting with the
- * cluster. */
-
- rd_socket_t wakeup_fds[2]; /**< Wake-up fds for use with .ops */
-
- rd_bool_t run; /**< Cluster will run while this value is true */
-
- int fd_cnt; /**< Number of file descriptors */
- int fd_size; /**< Allocated size of .fds
- * and .handlers */
- struct pollfd *fds; /**< Dynamic array */
-
- rd_kafka_broker_t *dummy_rkb; /**< Some internal librdkafka APIs
- * that we are reusing requires a
- * broker object, we use the
- * internal broker and store it
- * here for convenient access. */
-
- struct {
- int partition_cnt; /**< Auto topic create part cnt */
- int replication_factor; /**< Auto topic create repl factor */
- } defaults;
-
- /**< Dynamic array of IO handlers for corresponding fd in .fds */
- struct {
- rd_kafka_mock_io_handler_t *cb; /**< Callback */
- void *opaque; /**< Callbacks' opaque */
- } * handlers;
-
- /**< Per-protocol request error stack. */
- rd_kafka_mock_error_stack_head_t errstacks;
-
- /**< Request handlers */
- struct rd_kafka_mock_api_handler api_handlers[RD_KAFKAP__NUM];
-
- /**< Mutex for:
- * .errstacks
-         *    .api_handlers
- */
- mtx_t lock;
-
- rd_kafka_timers_t timers; /**< Timers */
-};
-
-
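-/* Illustrative sketch: standing up the cluster described by this struct via
- * the public API (rd_kafka_mock_cluster_new() and
- * rd_kafka_mock_cluster_bootstraps() from rdkafka_mock.h) and pointing a
- * client at its generated bootstrap list. */
-#if 0 /* example only */
-static rd_kafka_mock_cluster_t *example_cluster(rd_kafka_t *rk) {
-        rd_kafka_mock_cluster_t *mcluster =
-            rd_kafka_mock_cluster_new(rk, 3 /* brokers */);
-
-        printf("bootstrap.servers=%s\n",
-               rd_kafka_mock_cluster_bootstraps(mcluster));
-        return mcluster;
-}
-#endif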
-
-rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request);
-void rd_kafka_mock_connection_send_response(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp);
-void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn,
- rd_bool_t blocking);
-
-rd_kafka_mock_partition_t *
-rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic,
- int32_t partition);
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster,
- const char *topic,
- int partition_cnt,
- rd_kafka_resp_err_t *errp);
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster,
- const char *name);
-rd_kafka_mock_topic_t *
-rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *kname);
-rd_kafka_mock_broker_t *
-rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_coordtype_t KeyType,
- const rd_kafkap_str_t *Key);
-
-rd_kafka_mock_committed_offset_t *
-rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_str_t *group);
-rd_kafka_mock_committed_offset_t *
-rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_str_t *group,
- int64_t offset,
- const rd_kafkap_str_t *metadata);
-
-const rd_kafka_mock_msgset_t *
-rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart,
- int64_t offset,
- rd_bool_t on_follower);
-
-rd_kafka_resp_err_t
-rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp);
-
-rd_kafka_resp_err_t
-rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_bytes_t *records,
- const rd_kafkap_str_t *TransactionalId,
- int64_t *BaseOffset);
-
-rd_kafka_resp_err_t rd_kafka_mock_partition_leader_epoch_check(
- const rd_kafka_mock_partition_t *mpart,
- int32_t leader_epoch);
-
-int64_t rd_kafka_mock_partition_offset_for_leader_epoch(
- const rd_kafka_mock_partition_t *mpart,
- int32_t leader_epoch);
-
-
-/**
- * @returns true if the ApiVersion is supported, else false.
- */
-static RD_UNUSED rd_bool_t
-rd_kafka_mock_cluster_ApiVersion_check(const rd_kafka_mock_cluster_t *mcluster,
- int16_t ApiKey,
- int16_t ApiVersion) {
- return (ApiVersion >= mcluster->api_handlers[ApiKey].MinVersion &&
- ApiVersion <= mcluster->api_handlers[ApiKey].MaxVersion);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *TransactionalId,
- const rd_kafka_pid_t pid,
- rd_kafka_mock_pid_t **mpidp);
-
-
-/**
- * @name Mock consumer group (rdkafka_mock_cgrp.c)
- * @{
- */
-void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member);
-void rd_kafka_mock_cgrp_member_assignment_set(
- rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member,
- const rd_kafkap_bytes_t *Metadata);
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member,
- rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp);
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member);
-void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos,
- int proto_cnt);
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_connection_t *mconn,
- rd_kafka_buf_t *resp,
- const rd_kafkap_str_t *MemberId,
- const rd_kafkap_str_t *ProtocolType,
- rd_kafka_mock_cgrp_proto_t *protos,
- int proto_cnt,
- int session_timeout_ms);
-rd_kafka_resp_err_t
-rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp,
- rd_kafka_mock_cgrp_member_t *member,
- const rd_kafka_buf_t *request,
- int32_t generation_id);
-rd_kafka_mock_cgrp_member_t *
-rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp,
- const rd_kafkap_str_t *MemberId);
-void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp);
-rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *GroupId);
-rd_kafka_mock_cgrp_t *
-rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster,
- const rd_kafkap_str_t *GroupId,
- const rd_kafkap_str_t *ProtocolType);
-void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster,
- rd_kafka_mock_connection_t *mconn);
-
-
-/**
- *@}
- */
-
-
-#include "rdkafka_mock.h"
-
-#endif /* _RDKAFKA_MOCK_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c
deleted file mode 100644
index 17b67999b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c
+++ /dev/null
@@ -1,2517 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_interceptor.h"
-#include "rdkafka_header.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_txnmgr.h"
-#include "rdkafka_error.h"
-#include "rdcrc32.h"
-#include "rdfnv1a.h"
-#include "rdmurmur2.h"
-#include "rdrand.h"
-#include "rdtime.h"
-#include "rdsysqueue.h"
-#include "rdunittest.h"
-
-#include <stdarg.h>
-
-
-const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) {
- if (!rkmessage->err)
- return NULL;
-
- if (rkmessage->payload)
- return (const char *)rkmessage->payload;
-
- return rd_kafka_err2str(rkmessage->err);
-}
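-
-/*
- * Editor's sketch (not part of the original file): consumer-side use of
- * rd_kafka_message_errstr(). For consumer error events the message payload
- * carries the human-readable error string, which is what this accessor
- * exposes. rd_kafka_consumer_poll() and rd_kafka_message_destroy() are
- * standard public API; the 100 ms timeout is arbitrary.
- */
-#if 0 /* usage sketch only */
-static void example_poll_once(rd_kafka_t *rk) {
- rd_kafka_message_t *rkmessage = rd_kafka_consumer_poll(rk, 100);
- if (!rkmessage)
- return; /* poll timeout, no message or event */
- if (rkmessage->err)
- fprintf(stderr, "Consume error: %s\n",
- rd_kafka_message_errstr(rkmessage));
- rd_kafka_message_destroy(rkmessage);
-}
-#endif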
-
-
-/**
- * @brief Check if producing is allowed.
- *
- * @param errorp If non-NULL and producing is prohibited, a new error_t
- * object will be allocated and returned in this pointer.
- *
- * @returns an error if not allowed, else 0.
- *
- * @remark Also sets the corresponding errno.
- */
-static RD_INLINE rd_kafka_resp_err_t
-rd_kafka_check_produce(rd_kafka_t *rk, rd_kafka_error_t **errorp) {
- rd_kafka_resp_err_t err;
-
- if (unlikely((err = rd_kafka_fatal_error_code(rk)))) {
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__FATAL, ECANCELED);
- if (errorp) {
- rd_kafka_rdlock(rk);
- *errorp = rd_kafka_error_new_fatal(
- err,
- "Producing not allowed since a previous fatal "
- "error was raised: %s",
- rk->rk_fatal.errstr);
- rd_kafka_rdunlock(rk);
- }
- return RD_KAFKA_RESP_ERR__FATAL;
- }
-
- if (likely(rd_kafka_txn_may_enq_msg(rk)))
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- /* Transactional state forbids producing */
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__STATE, ENOEXEC);
-
- if (errorp) {
- rd_kafka_rdlock(rk);
- *errorp = rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__STATE,
- "Producing not allowed in transactional state %s",
- rd_kafka_txn_state2str(rk->rk_eos.txn_state));
- rd_kafka_rdunlock(rk);
- }
-
- return RD_KAFKA_RESP_ERR__STATE;
-}
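-
-/*
- * Editor's sketch (not part of the original file): once produce gating
- * reports RD_KAFKA_RESP_ERR__FATAL, an application can retrieve the
- * underlying fatal error with the public rd_kafka_fatal_error() API;
- * the function name below is illustrative.
- */
-#if 0 /* usage sketch only */
-static void example_report_fatal(rd_kafka_t *rk) {
- char errstr[512];
- rd_kafka_resp_err_t orig_err =
- rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
- if (orig_err)
- fprintf(stderr, "Fatal error %s: %s\n",
- rd_kafka_err2name(orig_err), errstr);
-}
-#endif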
-
-
-void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm) {
- // FIXME
- if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) {
- rd_dassert(rk || rkm->rkm_rkmessage.rkt);
- rd_kafka_curr_msgs_sub(rk ? rk : rkm->rkm_rkmessage.rkt->rkt_rk,
- 1, rkm->rkm_len);
- }
-
- if (rkm->rkm_headers)
- rd_kafka_headers_destroy(rkm->rkm_headers);
-
- if (likely(rkm->rkm_rkmessage.rkt != NULL))
- rd_kafka_topic_destroy0(rkm->rkm_rkmessage.rkt);
-
- if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload)
- rd_free(rkm->rkm_payload);
-
- if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM)
- rd_free(rkm);
-}
-
-
-
-/**
- * @brief Create a new Producer message, copying the payload as
- * indicated by msgflags.
- *
- * @returns the new message
- */
-static rd_kafka_msg_t *rd_kafka_msg_new00(rd_kafka_topic_t *rkt,
- int32_t partition,
- int msgflags,
- char *payload,
- size_t len,
- const void *key,
- size_t keylen,
- void *msg_opaque) {
- rd_kafka_msg_t *rkm;
- size_t mlen = sizeof(*rkm);
- char *p;
-
- /* If we are to make a copy of the payload, allocate space for it too */
- if (msgflags & RD_KAFKA_MSG_F_COPY) {
- msgflags &= ~RD_KAFKA_MSG_F_FREE;
- mlen += len;
- }
-
- mlen += keylen;
-
- /* Note: using rd_malloc here, not rd_calloc, so make sure all fields
- * are properly set up. */
- rkm = rd_malloc(mlen);
- rkm->rkm_err = 0;
- rkm->rkm_flags =
- (RD_KAFKA_MSG_F_PRODUCER | RD_KAFKA_MSG_F_FREE_RKM | msgflags);
- rkm->rkm_len = len;
- rkm->rkm_opaque = msg_opaque;
- rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep(rkt);
-
- rkm->rkm_broker_id = -1;
- rkm->rkm_partition = partition;
- rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID;
- rkm->rkm_timestamp = 0;
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
- rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- rkm->rkm_headers = NULL;
-
- p = (char *)(rkm + 1);
-
- if (payload && msgflags & RD_KAFKA_MSG_F_COPY) {
- /* Copy payload to space following the ..msg_t */
- rkm->rkm_payload = p;
- memcpy(rkm->rkm_payload, payload, len);
- p += len;
-
- } else {
- /* Just point to the provided payload. */
- rkm->rkm_payload = payload;
- }
-
- if (key) {
- rkm->rkm_key = p;
- rkm->rkm_key_len = keylen;
- memcpy(rkm->rkm_key, key, keylen);
- } else {
- rkm->rkm_key = NULL;
- rkm->rkm_key_len = 0;
- }
-
- return rkm;
-}
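-
-/*
- * Editor's note (not part of the original file): rd_kafka_msg_new00() packs
- * the struct, the copied payload and the copied key into one allocation,
- * with the buffers placed directly after the struct. A minimal standalone
- * sketch of that trailing-buffer pattern (hypothetical names, assumes
- * <stdlib.h> and <string.h>):
- */
-#if 0 /* illustration only */
-struct blob {
- size_t len;
- char *data; /* points into the same allocation */
-};
-static struct blob *blob_new(const char *src, size_t len) {
- struct blob *b = malloc(sizeof(*b) + len);
- if (!b)
- return NULL;
- b->len = len;
- b->data = (char *)(b + 1); /* trailing bytes */
- memcpy(b->data, src, len);
- return b; /* released with a single free(b) */
-}
-#endif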
-
-
-
-/**
- * @brief Create a new Producer message.
- *
- * @remark Must only be used by producer code.
- *
- * @returns the new message on success, or NULL on error, in which case
- * 'errp' (and 'errnop', if non-NULL) are set appropriately.
- */
-static rd_kafka_msg_t *rd_kafka_msg_new0(rd_kafka_topic_t *rkt,
- int32_t force_partition,
- int msgflags,
- char *payload,
- size_t len,
- const void *key,
- size_t keylen,
- void *msg_opaque,
- rd_kafka_resp_err_t *errp,
- int *errnop,
- rd_kafka_headers_t *hdrs,
- int64_t timestamp,
- rd_ts_t now) {
- rd_kafka_msg_t *rkm;
- size_t hdrs_size = 0;
-
- if (unlikely(!payload))
- len = 0;
- if (!key)
- keylen = 0;
- if (hdrs)
- hdrs_size = rd_kafka_headers_serialized_size(hdrs);
-
- if (unlikely(len > INT32_MAX || keylen > INT32_MAX ||
- rd_kafka_msg_max_wire_size(keylen, len, hdrs_size) >
- (size_t)rkt->rkt_rk->rk_conf.max_msg_size)) {
- *errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
- if (errnop)
- *errnop = EMSGSIZE;
- return NULL;
- }
-
- if (msgflags & RD_KAFKA_MSG_F_BLOCK)
- *errp = rd_kafka_curr_msgs_add(
- rkt->rkt_rk, 1, len, 1 /*block*/,
- (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? &rkt->rkt_lock
- : NULL);
- else
- *errp = rd_kafka_curr_msgs_add(rkt->rkt_rk, 1, len, 0, NULL);
-
- if (unlikely(*errp)) {
- if (errnop)
- *errnop = ENOBUFS;
- return NULL;
- }
-
-
- rkm = rd_kafka_msg_new00(
- rkt, force_partition,
- msgflags | RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, payload,
- len, key, keylen, msg_opaque);
-
- memset(&rkm->rkm_u.producer, 0, sizeof(rkm->rkm_u.producer));
-
- if (timestamp)
- rkm->rkm_timestamp = timestamp;
- else
- rkm->rkm_timestamp = rd_uclock() / 1000;
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
-
- if (hdrs) {
- rd_dassert(!rkm->rkm_headers);
- rkm->rkm_headers = hdrs;
- }
-
- rkm->rkm_ts_enq = now;
-
- if (rkt->rkt_conf.message_timeout_ms == 0) {
- rkm->rkm_ts_timeout = INT64_MAX;
- } else {
- rkm->rkm_ts_timeout =
- now + (int64_t)rkt->rkt_conf.message_timeout_ms * 1000;
- }
-
- /* Call interceptor chain for on_send */
- rd_kafka_interceptors_on_send(rkt->rkt_rk, &rkm->rkm_rkmessage);
-
- return rkm;
-}
-
-
-/**
- * @brief Produce: creates a new message, runs the partitioner and enqueues
- * the message on the selected partition.
- *
- * @returns 0 on success or -1 on error.
- *
- * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
- * the memory associated with the payload is still the caller's
- * responsibility.
- *
- * @locks none
- */
-int rd_kafka_msg_new(rd_kafka_topic_t *rkt,
- int32_t force_partition,
- int msgflags,
- char *payload,
- size_t len,
- const void *key,
- size_t keylen,
- void *msg_opaque) {
- rd_kafka_msg_t *rkm;
- rd_kafka_resp_err_t err;
- int errnox;
-
- if (unlikely((err = rd_kafka_check_produce(rkt->rkt_rk, NULL))))
- return -1;
-
- /* Create message */
- rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, payload, len,
- key, keylen, msg_opaque, &err, &errnox, NULL, 0,
- rd_clock());
- if (unlikely(!rkm)) {
- /* errno is already set by msg_new() */
- rd_kafka_set_last_error(err, errnox);
- return -1;
- }
-
-
- /* Partition the message */
- err = rd_kafka_msg_partitioner(rkt, rkm, 1);
- if (likely(!err)) {
- rd_kafka_set_last_error(0, 0);
- return 0;
- }
-
- /* Interceptor: unroll failing messages by triggering on_ack.. */
- rkm->rkm_err = err;
- rd_kafka_interceptors_on_acknowledgement(rkt->rkt_rk,
- &rkm->rkm_rkmessage);
-
- /* Handle partitioner failures: it only fails when the application
- * attempts to force a destination partition that does not exist
- * in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE
- * flag since our contract says we don't free the payload on
- * failure. */
-
- rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
- rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
-
- /* Translate error codes to errnos. */
- if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- rd_kafka_set_last_error(err, ESRCH);
- else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- rd_kafka_set_last_error(err, ENOENT);
- else
- rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */
-
- return -1;
-}
-
-
-/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */
-rd_kafka_error_t *
-rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt) {
- rd_kafka_msg_t s_rkm = {
- /* Message defaults */
- .rkm_partition = RD_KAFKA_PARTITION_UA,
- .rkm_timestamp = 0, /* current time */
- };
- rd_kafka_msg_t *rkm = &s_rkm;
- rd_kafka_topic_t *rkt = NULL;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_error_t *error = NULL;
- rd_kafka_headers_t *hdrs = NULL;
- rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */
- size_t i;
-
- if (unlikely(rd_kafka_check_produce(rk, &error)))
- return error;
-
- for (i = 0; i < cnt; i++) {
- const rd_kafka_vu_t *vu = &vus[i];
- switch (vu->vtype) {
- case RD_KAFKA_VTYPE_TOPIC:
- rkt =
- rd_kafka_topic_new0(rk, vu->u.cstr, NULL, NULL, 1);
- break;
-
- case RD_KAFKA_VTYPE_RKT:
- rkt = rd_kafka_topic_proper(vu->u.rkt);
- rd_kafka_topic_keep(rkt);
- break;
-
- case RD_KAFKA_VTYPE_PARTITION:
- rkm->rkm_partition = vu->u.i32;
- break;
-
- case RD_KAFKA_VTYPE_VALUE:
- rkm->rkm_payload = vu->u.mem.ptr;
- rkm->rkm_len = vu->u.mem.size;
- break;
-
- case RD_KAFKA_VTYPE_KEY:
- rkm->rkm_key = vu->u.mem.ptr;
- rkm->rkm_key_len = vu->u.mem.size;
- break;
-
- case RD_KAFKA_VTYPE_OPAQUE:
- rkm->rkm_opaque = vu->u.ptr;
- break;
-
- case RD_KAFKA_VTYPE_MSGFLAGS:
- rkm->rkm_flags = vu->u.i;
- break;
-
- case RD_KAFKA_VTYPE_TIMESTAMP:
- rkm->rkm_timestamp = vu->u.i64;
- break;
-
- case RD_KAFKA_VTYPE_HEADER:
- if (unlikely(app_hdrs != NULL)) {
- error = rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__CONFLICT,
- "VTYPE_HEADER and VTYPE_HEADERS "
- "are mutually exclusive");
- goto err;
- }
-
- if (unlikely(!hdrs))
- hdrs = rd_kafka_headers_new(8);
-
- err = rd_kafka_header_add(hdrs, vu->u.header.name, -1,
- vu->u.header.val,
- vu->u.header.size);
- if (unlikely(err)) {
- error = rd_kafka_error_new(
- err, "Failed to add header: %s",
- rd_kafka_err2str(err));
- goto err;
- }
- break;
-
- case RD_KAFKA_VTYPE_HEADERS:
- if (unlikely(hdrs != NULL)) {
- error = rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__CONFLICT,
- "VTYPE_HEADERS and VTYPE_HEADER "
- "are mutually exclusive");
- goto err;
- }
- app_hdrs = vu->u.headers;
- break;
-
- default:
- error = rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Unsupported VTYPE %d", (int)vu->vtype);
- goto err;
- }
- }
-
- rd_assert(!error);
-
- if (unlikely(!rkt)) {
- error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Topic name or object required");
- goto err;
- }
-
- rkm = rd_kafka_msg_new0(
- rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload,
- rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, rkm->rkm_opaque, &err,
- NULL, app_hdrs ? app_hdrs : hdrs, rkm->rkm_timestamp, rd_clock());
-
- if (unlikely(err)) {
- error = rd_kafka_error_new(err, "Failed to produce message: %s",
- rd_kafka_err2str(err));
- goto err;
- }
-
- /* Partition the message */
- err = rd_kafka_msg_partitioner(rkt, rkm, 1);
- if (unlikely(err)) {
- /* Handle partitioner failures: it only fails when
- * the application attempts to force a destination
- * partition that does not exist in the cluster. */
-
- /* Interceptors: Unroll on_send by on_ack.. */
- rkm->rkm_err = err;
- rd_kafka_interceptors_on_acknowledgement(rk,
- &rkm->rkm_rkmessage);
-
- /* Note we must clear the RD_KAFKA_MSG_F_FREE
- * flag since our contract says we don't free the payload on
- * failure. */
- rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
-
- /* Deassociate application owned headers from message
- * since headers remain in application ownership
- * when producev() fails */
- if (app_hdrs && app_hdrs == rkm->rkm_headers)
- rkm->rkm_headers = NULL;
-
- rd_kafka_msg_destroy(rk, rkm);
-
- error = rd_kafka_error_new(err, "Failed to enqueue message: %s",
- rd_kafka_err2str(err));
- goto err;
- }
-
- rd_kafka_topic_destroy0(rkt);
-
- return NULL;
-
-err:
- if (rkt)
- rd_kafka_topic_destroy0(rkt);
-
- if (hdrs)
- rd_kafka_headers_destroy(hdrs);
-
- rd_assert(error != NULL);
- return error;
-}
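-
-/*
- * Editor's sketch (not part of the original file): calling
- * rd_kafka_produceva() with an array of rd_kafka_vu_t descriptors, using
- * the same vtype/union fields handled in the loop above; the topic name
- * and payload are placeholders.
- */
-#if 0 /* usage sketch only */
-static void example_produceva(rd_kafka_t *rk) {
- rd_kafka_vu_t vus[3];
- rd_kafka_error_t *error;
- vus[0].vtype = RD_KAFKA_VTYPE_TOPIC;
- vus[0].u.cstr = "mytopic";
- vus[1].vtype = RD_KAFKA_VTYPE_VALUE;
- vus[1].u.mem.ptr = "hello";
- vus[1].u.mem.size = 5;
- vus[2].vtype = RD_KAFKA_VTYPE_MSGFLAGS;
- vus[2].u.i = RD_KAFKA_MSG_F_COPY;
- error = rd_kafka_produceva(rk, vus, 3);
- if (error) {
- fprintf(stderr, "produceva failed: %s\n",
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
- }
-}
-#endif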
-
-
-
-/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */
-rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...) {
- va_list ap;
- rd_kafka_msg_t s_rkm = {
- /* Message defaults */
- .rkm_partition = RD_KAFKA_PARTITION_UA,
- .rkm_timestamp = 0, /* current time */
- };
- rd_kafka_msg_t *rkm = &s_rkm;
- rd_kafka_vtype_t vtype;
- rd_kafka_topic_t *rkt = NULL;
- rd_kafka_resp_err_t err;
- rd_kafka_headers_t *hdrs = NULL;
- rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */
-
- if (unlikely((err = rd_kafka_check_produce(rk, NULL))))
- return err;
-
- va_start(ap, rk);
- while (!err &&
- (vtype = va_arg(ap, rd_kafka_vtype_t)) != RD_KAFKA_VTYPE_END) {
- switch (vtype) {
- case RD_KAFKA_VTYPE_TOPIC:
- rkt = rd_kafka_topic_new0(rk, va_arg(ap, const char *),
- NULL, NULL, 1);
- break;
-
- case RD_KAFKA_VTYPE_RKT:
- rkt = rd_kafka_topic_proper(
- va_arg(ap, rd_kafka_topic_t *));
- rd_kafka_topic_keep(rkt);
- break;
-
- case RD_KAFKA_VTYPE_PARTITION:
- rkm->rkm_partition = va_arg(ap, int32_t);
- break;
-
- case RD_KAFKA_VTYPE_VALUE:
- rkm->rkm_payload = va_arg(ap, void *);
- rkm->rkm_len = va_arg(ap, size_t);
- break;
-
- case RD_KAFKA_VTYPE_KEY:
- rkm->rkm_key = va_arg(ap, void *);
- rkm->rkm_key_len = va_arg(ap, size_t);
- break;
-
- case RD_KAFKA_VTYPE_OPAQUE:
- rkm->rkm_opaque = va_arg(ap, void *);
- break;
-
- case RD_KAFKA_VTYPE_MSGFLAGS:
- rkm->rkm_flags = va_arg(ap, int);
- break;
-
- case RD_KAFKA_VTYPE_TIMESTAMP:
- rkm->rkm_timestamp = va_arg(ap, int64_t);
- break;
-
- case RD_KAFKA_VTYPE_HEADER: {
- const char *name;
- const void *value;
- ssize_t size;
-
- if (unlikely(app_hdrs != NULL)) {
- err = RD_KAFKA_RESP_ERR__CONFLICT;
- break;
- }
-
- if (unlikely(!hdrs))
- hdrs = rd_kafka_headers_new(8);
-
- name = va_arg(ap, const char *);
- value = va_arg(ap, const void *);
- size = va_arg(ap, ssize_t);
-
- err = rd_kafka_header_add(hdrs, name, -1, value, size);
- } break;
-
- case RD_KAFKA_VTYPE_HEADERS:
- if (unlikely(hdrs != NULL)) {
- err = RD_KAFKA_RESP_ERR__CONFLICT;
- break;
- }
- app_hdrs = va_arg(ap, rd_kafka_headers_t *);
- break;
-
- default:
- err = RD_KAFKA_RESP_ERR__INVALID_ARG;
- break;
- }
- }
-
- va_end(ap);
-
- if (unlikely(!rkt))
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- if (likely(!err))
- rkm = rd_kafka_msg_new0(
- rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload,
- rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len,
- rkm->rkm_opaque, &err, NULL, app_hdrs ? app_hdrs : hdrs,
- rkm->rkm_timestamp, rd_clock());
-
- if (unlikely(err)) {
- rd_kafka_topic_destroy0(rkt);
- if (hdrs)
- rd_kafka_headers_destroy(hdrs);
- return err;
- }
-
- /* Partition the message */
- err = rd_kafka_msg_partitioner(rkt, rkm, 1);
- if (unlikely(err)) {
- /* Handle partitioner failures: it only fails when
- * the application attempts to force a destination
- * partition that does not exist in the cluster. */
-
- /* Interceptors: Unroll on_send by on_ack.. */
- rkm->rkm_err = err;
- rd_kafka_interceptors_on_acknowledgement(rk,
- &rkm->rkm_rkmessage);
-
- /* Note we must clear the RD_KAFKA_MSG_F_FREE
- * flag since our contract says we don't free the payload on
- * failure. */
- rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
-
- /* Deassociate application owned headers from message
- * since headers remain in application ownership
- * when producev() fails */
- if (app_hdrs && app_hdrs == rkm->rkm_headers)
- rkm->rkm_headers = NULL;
-
- rd_kafka_msg_destroy(rk, rkm);
- }
-
- rd_kafka_topic_destroy0(rkt);
-
- return err;
-}
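-
-/*
- * Editor's sketch (not part of the original file): the varargs equivalent
- * using the public RD_KAFKA_V_... convenience macros from rdkafka.h,
- * terminated by RD_KAFKA_V_END; topic, payload and header are placeholders.
- */
-#if 0 /* usage sketch only */
-static void example_producev(rd_kafka_t *rk) {
- rd_kafka_resp_err_t err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hello", 5),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_HEADER("hdr", "val", 3),
- RD_KAFKA_V_END);
- if (err)
- fprintf(stderr, "producev failed: %s\n", rd_kafka_err2str(err));
-}
-#endif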
-
-
-
-/**
- * @brief Produce a single message.
- * @locality any application thread
- * @locks none
- */
-int rd_kafka_produce(rd_kafka_topic_t *rkt,
- int32_t partition,
- int msgflags,
- void *payload,
- size_t len,
- const void *key,
- size_t keylen,
- void *msg_opaque) {
- return rd_kafka_msg_new(rkt, partition, msgflags, payload, len, key,
- keylen, msg_opaque);
-}
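-
-/*
- * Editor's sketch (not part of the original file): minimal
- * rd_kafka_produce() call. With RD_KAFKA_MSG_F_COPY the payload stays
- * owned by the caller regardless of the outcome; on failure the error is
- * retrieved with the public rd_kafka_last_error().
- */
-#if 0 /* usage sketch only */
-static void example_produce(rd_kafka_topic_t *rkt) {
- const char *msg = "hello";
- if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
- (void *)msg, strlen(msg), NULL, 0, NULL) == -1)
- fprintf(stderr, "produce failed: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-}
-#endif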
-
-
-
-/**
- * Produce a batch of messages.
- * Returns the number of messages successfully queued for producing.
- * Each message's .err will be set accordingly.
- */
-int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int msgflags,
- rd_kafka_message_t *rkmessages,
- int message_cnt) {
- rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq);
- int i;
- int64_t utc_now = rd_uclock() / 1000;
- rd_ts_t now = rd_clock();
- int good = 0;
- int multiple_partitions = (partition == RD_KAFKA_PARTITION_UA ||
- (msgflags & RD_KAFKA_MSG_F_PARTITION));
- rd_kafka_resp_err_t all_err;
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp = NULL;
-
- /* Propagated per-message below */
- all_err = rd_kafka_check_produce(rkt->rkt_rk, NULL);
-
- rd_kafka_topic_rdlock(rkt);
- if (!multiple_partitions) {
- /* Single partition: look up the rktp once. */
- rktp = rd_kafka_toppar_get_avail(rkt, partition,
- 1 /*ua on miss*/, &all_err);
-
- } else {
- /* Indicate to lower-level msg_new..() that rkt is locked
- * so that they may unlock it momentarily if blocking. */
- msgflags |= RD_KAFKA_MSG_F_RKT_RDLOCKED;
- }
-
- for (i = 0; i < message_cnt; i++) {
- rd_kafka_msg_t *rkm;
-
- /* Propagate error for all messages. */
- if (unlikely(all_err)) {
- rkmessages[i].err = all_err;
- continue;
- }
-
- /* Create message */
- rkm = rd_kafka_msg_new0(
- rkt,
- (msgflags & RD_KAFKA_MSG_F_PARTITION)
- ? rkmessages[i].partition
- : partition,
- msgflags, rkmessages[i].payload, rkmessages[i].len,
- rkmessages[i].key, rkmessages[i].key_len,
- rkmessages[i]._private, &rkmessages[i].err, NULL, NULL,
- utc_now, now);
- if (unlikely(!rkm)) {
- if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL)
- all_err = rkmessages[i].err;
- continue;
- }
-
- /* Three cases here:
- * partition==UA: run the partitioner (slow)
- * RD_KAFKA_MSG_F_PARTITION: produce message to specified
- * partition
- * fixed partition: simply enqueue the message on the
- * partition's queue */
- if (multiple_partitions) {
- if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {
- /* Partition the message */
- rkmessages[i].err = rd_kafka_msg_partitioner(
- rkt, rkm, 0 /*already locked*/);
- } else {
- if (rktp == NULL || rkm->rkm_partition !=
- rktp->rktp_partition) {
- rd_kafka_resp_err_t err;
- if (rktp != NULL)
- rd_kafka_toppar_destroy(rktp);
- rktp = rd_kafka_toppar_get_avail(
- rkt, rkm->rkm_partition,
- 1 /*ua on miss*/, &err);
-
- if (unlikely(!rktp)) {
- rkmessages[i].err = err;
- continue;
- }
- }
- rd_kafka_toppar_enq_msg(rktp, rkm, now);
-
- if (rd_kafka_is_transactional(rkt->rkt_rk)) {
- /* Add partition to transaction */
- rd_kafka_txn_add_partition(rktp);
- }
- }
-
- if (unlikely(rkmessages[i].err)) {
- /* Interceptors: Unroll on_send by on_ack.. */
- rd_kafka_interceptors_on_acknowledgement(
- rkt->rkt_rk, &rkmessages[i]);
-
- rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
- continue;
- }
-
-
- } else {
- /* Single destination partition. */
- rd_kafka_toppar_enq_msg(rktp, rkm, now);
- }
-
- rkmessages[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
- good++;
- }
-
- rd_kafka_topic_rdunlock(rkt);
-
- if (!multiple_partitions && good > 0 &&
- rd_kafka_is_transactional(rkt->rkt_rk) &&
- rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
- /* Add single destination partition to transaction */
- rd_kafka_txn_add_partition(rktp);
- }
-
- if (rktp != NULL)
- rd_kafka_toppar_destroy(rktp);
-
- return good;
-}
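-
-/*
- * Editor's sketch (not part of the original file): batched produce with
- * per-message error reporting, matching the contract described above;
- * payloads are placeholders.
- */
-#if 0 /* usage sketch only */
-static void example_produce_batch(rd_kafka_topic_t *rkt) {
- rd_kafka_message_t msgs[2];
- int i;
- memset(msgs, 0, sizeof(msgs));
- msgs[0].payload = "one";
- msgs[0].len = 3;
- msgs[1].payload = "two";
- msgs[1].len = 3;
- rd_kafka_produce_batch(rkt, RD_KAFKA_PARTITION_UA,
- RD_KAFKA_MSG_F_COPY, msgs, 2);
- for (i = 0; i < 2; i++)
- if (msgs[i].err)
- fprintf(stderr, "msg %d: %s\n", i,
- rd_kafka_err2str(msgs[i].err));
-}
-#endif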
-
-/**
- * @brief Scan \p rkmq for messages that have timed out, removing them from
- * \p rkmq and adding them to the \p timedout queue.
- *
- * @param abs_next_timeout will be set to the next message timeout, or 0
- * if no timeout. Optional, may be NULL.
- *
- * @returns the number of messages timed out.
- *
- * @locality any
- * @locks toppar_lock MUST be held
- */
-int rd_kafka_msgq_age_scan(rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_msgq_t *timedout,
- rd_ts_t now,
- rd_ts_t *abs_next_timeout) {
- rd_kafka_msg_t *rkm, *tmp, *first = NULL;
- int cnt = timedout->rkmq_msg_cnt;
-
- if (abs_next_timeout)
- *abs_next_timeout = 0;
-
- /* Assume messages are added in time-sequential order */
- TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) {
- /* NOTE: this is not true for the deprecated (and soon removed)
- * LIFO queuing strategy. */
- if (likely(rkm->rkm_ts_timeout > now)) {
- if (abs_next_timeout)
- *abs_next_timeout = rkm->rkm_ts_timeout;
- break;
- }
-
- if (!first)
- first = rkm;
-
- rd_kafka_msgq_deq(rkmq, rkm, 1);
- rd_kafka_msgq_enq(timedout, rkm);
- }
-
- return timedout->rkmq_msg_cnt - cnt;
-}
-
-
-int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq,
- rd_kafka_msg_t *rkm,
- int (*order_cmp)(const void *, const void *)) {
- TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, rkm_link,
- order_cmp);
- rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
- return ++rkmq->rkmq_msg_cnt;
-}
-
-int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_msg_t *rkm) {
- rd_dassert(rkm->rkm_u.producer.msgid != 0);
- return rd_kafka_msgq_enq_sorted0(rkmq, rkm,
- rkt->rkt_conf.msg_order_cmp);
-}
-
-/**
- * @brief Find the insert-before position (i.e., the message which comes
- * after \p rkm sequentially) for message \p rkm.
- *
- * @param rkmq insert queue.
- * @param start_pos the element in \p rkmq to start scanning at, or NULL
- * to start with the first element.
- * @param rkm message to insert.
- * @param cmp message comparator.
- * @param cntp the accumulated number of messages up to, but not including,
- * the returned insert position. Optional (NULL).
- * Do not use when start_pos is set.
- * @param bytesp the accumulated number of bytes up to, but not including,
- * the returned insert position. Optional (NULL).
- * Do not use when start_pos is set.
- *
- * @remark cntp and bytesp will NOT be accurate when \p start_pos is non-NULL.
- *
- * @returns the insert position element, or NULL if \p rkm should be
- * added at tail of queue.
- */
-rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq,
- const rd_kafka_msg_t *start_pos,
- const rd_kafka_msg_t *rkm,
- int (*cmp)(const void *, const void *),
- int *cntp,
- int64_t *bytesp) {
- const rd_kafka_msg_t *curr;
- int cnt = 0;
- int64_t bytes = 0;
-
- for (curr = start_pos ? start_pos : rd_kafka_msgq_first(rkmq); curr;
- curr = TAILQ_NEXT(curr, rkm_link)) {
- if (cmp(rkm, curr) < 0) {
- if (cntp) {
- *cntp = cnt;
- *bytesp = bytes;
- }
- return (rd_kafka_msg_t *)curr;
- }
- if (cntp) {
- cnt++;
- bytes += curr->rkm_len + curr->rkm_key_len;
- }
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Split the original \p leftq into a left and right part,
- * with element \p first_right being the first element in the
- * right part (\p rightq).
- *
- * @param cnt is the number of messages up to, but not including \p first_right
- * in \p leftq, namely the number of messages to remain in
- * \p leftq after the split.
- * @param bytes is the bytes counterpart to \p cnt.
- */
-void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq,
- rd_kafka_msgq_t *rightq,
- rd_kafka_msg_t *first_right,
- int cnt,
- int64_t bytes) {
- rd_kafka_msg_t *llast;
-
- rd_assert(first_right != TAILQ_FIRST(&leftq->rkmq_msgs));
-
- llast = TAILQ_PREV(first_right, rd_kafka_msg_head_s, rkm_link);
-
- rd_kafka_msgq_init(rightq);
-
- rightq->rkmq_msgs.tqh_first = first_right;
- rightq->rkmq_msgs.tqh_last = leftq->rkmq_msgs.tqh_last;
-
- first_right->rkm_link.tqe_prev = &rightq->rkmq_msgs.tqh_first;
-
- leftq->rkmq_msgs.tqh_last = &llast->rkm_link.tqe_next;
- llast->rkm_link.tqe_next = NULL;
-
- rightq->rkmq_msg_cnt = leftq->rkmq_msg_cnt - cnt;
- rightq->rkmq_msg_bytes = leftq->rkmq_msg_bytes - bytes;
- leftq->rkmq_msg_cnt = cnt;
- leftq->rkmq_msg_bytes = bytes;
-
- rd_kafka_msgq_verify_order(NULL, leftq, 0, rd_false);
- rd_kafka_msgq_verify_order(NULL, rightq, 0, rd_false);
-}
-
-
-/**
- * @brief Set per-message metadata for all messages in \p rkmq
- */
-void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq,
- int32_t broker_id,
- int64_t base_offset,
- int64_t timestamp,
- rd_kafka_msg_status_t status) {
- rd_kafka_msg_t *rkm;
-
- TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
- rkm->rkm_broker_id = broker_id;
- rkm->rkm_offset = base_offset++;
- if (timestamp != -1) {
- rkm->rkm_timestamp = timestamp;
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
- }
-
- /* Don't downgrade a message from any form of PERSISTED
- * to NOT_PERSISTED, since the original cause of indicating
- * PERSISTED can't be changed.
- * E.g., a previous ack or in-flight timeout. */
- if (unlikely(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
- rkm->rkm_status !=
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED))
- continue;
-
- rkm->rkm_status = status;
- }
-}
-
-
-/**
- * @brief Move all messages in \p src to \p dest whose msgid <= last_msgid.
- *
- * @remark src must be ordered
- */
-void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest,
- rd_kafka_msgq_t *src,
- uint64_t last_msgid,
- rd_kafka_msg_status_t status) {
- rd_kafka_msg_t *rkm;
-
- while ((rkm = rd_kafka_msgq_first(src)) &&
- rkm->rkm_u.producer.msgid <= last_msgid) {
- rd_kafka_msgq_deq(src, rkm, 1);
- rd_kafka_msgq_enq(dest, rkm);
-
- rkm->rkm_status = status;
- }
-
- rd_kafka_msgq_verify_order(NULL, dest, 0, rd_false);
- rd_kafka_msgq_verify_order(NULL, src, 0, rd_false);
-}
-
-
-
-int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- int32_t p = rd_jitter(0, partition_cnt - 1);
- if (unlikely(!rd_kafka_topic_partition_available(rkt, p)))
- return rd_jitter(0, partition_cnt - 1);
- else
- return p;
-}
-
-int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- return rd_crc32(key, keylen) % partition_cnt;
-}
-
-int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- if (keylen == 0)
- return rd_kafka_msg_partitioner_random(
- rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
- else
- return rd_kafka_msg_partitioner_consistent(
- rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
-}
-
-int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt;
-}
-
-int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- if (!key)
- return rd_kafka_msg_partitioner_random(
- rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
- else
- return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt;
-}
-
-int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- return rd_fnv1a(key, keylen) % partition_cnt;
-}
-
-int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- if (!key)
- return rd_kafka_msg_partitioner_random(
- rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
- else
- return rd_fnv1a(key, keylen) % partition_cnt;
-}
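-
-/*
- * Editor's sketch (not part of the original file): the partitioners above
- * are public symbols and can be installed on a topic with
- * rd_kafka_topic_conf_set_partitioner_cb(); the equivalent topic config
- * property is "partitioner" (e.g., "murmur2_random").
- */
-#if 0 /* usage sketch only */
-static void example_set_partitioner(rd_kafka_topic_conf_t *tconf) {
- rd_kafka_topic_conf_set_partitioner_cb(
- tconf, rd_kafka_msg_partitioner_murmur2_random);
-}
-#endif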
-
-int32_t rd_kafka_msg_sticky_partition(rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
-
- if (!rd_kafka_topic_partition_available(rkt, rkt->rkt_sticky_partition))
- rd_interval_expedite(&rkt->rkt_sticky_intvl, 0);
-
- if (rd_interval(&rkt->rkt_sticky_intvl,
- rkt->rkt_rk->rk_conf.sticky_partition_linger_ms * 1000,
- 0) > 0) {
- rkt->rkt_sticky_partition = rd_kafka_msg_partitioner_random(
- rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "PARTITIONER",
- "%s [%" PRId32 "] is the new sticky partition",
- rkt->rkt_topic->str, rkt->rkt_sticky_partition);
- }
-
- return rkt->rkt_sticky_partition;
-}
-
-/**
- * @brief Assigns a message to a topic partition using a partitioner.
- *
- * @param do_lock if RD_DO_LOCK then acquire topic lock.
- *
- * @returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or .._UNKNOWN_TOPIC if
- * partitioning failed, or 0 on success.
- *
- * @locality any
- * @locks rd_kafka_topic_rdlock(rkt) is acquired here when \p do_lock is
- * set, else the caller must already hold it.
- */
-int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt,
- rd_kafka_msg_t *rkm,
- rd_dolock_t do_lock) {
- int32_t partition;
- rd_kafka_toppar_t *rktp_new;
- rd_kafka_resp_err_t err;
-
- if (do_lock)
- rd_kafka_topic_rdlock(rkt);
-
- switch (rkt->rkt_state) {
- case RD_KAFKA_TOPIC_S_UNKNOWN:
- /* No metadata received from cluster yet.
- * Put message in UA partition and re-run partitioner when
- * cluster comes up. */
- partition = RD_KAFKA_PARTITION_UA;
- break;
-
- case RD_KAFKA_TOPIC_S_NOTEXISTS:
- /* Topic not found in cluster.
- * Fail message immediately. */
- err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- if (do_lock)
- rd_kafka_topic_rdunlock(rkt);
- return err;
-
- case RD_KAFKA_TOPIC_S_ERROR:
- /* Topic has permanent error.
- * Fail message immediately. */
- err = rkt->rkt_err;
- if (do_lock)
- rd_kafka_topic_rdunlock(rkt);
- return err;
-
- case RD_KAFKA_TOPIC_S_EXISTS:
- /* Topic exists in cluster. */
-
- /* Topic exists but has no partitions.
- * This is usually a transient state following the
- * auto-creation of a topic. */
- if (unlikely(rkt->rkt_partition_cnt == 0)) {
- partition = RD_KAFKA_PARTITION_UA;
- break;
- }
-
- /* Partition not assigned, run partitioner. */
- if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {
-
- if (!rkt->rkt_conf.random_partitioner &&
- (!rkm->rkm_key ||
- (rkm->rkm_key_len == 0 &&
- rkt->rkt_conf.partitioner ==
- rd_kafka_msg_partitioner_consistent_random))) {
- partition = rd_kafka_msg_sticky_partition(
- rkt, rkm->rkm_key, rkm->rkm_key_len,
- rkt->rkt_partition_cnt,
- rkt->rkt_conf.opaque, rkm->rkm_opaque);
- } else {
- partition = rkt->rkt_conf.partitioner(
- rkt, rkm->rkm_key, rkm->rkm_key_len,
- rkt->rkt_partition_cnt,
- rkt->rkt_conf.opaque, rkm->rkm_opaque);
- }
- } else
- partition = rkm->rkm_partition;
-
- /* Check that partition exists. */
- if (partition >= rkt->rkt_partition_cnt) {
- err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- if (do_lock)
- rd_kafka_topic_rdunlock(rkt);
- return err;
- }
- break;
-
- default:
- rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
- break;
- }
-
- /* Get new partition */
- rktp_new = rd_kafka_toppar_get(rkt, partition, 0);
-
- if (unlikely(!rktp_new)) {
- /* Unknown topic or partition */
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
- err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- else
- err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- if (do_lock)
- rd_kafka_topic_rdunlock(rkt);
-
- return err;
- }
-
- rd_atomic64_add(&rktp_new->rktp_c.producer_enq_msgs, 1);
-
- /* Update message partition */
- if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA)
- rkm->rkm_partition = partition;
-
- /* Partition is available: enqueue msg on partition's queue */
- rd_kafka_toppar_enq_msg(rktp_new, rkm, rd_clock());
- if (do_lock)
- rd_kafka_topic_rdunlock(rkt);
-
- if (rktp_new->rktp_partition != RD_KAFKA_PARTITION_UA &&
- rd_kafka_is_transactional(rkt->rkt_rk)) {
- /* Add partition to transaction */
- rd_kafka_txn_add_partition(rktp_new);
- }
-
- rd_kafka_toppar_destroy(rktp_new); /* from _get() */
- return 0;
-}
-
-
-
-/**
- * @name Public message type (rd_kafka_message_t)
- */
-void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage) {
- rd_kafka_op_t *rko;
-
- if (likely((rko = (rd_kafka_op_t *)rkmessage->_private) != NULL))
- rd_kafka_op_destroy(rko);
- else {
- rd_kafka_msg_t *rkm = rd_kafka_message2msg(rkmessage);
- rd_kafka_msg_destroy(NULL, rkm);
- }
-}
-
-
-rd_kafka_message_t *rd_kafka_message_new(void) {
- rd_kafka_msg_t *rkm = rd_calloc(1, sizeof(*rkm));
- rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM;
- rkm->rkm_broker_id = -1;
- return (rd_kafka_message_t *)rkm;
-}
-
-
-/**
- * @brief Set up a rkmessage from an rko for passing to the application.
- * @remark Will trigger on_consume() interceptors if any.
- */
-static rd_kafka_message_t *
-rd_kafka_message_setup(rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) {
- rd_kafka_topic_t *rkt;
- rd_kafka_toppar_t *rktp = NULL;
-
- if (rko->rko_type == RD_KAFKA_OP_DR) {
- rkt = rko->rko_u.dr.rkt;
- } else {
- if (rko->rko_rktp) {
- rktp = rko->rko_rktp;
- rkt = rktp->rktp_rkt;
- } else
- rkt = NULL;
-
- rkmessage->_private = rko;
- }
-
-
- if (!rkmessage->rkt && rkt)
- rkmessage->rkt = rd_kafka_topic_keep(rkt);
-
- if (rktp)
- rkmessage->partition = rktp->rktp_partition;
-
- if (!rkmessage->err)
- rkmessage->err = rko->rko_err;
-
- /* Call on_consume interceptors */
- switch (rko->rko_type) {
- case RD_KAFKA_OP_FETCH:
- if (!rkmessage->err && rkt)
- rd_kafka_interceptors_on_consume(rkt->rkt_rk,
- rkmessage);
- break;
-
- default:
- break;
- }
-
- return rkmessage;
-}
-
-
-
-/**
- * @brief Get rkmessage from rkm (for EVENT_DR)
- * @remark Must only be called just prior to passing a dr to the application.
- */
-rd_kafka_message_t *rd_kafka_message_get_from_rkm(rd_kafka_op_t *rko,
- rd_kafka_msg_t *rkm) {
- return rd_kafka_message_setup(rko, &rkm->rkm_rkmessage);
-}
-
-/**
- * @brief Convert rko to rkmessage
- * @remark Must only be called just prior to passing a consumed message
- * or event to the application.
- * @remark Will trigger on_consume() interceptors, if any.
- * @returns a rkmessage (bound to the rko).
- */
-rd_kafka_message_t *rd_kafka_message_get(rd_kafka_op_t *rko) {
- rd_kafka_message_t *rkmessage;
-
- if (!rko)
- return rd_kafka_message_new(); /* empty */
-
- switch (rko->rko_type) {
- case RD_KAFKA_OP_FETCH:
- /* Use embedded rkmessage */
- rkmessage = &rko->rko_u.fetch.rkm.rkm_rkmessage;
- break;
-
- case RD_KAFKA_OP_ERR:
- case RD_KAFKA_OP_CONSUMER_ERR:
- rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage;
- rkmessage->payload = rko->rko_u.err.errstr;
- rkmessage->len =
- rkmessage->payload ? strlen(rkmessage->payload) : 0;
- rkmessage->offset = rko->rko_u.err.offset;
- break;
-
- default:
- rd_kafka_assert(NULL, !*"unhandled optype");
- RD_NOTREACHED();
- return NULL;
- }
-
- return rd_kafka_message_setup(rko, rkmessage);
-}
-
-
-int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage,
- rd_kafka_timestamp_type_t *tstype) {
- rd_kafka_msg_t *rkm;
-
- if (rkmessage->err) {
- if (tstype)
- *tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
- return -1;
- }
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- if (tstype)
- *tstype = rkm->rkm_tstype;
-
- return rkm->rkm_timestamp;
-}
-
-
-int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage) {
- rd_kafka_msg_t *rkm;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- if (unlikely(!rkm->rkm_ts_enq))
- return -1;
-
- return rd_clock() - rkm->rkm_ts_enq;
-}
-
-
-int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage) {
- rd_kafka_msg_t *rkm;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- return rkm->rkm_broker_id;
-}
-
-
-
-/**
- * @brief Parse serialized message headers and populate
- * rkm->rkm_headers (which must be NULL).
- */
-static rd_kafka_resp_err_t rd_kafka_msg_headers_parse(rd_kafka_msg_t *rkm) {
- rd_kafka_buf_t *rkbuf;
- int64_t HeaderCount;
- const int log_decode_errors = 0;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG;
- int i;
- rd_kafka_headers_t *hdrs = NULL;
-
- rd_dassert(!rkm->rkm_headers);
-
- if (RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs) == 0)
- return RD_KAFKA_RESP_ERR__NOENT;
-
- rkbuf = rd_kafka_buf_new_shadow(
- rkm->rkm_u.consumer.binhdrs.data,
- RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs), NULL);
-
- rd_kafka_buf_read_varint(rkbuf, &HeaderCount);
-
- if (HeaderCount <= 0) {
- rd_kafka_buf_destroy(rkbuf);
- return RD_KAFKA_RESP_ERR__NOENT;
- } else if (unlikely(HeaderCount > 100000)) {
- rd_kafka_buf_destroy(rkbuf);
- return RD_KAFKA_RESP_ERR__BAD_MSG;
- }
-
- hdrs = rd_kafka_headers_new((size_t)HeaderCount);
-
- for (i = 0; (int64_t)i < HeaderCount; i++) {
- int64_t KeyLen, ValueLen;
- const char *Key, *Value;
-
- rd_kafka_buf_read_varint(rkbuf, &KeyLen);
- rd_kafka_buf_read_ptr(rkbuf, &Key, (size_t)KeyLen);
-
- rd_kafka_buf_read_varint(rkbuf, &ValueLen);
- if (unlikely(ValueLen == -1))
- Value = NULL;
- else
- rd_kafka_buf_read_ptr(rkbuf, &Value, (size_t)ValueLen);
-
- rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, Value,
- (ssize_t)ValueLen);
- }
-
- rkm->rkm_headers = hdrs;
-
- rd_kafka_buf_destroy(rkbuf);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- err = rkbuf->rkbuf_err;
- rd_kafka_buf_destroy(rkbuf);
- if (hdrs)
- rd_kafka_headers_destroy(hdrs);
- return err;
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_message_headers(const rd_kafka_message_t *rkmessage,
- rd_kafka_headers_t **hdrsp) {
- rd_kafka_msg_t *rkm;
- rd_kafka_resp_err_t err;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- if (rkm->rkm_headers) {
- *hdrsp = rkm->rkm_headers;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- /* Producer (rkm_headers will be set if there were any headers) */
- if (rkm->rkm_flags & RD_KAFKA_MSG_F_PRODUCER)
- return RD_KAFKA_RESP_ERR__NOENT;
-
- /* Consumer */
-
- /* No previously parsed headers, check if the underlying
- * protocol message had headers and if so, parse them. */
- if (unlikely(!RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs)))
- return RD_KAFKA_RESP_ERR__NOENT;
-
- err = rd_kafka_msg_headers_parse(rkm);
- if (unlikely(err))
- return err;
-
- *hdrsp = rkm->rkm_headers;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
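-
-/*
- * Editor's sketch (not part of the original file): iterating a consumed
- * message's headers. rd_kafka_header_get_all() is standard public API and
- * returns RD_KAFKA_RESP_ERR__NOENT once \p idx runs past the last header.
- */
-#if 0 /* usage sketch only */
-static void example_dump_headers(const rd_kafka_message_t *rkmessage) {
- rd_kafka_headers_t *hdrs;
- const char *name;
- const void *val;
- size_t size, idx;
- if (rd_kafka_message_headers(rkmessage, &hdrs))
- return; /* no headers (or parse error) */
- for (idx = 0;
- !rd_kafka_header_get_all(hdrs, idx, &name, &val, &size); idx++)
- printf("header %s (%zu bytes)\n", name, size);
-}
-#endif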
-
-
-rd_kafka_resp_err_t
-rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage,
- rd_kafka_headers_t **hdrsp) {
- rd_kafka_msg_t *rkm;
- rd_kafka_resp_err_t err;
-
- err = rd_kafka_message_headers(rkmessage, hdrsp);
- if (err)
- return err;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
- rkm->rkm_headers = NULL;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage,
- rd_kafka_headers_t *hdrs) {
- rd_kafka_msg_t *rkm;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- if (rkm->rkm_headers) {
- assert(rkm->rkm_headers != hdrs);
- rd_kafka_headers_destroy(rkm->rkm_headers);
- }
-
- rkm->rkm_headers = hdrs;
-}
-
-
-
-rd_kafka_msg_status_t
-rd_kafka_message_status(const rd_kafka_message_t *rkmessage) {
- rd_kafka_msg_t *rkm;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- return rkm->rkm_status;
-}
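-
-/*
- * Editor's sketch (not part of the original file): using the persistence
- * status from a delivery report callback to detect messages that may have
- * been duplicated by a retry; the status enum values are standard public
- * API, the callback name is illustrative.
- */
-#if 0 /* usage sketch only */
-static void example_dr_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque) {
- if (rkmessage->err &&
- rd_kafka_message_status(rkmessage) ==
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED)
- fprintf(stderr, "Delivery uncertain: may be persisted\n");
-}
-#endif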
-
-
-int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage) {
- rd_kafka_msg_t *rkm;
-
- if (unlikely(!rkmessage->rkt ||
- rkmessage->rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER))
- return -1;
-
- rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
-
- return rkm->rkm_u.consumer.leader_epoch;
-}
-
-
-void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) {
- rd_kafka_msg_t *rkm;
- int cnt = 0;
-
- fprintf(fp, "%s msgq_dump (%d messages, %" PRIusz " bytes):\n", what,
- rd_kafka_msgq_len(rkmq), rd_kafka_msgq_size(rkmq));
- TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
- fprintf(fp,
- " [%" PRId32 "]@%" PRId64 ": rkm msgid %" PRIu64
- ": \"%.*s\"\n",
- rkm->rkm_partition, rkm->rkm_offset,
- rkm->rkm_u.producer.msgid, (int)rkm->rkm_len,
- (const char *)rkm->rkm_payload);
- rd_assert(cnt++ < rkmq->rkmq_msg_cnt);
- }
-}
-
-
-
-/**
- * @brief Destroy resources associated with msgbatch
- */
-void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb) {
- if (rkmb->rktp) {
- rd_kafka_toppar_destroy(rkmb->rktp);
- rkmb->rktp = NULL;
- }
-
- rd_assert(RD_KAFKA_MSGQ_EMPTY(&rkmb->msgq));
-}
-
-
-/**
- * @brief Initialize a message batch for the Idempotent Producer.
- */
-void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid) {
- memset(rkmb, 0, sizeof(*rkmb));
-
- rkmb->rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_msgq_init(&rkmb->msgq);
-
- rkmb->pid = pid;
- rkmb->first_seq = -1;
- rkmb->epoch_base_msgid = epoch_base_msgid;
-}
-
-
-/**
- * @brief Set the first message in the batch, which is used to set
- * the BaseSequence and keep track of batch reconstruction range.
- *
- * @param rkm is the first message in the batch.
- */
-void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb,
- rd_kafka_msg_t *rkm) {
- rd_assert(rkmb->first_msgid == 0);
-
- if (!rd_kafka_pid_valid(rkmb->pid))
- return;
-
- rkmb->first_msgid = rkm->rkm_u.producer.msgid;
-
- /* Our msgid counter is 64-bits, but the
- * Kafka protocol's sequence is only 31 (signed), so we'll
- * need to handle wrapping. */
- rkmb->first_seq = rd_kafka_seq_wrap(rkm->rkm_u.producer.msgid -
- rkmb->epoch_base_msgid);
-
- /* Check if there is a stored last message
- * on the first msg, which means an entire
- * batch of messages is being retried and
- * we need to maintain the exact messages
- * of the original batch.
- * Simply tracking the last message, on
- * the first message, is sufficient for now.
- * Will be 0 if not applicable. */
- rkmb->last_msgid = rkm->rkm_u.producer.last_msgid;
-}
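-
-/*
- * Editor's note (not part of the original file): per the expectations in
- * the unit test table further below, rd_kafka_seq_wrap() behaves like
- * masking the 64-bit msgid down to a non-negative 31-bit sequence,
- * approximately:
- */
-#if 0 /* illustration only */
-static int32_t example_seq_wrap(int64_t msgid) {
- return (int32_t)(msgid & (int64_t)INT32_MAX);
-}
-#endif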
-
-
-
-/**
- * @brief Message batch is ready to be transmitted.
- *
- * @remark This function assumes the batch will be transmitted and increases
- * the toppar's in-flight count.
- */
-void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb) {
- rd_kafka_toppar_t *rktp = rkmb->rktp;
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
-
- /* Keep track of number of requests in-flight per partition,
- * and the number of partitions with in-flight requests when
- * idempotent producer - this is used to drain partitions
- * before resetting the PID. */
- if (rd_atomic32_add(&rktp->rktp_msgs_inflight,
- rd_kafka_msgq_len(&rkmb->msgq)) ==
- rd_kafka_msgq_len(&rkmb->msgq) &&
- rd_kafka_is_idempotent(rk))
- rd_kafka_idemp_inflight_toppar_add(rk, rktp);
-}
-
-
-
-/**
- * @brief Allow queue wakeups after \p abstime, or when the
- * given \p batch_msg_cnt or \p batch_msg_bytes have been reached.
- *
- * @param rkmq Queue to monitor and set wakeup parameters on.
- * @param dest_rkmq Destination queue used to meter current queue depths
- * and oldest message. May be the same as \p rkmq but is
- * typically the rktp_xmit_msgq.
- * @param next_wakeup If non-NULL: update the caller's next scheduler wakeup
- * according to the wakeup time calculated by this function.
- * @param now The current time.
- * @param linger_us The configured queue linger / batching time.
- * @param batch_msg_cnt Queue threshold before signalling.
- * @param batch_msg_bytes Queue threshold before signalling.
- *
- * @returns true if the wakeup conditions are already met and messages are ready
- * to be sent, else false.
- *
- * @locks_required rd_kafka_toppar_lock()
- *
- *
- * Producer queue and broker thread wake-up behaviour.
- *
- * There are contradicting requirements at play here:
- * - Latency: queued messages must be batched and sent according to
- * batch size and linger.ms configuration.
- * - Wakeups: keep the number of thread wake-ups to a minimum to avoid
- * high CPU utilization and context switching.
- *
- * The message queue (rd_kafka_msgq_t) has functionality for the writer (app)
- * to wake up the reader (broker thread) when there's a new message added.
- * This wakeup is done through a combination of cndvar signalling and IO
- * writes to make sure a thread wakeup is triggered regardless of whether
- * the broker thread is blocking on cnd_timedwait() or on IO poll.
- * When the broker thread is woken up it will scan all the partitions it is
- * the leader for to check if there are messages to be sent - all according
- * to the configured batch size and linger.ms - and then decide its next
- * wait time depending on the lowest remaining linger.ms setting of any
- * partition with messages enqueued.
- *
- * This wait time must also be set as a threshold on the message queue, telling
- * the writer (app) that it must not trigger a wakeup until the wait time
- * has expired, or the batch sizes have been exceeded.
- *
- * The message queue wakeup time is per partition, while the broker thread
- * wakeup time is the lowest of all its partitions' wakeup times.
- *
- * The per-partition wakeup constraints are calculated and set by
- * rd_kafka_msgq_allow_wakeup_at() which is called from the broker thread's
- * per-partition handler.
- * This function is called each time there are changes to the broker-local
- * partition transmit queue (rktp_xmit_msgq), such as:
- * - messages are moved from the partition queue (rktp_msgq) to rktp_xmit_msgq
- * - messages are moved to a ProduceRequest
- * - messages are timed out from the rktp_xmit_msgq
- * - the flushing state changed (rd_kafka_flush() is called or returned).
- *
- * If none of these things happen, the broker thread will simply read the
- * last stored wakeup time for each partition and use that for calculating its
- * minimum wait time.
- *
- *
- * On the writer side, namely the application calling rd_kafka_produce(), the
- * following checks are performed to see if it may trigger a wakeup when
- * it adds a new message to the partition queue:
- * - the current time has reached the wakeup time (e.g., remaining linger.ms
- * has expired), or
- * - with the new message(s) being added, either the batch.size or
- * batch.num.messages thresholds have been exceeded, or
- * - the application is calling rd_kafka_flush(),
- * - and no wakeup has been signalled yet. This is critical since it may take
- * some time for the broker thread to do its work, so we'll want to avoid
- * flooding it with wakeups. So a wakeup is only sent once per
- * wakeup period.
- */
-rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
- const rd_kafka_msgq_t *dest_rkmq,
- rd_ts_t *next_wakeup,
- rd_ts_t now,
- rd_ts_t linger_us,
- int32_t batch_msg_cnt,
- int64_t batch_msg_bytes) {
- int32_t msg_cnt = rd_kafka_msgq_len(dest_rkmq);
- int64_t msg_bytes = rd_kafka_msgq_size(dest_rkmq);
-
- if (RD_KAFKA_MSGQ_EMPTY(dest_rkmq)) {
- rkmq->rkmq_wakeup.on_first = rd_true;
- rkmq->rkmq_wakeup.abstime = now + linger_us;
- /* Leave next_wakeup untouched since the queue is empty */
- msg_cnt = 0;
- msg_bytes = 0;
- } else {
- const rd_kafka_msg_t *rkm = rd_kafka_msgq_first(dest_rkmq);
-
- rkmq->rkmq_wakeup.on_first = rd_false;
-
- if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
- /* Honour retry.backoff.ms:
- * wait for backoff to expire */
- rkmq->rkmq_wakeup.abstime =
- rkm->rkm_u.producer.ts_backoff;
- } else {
- /* Use message's produce() time + linger.ms */
- rkmq->rkmq_wakeup.abstime =
- rd_kafka_msg_enq_time(rkm) + linger_us;
- if (rkmq->rkmq_wakeup.abstime <= now)
- rkmq->rkmq_wakeup.abstime = now;
- }
-
- /* Update the caller's scheduler wakeup time */
- if (next_wakeup && rkmq->rkmq_wakeup.abstime < *next_wakeup)
- *next_wakeup = rkmq->rkmq_wakeup.abstime;
-
- msg_cnt = rd_kafka_msgq_len(dest_rkmq);
- msg_bytes = rd_kafka_msgq_size(dest_rkmq);
- }
-
- /*
- * If there are more messages or bytes in queue than the batch limits,
- * or the linger time has been exceeded,
- * then there is no need for wakeup since the broker thread will
- * produce those messages as quickly as it can.
- */
- if (msg_cnt >= batch_msg_cnt || msg_bytes >= batch_msg_bytes ||
- (msg_cnt > 0 && now >= rkmq->rkmq_wakeup.abstime)) {
- /* Prevent further signalling */
- rkmq->rkmq_wakeup.signalled = rd_true;
-
- /* Batch is ready */
- return rd_true;
- }
-
- /* If the current msg or byte count is less than the batch limits,
- * set the rkmq wakeup thresholds to the remaining count and size
- * needed to reach the batch limits.
- * This is for the case where the producer is waiting for more
- * messages to accumulate into a batch. The wakeup should only
- * occur once a threshold is reached or the abstime has expired.
- */
- rkmq->rkmq_wakeup.signalled = rd_false;
- rkmq->rkmq_wakeup.msg_cnt = batch_msg_cnt - msg_cnt;
- rkmq->rkmq_wakeup.msg_bytes = batch_msg_bytes - msg_bytes;
-
- return rd_false;
-}
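-
-/*
- * Editor's sketch (not part of the original file): the wakeup thresholds
- * above correspond to standard producer configuration; "linger.ms",
- * "batch.num.messages" and "batch.size" are documented librdkafka
- * properties, the values below are placeholders.
- */
-#if 0 /* usage sketch only */
-static void example_batch_conf(rd_kafka_conf_t *conf) {
- char errstr[256];
- rd_kafka_conf_set(conf, "linger.ms", "5", errstr, sizeof(errstr));
- rd_kafka_conf_set(conf, "batch.num.messages", "10000", errstr,
- sizeof(errstr));
- rd_kafka_conf_set(conf, "batch.size", "1000000", errstr,
- sizeof(errstr));
-}
-#endif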
-
-
-
-/**
- * @brief Verify order (by msgid) in message queue.
- * For development use only.
- */
-void rd_kafka_msgq_verify_order0(const char *function,
- int line,
- const rd_kafka_toppar_t *rktp,
- const rd_kafka_msgq_t *rkmq,
- uint64_t exp_first_msgid,
- rd_bool_t gapless) {
- const rd_kafka_msg_t *rkm;
- uint64_t exp;
- int errcnt = 0;
- int cnt = 0;
- const char *topic = rktp ? rktp->rktp_rkt->rkt_topic->str : "n/a";
- int32_t partition = rktp ? rktp->rktp_partition : -1;
-
- if (rd_kafka_msgq_len(rkmq) == 0)
- return;
-
- if (exp_first_msgid)
- exp = exp_first_msgid;
- else {
- exp = rd_kafka_msgq_first(rkmq)->rkm_u.producer.msgid;
- if (exp == 0) /* message without msgid (e.g., UA partition) */
- return;
- }
-
- TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
-#if 0
- printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) "
- "msgid %"PRIu64"\n",
- function, line,
- topic, partition,
- cnt, rkm, rkm->rkm_u.producer.msgid);
-#endif
- if (gapless && rkm->rkm_u.producer.msgid != exp) {
- printf("%s:%d: %s [%" PRId32
- "]: rkm #%d (%p) "
- "msgid %" PRIu64
- ": "
- "expected msgid %" PRIu64 "\n",
- function, line, topic, partition, cnt, rkm,
- rkm->rkm_u.producer.msgid, exp);
- errcnt++;
- } else if (!gapless && rkm->rkm_u.producer.msgid < exp) {
- printf("%s:%d: %s [%" PRId32
- "]: rkm #%d (%p) "
- "msgid %" PRIu64
- ": "
- "expected increased msgid >= %" PRIu64 "\n",
- function, line, topic, partition, cnt, rkm,
- rkm->rkm_u.producer.msgid, exp);
- errcnt++;
- } else
- exp++;
-
- if (cnt >= rkmq->rkmq_msg_cnt) {
- printf("%s:%d: %s [%" PRId32
- "]: rkm #%d (%p) "
- "msgid %" PRIu64 ": loop in queue?\n",
- function, line, topic, partition, cnt, rkm,
- rkm->rkm_u.producer.msgid);
- errcnt++;
- break;
- }
-
- cnt++;
- }
-
- rd_assert(!errcnt);
-}
-
-
-
-/**
- * @name Unit tests
- */
-
-/**
- * @brief Unittest: message allocator
- */
-rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize) {
- rd_kafka_msg_t *rkm;
-
- rkm = rd_calloc(1, sizeof(*rkm));
- rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM;
- rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID;
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
-
- if (msgsize) {
- rd_assert(msgsize <= sizeof(*rkm));
- rkm->rkm_payload = rkm;
- rkm->rkm_len = msgsize;
- }
-
- return rkm;
-}
-
-
-
-/**
- * @brief Unittest: destroy all messages in queue
- */
-void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq) {
- rd_kafka_msg_t *rkm, *tmp;
-
- TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp)
- rd_kafka_msg_destroy(NULL, rkm);
-
-
- rd_kafka_msgq_init(rkmq);
-}
-
-
-
-static int ut_verify_msgq_order(const char *what,
- const rd_kafka_msgq_t *rkmq,
- uint64_t first,
- uint64_t last,
- rd_bool_t req_consecutive) {
- const rd_kafka_msg_t *rkm;
- uint64_t expected = first;
- int incr = first < last ? +1 : -1;
- int fails = 0;
- int cnt = 0;
-
- TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
- if ((req_consecutive &&
- rkm->rkm_u.producer.msgid != expected) ||
- (!req_consecutive &&
- rkm->rkm_u.producer.msgid < expected)) {
- if (fails++ < 100)
- RD_UT_SAY("%s: expected msgid %s %" PRIu64
- " not %" PRIu64 " at index #%d",
- what, req_consecutive ? "==" : ">=",
- expected, rkm->rkm_u.producer.msgid,
- cnt);
- }
-
- cnt++;
- expected += incr;
-
- if (cnt > rkmq->rkmq_msg_cnt) {
- RD_UT_SAY("%s: loop in queue?", what);
- fails++;
- break;
- }
- }
-
- RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails);
- return fails;
-}
-
-/**
- * @brief Verify ordering comparator for message queues.
- */
-static int unittest_msgq_order(const char *what,
- int fifo,
- int (*cmp)(const void *, const void *)) {
- rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
- rd_kafka_msg_t *rkm;
- rd_kafka_msgq_t sendq, sendq2;
- const size_t msgsize = 100;
- int i;
-
- RD_UT_SAY("%s: testing in %s mode", what, fifo ? "FIFO" : "LIFO");
-
- for (i = 1; i <= 6; i++) {
- rkm = ut_rd_kafka_msg_new(msgsize);
- rkm->rkm_u.producer.msgid = i;
- rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp);
- }
-
- if (fifo) {
- if (ut_verify_msgq_order("added", &rkmq, 1, 6, rd_true))
- return 1;
- } else {
- if (ut_verify_msgq_order("added", &rkmq, 6, 1, rd_true))
- return 1;
- }
-
- /* Move 3 messages to "send" queue which we then re-insert
- * in the original queue (i.e., "retry"). */
- rd_kafka_msgq_init(&sendq);
- while (rd_kafka_msgq_len(&sendq) < 3)
- rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));
-
- if (fifo) {
- if (ut_verify_msgq_order("send removed", &rkmq, 4, 6, rd_true))
- return 1;
-
- if (ut_verify_msgq_order("sendq", &sendq, 1, 3, rd_true))
- return 1;
- } else {
- if (ut_verify_msgq_order("send removed", &rkmq, 3, 1, rd_true))
- return 1;
-
- if (ut_verify_msgq_order("sendq", &sendq, 6, 4, rd_true))
- return 1;
- }
-
- /* Retry the messages, which moves them back to sendq
- * maintaining the original order */
- rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0,
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
-
- RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0,
- "sendq FIFO should be empty, not contain %d messages",
- rd_kafka_msgq_len(&sendq));
-
- if (fifo) {
- if (ut_verify_msgq_order("readded", &rkmq, 1, 6, rd_true))
- return 1;
- } else {
- if (ut_verify_msgq_order("readded", &rkmq, 6, 1, rd_true))
- return 1;
- }
-
- /* Move the first 4 messages to the "send" queue, then
- * retry them with max_retries=1, which should now fail for
- * the first 3 messages that were already retried. */
- rd_kafka_msgq_init(&sendq);
- while (rd_kafka_msgq_len(&sendq) < 4)
- rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));
-
- if (fifo) {
- if (ut_verify_msgq_order("send removed #2", &rkmq, 5, 6,
- rd_true))
- return 1;
-
- if (ut_verify_msgq_order("sendq #2", &sendq, 1, 4, rd_true))
- return 1;
- } else {
- if (ut_verify_msgq_order("send removed #2", &rkmq, 2, 1,
- rd_true))
- return 1;
-
- if (ut_verify_msgq_order("sendq #2", &sendq, 6, 3, rd_true))
- return 1;
- }
-
- /* Retry the messages, which should now keep the 3 first messages
- * on sendq (no more retries) and just number 4 moved back. */
- rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0,
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
-
- if (fifo) {
- if (ut_verify_msgq_order("readded #2", &rkmq, 4, 6, rd_true))
- return 1;
-
- if (ut_verify_msgq_order("no more retries", &sendq, 1, 3,
- rd_true))
- return 1;
-
- } else {
- if (ut_verify_msgq_order("readded #2", &rkmq, 3, 1, rd_true))
- return 1;
-
- if (ut_verify_msgq_order("no more retries", &sendq, 6, 4,
- rd_true))
- return 1;
- }
-
- /* Move all messages back on rkmq */
- rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0,
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
-
-
-        /* Move first half of messages to sendq (1,2,3).
-         * Move second half of messages to sendq2 (4,5,6).
-         * Add a new message to rkmq (7).
-         * Move first half of messages back onto rkmq (1,2,3,7).
-         * Move second half back onto rkmq (1,2,3,4,5,6,7). */
- rd_kafka_msgq_init(&sendq);
- rd_kafka_msgq_init(&sendq2);
-
- while (rd_kafka_msgq_len(&sendq) < 3)
- rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));
-
- while (rd_kafka_msgq_len(&sendq2) < 3)
- rd_kafka_msgq_enq(&sendq2, rd_kafka_msgq_pop(&rkmq));
-
- rkm = ut_rd_kafka_msg_new(msgsize);
- rkm->rkm_u.producer.msgid = i;
- rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp);
-
- rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0,
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
- rd_kafka_retry_msgq(&rkmq, &sendq2, 0, 1000, 0,
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
-
- RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0,
- "sendq FIFO should be empty, not contain %d messages",
- rd_kafka_msgq_len(&sendq));
- RD_UT_ASSERT(rd_kafka_msgq_len(&sendq2) == 0,
- "sendq2 FIFO should be empty, not contain %d messages",
- rd_kafka_msgq_len(&sendq2));
-
- if (fifo) {
- if (ut_verify_msgq_order("inject", &rkmq, 1, 7, rd_true))
- return 1;
- } else {
- if (ut_verify_msgq_order("readded #2", &rkmq, 7, 1, rd_true))
- return 1;
- }
-
- RD_UT_ASSERT(rd_kafka_msgq_size(&rkmq) ==
- rd_kafka_msgq_len(&rkmq) * msgsize,
- "expected msgq size %" PRIusz ", not %" PRIusz,
- (size_t)rd_kafka_msgq_len(&rkmq) * msgsize,
- rd_kafka_msgq_size(&rkmq));
-
-
- ut_rd_kafka_msgq_purge(&sendq);
- ut_rd_kafka_msgq_purge(&sendq2);
- ut_rd_kafka_msgq_purge(&rkmq);
-
- return 0;
-}
-
-/**
- * @brief Verify that rd_kafka_seq_wrap() works.
- */
-static int unittest_msg_seq_wrap(void) {
- static const struct exp {
- int64_t in;
- int32_t out;
- } exp[] = {
- {0, 0},
- {1, 1},
- {(int64_t)INT32_MAX + 2, 1},
- {(int64_t)INT32_MAX + 1, 0},
- {INT32_MAX, INT32_MAX},
- {INT32_MAX - 1, INT32_MAX - 1},
- {INT32_MAX - 2, INT32_MAX - 2},
- {((int64_t)1 << 33) - 2, INT32_MAX - 1},
- {((int64_t)1 << 33) - 1, INT32_MAX},
- {((int64_t)1 << 34), 0},
- {((int64_t)1 << 35) + 3, 3},
- {1710 + 1229, 2939},
- {-1, -1},
- };
- int i;
-
- for (i = 0; exp[i].in != -1; i++) {
- int32_t wseq = rd_kafka_seq_wrap(exp[i].in);
- RD_UT_ASSERT(wseq == exp[i].out,
- "Expected seq_wrap(%" PRId64 ") -> %" PRId32
- ", not %" PRId32,
- exp[i].in, exp[i].out, wseq);
- }
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Populate message queue with message ids from lo..hi (inclusive)
- */
-static void ut_msgq_populate(rd_kafka_msgq_t *rkmq,
- uint64_t lo,
- uint64_t hi,
- size_t msgsize) {
- uint64_t i;
-
- for (i = lo; i <= hi; i++) {
- rd_kafka_msg_t *rkm = ut_rd_kafka_msg_new(msgsize);
- rkm->rkm_u.producer.msgid = i;
- rd_kafka_msgq_enq(rkmq, rkm);
- }
-}
-
-
-struct ut_msg_range {
- uint64_t lo;
- uint64_t hi;
-};
-
-/**
- * @brief Verify that msgq insert sorts are optimized. Issue #2508.
- * All source ranges are combined into a single queue before insert.
- */
-static int
-unittest_msgq_insert_all_sort(const char *what,
- double max_us_per_msg,
- double *ret_us_per_msg,
- const struct ut_msg_range *src_ranges,
- const struct ut_msg_range *dest_ranges) {
- rd_kafka_msgq_t destq, srcq;
- int i;
- uint64_t lo = UINT64_MAX, hi = 0;
- uint64_t cnt = 0;
- const size_t msgsize = 100;
- size_t totsize = 0;
- rd_ts_t ts;
- double us_per_msg;
-
- RD_UT_SAY("Testing msgq insert (all) efficiency: %s", what);
-
- rd_kafka_msgq_init(&destq);
- rd_kafka_msgq_init(&srcq);
-
- for (i = 0; src_ranges[i].hi > 0; i++) {
- uint64_t this_cnt;
-
- ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi,
- msgsize);
- if (src_ranges[i].lo < lo)
- lo = src_ranges[i].lo;
- if (src_ranges[i].hi > hi)
- hi = src_ranges[i].hi;
- this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1;
- cnt += this_cnt;
- totsize += msgsize * (size_t)this_cnt;
- }
-
- for (i = 0; dest_ranges[i].hi > 0; i++) {
- uint64_t this_cnt;
-
- ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi,
- msgsize);
- if (dest_ranges[i].lo < lo)
- lo = dest_ranges[i].lo;
- if (dest_ranges[i].hi > hi)
- hi = dest_ranges[i].hi;
- this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1;
- cnt += this_cnt;
- totsize += msgsize * (size_t)this_cnt;
- }
-
- RD_UT_SAY("Begin insert of %d messages into destq with %d messages",
- rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq));
-
- ts = rd_clock();
- rd_kafka_msgq_insert_msgq(&destq, &srcq, rd_kafka_msg_cmp_msgid);
- ts = rd_clock() - ts;
- us_per_msg = (double)ts / (double)cnt;
-
- RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, us_per_msg);
-
- RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0,
- "srcq should be empty, but contains %d messages",
- rd_kafka_msgq_len(&srcq));
- RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt,
- "destq should contain %d messages, not %d", (int)cnt,
- rd_kafka_msgq_len(&destq));
-
- if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false))
- return 1;
-
- RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize,
- "expected destq size to be %" PRIusz
- " bytes, not %" PRIusz,
- totsize, rd_kafka_msgq_size(&destq));
-
- ut_rd_kafka_msgq_purge(&srcq);
- ut_rd_kafka_msgq_purge(&destq);
-
- if (!rd_unittest_slow)
- RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001),
- "maximum us/msg exceeded: %.4f > %.4f us/msg",
- us_per_msg, max_us_per_msg);
- else if (us_per_msg > max_us_per_msg + 0.0001)
- RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg",
- us_per_msg, max_us_per_msg);
-
- if (ret_us_per_msg)
- *ret_us_per_msg = us_per_msg;
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Verify that msgq insert sorts are optimized. Issue #2508.
- * Inserts each source range individually.
- */
-static int
-unittest_msgq_insert_each_sort(const char *what,
- double max_us_per_msg,
- double *ret_us_per_msg,
- const struct ut_msg_range *src_ranges,
- const struct ut_msg_range *dest_ranges) {
- rd_kafka_msgq_t destq;
- int i;
- uint64_t lo = UINT64_MAX, hi = 0;
- uint64_t cnt = 0;
- uint64_t scnt = 0;
- const size_t msgsize = 100;
- size_t totsize = 0;
- double us_per_msg;
- rd_ts_t accum_ts = 0;
-
- RD_UT_SAY("Testing msgq insert (each) efficiency: %s", what);
-
- rd_kafka_msgq_init(&destq);
-
- for (i = 0; dest_ranges[i].hi > 0; i++) {
- uint64_t this_cnt;
-
- ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi,
- msgsize);
- if (dest_ranges[i].lo < lo)
- lo = dest_ranges[i].lo;
- if (dest_ranges[i].hi > hi)
- hi = dest_ranges[i].hi;
- this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1;
- cnt += this_cnt;
- totsize += msgsize * (size_t)this_cnt;
- }
-
-
- for (i = 0; src_ranges[i].hi > 0; i++) {
- rd_kafka_msgq_t srcq;
- uint64_t this_cnt;
- rd_ts_t ts;
-
- rd_kafka_msgq_init(&srcq);
-
- ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi,
- msgsize);
- if (src_ranges[i].lo < lo)
- lo = src_ranges[i].lo;
- if (src_ranges[i].hi > hi)
- hi = src_ranges[i].hi;
- this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1;
- cnt += this_cnt;
- scnt += this_cnt;
- totsize += msgsize * (size_t)this_cnt;
-
- RD_UT_SAY(
- "Begin insert of %d messages into destq with "
- "%d messages",
- rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq));
-
- ts = rd_clock();
- rd_kafka_msgq_insert_msgq(&destq, &srcq,
- rd_kafka_msg_cmp_msgid);
- ts = rd_clock() - ts;
- accum_ts += ts;
-
- RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts,
- (double)ts / (double)this_cnt);
-
- RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0,
- "srcq should be empty, but contains %d messages",
- rd_kafka_msgq_len(&srcq));
- RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt,
- "destq should contain %d messages, not %d",
- (int)cnt, rd_kafka_msgq_len(&destq));
-
- if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false))
- return 1;
-
- RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize,
- "expected destq size to be %" PRIusz
- " bytes, not %" PRIusz,
- totsize, rd_kafka_msgq_size(&destq));
-
- ut_rd_kafka_msgq_purge(&srcq);
- }
-
- ut_rd_kafka_msgq_purge(&destq);
-
- us_per_msg = (double)accum_ts / (double)scnt;
-
- RD_UT_SAY("Total: %.4fus/msg over %" PRId64 " messages in %" PRId64
- "us",
- us_per_msg, scnt, accum_ts);
-
- if (!rd_unittest_slow)
- RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001),
- "maximum us/msg exceeded: %.4f > %.4f us/msg",
- us_per_msg, max_us_per_msg);
- else if (us_per_msg > max_us_per_msg + 0.0001)
- RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg",
- us_per_msg, max_us_per_msg);
-
-
- if (ret_us_per_msg)
- *ret_us_per_msg = us_per_msg;
-
- RD_UT_PASS();
-}
-
-
-
-/**
- * @brief Calls both insert_all and insert_each
- */
-static int unittest_msgq_insert_sort(const char *what,
- double max_us_per_msg,
- double *ret_us_per_msg,
- const struct ut_msg_range *src_ranges,
- const struct ut_msg_range *dest_ranges) {
- double ret_all = 0.0, ret_each = 0.0;
- int r;
-
- r = unittest_msgq_insert_all_sort(what, max_us_per_msg, &ret_all,
- src_ranges, dest_ranges);
- if (r)
- return r;
-
- r = unittest_msgq_insert_each_sort(what, max_us_per_msg, &ret_each,
- src_ranges, dest_ranges);
- if (r)
- return r;
-
- if (ret_us_per_msg)
- *ret_us_per_msg = RD_MAX(ret_all, ret_each);
-
- return 0;
-}
-
-
-int unittest_msg(void) {
- int fails = 0;
- double insert_baseline = 0.0;
-
- fails += unittest_msgq_order("FIFO", 1, rd_kafka_msg_cmp_msgid);
- fails += unittest_msg_seq_wrap();
-
- fails += unittest_msgq_insert_sort(
- "get baseline insert time", 100000.0, &insert_baseline,
- (const struct ut_msg_range[]) {{1, 1}, {3, 3}, {0, 0}},
- (const struct ut_msg_range[]) {{2, 2}, {4, 4}, {0, 0}});
-
- /* Allow some wiggle room in baseline time. */
- if (insert_baseline < 0.1)
- insert_baseline = 0.2;
- insert_baseline *= 3;
-
- fails += unittest_msgq_insert_sort(
- "single-message ranges", insert_baseline, NULL,
- (const struct ut_msg_range[]) {
- {2, 2}, {4, 4}, {9, 9}, {33692864, 33692864}, {0, 0}},
- (const struct ut_msg_range[]) {{1, 1},
- {3, 3},
- {5, 5},
- {10, 10},
- {33692865, 33692865},
- {0, 0}});
- fails += unittest_msgq_insert_sort(
- "many messages", insert_baseline, NULL,
- (const struct ut_msg_range[]) {{100000, 200000},
- {400000, 450000},
- {900000, 920000},
- {33692864, 33751992},
- {33906868, 33993690},
- {40000000, 44000000},
- {0, 0}},
- (const struct ut_msg_range[]) {{1, 199},
- {350000, 360000},
- {500000, 500010},
- {1000000, 1000200},
- {33751993, 33906867},
- {50000001, 50000001},
- {0, 0}});
- fails += unittest_msgq_insert_sort(
- "issue #2508", insert_baseline, NULL,
- (const struct ut_msg_range[]) {
- {33692864, 33751992}, {33906868, 33993690}, {0, 0}},
- (const struct ut_msg_range[]) {{33751993, 33906867}, {0, 0}});
-
- /* The standard case where all of the srcq
- * goes after the destq.
- * Create a big destq and a number of small srcqs.
- * Should not result in O(n) scans to find the insert position. */
- fails += unittest_msgq_insert_sort(
- "issue #2450 (v1.2.1 regression)", insert_baseline, NULL,
- (const struct ut_msg_range[]) {{200000, 200001},
- {200002, 200006},
- {200009, 200012},
- {200015, 200016},
- {200020, 200022},
- {200030, 200090},
- {200091, 200092},
- {200093, 200094},
- {200095, 200096},
- {200097, 200099},
- {0, 0}},
- (const struct ut_msg_range[]) {{1, 199999}, {0, 0}});
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h
deleted file mode 100644
index 877fac15c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_MSG_H_
-#define _RDKAFKA_MSG_H_
-
-#include "rdsysqueue.h"
-
-#include "rdkafka_proto.h"
-#include "rdkafka_header.h"
-
-
-/**
- * @brief Internal RD_KAFKA_MSG_F_.. flags
- */
-#define RD_KAFKA_MSG_F_RKT_RDLOCKED 0x100000 /* rkt is rdlock():ed */
-
-
-/**
- * @brief Message.MsgAttributes for MsgVersion v0..v1,
- * also used for MessageSet.Attributes for MsgVersion v2.
- */
-#define RD_KAFKA_MSG_ATTR_GZIP (1 << 0)
-#define RD_KAFKA_MSG_ATTR_SNAPPY (1 << 1)
-#define RD_KAFKA_MSG_ATTR_LZ4 (3)
-#define RD_KAFKA_MSG_ATTR_ZSTD (4)
-#define RD_KAFKA_MSG_ATTR_COMPRESSION_MASK 0x7
-#define RD_KAFKA_MSG_ATTR_CREATE_TIME (0 << 3)
-#define RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME (1 << 3)
-
-/**
- * @brief MessageSet.Attributes for MsgVersion v2
- *
- * Attributes:
- * ---------------------------------------------------------------------------------------------------
- * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) |
- * ---------------------------------------------------------------------------------------------------
- */
-/* Compression types same as MsgVersion 0 above */
-/* Timestamp type same as MsgVersion 0 above */
-#define RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL (1 << 4)
-#define RD_KAFKA_MSGSET_V2_ATTR_CONTROL (1 << 5)
-
-
-typedef struct rd_kafka_msg_s {
- rd_kafka_message_t rkm_rkmessage; /* MUST be first field */
-#define rkm_len rkm_rkmessage.len
-#define rkm_payload rkm_rkmessage.payload
-#define rkm_opaque rkm_rkmessage._private
-#define rkm_partition rkm_rkmessage.partition
-#define rkm_offset rkm_rkmessage.offset
-#define rkm_key rkm_rkmessage.key
-#define rkm_key_len rkm_rkmessage.key_len
-#define rkm_err rkm_rkmessage.err
-
- TAILQ_ENTRY(rd_kafka_msg_s) rkm_link;
-
- int rkm_flags;
- /* @remark These additional flags must not collide with
- * the RD_KAFKA_MSG_F_* flags in rdkafka.h */
-#define RD_KAFKA_MSG_F_FREE_RKM 0x10000 /* msg_t is allocated */
-#define RD_KAFKA_MSG_F_ACCOUNT 0x20000 /* accounted for in curr_msgs */
-#define RD_KAFKA_MSG_F_PRODUCER 0x40000 /* Producer message */
-#define RD_KAFKA_MSG_F_CONTROL 0x80000 /* Control message */
-
- rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */
- int64_t rkm_timestamp; /* Message format V1.
- * Meaning of timestamp depends on
-                                  * message Attribute LogAppendTime (broker)
- * or CreateTime (producer).
- * Unit is milliseconds since epoch (UTC).*/
-
-
- rd_kafka_headers_t *rkm_headers; /**< Parsed headers list, if any. */
-
- rd_kafka_msg_status_t rkm_status; /**< Persistence status. Updated in
- * the ProduceResponse handler:
- * this value is always up to date.
- */
- int32_t rkm_broker_id; /**< Broker message was produced to
- * or fetched from. */
-
- union {
- struct {
- rd_ts_t ts_timeout; /* Message timeout */
- rd_ts_t ts_enq; /* Enqueue/Produce time */
- rd_ts_t ts_backoff; /* Backoff next Produce until
- * this time. */
-                        uint64_t msgid; /**< Message sequential id,
- * used to maintain ordering.
- * Starts at 1. */
- uint64_t last_msgid; /**< On retry this is set
- * on the first message
- * in a batch to point
- * out the last message
- * of the batch so that
- * the batch can be
- * identically reconstructed.
- */
- int retries; /* Number of retries so far */
- } producer;
-#define rkm_ts_timeout rkm_u.producer.ts_timeout
-#define rkm_ts_enq rkm_u.producer.ts_enq
-#define rkm_msgid rkm_u.producer.msgid
-
- struct {
- rd_kafkap_bytes_t binhdrs; /**< Unparsed
- * binary headers in
- * protocol msg */
- int32_t leader_epoch; /**< Leader epoch at the time
- * the message was fetched. */
- } consumer;
- } rkm_u;
-} rd_kafka_msg_t;
-
-TAILQ_HEAD(rd_kafka_msg_head_s, rd_kafka_msg_s);
-
-
-/** @returns the absolute time a message was enqueued (producer) */
-#define rd_kafka_msg_enq_time(rkm) ((rkm)->rkm_ts_enq)
-
-/**
- * @returns the message's total maximum on-wire size.
- * @remark Depending on message version (MagicByte) the actual size
- * may be smaller.
- */
-static RD_INLINE RD_UNUSED size_t
-rd_kafka_msg_wire_size(const rd_kafka_msg_t *rkm, int MsgVersion) {
- static const size_t overheads[] = {
- [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD,
- [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD,
- [2] = RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD};
- size_t size;
- rd_dassert(MsgVersion >= 0 && MsgVersion <= 2);
-
- size = overheads[MsgVersion] + rkm->rkm_len + rkm->rkm_key_len;
- if (MsgVersion == 2 && rkm->rkm_headers)
- size += rd_kafka_headers_serialized_size(rkm->rkm_headers);
-
- return size;
-}
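-
-/*
- * Worked example (editor's sketch, not from the original sources):
- * a MsgVersion 0 message with a 10-byte key and a 100-byte value
- * occupies RD_KAFKAP_MESSAGE_V0_OVERHEAD + 10 + 100 bytes on the wire:
- *
- *   size_t sz = rd_kafka_msg_wire_size(rkm, 0);
- *
- * For MsgVersion 2 the serialized size of any headers is added on top.
- */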
-
-
-/**
- * @returns the maximum total on-wire message size regardless of MsgVersion.
- *
- * @remark This does not account for the ProduceRequest, etc., just the
- * per-message overhead.
- */
-static RD_INLINE RD_UNUSED size_t rd_kafka_msg_max_wire_size(size_t keylen,
- size_t valuelen,
- size_t hdrslen) {
- return RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD + keylen + valuelen + hdrslen;
-}
-
-/**
- * @returns the enveloping rd_kafka_msg_t pointer for an
- *          rd_kafka_msg_t-wrapped rd_kafka_message_t.
- */
-static RD_INLINE RD_UNUSED rd_kafka_msg_t *
-rd_kafka_message2msg(rd_kafka_message_t *rkmessage) {
- return (rd_kafka_msg_t *)rkmessage;
-}
-
-
-
-/**
- * @brief Message queue with message and byte counters.
- */
-TAILQ_HEAD(rd_kafka_msgs_head_s, rd_kafka_msg_s);
-typedef struct rd_kafka_msgq_s {
- struct rd_kafka_msgs_head_s rkmq_msgs; /* TAILQ_HEAD */
- int32_t rkmq_msg_cnt;
- int64_t rkmq_msg_bytes;
- struct {
- rd_ts_t abstime; /**< Allow wake-ups after this point in time.*/
- int32_t msg_cnt; /**< Signal wake-up when this message count
- * is reached. */
- int64_t msg_bytes; /**< .. or when this byte count is
- * reached. */
- rd_bool_t on_first; /**< Wake-up on first message enqueued
- * regardless of .abstime. */
- rd_bool_t signalled; /**< Wake-up (already) signalled. */
- } rkmq_wakeup;
-} rd_kafka_msgq_t;
-
-#define RD_KAFKA_MSGQ_INITIALIZER(rkmq) \
- { .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) }
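-
-/*
- * Usage sketch (mirrors the unit tests in rdkafka_msg.c): a queue can be
- * initialized statically with RD_KAFKA_MSGQ_INITIALIZER or dynamically
- * with rd_kafka_msgq_init() (declared below):
- *
- *   rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
- *   rd_kafka_msgq_t sendq;
- *   rd_kafka_msgq_init(&sendq);
- */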
-
-#define RD_KAFKA_MSGQ_FOREACH(elm, head) \
- TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link)
-
-/* @brief Check if queue is empty. Proper locks must be held. */
-#define RD_KAFKA_MSGQ_EMPTY(rkmq) TAILQ_EMPTY(&(rkmq)->rkmq_msgs)
-
-/**
- * Returns the number of messages in the specified queue.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_msgq_len(const rd_kafka_msgq_t *rkmq) {
- return (int)rkmq->rkmq_msg_cnt;
-}
-
-/**
- * Returns the total number of bytes in the specified queue.
- */
-static RD_INLINE RD_UNUSED size_t
-rd_kafka_msgq_size(const rd_kafka_msgq_t *rkmq) {
- return (size_t)rkmq->rkmq_msg_bytes;
-}
-
-
-void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm);
-
-int rd_kafka_msg_new(rd_kafka_topic_t *rkt,
- int32_t force_partition,
- int msgflags,
- char *payload,
- size_t len,
- const void *keydata,
- size_t keylen,
- void *msg_opaque);
-
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_init(rd_kafka_msgq_t *rkmq) {
- TAILQ_INIT(&rkmq->rkmq_msgs);
- rkmq->rkmq_msg_cnt = 0;
- rkmq->rkmq_msg_bytes = 0;
-}
-
-#if ENABLE_DEVEL
-#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \
- rd_kafka_msgq_verify_order0(__FUNCTION__, __LINE__, rktp, rkmq, \
- exp_first_msgid, gapless)
-#else
-#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \
- do { \
- } while (0)
-#endif
-
-void rd_kafka_msgq_verify_order0(const char *function,
- int line,
- const struct rd_kafka_toppar_s *rktp,
- const rd_kafka_msgq_t *rkmq,
- uint64_t exp_first_msgid,
- rd_bool_t gapless);
-
-
-/**
- * Concat all elements of 'src' onto tail of 'dst'.
- * 'src' will be cleared.
- * Proper locks for 'src' and 'dst' must be held.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat(rd_kafka_msgq_t *dst,
- rd_kafka_msgq_t *src) {
- TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link);
- dst->rkmq_msg_cnt += src->rkmq_msg_cnt;
- dst->rkmq_msg_bytes += src->rkmq_msg_bytes;
- rd_kafka_msgq_init(src);
- rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false);
-}
-
-/**
- * Move queue 'src' to 'dst' (overwrites dst)
- * Source will be cleared.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_move(rd_kafka_msgq_t *dst,
- rd_kafka_msgq_t *src) {
- TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link);
- dst->rkmq_msg_cnt = src->rkmq_msg_cnt;
- dst->rkmq_msg_bytes = src->rkmq_msg_bytes;
- rd_kafka_msgq_init(src);
- rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false);
-}
-
-
-/**
- * @brief Prepend all elements of \p src onto head of \p dst.
- * \p src will be cleared/re-initialized.
- *
- * @locks proper locks for \p src and \p dst MUST be held.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend(rd_kafka_msgq_t *dst,
- rd_kafka_msgq_t *src) {
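-        /* Append dst's messages to the tail of src (so src's elements
-         * come first), then move the combined queue back into dst. */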
- rd_kafka_msgq_concat(src, dst);
- rd_kafka_msgq_move(dst, src);
- rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false);
-}
-
-
-/**
- * rd_free all msgs in msgq and reinitialize the msgq.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge(rd_kafka_t *rk,
- rd_kafka_msgq_t *rkmq) {
- rd_kafka_msg_t *rkm, *next;
-
- next = TAILQ_FIRST(&rkmq->rkmq_msgs);
- while (next) {
- rkm = next;
- next = TAILQ_NEXT(next, rkm_link);
-
- rd_kafka_msg_destroy(rk, rkm);
- }
-
- rd_kafka_msgq_init(rkmq);
-}
-
-
-/**
- * Remove message from message queue
- */
-static RD_INLINE RD_UNUSED rd_kafka_msg_t *
-rd_kafka_msgq_deq(rd_kafka_msgq_t *rkmq, rd_kafka_msg_t *rkm, int do_count) {
- if (likely(do_count)) {
- rd_kafka_assert(NULL, rkmq->rkmq_msg_cnt > 0);
- rd_kafka_assert(NULL,
- rkmq->rkmq_msg_bytes >=
- (int64_t)(rkm->rkm_len + rkm->rkm_key_len));
- rkmq->rkmq_msg_cnt--;
- rkmq->rkmq_msg_bytes -= rkm->rkm_len + rkm->rkm_key_len;
- }
-
- TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link);
-
- return rkm;
-}
-
-static RD_INLINE RD_UNUSED rd_kafka_msg_t *
-rd_kafka_msgq_pop(rd_kafka_msgq_t *rkmq) {
- rd_kafka_msg_t *rkm;
-
- if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))))
- rd_kafka_msgq_deq(rkmq, rkm, 1);
-
- return rkm;
-}
-
-
-/**
- * @returns the first message in the queue, or NULL if empty.
- *
- * @locks caller's responsibility
- */
-static RD_INLINE RD_UNUSED rd_kafka_msg_t *
-rd_kafka_msgq_first(const rd_kafka_msgq_t *rkmq) {
- return TAILQ_FIRST(&rkmq->rkmq_msgs);
-}
-
-/**
- * @returns the last message in the queue, or NULL if empty.
- *
- * @locks caller's responsibility
- */
-static RD_INLINE RD_UNUSED rd_kafka_msg_t *
-rd_kafka_msgq_last(const rd_kafka_msgq_t *rkmq) {
- return TAILQ_LAST(&rkmq->rkmq_msgs, rd_kafka_msgs_head_s);
-}
-
-
-/**
- * @returns the MsgId of the first message in the queue, or 0 if empty.
- *
- * @locks caller's responsibility
- */
-static RD_INLINE RD_UNUSED uint64_t
-rd_kafka_msgq_first_msgid(const rd_kafka_msgq_t *rkmq) {
- const rd_kafka_msg_t *rkm = TAILQ_FIRST(&rkmq->rkmq_msgs);
- if (rkm)
- return rkm->rkm_u.producer.msgid;
- else
- return 0;
-}
-
-
-
-rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
- const rd_kafka_msgq_t *dest_rkmq,
- rd_ts_t *next_wakeup,
- rd_ts_t now,
- rd_ts_t linger_us,
- int32_t batch_msg_cnt,
- int64_t batch_msg_bytes);
-
-/**
- * @returns true if msgq may be awoken.
- */
-
-static RD_INLINE RD_UNUSED rd_bool_t
-rd_kafka_msgq_may_wakeup(const rd_kafka_msgq_t *rkmq, rd_ts_t now) {
- /* No: Wakeup already signalled */
- if (rkmq->rkmq_wakeup.signalled)
- return rd_false;
-
- /* Yes: Wakeup linger time has expired */
- if (now >= rkmq->rkmq_wakeup.abstime)
- return rd_true;
-
- /* Yes: First message enqueued may trigger wakeup */
- if (rkmq->rkmq_msg_cnt == 1 && rkmq->rkmq_wakeup.on_first)
- return rd_true;
-
- /* Yes: batch.size or batch.num.messages exceeded */
- if (rkmq->rkmq_msg_cnt >= rkmq->rkmq_wakeup.msg_cnt ||
- rkmq->rkmq_msg_bytes > rkmq->rkmq_wakeup.msg_bytes)
- return rd_true;
-
- /* No */
- return rd_false;
-}
-
-
-/**
- * @brief Message ordering comparator using the message id
- * number to order messages in ascending order (FIFO).
- */
-static RD_INLINE int rd_kafka_msg_cmp_msgid(const void *_a, const void *_b) {
- const rd_kafka_msg_t *a = _a, *b = _b;
-
- rd_dassert(a->rkm_u.producer.msgid);
-
- return RD_CMP(a->rkm_u.producer.msgid, b->rkm_u.producer.msgid);
-}
-
-/**
- * @brief Message ordering comparator using the message id
- * number to order messages in descending order (LIFO).
- */
-static RD_INLINE int rd_kafka_msg_cmp_msgid_lifo(const void *_a,
- const void *_b) {
- const rd_kafka_msg_t *a = _a, *b = _b;
-
- rd_dassert(a->rkm_u.producer.msgid);
-
- return RD_CMP(b->rkm_u.producer.msgid, a->rkm_u.producer.msgid);
-}
-
-
-/**
- * @brief Insert message at its sorted position using the msgid.
- * @remark This is an O(n) operation.
- * @warning The message must have a msgid set.
- * @returns the message count of the queue after enqueuing the message.
- */
-int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq,
- rd_kafka_msg_t *rkm,
- int (*order_cmp)(const void *, const void *));
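-
-/*
- * Example (editor's sketch, based on unittest_msgq_order() in
- * rdkafka_msg.c): enqueue messages in ascending msgid order using the
- * FIFO comparator:
- *
- *   rkm->rkm_u.producer.msgid = 1;
- *   rd_kafka_msgq_enq_sorted0(&rkmq, rkm, rd_kafka_msg_cmp_msgid);
- */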
-
-/**
- * @brief Insert message at its sorted position using the msgid.
- * @remark This is an O(n) operation.
- * @warning The message must have a msgid set.
- * @returns the message count of the queue after enqueuing the message.
- */
-int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_msg_t *rkm);
-
-/**
- * Insert message at head of message queue.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert(rd_kafka_msgq_t *rkmq,
- rd_kafka_msg_t *rkm) {
- TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link);
- rkmq->rkmq_msg_cnt++;
- rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
-}
-
-/**
- * Append message to tail of message queue.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq(rd_kafka_msgq_t *rkmq,
- rd_kafka_msg_t *rkm) {
- TAILQ_INSERT_TAIL(&rkmq->rkmq_msgs, rkm, rkm_link);
- rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
- return (int)++rkmq->rkmq_msg_cnt;
-}
-
-
-/**
- * @returns true if the MsgId extents (first, last) in the two queues overlap.
- */
-static RD_INLINE RD_UNUSED rd_bool_t
-rd_kafka_msgq_overlap(const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) {
- const rd_kafka_msg_t *fa, *la, *fb, *lb;
-
- if (RD_KAFKA_MSGQ_EMPTY(a) || RD_KAFKA_MSGQ_EMPTY(b))
- return rd_false;
-
- fa = rd_kafka_msgq_first(a);
- fb = rd_kafka_msgq_first(b);
- la = rd_kafka_msgq_last(a);
- lb = rd_kafka_msgq_last(b);
-
- return (rd_bool_t)(
- fa->rkm_u.producer.msgid <= lb->rkm_u.producer.msgid &&
- fb->rkm_u.producer.msgid <= la->rkm_u.producer.msgid);
-}
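-
-/* Example: a queue holding msgids 1..5 overlaps a queue holding 3..9
- * (1 <= 9 and 3 <= 5), while queues holding 1..5 and 6..9 do not. */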
-
-/**
- * Scans a message queue for timed out messages and removes them from
- * 'rkmq' and adds them to 'timedout', returning the number of timed out
- * messages.
- * 'timedout' must be initialized.
- */
-int rd_kafka_msgq_age_scan(struct rd_kafka_toppar_s *rktp,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_msgq_t *timedout,
- rd_ts_t now,
- rd_ts_t *abs_next_timeout);
-
-void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq,
- rd_kafka_msgq_t *rightq,
- rd_kafka_msg_t *first_right,
- int cnt,
- int64_t bytes);
-
-rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq,
- const rd_kafka_msg_t *start_pos,
- const rd_kafka_msg_t *rkm,
- int (*cmp)(const void *, const void *),
- int *cntp,
- int64_t *bytesp);
-
-void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq,
- int32_t broker_id,
- int64_t base_offset,
- int64_t timestamp,
- rd_kafka_msg_status_t status);
-
-void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest,
- rd_kafka_msgq_t *src,
- uint64_t last_msgid,
- rd_kafka_msg_status_t status);
-
-int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt,
- rd_kafka_msg_t *rkm,
- rd_dolock_t do_lock);
-
-
-rd_kafka_message_t *rd_kafka_message_get(struct rd_kafka_op_s *rko);
-rd_kafka_message_t *rd_kafka_message_get_from_rkm(struct rd_kafka_op_s *rko,
- rd_kafka_msg_t *rkm);
-rd_kafka_message_t *rd_kafka_message_new(void);
-
-
-/**
- * @returns a (possibly) wrapped Kafka protocol message sequence counter
- * for the non-overflowing \p seq.
- */
-static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap(int64_t seq) {
- return (int32_t)(seq & (int64_t)INT32_MAX);
-}
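-
-/* Example values (verified by unittest_msg_seq_wrap() in rdkafka_msg.c):
- *   rd_kafka_seq_wrap(INT32_MAX)              == INT32_MAX
- *   rd_kafka_seq_wrap((int64_t)INT32_MAX + 1) == 0
- *   rd_kafka_seq_wrap((int64_t)INT32_MAX + 2) == 1 */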
-
-void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq);
-
-rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize);
-void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq);
-int unittest_msg(void);
-
-#endif /* _RDKAFKA_MSG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h
deleted file mode 100644
index 09c797706..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_MSGBATCH_H_
-#define _RDKAFKA_MSGBATCH_H_
-
-typedef struct rd_kafka_msgbatch_s {
- rd_kafka_toppar_t *rktp; /**< Reference to partition */
-
- rd_kafka_msgq_t msgq; /**< Messages in batch */
-
- /* Following fields are for Idempotent Producer use */
- rd_kafka_pid_t pid; /**< Producer Id and Epoch */
- int32_t first_seq; /**< Base sequence */
- int64_t first_msgid; /**< Base msgid */
- uint64_t epoch_base_msgid; /**< The partition epoch's
- * base msgid. */
- uint64_t last_msgid; /**< Last message to add to batch.
- * This is used when reconstructing
- * batches for resends with
- * the idempotent producer which
- * require retries to have the
- * exact same messages in them. */
-
-} rd_kafka_msgbatch_t;
-
-
-
-/* defined in rdkafka_msg.c */
-void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb);
-void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid);
-void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb,
- rd_kafka_msg_t *rkm);
-void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb);
-
-#endif /* _RDKAFKA_MSGBATCH_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h
deleted file mode 100644
index b79f1c946..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_MSGSET_H_
-#define _RDKAFKA_MSGSET_H_
-
-
-
-/**
- * @struct rd_kafka_aborted_txns_t
- *
- * @brief A collection of aborted transactions.
- */
-typedef struct rd_kafka_aborted_txns_s {
- rd_avl_t avl;
- /* Note: A list of nodes is maintained alongside
- * the AVL tree to facilitate traversal.
- */
- rd_list_t list;
- int32_t cnt;
-} rd_kafka_aborted_txns_t;
-
-
-rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt);
-
-void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns);
-
-void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns);
-
-void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid,
- int64_t first_offset);
-
-
-/**
- * @name MessageSet writers
- */
-rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq,
- const rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid,
- size_t *MessageSetSizep);
-
-/**
- * @name MessageSet readers
- */
-rd_kafka_resp_err_t
-rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_toppar_t *rktp,
- rd_kafka_aborted_txns_t *aborted_txns,
- const struct rd_kafka_toppar_ver *tver);
-
-int unittest_aborted_txns(void);
-
-#endif /* _RDKAFKA_MSGSET_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c
deleted file mode 100644
index 58779f3be..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c
+++ /dev/null
@@ -1,1794 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @name MessageSet reader interface
- *
- * Parses FetchResponse for Messages
- *
- *
- * @remark
- * The broker may send partial messages; when this happens we bail out
- * silently and keep the messages that we successfully parsed.
- *
- * "A Guide To The Kafka Protocol" states:
- * "As an optimization the server is allowed to
- * return a partial message at the end of the
- * message set.
- * Clients should handle this case."
- *
- * We're handling it by not passing the error upstream.
- * This is why most err_parse: goto labels (that are called from buf parsing
- * macros) suppress the error message and why log_decode_errors is off
- * unless PROTOCOL debugging is enabled.
- *
- * When a FetchResponse contains multiple partitions, each partition's
- * MessageSet may be partial, regardless of the other partitions.
- * To make sure the next partition can be parsed, each partition parse
- * uses its own sub-slice of only that partition's MessageSetSize length.
- */
-
-#include "rd.h"
-#include "rdunittest.h"
-#include "rdavl.h"
-#include "rdlist.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_header.h"
-#include "rdkafka_lz4.h"
-
-#include "rdvarint.h"
-#include "crc32c.h"
-
-#if WITH_ZLIB
-#include "rdgz.h"
-#endif
-#if WITH_SNAPPY
-#include "snappy.h"
-#endif
-#if WITH_ZSTD
-#include "rdkafka_zstd.h"
-#endif
-
-
-static RD_INLINE int64_t
-rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid,
- int64_t max_offset);
-static RD_INLINE int64_t
-rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid);
-
-
-struct msgset_v2_hdr {
- int64_t BaseOffset;
- int32_t Length;
- int32_t PartitionLeaderEpoch;
- int8_t MagicByte;
- int32_t Crc;
- int16_t Attributes;
- int32_t LastOffsetDelta;
- int64_t BaseTimestamp;
- int64_t MaxTimestamp;
- int64_t PID;
- int16_t ProducerEpoch;
- int32_t BaseSequence;
- int32_t RecordCount;
-};
-
-
-/**
- * @struct rd_kafka_aborted_txn_start_offsets_t
- *
- * @brief A sorted list of aborted transaction start offsets
- * (ascending) for a PID, and an offset into that list.
- */
-typedef struct rd_kafka_aborted_txn_start_offsets_s {
- rd_avl_node_t avl_node;
- int64_t pid;
- int offsets_idx;
- rd_list_t offsets;
-} rd_kafka_aborted_txn_start_offsets_t;
-
-
-typedef struct rd_kafka_msgset_reader_s {
- rd_kafka_buf_t *msetr_rkbuf; /**< Response read buffer */
-
- int msetr_relative_offsets; /**< Bool: using relative offsets */
-
- /**< Outer/wrapper Message fields. */
- struct {
- int64_t offset; /**< Relative_offsets: outer message's
- * Offset (last offset) */
- rd_kafka_timestamp_type_t tstype; /**< Compressed
- * MessageSet's
- * timestamp type. */
-                int64_t timestamp;                /**< ... timestamp */
- } msetr_outer;
-
- struct msgset_v2_hdr *msetr_v2_hdr; /**< MessageSet v2 header */
-
- /*
- * Aborted Transaction Start Offsets. These are arranged in a map
- * (ABORTED_TXN_OFFSETS), with PID as the key and value as follows:
- * - OFFSETS: sorted list of aborted transaction start offsets
- * (ascending)
- * - IDX: an index into OFFSETS list, initialized to 0.
- *
- * The logic for processing fetched data is as follows (note: this is
- * different from the Java client):
- *
- * 1. If the message is a transaction control message and the status is
- * ABORT then increment ABORTED_TXN_OFFSETS(PID).IDX. note: sanity check
- * that OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX] is less than the current
- * offset before incrementing. If the status is COMMIT, do nothing.
- *
- * 2. If the message is a normal message, find the corresponding OFFSETS
- * list in ABORTED_TXN_OFFSETS. If it doesn't exist, then keep the
- * message. If the PID does exist, compare ABORTED_TXN_OFFSETS(PID).IDX
- * with len(OFFSETS). If it's >= then the message should be kept. If
- * not, compare the message offset with
- * OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX]. If it's greater than or equal
- * to this value, then the message should be ignored. If it's less than,
- * then the message should be kept.
- *
- * Note: A MessageSet comprises messages from at most one transaction,
- * so the logic in step 2 is done at the message set level.
- */
- rd_kafka_aborted_txns_t *msetr_aborted_txns;
-
- const struct rd_kafka_toppar_ver *msetr_tver; /**< Toppar op version of
- * request. */
-
- int32_t msetr_leader_epoch; /**< Current MessageSet's partition
- * leader epoch (or -1). */
-
- int32_t msetr_broker_id; /**< Broker id (of msetr_rkb) */
- rd_kafka_broker_t *msetr_rkb; /* @warning Not a refcounted
- * reference! */
- rd_kafka_toppar_t *msetr_rktp; /* @warning Not a refcounted
- * reference! */
-
- int msetr_msgcnt; /**< Number of messages in rkq */
- int64_t msetr_msg_bytes; /**< Number of bytes in rkq */
- rd_kafka_q_t msetr_rkq; /**< Temp Message and error queue */
- rd_kafka_q_t *msetr_par_rkq; /**< Parent message and error queue,
- * the temp msetr_rkq will be moved
- * to this queue when parsing
- * is done.
- * Refcount is not increased. */
-
- int64_t msetr_next_offset; /**< Next offset to fetch after
- * this reader run is done.
- * Optional: only used for special
- * cases where the per-message offset
- * can't be relied on for next
- * fetch offset, such as with
- * compacted topics. */
-
- int msetr_ctrl_cnt; /**< Number of control messages
- * or MessageSets received. */
-
- int msetr_aborted_cnt; /**< Number of aborted MessageSets
- * encountered. */
-
- const char *msetr_srcname; /**< Optional message source string,
- * used in debug logging to
- * indicate messages were
- * from an inner compressed
- * message set.
- * Not freed (use const memory).
- * Add trailing space. */
-
- rd_kafka_compression_t msetr_compression; /**< Compression codec */
-} rd_kafka_msgset_reader_t;
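-
-/*
- * Illustrative sketch (editor's addition, not the actual implementation)
- * of the "step 2" decision in the ABORTED_TXN_OFFSETS comment above,
- * assuming rd_kafka_aborted_txns_get_offset() returns a negative value
- * when no aborted-transaction start offset is known for the PID:
- *
- *   static rd_bool_t
- *   msgset_is_aborted(const rd_kafka_aborted_txns_t *aborted_txns,
- *                     int64_t pid, int64_t base_offset) {
- *           int64_t txn_start =
- *               rd_kafka_aborted_txns_get_offset(aborted_txns, pid);
- *           if (txn_start < 0)
- *                   return rd_false;              (keep: no aborted txn)
- *           return base_offset >= txn_start;      (skip if inside txn)
- *   }
- */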
-
-
-
-/* Forward declarations */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr);
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr);
-
-
-/**
- * @brief Set up a MessageSet reader but don't start reading messages.
- */
-static void rd_kafka_msgset_reader_init(rd_kafka_msgset_reader_t *msetr,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_toppar_t *rktp,
- const struct rd_kafka_toppar_ver *tver,
- rd_kafka_aborted_txns_t *aborted_txns,
- rd_kafka_q_t *par_rkq) {
-
- memset(msetr, 0, sizeof(*msetr));
-
- msetr->msetr_rkb = rkbuf->rkbuf_rkb;
- msetr->msetr_leader_epoch = -1;
- msetr->msetr_broker_id = rd_kafka_broker_id(msetr->msetr_rkb);
- msetr->msetr_rktp = rktp;
- msetr->msetr_aborted_txns = aborted_txns;
- msetr->msetr_tver = tver;
- msetr->msetr_rkbuf = rkbuf;
- msetr->msetr_srcname = "";
-
- rkbuf->rkbuf_uflow_mitigation = "truncated response from broker (ok)";
-
- /* All parsed messages are put on this temporary op
- * queue first and then moved in one go to the real op queue. */
- rd_kafka_q_init(&msetr->msetr_rkq, msetr->msetr_rkb->rkb_rk);
-
- /* Make sure enqueued ops get the correct serve/opaque reflecting the
- * original queue. */
- msetr->msetr_rkq.rkq_serve = par_rkq->rkq_serve;
- msetr->msetr_rkq.rkq_opaque = par_rkq->rkq_opaque;
-
- /* Keep (non-refcounted) reference to parent queue for
- * moving the messages and events in msetr_rkq to when
- * parsing is done. */
- msetr->msetr_par_rkq = par_rkq;
-}
-
-
-
-/**
- * @brief Decompress MessageSet, pass the uncompressed MessageSet to
- * the MessageSet reader.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_decompress(rd_kafka_msgset_reader_t *msetr,
- int MsgVersion,
- int Attributes,
- int64_t Timestamp,
- int64_t Offset,
- const void *compressed,
- size_t compressed_size) {
- struct iovec iov = {.iov_base = NULL, .iov_len = 0};
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_buf_t *rkbufz;
-
- msetr->msetr_compression = codec;
-
- switch (codec) {
-#if WITH_ZLIB
- case RD_KAFKA_COMPRESSION_GZIP: {
- uint64_t outlenx = 0;
-
- /* Decompress Message payload */
- iov.iov_base = rd_gz_decompress(compressed,
- (int)compressed_size, &outlenx);
- if (unlikely(!iov.iov_base)) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "GZIP",
- "Failed to decompress Gzip "
- "message at offset %" PRId64 " of %" PRIusz
- " bytes: "
- "ignoring message",
- Offset, compressed_size);
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto err;
- }
-
- iov.iov_len = (size_t)outlenx;
- } break;
-#endif
-
-#if WITH_SNAPPY
- case RD_KAFKA_COMPRESSION_SNAPPY: {
- const char *inbuf = compressed;
- size_t inlen = compressed_size;
- int r;
- static const unsigned char snappy_java_magic[] = {
- 0x82, 'S', 'N', 'A', 'P', 'P', 'Y', 0};
- static const size_t snappy_java_hdrlen = 8 + 4 + 4;
-
- /* snappy-java adds its own header (SnappyCodec)
- * which is not compatible with the official Snappy
- * implementation.
- * 8: magic, 4: version, 4: compatible
- * followed by any number of chunks:
- * 4: length
- * ...: snappy-compressed data. */
- if (likely(inlen > snappy_java_hdrlen + 4 &&
- !memcmp(inbuf, snappy_java_magic, 8))) {
- /* snappy-java framing */
- char errstr[128];
-
- inbuf = inbuf + snappy_java_hdrlen;
- inlen -= snappy_java_hdrlen;
- iov.iov_base = rd_kafka_snappy_java_uncompress(
- inbuf, inlen, &iov.iov_len, errstr, sizeof(errstr));
-
- if (unlikely(!iov.iov_base)) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
- "%s [%" PRId32
- "]: "
- "Snappy decompression for message "
- "at offset %" PRId64
- " failed: %s: "
- "ignoring message",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, Offset,
- errstr);
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto err;
- }
-
-
- } else {
- /* No framing */
-
- /* Acquire uncompressed length */
- if (unlikely(!rd_kafka_snappy_uncompressed_length(
- inbuf, inlen, &iov.iov_len))) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
- "Failed to get length of Snappy "
- "compressed payload "
- "for message at offset %" PRId64
- " (%" PRIusz
- " bytes): "
- "ignoring message",
- Offset, inlen);
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto err;
- }
-
- /* Allocate output buffer for uncompressed data */
- iov.iov_base = rd_malloc(iov.iov_len);
- if (unlikely(!iov.iov_base)) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
- "Failed to allocate Snappy "
- "decompress buffer of size %" PRIusz
- "for message at offset %" PRId64
- " (%" PRIusz
- " bytes): %s: "
- "ignoring message",
- iov.iov_len, Offset, inlen,
- rd_strerror(errno));
- err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- goto err;
- }
-
- /* Uncompress to outbuf */
- if (unlikely((r = rd_kafka_snappy_uncompress(
- inbuf, inlen, iov.iov_base)))) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
- "Failed to decompress Snappy "
- "payload for message at offset "
- "%" PRId64 " (%" PRIusz
- " bytes): %s: "
- "ignoring message",
- Offset, inlen,
- rd_strerror(-r /*negative errno*/));
- rd_free(iov.iov_base);
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto err;
- }
- }
-
- } break;
-#endif
-
- case RD_KAFKA_COMPRESSION_LZ4: {
- err =
- rd_kafka_lz4_decompress(msetr->msetr_rkb,
- /* Proper HC? */
- MsgVersion >= 1 ? 1 : 0, Offset,
- /* @warning Will modify compressed
- * if no proper HC */
- (char *)compressed, compressed_size,
- &iov.iov_base, &iov.iov_len);
- if (err)
- goto err;
- } break;
-
-#if WITH_ZSTD
- case RD_KAFKA_COMPRESSION_ZSTD: {
- err = rd_kafka_zstd_decompress(
- msetr->msetr_rkb, (char *)compressed, compressed_size,
- &iov.iov_base, &iov.iov_len);
- if (err)
- goto err;
- } break;
-#endif
-
- default:
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "CODEC",
- "%s [%" PRId32 "]: Message at offset %" PRId64
- " with unsupported "
- "compression codec 0x%x: message ignored",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- Offset, (int)codec);
-
- err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
- goto err;
- }
-
-
- rd_assert(iov.iov_base);
-
- /*
- * Decompression successful
- */
-
- /* Create a new buffer pointing to the uncompressed
- * allocated buffer (outbuf) and let messages keep a reference to
- * this new buffer. */
- rkbufz = rd_kafka_buf_new_shadow(iov.iov_base, iov.iov_len, rd_free);
- rkbufz->rkbuf_rkb = msetr->msetr_rkbuf->rkbuf_rkb;
- rd_kafka_broker_keep(rkbufz->rkbuf_rkb);
-
-
- /* In MsgVersion v0..1 the decompressed data contains
- * an inner MessageSet, pass it to a new MessageSet reader.
- *
- * For MsgVersion v2 the decompressed data are the list of messages.
- */
-
- if (MsgVersion <= 1) {
- /* Pass decompressed data (inner Messageset)
- * to new instance of the MessageSet parser. */
- rd_kafka_msgset_reader_t inner_msetr;
- rd_kafka_msgset_reader_init(
- &inner_msetr, rkbufz, msetr->msetr_rktp, msetr->msetr_tver,
- /* there is no aborted transaction
- * support for MsgVersion < 2 */
- NULL, &msetr->msetr_rkq);
-
- inner_msetr.msetr_srcname = "compressed ";
-
- if (MsgVersion == 1) {
- /* postproc() will convert relative to
- * absolute offsets */
- inner_msetr.msetr_relative_offsets = 1;
- inner_msetr.msetr_outer.offset = Offset;
-
- /* Apply single LogAppendTime timestamp for
- * all messages. */
- if (Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) {
- inner_msetr.msetr_outer.tstype =
- RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
- inner_msetr.msetr_outer.timestamp = Timestamp;
- }
- }
-
- /* Parse the inner MessageSet */
- err = rd_kafka_msgset_reader_run(&inner_msetr);
-
- /* Transfer message count from inner to outer */
- msetr->msetr_msgcnt += inner_msetr.msetr_msgcnt;
- msetr->msetr_msg_bytes += inner_msetr.msetr_msg_bytes;
-
-
- } else {
- /* MsgVersion 2 */
- rd_kafka_buf_t *orig_rkbuf = msetr->msetr_rkbuf;
-
- rkbufz->rkbuf_uflow_mitigation =
- "truncated response from broker (ok)";
-
- /* Temporarily replace read buffer with uncompressed buffer */
- msetr->msetr_rkbuf = rkbufz;
-
- /* Read messages */
- err = rd_kafka_msgset_reader_msgs_v2(msetr);
-
- /* Restore original buffer */
- msetr->msetr_rkbuf = orig_rkbuf;
- }
-
-        /* Lose our refcnt of the uncompressed rkbuf.
- * Individual messages/rko's will have their own reference. */
- rd_kafka_buf_destroy(rkbufz);
-
- return err;
-
-err:
-        /* Enqueue error message:
- * Create op and push on temporary queue. */
- rd_kafka_consumer_err(
- &msetr->msetr_rkq, msetr->msetr_broker_id, err,
- msetr->msetr_tver->version, NULL, rktp, Offset,
- "Decompression (codec 0x%x) of message at %" PRIu64 " of %" PRIusz
- " bytes failed: %s",
- codec, Offset, compressed_size, rd_kafka_err2str(err));
-
- return err;
-}
-
-
-
-/**
- * @brief Message parser for MsgVersion v0..1
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or on single-message errors,
- * or any other error code when the MessageSet parser should stop
- * parsing (such as for partial Messages).
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msg_v0_1(rd_kafka_msgset_reader_t *msetr) {
- rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- rd_kafka_broker_t *rkb = msetr->msetr_rkb;
- struct {
- int64_t Offset; /* MessageSet header */
- int32_t MessageSize; /* MessageSet header */
- int32_t Crc;
- int8_t MagicByte; /* MsgVersion */
- int8_t Attributes;
- int64_t Timestamp; /* v1 */
- } hdr; /* Message header */
- rd_kafkap_bytes_t Key;
- rd_kafkap_bytes_t Value;
- int32_t Value_len;
- rd_kafka_op_t *rko;
- size_t hdrsize = 6; /* Header size following MessageSize */
- rd_slice_t crc_slice;
- rd_kafka_msg_t *rkm;
- int relative_offsets = 0;
- const char *reloff_str = "";
- /* Only log decoding errors if protocol debugging enabled. */
- int log_decode_errors =
- (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
- ? LOG_DEBUG
- : 0;
- size_t message_end;
-
- rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
- rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSize);
- message_end = rd_slice_offset(&rkbuf->rkbuf_reader) + hdr.MessageSize;
-
- rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
- if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader, &crc_slice,
- hdr.MessageSize - 4))
- rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - 4);
-
- rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte);
- rd_kafka_buf_read_i8(rkbuf, &hdr.Attributes);
-
- if (hdr.MagicByte == 1) { /* MsgVersion */
- rd_kafka_buf_read_i64(rkbuf, &hdr.Timestamp);
- hdrsize += 8;
- /* MsgVersion 1 has relative offsets for compressed
- * MessageSets*/
- if (!(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
- msetr->msetr_relative_offsets) {
- relative_offsets = 1;
- reloff_str = "relative ";
- }
- } else
- hdr.Timestamp = 0;
-
- /* Verify MessageSize */
- if (unlikely(hdr.MessageSize < (ssize_t)hdrsize))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "Message at %soffset %" PRId64 " MessageSize %" PRId32
- " < hdrsize %" PRIusz,
- reloff_str, hdr.Offset, hdr.MessageSize, hdrsize);
-
- /* Early check for partial messages */
- rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - hdrsize);
-
- if (rkb->rkb_rk->rk_conf.check_crcs) {
- /* Verify CRC32 if desired. */
- uint32_t calc_crc;
-
- calc_crc = rd_slice_crc32(&crc_slice);
- rd_dassert(rd_slice_remains(&crc_slice) == 0);
-
- if (unlikely(hdr.Crc != (int32_t)calc_crc)) {
- /* Propagate CRC error to application and
- * continue with next message. */
- rd_kafka_consumer_err(
- &msetr->msetr_rkq, msetr->msetr_broker_id,
- RD_KAFKA_RESP_ERR__BAD_MSG,
- msetr->msetr_tver->version, NULL, rktp, hdr.Offset,
- "Message at %soffset %" PRId64 " (%" PRId32
- " bytes) "
- "failed CRC32 check "
- "(original 0x%" PRIx32
- " != "
- "calculated 0x%" PRIx32 ")",
- reloff_str, hdr.Offset, hdr.MessageSize, hdr.Crc,
- calc_crc);
- rd_kafka_buf_skip_to(rkbuf, message_end);
- rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
- /* Continue with next message */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- }
-
-
- /* Extract key */
- rd_kafka_buf_read_bytes(rkbuf, &Key);
-
- /* Extract Value */
- rd_kafka_buf_read_bytes(rkbuf, &Value);
- Value_len = RD_KAFKAP_BYTES_LEN(&Value);
-
- /* MessageSets may contain offsets earlier than we
- * requested (compressed MessageSets in particular),
- * drop the earlier messages.
- * Note: the inner offset may only be trusted for
- * absolute offsets. KIP-31 introduced
- * ApiVersion 2 that maintains relative offsets
- * of compressed messages and the base offset
- * in the outer message is the offset of
- * the *LAST* message in the MessageSet.
- * This requires us to assign offsets
- * after all messages have been read from
-         * the MessageSet, and it also means
-         * we can't perform this offset check here
- * in that case. */
- if (!relative_offsets &&
- hdr.Offset < rktp->rktp_offsets.fetch_pos.offset)
- return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */
-
- /* Handle compressed MessageSet */
- if (unlikely(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK))
- return rd_kafka_msgset_reader_decompress(
- msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp,
- hdr.Offset, Value.data, Value_len);
-
-
- /* Pure uncompressed message, this is the innermost
- * handler after all compression and cascaded
- * MessageSets have been peeled off. */
-
- /* Create op/message container for message. */
- rko = rd_kafka_op_new_fetch_msg(
- &rkm, rktp, msetr->msetr_tver->version, rkbuf, hdr.Offset,
- (size_t)RD_KAFKAP_BYTES_LEN(&Key),
- RD_KAFKAP_BYTES_IS_NULL(&Key) ? NULL : Key.data,
- (size_t)RD_KAFKAP_BYTES_LEN(&Value),
- RD_KAFKAP_BYTES_IS_NULL(&Value) ? NULL : Value.data);
-
- rkm->rkm_u.consumer.leader_epoch = msetr->msetr_leader_epoch;
- rkm->rkm_broker_id = msetr->msetr_broker_id;
-
- /* Assign message timestamp.
- * If message was in a compressed MessageSet and the outer/wrapper
- * Message.Attribute had a LOG_APPEND_TIME set, use the
- * outer timestamp */
- if (msetr->msetr_outer.tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) {
- rkm->rkm_timestamp = msetr->msetr_outer.timestamp;
- rkm->rkm_tstype = msetr->msetr_outer.tstype;
-
- } else if (hdr.MagicByte >= 1 && hdr.Timestamp) {
- rkm->rkm_timestamp = hdr.Timestamp;
- if (hdr.Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
- else
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
- }
-
- /* Enqueue message on temporary queue */
- rd_kafka_q_enq(&msetr->msetr_rkq, rko);
- msetr->msetr_msgcnt++;
- msetr->msetr_msg_bytes += rkm->rkm_key_len + rkm->rkm_len;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue */
-
-err_parse:
- /* Count all parse errors as partial message errors. */
- rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
- return rkbuf->rkbuf_err;
-}
-
-
-
-/**
- * @brief Message parser for MsgVersion v2
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msg_v2(rd_kafka_msgset_reader_t *msetr) {
- rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- struct {
- int64_t Length;
- int8_t MsgAttributes;
- int64_t TimestampDelta;
- int64_t OffsetDelta;
- int64_t Offset; /* Absolute offset */
- rd_kafkap_bytes_t Key;
- rd_kafkap_bytes_t Value;
- rd_kafkap_bytes_t Headers;
- } hdr;
- rd_kafka_op_t *rko;
- rd_kafka_msg_t *rkm;
- /* Only log decoding errors if protocol debugging enabled. */
- int log_decode_errors =
- (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
- ? LOG_DEBUG
- : 0;
- size_t message_end;
-
- rd_kafka_buf_read_varint(rkbuf, &hdr.Length);
- message_end =
- rd_slice_offset(&rkbuf->rkbuf_reader) + (size_t)hdr.Length;
- rd_kafka_buf_read_i8(rkbuf, &hdr.MsgAttributes);
-
- rd_kafka_buf_read_varint(rkbuf, &hdr.TimestampDelta);
- rd_kafka_buf_read_varint(rkbuf, &hdr.OffsetDelta);
- hdr.Offset = msetr->msetr_v2_hdr->BaseOffset + hdr.OffsetDelta;
-
- /* Skip message if outdated */
- if (hdr.Offset < rktp->rktp_offsets.fetch_pos.offset) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
- "%s [%" PRId32
- "]: "
- "Skip offset %" PRId64 " < fetch_offset %" PRId64,
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- hdr.Offset, rktp->rktp_offsets.fetch_pos.offset);
- rd_kafka_buf_skip_to(rkbuf, message_end);
- return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */
- }
-
- /* Handle control messages */
- if (msetr->msetr_v2_hdr->Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL) {
- struct {
- int64_t KeySize;
- int16_t Version;
- int16_t Type;
- } ctrl_data;
- int64_t aborted_txn_start_offset;
-
- rd_kafka_buf_read_varint(rkbuf, &ctrl_data.KeySize);
-
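- /* Editorial note: the control message key is Version (int16) +
- * Type (int16) = 4 bytes. Version must be readable before the
- * full key size can be validated, hence the two-stage check:
- * first KeySize >= 2, then KeySize == 4. */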
- if (unlikely(ctrl_data.KeySize < 2))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "%s [%" PRId32
- "]: "
- "Ctrl message at offset %" PRId64
- " has invalid key size %" PRId64,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, hdr.Offset,
- ctrl_data.KeySize);
-
- rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Version);
-
- if (ctrl_data.Version != 0) {
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
- "%s [%" PRId32
- "]: "
- "Skipping ctrl msg with "
- "unsupported version %" PRId16
- " at offset %" PRId64,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, ctrl_data.Version,
- hdr.Offset);
- rd_kafka_buf_skip_to(rkbuf, message_end);
- return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next
- msg */
- }
-
- if (unlikely(ctrl_data.KeySize != 4))
- rd_kafka_buf_parse_fail(
- rkbuf,
- "%s [%" PRId32
- "]: "
- "Ctrl message at offset %" PRId64
- " has invalid key size %" PRId64,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, hdr.Offset,
- ctrl_data.KeySize);
-
- rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Type);
-
- /* The client is not interested in the value of the
- * control marker: skip it. */
- rd_kafka_buf_skip(
- rkbuf, (int32_t)(message_end -
- rd_slice_offset(&rkbuf->rkbuf_reader)));
-
- switch (ctrl_data.Type) {
- case RD_KAFKA_CTRL_MSG_COMMIT:
- /* always ignore. */
- break;
-
- case RD_KAFKA_CTRL_MSG_ABORT:
- if (msetr->msetr_rkb->rkb_rk->rk_conf.isolation_level !=
- RD_KAFKA_READ_COMMITTED)
- break;
-
- if (unlikely(!msetr->msetr_aborted_txns)) {
- rd_rkb_dbg(msetr->msetr_rkb,
- MSG | RD_KAFKA_DBG_EOS, "TXN",
- "%s [%" PRId32
- "] received abort txn "
- "ctrl msg at offset %" PRId64
- " for "
- "PID %" PRId64
- ", but there are no "
- "known aborted transactions: "
- "ignoring",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, hdr.Offset,
- msetr->msetr_v2_hdr->PID);
- break;
- }
-
- /* This marks the end of this (aborted) transaction,
- * advance to next aborted transaction in list */
- aborted_txn_start_offset =
- rd_kafka_aborted_txns_pop_offset(
- msetr->msetr_aborted_txns,
- msetr->msetr_v2_hdr->PID, hdr.Offset);
-
- if (unlikely(aborted_txn_start_offset == -1)) {
- rd_rkb_dbg(msetr->msetr_rkb,
- MSG | RD_KAFKA_DBG_EOS, "TXN",
- "%s [%" PRId32
- "] received abort txn "
- "ctrl msg at offset %" PRId64
- " for "
- "PID %" PRId64
- ", but this offset is "
- "not listed as an aborted "
- "transaction: aborted transaction "
- "was possibly empty: ignoring",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, hdr.Offset,
- msetr->msetr_v2_hdr->PID);
- break;
- }
- break;
-
-
- default:
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "TXN",
- "%s [%" PRId32
- "]: "
- "Unsupported ctrl message "
- "type %" PRId16
- " at offset"
- " %" PRId64 ": ignoring",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, ctrl_data.Type,
- hdr.Offset);
- break;
- }
-
- rko = rd_kafka_op_new_ctrl_msg(rktp, msetr->msetr_tver->version,
- rkbuf, hdr.Offset);
- rd_kafka_q_enq(&msetr->msetr_rkq, rko);
- msetr->msetr_msgcnt++;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- /* Regular message */
-
- /* Note: messages in aborted transactions are skipped at the MessageSet
- * level */
-
- rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Key);
- rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Value);
-
- /* We parse the Headers later, just store the size (possibly truncated)
- * and pointer to the headers. */
- hdr.Headers.len =
- (int32_t)(message_end - rd_slice_offset(&rkbuf->rkbuf_reader));
- rd_kafka_buf_read_ptr(rkbuf, &hdr.Headers.data, hdr.Headers.len);
-
- /* Create op/message container for message. */
- rko = rd_kafka_op_new_fetch_msg(
- &rkm, rktp, msetr->msetr_tver->version, rkbuf, hdr.Offset,
- (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key),
- RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ? NULL : hdr.Key.data,
- (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value),
- RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ? NULL : hdr.Value.data);
-
- rkm->rkm_u.consumer.leader_epoch = msetr->msetr_leader_epoch;
- rkm->rkm_broker_id = msetr->msetr_broker_id;
-
- /* Store pointer to unparsed message headers, they will
- * be parsed on the first access.
- * This pointer points to the rkbuf payload.
- * Note: can't perform struct copy here due to const fields (MSVC) */
- rkm->rkm_u.consumer.binhdrs.len = hdr.Headers.len;
- rkm->rkm_u.consumer.binhdrs.data = hdr.Headers.data;
-
- /* Set timestamp.
- *
- * When broker assigns the timestamps (LOG_APPEND_TIME) it will
- * assign the same timestamp for all messages in a MessageSet
- * using MaxTimestamp.
- */
- if ((msetr->msetr_v2_hdr->Attributes &
- RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) ||
- (hdr.MsgAttributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)) {
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
- rkm->rkm_timestamp = msetr->msetr_v2_hdr->MaxTimestamp;
- } else {
- rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
- rkm->rkm_timestamp =
- msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta;
- }
-
-
- /* Enqueue message on temporary queue */
- rd_kafka_q_enq(&msetr->msetr_rkq, rko);
- msetr->msetr_msgcnt++;
- msetr->msetr_msg_bytes += rkm->rkm_key_len + rkm->rkm_len;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- /* Count all parse errors as partial message errors. */
- rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
- return rkbuf->rkbuf_err;
-}
-
-
-/**
- * @brief Read v2 messages from current buffer position.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr) {
- rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- /* Only log decoding errors if protocol debugging enabled. */
- int log_decode_errors =
- (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
- ? LOG_DEBUG
- : 0;
-
- if (msetr->msetr_aborted_txns != NULL &&
- (msetr->msetr_v2_hdr->Attributes &
- (RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL |
- RD_KAFKA_MSGSET_V2_ATTR_CONTROL)) ==
- RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL) {
- /* Transactional non-control MessageSet:
- * check if it is part of an aborted transaction. */
- int64_t txn_start_offset = rd_kafka_aborted_txns_get_offset(
- msetr->msetr_aborted_txns, msetr->msetr_v2_hdr->PID);
-
- if (txn_start_offset != -1 &&
- msetr->msetr_v2_hdr->BaseOffset >= txn_start_offset) {
- /* MessageSet is part of aborted transaction */
- rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
- "%s [%" PRId32
- "]: "
- "Skipping %" PRId32
- " message(s) "
- "in aborted transaction "
- "at offset %" PRId64 " for PID %" PRId64,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- msetr->msetr_v2_hdr->RecordCount,
- txn_start_offset, msetr->msetr_v2_hdr->PID);
- rd_kafka_buf_skip(
- msetr->msetr_rkbuf,
- rd_slice_remains(
- &msetr->msetr_rkbuf->rkbuf_reader));
- msetr->msetr_aborted_cnt++;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- }
-
- while (rd_kafka_buf_read_remain(msetr->msetr_rkbuf)) {
- rd_kafka_resp_err_t err;
- err = rd_kafka_msgset_reader_msg_v2(msetr);
- if (unlikely(err))
- return err;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- /* Count all parse errors as partial message errors. */
- rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
- msetr->msetr_v2_hdr = NULL;
- return rkbuf->rkbuf_err;
-}
-
-
-
-/**
- * @brief MessageSet reader for MsgVersion v2 (FetchRequest v4)
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_v2(rd_kafka_msgset_reader_t *msetr) {
- rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- struct msgset_v2_hdr hdr;
- rd_slice_t save_slice;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- size_t len_start;
- size_t payload_size;
- int64_t LastOffset; /* Last absolute Offset in MessageSet header */
- /* Only log decoding errors if protocol debugging enabled. */
- int log_decode_errors =
- (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
- ? LOG_DEBUG
- : 0;
-
- rd_kafka_buf_read_i64(rkbuf, &hdr.BaseOffset);
- rd_kafka_buf_read_i32(rkbuf, &hdr.Length);
- len_start = rd_slice_offset(&rkbuf->rkbuf_reader);
-
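- /* Editorial note: hdr.Length counts everything that follows the
- * Length field, i.e. the MessageSet minus BaseOffset (8) and
- * Length (4) itself, hence the "- 8 - 4" in the check below. */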
- if (unlikely(hdr.Length < RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4))
- rd_kafka_buf_parse_fail(rkbuf,
- "%s [%" PRId32
- "] "
- "MessageSet at offset %" PRId64
- " length %" PRId32 " < header size %d",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, hdr.BaseOffset,
- hdr.Length,
- RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4);
-
- rd_kafka_buf_read_i32(rkbuf, &hdr.PartitionLeaderEpoch);
- msetr->msetr_leader_epoch = hdr.PartitionLeaderEpoch;
-
- rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte);
- rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
-
- if (msetr->msetr_rkb->rkb_rk->rk_conf.check_crcs) {
- /* Verify CRC32C if desired. */
- uint32_t calc_crc;
- rd_slice_t crc_slice;
- size_t crc_len = hdr.Length - 4 - 1 - 4;
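- /* Editorial note: hdr.Length spans everything after the Length
- * field, while the CRC32C covers the MessageSet from Attributes
- * onwards; the "- 4 - 1 - 4" above removes PartitionLeaderEpoch,
- * MagicByte and Crc, which were already read. */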
-
- if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader,
- &crc_slice, crc_len))
- rd_kafka_buf_check_len(rkbuf, crc_len);
-
- calc_crc = rd_slice_crc32c(&crc_slice);
-
- if (unlikely((uint32_t)hdr.Crc != calc_crc)) {
- /* Propagate CRC error to application and
- * continue with next message. */
- rd_kafka_consumer_err(
- &msetr->msetr_rkq, msetr->msetr_broker_id,
- RD_KAFKA_RESP_ERR__BAD_MSG,
- msetr->msetr_tver->version, NULL, rktp,
- hdr.BaseOffset,
- "MessageSet at offset %" PRId64 " (%" PRId32
- " bytes) "
- "failed CRC32C check "
- "(original 0x%" PRIx32
- " != "
- "calculated 0x%" PRIx32 ")",
- hdr.BaseOffset, hdr.Length, hdr.Crc, calc_crc);
- rd_kafka_buf_skip_to(rkbuf, crc_len);
- rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_err, 1);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- }
-
- rd_kafka_buf_read_i16(rkbuf, &hdr.Attributes);
- rd_kafka_buf_read_i32(rkbuf, &hdr.LastOffsetDelta);
- LastOffset = hdr.BaseOffset + hdr.LastOffsetDelta;
- rd_kafka_buf_read_i64(rkbuf, &hdr.BaseTimestamp);
- rd_kafka_buf_read_i64(rkbuf, &hdr.MaxTimestamp);
- rd_kafka_buf_read_i64(rkbuf, &hdr.PID);
- rd_kafka_buf_read_i16(rkbuf, &hdr.ProducerEpoch);
- rd_kafka_buf_read_i32(rkbuf, &hdr.BaseSequence);
- rd_kafka_buf_read_i32(rkbuf, &hdr.RecordCount);
-
- /* Payload size is hdr.Length - MessageSet headers */
- payload_size =
- hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) - len_start);
-
- if (unlikely(payload_size > rd_kafka_buf_read_remain(rkbuf)))
- rd_kafka_buf_underflow_fail(
- rkbuf, payload_size,
- "%s [%" PRId32
- "] "
- "MessageSet at offset %" PRId64 " payload size %" PRIusz,
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- hdr.BaseOffset, payload_size);
-
- /* If entire MessageSet contains old outdated offsets, skip it. */
- if (LastOffset < rktp->rktp_offsets.fetch_pos.offset) {
- rd_kafka_buf_skip(rkbuf, payload_size);
- goto done;
- }
-
- if (hdr.Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL)
- msetr->msetr_ctrl_cnt++;
-
- msetr->msetr_v2_hdr = &hdr;
-
- /* Handle compressed MessageSet */
- if (hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) {
- const void *compressed;
-
- compressed =
- rd_slice_ensure_contig(&rkbuf->rkbuf_reader, payload_size);
- rd_assert(compressed);
-
- err = rd_kafka_msgset_reader_decompress(
- msetr, 2 /*MsgVersion v2*/, hdr.Attributes,
- hdr.BaseTimestamp, hdr.BaseOffset, compressed,
- payload_size);
- if (err)
- goto err;
-
- } else {
- /* Read uncompressed messages */
-
- /* Save original slice, reduce size of the current one to
- * be limited by the MessageSet.Length, and then start reading
- * messages until the lesser slice is exhausted. */
- if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice,
- payload_size))
- rd_kafka_buf_check_len(rkbuf, payload_size);
-
- /* Read messages */
- err = rd_kafka_msgset_reader_msgs_v2(msetr);
-
- /* Restore wider slice */
- rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice);
-
- if (unlikely(err))
- goto err;
- }
-
-
-done:
- /* Set the next fetch offset to the MessageSet header's last offset + 1
- * to avoid getting stuck on compacted MessageSets where the last
- * Message in the MessageSet has an Offset < MessageSet header's
- * last offset. See KAFKA-5443 */
- msetr->msetr_next_offset = LastOffset + 1;
-
- msetr->msetr_v2_hdr = NULL;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- /* Count all parse errors as partial message errors. */
- rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
- err = rkbuf->rkbuf_err;
- /* FALLTHRU */
-err:
- msetr->msetr_v2_hdr = NULL;
- return err;
-}
-
-
-/**
- * @brief Peek into the next MessageSet to find the MsgVersion.
- *
- * @param MagicBytep the MsgVersion is returned here on success.
- *
- * @returns an error on read underflow or if the MsgVersion is
- * unsupported.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_peek_msg_version(rd_kafka_msgset_reader_t *msetr,
- int8_t *MagicBytep) {
- rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- /* Only log decoding errors if protocol debugging enabled. */
- int log_decode_errors =
- (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
- ? LOG_DEBUG
- : 0;
- size_t read_offset = rd_slice_offset(&rkbuf->rkbuf_reader);
-
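- /* Editorial sketch of why the MagicByte is peeked at +8+4+4:
- * MsgVersion v0/v1: Offset(8) + MessageSize(4) + Crc(4) + Magic
- * MsgVersion v2: BaseOffset(8) + Length(4) +
- * PartitionLeaderEpoch(4) + Magic
- * so a single peek position works for all MsgVersions. */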
- rd_kafka_buf_peek_i8(rkbuf, read_offset + 8 + 4 + 4, MagicBytep);
-
- if (unlikely(*MagicBytep < 0 || *MagicBytep > 2)) {
- int64_t Offset; /* For error logging */
- int32_t Length;
-
- rd_kafka_buf_read_i64(rkbuf, &Offset);
-
- rd_rkb_dbg(msetr->msetr_rkb,
- MSG | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FETCH,
- "MAGICBYTE",
- "%s [%" PRId32
- "]: "
- "Unsupported Message(Set) MagicByte %d at "
- "offset %" PRId64
- " "
- "(buffer position %" PRIusz "/%" PRIusz
- "): skipping",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- (int)*MagicBytep, Offset, read_offset,
- rd_slice_size(&rkbuf->rkbuf_reader));
-
- if (Offset >=
- msetr->msetr_rktp->rktp_offsets.fetch_pos.offset) {
- rd_kafka_consumer_err(
- &msetr->msetr_rkq, msetr->msetr_broker_id,
- RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED,
- msetr->msetr_tver->version, NULL, rktp, Offset,
- "Unsupported Message(Set) MagicByte %d "
- "at offset %" PRId64,
- (int)*MagicBytep, Offset);
- /* Skip message(set) */
- msetr->msetr_rktp->rktp_offsets.fetch_pos.offset =
- Offset + 1;
- }
-
- /* Skip this Message(Set).
- * If the message is malformed, the skip may trigger err_parse
- * and return ERR__BAD_MSG. */
- rd_kafka_buf_read_i32(rkbuf, &Length);
- rd_kafka_buf_skip(rkbuf, Length);
-
- return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err_parse:
- return RD_KAFKA_RESP_ERR__BAD_MSG;
-}
-
-
-/**
- * @brief Parse and read messages from msgset reader buffer.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader(rd_kafka_msgset_reader_t *msetr) {
- rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
- rd_kafka_resp_err_t (*reader[])(rd_kafka_msgset_reader_t *) = {
- /* Indexed by MsgVersion/MagicByte, pointing to
- * a Msg(Set)Version reader */
- [0] = rd_kafka_msgset_reader_msg_v0_1,
- [1] = rd_kafka_msgset_reader_msg_v0_1,
- [2] = rd_kafka_msgset_reader_v2};
- rd_kafka_resp_err_t err;
-
- /* Parse MessageSets until the slice is exhausted or an
- * error occurs (typically a partial message). */
- do {
- int8_t MagicByte;
-
- /* We don't know the MsgVersion at this point, peek where the
- * MagicByte resides both in MsgVersion v0..1 and v2 to
- * know which MessageSet reader to use. */
- err =
- rd_kafka_msgset_reader_peek_msg_version(msetr, &MagicByte);
- if (unlikely(err)) {
- if (err == RD_KAFKA_RESP_ERR__BAD_MSG)
- /* Read underflow, not an error.
- * Broker may return a partial Fetch response
- * due to its use of sendfile(2). */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- /* Continue on unsupported MsgVersions, the
- * MessageSet will be skipped. */
- continue;
- }
-
- /* Use MsgVersion-specific reader */
- err = reader[(int)MagicByte](msetr);
-
- } while (!err && rd_slice_remains(&rkbuf->rkbuf_reader) > 0);
-
- return err;
-}
-
-
-
-/**
- * @brief MessageSet post-processing.
- *
- * @param last_offsetp will be set to the offset of the last message in the set,
- * or -1 if not applicable.
- */
-static void rd_kafka_msgset_reader_postproc(rd_kafka_msgset_reader_t *msetr,
- int64_t *last_offsetp) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_q_last(&msetr->msetr_rkq, RD_KAFKA_OP_FETCH,
- 0 /* no error ops */);
- if (rko) {
- *last_offsetp = rko->rko_u.fetch.rkm.rkm_offset;
-
- if (*last_offsetp != -1 && msetr->msetr_relative_offsets) {
- /* Update messages to absolute offsets
- * and purge any messages older than the current
- * fetch offset. */
- rd_kafka_q_fix_offsets(
- &msetr->msetr_rkq,
- msetr->msetr_rktp->rktp_offsets.fetch_pos.offset,
- msetr->msetr_outer.offset - *last_offsetp);
- }
- }
-}
-
-
-
-/**
- * @brief Run the MessageSet reader, read messages until buffer is
- * exhausted (or error encountered), enqueue parsed messages on
- * partition queue.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if MessageSet was successfully
- * or partially parsed. When other error codes are returned it
- * indicates a semi-permanent error (such as unsupported MsgVersion)
- * and the fetcher should back off this partition to avoid
- * busy-looping.
- */
-static rd_kafka_resp_err_t
-rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) {
- rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
- rd_kafka_resp_err_t err;
- int64_t last_offset = -1;
-
- /* Parse MessageSets and messages */
- err = rd_kafka_msgset_reader(msetr);
-
- if (unlikely(rd_kafka_q_len(&msetr->msetr_rkq) == 0)) {
- /* The message set didn't contain a single full message,
- * and no error was posted on the response queue.
- * This probably means the size limit was too tight:
- * increase it automatically.
- * If there was at least one control message the cause is
- * probably not the size limit, and nothing is done.
- * If there were aborted messagesets and no underflow then
- * there is no error either (#2993).
- *
- * Also: avoid propagating underflow errors, which cause
- * backoffs, since we'll want to continue fetching the
- * remaining truncated messages as soon as possible.
- */
- if (msetr->msetr_ctrl_cnt > 0) {
- /* Noop */
- if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- } else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) {
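- /* Editorial note: (1 << 30) caps the automatic doubling of
- * the per-partition fetch size at 1 GiB. */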
- rktp->rktp_fetch_msg_max_bytes *= 2;
- rd_rkb_dbg(msetr->msetr_rkb, FETCH, "CONSUME",
- "Topic %s [%" PRId32
- "]: Increasing "
- "max fetch bytes to %" PRId32,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rktp->rktp_fetch_msg_max_bytes);
-
- if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- } else if (!err && msetr->msetr_aborted_cnt == 0) {
- rd_kafka_consumer_err(
- &msetr->msetr_rkq, msetr->msetr_broker_id,
- RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
- msetr->msetr_tver->version, NULL, rktp,
- rktp->rktp_offsets.fetch_pos.offset,
- "Message at offset %" PRId64
- " might be too large to fetch, try increasing "
- "receive.message.max.bytes",
- rktp->rktp_offsets.fetch_pos.offset);
-
- } else if (msetr->msetr_aborted_cnt > 0) {
- /* Noop */
- if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- } else {
- /* MessageSet post-processing. */
- rd_kafka_msgset_reader_postproc(msetr, &last_offset);
-
- /* Ignore parse errors if there was at least one
- * good message since it probably indicates a
- * partial response rather than an erroneous one. */
- if (err == RD_KAFKA_RESP_ERR__UNDERFLOW &&
- msetr->msetr_msgcnt > 0)
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME",
- "Enqueue %i %smessage(s) (%" PRId64
- " bytes, %d ops) on %s [%" PRId32
- "] fetch queue (qlen %d, v%d, last_offset %" PRId64
- ", %d ctrl msgs, %d aborted msgsets, %s)",
- msetr->msetr_msgcnt, msetr->msetr_srcname,
- msetr->msetr_msg_bytes, rd_kafka_q_len(&msetr->msetr_rkq),
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_q_len(msetr->msetr_par_rkq),
- msetr->msetr_tver->version, last_offset,
- msetr->msetr_ctrl_cnt, msetr->msetr_aborted_cnt,
- msetr->msetr_compression
- ? rd_kafka_compression2str(msetr->msetr_compression)
- : "uncompressed");
-
- /* Concatenate all messages and errors onto the parent's queue
- * (the partition's fetch queue) */
- if (rd_kafka_q_concat(msetr->msetr_par_rkq, &msetr->msetr_rkq) != -1) {
- /* Update partition's fetch offset based on
- * last message's offset. */
- if (likely(last_offset != -1))
- rktp->rktp_offsets.fetch_pos.offset = last_offset + 1;
- }
-
- /* Adjust the next fetch offset if other code (such as the v2
- * MessageSet reader) has indicated an even later next offset. */
- if (msetr->msetr_next_offset > rktp->rktp_offsets.fetch_pos.offset)
- rktp->rktp_offsets.fetch_pos.offset = msetr->msetr_next_offset;
-
- rktp->rktp_offsets.fetch_pos.leader_epoch = msetr->msetr_leader_epoch;
-
- rd_kafka_q_destroy_owner(&msetr->msetr_rkq);
-
- /* Skip remaining part of slice so caller can continue
- * with next partition. */
- rd_slice_read(&msetr->msetr_rkbuf->rkbuf_reader, NULL,
- rd_slice_remains(&msetr->msetr_rkbuf->rkbuf_reader));
- return err;
-}
-
-
-
-/**
- * @brief Parse one MessageSet at the current buffer read position,
- * enqueueing messages, propagating errors, etc.
- * @remark The current rkbuf_reader slice must be limited to the MessageSet size
- *
- * @returns see rd_kafka_msgset_reader_run()
- */
-rd_kafka_resp_err_t
-rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_toppar_t *rktp,
- rd_kafka_aborted_txns_t *aborted_txns,
- const struct rd_kafka_toppar_ver *tver) {
- rd_kafka_msgset_reader_t msetr;
- rd_kafka_resp_err_t err;
-
- rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver, aborted_txns,
- rktp->rktp_fetchq);
-
- /* Parse and handle the message set */
- err = rd_kafka_msgset_reader_run(&msetr);
-
- rd_atomic64_add(&rktp->rktp_c.rx_msgs, msetr.msetr_msgcnt);
- rd_atomic64_add(&rktp->rktp_c.rx_msg_bytes, msetr.msetr_msg_bytes);
-
- rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchcnt,
- (int64_t)msetr.msetr_msgcnt);
- rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchsize,
- (int64_t)msetr.msetr_msg_bytes);
-
- return err;
-}
-
-
-/**
- * @brief Offset comparator
- */
-static int rd_kafka_offset_cmp(const void *_a, const void *_b) {
- const int64_t *a = _a, *b = _b;
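- /* Editorial note: this idiom yields -1, 0 or 1 without the
- * overflow/truncation risk of returning (int)(*a - *b). */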
- return (*a > *b) - (*a < *b);
-}
-
-
-/**
- * @brief Pid comparator for rd_kafka_aborted_txn_start_offsets_t
- */
-static int rd_kafka_aborted_txn_cmp_by_pid(const void *_a, const void *_b) {
- const rd_kafka_aborted_txn_start_offsets_t *a = _a, *b = _b;
- return (a->pid > b->pid) - (a->pid < b->pid);
-}
-
-
-/**
- * @brief Free resources associated with an AVL tree node.
- */
-static void rd_kafka_aborted_txn_node_destroy(void *_node_ptr) {
- rd_kafka_aborted_txn_start_offsets_t *node_ptr = _node_ptr;
- rd_list_destroy(&node_ptr->offsets);
- rd_free(node_ptr);
-}
-
-
-/**
- * @brief Allocate memory for, and initialize a new
- * rd_kafka_aborted_txns_t struct.
- */
-rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt) {
- rd_kafka_aborted_txns_t *aborted_txns;
- aborted_txns = rd_malloc(sizeof(*aborted_txns));
- rd_avl_init(&aborted_txns->avl, rd_kafka_aborted_txn_cmp_by_pid, 0);
- rd_list_init(&aborted_txns->list, txn_cnt,
- rd_kafka_aborted_txn_node_destroy);
- aborted_txns->cnt = txn_cnt;
- return aborted_txns;
-}
-
-
-/**
- * @brief Free all resources associated with a
- * rd_kafka_aborted_txns_t struct.
- */
-void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns) {
- rd_list_destroy(&aborted_txns->list);
- rd_avl_destroy(&aborted_txns->avl);
- rd_free(aborted_txns);
-}
-
-
-/**
- * @brief Get the abort txn start offsets corresponding to
- * the specified pid.
- */
-static RD_INLINE rd_kafka_aborted_txn_start_offsets_t *
-rd_kafka_aborted_txns_offsets_for_pid(rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid) {
- rd_kafka_aborted_txn_start_offsets_t node;
- node.pid = pid;
- return RD_AVL_FIND(&aborted_txns->avl, &node);
-}
-
-
-/**
- * @brief Get the next aborted transaction start
- * offset for the specified pid.
- *
- * @param increment_idx if true, the offset index will be incremented.
- * @param max_offset If the next aborted offset is greater than \p max_offset
- * then the index is not incremented (regardless of
- * \p increment_idx) and the function returns -1.
- * This may be the case for empty aborted transactions
- * that have an ABORT marker but are not listed in the
- * AbortedTxns list.
- *
- *
- * @returns the start offset or -1 if there is none.
- */
-static int64_t
-rd_kafka_aborted_txns_next_offset(rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid,
- rd_bool_t increment_idx,
- int64_t max_offset) {
- int64_t abort_start_offset;
- rd_kafka_aborted_txn_start_offsets_t *node_ptr =
- rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid);
-
- if (node_ptr == NULL)
- return -1;
-
- if (unlikely(node_ptr->offsets_idx >= rd_list_cnt(&node_ptr->offsets)))
- return -1;
-
- abort_start_offset = *(
- (int64_t *)rd_list_elem(&node_ptr->offsets, node_ptr->offsets_idx));
-
- if (unlikely(abort_start_offset > max_offset))
- return -1;
-
- if (increment_idx)
- node_ptr->offsets_idx++;
-
- return abort_start_offset;
-}
-
-
-/**
- * @brief Get the next aborted transaction start
- * offset for the specified pid and progress the
- * current index to the next one.
- *
- * @param max_offset If the next aborted offset is greater than \p max_offset
- * then no offset is popped and the function returns -1.
- * This may be the case for empty aborted transactions
- * that have an ABORT marker but are not listed in the
- * AbortedTxns list.
- *
- * @returns the start offset or -1 if there is none.
- */
-static RD_INLINE int64_t
-rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid,
- int64_t max_offset) {
- return rd_kafka_aborted_txns_next_offset(aborted_txns, pid, rd_true,
- max_offset);
-}
-
-
-/**
- * @brief Get the next aborted transaction start
- * offset for the specified pid.
- *
- * @returns the start offset or -1 if there is none.
- */
-static RD_INLINE int64_t
-rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid) {
- return rd_kafka_aborted_txns_next_offset(
- (rd_kafka_aborted_txns_t *)aborted_txns, pid, rd_false, INT64_MAX);
-}
-
-
-/**
- * @brief Add a transaction start offset corresponding
- * to the specified pid to the aborted_txns collection.
- */
-void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns,
- int64_t pid,
- int64_t first_offset) {
- int64_t *v;
- rd_kafka_aborted_txn_start_offsets_t *node_ptr =
- rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid);
-
- if (!node_ptr) {
- node_ptr = rd_malloc(sizeof(*node_ptr));
- node_ptr->pid = pid;
- node_ptr->offsets_idx = 0;
- rd_list_init(&node_ptr->offsets, 0, NULL);
- /* Each PID list has no more than AbortedTxnCnt elements */
- rd_list_prealloc_elems(&node_ptr->offsets, sizeof(int64_t),
- aborted_txns->cnt, 0);
- RD_AVL_INSERT(&aborted_txns->avl, node_ptr, avl_node);
- rd_list_add(&aborted_txns->list, node_ptr);
- }
-
- v = rd_list_add(&node_ptr->offsets, NULL);
- *v = first_offset;
-}
-
-
-/**
- * @brief Sort each of the abort transaction start
- * offset lists for each pid.
- */
-void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns) {
- int k;
- for (k = 0; k < rd_list_cnt(&aborted_txns->list); k++) {
- rd_kafka_aborted_txn_start_offsets_t *el =
- rd_list_elem(&aborted_txns->list, k);
- rd_list_sort(&el->offsets, rd_kafka_offset_cmp);
- }
-}
-
-
-/**
- * @brief Unit tests for all functions that operate on
- * rd_kafka_aborted_txns_t
- */
-int unittest_aborted_txns(void) {
- rd_kafka_aborted_txns_t *aborted_txns = NULL;
- int64_t start_offset;
-
- aborted_txns = rd_kafka_aborted_txns_new(7);
- rd_kafka_aborted_txns_add(aborted_txns, 1, 42);
- rd_kafka_aborted_txns_add(aborted_txns, 1, 44);
- rd_kafka_aborted_txns_add(aborted_txns, 1, 10);
- rd_kafka_aborted_txns_add(aborted_txns, 1, 100);
- rd_kafka_aborted_txns_add(aborted_txns, 2, 11);
- rd_kafka_aborted_txns_add(aborted_txns, 2, 7);
- rd_kafka_aborted_txns_add(aborted_txns, 1, 3);
- rd_kafka_aborted_txns_sort(aborted_txns);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
- RD_UT_ASSERT(3 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 3",
- start_offset);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
- RD_UT_ASSERT(3 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 3",
- start_offset);
-
- start_offset =
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
- RD_UT_ASSERT(3 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 3",
- start_offset);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
- RD_UT_ASSERT(10 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 10",
- start_offset);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
- RD_UT_ASSERT(7 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 7",
- start_offset);
-
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
- RD_UT_ASSERT(42 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 42",
- start_offset);
-
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
- RD_UT_ASSERT(44 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 44",
- start_offset);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
- RD_UT_ASSERT(7 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 7",
- start_offset);
-
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
- RD_UT_ASSERT(11 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected 11",
- start_offset);
-
- /* error cases */
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 3);
- RD_UT_ASSERT(-1 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected -1",
- start_offset);
-
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
- rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
- RD_UT_ASSERT(-1 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected -1",
- start_offset);
-
- start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
- RD_UT_ASSERT(-1 == start_offset,
- "queried start offset was %" PRId64
- ", "
- "expected -1",
- start_offset);
-
- rd_kafka_aborted_txns_destroy(aborted_txns);
-
- RD_UT_PASS();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c
deleted file mode 100644
index beb36bfac..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c
+++ /dev/null
@@ -1,1445 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_header.h"
-#include "rdkafka_lz4.h"
-
-#if WITH_ZSTD
-#include "rdkafka_zstd.h"
-#endif
-
-#include "snappy.h"
-#include "rdvarint.h"
-#include "crc32c.h"
-
-
-/** @brief The maximum ProduceRequest ApiVersion supported by librdkafka */
-static const int16_t rd_kafka_ProduceRequest_max_version = 7;
-
-
-typedef struct rd_kafka_msgset_writer_s {
- rd_kafka_buf_t *msetw_rkbuf; /* Backing store buffer (refcounted)*/
-
- int16_t msetw_ApiVersion; /* ProduceRequest ApiVersion */
- int msetw_MsgVersion; /* MsgVersion to construct */
- int msetw_features; /* Protocol features to use */
- rd_kafka_compression_t msetw_compression; /**< Compression type */
- int msetw_msgcntmax; /* Max number of messages to send
- * in a batch. */
- size_t msetw_messages_len; /* Total size of Messages, with Message
- * framing but without
- * MessageSet header */
- size_t msetw_messages_kvlen; /* Total size of Message keys
- * and values */
-
- size_t msetw_MessageSetSize; /* Current MessageSetSize value */
- size_t msetw_of_MessageSetSize; /* offset of MessageSetSize */
- size_t msetw_of_start; /* offset of MessageSet */
-
- int msetw_relative_offsets; /* Bool: use relative offsets */
-
- /* For MessageSet v2 */
- int msetw_Attributes; /* MessageSet Attributes */
- int64_t msetw_MaxTimestamp; /* Maximum timestamp in batch */
- size_t msetw_of_CRC; /* offset of MessageSet.CRC */
-
- rd_kafka_msgbatch_t *msetw_batch; /**< Convenience pointer to
- * rkbuf_u.Produce.batch */
-
- /* First message information */
- struct {
- size_t of; /* rkbuf's first message position */
- int64_t timestamp;
- } msetw_firstmsg;
-
- rd_kafka_pid_t msetw_pid; /**< Idempotent producer's
- * current Producer Id */
- rd_kafka_broker_t *msetw_rkb; /* @warning Not a refcounted
- * reference! */
- rd_kafka_toppar_t *msetw_rktp; /* @warning Not a refcounted
- * reference! */
- rd_kafka_msgq_t *msetw_msgq; /**< Input message queue */
-} rd_kafka_msgset_writer_t;
-
-
-
-/**
- * @brief Select ApiVersion and MsgVersion to use based on broker's
- * feature compatibility.
- *
- * @returns -1 if a MsgVersion (or ApiVersion) could not be selected, else 0.
- * @locality broker thread
- */
-static RD_INLINE int
-rd_kafka_msgset_writer_select_MsgVersion(rd_kafka_msgset_writer_t *msetw) {
- rd_kafka_broker_t *rkb = msetw->msetw_rkb;
- rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
- const int16_t max_ApiVersion = rd_kafka_ProduceRequest_max_version;
- int16_t min_ApiVersion = 0;
- int feature;
- /* Map compression types to required feature and ApiVersion */
- static const struct {
- int feature;
- int16_t ApiVersion;
- } compr_req[RD_KAFKA_COMPRESSION_NUM] = {
- [RD_KAFKA_COMPRESSION_LZ4] = {RD_KAFKA_FEATURE_LZ4, 0},
-#if WITH_ZSTD
- [RD_KAFKA_COMPRESSION_ZSTD] = {RD_KAFKA_FEATURE_ZSTD, 7},
-#endif
- };
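- /* Editorial note: compression types absent from the table above
- * (e.g. gzip, snappy) are zero-initialized, i.e. they require no
- * broker feature and work from ApiVersion 0. */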
-
- if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)) {
- min_ApiVersion = 3;
- msetw->msetw_MsgVersion = 2;
- msetw->msetw_features |= feature;
- } else if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)) {
- min_ApiVersion = 2;
- msetw->msetw_MsgVersion = 1;
- msetw->msetw_features |= feature;
- } else {
- if ((feature =
- rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) {
- min_ApiVersion = 1;
- msetw->msetw_features |= feature;
- } else
- min_ApiVersion = 0;
- msetw->msetw_MsgVersion = 0;
- }
-
- msetw->msetw_compression = rktp->rktp_rkt->rkt_conf.compression_codec;
-
- /*
- * Check that the configured compression type is supported
- * by both client and broker, else disable compression.
- */
- if (msetw->msetw_compression &&
- (rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_Produce, 0,
- compr_req[msetw->msetw_compression].ApiVersion, NULL) == -1 ||
- (compr_req[msetw->msetw_compression].feature &&
- !(msetw->msetw_rkb->rkb_features &
- compr_req[msetw->msetw_compression].feature)))) {
- if (unlikely(
- rd_interval(&rkb->rkb_suppress.unsupported_compression,
- /* at most once per day */
- (rd_ts_t)86400 * 1000 * 1000, 0) > 0))
- rd_rkb_log(
- rkb, LOG_NOTICE, "COMPRESSION",
- "%.*s [%" PRId32
- "]: "
- "Broker does not support compression "
- "type %s: not compressing batch",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_compression2str(msetw->msetw_compression));
- else
- rd_rkb_dbg(
- rkb, MSG, "PRODUCE",
- "%.*s [%" PRId32
- "]: "
- "Broker does not support compression "
- "type %s: not compressing batch",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_compression2str(msetw->msetw_compression));
-
- msetw->msetw_compression = RD_KAFKA_COMPRESSION_NONE;
- } else {
- /* Broker supports this compression type. */
- msetw->msetw_features |=
- compr_req[msetw->msetw_compression].feature;
-
- if (min_ApiVersion <
- compr_req[msetw->msetw_compression].ApiVersion)
- min_ApiVersion =
- compr_req[msetw->msetw_compression].ApiVersion;
- }
-
- /* MsgVersion specific setup. */
- switch (msetw->msetw_MsgVersion) {
- case 2:
- msetw->msetw_relative_offsets = 1; /* OffsetDelta */
- break;
- case 1:
- if (msetw->msetw_compression != RD_KAFKA_COMPRESSION_NONE)
- msetw->msetw_relative_offsets = 1;
- break;
- }
-
- /* Set the highest ApiVersion supported by us and broker */
- msetw->msetw_ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_Produce, min_ApiVersion, max_ApiVersion, NULL);
-
- if (msetw->msetw_ApiVersion == -1) {
- rd_kafka_msg_t *rkm;
- /* This should never happen: it means the broker reported
- * no ProduceRequest versions at all, or none matching
- * our supported range. */
- rd_rkb_log(rkb, LOG_ERR, "PRODUCE",
- "%.*s [%" PRId32
- "]: "
- "No viable ProduceRequest ApiVersions (v%d..%d) "
- "supported by broker: unable to produce",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, min_ApiVersion,
- max_ApiVersion);
-
- /* Back off and retry in 5s */
- rkm = rd_kafka_msgq_first(msetw->msetw_msgq);
- rd_assert(rkm);
- rkm->rkm_u.producer.ts_backoff = rd_clock() + (5 * 1000 * 1000);
- return -1;
- }
-
- /* It should not be possible to get a lower version than requested,
- * otherwise the logic in this function is buggy. */
- rd_assert(msetw->msetw_ApiVersion >= min_ApiVersion);
-
- return 0;
-}
-
-
-/**
- * @brief Allocate buffer for messageset writer based on a previously set
- * up \p msetw.
- *
- * Allocate iovecs to hold all headers and messages,
- * and allocate enough space to allow copies of small messages.
- * The allocated size is the minimum of message.max.bytes
- * and queued_bytes + msgcntmax * msg_overhead.
- */
-static void rd_kafka_msgset_writer_alloc_buf(rd_kafka_msgset_writer_t *msetw) {
- rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
- size_t msg_overhead = 0;
- size_t hdrsize = 0;
- size_t msgsetsize = 0;
- size_t bufsize;
-
- rd_kafka_assert(NULL, !msetw->msetw_rkbuf);
-
- /* Calculate worst-case buffer size, produce header size,
- * message size, etc. This isn't critical but avoids unnecessary
- * extra allocations. The buffer will grow as needed if we get
- * this wrong.
- *
- * ProduceRequest headers go in one iovec:
- * ProduceRequest v0..2:
- * RequiredAcks + Timeout +
- * [Topic + [Partition + MessageSetSize]]
- *
- * ProduceRequest v3:
- * TransactionalId + RequiredAcks + Timeout +
- * [Topic + [Partition + MessageSetSize + MessageSet]]
- */
-
- /*
- * ProduceRequest header sizes
- */
- switch (msetw->msetw_ApiVersion) {
- case 7:
- case 6:
- case 5:
- case 4:
- case 3:
- /* Add TransactionalId */
- hdrsize += RD_KAFKAP_STR_SIZE(rk->rk_eos.transactional_id);
- /* FALLTHRU */
- case 0:
- case 1:
- case 2:
- hdrsize +=
- /* RequiredAcks + Timeout + TopicCnt */
- 2 + 4 + 4 +
- /* Topic */
- RD_KAFKAP_STR_SIZE(msetw->msetw_rktp->rktp_rkt->rkt_topic) +
- /* PartitionCnt + Partition + MessageSetSize */
- 4 + 4 + 4;
- msgsetsize += 4; /* MessageSetSize */
- break;
-
- default:
- RD_NOTREACHED();
- }
-
- /*
- * MsgVersion specific sizes:
- * - (Worst-case) Message overhead: message fields
- * - MessageSet header size
- */
- switch (msetw->msetw_MsgVersion) {
- case 0:
- /* MsgVer0 */
- msg_overhead = RD_KAFKAP_MESSAGE_V0_OVERHEAD;
- break;
- case 1:
- /* MsgVer1 */
- msg_overhead = RD_KAFKAP_MESSAGE_V1_OVERHEAD;
- break;
-
- case 2:
- /* MsgVer2 uses varints, we calculate for the worst-case. */
- msg_overhead += RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD;
-
- /* MessageSet header fields */
- msgsetsize += 8 /* BaseOffset */ + 4 /* Length */ +
- 4 /* PartitionLeaderEpoch */ +
- 1 /* Magic (MsgVersion) */ +
- 4 /* CRC (CRC32C) */ + 2 /* Attributes */ +
- 4 /* LastOffsetDelta */ + 8 /* BaseTimestamp */ +
- 8 /* MaxTimestamp */ + 8 /* ProducerId */ +
- 2 /* ProducerEpoch */ + 4 /* BaseSequence */ +
- 4 /* RecordCount */;
- break;
-
- default:
- RD_NOTREACHED();
- }
-
- /*
- * Calculate total buffer size to allocate
- */
- bufsize = hdrsize + msgsetsize;
-
- /* If copying for small payloads is enabled, allocate enough
- * space for each message to be copied based on this limit.
- */
- if (rk->rk_conf.msg_copy_max_size > 0) {
- size_t queued_bytes = rd_kafka_msgq_size(msetw->msetw_msgq);
- bufsize +=
- RD_MIN(queued_bytes, (size_t)rk->rk_conf.msg_copy_max_size *
- msetw->msetw_msgcntmax);
- }
-
- /* Add estimated per-message overhead */
- bufsize += msg_overhead * msetw->msetw_msgcntmax;
-
- /* Cap allocation at message.max.bytes */
- if (bufsize > (size_t)rk->rk_conf.max_msg_size)
- bufsize = (size_t)rk->rk_conf.max_msg_size;
-
- /*
- * Allocate iovecs to hold all headers and messages,
- * and allocate auxiliary space for message headers, etc.
- */
- msetw->msetw_rkbuf =
- rd_kafka_buf_new_request(msetw->msetw_rkb, RD_KAFKAP_Produce,
- msetw->msetw_msgcntmax / 2 + 10, bufsize);
-
- rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf, msetw->msetw_ApiVersion,
- msetw->msetw_features);
-}
-
-
-/**
- * @brief Write the MessageSet header.
- * @remark Must only be called for MsgVersion 2
- */
-static void rd_kafka_msgset_writer_write_MessageSet_v2_header(
- rd_kafka_msgset_writer_t *msetw) {
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-
- rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3);
- rd_kafka_assert(NULL, msetw->msetw_MsgVersion == 2);
-
- /* BaseOffset (also store the offset to the start of
- * the messageset header fields) */
- msetw->msetw_of_start = rd_kafka_buf_write_i64(rkbuf, 0);
-
- /* Length: updated later */
- rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* PartitionLeaderEpoch (KIP-101) */
- rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* Magic (MsgVersion) */
- rd_kafka_buf_write_i8(rkbuf, msetw->msetw_MsgVersion);
-
- /* CRC (CRC32C): updated later.
- * The CRC can only be calculated after the entire MessageSet
- * and its messages have been constructed and the remaining
- * header fields updated.
- * Save the offset of this position so it can be updated later. */
- msetw->msetw_of_CRC = rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* Attributes: updated later */
- rd_kafka_buf_write_i16(rkbuf, 0);
-
- /* LastOffsetDelta: updated later */
- rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* BaseTimestamp: updated later */
- rd_kafka_buf_write_i64(rkbuf, 0);
-
- /* MaxTimestamp: updated later */
- rd_kafka_buf_write_i64(rkbuf, 0);
-
- /* ProducerId */
- rd_kafka_buf_write_i64(rkbuf, msetw->msetw_pid.id);
-
- /* ProducerEpoch */
- rd_kafka_buf_write_i16(rkbuf, msetw->msetw_pid.epoch);
-
- /* BaseSequence: updated later in case of Idempotent Producer */
- rd_kafka_buf_write_i32(rkbuf, -1);
-
- /* RecordCount: updated later */
- rd_kafka_buf_write_i32(rkbuf, 0);
-}
-
-
-/**
- * @brief Write ProduceRequest headers.
- * When this function returns the msgset is ready for
- * writing individual messages.
- * msetw_MessageSetSize will have been set to the size of the
- * MessageSet header.
- */
-static void
-rd_kafka_msgset_writer_write_Produce_header(rd_kafka_msgset_writer_t *msetw) {
-
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
- rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
- rd_kafka_topic_t *rkt = msetw->msetw_rktp->rktp_rkt;
-
- /* V3: TransactionalId */
- if (msetw->msetw_ApiVersion >= 3)
- rd_kafka_buf_write_kstr(rkbuf, rk->rk_eos.transactional_id);
-
- /* RequiredAcks */
- rd_kafka_buf_write_i16(rkbuf, rkt->rkt_conf.required_acks);
-
- /* Timeout */
- rd_kafka_buf_write_i32(rkbuf, rkt->rkt_conf.request_timeout_ms);
-
- /* TopicArrayCnt */
- rd_kafka_buf_write_i32(rkbuf, 1);
-
- /* Insert topic */
- rd_kafka_buf_write_kstr(rkbuf, rkt->rkt_topic);
-
- /* PartitionArrayCnt */
- rd_kafka_buf_write_i32(rkbuf, 1);
-
- /* Partition */
- rd_kafka_buf_write_i32(rkbuf, msetw->msetw_rktp->rktp_partition);
-
- /* MessageSetSize: Will be finalized later*/
- msetw->msetw_of_MessageSetSize = rd_kafka_buf_write_i32(rkbuf, 0);
-
- if (msetw->msetw_MsgVersion == 2) {
- /* MessageSet v2 header */
- rd_kafka_msgset_writer_write_MessageSet_v2_header(msetw);
- msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V2_SIZE;
- } else {
- /* Older MessageSet */
- msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V0_SIZE;
- }
-}
-
-
-/**
- * @brief Initialize a ProduceRequest MessageSet writer for
- * the given broker and partition.
- *
- * A new buffer will be allocated to fit the pending messages in queue.
- *
- * @returns the maximum number of messages to write to the batch,
- * 0 if the queue is empty, or -1 on error.
- *
- * @remark This currently constructs the entire ProduceRequest, containing
- * a single outer MessageSet for a single partition.
- *
- * @locality broker thread
- */
-static int rd_kafka_msgset_writer_init(rd_kafka_msgset_writer_t *msetw,
- rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq,
- rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid) {
- int msgcnt = rd_kafka_msgq_len(rkmq);
-
- if (msgcnt == 0)
- return 0;
-
- memset(msetw, 0, sizeof(*msetw));
-
- msetw->msetw_rktp = rktp;
- msetw->msetw_rkb = rkb;
- msetw->msetw_msgq = rkmq;
- msetw->msetw_pid = pid;
-
- /* Max number of messages to send in a batch,
- * limited by current queue size or configured batch size,
- * whichever is lower. */
- msetw->msetw_msgcntmax =
- RD_MIN(msgcnt, rkb->rkb_rk->rk_conf.batch_num_messages);
- rd_dassert(msetw->msetw_msgcntmax > 0);
-
- /* Select MsgVersion to use */
- if (rd_kafka_msgset_writer_select_MsgVersion(msetw) == -1)
- return -1;
-
- /* Allocate backing buffer */
- rd_kafka_msgset_writer_alloc_buf(msetw);
-
- /* Construct first part of Produce header + MessageSet header */
- rd_kafka_msgset_writer_write_Produce_header(msetw);
-
- /* The current buffer position is now where the first message
- * is located.
- * Record the current buffer position so it can be rewound later
- * in case of compression. */
- msetw->msetw_firstmsg.of =
- rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf);
-
- rd_kafka_msgbatch_init(&msetw->msetw_rkbuf->rkbuf_u.Produce.batch, rktp,
- pid, epoch_base_msgid);
- msetw->msetw_batch = &msetw->msetw_rkbuf->rkbuf_u.Produce.batch;
-
- return msetw->msetw_msgcntmax;
-}
-
-
-
-/**
- * @brief Copy or link message payload to buffer.
- */
-static RD_INLINE void
-rd_kafka_msgset_writer_write_msg_payload(rd_kafka_msgset_writer_t *msetw,
- const rd_kafka_msg_t *rkm,
- void (*free_cb)(void *)) {
- const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
-
- /* If payload is below the copy limit and there is still
- * room in the buffer we'll copy the payload to the buffer,
- * otherwise we push a reference to the memory. */
- if (rkm->rkm_len <= (size_t)rk->rk_conf.msg_copy_max_size &&
- rd_buf_write_remains(&rkbuf->rkbuf_buf) > rkm->rkm_len) {
- rd_kafka_buf_write(rkbuf, rkm->rkm_payload, rkm->rkm_len);
- if (free_cb)
- free_cb(rkm->rkm_payload);
- } else
- rd_kafka_buf_push(rkbuf, rkm->rkm_payload, rkm->rkm_len,
- free_cb);
-}
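-
-/* Editorial note on the trade-off above: copying small payloads keeps
- * them contiguous with the protocol framing (fewer iovecs per request),
- * while large payloads are pushed as zero-copy references that are
- * freed, via free_cb, once the buffer is destroyed. */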
-
-
-/**
- * @brief Write message headers to buffer.
- *
- * @remark The enveloping HeaderCount varint must already have been written.
- * @returns the number of bytes written to msetw->msetw_rkbuf
- */
-static size_t
-rd_kafka_msgset_writer_write_msg_headers(rd_kafka_msgset_writer_t *msetw,
- const rd_kafka_headers_t *hdrs) {
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
- const rd_kafka_header_t *hdr;
- int i;
- size_t start_pos = rd_buf_write_pos(&rkbuf->rkbuf_buf);
- size_t written;
-
- RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) {
- rd_kafka_buf_write_varint(rkbuf, hdr->rkhdr_name_size);
- rd_kafka_buf_write(rkbuf, hdr->rkhdr_name,
- hdr->rkhdr_name_size);
- rd_kafka_buf_write_varint(
- rkbuf,
- hdr->rkhdr_value ? (int64_t)hdr->rkhdr_value_size : -1);
- rd_kafka_buf_write(rkbuf, hdr->rkhdr_value,
- hdr->rkhdr_value_size);
- }
-
- written = rd_buf_write_pos(&rkbuf->rkbuf_buf) - start_pos;
- rd_dassert(written == hdrs->rkhdrs_ser_size);
-
- return written;
-}
-
-
-
-/**
- * @brief Write message to messageset buffer with MsgVersion 0 or 1.
- * @returns the number of bytes written.
- */
-static size_t
-rd_kafka_msgset_writer_write_msg_v0_1(rd_kafka_msgset_writer_t *msetw,
- rd_kafka_msg_t *rkm,
- int64_t Offset,
- int8_t MsgAttributes,
- void (*free_cb)(void *)) {
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
- size_t MessageSize;
- size_t of_Crc;
-
- /*
- * MessageSet's (v0 and v1) per-Message header.
- */
-
- /* Offset (only relevant for compressed messages on MsgVersion v1) */
- rd_kafka_buf_write_i64(rkbuf, Offset);
-
- /* MessageSize */
- MessageSize = 4 + 1 + 1 + /* Crc+MagicByte+Attributes */
- 4 /* KeyLength */ + rkm->rkm_key_len +
- 4 /* ValueLength */ + rkm->rkm_len;
-
- if (msetw->msetw_MsgVersion == 1)
- MessageSize += 8; /* Timestamp i64 */
-
- rd_kafka_buf_write_i32(rkbuf, (int32_t)MessageSize);
-
- /*
- * Message
- */
- /* Crc: will be updated later */
- of_Crc = rd_kafka_buf_write_i32(rkbuf, 0);
-
- /* Start Crc calculation of all buf writes. */
- rd_kafka_buf_crc_init(rkbuf);
-
- /* MagicByte */
- rd_kafka_buf_write_i8(rkbuf, msetw->msetw_MsgVersion);
-
- /* Attributes */
- rd_kafka_buf_write_i8(rkbuf, MsgAttributes);
-
- /* V1: Timestamp */
- if (msetw->msetw_MsgVersion == 1)
- rd_kafka_buf_write_i64(rkbuf, rkm->rkm_timestamp);
-
- /* Message Key */
- rd_kafka_buf_write_bytes(rkbuf, rkm->rkm_key, rkm->rkm_key_len);
-
- /* Write or copy Value/payload */
- if (rkm->rkm_payload) {
- rd_kafka_buf_write_i32(rkbuf, (int32_t)rkm->rkm_len);
- rd_kafka_msgset_writer_write_msg_payload(msetw, rkm, free_cb);
- } else
- rd_kafka_buf_write_i32(rkbuf, RD_KAFKAP_BYTES_LEN_NULL);
-
- /* Finalize Crc */
- rd_kafka_buf_update_u32(rkbuf, of_Crc,
- rd_kafka_buf_crc_finalize(rkbuf));
-
-
- /* Return written message size */
- return 8 /*Offset*/ + 4 /*MessageSize*/ + MessageSize;
-}
-
-/**
- * @brief Write message to messageset buffer with MsgVersion 2.
- * @returns the number of bytes written.
- */
-static size_t
-rd_kafka_msgset_writer_write_msg_v2(rd_kafka_msgset_writer_t *msetw,
- rd_kafka_msg_t *rkm,
- int64_t Offset,
- int8_t MsgAttributes,
- void (*free_cb)(void *)) {
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
- size_t MessageSize = 0;
- char varint_Length[RD_UVARINT_ENC_SIZEOF(int32_t)];
- char varint_TimestampDelta[RD_UVARINT_ENC_SIZEOF(int64_t)];
- char varint_OffsetDelta[RD_UVARINT_ENC_SIZEOF(int64_t)];
- char varint_KeyLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
- char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
- char varint_HeaderCount[RD_UVARINT_ENC_SIZEOF(int32_t)];
- size_t sz_Length;
- size_t sz_TimestampDelta;
- size_t sz_OffsetDelta;
- size_t sz_KeyLen;
- size_t sz_ValueLen;
- size_t sz_HeaderCount;
- int HeaderCount = 0;
- size_t HeaderSize = 0;
-
- if (rkm->rkm_headers) {
- HeaderCount = rkm->rkm_headers->rkhdrs_list.rl_cnt;
- HeaderSize = rkm->rkm_headers->rkhdrs_ser_size;
- }
-
- /* All varints, except for Length, need to be pre-built
- * so that the Length field can be set correctly and thus have
- * correct varint encoded width. */
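- /* Editorial example: Length is itself a zig-zag varint whose
- * encoded width depends on its value (a MessageSize of 200
- * encodes into 2 bytes, 20000 into 3), so it can only be
- * encoded once all the other field sizes are known. */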
-
- sz_TimestampDelta = rd_uvarint_enc_i64(
- varint_TimestampDelta, sizeof(varint_TimestampDelta),
- rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp);
- sz_OffsetDelta = rd_uvarint_enc_i64(varint_OffsetDelta,
- sizeof(varint_OffsetDelta), Offset);
- sz_KeyLen = rd_uvarint_enc_i32(varint_KeyLen, sizeof(varint_KeyLen),
- rkm->rkm_key
- ? (int32_t)rkm->rkm_key_len
- : (int32_t)RD_KAFKAP_BYTES_LEN_NULL);
- sz_ValueLen = rd_uvarint_enc_i32(
- varint_ValueLen, sizeof(varint_ValueLen),
- rkm->rkm_payload ? (int32_t)rkm->rkm_len
- : (int32_t)RD_KAFKAP_BYTES_LEN_NULL);
- sz_HeaderCount =
- rd_uvarint_enc_i32(varint_HeaderCount, sizeof(varint_HeaderCount),
- (int32_t)HeaderCount);
-
- /* Calculate MessageSize excluding the Length field itself:
- * this value is what gets written as Length, and the encoded
- * size of Length is added to MessageSize afterwards. */
- MessageSize = 1 /* MsgAttributes */ + sz_TimestampDelta +
- sz_OffsetDelta + sz_KeyLen + rkm->rkm_key_len +
- sz_ValueLen + rkm->rkm_len + sz_HeaderCount + HeaderSize;
-
- /* Length */
- sz_Length = rd_uvarint_enc_i64(varint_Length, sizeof(varint_Length),
- MessageSize);
- rd_kafka_buf_write(rkbuf, varint_Length, sz_Length);
- MessageSize += sz_Length;
-
- /* Attributes: the MsgAttributes argument is loosely based on
- * MsgVer0 and does not apply to MsgVer2, so write 0. */
- rd_kafka_buf_write_i8(rkbuf, 0);
-
- /* TimestampDelta */
- rd_kafka_buf_write(rkbuf, varint_TimestampDelta, sz_TimestampDelta);
-
- /* OffsetDelta */
- rd_kafka_buf_write(rkbuf, varint_OffsetDelta, sz_OffsetDelta);
-
- /* KeyLen */
- rd_kafka_buf_write(rkbuf, varint_KeyLen, sz_KeyLen);
-
- /* Key (if any) */
- if (rkm->rkm_key)
- rd_kafka_buf_write(rkbuf, rkm->rkm_key, rkm->rkm_key_len);
-
- /* ValueLen */
- rd_kafka_buf_write(rkbuf, varint_ValueLen, sz_ValueLen);
-
- /* Write or copy Value/payload */
- if (rkm->rkm_payload)
- rd_kafka_msgset_writer_write_msg_payload(msetw, rkm, free_cb);
-
- /* HeaderCount */
- rd_kafka_buf_write(rkbuf, varint_HeaderCount, sz_HeaderCount);
-
- /* Headers array */
- if (rkm->rkm_headers)
- rd_kafka_msgset_writer_write_msg_headers(msetw,
- rkm->rkm_headers);
-
- /* Return written message size */
- return MessageSize;
-}
-
-
-/**
- * @brief Write message to messageset buffer.
- * @returns the number of bytes written.
- */
-static size_t rd_kafka_msgset_writer_write_msg(rd_kafka_msgset_writer_t *msetw,
- rd_kafka_msg_t *rkm,
- int64_t Offset,
- int8_t MsgAttributes,
- void (*free_cb)(void *)) {
- size_t outlen;
- size_t (*writer[])(rd_kafka_msgset_writer_t *, rd_kafka_msg_t *,
- int64_t, int8_t, void (*)(void *)) = {
- [0] = rd_kafka_msgset_writer_write_msg_v0_1,
- [1] = rd_kafka_msgset_writer_write_msg_v0_1,
- [2] = rd_kafka_msgset_writer_write_msg_v2};
- size_t actual_written;
- size_t pre_pos;
-
- if (likely(rkm->rkm_timestamp))
- MsgAttributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME;
-
- pre_pos = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf);
-
- outlen = writer[msetw->msetw_MsgVersion](msetw, rkm, Offset,
- MsgAttributes, free_cb);
-
- actual_written =
- rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - pre_pos;
- rd_assert(outlen <=
- rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion));
- rd_assert(outlen == actual_written);
-
- return outlen;
-}
-
-/**
- * @brief Write as many messages as will fit from the given message
- * queue to the messageset.
- *
- * May not write any messages.
- *
- * @returns 1 on success or 0 on error.
- */
-static int rd_kafka_msgset_writer_write_msgq(rd_kafka_msgset_writer_t *msetw,
- rd_kafka_msgq_t *rkmq) {
- rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
- rd_kafka_broker_t *rkb = msetw->msetw_rkb;
- size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf);
- size_t max_msg_size =
- RD_MIN((size_t)msetw->msetw_rkb->rkb_rk->rk_conf.max_msg_size,
- (size_t)msetw->msetw_rkb->rkb_rk->rk_conf.batch_size);
- rd_ts_t int_latency_base;
- rd_ts_t MaxTimestamp = 0;
- rd_kafka_msg_t *rkm;
- int msgcnt = 0;
- const rd_ts_t now = rd_clock();
-
- /* Internal latency calculation base.
- * Uses rkm_ts_timeout which is enqueue time + timeout */
- int_latency_base =
- now + ((rd_ts_t)rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000);
-
- /* Acquire BaseTimestamp from first message. */
- rkm = TAILQ_FIRST(&rkmq->rkmq_msgs);
- rd_kafka_assert(NULL, rkm);
- msetw->msetw_firstmsg.timestamp = rkm->rkm_timestamp;
-
- rd_kafka_msgbatch_set_first_msg(msetw->msetw_batch, rkm);
-
- /*
- * Write as many messages as possible until buffer is full
- * or limit reached.
- */
- do {
- if (unlikely(msetw->msetw_batch->last_msgid &&
- msetw->msetw_batch->last_msgid <
- rkm->rkm_u.producer.msgid)) {
- rd_rkb_dbg(rkb, MSG, "PRODUCE",
- "%.*s [%" PRId32
- "]: "
- "Reconstructed MessageSet "
- "(%d message(s), %" PRIusz
- " bytes, "
- "MsgIds %" PRIu64 "..%" PRIu64 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, msgcnt, len,
- msetw->msetw_batch->first_msgid,
- msetw->msetw_batch->last_msgid);
- break;
- }
-
- /* Check if there is enough space in the current messageset
- * to add this message.
- * Since calculating the total size of a request at produce()
- * time is tricky (we don't know the protocol version or
- * MsgVersion that will be used), we allow a messageset to
- * overshoot the message.max.bytes limit by one message to
- * avoid getting stuck here.
- * The actual messageset size is enforced by the broker. */
- if (unlikely(
- msgcnt == msetw->msetw_msgcntmax ||
- (msgcnt > 0 && len + rd_kafka_msg_wire_size(
- rkm, msetw->msetw_MsgVersion) >
- max_msg_size))) {
- rd_rkb_dbg(rkb, MSG, "PRODUCE",
- "%.*s [%" PRId32
- "]: "
- "No more space in current MessageSet "
- "(%i message(s), %" PRIusz " bytes)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, msgcnt, len);
- break;
- }
-
- if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
- /* Stop accumulation when we've reached
- * a message with a retry backoff in the future */
- break;
- }
-
- /* Move message to buffer's queue */
- rd_kafka_msgq_deq(rkmq, rkm, 1);
- rd_kafka_msgq_enq(&msetw->msetw_batch->msgq, rkm);
-
- msetw->msetw_messages_kvlen += rkm->rkm_len + rkm->rkm_key_len;
-
- /* Add internal latency metrics */
- rd_avg_add(&rkb->rkb_avg_int_latency,
- int_latency_base - rkm->rkm_ts_timeout);
-
- /* MessageSet v2's .MaxTimestamp field */
- if (unlikely(MaxTimestamp < rkm->rkm_timestamp))
- MaxTimestamp = rkm->rkm_timestamp;
-
- /* Write message to buffer */
- len += rd_kafka_msgset_writer_write_msg(msetw, rkm, msgcnt, 0,
- NULL);
-
- msgcnt++;
-
- } while ((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)));
-
- msetw->msetw_MaxTimestamp = MaxTimestamp;
-
- /* Idempotent Producer:
- * When reconstructing a batch to retry make sure
- * the original message sequence span matches identically
- * or we can't guarantee exactly-once delivery.
- * If this check fails we raise a fatal error since
- * it is unrecoverable and most likely caused by a bug
- * in the client implementation.
- * This should not be considered an abortable error for
- * the transactional producer. */
- if (msgcnt > 0 && msetw->msetw_batch->last_msgid) {
- rd_kafka_msg_t *lastmsg;
-
- lastmsg = rd_kafka_msgq_last(&msetw->msetw_batch->msgq);
- rd_assert(lastmsg);
-
- if (unlikely(lastmsg->rkm_u.producer.msgid !=
- msetw->msetw_batch->last_msgid)) {
- rd_kafka_set_fatal_error(
- rkb->rkb_rk, RD_KAFKA_RESP_ERR__INCONSISTENT,
- "Unable to reconstruct MessageSet "
- "(currently with %d message(s)) "
- "with msgid range %" PRIu64 "..%" PRIu64
- ": "
- "last message added has msgid %" PRIu64
- ": "
- "unable to guarantee consistency",
- msgcnt, msetw->msetw_batch->first_msgid,
- msetw->msetw_batch->last_msgid,
- lastmsg->rkm_u.producer.msgid);
- return 0;
- }
- }
- return 1;
-}
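-
-/* Editorial sketch (illustration only): the size guard in the loop above
- * admits the first message unconditionally, so a single oversized message
- * cannot wedge the queue, and otherwise stops once the next message would
- * exceed min(message.max.bytes, batch.size): */
-#if 0
-static int batch_has_room(size_t cur_len, size_t next_wire_size, int msgcnt,
-                          int msgcnt_max, size_t max_size) {
-        if (msgcnt == msgcnt_max)
-                return 0; /* message count limit reached */
-        /* First message always fits; the broker enforces the real limit. */
-        return msgcnt == 0 || cur_len + next_wire_size <= max_size;
-}
-#endif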
-
-
-#if WITH_ZLIB
-/**
- * @brief Compress messageset using gzip/zlib
- */
-static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw,
- rd_slice_t *slice,
- struct iovec *ciov) {
-
- rd_kafka_broker_t *rkb = msetw->msetw_rkb;
- rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
- z_stream strm;
- size_t len = rd_slice_remains(slice);
- const void *p;
- size_t rlen;
- int r;
- int comp_level =
- msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level;
-
- memset(&strm, 0, sizeof(strm));
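- /* windowBits 15+16 selects the maximum (32 KB) window plus a
- * gzip (RFC 1952) wrapper rather than a raw zlib stream;
- * memLevel 8 is the zlib default. */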
- r = deflateInit2(&strm, comp_level, Z_DEFLATED, 15 + 16, 8,
- Z_DEFAULT_STRATEGY);
- if (r != Z_OK) {
- rd_rkb_log(rkb, LOG_ERR, "GZIP",
- "Failed to initialize gzip for "
- "compressing %" PRIusz
- " bytes in "
- "topic %.*s [%" PRId32
- "]: %s (%i): "
- "sending uncompressed",
- len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, strm.msg ? strm.msg : "", r);
- return -1;
- }
-
- /* Calculate maximum compressed size and
- * allocate an output buffer accordingly, being
- * prefixed with the Message header. */
- ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice));
- ciov->iov_base = rd_malloc(ciov->iov_len);
-
- strm.next_out = (void *)ciov->iov_base;
- strm.avail_out = (uInt)ciov->iov_len;
-
- /* Iterate through each segment and compress it. */
- while ((rlen = rd_slice_reader(slice, &p))) {
-
- strm.next_in = (void *)p;
- strm.avail_in = (uInt)rlen;
-
- /* Compress message */
- if ((r = deflate(&strm, Z_NO_FLUSH)) != Z_OK) {
- rd_rkb_log(rkb, LOG_ERR, "GZIP",
- "Failed to gzip-compress "
- "%" PRIusz " bytes (%" PRIusz
- " total) for "
- "topic %.*s [%" PRId32
- "]: "
- "%s (%i): "
- "sending uncompressed",
- rlen, len,
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- strm.msg ? strm.msg : "", r);
- deflateEnd(&strm);
- rd_free(ciov->iov_base);
- return -1;
- }
-
- rd_kafka_assert(rkb->rkb_rk, strm.avail_in == 0);
- }
-
- /* Finish the compression */
- if ((r = deflate(&strm, Z_FINISH)) != Z_STREAM_END) {
- rd_rkb_log(rkb, LOG_ERR, "GZIP",
- "Failed to finish gzip compression "
- "of %" PRIusz
- " bytes for "
- "topic %.*s [%" PRId32
- "]: "
- "%s (%i): "
- "sending uncompressed",
- len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, strm.msg ? strm.msg : "", r);
- deflateEnd(&strm);
- rd_free(ciov->iov_base);
- return -1;
- }
-
- ciov->iov_len = strm.total_out;
-
- /* Deinitialize compression */
- deflateEnd(&strm);
-
- return 0;
-}
-#endif
-
-
-#if WITH_SNAPPY
-/**
- * @brief Compress messageset using Snappy
- */
-static int
-rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw,
- rd_slice_t *slice,
- struct iovec *ciov) {
- rd_kafka_broker_t *rkb = msetw->msetw_rkb;
- rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
- struct iovec *iov;
- size_t iov_max, iov_cnt;
- struct snappy_env senv;
- size_t len = rd_slice_remains(slice);
- int r;
-
- /* Initialize snappy compression environment */
- rd_kafka_snappy_init_env_sg(&senv, 1 /*iov enable*/);
-
- /* Calculate maximum compressed size and
- * allocate an output buffer accordingly. */
- ciov->iov_len = rd_kafka_snappy_max_compressed_length(len);
- ciov->iov_base = rd_malloc(ciov->iov_len);
-
- iov_max = slice->buf->rbuf_segment_cnt;
- iov = rd_alloca(sizeof(*iov) * iov_max);
-
- rd_slice_get_iov(slice, iov, &iov_cnt, iov_max, len);
-
- /* Compress each message */
- if ((r = rd_kafka_snappy_compress_iov(&senv, iov, iov_cnt, len,
- ciov)) != 0) {
- rd_rkb_log(rkb, LOG_ERR, "SNAPPY",
- "Failed to snappy-compress "
- "%" PRIusz
- " bytes for "
- "topic %.*s [%" PRId32
- "]: %s: "
- "sending uncompressed",
- len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_strerror(-r));
- rd_free(ciov->iov_base);
- return -1;
- }
-
- /* rd_free snappy environment */
- rd_kafka_snappy_free_env(&senv);
-
- return 0;
-}
-#endif
-
-/**
- * @brief Compress messageset using LZ4F
- */
-static int rd_kafka_msgset_writer_compress_lz4(rd_kafka_msgset_writer_t *msetw,
- rd_slice_t *slice,
- struct iovec *ciov) {
- rd_kafka_resp_err_t err;
- int comp_level =
- msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level;
- err = rd_kafka_lz4_compress(msetw->msetw_rkb,
- /* MsgVersion >= 1 uses the correct LZ4F header
- * checksum (HC, see KIP-57); MsgVersion 0 retains
- * the legacy, broken framing. */
- msetw->msetw_MsgVersion >= 1 ? 1 : 0,
- comp_level, slice, &ciov->iov_base,
- &ciov->iov_len);
- return (err ? -1 : 0);
-}
-
-#if WITH_ZSTD
-/**
- * @brief Compress messageset using ZSTD
- */
-static int rd_kafka_msgset_writer_compress_zstd(rd_kafka_msgset_writer_t *msetw,
- rd_slice_t *slice,
- struct iovec *ciov) {
- rd_kafka_resp_err_t err;
- int comp_level =
- msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level;
- err = rd_kafka_zstd_compress(msetw->msetw_rkb, comp_level, slice,
- &ciov->iov_base, &ciov->iov_len);
- return (err ? -1 : 0);
-}
-#endif
-
-/**
- * @brief Compress the message set.
- * @param outlenp in: total uncompressed messages size,
- * out (on success): returns the compressed buffer size.
- * @returns 0 on success or -1 if compression failed.
- * @remark Compression failures are not critical, we'll just send the
- * messageset uncompressed.
- */
-static int rd_kafka_msgset_writer_compress(rd_kafka_msgset_writer_t *msetw,
- size_t *outlenp) {
- rd_buf_t *rbuf = &msetw->msetw_rkbuf->rkbuf_buf;
- rd_slice_t slice;
- size_t len = *outlenp;
- struct iovec ciov = RD_ZERO_INIT; /* Compressed output buffer */
- int r = -1;
- size_t outlen;
-
- rd_assert(rd_buf_len(rbuf) >= msetw->msetw_firstmsg.of + len);
-
- /* Create buffer slice from firstmsg and onwards */
- r = rd_slice_init(&slice, rbuf, msetw->msetw_firstmsg.of, len);
- rd_assert(r == 0 || !*"invalid firstmsg position");
-
- switch (msetw->msetw_compression) {
-#if WITH_ZLIB
- case RD_KAFKA_COMPRESSION_GZIP:
- r = rd_kafka_msgset_writer_compress_gzip(msetw, &slice, &ciov);
- break;
-#endif
-
-#if WITH_SNAPPY
- case RD_KAFKA_COMPRESSION_SNAPPY:
- r = rd_kafka_msgset_writer_compress_snappy(msetw, &slice,
- &ciov);
- break;
-#endif
-
- case RD_KAFKA_COMPRESSION_LZ4:
- r = rd_kafka_msgset_writer_compress_lz4(msetw, &slice, &ciov);
- break;
-
-#if WITH_ZSTD
- case RD_KAFKA_COMPRESSION_ZSTD:
- r = rd_kafka_msgset_writer_compress_zstd(msetw, &slice, &ciov);
- break;
-#endif
-
- default:
- rd_kafka_assert(NULL,
- !*"notreached: unsupported compression.codec");
- break;
- }
-
- if (r == -1) /* Compression failed, send uncompressed */
- return -1;
-
-
- if (unlikely(ciov.iov_len > len)) {
- /* If the compressed data is larger than the uncompressed size
- * then throw it away and send as uncompressed. */
- rd_free(ciov.iov_base);
- return -1;
- }
-
- /* Set compression codec in MessageSet.Attributes */
- msetw->msetw_Attributes |= msetw->msetw_compression;
-
- /* Rewind rkbuf to the pre-message checkpoint (firstmsg)
- * and replace the original message(s) with the compressed payload,
- * possibly with version-dependent enveloping. */
- rd_buf_write_seek(rbuf, msetw->msetw_firstmsg.of);
-
- rd_kafka_assert(msetw->msetw_rkb->rkb_rk, ciov.iov_len < INT32_MAX);
-
- if (msetw->msetw_MsgVersion == 2) {
- /* MsgVersion 2 has no inner MessageSet header or wrapping
- * for compressed messages, just the messages back-to-back,
- * so we can push the compressed memory directly to the
- * buffer without wrapping it. */
- rd_buf_push(rbuf, ciov.iov_base, ciov.iov_len, rd_free);
- outlen = ciov.iov_len;
-
- } else {
- /* Older MessageSets envelope/wrap the compressed MessageSet
- * in an outer Message. */
- rd_kafka_msg_t rkm = {.rkm_len = ciov.iov_len,
- .rkm_payload = ciov.iov_base,
- .rkm_timestamp =
- msetw->msetw_firstmsg.timestamp};
- outlen = rd_kafka_msgset_writer_write_msg(
- msetw, &rkm, 0, msetw->msetw_compression,
- rd_free /*free for ciov.iov_base*/);
- }
-
- *outlenp = outlen;
-
- return 0;
-}
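-
-/* Editorial note (illustration only): for MsgVersion 2 only the records are
- * compressed; the fixed 61-byte RecordBatch header (RD_KAFKAP_MSGSET_V2_SIZE)
- * stays uncompressed so the broker can read Attributes, including the codec
- * bits set above, without decompressing the payload. */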
-
-
-
-/**
- * @brief Calculate MessageSet v2 CRC (CRC32C) when messageset is complete.
- */
-static void
-rd_kafka_msgset_writer_calc_crc_v2(rd_kafka_msgset_writer_t *msetw) {
- int32_t crc;
- rd_slice_t slice;
- int r;
-
- r = rd_slice_init(&slice, &msetw->msetw_rkbuf->rkbuf_buf,
- msetw->msetw_of_CRC + 4,
- rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
- msetw->msetw_of_CRC - 4);
- rd_assert(!r && *"slice_init failed");
-
- /* Calculate CRC32C (Castagnoli) over everything
- * following the CRC field itself (Attributes..end). */
- crc = rd_slice_crc32c(&slice);
-
- /* Update CRC at MessageSet v2 CRC offset */
- rd_kafka_buf_update_i32(msetw->msetw_rkbuf, msetw->msetw_of_CRC, crc);
-}
-
-/**
- * @brief Finalize MessageSet v2 header fields.
- */
-static void rd_kafka_msgset_writer_finalize_MessageSet_v2_header(
- rd_kafka_msgset_writer_t *msetw) {
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
- int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq);
-
- rd_kafka_assert(NULL, msgcnt > 0);
- rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3);
-
- msetw->msetw_MessageSetSize =
- RD_KAFKAP_MSGSET_V2_SIZE + msetw->msetw_messages_len;
-
- /* MessageSet.Length is the same as
- * MessageSetSize minus field widths for FirstOffset+Length */
- rd_kafka_buf_update_i32(
- rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Length,
- (int32_t)msetw->msetw_MessageSetSize - (8 + 4));
-
- msetw->msetw_Attributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME;
-
- if (rd_kafka_is_transactional(msetw->msetw_rkb->rkb_rk))
- msetw->msetw_Attributes |=
- RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL;
-
- rd_kafka_buf_update_i16(
- rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Attributes,
- msetw->msetw_Attributes);
-
- rd_kafka_buf_update_i32(rkbuf,
- msetw->msetw_of_start +
- RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta,
- msgcnt - 1);
-
- rd_kafka_buf_update_i64(
- rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp,
- msetw->msetw_firstmsg.timestamp);
-
- rd_kafka_buf_update_i64(
- rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp,
- msetw->msetw_MaxTimestamp);
-
- rd_kafka_buf_update_i32(
- rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseSequence,
- msetw->msetw_batch->first_seq);
-
- rd_kafka_buf_update_i32(
- rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_RecordCount,
- msgcnt);
-
- rd_kafka_msgset_writer_calc_crc_v2(msetw);
-}
-
-
-
-/**
- * @brief Finalize the MessageSet header, if applicable.
- */
-static void
-rd_kafka_msgset_writer_finalize_MessageSet(rd_kafka_msgset_writer_t *msetw) {
- rd_dassert(msetw->msetw_messages_len > 0);
-
- if (msetw->msetw_MsgVersion == 2)
- rd_kafka_msgset_writer_finalize_MessageSet_v2_header(msetw);
- else
- msetw->msetw_MessageSetSize =
- RD_KAFKAP_MSGSET_V0_SIZE + msetw->msetw_messages_len;
-
- /* Update MessageSetSize */
- rd_kafka_buf_update_i32(msetw->msetw_rkbuf,
- msetw->msetw_of_MessageSetSize,
- (int32_t)msetw->msetw_MessageSetSize);
-}
-
-
-/**
- * @brief Finalize the messageset - call when no more messages are to be
- * added to the messageset.
- *
- * Will compress, update final values, CRCs, etc.
- *
- * The messageset writer is destroyed and the buffer is returned
- * and ready to be transmitted.
- *
- * @param MessageSetSizep will be set to the finalized MessageSetSize
- *
- * @returns the buffer to transmit or NULL if there were no messages
- * in messageset.
- */
-static rd_kafka_buf_t *
-rd_kafka_msgset_writer_finalize(rd_kafka_msgset_writer_t *msetw,
- size_t *MessageSetSizep) {
- rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
- rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
- size_t len;
- int cnt;
-
- /* No messages added, bail out early. */
- if (unlikely((cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)) ==
- 0)) {
- rd_kafka_buf_destroy(rkbuf);
- return NULL;
- }
-
- /* Total size of messages */
- len = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
- msetw->msetw_firstmsg.of;
- rd_assert(len > 0);
- rd_assert(len <= (size_t)rktp->rktp_rkt->rkt_rk->rk_conf.max_msg_size);
-
- rd_atomic64_add(&rktp->rktp_c.tx_msgs, cnt);
- rd_atomic64_add(&rktp->rktp_c.tx_msg_bytes,
- msetw->msetw_messages_kvlen);
-
- /* Idempotent Producer:
- * Store request's PID for matching on response
- * if the instance PID has changed and thus made
- * the request obsolete. */
- msetw->msetw_rkbuf->rkbuf_u.Produce.batch.pid = msetw->msetw_pid;
-
- /* Compress the message set */
- if (msetw->msetw_compression) {
- if (rd_kafka_msgset_writer_compress(msetw, &len) == -1)
- msetw->msetw_compression = 0;
- }
-
- msetw->msetw_messages_len = len;
-
- /* Finalize MessageSet header fields */
- rd_kafka_msgset_writer_finalize_MessageSet(msetw);
-
- /* Return final MessageSetSize */
- *MessageSetSizep = msetw->msetw_MessageSetSize;
-
- rd_rkb_dbg(msetw->msetw_rkb, MSG, "PRODUCE",
- "%s [%" PRId32
- "]: "
- "Produce MessageSet with %i message(s) (%" PRIusz
- " bytes, "
- "ApiVersion %d, MsgVersion %d, MsgId %" PRIu64
- ", "
- "BaseSeq %" PRId32 ", %s, %s)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, cnt,
- msetw->msetw_MessageSetSize, msetw->msetw_ApiVersion,
- msetw->msetw_MsgVersion, msetw->msetw_batch->first_msgid,
- msetw->msetw_batch->first_seq,
- rd_kafka_pid2str(msetw->msetw_pid),
- msetw->msetw_compression
- ? rd_kafka_compression2str(msetw->msetw_compression)
- : "uncompressed");
-
- rd_kafka_msgq_verify_order(rktp, &msetw->msetw_batch->msgq,
- msetw->msetw_batch->first_msgid, rd_false);
-
- rd_kafka_msgbatch_ready_produce(msetw->msetw_batch);
-
- return rkbuf;
-}
-
-
-/**
- * @brief Create ProduceRequest containing as many messages from
- * the toppar's transmit queue as possible, limited by configuration,
- * size, etc.
- *
- * @param rkb broker to create buffer for
- * @param rktp toppar to transmit messages for
- * @param MessageSetSizep will be set to the final MessageSetSize
- *
- * @returns the buffer to transmit or NULL if there were no messages
- * in messageset.
- *
- * @locality broker thread
- */
-rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq,
- const rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid,
- size_t *MessageSetSizep) {
-
- rd_kafka_msgset_writer_t msetw;
-
- if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp, rkmq, pid,
- epoch_base_msgid) <= 0)
- return NULL;
-
- if (!rd_kafka_msgset_writer_write_msgq(&msetw, msetw.msetw_msgq)) {
- /* Error while writing messages to MessageSet,
- * move all messages back on the xmit queue. */
- rd_kafka_msgq_insert_msgq(
- rkmq, &msetw.msetw_batch->msgq,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
- }
-
- return rd_kafka_msgset_writer_finalize(&msetw, MessageSetSizep);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c
deleted file mode 100644
index ffa6a9d52..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c
+++ /dev/null
@@ -1,1548 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-// FIXME: Revise this documentation:
-/**
- * This file implements the consumer offset storage.
- * It currently supports local file storage and broker OffsetCommit storage.
- *
- * Regardless of commit method (file, broker, ..) this is how it works:
- * - When rdkafka (with enable.auto.offset.store) or the application calls
- *   rd_kafka_offset_store() with an offset to store, all it does is set
- *   rktp->rktp_stored_offset to this value.
- *   This can happen from any thread and is locked by the rktp lock.
- * - The actual commit/write of the offset to its backing store (filesystem)
- * is performed by the main rdkafka thread and scheduled at the configured
- * auto.commit.interval.ms interval.
- * - The write is performed in the main rdkafka thread (in a blocking manner
- * for file based offsets) and once the write has
- * succeeded rktp->rktp_committed_offset is updated to the new value.
- * - If offset.store.sync.interval.ms is configured the main rdkafka thread
- * will also make sure to fsync() each offset file accordingly. (file)
- */
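-
-/* Editorial sketch (illustration only): from the application's side the
- * store/commit split described above, with enable.auto.offset.store=false
- * and enable.auto.commit=true, looks roughly like this: */
-#if 0
-static void consume_loop(rd_kafka_t *rk) {
-        while (1) {
-                rd_kafka_message_t *m = rd_kafka_consumer_poll(rk, 100);
-                if (!m)
-                        continue;
-                if (!m->err) {
-                        /* ... process message ... */
-                        /* Store only updates the in-memory stored offset;
-                         * the main rdkafka thread commits it at each
-                         * auto.commit.interval.ms interval. */
-                        rd_kafka_error_t *error =
-                            rd_kafka_offset_store_message(m);
-                        if (error)
-                                rd_kafka_error_destroy(error);
-                }
-                rd_kafka_message_destroy(m);
-        }
-}
-#endif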
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-
-#include <stdio.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#ifdef _WIN32
-#include <io.h>
-#include <share.h>
-#include <sys/stat.h>
-#include <shlwapi.h>
-#endif
-
-
-/**
- * Convert an absolute or logical offset to string.
- */
-const char *rd_kafka_offset2str(int64_t offset) {
- static RD_TLS char ret[16][32];
- static RD_TLS int i = 0;
-
- i = (i + 1) % 16;
-
- if (offset >= 0)
- rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64, offset);
- else if (offset == RD_KAFKA_OFFSET_BEGINNING)
- return "BEGINNING";
- else if (offset == RD_KAFKA_OFFSET_END)
- return "END";
- else if (offset == RD_KAFKA_OFFSET_STORED)
- return "STORED";
- else if (offset == RD_KAFKA_OFFSET_INVALID)
- return "INVALID";
- else if (offset <= RD_KAFKA_OFFSET_TAIL_BASE)
- rd_snprintf(ret[i], sizeof(ret[i]), "TAIL(%lld)",
- llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE));
- else
- rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64 "?", offset);
-
- return ret[i];
-}
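-
-/* Example outputs (illustrative): rd_kafka_offset2str(1234) -> "1234",
- * rd_kafka_offset2str(RD_KAFKA_OFFSET_BEGINNING) -> "BEGINNING",
- * rd_kafka_offset2str(RD_KAFKA_OFFSET_TAIL(5)) -> "TAIL(5)". */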
-
-static void rd_kafka_offset_file_close(rd_kafka_toppar_t *rktp) {
- if (!rktp->rktp_offset_fp)
- return;
-
- fclose(rktp->rktp_offset_fp);
- rktp->rktp_offset_fp = NULL;
-}
-
-
-#ifndef _WIN32
-/**
- * Linux version of open callback providing racefree CLOEXEC.
- */
-int rd_kafka_open_cb_linux(const char *pathname,
- int flags,
- mode_t mode,
- void *opaque) {
-#ifdef O_CLOEXEC
- return open(pathname, flags | O_CLOEXEC, mode);
-#else
- return rd_kafka_open_cb_generic(pathname, flags, mode, opaque);
-#endif
-}
-#endif
-
-/**
- * Fallback version of open_cb NOT providing racefree CLOEXEC,
- * but setting CLOEXEC after file open (if FD_CLOEXEC is defined).
- */
-int rd_kafka_open_cb_generic(const char *pathname,
- int flags,
- mode_t mode,
- void *opaque) {
-#ifndef _WIN32
- int fd;
- int on = 1;
- fd = open(pathname, flags, mode);
- if (fd == -1)
- return -1;
-#ifdef FD_CLOEXEC
- fcntl(fd, F_SETFD, FD_CLOEXEC, &on);
-#endif
- return fd;
-#else
- int fd;
- if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0)
- return -1;
- return fd;
-#endif
-}
-
-
-static int rd_kafka_offset_file_open(rd_kafka_toppar_t *rktp) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- int fd;
-
-#ifndef _WIN32
- mode_t mode = 0644;
-#else
- mode_t mode = _S_IREAD | _S_IWRITE;
-#endif
- if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path, O_CREAT | O_RDWR,
- mode, rk->rk_conf.opaque)) == -1) {
- rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
- "%s [%" PRId32
- "]: "
- "Failed to open offset file %s: %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp->rktp_offset_path,
- rd_strerror(errno));
- return -1;
- }
-
- rktp->rktp_offset_fp =
-#ifndef _WIN32
- fdopen(fd, "r+");
-#else
- _fdopen(fd, "r+");
-#endif
-
- return 0;
-}
-
-
-static int64_t rd_kafka_offset_file_read(rd_kafka_toppar_t *rktp) {
- char buf[22];
- char *end;
- int64_t offset;
- size_t r;
-
- if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
- rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
- "%s [%" PRId32
- "]: "
- "Seek (for read) failed on offset file %s: %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp->rktp_offset_path,
- rd_strerror(errno));
- rd_kafka_offset_file_close(rktp);
- return RD_KAFKA_OFFSET_INVALID;
- }
-
- r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp);
- if (r == 0) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: offset file (%s) is empty",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp->rktp_offset_path);
- return RD_KAFKA_OFFSET_INVALID;
- }
-
- buf[r] = '\0';
-
- offset = strtoull(buf, &end, 10);
- if (buf == end) {
- rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
- "%s [%" PRId32
- "]: "
- "Unable to parse offset in %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp->rktp_offset_path);
- return RD_KAFKA_OFFSET_INVALID;
- }
-
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: Read offset %" PRId64
- " from offset "
- "file (%s)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- offset, rktp->rktp_offset_path);
-
- return offset;
-}
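-
-/* Editorial note: the offset file format is simply the ASCII offset followed
- * by a newline, e.g. "1234\n"; see rd_kafka_offset_file_commit() below. */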
-
-
-/**
- * Sync/flush offset file.
- */
-static int rd_kafka_offset_file_sync(rd_kafka_toppar_t *rktp) {
- if (!rktp->rktp_offset_fp)
- return 0;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "SYNC",
- "%s [%" PRId32 "]: offset file sync",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-
-#ifndef _WIN32
- (void)fflush(rktp->rktp_offset_fp);
- (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME
-#else
- // FIXME
- // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp)));
-#endif
- return 0;
-}
-
-
-/**
- * Write offset to offset file.
- *
- * Locality: toppar's broker thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_offset_file_commit(rd_kafka_toppar_t *rktp) {
- rd_kafka_topic_t *rkt = rktp->rktp_rkt;
- int attempt;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- int64_t offset = rktp->rktp_stored_pos.offset;
-
- for (attempt = 0; attempt < 2; attempt++) {
- char buf[22];
- int len;
-
- if (!rktp->rktp_offset_fp)
- if (rd_kafka_offset_file_open(rktp) == -1)
- continue;
-
- if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
- rd_kafka_op_err(
- rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
- "%s [%" PRId32
- "]: "
- "Seek failed on offset file %s: %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp->rktp_offset_path,
- rd_strerror(errno));
- err = RD_KAFKA_RESP_ERR__FS;
- rd_kafka_offset_file_close(rktp);
- continue;
- }
-
- len = rd_snprintf(buf, sizeof(buf), "%" PRId64 "\n", offset);
-
- if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) {
- rd_kafka_op_err(
- rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
- "%s [%" PRId32
- "]: "
- "Failed to write offset %" PRId64
- " to "
- "offset file %s: %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, offset,
- rktp->rktp_offset_path, rd_strerror(errno));
- err = RD_KAFKA_RESP_ERR__FS;
- rd_kafka_offset_file_close(rktp);
- continue;
- }
-
- /* Need to flush before truncate to preserve write ordering */
- (void)fflush(rktp->rktp_offset_fp);
-
- /* Truncate file */
-#ifdef _WIN32
- if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1)
- ; /* Ignore truncate failures */
-#else
- if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1)
- ; /* Ignore truncate failures */
-#endif
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: wrote offset %" PRId64
- " to "
- "file %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, offset,
- rktp->rktp_offset_path);
-
- rktp->rktp_committed_pos.offset = offset;
-
- /* If sync interval is set to immediate we sync right away. */
- if (rkt->rkt_conf.offset_store_sync_interval_ms == 0)
- rd_kafka_offset_file_sync(rktp);
-
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
-
- return err;
-}
-
-
-
-/**
- * Commit a list of offsets asynchronously. Response will be queued on 'replyq'.
- * Optional \p cb will be set on requesting op.
- *
- * Makes a copy of \p offsets (may be NULL for current assignment)
- */
-static rd_kafka_resp_err_t
-rd_kafka_commit0(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq,
- void (*cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque),
- void *opaque,
- const char *reason) {
- rd_kafka_cgrp_t *rkcg;
- rd_kafka_op_t *rko;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
- rko->rko_u.offset_commit.reason = rd_strdup(reason);
- rko->rko_replyq = replyq;
- rko->rko_u.offset_commit.cb = cb;
- rko->rko_u.offset_commit.opaque = opaque;
- if (rktp)
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- if (offsets)
- rko->rko_u.offset_commit.partitions =
- rd_kafka_topic_partition_list_copy(offsets);
-
- rd_kafka_q_enq(rkcg->rkcg_ops, rko);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * NOTE: 'offsets' may be NULL, see official documentation.
- */
-rd_kafka_resp_err_t
-rd_kafka_commit(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- int async) {
- rd_kafka_cgrp_t *rkcg;
- rd_kafka_resp_err_t err;
- rd_kafka_q_t *repq = NULL;
- rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- if (!async) {
- repq = rd_kafka_q_new(rk);
- rq = RD_KAFKA_REPLYQ(repq, 0);
- }
-
- err = rd_kafka_commit0(rk, offsets, NULL, rq, NULL, NULL, "manual");
-
- if (!err && !async)
- err = rd_kafka_q_wait_result(repq, RD_POLL_INFINITE);
-
- if (!async)
- rd_kafka_q_destroy_owner(repq);
-
- return err;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- int async) {
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_t *rktpar;
- rd_kafka_resp_err_t err;
-
- if (rkmessage->err)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition);
- rktpar->offset = rkmessage->offset + 1;
-
- err = rd_kafka_commit(rk, offsets, async);
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
- return err;
-}
-
-
-
-rd_kafka_resp_err_t
-rd_kafka_commit_queue(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_queue_t *rkqu,
- void (*cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque),
- void *opaque) {
- rd_kafka_q_t *rkq;
- rd_kafka_resp_err_t err;
-
- if (!rd_kafka_cgrp_get(rk))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- if (rkqu)
- rkq = rkqu->rkqu_q;
- else
- rkq = rd_kafka_q_new(rk);
-
- err = rd_kafka_commit0(rk, offsets, NULL, RD_KAFKA_REPLYQ(rkq, 0), cb,
- opaque, "manual");
-
- if (!rkqu) {
- rd_kafka_op_t *rko = rd_kafka_q_pop_serve(
- rkq, RD_POLL_INFINITE, 0, RD_KAFKA_Q_CB_FORCE_RETURN, NULL,
- NULL);
- if (!rko)
- err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- else {
- if (cb)
- cb(rk, rko->rko_err,
- rko->rko_u.offset_commit.partitions, opaque);
- err = rko->rko_err;
- rd_kafka_op_destroy(rko);
- }
-
- /* rkqu is NULL in this branch, so the temporary
- * queue created above is owned by us. */
- rd_kafka_q_destroy_owner(rkq);
- }
-
- return err;
-}
-
-
-
-/**
- * Called when a broker commit is done.
- *
- * Locality: toppar handler thread
- * Locks: none
- */
-static void
-rd_kafka_offset_broker_commit_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_topic_partition_t *rktpar;
-
- if (offsets->cnt == 0) {
- rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
- "No offsets to commit (commit_cb)");
- return;
- }
-
- rktpar = &offsets->elems[0];
-
- if (!(rktp =
- rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false))) {
- rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
- "No local partition found for %s [%" PRId32
- "] "
- "while parsing OffsetCommit response "
- "(offset %" PRId64 ", error \"%s\")",
- rktpar->topic, rktpar->partition, rktpar->offset,
- rd_kafka_err2str(rktpar->err));
- return;
- }
-
- if (!err)
- err = rktpar->err;
-
- rd_kafka_toppar_offset_commit_result(rktp, err, offsets);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: offset %" PRId64 " %scommitted: %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rktpar->offset, err ? "not " : "", rd_kafka_err2str(err));
-
- rktp->rktp_committing_pos.offset = 0;
-
- rd_kafka_toppar_lock(rktp);
- if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING)
- rd_kafka_offset_store_term(rktp, err);
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp);
-}
-
-
-/**
- * @locks_required rd_kafka_toppar_lock(rktp) MUST be held.
- */
-static rd_kafka_resp_err_t
-rd_kafka_offset_broker_commit(rd_kafka_toppar_t *rktp, const char *reason) {
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_t *rktpar;
-
- rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL);
- rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
- rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE);
-
- rktp->rktp_committing_pos = rktp->rktp_stored_pos;
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
- rd_kafka_topic_partition_set_from_fetch_pos(rktpar,
- rktp->rktp_committing_pos);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT",
- "%.*s [%" PRId32 "]: committing %s: %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_committing_pos), reason);
-
- rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp,
- RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
- rd_kafka_offset_broker_commit_cb, NULL, reason);
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
- return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-}
-
-
-
-/**
- * Commit offset to backing store.
- * This might be an async operation.
- *
- * Locality: toppar handler thread
- */
-static rd_kafka_resp_err_t rd_kafka_offset_commit(rd_kafka_toppar_t *rktp,
- const char *reason) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: commit: stored %s > committed %s?",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_stored_pos),
- rd_kafka_fetch_pos2str(rktp->rktp_committed_pos));
-
- /* Already committed */
- if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
- &rktp->rktp_committed_pos) <= 0)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- /* Already committing (for async ops) */
- if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
- &rktp->rktp_committing_pos) <= 0)
- return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
-
- switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
- case RD_KAFKA_OFFSET_METHOD_FILE:
- return rd_kafka_offset_file_commit(rktp);
- case RD_KAFKA_OFFSET_METHOD_BROKER:
- return rd_kafka_offset_broker_commit(rktp, reason);
- default:
- /* UNREACHABLE */
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-}
-
-
-
-/**
- * Sync offset backing store. This is only used for METHOD_FILE.
- *
- * Locality: rktp's broker thread.
- */
-rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp) {
- switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
- case RD_KAFKA_OFFSET_METHOD_FILE:
- return rd_kafka_offset_file_sync(rktp);
- default:
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-}
-
-
-/**
- * Store offset.
- * Typically called from application code.
- *
- * NOTE: No locks must be held.
- *
- * @deprecated Use rd_kafka_offsets_store().
- */
-rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *app_rkt,
- int32_t partition,
- int64_t offset) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
- rd_kafka_toppar_t *rktp;
- rd_kafka_resp_err_t err;
- rd_kafka_fetch_pos_t pos = {offset + 1, -1 /*no leader epoch known*/};
-
- /* Find toppar */
- rd_kafka_topic_rdlock(rkt);
- if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0 /*!ua_on_miss*/))) {
- rd_kafka_topic_rdunlock(rkt);
- return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- }
- rd_kafka_topic_rdunlock(rkt);
-
- err = rd_kafka_offset_store0(rktp, pos, rd_false /* Don't force */,
- RD_DO_LOCK);
-
- rd_kafka_toppar_destroy(rktp);
-
- return err;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_offsets_store(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets) {
- int i;
- int ok_cnt = 0;
- rd_kafka_resp_err_t last_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (rk->rk_conf.enable_auto_offset_store)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- for (i = 0; i < offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
- rd_kafka_toppar_t *rktp;
- rd_kafka_fetch_pos_t pos = {rktpar->offset, -1};
-
- rktp =
- rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
- if (!rktp) {
- rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- last_err = rktpar->err;
- continue;
- }
-
- pos.leader_epoch =
- rd_kafka_topic_partition_get_leader_epoch(rktpar);
-
- rktpar->err = rd_kafka_offset_store0(
- rktp, pos, rd_false /* don't force */, RD_DO_LOCK);
- rd_kafka_toppar_destroy(rktp);
-
- if (rktpar->err)
- last_err = rktpar->err;
- else
- ok_cnt++;
- }
-
- return offsets->cnt > 0 && ok_cnt == 0 ? last_err
- : RD_KAFKA_RESP_ERR_NO_ERROR;
-}
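-
-/* Editorial sketch (illustration only; topic name, partition and offset are
- * placeholders): storing an explicit offset via the list-based API: */
-#if 0
-static void store_one(rd_kafka_t *rk) {
-        rd_kafka_topic_partition_list_t *offsets =
-            rd_kafka_topic_partition_list_new(1);
-        rd_kafka_topic_partition_t *rktpar =
-            rd_kafka_topic_partition_list_add(offsets, "mytopic", 0);
-        /* Store the offset of the *next* message to consume. */
-        rktpar->offset = 1235;
-        rd_kafka_offsets_store(rk, offsets);
-        rd_kafka_topic_partition_list_destroy(offsets);
-}
-#endif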
-
-
-rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_op_t *rko;
- rd_kafka_resp_err_t err;
- rd_kafka_msg_t *rkm = (rd_kafka_msg_t *)rkmessage;
- rd_kafka_fetch_pos_t pos;
-
- if (rkmessage->err)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Message object must not have an "
- "error set");
-
- if (unlikely(!(rko = rd_kafka_message2rko(rkmessage)) ||
- !(rktp = rko->rko_rktp)))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Invalid message object, "
- "not a consumed message");
-
- pos.offset = rkmessage->offset + 1;
- pos.leader_epoch = rkm->rkm_u.consumer.leader_epoch;
- err = rd_kafka_offset_store0(rktp, pos, rd_false /* Don't force */,
- RD_DO_LOCK);
-
- if (err == RD_KAFKA_RESP_ERR__STATE)
- return rd_kafka_error_new(err, "Partition is not assigned");
- else if (err)
- return rd_kafka_error_new(err, "Failed to store offset: %s",
- rd_kafka_err2str(err));
-
- return NULL;
-}
-
-
-
-/**
- * Decommissions the use of an offset file for a toppar.
- * The file content will not be touched and the file will not be removed.
- */
-static rd_kafka_resp_err_t rd_kafka_offset_file_term(rd_kafka_toppar_t *rktp) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- /* Sync offset file if a sync interval is configured (> 0) */
- if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) {
- rd_kafka_offset_file_sync(rktp);
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_sync_tmr, 1 /*lock*/);
- }
-
-
- rd_kafka_offset_file_close(rktp);
-
- rd_free(rktp->rktp_offset_path);
- rktp->rktp_offset_path = NULL;
-
- return err;
-}
-
-static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_toppar_t *rktp = rko->rko_rktp;
- rd_kafka_toppar_lock(rktp);
- rd_kafka_offset_reset(rktp, rko->rko_u.offset_reset.broker_id,
- rko->rko_u.offset_reset.pos, rko->rko_err, "%s",
- rko->rko_u.offset_reset.reason);
- rd_kafka_toppar_unlock(rktp);
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-/**
- * @brief Take action when the offset for a toppar is unusable (due to an
- * error, or offset is logical).
- *
- * @param rktp the toppar
- * @param broker_id Originating broker, if any, else RD_KAFKA_NODEID_UA.
- * @param err_pos a logical offset, or offset corresponding to the error.
- * @param err the error, or RD_KAFKA_RESP_ERR_NO_ERROR if offset is logical.
- * @param fmt a reason string for logging.
- *
- * @locality any. If not the main thread, work will be enqueued on the main thread.
- * @locks_required toppar_lock() MUST be held
- */
-void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
- int32_t broker_id,
- rd_kafka_fetch_pos_t err_pos,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1};
- const char *extra = "";
- char reason[512];
- va_list ap;
-
- va_start(ap, fmt);
- rd_vsnprintf(reason, sizeof(reason), fmt, ap);
- va_end(ap);
-
- /* Enqueue op for toppar handler thread if we're on the wrong thread. */
- if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
- rd_kafka_op_t *rko =
- rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB);
- rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
- rko->rko_err = err;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rko->rko_u.offset_reset.broker_id = broker_id;
- rko->rko_u.offset_reset.pos = err_pos;
- rko->rko_u.offset_reset.reason = rd_strdup(reason);
- rd_kafka_q_enq(rktp->rktp_ops, rko);
- return;
- }
-
- if (err_pos.offset == RD_KAFKA_OFFSET_INVALID || err)
- pos.offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
- else
- pos.offset = err_pos.offset;
-
- if (pos.offset == RD_KAFKA_OFFSET_INVALID) {
- /* Error, auto.offset.reset tells us to error out. */
- if (broker_id != RD_KAFKA_NODEID_UA)
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, broker_id,
- RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp,
- err_pos.offset, "%s: %s (broker %" PRId32 ")",
- reason, rd_kafka_err2str(err), broker_id);
- else
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, broker_id,
- RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp,
- err_pos.offset, "%s: %s", reason,
- rd_kafka_err2str(err));
-
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_NONE);
-
- } else if (pos.offset == RD_KAFKA_OFFSET_BEGINNING &&
- rktp->rktp_lo_offset >= 0) {
- /* Use cached log start from last Fetch if available.
- * Note: The cached end offset (rktp_ls_offset) can't be
- * used here since the End offset is a constantly moving
- * target as new messages are produced. */
- extra = "cached BEGINNING offset ";
- pos.offset = rktp->rktp_lo_offset;
- pos.leader_epoch = -1;
- rd_kafka_toppar_next_offset_handle(rktp, pos);
-
- } else {
- /* Else query cluster for offset */
- rktp->rktp_query_pos = pos;
- rd_kafka_toppar_set_fetch_state(
- rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
- }
-
- /* Offset resets due to error are logged since they might have quite
- * critical impact. For non-errors, or for auto.offset.reset=error,
- * the reason is simply debug-logged. */
- if (!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
- pos.offset == RD_KAFKA_OFFSET_INVALID)
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: offset reset (at %s, broker %" PRId32
- ") "
- "to %s%s: %s: %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_fetch_pos2str(err_pos), broker_id, extra,
- rd_kafka_fetch_pos2str(pos), reason, rd_kafka_err2str(err));
- else
- rd_kafka_log(
- rktp->rktp_rkt->rkt_rk, LOG_WARNING, "OFFSET",
- "%s [%" PRId32 "]: offset reset (at %s, broker %" PRId32
- ") to %s%s: %s: %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_fetch_pos2str(err_pos), broker_id, extra,
- rd_kafka_fetch_pos2str(pos), reason, rd_kafka_err2str(err));
-
- /* Note: If rktp is not delegated to the leader, then low and high
- offsets will necessarily be cached from the last FETCH request,
- and so this offset query will never occur in that case for
- BEGINNING / END logical offsets. */
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
- rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_pos,
- err ? 100 : 0);
-}
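-
-/* Editorial summary (illustrative) of the reset decision above:
- * error or INVALID err_pos -> fall back to auto.offset.reset
- * auto.offset.reset=error -> raise _AUTO_OFFSET_RESET to the app
- * BEGINNING with cached lo -> jump straight to the cached log start
- * otherwise -> enter OFFSET_QUERY and ask the cluster */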
-
-
-
-/**
- * @brief Offset validation retry timer
- */
-static void rd_kafka_offset_validate_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_toppar_t *rktp = arg;
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_offset_validate(rktp, "retrying offset validation");
- rd_kafka_toppar_unlock(rktp);
-}
-
-
-
-/**
- * @brief OffsetForLeaderEpochResponse handler that
- * pushes the matched toppar's to the next state.
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_toppar_handle_OffsetForLeaderEpoch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_topic_partition_list_t *parts = NULL;
- rd_kafka_toppar_t *rktp = opaque;
- rd_kafka_topic_partition_t *rktpar;
- int64_t end_offset;
- int32_t end_offset_leader_epoch;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- rd_kafka_toppar_destroy(rktp); /* Drop refcnt */
- return;
- }
-
- err = rd_kafka_handle_OffsetForLeaderEpoch(rk, rkb, err, rkbuf, request,
- &parts);
-
- rd_kafka_toppar_lock(rktp);
-
- if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT)
- err = RD_KAFKA_RESP_ERR__OUTDATED;
-
- if (unlikely(!err && parts->cnt == 0))
- err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- if (!err)
- err = parts->elems[0].err;
-
- if (err) {
- int actions;
-
- rd_rkb_dbg(rkb, FETCH, "OFFSETVALID",
- "%.*s [%" PRId32
- "]: OffsetForLeaderEpoch request failed: %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_err2str(err));
-
- if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) {
- rd_rkb_dbg(rkb, FETCH, "VALIDATE",
- "%.*s [%" PRId32
- "]: offset and epoch validation not "
- "supported by broker: validation skipped",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
- /* Reset the epoch to -1 since it can't be used with
- * older brokers. */
- rktp->rktp_next_fetch_start.leader_epoch = -1;
- rd_kafka_toppar_set_fetch_state(
- rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE);
- goto done;
-
- } else if (err == RD_KAFKA_RESP_ERR__OUTDATED) {
- /* Partition state has changed, this response
- * is outdated. */
- goto done;
- }
-
- actions = rd_kafka_err_action(
- rkb, err, request, RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
- RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
- RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
- RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
- RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
- RD_KAFKA_ERR_ACTION_END);
-
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
- /* Metadata refresh is ongoing, so force it */
- rd_kafka_topic_leader_query0(rk, rktp->rktp_rkt, 1,
- rd_true /* force */);
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- /* No need for refcnt on rktp for timer opaque
- * since the timer resides on the rktp and will be
- * stopped on toppar remove. */
- rd_kafka_timer_start_oneshot(
- &rk->rk_timers, &rktp->rktp_validate_tmr, rd_false,
- 500 * 1000 /* 500ms */,
- rd_kafka_offset_validate_tmr_cb, rktp);
- goto done;
- }
-
- if (!(actions & RD_KAFKA_ERR_ACTION_REFRESH)) {
- /* Permanent error */
- rd_kafka_offset_reset(
- rktp, rd_kafka_broker_id(rkb),
- RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID,
- rktp->rktp_leader_epoch),
- RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
- "Unable to validate offset and epoch: %s",
- rd_kafka_err2str(err));
- }
- goto done;
- }
-
-
- rktpar = &parts->elems[0];
- end_offset = rktpar->offset;
- end_offset_leader_epoch =
- rd_kafka_topic_partition_get_leader_epoch(rktpar);
-
- if (end_offset < 0 || end_offset_leader_epoch < 0) {
- rd_kafka_offset_reset(
- rktp, rd_kafka_broker_id(rkb), rktp->rktp_next_fetch_start,
- RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
- "No epoch found less than or equal to "
- "%s: broker end offset is %" PRId64
- " (offset leader epoch %" PRId32
- ")."
- " Reset using configured policy.",
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
- end_offset, end_offset_leader_epoch);
-
- } else if (end_offset < rktp->rktp_next_fetch_start.offset) {
-
- if (rktp->rktp_rkt->rkt_conf.auto_offset_reset ==
- RD_KAFKA_OFFSET_INVALID /* auto.offset.reset=error */) {
- rd_kafka_offset_reset(
- rktp, rd_kafka_broker_id(rkb),
- RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID,
- rktp->rktp_leader_epoch),
- RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
- "Partition log truncation detected at %s: "
- "broker end offset is %" PRId64
- " (offset leader epoch %" PRId32
- "). "
- "Reset to INVALID.",
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
- end_offset, end_offset_leader_epoch);
-
- } else {
- rd_kafka_toppar_unlock(rktp);
-
- /* Seek to the updated end offset */
- rd_kafka_fetch_pos_t fetch_pos =
- rd_kafka_topic_partition_get_fetch_pos(rktpar);
- fetch_pos.validated = rd_true;
-
- rd_kafka_toppar_op_seek(rktp, fetch_pos,
- RD_KAFKA_NO_REPLYQ);
-
- rd_kafka_topic_partition_list_destroy(parts);
- rd_kafka_toppar_destroy(rktp);
-
- return;
- }
-
- } else {
- rd_rkb_dbg(rkb, FETCH, "OFFSETVALID",
- "%.*s [%" PRId32
- "]: offset and epoch validation "
- "succeeded: broker end offset %" PRId64
- " (offset leader epoch %" PRId32 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, end_offset,
- end_offset_leader_epoch);
-
- rktp->rktp_next_fetch_start.leader_epoch =
- end_offset_leader_epoch;
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_ACTIVE);
- }
-
-done:
- rd_kafka_toppar_unlock(rktp);
-
- if (parts)
- rd_kafka_topic_partition_list_destroy(parts);
- rd_kafka_toppar_destroy(rktp);
-}
-
-
-static rd_kafka_op_res_t rd_kafka_offset_validate_op_cb(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_toppar_t *rktp = rko->rko_rktp;
- rd_kafka_toppar_lock(rktp);
- rd_kafka_offset_validate(rktp, "%s", rko->rko_u.offset_reset.reason);
- rd_kafka_toppar_unlock(rktp);
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-/**
- * @brief Validate partition epoch and offset (KIP-320).
- *
- * @param rktp the toppar
- * @param fmt a reason string for logging.
- *
- * @locality any. If not the main thread, work will be enqueued on the main thread.
- * @locks_required toppar_lock() MUST be held
- */
-void rd_kafka_offset_validate(rd_kafka_toppar_t *rktp, const char *fmt, ...) {
- rd_kafka_topic_partition_list_t *parts;
- rd_kafka_topic_partition_t *rktpar;
- char reason[512];
- va_list ap;
-
- if (rktp->rktp_rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER)
- return;
-
- va_start(ap, fmt);
- rd_vsnprintf(reason, sizeof(reason), fmt, ap);
- va_end(ap);
-
- /* Enqueue op for toppar handler thread if we're on the wrong thread. */
- if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
- /* Reuse OP_OFFSET_RESET type */
- rd_kafka_op_t *rko =
- rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB);
- rko->rko_op_cb = rd_kafka_offset_validate_op_cb;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rko->rko_u.offset_reset.reason = rd_strdup(reason);
- rd_kafka_q_enq(rktp->rktp_ops, rko);
- return;
- }
-
- if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE &&
- rktp->rktp_fetch_state !=
- RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE",
- "%.*s [%" PRId32
- "]: skipping offset "
- "validation in fetch state %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state]);
- return;
- }
-
-
- if (rktp->rktp_leader_id == -1 || !rktp->rktp_leader ||
- rktp->rktp_leader->rkb_source == RD_KAFKA_INTERNAL) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE",
- "%.*s [%" PRId32
- "]: unable to perform offset "
- "validation: partition leader not available",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
-
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_ACTIVE);
- return;
- }
-
- /* If the fetch start position does not have an epoch set then
- * there is no point in doing validation.
- * This is the case for epoch-less seek()s or epoch-less
- * committed offsets. */
- if (rktp->rktp_next_fetch_start.leader_epoch == -1) {
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE",
- "%.*s [%" PRId32
- "]: skipping offset "
- "validation for %s: no leader epoch set",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start));
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_ACTIVE);
- return;
- }
-
- rd_kafka_toppar_set_fetch_state(
- rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT);
-
- /* Construct and send OffsetForLeaderEpochRequest */
- parts = rd_kafka_topic_partition_list_new(1);
- rktpar = rd_kafka_topic_partition_list_add(
- parts, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
- rd_kafka_topic_partition_set_leader_epoch(
- rktpar, rktp->rktp_next_fetch_start.leader_epoch);
- rd_kafka_topic_partition_set_current_leader_epoch(
- rktpar, rktp->rktp_leader_epoch);
- rd_kafka_toppar_keep(rktp); /* for request opaque */
-
- rd_rkb_dbg(rktp->rktp_leader, FETCH, "VALIDATE",
- "%.*s [%" PRId32
- "]: querying broker for epoch "
- "validation of %s: %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start), reason);
-
- rd_kafka_OffsetForLeaderEpochRequest(
- rktp->rktp_leader, parts, RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
- rd_kafka_toppar_handle_OffsetForLeaderEpoch, rktp);
- rd_kafka_topic_partition_list_destroy(parts);
-}
-
-
-/**
- * Escape any special characters in filename 'in' and write escaped
- * string to 'out' (of max size out_size).
- */
-static char *mk_esc_filename(const char *in, char *out, size_t out_size) {
- const char *s = in;
- char *o = out;
-
- while (*s) {
- const char *esc;
- size_t esclen;
-
- switch (*s) {
- case '/': /* linux */
- esc = "%2F";
- esclen = strlen(esc);
- break;
- case ':': /* osx, windows */
- esc = "%3A";
- esclen = strlen(esc);
- break;
- case '\\': /* windows */
- esc = "%5C";
- esclen = strlen(esc);
- break;
- default:
- esc = s;
- esclen = 1;
- break;
- }
-
- if ((size_t)((o + esclen + 1) - out) >= out_size) {
- /* No more space in output string, truncate. */
- break;
- }
-
- while (esclen-- > 0)
- *(o++) = *(esc++);
-
- s++;
- }
-
- *o = '\0';
- return out;
-}
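-
-/* Example (illustrative): mk_esc_filename("my/topic:0", out, sizeof(out))
- * yields "my%2Ftopic%3A0", which is safe as a filename on Linux, OSX
- * and Windows. */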
-
-
-static void rd_kafka_offset_sync_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_toppar_t *rktp = arg;
- rd_kafka_offset_sync(rktp);
-}
-
-
-/**
- * Prepare a toppar for using an offset file.
- *
- * Locality: rdkafka main thread
- * Locks: toppar_lock(rktp) must be held
- */
-static void rd_kafka_offset_file_init(rd_kafka_toppar_t *rktp) {
- char spath[4096 + 1]; /* larger than escfile to avoid warning */
- const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
- int64_t offset = RD_KAFKA_OFFSET_INVALID;
-
- if (rd_kafka_path_is_dir(path)) {
- char tmpfile[1024];
- char escfile[4096];
-
- /* Include group.id in filename if configured. */
- if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->rk_group_id))
- rd_snprintf(tmpfile, sizeof(tmpfile),
- "%s-%" PRId32 "-%.*s.offset",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- RD_KAFKAP_STR_PR(
- rktp->rktp_rkt->rkt_rk->rk_group_id));
- else
- rd_snprintf(tmpfile, sizeof(tmpfile),
- "%s-%" PRId32 ".offset",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition);
-
- /* Escape filename to make it safe. */
- mk_esc_filename(tmpfile, escfile, sizeof(escfile));
-
- rd_snprintf(spath, sizeof(spath), "%s%s%s", path,
- path[strlen(path) - 1] == '/' ? "" : "/", escfile);
-
- path = spath;
- }
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: using offset file %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- path);
- rktp->rktp_offset_path = rd_strdup(path);
-
-
- /* Set up the offset file sync interval. */
- if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
- rd_kafka_timer_start(
- &rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_sync_tmr,
- rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms *
- 1000ll,
- rd_kafka_offset_sync_tmr_cb, rktp);
-
- if (rd_kafka_offset_file_open(rktp) != -1) {
- /* Read offset from offset file. */
- offset = rd_kafka_offset_file_read(rktp);
- }
-
- if (offset != RD_KAFKA_OFFSET_INVALID) {
- /* Start fetching from offset */
- rktp->rktp_stored_pos.offset = offset;
- rktp->rktp_committed_pos.offset = offset;
- rd_kafka_toppar_next_offset_handle(rktp, rktp->rktp_stored_pos);
-
- } else {
- /* Offset was not usable: perform offset reset logic */
- rktp->rktp_committed_pos.offset = RD_KAFKA_OFFSET_INVALID;
- rd_kafka_offset_reset(
- rktp, RD_KAFKA_NODEID_UA,
- RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1),
- RD_KAFKA_RESP_ERR__FS, "non-readable offset file");
- }
-}
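-
-/*
- * Illustrative sketch with assumed values (not from the original
- * source): given offset.store.path "/var/lib/consumer/offsets" (a
- * directory), topic "payments", partition 3 and group.id "billing",
- * the code above derives:
- *
- *   tmpfile: "payments-3-billing.offset"
- *   escfile: "payments-3-billing.offset"  (nothing to escape)
- *   path:    "/var/lib/consumer/offsets/payments-3-billing.offset"
- */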
-
-
-
-/**
- * Terminate broker offset store
- */
-static rd_kafka_resp_err_t
-rd_kafka_offset_broker_term(rd_kafka_toppar_t *rktp) {
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * Prepare a toppar for using broker offset commit (broker 0.8.2 or
- * later). When using KafkaConsumer (high-level consumer) this
- * functionality is disabled in favour of the cgrp commits for the
- * entire set of subscriptions.
- */
-static void rd_kafka_offset_broker_init(rd_kafka_toppar_t *rktp) {
- if (!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))
- return;
- rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA,
- RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_STORED, -1),
- RD_KAFKA_RESP_ERR_NO_ERROR,
- "query broker for offsets");
-}
-
-
-/**
- * Terminates the toppar's offset store; this is the finalizing step after
- * offset_store_stop().
- *
- * Locks: rd_kafka_toppar_lock() MUST be held.
- */
-void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err) {
- rd_kafka_resp_err_t err2;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM",
- "%s [%" PRId32 "]: offset store terminating",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;
-
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_commit_tmr, 1 /*lock*/);
-
- switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
- case RD_KAFKA_OFFSET_METHOD_FILE:
- err2 = rd_kafka_offset_file_term(rktp);
- break;
- case RD_KAFKA_OFFSET_METHOD_BROKER:
- err2 = rd_kafka_offset_broker_term(rktp);
- break;
- case RD_KAFKA_OFFSET_METHOD_NONE:
- err2 = RD_KAFKA_RESP_ERR_NO_ERROR;
- break;
- }
-
- /* Prioritize the input error (probably from commit), fall
- * back on termination error. */
- if (!err)
- err = err2;
-
- rd_kafka_toppar_fetch_stopped(rktp, err);
-}
-
-
-/**
- * Stop toppar's offset store, committing the final offsets, etc.
- *
- * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
- * RD_KAFKA_RESP_ERR__IN_PROGRESS if the term triggered an
- * async operation (e.g., broker offset commit), or
- * any other error in case of immediate failure.
- *
- * The offset layer will call rd_kafka_offset_store_term() when
- * the offset management has been fully stopped for this partition.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held.
- */
-rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE))
- goto done;
-
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32
- "]: stopping offset store "
- "(stored %s, committed %s, EOF offset %" PRId64 ")",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_stored_pos),
- rd_kafka_fetch_pos2str(rktp->rktp_committed_pos),
- rktp->rktp_offsets_fin.eof_offset);
-
- /* Store end offset for empty partitions */
- if (rktp->rktp_rkt->rkt_rk->rk_conf.enable_auto_offset_store &&
- rktp->rktp_stored_pos.offset == RD_KAFKA_OFFSET_INVALID &&
- rktp->rktp_offsets_fin.eof_offset > 0)
- rd_kafka_offset_store0(
- rktp,
- RD_KAFKA_FETCH_POS(rktp->rktp_offsets_fin.eof_offset,
- rktp->rktp_leader_epoch),
- rd_true /* force */, RD_DONT_LOCK);
-
- /* Commit offset to backing store.
- * This might be an async operation. */
- if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
- rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
- &rktp->rktp_committed_pos) > 0)
- err = rd_kafka_offset_commit(rktp, "offset store stop");
-
- /* If stop is in progress (async commit), return now. */
- if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
- return err;
-
-done:
- /* Stop is done */
- rd_kafka_offset_store_term(rktp, err);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
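-
-/*
- * Minimal caller sketch (illustrative only): the asynchronous case is
- * signalled by ERR__IN_PROGRESS, in which case the offset layer, not
- * the caller, later invokes rd_kafka_offset_store_term():
- *
- *   err = rd_kafka_offset_store_stop(rktp);
- *   if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
- *           return;  (termination continues asynchronously)
- */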
-
-
-static void rd_kafka_offset_auto_commit_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_toppar_t *rktp = arg;
- rd_kafka_offset_commit(rktp, "auto commit timer");
-}
-
-void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_toppar_t *rktp = arg;
- rd_kafka_toppar_lock(rktp);
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "Topic %s [%" PRId32
- "]: timed offset query for %s in state %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_query_pos),
- rd_kafka_fetch_states[rktp->rktp_fetch_state]);
- rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_pos, 0);
- rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Initialize toppar's offset store.
- *
- * Locality: toppar handler thread
- */
-void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp) {
- static const char *store_names[] = {"none", "file", "broker"};
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: using offset store method: %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- store_names[rktp->rktp_rkt->rkt_conf.offset_store_method]);
-
- /* The committed offset is unknown at this point. */
- rktp->rktp_committed_pos.offset = RD_KAFKA_OFFSET_INVALID;
-
- /* Set up the commit interval (for simple consumer). */
- if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
- rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms > 0)
- rd_kafka_timer_start(
- &rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_commit_tmr,
- rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms * 1000ll,
- rd_kafka_offset_auto_commit_tmr_cb, rktp);
-
- switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
- case RD_KAFKA_OFFSET_METHOD_FILE:
- rd_kafka_offset_file_init(rktp);
- break;
- case RD_KAFKA_OFFSET_METHOD_BROKER:
- rd_kafka_offset_broker_init(rktp);
- break;
- case RD_KAFKA_OFFSET_METHOD_NONE:
- break;
- default:
- /* NOTREACHED */
- return;
- }
-
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE;
-}
-
-
-/**
- * Update toppar app_pos and store_offset (if enabled) to the provided
- * offset and epoch.
- */
-void rd_kafka_update_app_pos(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_dolock_t do_lock) {
-
- if (do_lock)
- rd_kafka_toppar_lock(rktp);
-
- rktp->rktp_app_pos = pos;
- if (rk->rk_conf.enable_auto_offset_store)
- rd_kafka_offset_store0(rktp, pos,
- /* force: ignore assignment state */
- rd_true, RD_DONT_LOCK);
-
- if (do_lock)
- rd_kafka_toppar_unlock(rktp);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h
deleted file mode 100644
index 7b01c8487..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_OFFSET_H_
-#define _RDKAFKA_OFFSET_H_
-
-#include "rdkafka_partition.h"
-
-
-const char *rd_kafka_offset2str(int64_t offset);
-
-
-/**
- * @brief Stores the offset for the toppar 'rktp'.
- * The actual commit of the offset to backing store is usually
- * performed at a later time (time or threshold based).
- *
- * For the high-level consumer (assign()), this function will reject absolute
- * offsets if the partition is not currently assigned, unless \p force is set.
- * This check was added to avoid a race condition where an application
- * would call offsets_store() after the partitions had been revoked, forcing
- * a future auto-committer on the next assignment to commit this old offset and
- * overwriting whatever newer offset was committed by another consumer.
- *
- * The \p force flag is useful for internal calls to offset_store0() which
- * do not need the protection described above.
- *
- *
- * There is one situation where the \p force flag is troublesome:
- * If the application is using any of the consumer batching APIs,
- * e.g., consume_batch() or the event-based consumption, then it's possible
- * that while the batch is being accumulated or the application is picking off
- * messages from the event a rebalance occurs (in the background) which revokes
- * the current assignment. This revocation will remove all queued messages, but
- * not the ones the application already has accumulated in the event object.
- * Enforcing assignment for store in this state is tricky with a bunch of
- * corner cases, so instead we let those places forcibly store the offset, but
- * then in assign() we reset the stored offset to .._INVALID, just like we do
- * on revoke.
- * Illustrated (with fix):
- * 1. ev = rd_kafka_queue_poll();
- * 2. background rebalance revoke unassigns the partition and sets the
- * stored offset to _INVALID.
- * 3. application calls message_next(ev) which forcibly sets the
- * stored offset.
- * 4. background rebalance assigns the partition again, but forcibly sets
- * the stored offset to .._INVALID to provide a clean state.
- *
- * @param pos Offset and leader epoch to set, may be an absolute offset
- * or .._INVALID.
- * @param force Forcibly set \p offset regardless of assignment state.
- * @param do_lock Whether to lock the \p rktp or not (already locked by caller).
- *
- * See head of rdkafka_offset.c for more information.
- *
- * @returns RD_KAFKA_RESP_ERR__STATE if the partition is not currently assigned,
- * unless \p force is set.
- */
-static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
-rd_kafka_offset_store0(rd_kafka_toppar_t *rktp,
- const rd_kafka_fetch_pos_t pos,
- rd_bool_t force,
- rd_dolock_t do_lock) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (do_lock)
- rd_kafka_toppar_lock(rktp);
-
- if (unlikely(!force && !RD_KAFKA_OFFSET_IS_LOGICAL(pos.offset) &&
- !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED) &&
- !rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))) {
- err = RD_KAFKA_RESP_ERR__STATE;
- } else {
- rktp->rktp_stored_pos = pos;
- }
-
- if (do_lock)
- rd_kafka_toppar_unlock(rktp);
-
- return err;
-}
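-
-/*
- * Usage sketch (illustrative only): an application-triggered store
- * respects the assignment check and may fail with ERR__STATE, while
- * internal callers bypass it with the force flag:
- *
- *   err = rd_kafka_offset_store0(rktp, pos, rd_false, RD_DO_LOCK);
- *
- *   rd_kafka_offset_store0(rktp, pos, rd_true, RD_DONT_LOCK);
- *   (forced store, e.g. from the batching APIs; lock already held)
- */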
-
-rd_kafka_resp_err_t
-rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
-
-rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp);
-
-void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err);
-rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp);
-void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp);
-
-void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
- int32_t broker_id,
- rd_kafka_fetch_pos_t err_pos,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) RD_FORMAT(printf, 5, 6);
-
-void rd_kafka_offset_validate(rd_kafka_toppar_t *rktp, const char *fmt, ...)
- RD_FORMAT(printf, 2, 3);
-
-void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg);
-
-void rd_kafka_update_app_pos(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_dolock_t do_lock);
-
-#endif /* _RDKAFKA_OFFSET_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c
deleted file mode 100644
index 128b8bb40..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c
+++ /dev/null
@@ -1,928 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdarg.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_op.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_proto.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_error.h"
-
-/* Current number of rd_kafka_op_t */
-rd_atomic32_t rd_kafka_op_cnt;
-
-
-const char *rd_kafka_op2str(rd_kafka_op_type_t type) {
- int skiplen = 6;
- static const char *names[RD_KAFKA_OP__END] = {
- [RD_KAFKA_OP_NONE] = "REPLY:NONE",
- [RD_KAFKA_OP_FETCH] = "REPLY:FETCH",
- [RD_KAFKA_OP_ERR] = "REPLY:ERR",
- [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR",
- [RD_KAFKA_OP_DR] = "REPLY:DR",
- [RD_KAFKA_OP_STATS] = "REPLY:STATS",
- [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT",
- [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE",
- [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF",
- [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF",
- [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY",
- [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START",
- [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP",
- [RD_KAFKA_OP_SEEK] = "REPLY:SEEK",
- [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE",
- [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH",
- [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN",
- [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE",
- [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE",
- [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE",
- [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY",
- [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE",
- [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN",
- [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION",
- [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT",
- [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE",
- [RD_KAFKA_OP_NAME] = "REPLY:NAME",
- [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA",
- [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET",
- [RD_KAFKA_OP_METADATA] = "REPLY:METADATA",
- [RD_KAFKA_OP_LOG] = "REPLY:LOG",
- [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP",
- [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS",
- [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS",
- [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS",
- [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS",
- [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS",
- [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS",
- [RD_KAFKA_OP_LISTCONSUMERGROUPS] = "REPLY:LISTCONSUMERGROUPS",
- [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] =
- "REPLY:DESCRIBECONSUMERGROUPS",
- [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS",
- [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] =
- "REPLY:DELETECONSUMERGROUPOFFSETS",
- [RD_KAFKA_OP_CREATEACLS] = "REPLY:CREATEACLS",
- [RD_KAFKA_OP_DESCRIBEACLS] = "REPLY:DESCRIBEACLS",
- [RD_KAFKA_OP_DELETEACLS] = "REPLY:DELETEACLS",
- [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] =
- "REPLY:ALTERCONSUMERGROUPOFFSETS",
- [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] =
- "REPLY:LISTCONSUMERGROUPOFFSETS",
- [RD_KAFKA_OP_ADMIN_FANOUT] = "REPLY:ADMIN_FANOUT",
- [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT",
- [RD_KAFKA_OP_PURGE] = "REPLY:PURGE",
- [RD_KAFKA_OP_CONNECT] = "REPLY:CONNECT",
- [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = "REPLY:OAUTHBEARER_REFRESH",
- [RD_KAFKA_OP_MOCK] = "REPLY:MOCK",
- [RD_KAFKA_OP_BROKER_MONITOR] = "REPLY:BROKER_MONITOR",
- [RD_KAFKA_OP_TXN] = "REPLY:TXN",
- [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] =
- "REPLY:GET_REBALANCE_PROTOCOL",
- [RD_KAFKA_OP_LEADERS] = "REPLY:LEADERS",
- [RD_KAFKA_OP_BARRIER] = "REPLY:BARRIER",
- };
-
- if (type & RD_KAFKA_OP_REPLY)
- skiplen = 0;
-
- rd_assert((names[type & ~RD_KAFKA_OP_FLAGMASK] != NULL) ||
- !*"add OP type to rd_kafka_op2str()");
- return names[type & ~RD_KAFKA_OP_FLAGMASK] + skiplen;
-}
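-
-/*
- * Example (illustrative only): names[] stores every string with a
- * "REPLY:" prefix and skiplen strips the first 6 characters for
- * plain request ops, so one table serves both directions:
- *
- *   rd_kafka_op2str(RD_KAFKA_OP_FETCH)                      -> "FETCH"
- *   rd_kafka_op2str(RD_KAFKA_OP_FETCH | RD_KAFKA_OP_REPLY)  -> "REPLY:FETCH"
- */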
-
-
-void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko) {
- fprintf(fp,
- "%s((rd_kafka_op_t*)%p)\n"
- "%s Type: %s (0x%x), Version: %" PRId32 "\n",
- prefix, rko, prefix, rd_kafka_op2str(rko->rko_type),
- rko->rko_type, rko->rko_version);
- if (rko->rko_err)
- fprintf(fp, "%s Error: %s\n", prefix,
- rd_kafka_err2str(rko->rko_err));
- if (rko->rko_replyq.q)
- fprintf(fp, "%s Replyq %p v%d (%s)\n", prefix,
- rko->rko_replyq.q, rko->rko_replyq.version,
-#if ENABLE_DEVEL
- rko->rko_replyq._id
-#else
- ""
-#endif
- );
- if (rko->rko_rktp) {
- fprintf(fp,
- "%s ((rd_kafka_toppar_t*)%p) "
- "%s [%" PRId32 "] v%d\n",
- prefix, rko->rko_rktp,
- rko->rko_rktp->rktp_rkt->rkt_topic->str,
- rko->rko_rktp->rktp_partition,
- rd_atomic32_get(&rko->rko_rktp->rktp_version));
- }
-
- switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) {
- case RD_KAFKA_OP_FETCH:
- fprintf(fp, "%s Offset: %" PRId64 "\n", prefix,
- rko->rko_u.fetch.rkm.rkm_offset);
- break;
- case RD_KAFKA_OP_CONSUMER_ERR:
- fprintf(fp, "%s Offset: %" PRId64 "\n", prefix,
- rko->rko_u.err.offset);
- /* FALLTHRU */
- case RD_KAFKA_OP_ERR:
- fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr);
- break;
- case RD_KAFKA_OP_DR:
- fprintf(fp, "%s %" PRId32 " messages on %s\n", prefix,
- rko->rko_u.dr.msgq.rkmq_msg_cnt,
- rko->rko_u.dr.rkt ? rko->rko_u.dr.rkt->rkt_topic->str
- : "(n/a)");
- break;
- case RD_KAFKA_OP_OFFSET_COMMIT:
- fprintf(fp, "%s Callback: %p (opaque %p)\n", prefix,
- rko->rko_u.offset_commit.cb,
- rko->rko_u.offset_commit.opaque);
- fprintf(fp, "%s %d partitions\n", prefix,
- rko->rko_u.offset_commit.partitions
- ? rko->rko_u.offset_commit.partitions->cnt
- : 0);
- break;
-
- case RD_KAFKA_OP_LOG:
- fprintf(fp, "%s Log: %%%d %s: %s\n", prefix,
- rko->rko_u.log.level, rko->rko_u.log.fac,
- rko->rko_u.log.str);
- break;
-
- default:
- break;
- }
-}
-
-
-rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type) {
- rd_kafka_op_t *rko;
-#define _RD_KAFKA_OP_EMPTY \
- 1234567 /* Special value to be able to assert \
- * on default-initialized (0) sizes \
- * if we forgot to add an op type to \
- * this list. */
- static const size_t op2size[RD_KAFKA_OP__END] = {
- [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch),
- [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err),
- [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err),
- [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr),
- [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats),
- [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit),
- [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node),
- [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf),
- [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf),
- [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf),
- [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start),
- [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start),
- [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause),
- [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch),
- [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance),
- [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe),
- [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign),
- [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe),
- [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign),
- [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle),
- [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name),
- [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata),
- [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset),
- [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata),
- [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log),
- [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DELETERECORDS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_LISTCONSUMERGROUPS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] =
- sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] =
- sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_CREATEACLS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DESCRIBEACLS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_DELETEACLS] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] =
- sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] =
- sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request),
- [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result),
- [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge),
- [RD_KAFKA_OP_CONNECT] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = _RD_KAFKA_OP_EMPTY,
- [RD_KAFKA_OP_MOCK] = sizeof(rko->rko_u.mock),
- [RD_KAFKA_OP_BROKER_MONITOR] = sizeof(rko->rko_u.broker_monitor),
- [RD_KAFKA_OP_TXN] = sizeof(rko->rko_u.txn),
- [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] =
- sizeof(rko->rko_u.rebalance_protocol),
- [RD_KAFKA_OP_LEADERS] = sizeof(rko->rko_u.leaders),
- [RD_KAFKA_OP_BARRIER] = _RD_KAFKA_OP_EMPTY,
- };
- size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK];
-
- rd_assert(tsize > 0 || !*"add OP type to rd_kafka_op_new0()");
- if (tsize == _RD_KAFKA_OP_EMPTY)
- tsize = 0;
-
- rko = rd_calloc(1, sizeof(*rko) - sizeof(rko->rko_u) + tsize);
- rko->rko_type = type;
-
-#if ENABLE_DEVEL
- rko->rko_source = source;
- rd_atomic32_add(&rd_kafka_op_cnt, 1);
-#endif
- return rko;
-}
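-
-/*
- * Illustrative note (not from the original source): the allocation
- * above trims the rko_u union down to the size required by the op
- * type, so e.g. a LOG op only pays for its own payload:
- *
- *   rko = rd_calloc(1, sizeof(*rko) - sizeof(rko->rko_u)
- *                          + sizeof(rko->rko_u.log));
- */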
-
-
-void rd_kafka_op_destroy(rd_kafka_op_t *rko) {
-
-        /* Call the op's callback with ERR__DESTROY to let it
- * clean up its resources. */
- if ((rko->rko_type & RD_KAFKA_OP_CB) && rko->rko_op_cb) {
- rd_kafka_op_res_t res;
- rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
- res = rko->rko_op_cb(rko->rko_rk, NULL, rko);
- rd_assert(res != RD_KAFKA_OP_RES_YIELD);
- rd_assert(res != RD_KAFKA_OP_RES_KEEP);
- }
-
-
- switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) {
- case RD_KAFKA_OP_FETCH:
- rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm);
- /* Decrease refcount on rkbuf to eventually rd_free shared buf*/
- if (rko->rko_u.fetch.rkbuf)
- rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
-
- break;
-
- case RD_KAFKA_OP_OFFSET_FETCH:
- if (rko->rko_u.offset_fetch.partitions &&
- rko->rko_u.offset_fetch.do_free)
- rd_kafka_topic_partition_list_destroy(
- rko->rko_u.offset_fetch.partitions);
- break;
-
- case RD_KAFKA_OP_OFFSET_COMMIT:
- RD_IF_FREE(rko->rko_u.offset_commit.partitions,
- rd_kafka_topic_partition_list_destroy);
- RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free);
- break;
-
- case RD_KAFKA_OP_SUBSCRIBE:
- case RD_KAFKA_OP_GET_SUBSCRIPTION:
- RD_IF_FREE(rko->rko_u.subscribe.topics,
- rd_kafka_topic_partition_list_destroy);
- break;
-
- case RD_KAFKA_OP_ASSIGN:
- case RD_KAFKA_OP_GET_ASSIGNMENT:
- RD_IF_FREE(rko->rko_u.assign.partitions,
- rd_kafka_topic_partition_list_destroy);
- break;
-
- case RD_KAFKA_OP_REBALANCE:
- RD_IF_FREE(rko->rko_u.rebalance.partitions,
- rd_kafka_topic_partition_list_destroy);
- break;
-
- case RD_KAFKA_OP_NAME:
- RD_IF_FREE(rko->rko_u.name.str, rd_free);
- break;
-
- case RD_KAFKA_OP_CG_METADATA:
- RD_IF_FREE(rko->rko_u.cg_metadata,
- rd_kafka_consumer_group_metadata_destroy);
- break;
-
- case RD_KAFKA_OP_ERR:
- case RD_KAFKA_OP_CONSUMER_ERR:
- RD_IF_FREE(rko->rko_u.err.errstr, rd_free);
- rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm);
- break;
-
- case RD_KAFKA_OP_THROTTLE:
- RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free);
- break;
-
- case RD_KAFKA_OP_STATS:
- RD_IF_FREE(rko->rko_u.stats.json, rd_free);
- break;
-
- case RD_KAFKA_OP_XMIT_RETRY:
- case RD_KAFKA_OP_XMIT_BUF:
- case RD_KAFKA_OP_RECV_BUF:
- if (rko->rko_u.xbuf.rkbuf)
- rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
-
- RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy);
- break;
-
- case RD_KAFKA_OP_DR:
- rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq);
- if (rko->rko_u.dr.do_purge2)
- rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2);
-
- if (rko->rko_u.dr.rkt)
- rd_kafka_topic_destroy0(rko->rko_u.dr.rkt);
- break;
-
- case RD_KAFKA_OP_OFFSET_RESET:
- RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free);
- break;
-
- case RD_KAFKA_OP_METADATA:
- RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy);
- break;
-
- case RD_KAFKA_OP_LOG:
- rd_free(rko->rko_u.log.str);
- break;
-
- case RD_KAFKA_OP_ADMIN_FANOUT:
- rd_assert(rko->rko_u.admin_request.fanout.outstanding == 0);
- rd_list_destroy(&rko->rko_u.admin_request.fanout.results);
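-                /* FALLTHRU */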
- case RD_KAFKA_OP_CREATETOPICS:
- case RD_KAFKA_OP_DELETETOPICS:
- case RD_KAFKA_OP_CREATEPARTITIONS:
- case RD_KAFKA_OP_ALTERCONFIGS:
- case RD_KAFKA_OP_DESCRIBECONFIGS:
- case RD_KAFKA_OP_DELETERECORDS:
- case RD_KAFKA_OP_LISTCONSUMERGROUPS:
- case RD_KAFKA_OP_DESCRIBECONSUMERGROUPS:
- case RD_KAFKA_OP_DELETEGROUPS:
- case RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS:
- case RD_KAFKA_OP_CREATEACLS:
- case RD_KAFKA_OP_DESCRIBEACLS:
- case RD_KAFKA_OP_DELETEACLS:
- case RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS:
- case RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS:
- rd_kafka_replyq_destroy(&rko->rko_u.admin_request.replyq);
- rd_list_destroy(&rko->rko_u.admin_request.args);
- if (rko->rko_u.admin_request.options.match_consumer_group_states
- .u.PTR) {
- rd_list_destroy(rko->rko_u.admin_request.options
- .match_consumer_group_states.u.PTR);
- }
- rd_assert(!rko->rko_u.admin_request.fanout_parent);
- RD_IF_FREE(rko->rko_u.admin_request.coordkey, rd_free);
- break;
-
- case RD_KAFKA_OP_ADMIN_RESULT:
- rd_list_destroy(&rko->rko_u.admin_result.args);
- rd_list_destroy(&rko->rko_u.admin_result.results);
- RD_IF_FREE(rko->rko_u.admin_result.errstr, rd_free);
- rd_assert(!rko->rko_u.admin_result.fanout_parent);
- break;
-
- case RD_KAFKA_OP_MOCK:
- RD_IF_FREE(rko->rko_u.mock.name, rd_free);
- RD_IF_FREE(rko->rko_u.mock.str, rd_free);
- break;
-
- case RD_KAFKA_OP_BROKER_MONITOR:
- rd_kafka_broker_destroy(rko->rko_u.broker_monitor.rkb);
- break;
-
- case RD_KAFKA_OP_TXN:
- RD_IF_FREE(rko->rko_u.txn.group_id, rd_free);
- RD_IF_FREE(rko->rko_u.txn.offsets,
- rd_kafka_topic_partition_list_destroy);
- RD_IF_FREE(rko->rko_u.txn.cgmetadata,
- rd_kafka_consumer_group_metadata_destroy);
- break;
-
- case RD_KAFKA_OP_LEADERS:
- rd_assert(!rko->rko_u.leaders.eonce);
- rd_assert(!rko->rko_u.leaders.replyq.q);
- RD_IF_FREE(rko->rko_u.leaders.leaders, rd_list_destroy);
- RD_IF_FREE(rko->rko_u.leaders.partitions,
- rd_kafka_topic_partition_list_destroy);
- break;
-
- default:
- break;
- }
-
- RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy);
-
- RD_IF_FREE(rko->rko_error, rd_kafka_error_destroy);
-
- rd_kafka_replyq_destroy(&rko->rko_replyq);
-
-#if ENABLE_DEVEL
- if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
- rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");
-#endif
-
- rd_free(rko);
-}
-
-
-
-/**
- * Propagate an error event to the application on a specific queue.
- */
-void rd_kafka_q_op_err(rd_kafka_q_t *rkq,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- va_list ap;
- char buf[2048];
- rd_kafka_op_t *rko;
-
- va_start(ap, fmt);
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);
- rko->rko_err = err;
- rko->rko_u.err.errstr = rd_strdup(buf);
-
- rd_kafka_q_enq(rkq, rko);
-}
-
-
-
-/**
- * @brief Enqueue RD_KAFKA_OP_CONSUMER_ERR on \p rkq.
- *
- * @param broker_id The relevant broker id, or RD_KAFKA_NODEID_UA (-1)
- * if not applicable.
- * @param err Error code.
- * @param version Queue version barrier, or 0 if not applicable.
- * @param topic May be NULL.
- * @param rktp May be NULL. Takes precedence over \p topic.
- * @param offset RD_KAFKA_OFFSET_INVALID if not applicable.
- *
- * @sa rd_kafka_q_op_err()
- */
-void rd_kafka_consumer_err(rd_kafka_q_t *rkq,
- int32_t broker_id,
- rd_kafka_resp_err_t err,
- int32_t version,
- const char *topic,
- rd_kafka_toppar_t *rktp,
- int64_t offset,
- const char *fmt,
- ...) {
- va_list ap;
- char buf[2048];
- rd_kafka_op_t *rko;
-
- va_start(ap, fmt);
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);
- rko->rko_version = version;
- rko->rko_err = err;
- rko->rko_u.err.offset = offset;
- rko->rko_u.err.errstr = rd_strdup(buf);
- rko->rko_u.err.rkm.rkm_broker_id = broker_id;
-
- if (rktp)
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- else if (topic)
- rko->rko_u.err.rkm.rkm_rkmessage.rkt =
- (rd_kafka_topic_t *)rd_kafka_lwtopic_new(rkq->rkq_rk,
- topic);
-
-
- rd_kafka_q_enq(rkq, rko);
-}
-
-
-/**
- * Creates a reply op based on 'rko_orig'.
- * If 'rko_orig' has rko_op_cb set the reply op will be OR:ed with
- * RD_KAFKA_OP_CB, else the reply type will be the original rko_type OR:ed
- * with RD_KAFKA_OP_REPLY.
- */
-rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig,
- rd_kafka_resp_err_t err) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(rko_orig->rko_type | RD_KAFKA_OP_REPLY);
- rd_kafka_op_get_reply_version(rko, rko_orig);
- rko->rko_err = err;
- if (rko_orig->rko_rktp)
- rko->rko_rktp = rd_kafka_toppar_keep(rko_orig->rko_rktp);
-
- return rko;
-}
-
-
-/**
- * @brief Create new callback op for type \p type
- */
-rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk,
- rd_kafka_op_type_t type,
- rd_kafka_op_cb_t *cb) {
- rd_kafka_op_t *rko;
- rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB);
- rko->rko_op_cb = cb;
- rko->rko_rk = rk;
- return rko;
-}
-
-
-/**
- * @brief Reply to 'rko' re-using the same rko with rko_err
- * specified by \p err. rko_error is set to NULL.
- *
- * If there is no replyq the rko is destroyed.
- *
- * @returns 1 if op was enqueued, else 0 and rko is destroyed.
- */
-int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
-
- if (!rko->rko_replyq.q) {
- rd_kafka_op_destroy(rko);
- return 0;
- }
-
- rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY);
- rko->rko_err = err;
- rko->rko_error = NULL;
-
- return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
-}
-
-
-/**
- * @brief Reply to 'rko' re-using the same rko with rko_error specified
- * by \p error (may be NULL) and rko_err set to the corresponding
- * error code. Assumes ownership of \p error.
- *
- * If there is no replyq the rko is destroyed.
- *
- * @returns 1 if op was enqueued, else 0 and rko is destroyed.
- */
-int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error) {
-
- if (!rko->rko_replyq.q) {
- RD_IF_FREE(error, rd_kafka_error_destroy);
- rd_kafka_op_destroy(rko);
- return 0;
- }
-
- rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY);
- rko->rko_err =
- error ? rd_kafka_error_code(error) : RD_KAFKA_RESP_ERR_NO_ERROR;
- rko->rko_error = error;
-
- return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
-}
-
-
-/**
- * @brief Send request to queue, wait for response.
- *
- * @returns response on success or NULL if destq is disabled.
- */
-rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq,
- rd_kafka_q_t *recvq,
- rd_kafka_op_t *rko,
- int timeout_ms) {
- rd_kafka_op_t *reply;
-
- /* Indicate to destination where to send reply. */
- rd_kafka_op_set_replyq(rko, recvq, NULL);
-
- /* Enqueue op */
- if (!rd_kafka_q_enq(destq, rko))
- return NULL;
-
- /* Wait for reply */
- reply = rd_kafka_q_pop(recvq, rd_timeout_us(timeout_ms), 0);
-
- /* May be NULL for timeout */
- return reply;
-}
-
-/**
- * Send request to queue, wait for response.
- * Creates a temporary reply queue.
- */
-rd_kafka_op_t *
-rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms) {
- rd_kafka_q_t *recvq;
- rd_kafka_op_t *reply;
-
- recvq = rd_kafka_q_new(destq->rkq_rk);
-
- reply = rd_kafka_op_req0(destq, recvq, rko, timeout_ms);
-
- rd_kafka_q_destroy_owner(recvq);
-
- return reply;
-}
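-
-/*
- * Usage sketch (illustrative only, assuming a valid destination
- * queue): a temporary reply queue turns the op interface into a
- * blocking request/response call:
- *
- *   rko   = rd_kafka_op_new(RD_KAFKA_OP_GET_ASSIGNMENT);
- *   reply = rd_kafka_op_req(destq, rko, 5000);
- *   err   = rd_kafka_op_err_destroy(reply);
- *   (a NULL reply maps to RD_KAFKA_RESP_ERR__TIMED_OUT)
- */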
-
-
-/**
- * Send simple type-only request to queue, wait for response.
- */
-rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(type);
- return rd_kafka_op_req(destq, rko, RD_POLL_INFINITE);
-}
-
-
-/**
- * Destroys the rko and returns its err.
- */
-rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- if (rko) {
- err = rko->rko_err;
- rd_kafka_op_destroy(rko);
- }
- return err;
-}
-
-
-/**
- * Destroys the rko and returns its error object or NULL if no error.
- */
-rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko) {
- if (rko) {
- rd_kafka_error_t *error = rko->rko_error;
- rko->rko_error = NULL;
- rd_kafka_op_destroy(rko);
- return error;
- }
-
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Operation timed out");
-}
-
-
-/**
- * Call op callback
- */
-rd_kafka_op_res_t
-rd_kafka_op_call(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
- rd_kafka_op_res_t res;
- rd_assert(rko->rko_op_cb);
- res = rko->rko_op_cb(rk, rkq, rko);
- if (unlikely(res == RD_KAFKA_OP_RES_YIELD || rd_kafka_yield_thread))
- return RD_KAFKA_OP_RES_YIELD;
- if (res != RD_KAFKA_OP_RES_KEEP)
- rko->rko_op_cb = NULL;
- return res;
-}
-
-
-/**
- * @brief Creates a new RD_KAFKA_OP_FETCH op representing a
- * control message. The rkm_flags property is set to
- * RD_KAFKA_MSG_F_CONTROL.
- */
-rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp,
- int32_t version,
- rd_kafka_buf_t *rkbuf,
- int64_t offset) {
- rd_kafka_msg_t *rkm;
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, version, rkbuf, offset, 0,
- NULL, 0, NULL);
-
- rkm->rkm_flags |= RD_KAFKA_MSG_F_CONTROL;
-
- return rko;
-}
-
-/**
- * @brief Creates a new RD_KAFKA_OP_FETCH op and sets up the
- * embedded message according to the parameters.
- *
- * @param rkmp will be set to the embedded rkm in the rko (for convenience)
- * @param offset may be updated later if relative offset.
- */
-rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp,
- rd_kafka_toppar_t *rktp,
- int32_t version,
- rd_kafka_buf_t *rkbuf,
- int64_t offset,
- size_t key_len,
- const void *key,
- size_t val_len,
- const void *val) {
- rd_kafka_msg_t *rkm;
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH);
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rko->rko_version = version;
- rkm = &rko->rko_u.fetch.rkm;
- *rkmp = rkm;
-
-        /* Since all the ops share the same payload buffer
-         * a refcount is held on the rkbuf to make sure the
-         * consume_cb() has been called for each of these ops
-         * before the rkbuf and its backing memory buffers are
-         * freed. */
- rko->rko_u.fetch.rkbuf = rkbuf;
- rd_kafka_buf_keep(rkbuf);
-
- rkm->rkm_offset = offset;
-
- rkm->rkm_key = (void *)key;
- rkm->rkm_key_len = key_len;
-
- rkm->rkm_payload = (void *)val;
- rkm->rkm_len = val_len;
- rko->rko_len = (int32_t)rkm->rkm_len;
-
- rkm->rkm_partition = rktp->rktp_partition;
-
- /* Persistence status is always PERSISTED for consumed messages
- * since we managed to read the message. */
- rkm->rkm_status = RD_KAFKA_MSG_STATUS_PERSISTED;
-
- return rko;
-}
-
-
-/**
- * Enqueue ERR__THROTTLE op, if desired.
- */
-void rd_kafka_op_throttle_time(rd_kafka_broker_t *rkb,
- rd_kafka_q_t *rkq,
- int throttle_time) {
- rd_kafka_op_t *rko;
-
- if (unlikely(throttle_time > 0))
- rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);
-
- /* We send throttle events when:
- * - throttle_time > 0
- * - throttle_time == 0 and last throttle_time > 0
- */
- if (!rkb->rkb_rk->rk_conf.throttle_cb ||
- (!throttle_time &&
- !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle)))
- return;
-
- rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
- rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename);
- rko->rko_u.throttle.nodeid = rkb->rkb_nodeid;
- rko->rko_u.throttle.throttle_time = throttle_time;
- rd_kafka_q_enq(rkq, rko);
-}
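-
-/*
- * Illustrative sequence (not from the original source): every
- * throttled response is reported, plus one trailing event when
- * throttling ends, while repeated zeroes are suppressed:
- *
- *   throttle_time=100 -> event
- *   throttle_time=80  -> event  (every non-zero time is reported)
- *   throttle_time=0   -> event  (end of throttling; last was 80)
- *   throttle_time=0   -> no event  (already at 0)
- */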
-
-
-/**
- * @brief Handle standard op types.
- */
-rd_kafka_op_res_t rd_kafka_op_handle_std(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- int cb_type) {
- if (cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
- return RD_KAFKA_OP_RES_PASS;
- else if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) {
- /* Control messages must not be exposed to the application
- * but we need to store their offsets. */
- rd_kafka_fetch_op_app_prepare(rk, rko);
- return RD_KAFKA_OP_RES_HANDLED;
- } else if (cb_type != RD_KAFKA_Q_CB_EVENT &&
- rko->rko_type & RD_KAFKA_OP_CB)
- return rd_kafka_op_call(rk, rkq, rko);
- else if (rko->rko_type == RD_KAFKA_OP_RECV_BUF) /* Handle Response */
- rd_kafka_buf_handle_op(rko, rko->rko_err);
- else if (cb_type != RD_KAFKA_Q_CB_RETURN &&
- rko->rko_type & RD_KAFKA_OP_REPLY &&
- rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED; /* dest queue was
- * probably disabled. */
- else
- return RD_KAFKA_OP_RES_PASS;
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Attempt to handle op using its queue's serve callback,
- * or the passed callback, or op_handle_std(), else do nothing.
- *
- * @param rkq is \p rko's queue (which it was unlinked from) with rkq_lock
- * being held. Callback may re-enqueue the op on this queue
- * and return YIELD.
- *
- * @returns HANDLED if op was handled (and destroyed), PASS if not,
- * or YIELD if op was handled (maybe destroyed or re-enqueued)
- * and caller must propagate yield upwards (cancel and return).
- */
-rd_kafka_op_res_t rd_kafka_op_handle(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque,
- rd_kafka_q_serve_cb_t *callback) {
- rd_kafka_op_res_t res;
-
- if (rko->rko_serve) {
- callback = rko->rko_serve;
- opaque = rko->rko_serve_opaque;
- rko->rko_serve = NULL;
- rko->rko_serve_opaque = NULL;
- }
-
- res = rd_kafka_op_handle_std(rk, rkq, rko, cb_type);
- if (res == RD_KAFKA_OP_RES_KEEP) {
- /* Op was handled but must not be destroyed. */
- return res;
- }
- if (res == RD_KAFKA_OP_RES_HANDLED) {
- rd_kafka_op_destroy(rko);
- return res;
- } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD))
- return res;
-
- if (callback)
- res = callback(rk, rkq, rko, cb_type, opaque);
-
- return res;
-}
-
-
-/**
- * @brief Prepare passing message to application.
- * This must be called just prior to passing/returning a consumed
- * message to the application.
- *
- * Performs:
- * - Stores the offset of the fetched message + 1.
- * - Updates the application position (rktp_app_pos).
- *
- * @locks rktp_lock and rk_lock MUST NOT be held
- */
-void rd_kafka_fetch_op_app_prepare(rd_kafka_t *rk, rd_kafka_op_t *rko) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_fetch_pos_t pos;
-
- if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err))
- return;
-
- rktp = rko->rko_rktp;
-
- if (unlikely(!rk))
- rk = rktp->rktp_rkt->rkt_rk;
-
- pos.offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1;
- pos.leader_epoch = rko->rko_u.fetch.rkm.rkm_u.consumer.leader_epoch;
-
- rd_kafka_update_app_pos(rk, rktp, pos, RD_DO_LOCK);
-}
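-
-/*
- * Illustrative example: for a consumed message at offset 41 with
- * leader epoch 7 the stored position becomes {42, 7}, i.e. the
- * offset of the next message to fetch, which is what a subsequent
- * commit should contain:
- *
- *   pos.offset       = 41 + 1;
- *   pos.leader_epoch = 7;
- *   rd_kafka_update_app_pos(rk, rktp, pos, RD_DO_LOCK);
- */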
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h
deleted file mode 100644
index 57c07491a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h
+++ /dev/null
@@ -1,778 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_OP_H_
-#define _RDKAFKA_OP_H_
-
-
-#include "rdkafka_msg.h"
-#include "rdkafka_timer.h"
-#include "rdkafka_admin.h"
-
-
-/* Forward declarations */
-typedef struct rd_kafka_q_s rd_kafka_q_t;
-typedef struct rd_kafka_toppar_s rd_kafka_toppar_t;
-typedef struct rd_kafka_op_s rd_kafka_op_t;
-
-/* One-off reply queue + reply version.
- * All APIs that take a rd_kafka_replyq_t make a copy of the
- * struct as-is and grab hold of the existing .q refcount.
- * Think of replyq as a (Q,VERSION) tuple. */
-typedef struct rd_kafka_replyq_s {
- rd_kafka_q_t *q;
- int32_t version;
-#if ENABLE_DEVEL
- char *_id; /* Devel id used for debugging reference leaks.
- * Is a strdup() of the caller's function name,
- * which makes for easy debugging with valgrind. */
-#endif
-} rd_kafka_replyq_t;
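-
-/*
- * Illustrative sketch (not from the original source): a replyq binds
- * a queue to a version barrier so stale replies can be filtered out;
- * it is typically constructed with the RD_KAFKA_REPLYQ() macro:
- *
- *   rd_kafka_replyq_t replyq = RD_KAFKA_REPLYQ(rktp->rktp_ops, version);
- */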
-
-
-
-/**
- * Flags used by:
- * - rd_kafka_op_t.rko_flags
- * - rd_kafka_buf_t.rkbuf_flags
- */
-#define RD_KAFKA_OP_F_FREE 0x1 /* rd_free payload when done with it */
-#define RD_KAFKA_OP_F_NO_RESPONSE 0x2 /* rkbuf: Not expecting a response */
-#define RD_KAFKA_OP_F_CRC 0x4 /* rkbuf: Perform CRC calculation */
-#define RD_KAFKA_OP_F_BLOCKING 0x8 /* rkbuf: blocking protocol request */
-#define RD_KAFKA_OP_F_REPROCESS 0x10 /* cgrp: Reprocess at a later time. */
-#define RD_KAFKA_OP_F_SENT 0x20 /* rkbuf: request sent on wire */
-#define RD_KAFKA_OP_F_FLEXVER \
- 0x40 /* rkbuf: flexible protocol version \
- * (KIP-482) */
-#define RD_KAFKA_OP_F_NEED_MAKE \
- 0x80 /* rkbuf: request content has not \
- * been made yet, the make \
- * callback will be triggered \
- * to construct the request \
- * right before it is sent. */
-#define RD_KAFKA_OP_F_FORCE_CB \
- 0x100 /* rko: force callback even if \
- * op type is eventable. */
-
-typedef enum {
- RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */
- RD_KAFKA_OP_FETCH, /* Kafka thread -> Application */
- RD_KAFKA_OP_ERR, /* Kafka thread -> Application */
- RD_KAFKA_OP_CONSUMER_ERR, /* Kafka thread -> Application */
- RD_KAFKA_OP_DR, /* Kafka thread -> Application
- * Produce message delivery report */
- RD_KAFKA_OP_STATS, /* Kafka thread -> Application */
-
- RD_KAFKA_OP_OFFSET_COMMIT, /* any -> toppar's Broker thread */
- RD_KAFKA_OP_NODE_UPDATE, /* any -> Broker thread: node update */
-
- RD_KAFKA_OP_XMIT_BUF, /* transmit buffer: any -> broker thread */
- RD_KAFKA_OP_RECV_BUF, /* received response buffer: broker thr -> any */
- RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */
- RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */
- RD_KAFKA_OP_FETCH_STOP, /* Application -> toppar's handler thread */
- RD_KAFKA_OP_SEEK, /* Application -> toppar's handler thread */
- RD_KAFKA_OP_PAUSE, /* Application -> toppar's handler thread */
- RD_KAFKA_OP_OFFSET_FETCH, /* Broker -> broker thread: fetch offsets
- * for topic. */
-
- RD_KAFKA_OP_PARTITION_JOIN, /* * -> cgrp op: add toppar to cgrp
- * * -> broker op: add toppar to broker */
- RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op: remove toppar from cgrp
- * * -> broker op: remove toppar from rkb*/
- RD_KAFKA_OP_REBALANCE, /* broker thread -> app:
- * group rebalance */
- RD_KAFKA_OP_TERMINATE, /* For generic use */
- RD_KAFKA_OP_COORD_QUERY, /* Query for coordinator */
- RD_KAFKA_OP_SUBSCRIBE, /* New subscription */
- RD_KAFKA_OP_ASSIGN, /* New assignment */
- RD_KAFKA_OP_GET_SUBSCRIPTION, /* Get current subscription.
- * Reuses u.subscribe */
- RD_KAFKA_OP_GET_ASSIGNMENT, /* Get current assignment.
- * Reuses u.assign */
- RD_KAFKA_OP_THROTTLE, /* Throttle info */
- RD_KAFKA_OP_NAME, /* Request name */
- RD_KAFKA_OP_CG_METADATA, /**< Request consumer metadata */
- RD_KAFKA_OP_OFFSET_RESET, /* Offset reset */
- RD_KAFKA_OP_METADATA, /* Metadata response */
- RD_KAFKA_OP_LOG, /* Log */
- RD_KAFKA_OP_WAKEUP, /* Wake-up signaling */
- RD_KAFKA_OP_CREATETOPICS, /**< Admin: CreateTopics: u.admin_request*/
- RD_KAFKA_OP_DELETETOPICS, /**< Admin: DeleteTopics: u.admin_request*/
- RD_KAFKA_OP_CREATEPARTITIONS, /**< Admin: CreatePartitions:
- * u.admin_request*/
- RD_KAFKA_OP_ALTERCONFIGS, /**< Admin: AlterConfigs: u.admin_request*/
- RD_KAFKA_OP_DESCRIBECONFIGS, /**< Admin: DescribeConfigs:
- * u.admin_request*/
- RD_KAFKA_OP_DELETERECORDS, /**< Admin: DeleteRecords:
- * u.admin_request*/
- RD_KAFKA_OP_LISTCONSUMERGROUPS, /**< Admin:
- * ListConsumerGroups
- * u.admin_request */
- RD_KAFKA_OP_DESCRIBECONSUMERGROUPS, /**< Admin:
- * DescribeConsumerGroups
- * u.admin_request */
- RD_KAFKA_OP_DELETEGROUPS, /**< Admin: DeleteGroups: u.admin_request*/
- RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, /**< Admin:
- * DeleteConsumerGroupOffsets
- * u.admin_request */
- RD_KAFKA_OP_CREATEACLS, /**< Admin: CreateAcls: u.admin_request*/
- RD_KAFKA_OP_DESCRIBEACLS, /**< Admin: DescribeAcls: u.admin_request*/
- RD_KAFKA_OP_DELETEACLS, /**< Admin: DeleteAcls: u.admin_request*/
- RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS, /**< Admin:
- * AlterConsumerGroupOffsets
- * u.admin_request */
- RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS, /**< Admin:
- * ListConsumerGroupOffsets
- * u.admin_request */
- RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */
- RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */
- RD_KAFKA_OP_PURGE, /**< Purge queues */
- RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */
- RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */
- RD_KAFKA_OP_MOCK, /**< Mock cluster command */
- RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */
- RD_KAFKA_OP_TXN, /**< Transaction command */
- RD_KAFKA_OP_GET_REBALANCE_PROTOCOL, /**< Get rebalance protocol */
- RD_KAFKA_OP_LEADERS, /**< Partition leader query */
- RD_KAFKA_OP_BARRIER, /**< Version barrier bump */
- RD_KAFKA_OP__END
-} rd_kafka_op_type_t;
-
-/* Flags used with op_type_t */
-#define RD_KAFKA_OP_CB (int)(1 << 29) /* Callback op. */
-#define RD_KAFKA_OP_REPLY (int)(1 << 30) /* Reply op. */
-#define RD_KAFKA_OP_FLAGMASK (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY)
-
-
-/**
- * @brief Op/queue priority levels.
- * @remark Since priority levels alter the FIFO order, pay extra attention
- * to preserve ordering as deemed necessary.
- * @remark Priority should only be set on ops destined for application
- * facing queues (rk_rep, rkcg_q, etc).
- */
-typedef enum {
- RD_KAFKA_PRIO_NORMAL = 0, /* Normal bulk, messages, DRs, etc. */
- RD_KAFKA_PRIO_MEDIUM, /* Prioritize in front of bulk,
-                               * still at some scale, e.g. logs, .. */
- RD_KAFKA_PRIO_HIGH, /* Small scale high priority */
- RD_KAFKA_PRIO_FLASH /* Micro scale, immediate delivery. */
-} rd_kafka_prio_t;
-
-
-/**
- * @brief Op handler result
- *
- * @remark When returning YIELD from a handler the handler will
- * need to have made sure to either re-enqueue the op or destroy it
- * since the caller will not touch the op anymore.
- */
-typedef enum {
- RD_KAFKA_OP_RES_PASS, /* Not handled, pass to caller */
- RD_KAFKA_OP_RES_HANDLED, /* Op was handled (through callbacks) */
- RD_KAFKA_OP_RES_KEEP, /* Op was handled (through callbacks)
- * but must not be destroyed by op_handle().
- * It is NOT PERMITTED to return RES_KEEP
- * from a callback handling a ERR__DESTROY
- * event. */
- RD_KAFKA_OP_RES_YIELD /* Callback called yield */
-} rd_kafka_op_res_t;
-
-
-/**
- * @brief Queue serve callback call type
- */
-typedef enum {
-        RD_KAFKA_Q_CB_INVALID,      /* don't use */
- RD_KAFKA_Q_CB_CALLBACK, /* trigger callback based on op */
- RD_KAFKA_Q_CB_RETURN, /* return op rather than trigger callback
- * (if possible)*/
- RD_KAFKA_Q_CB_FORCE_RETURN, /* return op, regardless of callback. */
- RD_KAFKA_Q_CB_EVENT /* like _Q_CB_RETURN but return event_t:ed op */
-} rd_kafka_q_cb_type_t;
-
-/**
- * @brief Queue serve callback
- * @remark See rd_kafka_op_res_t docs for return semantics.
- */
-typedef rd_kafka_op_res_t(rd_kafka_q_serve_cb_t)(rd_kafka_t *rk,
- struct rd_kafka_q_s *rkq,
- struct rd_kafka_op_s *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque)
- RD_WARN_UNUSED_RESULT;
-
-/**
- * @brief Enumerates the assign op sub-types.
- */
-typedef enum {
- RD_KAFKA_ASSIGN_METHOD_ASSIGN, /**< Absolute assign/unassign */
- RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN, /**< Incremental assign */
- RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN /**< Incremental unassign */
-} rd_kafka_assign_method_t;
-
-/**
- * @brief Op callback type
- */
-typedef rd_kafka_op_res_t(rd_kafka_op_cb_t)(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- struct rd_kafka_op_s *rko)
- RD_WARN_UNUSED_RESULT;
-
-/* Forward declaration */
-struct rd_kafka_admin_worker_cbs;
-struct rd_kafka_admin_fanout_worker_cbs;
-
-
-#define RD_KAFKA_OP_TYPE_ASSERT(rko, type) \
- rd_assert(((rko)->rko_type & ~RD_KAFKA_OP_FLAGMASK) == (type))
-
-struct rd_kafka_op_s {
- TAILQ_ENTRY(rd_kafka_op_s) rko_link;
-
- rd_kafka_op_type_t rko_type; /* Internal op type */
- rd_kafka_event_type_t rko_evtype;
- int rko_flags; /* See RD_KAFKA_OP_F_... above */
- int32_t rko_version;
- rd_kafka_resp_err_t rko_err;
- rd_kafka_error_t *rko_error;
- int32_t rko_len; /* Depends on type, typically the
- * message length. */
- rd_kafka_prio_t rko_prio; /**< In-queue priority.
- * Higher value means higher prio*/
-
- rd_kafka_toppar_t *rko_rktp;
-
- /*
- * Generic fields
- */
-
- /* Indicates request: enqueue reply on rko_replyq.q with .version.
- * .q is refcounted. */
- rd_kafka_replyq_t rko_replyq;
-
- /* Original queue's op serve callback and opaque, if any.
- * Mainly used for forwarded queues to use the original queue's
- * serve function from the forwarded position. */
- rd_kafka_q_serve_cb_t *rko_serve;
- void *rko_serve_opaque;
-
- rd_kafka_t *rko_rk;
-
-#if ENABLE_DEVEL
- const char *rko_source; /**< Where op was created */
-#endif
-
- /* RD_KAFKA_OP_CB */
- rd_kafka_op_cb_t *rko_op_cb;
-
- union {
- struct {
- rd_kafka_buf_t *rkbuf;
- rd_kafka_msg_t rkm;
- int evidx;
- } fetch;
-
- struct {
- rd_kafka_topic_partition_list_t *partitions;
-                /** Require stable (txn-committed) offsets */
- rd_bool_t require_stable_offsets;
- int do_free; /* free .partitions on destroy() */
- } offset_fetch;
-
- struct {
- rd_kafka_topic_partition_list_t *partitions;
- void (*cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque);
- void *opaque;
- int silent_empty; /**< Fail silently if there are no
- * offsets to commit. */
- rd_ts_t ts_timeout;
- char *reason;
- } offset_commit;
-
- struct {
- rd_kafka_topic_partition_list_t *topics;
- } subscribe; /* also used for GET_SUBSCRIPTION */
-
- struct {
- rd_kafka_topic_partition_list_t *partitions;
- rd_kafka_assign_method_t method;
- } assign; /* also used for GET_ASSIGNMENT */
-
- struct {
- rd_kafka_topic_partition_list_t *partitions;
- } rebalance;
-
- struct {
- const char *str;
- } rebalance_protocol;
-
- struct {
- char *str;
- } name;
-
- rd_kafka_consumer_group_metadata_t *cg_metadata;
-
- struct {
- int64_t offset;
- char *errstr;
- rd_kafka_msg_t rkm;
- rd_kafka_topic_t *rkt;
-                int fatal; /**< This was an ERR__FATAL error that has
- * been translated to the fatal error
- * code. */
- } err; /* used for ERR and CONSUMER_ERR */
-
- struct {
- int throttle_time;
- int32_t nodeid;
- char *nodename;
- } throttle;
-
- struct {
- char *json;
- size_t json_len;
- } stats;
-
- struct {
- rd_kafka_buf_t *rkbuf;
- } xbuf; /* XMIT_BUF and RECV_BUF */
-
- /* RD_KAFKA_OP_METADATA */
- struct {
- rd_kafka_metadata_t *md;
- int force; /* force request regardless of outstanding
- * metadata requests. */
- } metadata;
-
- struct {
- rd_kafka_topic_t *rkt;
- rd_kafka_msgq_t msgq;
- rd_kafka_msgq_t msgq2;
- int do_purge2;
- } dr;
-
- struct {
- int32_t nodeid;
- char nodename[RD_KAFKA_NODENAME_SIZE];
- } node;
-
- struct {
- rd_kafka_fetch_pos_t pos;
- int32_t broker_id; /**< Originating broker, or -1 */
- char *reason;
- } offset_reset;
-
- struct {
- rd_kafka_fetch_pos_t pos;
- struct rd_kafka_cgrp_s *rkcg;
- } fetch_start; /* reused for SEEK */
-
- struct {
- int pause;
- int flag;
- } pause;
-
- struct {
- char fac[64];
- int level;
- char *str;
- int ctx;
- } log;
-
- struct {
- rd_kafka_AdminOptions_t options; /**< Copy of user's
- * options */
- rd_ts_t abs_timeout; /**< Absolute timeout
- * for this request. */
- rd_kafka_timer_t tmr; /**< Timeout timer */
- struct rd_kafka_enq_once_s *eonce; /**< Enqueue op
- * only once,
- * used to
- * (re)trigger
- * the request op
- * upon broker state
- * changes while
- * waiting for the
- * controller, or
- * due to .tmr
- * timeout. */
- rd_list_t
- args; /**< Type depends on request, e.g.
- * rd_kafka_NewTopic_t for CreateTopics
- */
-
- rd_kafka_buf_t *reply_buf; /**< Protocol reply,
- * temporary reference not
- * owned by this rko */
-
- /**< Worker callbacks, see rdkafka_admin.c */
- struct rd_kafka_admin_worker_cbs *cbs;
-
- /** Worker state */
- enum { RD_KAFKA_ADMIN_STATE_INIT,
- RD_KAFKA_ADMIN_STATE_WAIT_BROKER,
- RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER,
- RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS,
- RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST,
- RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE,
- RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST,
- } state;
-
- int32_t broker_id; /**< Requested broker id to
- * communicate with.
-                                    * Used for AlterConfigs et al.
-                                    * that need to speak to a
- * specific broker rather than
- * the controller.
- * See RD_KAFKA_ADMIN_TARGET_..
- * for special values (coordinator,
- * fanout, etc).
- */
- /** The type of coordinator to look up */
- rd_kafka_coordtype_t coordtype;
- /** Which coordinator to look up */
- char *coordkey;
-
- /** Application's reply queue */
- rd_kafka_replyq_t replyq;
- rd_kafka_event_type_t reply_event_type;
-
- /** A collection of fanout child ops. */
- struct {
- /** The type of request being fanned out.
- * This is used for the ADMIN_RESULT. */
- rd_kafka_op_type_t reqtype;
-
- /** Worker callbacks, see rdkafka_admin.c */
- struct rd_kafka_admin_fanout_worker_cbs *cbs;
-
- /** Number of outstanding requests remaining to
- * wait for. */
- int outstanding;
-
- /** Incremental results from fanouts.
- * This list is pre-allocated to the number
- * of input objects and can thus be set
- * by index to retain original ordering. */
- rd_list_t results;
-
- /** Reply event type */
- rd_kafka_event_type_t reply_event_type;
-
- } fanout;
-
- /** A reference to the parent ADMIN_FANOUT op that
- * spawned this op, if applicable. NULL otherwise. */
- struct rd_kafka_op_s *fanout_parent;
-
- } admin_request;
-
- struct {
- rd_kafka_op_type_t reqtype; /**< Request op type,
- * used for logging. */
-
- rd_list_t args; /**< Args moved from the request op
- * when the result op is created.
- *
- * Type depends on request.
- */
-
- char *errstr; /**< Error string, if rko_err
- * is set, else NULL. */
-
- rd_list_t results; /**< Type depends on request type:
- *
- * (rd_kafka_topic_result_t *):
- * CreateTopics, DeleteTopics,
- * CreatePartitions.
- *
- * (rd_kafka_ConfigResource_t *):
- * AlterConfigs, DescribeConfigs
- */
-
- void *opaque; /**< Application's opaque as set by
- * rd_kafka_AdminOptions_set_opaque
- */
-
- /** A reference to the parent ADMIN_FANOUT op that
- * spawned this op, if applicable. NULL otherwise. */
- struct rd_kafka_op_s *fanout_parent;
- } admin_result;
-
- struct {
- int flags; /**< purge_flags from rd_kafka_purge() */
- } purge;
-
- /**< Mock cluster command */
- struct {
- enum { RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR,
- RD_KAFKA_MOCK_CMD_TOPIC_CREATE,
- RD_KAFKA_MOCK_CMD_PART_SET_LEADER,
- RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER,
- RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS,
- RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN,
- RD_KAFKA_MOCK_CMD_BROKER_SET_RTT,
- RD_KAFKA_MOCK_CMD_BROKER_SET_RACK,
- RD_KAFKA_MOCK_CMD_COORD_SET,
- RD_KAFKA_MOCK_CMD_APIVERSION_SET,
- } cmd;
-
- rd_kafka_resp_err_t err; /**< Error for:
- * TOPIC_SET_ERROR */
- char *name; /**< For:
- * TOPIC_SET_ERROR
- * TOPIC_CREATE
- * PART_SET_FOLLOWER
- * PART_SET_FOLLOWER_WMARKS
- * BROKER_SET_RACK
- * COORD_SET (key_type) */
- char *str; /**< For:
- * COORD_SET (key) */
- int32_t partition; /**< For:
- * PART_SET_FOLLOWER
- * PART_SET_FOLLOWER_WMARKS
- * PART_SET_LEADER
- * APIVERSION_SET (ApiKey)
- */
- int32_t broker_id; /**< For:
- * PART_SET_FOLLOWER
- * PART_SET_LEADER
- * BROKER_SET_UPDOWN
- * BROKER_SET_RACK
- * COORD_SET */
- int64_t lo; /**< Low offset, for:
- * TOPIC_CREATE (part cnt)
- * PART_SET_FOLLOWER_WMARKS
- * BROKER_SET_UPDOWN
- * APIVERSION_SET (minver)
- * BROKER_SET_RTT
- */
- int64_t hi; /**< High offset, for:
- * TOPIC_CREATE (repl fact)
- * PART_SET_FOLLOWER_WMARKS
- * APIVERSION_SET (maxver)
- */
- } mock;
-
- struct {
- struct rd_kafka_broker_s *rkb; /**< Broker who's state
- * changed. */
- /**< Callback to trigger on the op handler's thread. */
- void (*cb)(struct rd_kafka_broker_s *rkb);
- } broker_monitor;
-
- struct {
- /** Consumer group metadata for send_offsets_to.. */
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- /** Consumer group id for AddOffsetsTo.. */
- char *group_id;
- int timeout_ms; /**< Operation timeout */
- rd_ts_t abs_timeout; /**< Absolute time */
- /**< Offsets to commit */
- rd_kafka_topic_partition_list_t *offsets;
- } txn;
-
- struct {
-                /* This struct serves two purposes: the fields
-                 * marked "Request:" hold the async worker's state,
-                 * while the "Reply:" fields form a separate reply
-                 * rko that is enqueued for the caller upon
-                 * completion or failure. */
-
- /** Request: Partitions to query.
- * Reply: Queried partitions with .err field set. */
- rd_kafka_topic_partition_list_t *partitions;
-
- /** Request: Absolute timeout */
- rd_ts_t ts_timeout;
-
- /** Request: Metadata query timer */
- rd_kafka_timer_t query_tmr;
-
- /** Request: Timeout timer */
- rd_kafka_timer_t timeout_tmr;
-
- /** Request: Enqueue op only once, used to (re)trigger
- * metadata cache lookups, topic refresh, timeout. */
- struct rd_kafka_enq_once_s *eonce;
-
- /** Request: Caller's replyq */
- rd_kafka_replyq_t replyq;
-
- /** Request: Number of metadata queries made. */
- int query_cnt;
-
- /** Reply: Leaders (result)
- * (rd_kafka_partition_leader*) */
- rd_list_t *leaders;
-
- /** Reply: Callback on completion (or failure) */
- rd_kafka_op_cb_t *cb;
-
- /** Reply: Callback opaque */
- void *opaque;
-
- } leaders;
-
- } rko_u;
-};
-
-TAILQ_HEAD(rd_kafka_op_head_s, rd_kafka_op_s);
-
-
-
-const char *rd_kafka_op2str(rd_kafka_op_type_t type);
-void rd_kafka_op_destroy(rd_kafka_op_t *rko);
-rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type);
-#if ENABLE_DEVEL
-#define _STRINGIFYX(A) #A
-#define _STRINGIFY(A) _STRINGIFYX(A)
-#define rd_kafka_op_new(type) \
- rd_kafka_op_new0(__FILE__ ":" _STRINGIFY(__LINE__), type)
-#else
-#define rd_kafka_op_new(type) rd_kafka_op_new0(NULL, type)
-#endif
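-/* Illustrative sketch (not part of librdkafka): the two-step
- * stringification above is the standard idiom for embedding __LINE__
- * as a string literal; a single-level #A would produce the literal
- * "__LINE__" rather than the expanded line number. */
-#if 0
-#define STR1(A) #A      /* STR1(__LINE__) -> "__LINE__" (unexpanded) */
-#define STR2(A) STR1(A) /* STR2(__LINE__) -> e.g. "42"  (expanded)   */
-#endif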
-rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig,
- rd_kafka_resp_err_t err);
-rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk,
- rd_kafka_op_type_t type,
- rd_kafka_op_cb_t *cb);
-int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err);
-int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error);
-
-#define rd_kafka_op_set_prio(rko, prio) ((rko)->rko_prio = prio)
-
-#define rd_kafka_op_err(rk, err, ...) \
- do { \
- if (!((rk)->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)) { \
- rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \
- break; \
- } \
- rd_kafka_q_op_err((rk)->rk_rep, err, __VA_ARGS__); \
- } while (0)
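-/* Illustrative usage (not part of librdkafka; `reason' is a
- * hypothetical variable): if the application has not enabled
- * RD_KAFKA_EVENT_ERROR the error is only logged, otherwise it is
- * enqueued on the main reply queue (rk_rep): */
-#if 0
-rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__TRANSPORT,
-                "broker connection down: %s", reason);
-#endif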
-
-void rd_kafka_q_op_err(rd_kafka_q_t *rkq,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) RD_FORMAT(printf, 3, 4);
-void rd_kafka_consumer_err(rd_kafka_q_t *rkq,
- int32_t broker_id,
- rd_kafka_resp_err_t err,
- int32_t version,
- const char *topic,
- rd_kafka_toppar_t *rktp,
- int64_t offset,
- const char *fmt,
- ...) RD_FORMAT(printf, 8, 9);
-rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq,
- rd_kafka_q_t *recvq,
- rd_kafka_op_t *rko,
- int timeout_ms);
-rd_kafka_op_t *
-rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms);
-rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type);
-rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko);
-rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko);
-
-rd_kafka_op_res_t rd_kafka_op_call(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) RD_WARN_UNUSED_RESULT;
-
-rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp,
- rd_kafka_toppar_t *rktp,
- int32_t version,
- rd_kafka_buf_t *rkbuf,
- int64_t offset,
- size_t key_len,
- const void *key,
- size_t val_len,
- const void *val);
-
-rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp,
- int32_t version,
- rd_kafka_buf_t *rkbuf,
- int64_t offset);
-
-void rd_kafka_op_throttle_time(struct rd_kafka_broker_s *rkb,
- rd_kafka_q_t *rkq,
- int throttle_time);
-
-
-rd_kafka_op_res_t
-rd_kafka_op_handle(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque,
- rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT;
-
-
-extern rd_atomic32_t rd_kafka_op_cnt;
-
-void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko);
-
-void rd_kafka_fetch_op_app_prepare(rd_kafka_t *rk, rd_kafka_op_t *rko);
-
-
-#define rd_kafka_op_is_ctrl_msg(rko) \
- ((rko)->rko_type == RD_KAFKA_OP_FETCH && !(rko)->rko_err && \
- ((rko)->rko_u.fetch.rkm.rkm_flags & RD_KAFKA_MSG_F_CONTROL))
-
-
-
-/**
- * @returns true if the rko's replyq is valid and the
- * rko's rktp version (if any) is not outdated.
- */
-#define rd_kafka_op_replyq_is_valid(RKO) \
- (rd_kafka_replyq_is_valid(&(RKO)->rko_replyq) && \
- !rd_kafka_op_version_outdated((RKO), 0))
-
-
-
-/**
- * @returns the rko for a consumer message (RD_KAFKA_OP_FETCH).
- */
-static RD_UNUSED rd_kafka_op_t *
-rd_kafka_message2rko(rd_kafka_message_t *rkmessage) {
- rd_kafka_op_t *rko = rkmessage->_private;
-
- if (!rko || rko->rko_type != RD_KAFKA_OP_FETCH)
- return NULL;
-
- return rko;
-}
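-/* Illustrative usage sketch (not part of librdkafka): recover the rko
- * behind a consumed message and test the control-message flag defined
- * above. Assumes \p rkmessage was returned by the consumer API. */
-#if 0
-static void example_skip_ctrl_msg(rd_kafka_message_t *rkmessage) {
-        rd_kafka_op_t *rko = rd_kafka_message2rko(rkmessage);
-
-        if (rko && rd_kafka_op_is_ctrl_msg(rko))
-                return; /* Transaction control record: not for the app */
-}
-#endif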
-
-
-
-#endif /* _RDKAFKA_OP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c
deleted file mode 100644
index 46d2fb3ed..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c
+++ /dev/null
@@ -1,4301 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_fetcher.h"
-#include "rdregex.h"
-#include "rdports.h" /* rd_qsort_r() */
-
-#include "rdunittest.h"
-
-const char *rd_kafka_fetch_states[] = {"none", "stopping",
- "stopped", "offset-query",
- "offset-wait", "validate-epoch-wait",
- "active"};
-
-
-static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque);
-
-static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp,
- int backoff_ms,
- const char *reason);
-
-
-static RD_INLINE int32_t
-rd_kafka_toppar_version_new_barrier0(rd_kafka_toppar_t *rktp,
- const char *func,
- int line) {
- int32_t version = rd_atomic32_add(&rktp->rktp_version, 1);
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER",
- "%s [%" PRId32 "]: %s:%d: new version barrier v%" PRId32,
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, func,
- line, version);
- return version;
-}
-
-#define rd_kafka_toppar_version_new_barrier(rktp) \
- rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__)
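-/* Illustrative note (not part of librdkafka): ops and protocol requests
- * snapshot the toppar version at creation time, and handlers discard
- * anything older than the current version, e.g. as in
- * rd_kafka_toppar_handle_Offset() further down: */
-#if 0
-if (rd_kafka_buf_version_outdated(request, rktp->rktp_op_version))
-        err = RD_KAFKA_RESP_ERR__OUTDATED; /* Stale reply, drop it */
-#endif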
-
-
-/**
- * Toppar based OffsetResponse handling.
- * This is used for updating the low water mark for consumer lag.
- */
-static void rd_kafka_toppar_lag_handle_Offset(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_toppar_t *rktp = opaque;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_t *rktpar;
-
- offsets = rd_kafka_topic_partition_list_new(1);
-
- /* Parse and return Offset */
- err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets,
- NULL);
-
- if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
- rd_kafka_topic_partition_list_destroy(offsets);
- return; /* Retrying */
- }
-
- if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
- offsets, rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition)))
- err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- if (!err && !rktpar->err) {
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_lo_offset = rktpar->offset;
- rd_kafka_toppar_unlock(rktp);
- }
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
- rktp->rktp_wait_consumer_lag_resp = 0;
-
- rd_kafka_toppar_destroy(rktp); /* from request.opaque */
-}
-
-
-
-/**
- * Request information from broker to keep track of consumer lag.
- *
- * @locality toppar handle thread
- * @locks none
- */
-static void rd_kafka_toppar_consumer_lag_req(rd_kafka_toppar_t *rktp) {
- rd_kafka_topic_partition_list_t *partitions;
- rd_kafka_topic_partition_t *rktpar;
-
- if (rktp->rktp_wait_consumer_lag_resp)
- return; /* Previous request not finished yet */
-
- rd_kafka_toppar_lock(rktp);
-
- /* Offset requests can only be sent to the leader replica.
- *
- * Note: If rktp is delegated to a preferred replica, it is
- * certain that FETCH >= v5 and so rktp_lo_offset will be
- * updated via LogStartOffset in the FETCH response.
- */
- if (!rktp->rktp_leader || (rktp->rktp_leader != rktp->rktp_broker)) {
- rd_kafka_toppar_unlock(rktp);
- return;
- }
-
-        /* Also don't send a timed log start offset request if the
-         * leader broker supports FETCH >= v5, since rktp_lo_offset
-         * will then be updated via the fetch responses.
-         */
- if (rd_kafka_broker_ApiVersion_supported(
- rktp->rktp_broker, RD_KAFKAP_Fetch, 0, 5, NULL) == 5) {
- rd_kafka_toppar_unlock(rktp);
- return;
- }
-
- rktp->rktp_wait_consumer_lag_resp = 1;
-
- partitions = rd_kafka_topic_partition_list_new(1);
- rktpar = rd_kafka_topic_partition_list_add(
- partitions, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
- rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
- rd_kafka_topic_partition_set_current_leader_epoch(
- rktpar, rktp->rktp_leader_epoch);
-
- /* Ask for oldest offset. The newest offset is automatically
- * propagated in FetchResponse.HighwaterMark. */
- rd_kafka_ListOffsetsRequest(
- rktp->rktp_broker, partitions, RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
- rd_kafka_toppar_lag_handle_Offset, rd_kafka_toppar_keep(rktp));
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_topic_partition_list_destroy(partitions);
-}
-
-
-
-/**
- * Request earliest offset for a partition
- *
- * Locality: toppar handler thread
- */
-static void rd_kafka_toppar_consumer_lag_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_toppar_t *rktp = arg;
- rd_kafka_toppar_consumer_lag_req(rktp);
-}
-
-/**
- * @brief Update rktp_op_version.
- * Enqueue an RD_KAFKA_OP_BARRIER type of operation
- * when the op_version is updated.
- *
- * @locks_required rd_kafka_toppar_lock() must be held.
- * @locality Toppar handler thread
- */
-void rd_kafka_toppar_op_version_bump(rd_kafka_toppar_t *rktp, int32_t version) {
- rd_kafka_op_t *rko;
-
- rktp->rktp_op_version = version;
- rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER);
- rko->rko_version = version;
- rko->rko_prio = RD_KAFKA_PRIO_FLASH;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rd_kafka_q_enq(rktp->rktp_fetchq, rko);
-}
-
-
-/**
- * Add new partition to topic.
- *
- * Locks: rd_kafka_topic_wrlock() must be held.
- * Locks: rd_kafka_wrlock() must be held.
- */
-rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt,
- int32_t partition,
- const char *func,
- int line) {
- rd_kafka_toppar_t *rktp;
-
- rktp = rd_calloc(1, sizeof(*rktp));
-
- rktp->rktp_partition = partition;
- rktp->rktp_rkt = rkt;
- rktp->rktp_leader_id = -1;
- rktp->rktp_broker_id = -1;
- rktp->rktp_leader_epoch = -1;
- rd_interval_init(&rktp->rktp_lease_intvl);
- rd_interval_init(&rktp->rktp_new_lease_intvl);
- rd_interval_init(&rktp->rktp_new_lease_log_intvl);
- rd_interval_init(&rktp->rktp_metadata_intvl);
- /* Mark partition as unknown (does not exist) until we see the
- * partition in topic metadata. */
- if (partition != RD_KAFKA_PARTITION_UA)
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
- rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE;
- rktp->rktp_fetch_msg_max_bytes =
- rkt->rkt_rk->rk_conf.fetch_msg_max_bytes;
- rktp->rktp_offset_fp = NULL;
- rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
- rd_kafka_offset_stats_reset(&rktp->rktp_offsets_fin);
- rktp->rktp_ls_offset = RD_KAFKA_OFFSET_INVALID;
- rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID;
- rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID;
- rd_kafka_fetch_pos_init(&rktp->rktp_query_pos);
- rd_kafka_fetch_pos_init(&rktp->rktp_next_fetch_start);
- rd_kafka_fetch_pos_init(&rktp->rktp_last_next_fetch_start);
- rd_kafka_fetch_pos_init(&rktp->rktp_app_pos);
- rd_kafka_fetch_pos_init(&rktp->rktp_stored_pos);
- rd_kafka_fetch_pos_init(&rktp->rktp_committing_pos);
- rd_kafka_fetch_pos_init(&rktp->rktp_committed_pos);
- rd_kafka_msgq_init(&rktp->rktp_msgq);
- rd_kafka_msgq_init(&rktp->rktp_xmit_msgq);
- mtx_init(&rktp->rktp_lock, mtx_plain);
-
- rd_refcnt_init(&rktp->rktp_refcnt, 0);
- rktp->rktp_fetchq = rd_kafka_q_new(rkt->rkt_rk);
- rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk);
- rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve;
- rktp->rktp_ops->rkq_opaque = rktp;
- rd_atomic32_init(&rktp->rktp_version, 1);
- rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version);
-
- rd_atomic32_init(&rktp->rktp_msgs_inflight, 0);
- rd_kafka_pid_reset(&rktp->rktp_eos.pid);
-
-        /* Consumer: If statistics are enabled we query the log start offset
-         * of each partition.
-         * Since the oldest offset only moves on log retention, we cap this
-         * value on the low end to a reasonable value to avoid flooding
-         * the brokers with OffsetRequests when our statistics interval is low.
-         * FIXME: Use a global timer to collect offsets for all partitions
-         * FIXME: This timer is superfluous for FETCH >= v5 because the log
-         *        start offset is included in fetch responses.
-         */
- if (rktp->rktp_rkt->rkt_rk->rk_conf.stats_interval_ms > 0 &&
- rkt->rkt_rk->rk_type == RD_KAFKA_CONSUMER &&
- rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
- int intvl = rkt->rkt_rk->rk_conf.stats_interval_ms;
- if (intvl < 10 * 1000 /* 10s */)
- intvl = 10 * 1000;
- rd_kafka_timer_start(
- &rkt->rkt_rk->rk_timers, &rktp->rktp_consumer_lag_tmr,
- intvl * 1000ll, rd_kafka_toppar_consumer_lag_tmr_cb, rktp);
- }
-
- rktp->rktp_rkt = rd_kafka_topic_keep(rkt);
-
- rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops);
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW",
- "NEW %s [%" PRId32 "] %p refcnt %p (at %s:%d)",
- rkt->rkt_topic->str, rktp->rktp_partition, rktp,
- &rktp->rktp_refcnt, func, line);
-
- return rd_kafka_toppar_keep(rktp);
-}
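-/* Worked example of the consumer-lag timer clamp above (illustrative,
- * not part of librdkafka): with statistics.interval.ms=1000 the
- * interval is raised to 10000 ms, so the timer fires at
- * 10000 * 1000ll = 10,000,000 us, i.e. at most once every 10 seconds,
- * bounding ListOffsets traffic even for very low stats intervals. */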
-
-
-
-/**
- * Removes a toppar from its duties, global lists, etc.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-static void rd_kafka_toppar_remove(rd_kafka_toppar_t *rktp) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARREMOVE",
- "Removing toppar %s [%" PRId32 "] %p",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rktp);
-
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_validate_tmr, 1 /*lock*/);
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr, 1 /*lock*/);
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_consumer_lag_tmr, 1 /*lock*/);
-
- rd_kafka_q_fwd_set(rktp->rktp_ops, NULL);
-}
-
-
-/**
- * Final destructor for partition.
- */
-void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp) {
-
- rd_kafka_toppar_remove(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY",
- "%s [%" PRId32 "]: %p DESTROY_FINAL",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rktp);
-
- /* Clear queues */
- rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
- rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0);
- rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq,
- RD_KAFKA_RESP_ERR__DESTROY);
- rd_kafka_q_destroy_owner(rktp->rktp_fetchq);
- rd_kafka_q_destroy_owner(rktp->rktp_ops);
-
- rd_kafka_replyq_destroy(&rktp->rktp_replyq);
-
- rd_kafka_topic_destroy0(rktp->rktp_rkt);
-
- mtx_destroy(&rktp->rktp_lock);
-
- if (rktp->rktp_leader)
- rd_kafka_broker_destroy(rktp->rktp_leader);
-
- rd_refcnt_destroy(&rktp->rktp_refcnt);
-
- rd_free(rktp);
-}
-
-
-/**
- * Set toppar fetching state.
- *
- * @locality any
- * @locks_required rd_kafka_toppar_lock() MUST be held.
- */
-void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state) {
- rd_kafka_assert(NULL,
- thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));
-
- if ((int)rktp->rktp_fetch_state == fetch_state)
- return;
-
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE",
- "Partition %.*s [%" PRId32 "] changed fetch state %s -> %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- rd_kafka_fetch_states[fetch_state]);
-
- rktp->rktp_fetch_state = fetch_state;
-
- if (fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE)
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, CONSUMER | RD_KAFKA_DBG_TOPIC,
- "FETCH",
- "Partition %.*s [%" PRId32 "] start fetching at %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start));
-}
-
-
-/**
- * Returns the appropriate toppar for a given rkt and partition.
- * The returned toppar has increased refcnt and must be unreffed by calling
- * rd_kafka_toppar_destroy().
- * May return NULL.
- *
- * If 'ua_on_miss' is true the UA (unassigned) toppar is returned if
- * 'partition' was not known locally, else NULL is returned.
- *
- * Locks: Caller must hold rd_kafka_topic_*lock()
- */
-rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func,
- int line,
- const rd_kafka_topic_t *rkt,
- int32_t partition,
- int ua_on_miss) {
- rd_kafka_toppar_t *rktp;
-
- if (partition >= 0 && partition < rkt->rkt_partition_cnt)
- rktp = rkt->rkt_p[partition];
- else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss)
- rktp = rkt->rkt_ua;
- else
- return NULL;
-
- if (rktp)
- return rd_kafka_toppar_keep_fl(func, line, rktp);
-
- return NULL;
-}
-
-
-/**
- * Same as rd_kafka_toppar_get() but requires no locking and
- * looks up the topic first.
- *
- * Locality: any
- * Locks: none
- */
-rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int ua_on_miss,
- int create_on_miss) {
- rd_kafka_topic_t *rkt;
- rd_kafka_toppar_t *rktp;
-
- rd_kafka_wrlock(rk);
-
- /* Find or create topic */
- if (unlikely(!(rkt = rd_kafka_topic_find(rk, topic, 0 /*no-lock*/)))) {
- if (!create_on_miss) {
- rd_kafka_wrunlock(rk);
- return NULL;
- }
- rkt = rd_kafka_topic_new0(rk, topic, NULL, NULL, 0 /*no-lock*/);
- if (!rkt) {
- rd_kafka_wrunlock(rk);
- rd_kafka_log(rk, LOG_ERR, "TOPIC",
- "Failed to create local topic \"%s\": %s",
- topic, rd_strerror(errno));
- return NULL;
- }
- }
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_topic_wrlock(rkt);
- rktp = rd_kafka_toppar_desired_add(rkt, partition);
- rd_kafka_topic_wrunlock(rkt);
-
- rd_kafka_topic_destroy0(rkt);
-
- return rktp;
-}
-
-
-/**
- * Returns a toppar if it is available in the cluster.
- * '*errp' is set to the error-code if lookup fails.
- *
- * Locks: topic_*lock() MUST be held
- */
-rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt,
- int32_t partition,
- int ua_on_miss,
- rd_kafka_resp_err_t *errp) {
- rd_kafka_toppar_t *rktp;
-
- switch (rkt->rkt_state) {
- case RD_KAFKA_TOPIC_S_UNKNOWN:
- /* No metadata received from cluster yet.
- * Put message in UA partition and re-run partitioner when
- * cluster comes up. */
- partition = RD_KAFKA_PARTITION_UA;
- break;
-
- case RD_KAFKA_TOPIC_S_NOTEXISTS:
- /* Topic not found in cluster.
- * Fail message immediately. */
- *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- return NULL;
-
- case RD_KAFKA_TOPIC_S_ERROR:
- /* Permanent topic error. */
- *errp = rkt->rkt_err;
- return NULL;
-
- case RD_KAFKA_TOPIC_S_EXISTS:
- /* Topic exists in cluster. */
-
- /* Topic exists but has no partitions.
-                 * This is usually a transient state following the
-                 * auto-creation of a topic. */
- if (unlikely(rkt->rkt_partition_cnt == 0)) {
- partition = RD_KAFKA_PARTITION_UA;
- break;
- }
-
- /* Check that partition exists. */
- if (partition >= rkt->rkt_partition_cnt) {
- *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- return NULL;
- }
- break;
-
- default:
- rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
- break;
- }
-
- /* Get new partition */
- rktp = rd_kafka_toppar_get(rkt, partition, 0);
-
- if (unlikely(!rktp)) {
- /* Unknown topic or partition */
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
- *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
- else
- *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- return NULL;
- }
-
- return rktp;
-}
-
-
-/**
- * Looks up the given partition in topic 'rkt's desired list.
- *
- * The desired partition list is the list of partitions that are desired
- * (e.g., by the consumer) but not yet seen on a broker.
- * As soon as the partition is seen on a broker the toppar is moved from
- * the desired list and onto the normal rkt_p array.
- * When the partition on the broker goes away a desired partition is put
- * back on the desired list.
- *
- * Locks: rd_kafka_topic_*lock() must be held.
- * Note: 'rktp' refcount is increased.
- */
-
-rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt,
- int32_t partition) {
- rd_kafka_toppar_t *rktp;
- int i;
-
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) {
- if (rktp->rktp_partition == partition)
- return rd_kafka_toppar_keep(rktp);
- }
-
- return NULL;
-}
-
-
-/**
- * Link toppar on desired list.
- *
- * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
- */
-void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp) {
-
- if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP)
- return; /* Already linked */
-
- rd_kafka_toppar_keep(rktp);
- rd_list_add(&rktp->rktp_rkt->rkt_desp, rktp);
- rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl);
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_DESP;
-}
-
-/**
- * Unlink toppar from desired list.
- *
- * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
- */
-void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp) {
- if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP))
- return; /* Not linked */
-
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_DESP;
- rd_list_remove(&rktp->rktp_rkt->rkt_desp, rktp);
- rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl);
- rd_kafka_toppar_destroy(rktp);
-}
-
-
-/**
- * @brief If rktp is not already desired:
- * - mark as DESIRED|~REMOVE
- * - add to desired list if unknown
- *
- * @remark toppar_lock() MUST be held
- */
-void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp) {
- if ((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
- return;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
- "%s [%" PRId32 "]: marking as DESIRED",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-
- /* If toppar was marked for removal this is no longer
- * the case since the partition is now desired. */
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_REMOVE;
-
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED;
-
- if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
- "%s [%" PRId32 "]: adding to DESIRED list",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition);
- rd_kafka_toppar_desired_link(rktp);
- }
-}
-
-
-/**
- * Adds 'partition' as a desired partition to topic 'rkt', or updates
- * an existing partition to be desired.
- *
- * Locks: rd_kafka_topic_wrlock() must be held.
- */
-rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt,
- int32_t partition) {
- rd_kafka_toppar_t *rktp;
-
- rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no_ua_on_miss*/);
-
- if (!rktp)
- rktp = rd_kafka_toppar_desired_get(rkt, partition);
-
- if (!rktp)
- rktp = rd_kafka_toppar_new(rkt, partition);
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_toppar_desired_add0(rktp);
- rd_kafka_toppar_unlock(rktp);
-
- return rktp; /* Callers refcount */
-}
-
-
-
-/**
- * Unmarks an 'rktp' as desired.
- *
- * Locks: rd_kafka_topic_wrlock() and rd_kafka_toppar_lock() MUST be held.
- */
-void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp) {
-
- if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
- return;
-
- rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED;
- rd_kafka_toppar_desired_unlink(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP",
- "Removing (un)desired topic %s [%" PRId32 "]",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
-
- if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) {
- /* If this partition does not exist in the cluster
- * and is no longer desired, remove it. */
- rd_kafka_toppar_broker_leave_for_remove(rktp);
- }
-}
-
-
-
-/**
- * Append message at tail of 'rktp' message queue.
- */
-void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
- rd_kafka_msg_t *rkm,
- rd_ts_t now) {
- rd_kafka_q_t *wakeup_q = NULL;
-
- rd_kafka_toppar_lock(rktp);
-
- if (!rkm->rkm_u.producer.msgid &&
- rktp->rktp_partition != RD_KAFKA_PARTITION_UA)
- rkm->rkm_u.producer.msgid = ++rktp->rktp_msgid;
-
- if (rktp->rktp_partition == RD_KAFKA_PARTITION_UA ||
- rktp->rktp_rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO) {
- /* No need for enq_sorted(), this is the oldest message. */
- rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm);
- } else {
- rd_kafka_msgq_enq_sorted(rktp->rktp_rkt, &rktp->rktp_msgq, rkm);
- }
-
- if (unlikely(rktp->rktp_partition != RD_KAFKA_PARTITION_UA &&
- rd_kafka_msgq_may_wakeup(&rktp->rktp_msgq, now) &&
- (wakeup_q = rktp->rktp_msgq_wakeup_q))) {
- /* Wake-up broker thread */
- rktp->rktp_msgq.rkmq_wakeup.signalled = rd_true;
- rd_kafka_q_keep(wakeup_q);
- }
-
- rd_kafka_toppar_unlock(rktp);
-
- if (unlikely(wakeup_q != NULL)) {
- rd_kafka_q_yield(wakeup_q);
- rd_kafka_q_destroy(wakeup_q);
- }
-}
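-/* Illustrative note (not part of librdkafka): the wakeup queue is
- * referenced (rd_kafka_q_keep()) while the toppar lock is held, but
- * only yielded and dereferenced after the unlock, keeping the
- * broker-thread wakeup outside the lock's critical section. */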
-
-
-/**
- * @brief Insert \p srcq before \p insert_before in \p destq.
- *
- * If \p srcq and \p destq overlap, only part of \p srcq will be inserted.
- *
- * Upon return \p srcq will contain any remaining messages that require
- * another insert position in \p destq.
- */
-static void rd_kafka_msgq_insert_msgq_before(rd_kafka_msgq_t *destq,
- rd_kafka_msg_t *insert_before,
- rd_kafka_msgq_t *srcq,
- int (*cmp)(const void *a,
- const void *b)) {
- rd_kafka_msg_t *slast;
- rd_kafka_msgq_t tmpq;
-
- if (!insert_before) {
- /* Append all of srcq to destq */
- rd_kafka_msgq_concat(destq, srcq);
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
- return;
- }
-
- slast = rd_kafka_msgq_last(srcq);
- rd_dassert(slast);
-
- if (cmp(slast, insert_before) > 0) {
- rd_kafka_msg_t *new_sfirst;
- int cnt;
- int64_t bytes;
-
-                /* destq's insert_before resides somewhere between
-                 * srcq.first and srcq.last: find the first message in
-                 * srcq that is > insert_before and split srcq into
-                 * a left part that contains the messages to insert before
-                 * insert_before, and a right part that will need another
-                 * insert position. */
-
- new_sfirst = rd_kafka_msgq_find_pos(srcq, NULL, insert_before,
- cmp, &cnt, &bytes);
- rd_assert(new_sfirst);
-
- /* split srcq into two parts using the divider message */
- rd_kafka_msgq_split(srcq, &tmpq, new_sfirst, cnt, bytes);
-
- rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
- rd_kafka_msgq_verify_order(NULL, &tmpq, 0, rd_false);
- } else {
- rd_kafka_msgq_init(&tmpq);
- }
-
-        /* srcq now contains only messages that sort before insert_before;
-         * insert srcq at insert_before in destq. */
- rd_dassert(!TAILQ_EMPTY(&destq->rkmq_msgs));
- rd_dassert(!TAILQ_EMPTY(&srcq->rkmq_msgs));
- TAILQ_INSERT_LIST_BEFORE(&destq->rkmq_msgs, insert_before,
- &srcq->rkmq_msgs, rd_kafka_msgs_head_s,
- rd_kafka_msg_t *, rkm_link);
- destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt;
- destq->rkmq_msg_bytes += srcq->rkmq_msg_bytes;
- srcq->rkmq_msg_cnt = 0;
- srcq->rkmq_msg_bytes = 0;
-
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
- rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
-
- /* tmpq contains the remaining messages in srcq, move it over. */
- rd_kafka_msgq_move(srcq, &tmpq);
-
- rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
-}
-
-
-/**
- * @brief Insert all messages from \p srcq into \p destq in their sorted
- * position (using \p cmp)
- */
-void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq,
- rd_kafka_msgq_t *srcq,
- int (*cmp)(const void *a, const void *b)) {
- rd_kafka_msg_t *sfirst, *dlast, *start_pos = NULL;
-
- if (unlikely(RD_KAFKA_MSGQ_EMPTY(srcq))) {
- /* srcq is empty */
- return;
- }
-
- if (unlikely(RD_KAFKA_MSGQ_EMPTY(destq))) {
- /* destq is empty, simply move the srcq. */
- rd_kafka_msgq_move(destq, srcq);
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
- return;
- }
-
- /* Optimize insertion by bulk-moving messages in place.
- * We know that:
-         * - destq is sorted but might not be contiguous (1,2,3,7)
-         * - srcq is sorted but might not be contiguous (4,5,6,8)
-         * - there might be (multiple) overlaps between the two, e.g.:
- * destq = (1,2,3,7), srcq = (4,5,6,8)
- * - there may be millions of messages.
- */
-
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
- rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
-
- dlast = rd_kafka_msgq_last(destq);
- sfirst = rd_kafka_msgq_first(srcq);
-
- /* Most common case, all of srcq goes after destq */
- if (likely(cmp(dlast, sfirst) < 0)) {
- rd_kafka_msgq_concat(destq, srcq);
-
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
-
- rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq));
- return;
- }
-
- /* Insert messages from srcq into destq in non-overlapping
- * chunks until srcq is exhausted. */
- while (likely(sfirst != NULL)) {
- rd_kafka_msg_t *insert_before;
-
- /* Get insert position in destq of first element in srcq */
- insert_before = rd_kafka_msgq_find_pos(destq, start_pos, sfirst,
- cmp, NULL, NULL);
-
- /* Insert as much of srcq as possible at insert_before */
- rd_kafka_msgq_insert_msgq_before(destq, insert_before, srcq,
- cmp);
-
- /* Remember the current destq position so the next find_pos()
- * does not have to re-scan destq and what was
- * added from srcq. */
- start_pos = insert_before;
-
- /* For next iteration */
- sfirst = rd_kafka_msgq_first(srcq);
-
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
- rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
- }
-
- rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
-
- rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq));
-}
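-/* Worked example of the bulk merge above, using the queues from the
- * comment (destq = (1,2,3,7), srcq = (4,5,6,8)):
- *   pass 1: find_pos() locates 7 as the insert position for 4; srcq is
- *           split at 7, and (4,5,6) is spliced in before 7:
- *           destq = (1,2,3,4,5,6,7), srcq = (8)
- *   pass 2: 8 sorts after everything in destq, so insert_before is
- *           NULL and the remainder is concatenated:
- *           destq = (1,2,3,4,5,6,7,8), srcq = empty */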
-
-
-/**
- * @brief Inserts messages from \p srcq according to their sorted position
- * into \p destq, filtering out messages that can not be retried.
- *
- * @param incr_retry Increment retry count for messages.
- * @param max_retries Maximum retries allowed per message.
- * @param backoff Absolute retry backoff for retried messages.
- *
- * @returns 0 if all messages were retried, or 1 if some messages
- * could not be retried.
- */
-int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq,
- rd_kafka_msgq_t *srcq,
- int incr_retry,
- int max_retries,
- rd_ts_t backoff,
- rd_kafka_msg_status_t status,
- int (*cmp)(const void *a, const void *b)) {
- rd_kafka_msgq_t retryable = RD_KAFKA_MSGQ_INITIALIZER(retryable);
- rd_kafka_msg_t *rkm, *tmp;
-
- /* Scan through messages to see which ones are eligible for retry,
-         * move the retryable ones to a temporary queue and
-         * set the backoff time for the first message, optionally
-         * increasing the retry count for each message.
-         * Sorted insert is not necessary since the original
-         * srcq order is maintained. */
- TAILQ_FOREACH_SAFE(rkm, &srcq->rkmq_msgs, rkm_link, tmp) {
- if (rkm->rkm_u.producer.retries + incr_retry > max_retries)
- continue;
-
- rd_kafka_msgq_deq(srcq, rkm, 1);
- rd_kafka_msgq_enq(&retryable, rkm);
-
- rkm->rkm_u.producer.ts_backoff = backoff;
- rkm->rkm_u.producer.retries += incr_retry;
-
- /* Don't downgrade a message from any form of PERSISTED
- * to NOT_PERSISTED, since the original cause of indicating
- * PERSISTED can't be changed.
- * E.g., a previous ack or in-flight timeout. */
- if (likely(!(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
- rkm->rkm_status !=
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED)))
- rkm->rkm_status = status;
- }
-
- /* No messages are retryable */
- if (RD_KAFKA_MSGQ_EMPTY(&retryable))
- return 0;
-
- /* Insert retryable list at sorted position */
- rd_kafka_msgq_insert_msgq(destq, &retryable, cmp);
-
- return 1;
-}
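-/* Worked example of the eligibility check above (illustrative): with
- * incr_retry=1 and max_retries=2, a message whose retry count is
- * already 2 is skipped (2 + 1 > 2) and left on srcq for the caller to
- * fail, while messages with retries <= 1 are moved to destq with
- * their backoff set. */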
-
-/**
- * @brief Inserts messages from \p rkmq according to their sorted position
- * into the partition's message queue.
- *
- * @param incr_retry Increment retry count for messages.
- * @param status Set status on each message.
- *
- * @returns 0 if all messages were retried, or 1 if some messages
- * could not be retried.
- *
- * @locality Broker thread (but not necessarily the leader broker thread)
- */
-
-int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq,
- int incr_retry,
- rd_kafka_msg_status_t status) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- rd_ts_t backoff = rd_clock() + (rk->rk_conf.retry_backoff_ms * 1000);
- int r;
-
- if (rd_kafka_terminating(rk))
- return 1;
-
- rd_kafka_toppar_lock(rktp);
- r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, incr_retry,
- rk->rk_conf.max_retries, backoff, status,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
- rd_kafka_toppar_unlock(rktp);
-
- return r;
-}
-
-/**
- * @brief Insert sorted message list \p rkmq at sorted position in \p rktp 's
- * message queue. The queues must not overlap.
- * @remark \p rkmq will be cleared.
- */
-void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq) {
- rd_kafka_toppar_lock(rktp);
- rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, rkmq,
- rktp->rktp_rkt->rkt_conf.msg_order_cmp);
- rd_kafka_toppar_unlock(rktp);
-}
-
-
-
-/**
- * Helper method for purging queues when removing a toppar.
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp) {
- rd_kafka_q_disable(rktp->rktp_fetchq);
- rd_kafka_q_purge(rktp->rktp_fetchq);
- rd_kafka_q_disable(rktp->rktp_ops);
- rd_kafka_q_purge(rktp->rktp_ops);
-}
-
-
-/**
- * @brief Migrate rktp from (optional) \p old_rkb to (optional) \p new_rkb,
- * but at least one is required to be non-NULL.
- *
- * This is an async operation.
- *
- * @locks rd_kafka_toppar_lock() MUST be held
- */
-static void rd_kafka_toppar_broker_migrate(rd_kafka_toppar_t *rktp,
- rd_kafka_broker_t *old_rkb,
- rd_kafka_broker_t *new_rkb) {
- rd_kafka_op_t *rko;
- rd_kafka_broker_t *dest_rkb;
- int had_next_broker = rktp->rktp_next_broker ? 1 : 0;
-
- rd_assert(old_rkb || new_rkb);
-
- /* Update next broker */
- if (new_rkb)
- rd_kafka_broker_keep(new_rkb);
- if (rktp->rktp_next_broker)
- rd_kafka_broker_destroy(rktp->rktp_next_broker);
- rktp->rktp_next_broker = new_rkb;
-
- /* If next_broker is set it means there is already an async
- * migration op going on and we should not send a new one
- * but simply change the next_broker (which we did above). */
- if (had_next_broker)
- return;
-
- /* Revert from offset-wait state back to offset-query
- * prior to leaving the broker to avoid stalling
-         * on the new broker waiting for an offset reply from
- * this old broker (that might not come and thus need
- * to time out..slowly) */
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
- rd_kafka_toppar_offset_retry(rktp, 500,
- "migrating to new broker");
-
- if (old_rkb) {
- /* If there is an existing broker for this toppar we let it
- * first handle its own leave and then trigger the join for
- * the next broker, if any. */
- rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
- dest_rkb = old_rkb;
- } else {
- /* No existing broker, send join op directly to new broker. */
- rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN);
- dest_rkb = new_rkb;
- }
-
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
- "Migrating topic %.*s [%" PRId32
- "] %p from %s to %s "
- "(sending %s to %s)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
- rktp, old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)",
- new_rkb ? rd_kafka_broker_name(new_rkb) : "(none)",
- rd_kafka_op2str(rko->rko_type), rd_kafka_broker_name(dest_rkb));
-
- rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
-}
-
-
-/**
- * Async toppar leave from broker.
- * Only use this when partitions are to be removed.
- *
- * Locks: rd_kafka_toppar_lock() MUST be held
- */
-void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp) {
- rd_kafka_op_t *rko;
- rd_kafka_broker_t *dest_rkb;
-
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_REMOVE;
-
- if (rktp->rktp_next_broker)
- dest_rkb = rktp->rktp_next_broker;
- else if (rktp->rktp_broker)
- dest_rkb = rktp->rktp_broker;
- else {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL",
- "%.*s [%" PRId32
- "] %p not handled by any broker: "
- "not sending LEAVE for remove",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rktp);
- return;
- }
-
-
- /* Revert from offset-wait state back to offset-query
- * prior to leaving the broker to avoid stalling
-         * on the new broker waiting for an offset reply from
- * this old broker (that might not come and thus need
- * to time out..slowly) */
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
- rd_kafka_toppar_set_fetch_state(
- rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
- "%.*s [%" PRId32 "] %p sending final LEAVE for removal by %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
- rktp, rd_kafka_broker_name(dest_rkb));
-
- rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
-}
-
-
-/**
- * @brief Delegates toppar 'rktp' to broker 'rkb'. 'rkb' may be NULL to
- * undelegate broker.
- *
- * @locks Caller must have rd_kafka_toppar_lock(rktp) held.
- */
-void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp,
- rd_kafka_broker_t *rkb) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- int internal_fallback = 0;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
- "%s [%" PRId32
- "]: delegate to broker %s "
- "(rktp %p, term %d, ref %d)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rkb ? rkb->rkb_name : "(none)", rktp,
- rd_kafka_terminating(rk),
- rd_refcnt_get(&rktp->rktp_refcnt));
-
-        /* Undelegated toppars are delegated to the internal
-         * broker for bookkeeping. */
- if (!rkb && !rd_kafka_terminating(rk)) {
- rkb = rd_kafka_broker_internal(rk);
- internal_fallback = 1;
- }
-
- if (rktp->rktp_broker == rkb && !rktp->rktp_next_broker) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
- "%.*s [%" PRId32
- "]: not updating broker: "
- "already on correct broker %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rkb ? rd_kafka_broker_name(rkb) : "(none)");
-
- if (internal_fallback)
- rd_kafka_broker_destroy(rkb);
- return;
- }
-
- if (rktp->rktp_broker)
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
- "%.*s [%" PRId32
- "]: no longer delegated to "
- "broker %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_broker_name(rktp->rktp_broker));
-
-
- if (rkb) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
- "%.*s [%" PRId32
- "]: delegating to broker %s "
- "for partition with %i messages "
- "(%" PRIu64 " bytes) queued",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_broker_name(rkb),
- rktp->rktp_msgq.rkmq_msg_cnt,
- rktp->rktp_msgq.rkmq_msg_bytes);
-
-
- } else {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
- "%.*s [%" PRId32 "]: no broker delegated",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
- }
-
- if (rktp->rktp_broker || rkb)
- rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb);
-
- if (internal_fallback)
- rd_kafka_broker_destroy(rkb);
-}
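-/* Illustrative note (not part of librdkafka): delegating with
- * rkb == NULL while the client is not terminating falls back to the
- * internal broker, so every live toppar is always handled by exactly
- * one broker thread, if only for bookkeeping. */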
-
-
-
-void rd_kafka_toppar_offset_commit_result(
- rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets) {
- if (err)
- rd_kafka_consumer_err(
- rktp->rktp_fetchq,
- /* FIXME: propagate broker_id */
- RD_KAFKA_NODEID_UA, err, 0 /* FIXME:VERSION*/, NULL, rktp,
- RD_KAFKA_OFFSET_INVALID, "Offset commit failed: %s",
- rd_kafka_err2str(err));
-
- rd_kafka_toppar_lock(rktp);
- if (!err)
- rktp->rktp_committed_pos =
- rd_kafka_topic_partition_get_fetch_pos(&offsets->elems[0]);
-
- /* When stopping toppars:
- * Final commit is now done (or failed), propagate. */
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING)
- rd_kafka_toppar_fetch_stopped(rktp, err);
-
- rd_kafka_toppar_unlock(rktp);
-}
-
-
-
-/**
- * Handle the next offset to consume for a toppar.
- * This is used during initial setup when trying to figure out what
- * offset to start consuming from.
- *
- * Locality: toppar handler thread.
- * Locks: toppar_lock(rktp) must be held
- */
-void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t next_pos) {
-
- if (RD_KAFKA_OFFSET_IS_LOGICAL(next_pos.offset)) {
- /* Offset storage returned logical offset (e.g. "end"),
- * look it up. */
-
- /* Save next offset, even if logical, so that e.g.,
- * assign(BEGINNING) survives a pause+resume, etc.
- * See issue #2105. */
- rd_kafka_toppar_set_next_fetch_position(rktp, next_pos);
-
- rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, next_pos,
- RD_KAFKA_RESP_ERR_NO_ERROR, "update");
- return;
- }
-
-        /* Adjust by TAIL count, if wanted */
- if (rktp->rktp_query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
- int64_t orig_offset = next_pos.offset;
- int64_t tail_cnt = llabs(rktp->rktp_query_pos.offset -
- RD_KAFKA_OFFSET_TAIL_BASE);
-
- if (tail_cnt > next_pos.offset)
- next_pos.offset = 0;
- else
- next_pos.offset -= tail_cnt;
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "OffsetReply for topic %s [%" PRId32
- "]: "
- "offset %" PRId64
- ": adjusting for "
- "OFFSET_TAIL(%" PRId64 "): effective %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, orig_offset, tail_cnt,
- rd_kafka_fetch_pos2str(next_pos));
- }
-
- rd_kafka_toppar_set_next_fetch_position(rktp, next_pos);
-
- rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE);
-
- /* Wake-up broker thread which might be idling on IO */
- if (rktp->rktp_broker)
- rd_kafka_broker_wakeup(rktp->rktp_broker, "ready to fetch");
-}
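-/* Worked example of the TAIL adjustment above (illustrative): with
- * rktp_query_pos = OFFSET_TAIL(100) and a looked-up end offset of
- * 5000, tail_cnt = 100 and the effective fetch start becomes 4900;
- * if the log holds fewer than 100 messages the start is clamped to
- * offset 0. */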
-
-
-
-/**
- * Fetch committed offset for a single partition. (simple consumer)
- *
- * Locality: toppar thread
- */
-void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- rd_kafka_topic_partition_list_t *part;
- rd_kafka_op_t *rko;
-
- rd_kafka_dbg(rk, TOPIC, "OFFSETREQ",
- "Partition %.*s [%" PRId32
- "]: querying cgrp for "
- "committed offset (opv %d)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, replyq.version);
-
- part = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add0(__FUNCTION__, __LINE__, part,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp, NULL);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rko->rko_replyq = replyq;
-
- rko->rko_u.offset_fetch.partitions = part;
- rko->rko_u.offset_fetch.require_stable_offsets =
- rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED;
- rko->rko_u.offset_fetch.do_free = 1;
-
- rd_kafka_q_enq(rktp->rktp_cgrp->rkcg_ops, rko);
-}
-
-
-
-/**
- * Toppar based OffsetResponse handling.
- * This is used for finding the next offset to Fetch.
- *
- * Locality: toppar handler thread
- */
-static void rd_kafka_toppar_handle_Offset(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_toppar_t *rktp = opaque;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_t *rktpar;
- int actions = 0;
-
- rd_kafka_toppar_lock(rktp);
- /* Drop reply from previous partition leader */
- if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_broker != rkb)
- err = RD_KAFKA_RESP_ERR__OUTDATED;
- rd_kafka_toppar_unlock(rktp);
-
- offsets = rd_kafka_topic_partition_list_new(1);
-
- rd_rkb_dbg(rkb, TOPIC, "OFFSET",
- "Offset reply for "
- "topic %.*s [%" PRId32 "] (v%d vs v%d)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, request->rkbuf_replyq.version,
- rktp->rktp_op_version);
-
- rd_dassert(request->rkbuf_replyq.version > 0);
- if (err != RD_KAFKA_RESP_ERR__DESTROY &&
- rd_kafka_buf_version_outdated(request, rktp->rktp_op_version)) {
- /* Outdated request response, ignore. */
- err = RD_KAFKA_RESP_ERR__OUTDATED;
- }
-
- /* Parse and return Offset */
- if (err != RD_KAFKA_RESP_ERR__OUTDATED)
- err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request,
- offsets, &actions);
-
- if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
- offsets, rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition))) {
- /* Requested partition not found in response */
- err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- }
-
- if (err) {
- rd_rkb_dbg(rkb, TOPIC, "OFFSET",
- "Offset reply error for "
- "topic %.*s [%" PRId32 "] (v%d, %s): %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, request->rkbuf_replyq.version,
- rd_kafka_err2str(err),
- rd_kafka_actions2str(actions));
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY ||
- err == RD_KAFKA_RESP_ERR__OUTDATED) {
- /* Termination or outdated, quick cleanup. */
-
- if (err == RD_KAFKA_RESP_ERR__OUTDATED) {
- rd_kafka_toppar_lock(rktp);
- rd_kafka_toppar_offset_retry(
- rktp, 500, "outdated offset response");
- rd_kafka_toppar_unlock(rktp);
- }
-
- /* from request.opaque */
- rd_kafka_toppar_destroy(rktp);
- return;
-
- } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
- return; /* Retry in progress */
-
-
- rd_kafka_toppar_lock(rktp);
-
- if (!(actions & (RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_REFRESH))) {
- /* Permanent error. Trigger auto.offset.reset policy
- * and signal error back to application. */
-
- rd_kafka_offset_reset(rktp, rkb->rkb_nodeid,
- rktp->rktp_query_pos, err,
- "failed to query logical offset");
-
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, rkb->rkb_nodeid, err, 0, NULL,
- rktp,
- (rktp->rktp_query_pos.offset <=
- RD_KAFKA_OFFSET_TAIL_BASE
- ? rktp->rktp_query_pos.offset -
- RD_KAFKA_OFFSET_TAIL_BASE
- : rktp->rktp_query_pos.offset),
- "Failed to query logical offset %s: %s",
- rd_kafka_offset2str(rktp->rktp_query_pos.offset),
- rd_kafka_err2str(err));
-
- } else {
- /* Temporary error. Schedule retry. */
- char tmp[256];
-
- rd_snprintf(
- tmp, sizeof(tmp),
- "failed to query logical offset %s: %s",
- rd_kafka_offset2str(rktp->rktp_query_pos.offset),
- rd_kafka_err2str(err));
-
- rd_kafka_toppar_offset_retry(rktp, 500, tmp);
- }
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp); /* from request.opaque */
- return;
- }
-
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "Offset %s request for %.*s [%" PRId32
- "] "
- "returned offset %s (%" PRId64 ") leader epoch %" PRId32,
- rd_kafka_offset2str(rktp->rktp_query_pos.offset),
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_offset2str(rktpar->offset),
- rktpar->offset,
- rd_kafka_topic_partition_get_leader_epoch(rktpar));
-
-
- rd_kafka_toppar_next_offset_handle(
- rktp, RD_KAFKA_FETCH_POS(
- rktpar->offset,
- rd_kafka_topic_partition_get_leader_epoch(rktpar)));
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
- rd_kafka_toppar_destroy(rktp); /* from request.opaque */
-}
-
-
-/**
- * @brief An Offset fetch failed (for whatever reason) in
- * the RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT state:
- * set the state back to FETCH_OFFSET_QUERY and start the
- * offset_query_tmr to trigger a new request eventually.
- *
- * @locality toppar handler thread
- * @locks toppar_lock() MUST be held
- */
-static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp,
- int backoff_ms,
- const char *reason) {
- rd_ts_t tmr_next;
- int restart_tmr;
-
- /* (Re)start timer if not started or the current timeout
- * is larger than \p backoff_ms. */
- tmr_next = rd_kafka_timer_next(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr, 1);
-
- restart_tmr =
- (tmr_next == -1 || tmr_next > rd_clock() + (backoff_ms * 1000ll));
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%s [%" PRId32 "]: %s: %s for %s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- reason,
- restart_tmr ? "(re)starting offset query timer"
- : "offset query timer already scheduled",
- rd_kafka_fetch_pos2str(rktp->rktp_query_pos));
-
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
-
- if (restart_tmr)
- rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr,
- backoff_ms * 1000ll,
- rd_kafka_offset_query_tmr_cb, rktp);
-}
-
-
-
-/**
- * Send OffsetRequest for toppar.
- *
- * If \p backoff_ms is non-zero, only the query timer is started;
- * otherwise a query is triggered directly.
- *
- * Locality: toppar handler thread
- * Locks: toppar_lock() must be held
- */
-void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t query_pos,
- int backoff_ms) {
- rd_kafka_broker_t *rkb;
-
- rd_kafka_assert(NULL,
- thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));
-
- rkb = rktp->rktp_broker;
-
- if (!backoff_ms && (!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL))
- backoff_ms = 500;
-
- if (backoff_ms) {
- rd_kafka_toppar_offset_retry(
- rktp, backoff_ms,
- !rkb ? "no current leader for partition" : "backoff");
- return;
- }
-
-
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr, 1 /*lock*/);
-
-
- if (query_pos.offset == RD_KAFKA_OFFSET_STORED &&
- rktp->rktp_rkt->rkt_conf.offset_store_method ==
- RD_KAFKA_OFFSET_METHOD_BROKER) {
- /*
- * Get stored offset from broker based storage:
- * ask cgrp manager for offsets
- */
- rd_kafka_toppar_offset_fetch(
- rktp,
- RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version));
-
- } else {
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_topic_partition_t *rktpar;
-
- /*
- * Look up logical offset (end,beginning,tail,..)
- */
-
- rd_rkb_dbg(rkb, TOPIC, "OFFREQ",
- "Partition %.*s [%" PRId32
- "]: querying for logical "
- "offset %s (opv %d)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_offset2str(query_pos.offset),
- rktp->rktp_op_version);
-
-                rd_kafka_toppar_keep(rktp); /* refcnt for OffsetRequest opaque */
-
- if (query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE)
- query_pos.offset = RD_KAFKA_OFFSET_END;
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition);
- rd_kafka_topic_partition_set_from_fetch_pos(rktpar, query_pos);
- rd_kafka_topic_partition_set_current_leader_epoch(
- rktpar, rktp->rktp_leader_epoch);
-
- rd_kafka_ListOffsetsRequest(
- rkb, offsets,
- RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version),
- rd_kafka_toppar_handle_Offset, rktp);
-
- rd_kafka_topic_partition_list_destroy(offsets);
- }
-
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT);
-}
-
-
-/**
- * Start fetching toppar.
- *
- * Locality: toppar handler thread
- * Locks: none
- */
-static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_op_t *rko_orig) {
- rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg;
- rd_kafka_resp_err_t err = 0;
- int32_t version = rko_orig->rko_version;
-
- rd_kafka_toppar_lock(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
- "Start fetch for %.*s [%" PRId32
- "] in "
- "state %s at %s (v%" PRId32 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- rd_kafka_fetch_pos2str(pos), version);
-
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
- err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
- rd_kafka_toppar_unlock(rktp);
- goto err_reply;
- }
-
- rd_kafka_toppar_op_version_bump(rktp, version);
-
- if (rkcg) {
- rd_kafka_assert(rktp->rktp_rkt->rkt_rk, !rktp->rktp_cgrp);
- /* Attach toppar to cgrp */
- rktp->rktp_cgrp = rkcg;
- rd_kafka_cgrp_op(rkcg, rktp, RD_KAFKA_NO_REPLYQ,
- RD_KAFKA_OP_PARTITION_JOIN, 0);
- }
-
-
- if (pos.offset == RD_KAFKA_OFFSET_BEGINNING ||
- pos.offset == RD_KAFKA_OFFSET_END ||
- pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
- rd_kafka_toppar_next_offset_handle(rktp, pos);
-
- } else if (pos.offset == RD_KAFKA_OFFSET_STORED) {
- rd_kafka_offset_store_init(rktp);
-
- } else if (pos.offset == RD_KAFKA_OFFSET_INVALID) {
- rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos,
- RD_KAFKA_RESP_ERR__NO_OFFSET,
- "no previously committed offset "
- "available");
-
- } else {
- rd_kafka_toppar_set_next_fetch_position(rktp, pos);
-
- rd_kafka_toppar_set_fetch_state(rktp,
- RD_KAFKA_TOPPAR_FETCH_ACTIVE);
-
- /* Wake-up broker thread which might be idling on IO */
- if (rktp->rktp_broker)
- rd_kafka_broker_wakeup(rktp->rktp_broker,
- "fetch start");
- }
-
- rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID;
-
- rd_kafka_toppar_unlock(rktp);
-
- /* Signal back to caller thread that start has commenced, or err */
-err_reply:
- if (rko_orig->rko_replyq.q) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_START);
-
- rko->rko_err = err;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
- }
-}
-
-
-
-/**
- * Mark toppar's fetch state as stopped (all decommissioning is done,
- * offsets are stored, etc).
- *
- * Locality: toppar handler thread
- * Locks: toppar_lock(rktp) MUST be held
- */
-void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err) {
-
-
- rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPED);
-
- rktp->rktp_app_pos.offset = RD_KAFKA_OFFSET_INVALID;
- rktp->rktp_app_pos.leader_epoch = -1;
-
- if (rktp->rktp_cgrp) {
- /* Detach toppar from cgrp */
- rd_kafka_cgrp_op(rktp->rktp_cgrp, rktp, RD_KAFKA_NO_REPLYQ,
- RD_KAFKA_OP_PARTITION_LEAVE, 0);
- rktp->rktp_cgrp = NULL;
- }
-
- /* Signal back to application thread that stop is done. */
- if (rktp->rktp_replyq.q) {
- rd_kafka_op_t *rko;
- rko =
- rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY);
- rko->rko_err = err;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0);
- }
-}
-
-
-/**
- * Stop toppar fetcher.
- * This is usually an async operation.
- *
- * Locality: toppar handler thread
- */
-void rd_kafka_toppar_fetch_stop(rd_kafka_toppar_t *rktp,
- rd_kafka_op_t *rko_orig) {
- int32_t version = rko_orig->rko_version;
-
- rd_kafka_toppar_lock(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
- "Stopping fetch for %.*s [%" PRId32 "] in state %s (v%d)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state], version);
-
- rd_kafka_toppar_op_version_bump(rktp, version);
-
- /* Abort pending offset lookups. */
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr, 1 /*lock*/);
-
- /* Clear out the forwarding queue. */
- rd_kafka_q_fwd_set(rktp->rktp_fetchq, NULL);
-
- /* Assign the future replyq to propagate stop results. */
- rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_replyq.q == NULL);
- rktp->rktp_replyq = rko_orig->rko_replyq;
- rd_kafka_replyq_clear(&rko_orig->rko_replyq);
-
- rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPING);
-
- /* Stop offset store (possibly async).
- * NOTE: will call .._stopped() if store finishes immediately,
- * so no more operations after this call! */
- rd_kafka_offset_store_stop(rktp);
-
- rd_kafka_toppar_unlock(rktp);
-}
-
-
-/**
- * Update a toppar's offset.
- * The toppar must previously have been started with FETCH_START.
- *
- * Locality: toppar handler thread
- */
-void rd_kafka_toppar_seek(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_op_t *rko_orig) {
- rd_kafka_resp_err_t err = 0;
- int32_t version = rko_orig->rko_version;
-
- rd_kafka_toppar_lock(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
- "Seek %.*s [%" PRId32 "] to %s in state %s (v%" PRId32 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
- rd_kafka_fetch_states[rktp->rktp_fetch_state], version);
-
-
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
- err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
- goto err_reply;
- } else if (!RD_KAFKA_TOPPAR_FETCH_IS_STARTED(rktp->rktp_fetch_state)) {
- err = RD_KAFKA_RESP_ERR__STATE;
- goto err_reply;
- } else if (pos.offset == RD_KAFKA_OFFSET_STORED) {
- err = RD_KAFKA_RESP_ERR__INVALID_ARG;
- goto err_reply;
- }
-
- rd_kafka_toppar_op_version_bump(rktp, version);
-
- /* Reset app offsets since seek()ing is analogous to a (re)assign(),
- * and we want to avoid using the current app offset on resume()
- * following a seek (#3567). */
- rktp->rktp_app_pos.offset = RD_KAFKA_OFFSET_INVALID;
- rktp->rktp_app_pos.leader_epoch = -1;
-
- /* Abort pending offset lookups. */
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr, 1 /*lock*/);
-
- if (pos.offset <= 0 || pos.validated) {
- rd_kafka_toppar_next_offset_handle(rktp, pos);
- } else {
- rd_kafka_toppar_set_fetch_state(
- rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT);
- rd_kafka_toppar_set_next_fetch_position(rktp, pos);
- rd_kafka_offset_validate(rktp, "seek");
- }
-
- /* Signal back to caller thread that seek has commenced, or err */
-err_reply:
- rd_kafka_toppar_unlock(rktp);
-
- if (rko_orig->rko_replyq.q) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK | RD_KAFKA_OP_REPLY);
-
- rko->rko_err = err;
- rko->rko_u.fetch_start.pos = rko_orig->rko_u.fetch_start.pos;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
- }
-}
-
-
-/**
- * @brief Pause/resume toppar.
- *
- * This is the internal handler of the pause/resume op.
- *
- * @locality toppar's handler thread
- */
-static void rd_kafka_toppar_pause_resume(rd_kafka_toppar_t *rktp,
- rd_kafka_op_t *rko_orig) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- int pause = rko_orig->rko_u.pause.pause;
- int flag = rko_orig->rko_u.pause.flag;
- int32_t version = rko_orig->rko_version;
-
- rd_kafka_toppar_lock(rktp);
-
- rd_kafka_toppar_op_version_bump(rktp, version);
-
- if (!pause && (rktp->rktp_flags & flag) != flag) {
- rd_kafka_dbg(rk, TOPIC, "RESUME",
- "Not resuming %s [%" PRId32
- "]: "
- "partition is not paused by %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- (flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "application"
- : "library"));
- rd_kafka_toppar_unlock(rktp);
- return;
- }
-
- if (pause) {
- /* Pause partition by setting either
- * RD_KAFKA_TOPPAR_F_APP_PAUSE or
- * RD_KAFKA_TOPPAR_F_LIB_PAUSE */
- rktp->rktp_flags |= flag;
-
- if (rk->rk_type == RD_KAFKA_CONSUMER) {
- /* Save offset of last consumed message+1 as the
- * next message to fetch on resume. */
- if (rktp->rktp_app_pos.offset !=
- RD_KAFKA_OFFSET_INVALID)
- rd_kafka_toppar_set_next_fetch_position(
- rktp, rktp->rktp_app_pos);
-
- rd_kafka_dbg(
- rk, TOPIC, pause ? "PAUSE" : "RESUME",
- "%s %s [%" PRId32 "]: at %s (state %s, v%d)",
- pause ? "Pause" : "Resume",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- version);
- } else {
- rd_kafka_dbg(
- rk, TOPIC, pause ? "PAUSE" : "RESUME",
- "%s %s [%" PRId32 "] (state %s, v%d)",
- pause ? "Pause" : "Resume",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- version);
- }
-
- } else {
- /* Unset the RD_KAFKA_TOPPAR_F_APP_PAUSE or
- * RD_KAFKA_TOPPAR_F_LIB_PAUSE flag */
- rktp->rktp_flags &= ~flag;
-
- if (rk->rk_type == RD_KAFKA_CONSUMER) {
- rd_kafka_dbg(
- rk, TOPIC, pause ? "PAUSE" : "RESUME",
- "%s %s [%" PRId32 "]: at %s (state %s, v%d)",
- rktp->rktp_fetch_state ==
- RD_KAFKA_TOPPAR_FETCH_ACTIVE
- ? "Resuming"
- : "Not resuming stopped",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- version);
-
- /* If the resuming offset is logical we
- * need to trigger a seek (which performs the
- * logical->absolute lookup) to get
- * things going.
- * The typical case is a partition that was
- * paused before the app consumed anything,
- * thus leaving rktp_app_offset at INVALID. */
- if (!RD_KAFKA_TOPPAR_IS_PAUSED(rktp) &&
- (rktp->rktp_fetch_state ==
- RD_KAFKA_TOPPAR_FETCH_ACTIVE ||
- rktp->rktp_fetch_state ==
- RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) &&
- rktp->rktp_next_fetch_start.offset ==
- RD_KAFKA_OFFSET_INVALID)
- rd_kafka_toppar_next_offset_handle(
- rktp, rktp->rktp_next_fetch_start);
-
- } else
- rd_kafka_dbg(
- rk, TOPIC, pause ? "PAUSE" : "RESUME",
- "%s %s [%" PRId32 "] (state %s, v%d)",
- pause ? "Pause" : "Resume",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- version);
- }
- rd_kafka_toppar_unlock(rktp);
-
- if (pause && rk->rk_type == RD_KAFKA_CONSUMER) {
- /* Flush partition's fetch queue */
- rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
- rko_orig->rko_version);
- }
-}
-
-
-
-/**
- * @brief Serve a toppar in a consumer broker thread.
- * This is considered the fast path and should be minimal,
- * mostly focusing on fetch-related mechanisms.
- *
- * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
- *
- * @locality broker thread
- * @locks none
- */
-rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp) {
- return rd_kafka_toppar_fetch_decide(rktp, rkb, 0);
-}
-
-
-
-/**
- * @brief Serve a toppar op
- *
- * @param rktp may be NULL for certain ops (OP_RECV_BUF)
- *
- * Will send an empty reply op if the request rko has a replyq set,
- * providing synchronous operation.
- *
- * @locality toppar handler thread
- */
-static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
- rd_kafka_toppar_t *rktp = NULL;
- int outdated = 0;
-
- if (rko->rko_rktp)
- rktp = rko->rko_rktp;
-
- if (rktp) {
- outdated =
- rd_kafka_op_version_outdated(rko, rktp->rktp_op_version);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP",
- "%.*s [%" PRId32
- "] received %sop %s "
- "(v%" PRId32 ") in fetch-state %s (opv%d)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, outdated ? "outdated " : "",
- rd_kafka_op2str(rko->rko_type), rko->rko_version,
- rd_kafka_fetch_states[rktp->rktp_fetch_state],
- rktp->rktp_op_version);
-
- if (outdated) {
-#if ENABLE_DEVEL
- rd_kafka_op_print(stdout, "PART_OUTDATED", rko);
-#endif
- rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__OUTDATED);
- return RD_KAFKA_OP_RES_HANDLED;
- }
- }
-
- switch ((int)rko->rko_type) {
- case RD_KAFKA_OP_FETCH_START:
- rd_kafka_toppar_fetch_start(rktp, rko->rko_u.fetch_start.pos,
- rko);
- break;
-
- case RD_KAFKA_OP_FETCH_STOP:
- rd_kafka_toppar_fetch_stop(rktp, rko);
- break;
-
- case RD_KAFKA_OP_SEEK:
- rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.pos, rko);
- break;
-
- case RD_KAFKA_OP_PAUSE:
- rd_kafka_toppar_pause_resume(rktp, rko);
- break;
-
- case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
- rd_kafka_assert(NULL, rko->rko_u.offset_commit.cb);
- rko->rko_u.offset_commit.cb(rk, rko->rko_err,
- rko->rko_u.offset_commit.partitions,
- rko->rko_u.offset_commit.opaque);
- break;
-
- case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: {
- /* OffsetFetch reply */
- rd_kafka_topic_partition_list_t *offsets =
- rko->rko_u.offset_fetch.partitions;
- rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1};
-
- rktp = rd_kafka_topic_partition_get_toppar(
- rk, &offsets->elems[0], rd_true /*create-on-miss*/);
-
- if (!rko->rko_err) {
- /* Request succeeded but per-partition might have failed
- */
- rko->rko_err = offsets->elems[0].err;
- pos = rd_kafka_topic_partition_get_fetch_pos(
- &offsets->elems[0]);
- }
-
- rd_kafka_topic_partition_list_destroy(offsets);
- rko->rko_u.offset_fetch.partitions = NULL;
-
- rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
- &rktp->rktp_offset_query_tmr, 1 /*lock*/);
-
- rd_kafka_toppar_lock(rktp);
-
- if (rko->rko_err) {
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "Failed to fetch offset for "
- "%.*s [%" PRId32 "]: %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_err2str(rko->rko_err));
-
- /* Keep on querying until we succeed. */
- rd_kafka_toppar_offset_retry(rktp, 500,
- "failed to fetch offsets");
- rd_kafka_toppar_unlock(rktp);
-
-
- /* Propagate error to application */
- if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD &&
- rko->rko_err !=
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
- rd_kafka_consumer_err(
- rktp->rktp_fetchq, RD_KAFKA_NODEID_UA,
- rko->rko_err, 0, NULL, rktp,
- RD_KAFKA_OFFSET_INVALID,
- "Failed to fetch "
- "offsets from brokers: %s",
- rd_kafka_err2str(rko->rko_err));
-
- /* Refcount from get_toppar() */
- rd_kafka_toppar_destroy(rktp);
-
- break;
- }
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
- "%.*s [%" PRId32 "]: OffsetFetch returned %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_fetch_pos2str(pos));
-
- if (pos.offset > 0)
- rktp->rktp_committed_pos = pos;
-
- if (pos.offset >= 0)
- rd_kafka_toppar_next_offset_handle(rktp, pos);
- else
- rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos,
- RD_KAFKA_RESP_ERR__NO_OFFSET,
- "no previously committed offset "
- "available");
- rd_kafka_toppar_unlock(rktp);
-
- /* Refcount from get_toppar() */
- rd_kafka_toppar_destroy(rktp);
- } break;
-
- default:
- rd_kafka_assert(NULL, !*"unknown type");
- break;
- }
-
- rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-/**
- * Send command op to toppar (handled by toppar's thread).
- *
- * Locality: any thread
- */
-static void rd_kafka_toppar_op0(rd_kafka_toppar_t *rktp,
- rd_kafka_op_t *rko,
- rd_kafka_replyq_t replyq) {
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rko->rko_replyq = replyq;
-
- rd_kafka_q_enq(rktp->rktp_ops, rko);
-}
-
-
-/**
- * Send command op to toppar (handled by toppar's thread).
- *
- * Locality: any thread
- */
-static void rd_kafka_toppar_op(rd_kafka_toppar_t *rktp,
- rd_kafka_op_type_t type,
- int32_t version,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_cgrp_t *rkcg,
- rd_kafka_replyq_t replyq) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new(type);
- rko->rko_version = version;
- if (type == RD_KAFKA_OP_FETCH_START || type == RD_KAFKA_OP_SEEK) {
- if (rkcg)
- rko->rko_u.fetch_start.rkcg = rkcg;
- rko->rko_u.fetch_start.pos = pos;
- }
-
- rd_kafka_toppar_op0(rktp, rko, replyq);
-}
-
-
-
-/**
- * Start consuming partition (async operation).
- * 'pos' is the initial fetch position (offset and leader epoch).
- * 'fwdq' is an optional queue to forward messages to; if this is NULL
- * then messages will be enqueued on rktp_fetchq.
- * 'replyq' is an optional queue for handling the consume_start ack.
- *
- * This is the thread-safe interface that can be called from any thread.
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_q_t *fwdq,
- rd_kafka_replyq_t replyq) {
- int32_t version;
-
- rd_kafka_q_lock(rktp->rktp_fetchq);
- if (fwdq && !(rktp->rktp_fetchq->rkq_flags & RD_KAFKA_Q_F_FWD_APP))
- rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, 0, /* no do_lock */
- 0 /* no fwd_app */);
- rd_kafka_q_unlock(rktp->rktp_fetchq);
-
- /* Bump version barrier. */
- version = rd_kafka_toppar_version_new_barrier(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
- "Start consuming %.*s [%" PRId32 "] at %s (v%" PRId32 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
- version);
-
- rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, pos,
- rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
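-
-
-/**
- * Example (editor's illustrative sketch, not part of the original source):
- * starting consumption of a partition at the stored offset and forwarding
- * messages to an application queue. `rktp` and `app_q` are hypothetical,
- * assumed to be a held toppar reference and an existing queue.
- *
- * @code
- * rd_kafka_fetch_pos_t pos =
- *     RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_STORED, -1);
- *
- * // Enqueues an OP_FETCH_START on the toppar's op queue; the start
- * // itself runs in the toppar handler thread (see fetch_start above).
- * rd_kafka_toppar_op_fetch_start(rktp, pos, app_q, RD_KAFKA_NO_REPLYQ);
- * @endcode
- */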
-
-
-/**
- * Stop consuming partition (async operation).
- * This is a thread-safe interface that can be called from any thread.
- *
- * Locality: any thread
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq) {
- int32_t version;
-
- /* Bump version barrier. */
- version = rd_kafka_toppar_version_new_barrier(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
- "Stop consuming %.*s [%" PRId32 "] (v%" PRId32 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, version);
-
- rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version,
- RD_KAFKA_FETCH_POS(-1, -1), NULL, replyq);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Set/Seek offset of a consumed partition (async operation).
- *
- * @param pos is the target fetch position (offset and leader epoch,
- * or -1 if the epoch is not known).
- * @param replyq is an optional queue for handling the ack.
- *
- * This is the thread-safe interface that can be called from any thread.
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_replyq_t replyq) {
- int32_t version;
-
- /* Bump version barrier. */
- version = rd_kafka_toppar_version_new_barrier(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
- "Seek %.*s [%" PRId32 "] to %s (v%" PRId32 ")",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
- version);
-
- rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, pos, NULL, replyq);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Pause/resume partition (async operation).
- *
- * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
- * depending on whether the app or librdkafka paused the partition.
- * @param pause is 1 for pausing or 0 for resuming.
- *
- * @locality any
- */
-rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp,
- int pause,
- int flag,
- rd_kafka_replyq_t replyq) {
- int32_t version;
- rd_kafka_op_t *rko;
-
- /* Bump version barrier. */
- version = rd_kafka_toppar_version_new_barrier(rktp);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE" : "RESUME",
- "%s %.*s [%" PRId32 "] (v%" PRId32 ")",
- pause ? "Pause" : "Resume",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, version);
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE);
- rko->rko_version = version;
- rko->rko_u.pause.pause = pause;
- rko->rko_u.pause.flag = flag;
-
- rd_kafka_toppar_op0(rktp, rko, replyq);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Pause a toppar (asynchronous).
- *
- * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
- * depending on whether the app or librdkafka paused the partition.
- *
- * @locality any
- * @locks none needed
- */
-void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag) {
- rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag,
- RD_KAFKA_NO_REPLYQ);
-}
-
-/**
- * @brief Resume a toppar (asynchronous).
- *
- * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
- * depending on whether the app or librdkafka paused the partition.
- *
- * @locality any
- * @locks none needed
- */
-void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag) {
- rd_kafka_toppar_op_pause_resume(rktp, 0 /*resume*/, flag,
- RD_KAFKA_NO_REPLYQ);
-}
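-
-
-/**
- * Example (editor's sketch, not part of the original source): a
- * library-internal pause followed by a resume. `rktp` is a hypothetical
- * held toppar reference.
- *
- * @code
- * rd_kafka_toppar_pause(rktp, RD_KAFKA_TOPPAR_F_LIB_PAUSE);
- * // ... fetching is suspended for this partition ...
- * rd_kafka_toppar_resume(rktp, RD_KAFKA_TOPPAR_F_LIB_PAUSE);
- * @endcode
- */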
-
-
-
-/**
- * @brief Pause or resume a list of partitions.
- *
- * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
- * depending on whether the app or librdkafka paused the partition.
- * @param pause true for pausing, false for resuming.
- * @param async RD_SYNC to wait for background thread to handle op,
- * RD_ASYNC for asynchronous operation.
- *
- * @locality any
- *
- * @remark This is an asynchronous call; the actual pause/resume is performed
- * by toppar_pause() in the toppar's handler thread.
- */
-rd_kafka_resp_err_t
-rd_kafka_toppars_pause_resume(rd_kafka_t *rk,
- rd_bool_t pause,
- rd_async_t async,
- int flag,
- rd_kafka_topic_partition_list_t *partitions) {
- int i;
- int waitcnt = 0;
- rd_kafka_q_t *tmpq = NULL;
-
- if (!async)
- tmpq = rd_kafka_q_new(rk);
-
- rd_kafka_dbg(
- rk, TOPIC, pause ? "PAUSE" : "RESUME", "%s %s %d partition(s)",
- flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library",
- pause ? "pausing" : "resuming", partitions->cnt);
-
- for (i = 0; i < partitions->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
- rd_kafka_toppar_t *rktp;
-
- rktp =
- rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
- if (!rktp) {
- rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE" : "RESUME",
- "%s %s [%" PRId32
- "]: skipped: "
- "unknown partition",
- pause ? "Pause" : "Resume", rktpar->topic,
- rktpar->partition);
-
- rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- continue;
- }
-
- rd_kafka_toppar_op_pause_resume(rktp, pause, flag,
- RD_KAFKA_REPLYQ(tmpq, 0));
-
- if (!async)
- waitcnt++;
-
- rd_kafka_toppar_destroy(rktp);
-
- rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (!async) {
- while (waitcnt-- > 0)
- rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
-
- rd_kafka_q_destroy_owner(tmpq);
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
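-
-
-/**
- * Example (editor's sketch, not part of the original source): an
- * application-level pause of two partitions that waits for the toppar
- * handler threads to process the ops. The topic name is hypothetical.
- *
- * @code
- * rd_kafka_topic_partition_list_t *parts =
- *     rd_kafka_topic_partition_list_new(2);
- * rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
- * rd_kafka_topic_partition_list_add(parts, "mytopic", 1);
- *
- * rd_kafka_toppars_pause_resume(rk, rd_true, RD_SYNC, // pause, wait
- *                               RD_KAFKA_TOPPAR_F_APP_PAUSE, parts);
- *
- * rd_kafka_topic_partition_list_destroy(parts);
- * @endcode
- */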
-
-
-
-/**
- * Propagate error for toppar
- */
-void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err,
- const char *reason) {
- rd_kafka_op_t *rko;
- char buf[512];
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);
- rko->rko_err = err;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
-
- rd_snprintf(buf, sizeof(buf), "%.*s [%" PRId32 "]: %s (%s)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, reason, rd_kafka_err2str(err));
-
- rko->rko_u.err.errstr = rd_strdup(buf);
-
- rd_kafka_q_enq(rktp->rktp_fetchq, rko);
-}
-
-
-
-/**
- * Returns the currently delegated broker for this toppar.
- * If \p proper_broker is set, NULL will be returned if the current handler
- * is not a proper broker (i.e., the INTERNAL broker).
- *
- * The returned broker has an increased refcount.
- *
- * Locks: none
- */
-rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp,
- int proper_broker) {
- rd_kafka_broker_t *rkb;
- rd_kafka_toppar_lock(rktp);
- rkb = rktp->rktp_broker;
- if (rkb) {
- if (proper_broker && rkb->rkb_source == RD_KAFKA_INTERNAL)
- rkb = NULL;
- else
- rd_kafka_broker_keep(rkb);
- }
- rd_kafka_toppar_unlock(rktp);
-
- return rkb;
-}
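-
-
-/**
- * Example (editor's sketch, not part of the original source): looking up
- * the delegate broker and releasing the acquired reference. `rktp` is
- * hypothetical.
- *
- * @code
- * rd_kafka_broker_t *rkb =
- *     rd_kafka_toppar_broker(rktp, 1); // proper brokers only
- * if (rkb) {
- *         // ... use rkb ...
- *         rd_kafka_broker_destroy(rkb); // drop refcount from _broker()
- * }
- * @endcode
- */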
-
-
-/**
- * @brief Take action when partition broker becomes unavailable.
- * This should be called when requests (e.g. ProduceRequest) fail with
- * NOT_LEADER_FOR.. or similar error codes.
- *
- * @locks none
- * @locality any
- */
-void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp,
- const char *reason,
- rd_kafka_resp_err_t err) {
- rd_kafka_topic_t *rkt = rktp->rktp_rkt;
-
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "BROKERUA",
- "%s [%" PRId32 "]: broker unavailable: %s: %s",
- rkt->rkt_topic->str, rktp->rktp_partition, reason,
- rd_kafka_err2str(err));
-
- rd_kafka_topic_wrlock(rkt);
- rkt->rkt_flags |= RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
- rd_kafka_topic_wrunlock(rkt);
-
- rd_kafka_topic_fast_leader_query(rkt->rkt_rk);
-}
-
-
-const char *
-rd_kafka_topic_partition_topic(const rd_kafka_topic_partition_t *rktpar) {
- const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
- return rktp->rktp_rkt->rkt_topic->str;
-}
-
-int32_t
-rd_kafka_topic_partition_partition(const rd_kafka_topic_partition_t *rktpar) {
- const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
- return rktp->rktp_partition;
-}
-
-void rd_kafka_topic_partition_get(const rd_kafka_topic_partition_t *rktpar,
- const char **name,
- int32_t *partition) {
- const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
- *name = rktp->rktp_rkt->rkt_topic->str;
- *partition = rktp->rktp_partition;
-}
-
-
-
-/**
- *
- * rd_kafka_topic_partition_t lists
- * List of partitions for propagation to the application; grown on demand.
- *
- */
-
-
-static void
-rd_kafka_topic_partition_list_grow(rd_kafka_topic_partition_list_t *rktparlist,
- int add_size) {
- if (add_size < rktparlist->size)
- add_size = RD_MAX(rktparlist->size, 32);
-
- rktparlist->size += add_size;
- rktparlist->elems = rd_realloc(
- rktparlist->elems, sizeof(*rktparlist->elems) * rktparlist->size);
-}
-
-
-/**
- * @brief Initialize a list for fitting \p size partitions.
- */
-void rd_kafka_topic_partition_list_init(
- rd_kafka_topic_partition_list_t *rktparlist,
- int size) {
- memset(rktparlist, 0, sizeof(*rktparlist));
-
- if (size > 0)
- rd_kafka_topic_partition_list_grow(rktparlist, size);
-}
-
-
-/**
- * Create a list for fitting 'size' topic_partitions (rktp).
- */
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size) {
- rd_kafka_topic_partition_list_t *rktparlist;
-
- rktparlist = rd_calloc(1, sizeof(*rktparlist));
-
- if (size > 0)
- rd_kafka_topic_partition_list_grow(rktparlist, size);
-
- return rktparlist;
-}
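-
-
-/**
- * Example (editor's sketch, not part of the original source): building a
- * one-element partition list and releasing it. The topic name is
- * hypothetical; the list grows on demand if more elements are added.
- *
- * @code
- * rd_kafka_topic_partition_list_t *parts =
- *     rd_kafka_topic_partition_list_new(1);
- * rd_kafka_topic_partition_t *rktpar =
- *     rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
- * rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
- * rd_kafka_topic_partition_list_destroy(parts);
- * @endcode
- */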
-
-
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic,
- int32_t partition) {
- rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
-
- rktpar->topic = rd_strdup(topic);
- rktpar->partition = partition;
-
- return rktpar;
-}
-
-/**
- * @brief Update \p dst with info from \p src.
- */
-static void
-rd_kafka_topic_partition_update(rd_kafka_topic_partition_t *dst,
- const rd_kafka_topic_partition_t *src) {
- const rd_kafka_topic_partition_private_t *srcpriv;
- rd_kafka_topic_partition_private_t *dstpriv;
-
- rd_dassert(!strcmp(dst->topic, src->topic));
- rd_dassert(dst->partition == src->partition);
- rd_dassert(dst != src);
-
- dst->offset = src->offset;
- dst->opaque = src->opaque;
- dst->err = src->err;
-
- if (src->metadata_size > 0) {
- dst->metadata = rd_malloc(src->metadata_size);
- dst->metadata_size = src->metadata_size;
- memcpy(dst->metadata, src->metadata, dst->metadata_size);
- }
-
- if ((srcpriv = src->_private)) {
- dstpriv = rd_kafka_topic_partition_get_private(dst);
- if (srcpriv->rktp && !dstpriv->rktp)
- dstpriv->rktp = rd_kafka_toppar_keep(srcpriv->rktp);
-
- rd_assert(dstpriv->rktp == srcpriv->rktp);
-
- dstpriv->leader_epoch = srcpriv->leader_epoch;
-
- } else if ((dstpriv = dst->_private)) {
- /* No private object in source, reset the leader epoch. */
- dstpriv->leader_epoch = -1;
- }
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src) {
- rd_kafka_topic_partition_t *dst =
- rd_kafka_topic_partition_new(src->topic, src->partition);
-
- rd_kafka_topic_partition_update(dst, src);
-
- return dst;
-}
-
-
-/** Same as above but with generic void* signature */
-void *rd_kafka_topic_partition_copy_void(const void *src) {
- return rd_kafka_topic_partition_copy(src);
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp) {
- rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
-
- rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic);
- rktpar->partition = rktp->rktp_partition;
-
- return rktpar;
-}
-
-/**
- * @brief Destroy a partition private glue object.
- */
-static void rd_kafka_topic_partition_private_destroy(
- rd_kafka_topic_partition_private_t *parpriv) {
- if (parpriv->rktp)
- rd_kafka_toppar_destroy(parpriv->rktp);
- rd_free(parpriv);
-}
-
-static void
-rd_kafka_topic_partition_destroy0(rd_kafka_topic_partition_t *rktpar,
- int do_free) {
- if (rktpar->topic)
- rd_free(rktpar->topic);
- if (rktpar->metadata)
- rd_free(rktpar->metadata);
- if (rktpar->_private)
- rd_kafka_topic_partition_private_destroy(
- (rd_kafka_topic_partition_private_t *)rktpar->_private);
-
- if (do_free)
- rd_free(rktpar);
-}
-
-
-int32_t rd_kafka_topic_partition_get_leader_epoch(
- const rd_kafka_topic_partition_t *rktpar) {
- const rd_kafka_topic_partition_private_t *parpriv;
-
- if (!(parpriv = rktpar->_private))
- return -1;
-
- return parpriv->leader_epoch;
-}
-
-void rd_kafka_topic_partition_set_leader_epoch(
- rd_kafka_topic_partition_t *rktpar,
- int32_t leader_epoch) {
- rd_kafka_topic_partition_private_t *parpriv;
-
- /* Avoid allocating private_t if clearing the epoch */
- if (leader_epoch == -1 && !rktpar->_private)
- return;
-
- parpriv = rd_kafka_topic_partition_get_private(rktpar);
-
- parpriv->leader_epoch = leader_epoch;
-}
-
-int32_t rd_kafka_topic_partition_get_current_leader_epoch(
- const rd_kafka_topic_partition_t *rktpar) {
- const rd_kafka_topic_partition_private_t *parpriv;
-
- if (!(parpriv = rktpar->_private))
- return -1;
-
- return parpriv->current_leader_epoch;
-}
-
-void rd_kafka_topic_partition_set_current_leader_epoch(
- rd_kafka_topic_partition_t *rktpar,
- int32_t current_leader_epoch) {
- rd_kafka_topic_partition_private_t *parpriv;
-
- /* Avoid allocating private_t if clearing the epoch */
- if (current_leader_epoch == -1 && !rktpar->_private)
- return;
-
- parpriv = rd_kafka_topic_partition_get_private(rktpar);
-
- parpriv->current_leader_epoch = current_leader_epoch;
-}
-
-/**
- * @brief Set offset and leader epoch from a fetchpos.
- */
-void rd_kafka_topic_partition_set_from_fetch_pos(
- rd_kafka_topic_partition_t *rktpar,
- const rd_kafka_fetch_pos_t fetchpos) {
- rktpar->offset = fetchpos.offset;
- rd_kafka_topic_partition_set_leader_epoch(rktpar,
- fetchpos.leader_epoch);
-}
-
-/**
- * @brief Destroy all partitions in list.
- *
- * @remark The allocated size of the list will not shrink.
- */
-void rd_kafka_topic_partition_list_clear(
- rd_kafka_topic_partition_list_t *rktparlist) {
- int i;
-
- for (i = 0; i < rktparlist->cnt; i++)
- rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
-
- rktparlist->cnt = 0;
-}
-
-
-void rd_kafka_topic_partition_destroy_free(void *ptr) {
- rd_kafka_topic_partition_destroy0(ptr, rd_true /*do_free*/);
-}
-
-void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar) {
- rd_kafka_topic_partition_destroy0(rktpar, 1);
-}
-
-
-/**
- * Destroys a list previously created with .._list_new() and drops
- * any references to contained toppars.
- */
-void rd_kafka_topic_partition_list_destroy(
- rd_kafka_topic_partition_list_t *rktparlist) {
- int i;
-
- for (i = 0; i < rktparlist->cnt; i++)
- rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
-
- if (rktparlist->elems)
- rd_free(rktparlist->elems);
-
- rd_free(rktparlist);
-}
-
-
-/**
- * @brief Wrapper for rd_kafka_topic_partition_list_destroy() that
- * matches the standard free(void *) signature, for callback use.
- */
-void rd_kafka_topic_partition_list_destroy_free(void *ptr) {
- rd_kafka_topic_partition_list_destroy(
- (rd_kafka_topic_partition_list_t *)ptr);
-}
-
-
-/**
- * @brief Add a partition to an rktpar list.
- * The list is grown if there is not enough room to fit it.
- *
- * @param rktp Optional partition object that will be stored on the
- * ._private object (with refcount increased).
- *
- * @returns a pointer to the added element.
- */
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0(
- const char *func,
- int line,
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition,
- rd_kafka_toppar_t *rktp,
- const rd_kafka_topic_partition_private_t *parpriv) {
- rd_kafka_topic_partition_t *rktpar;
- if (rktparlist->cnt == rktparlist->size)
- rd_kafka_topic_partition_list_grow(rktparlist, 1);
- rd_kafka_assert(NULL, rktparlist->cnt < rktparlist->size);
-
- rktpar = &rktparlist->elems[rktparlist->cnt++];
- memset(rktpar, 0, sizeof(*rktpar));
- rktpar->topic = rd_strdup(topic);
- rktpar->partition = partition;
- rktpar->offset = RD_KAFKA_OFFSET_INVALID;
-
- if (parpriv) {
- rd_kafka_topic_partition_private_t *parpriv_copy =
- rd_kafka_topic_partition_get_private(rktpar);
- if (parpriv->rktp) {
- parpriv_copy->rktp =
- rd_kafka_toppar_keep_fl(func, line, parpriv->rktp);
- }
- parpriv_copy->leader_epoch = parpriv->leader_epoch;
- parpriv_copy->current_leader_epoch = parpriv->current_leader_epoch;
- } else if (rktp) {
- rd_kafka_topic_partition_private_t *parpriv_copy =
- rd_kafka_topic_partition_get_private(rktpar);
- parpriv_copy->rktp = rd_kafka_toppar_keep_fl(func, line, rktp);
- }
-
- return rktpar;
-}
-
-
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition) {
- return rd_kafka_topic_partition_list_add0(
- __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL);
-}
-
-
-/**
- * Adds a consecutive range of partitions to a list.
- */
-void rd_kafka_topic_partition_list_add_range(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t start,
- int32_t stop) {
-
- for (; start <= stop; start++)
- rd_kafka_topic_partition_list_add(rktparlist, topic, start);
-}
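-
-
-/**
- * Example (editor's sketch, not part of the original source): equivalent
- * to four individual _add() calls for the hypothetical topic "mytopic".
- *
- * @code
- * rd_kafka_topic_partition_list_add_range(parts, "mytopic", 0, 3);
- * @endcode
- */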
-
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition) {
- rd_kafka_topic_partition_t *rktpar;
-
- if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic,
- partition)))
- return rktpar;
-
- return rd_kafka_topic_partition_list_add(rktparlist, topic, partition);
-}
-
-
-
-/**
- * @brief Creates a copy of \p rktpar and adds it to \p rktparlist
- */
-void rd_kafka_topic_partition_list_add_copy(
- rd_kafka_topic_partition_list_t *rktparlist,
- const rd_kafka_topic_partition_t *rktpar) {
- rd_kafka_topic_partition_t *dst;
-
- dst = rd_kafka_topic_partition_list_add0(
- __FUNCTION__, __LINE__, rktparlist, rktpar->topic,
- rktpar->partition, NULL, rktpar->_private);
- rd_kafka_topic_partition_update(dst, rktpar);
-}
-
-
-
-/**
- * Create and return a copy of list 'src'
- */
-rd_kafka_topic_partition_list_t *
-rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src) {
- rd_kafka_topic_partition_list_t *dst;
- int i;
-
- dst = rd_kafka_topic_partition_list_new(src->size);
-
- for (i = 0; i < src->cnt; i++)
- rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]);
- return dst;
-}
-
-/**
- * @brief Same as rd_kafka_topic_partition_list_copy() but suitable for
- * rd_list_copy(). The \p opaque is ignored.
- */
-void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque) {
- return rd_kafka_topic_partition_list_copy(src);
-}
-
-/**
- * @brief Append copies of all elements in \p src to \p dst.
- * No duplicate-checks are performed.
- */
-void rd_kafka_topic_partition_list_add_list(
- rd_kafka_topic_partition_list_t *dst,
- const rd_kafka_topic_partition_list_t *src) {
- int i;
-
- if (src->cnt == 0)
- return;
-
- if (dst->size < dst->cnt + src->cnt)
- rd_kafka_topic_partition_list_grow(dst, src->cnt);
-
- for (i = 0; i < src->cnt; i++)
- rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]);
-}
-
-
-/**
- * @brief Compare two partition lists using partition comparator \p cmp.
- *
- * @warning This is an O(Na*Nb) operation.
- */
-int rd_kafka_topic_partition_list_cmp(const void *_a,
- const void *_b,
- int (*cmp)(const void *, const void *)) {
- const rd_kafka_topic_partition_list_t *a = _a, *b = _b;
- int r;
- int i;
-
- r = a->cnt - b->cnt;
- if (r || a->cnt == 0)
- return r;
-
- /* Since the lists may not be sorted we need to scan all of B
- * for each element in A.
- * FIXME: If the list sizes are larger than X we could create a
- * temporary hash map instead. */
- for (i = 0; i < a->cnt; i++) {
- int j;
-
- for (j = 0; j < b->cnt; j++) {
- r = cmp(&a->elems[i], &b->elems[j]);
- if (!r)
- break;
- }
-
- if (j == b->cnt)
- return 1;
- }
-
- return 0;
-}
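-
-
-/**
- * Example (editor's sketch, not part of the original source): an
- * order-insensitive equality check of two partition lists using the
- * standard comparator; `a` and `b` are hypothetical lists.
- *
- * @code
- * int equal = !rd_kafka_topic_partition_list_cmp(
- *     a, b, rd_kafka_topic_partition_cmp); // 0 return means equal
- * @endcode
- */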
-
-
-/**
- * @brief Ensures the \p rktpar has a toppar set in _private.
- *
- * @returns the toppar object (or NULL if it could not be found or created)
- * WITHOUT refcnt increased.
- */
-rd_kafka_toppar_t *
-rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk,
- rd_kafka_topic_partition_t *rktpar,
- rd_bool_t create_on_miss) {
- rd_kafka_topic_partition_private_t *parpriv;
-
- parpriv = rd_kafka_topic_partition_get_private(rktpar);
-
- if (!parpriv->rktp)
- parpriv->rktp = rd_kafka_toppar_get2(
- rk, rktpar->topic, rktpar->partition,
- 0 /* not ua on miss */, create_on_miss);
-
- return parpriv->rktp;
-}
-
-
-int rd_kafka_topic_partition_cmp(const void *_a, const void *_b) {
- const rd_kafka_topic_partition_t *a = _a;
- const rd_kafka_topic_partition_t *b = _b;
- int r = strcmp(a->topic, b->topic);
- if (r)
- return r;
- else
- return RD_CMP(a->partition, b->partition);
-}
-
-/** @brief Compare only the topic */
-int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) {
- const rd_kafka_topic_partition_t *a = _a;
- const rd_kafka_topic_partition_t *b = _b;
- return strcmp(a->topic, b->topic);
-}
-
-static int rd_kafka_topic_partition_cmp_opaque(const void *_a,
- const void *_b,
- void *opaque) {
- return rd_kafka_topic_partition_cmp(_a, _b);
-}
-
-/** @returns a hash of the topic and partition */
-unsigned int rd_kafka_topic_partition_hash(const void *_a) {
- const rd_kafka_topic_partition_t *a = _a;
- int r = 31 * 17 + a->partition;
- return 31 * r + rd_string_hash(a->topic, -1);
-}
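-
-
-/**
- * Worked example (editor's note, not part of the original source): for
- * topic "t", partition 2, the hash is
- * 31 * (31 * 17 + 2) + rd_string_hash("t", -1), so both the partition
- * number and the topic name perturb the result.
- */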
-
-
-
-/**
- * @brief Search 'rktparlist' for 'topic' and 'partition'.
- * @returns the elems[] index or -1 on miss.
- */
-static int rd_kafka_topic_partition_list_find0(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition,
- int (*cmp)(const void *, const void *)) {
- rd_kafka_topic_partition_t skel;
- int i;
-
- skel.topic = (char *)topic;
- skel.partition = partition;
-
- for (i = 0; i < rktparlist->cnt; i++) {
- if (!cmp(&skel, &rktparlist->elems[i]))
- return i;
- }
-
- return -1;
-}
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition) {
- int i = rd_kafka_topic_partition_list_find0(
- rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
- if (i == -1)
- return NULL;
- else
- return &rktparlist->elems[i];
-}
-
-
-int rd_kafka_topic_partition_list_find_idx(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition) {
- return rd_kafka_topic_partition_list_find0(
- rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
-}
-
-
-/**
- * @returns the first element that matches \p topic, regardless of partition.
- */
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic) {
- int i = rd_kafka_topic_partition_list_find0(
- rktparlist, topic, RD_KAFKA_PARTITION_UA,
- rd_kafka_topic_partition_cmp_topic);
- if (i == -1)
- return NULL;
- else
- return &rktparlist->elems[i];
-}
-
-
-int rd_kafka_topic_partition_list_del_by_idx(
- rd_kafka_topic_partition_list_t *rktparlist,
- int idx) {
- if (unlikely(idx < 0 || idx >= rktparlist->cnt))
- return 0;
-
- rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0);
- memmove(&rktparlist->elems[idx], &rktparlist->elems[idx + 1],
- (rktparlist->cnt - idx - 1) * sizeof(rktparlist->elems[idx]));
- rktparlist->cnt--;
-
- return 1;
-}
-
-
-int rd_kafka_topic_partition_list_del(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition) {
- int i = rd_kafka_topic_partition_list_find0(
- rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
- if (i == -1)
- return 0;
-
- return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i);
-}
-
-
-
-/**
- * Returns true if 'topic' matches the 'rktpar', else false.
- * On match, if rktpar is a regex pattern then 'matched_by_regex' is set to 1.
- */
-int rd_kafka_topic_partition_match(rd_kafka_t *rk,
- const rd_kafka_group_member_t *rkgm,
- const rd_kafka_topic_partition_t *rktpar,
- const char *topic,
- int *matched_by_regex) {
- int ret = 0;
-
- if (*rktpar->topic == '^') {
- char errstr[128];
-
- ret = rd_regex_match(rktpar->topic, topic, errstr,
- sizeof(errstr));
- if (ret == -1) {
- rd_kafka_dbg(rk, CGRP, "SUBMATCH",
- "Invalid regex for member "
- "\"%.*s\" subscription \"%s\": %s",
- RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
- rktpar->topic, errstr);
- return 0;
- }
-
- if (ret && matched_by_regex)
- *matched_by_regex = 1;
-
- } else if (!strcmp(rktpar->topic, topic)) {
-
- if (matched_by_regex)
- *matched_by_regex = 0;
-
- ret = 1;
- }
-
- return ret;
-}
-
-
-
-void rd_kafka_topic_partition_list_sort(
- rd_kafka_topic_partition_list_t *rktparlist,
- int (*cmp)(const void *, const void *, void *),
- void *opaque) {
-
- if (!cmp)
- cmp = rd_kafka_topic_partition_cmp_opaque;
-
- rd_qsort_r(rktparlist->elems, rktparlist->cnt,
- sizeof(*rktparlist->elems), cmp, opaque);
-}
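-
-
-/**
- * Example (editor's sketch, not part of the original source): sorting a
- * hypothetical list by topic name, then partition, using the default
- * comparator (pass NULL for \p cmp).
- *
- * @code
- * rd_kafka_topic_partition_list_sort(parts, NULL, NULL);
- * @endcode
- */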
-
-
-void rd_kafka_topic_partition_list_sort_by_topic(
- rd_kafka_topic_partition_list_t *rktparlist) {
- rd_kafka_topic_partition_list_sort(
- rktparlist, rd_kafka_topic_partition_cmp_opaque, NULL);
-}
-
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition,
- int64_t offset) {
- rd_kafka_topic_partition_t *rktpar;
-
- if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic,
- partition)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- rktpar->offset = offset;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Reset all offsets to the provided value.
- */
-void rd_kafka_topic_partition_list_reset_offsets(
- rd_kafka_topic_partition_list_t *rktparlist,
- int64_t offset) {
-
- int i;
- for (i = 0; i < rktparlist->cnt; i++)
- rktparlist->elems[i].offset = offset;
-}
-
-
-/**
- * Set offset values in partition list based on toppar's last stored offset.
- *
- * from_rktp - true: set rktp's last stored offset, false: set def_value
- * unless a concrete offset is set.
- * is_commit: indicates that set offset is to be committed (for debug log)
- *
- * Returns the number of valid non-logical offsets (>=0).
- */
-int rd_kafka_topic_partition_list_set_offsets(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- int from_rktp,
- int64_t def_value,
- int is_commit) {
- int i;
- int valid_cnt = 0;
-
- for (i = 0; i < rktparlist->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
- const char *verb = "setting";
- char preamble[128];
-
- *preamble = '\0'; /* Avoid warning */
-
- if (from_rktp) {
- rd_kafka_toppar_t *rktp =
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
- rd_true);
- rd_kafka_toppar_lock(rktp);
-
- if (rk->rk_conf.debug &
- (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_TOPIC))
- rd_snprintf(preamble, sizeof(preamble),
- "stored %s, committed %s: ",
- rd_kafka_fetch_pos2str(
- rktp->rktp_stored_pos),
- rd_kafka_fetch_pos2str(
- rktp->rktp_committed_pos));
-
- if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
- &rktp->rktp_committed_pos) >
- 0) {
- verb = "setting stored";
- rd_kafka_topic_partition_set_from_fetch_pos(
- rktpar, rktp->rktp_stored_pos);
- } else {
- rktpar->offset = RD_KAFKA_OFFSET_INVALID;
- }
- rd_kafka_toppar_unlock(rktp);
- } else {
- if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) {
- verb = "setting default";
- rktpar->offset = def_value;
- rd_kafka_topic_partition_set_leader_epoch(
- rktpar, -1);
- } else
- verb = "keeping";
- }
-
- if (is_commit && rktpar->offset == RD_KAFKA_OFFSET_INVALID)
- rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET",
- "Topic %s [%" PRId32
- "]: "
- "%snot including in commit",
- rktpar->topic, rktpar->partition,
- preamble);
- else
- rd_kafka_dbg(
- rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET",
- "Topic %s [%" PRId32
- "]: "
- "%s%s offset %s (leader epoch %" PRId32 ") %s",
- rktpar->topic, rktpar->partition, preamble, verb,
- rd_kafka_offset2str(rktpar->offset),
- rd_kafka_topic_partition_get_leader_epoch(rktpar),
- is_commit ? " for commit" : "");
-
- if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset))
- valid_cnt++;
- }
-
- return valid_cnt;
-}
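-
-
-/**
- * Example (editor's sketch, not part of the original source): filling a
- * hypothetical commit list from each toppar's stored position; partitions
- * with nothing new to commit are left at INVALID and later skipped.
- *
- * @code
- * int valid = rd_kafka_topic_partition_list_set_offsets(
- *     rk, parts, 1, RD_KAFKA_OFFSET_INVALID, 1); // from_rktp, is_commit
- * @endcode
- */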
-
-
-/**
- * @returns the number of partitions with absolute (non-logical) offsets set.
- */
-int rd_kafka_topic_partition_list_count_abs_offsets(
- const rd_kafka_topic_partition_list_t *rktparlist) {
- int i;
- int valid_cnt = 0;
-
- for (i = 0; i < rktparlist->cnt; i++)
- if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset))
- valid_cnt++;
-
- return valid_cnt;
-}
-
-
-/**
- * @brief Update _private (toppar) field to point to valid rktp
- * for each partition.
- *
- * @param create_on_miss Create partition (and topic_t object) if necessary.
- */
-void rd_kafka_topic_partition_list_update_toppars(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_bool_t create_on_miss) {
- int i;
- for (i = 0; i < rktparlist->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
-
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
- create_on_miss);
- }
-}
-
-
-/**
- * @brief Populate \p leaders with the leaders+partitions for the partitions in
- * \p rktparlist. Duplicates are suppressed.
- *
- * If no leader is found for a partition that element's \c .err will
- * be set to RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE.
- *
- * If the partition does not exist \c .err will be set to
- * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION.
- *
- * @param rktparlist The partitions to look up leaders for, the .err field
- * will be set according to outcome, e.g., ERR_NO_ERROR,
- * ERR_UNKNOWN_TOPIC_OR_PART, etc.
- * @param leaders rd_list_t of allocated (struct rd_kafka_partition_leader *)
- * @param query_topics (optional) rd_list of strdupped (char *)
- * @param query_unknown Add unknown topics to \p query_topics.
- * @param eonce (optional) For triggering asynchronously on cache change
- * in case not all leaders are known now.
- *
- * @remark This is based on the current topic_t and partition state
- * which may lag behind the last metadata update due to internal
- * threading and also the fact that no topic_t may have been created.
- *
- * @returns true if all partitions have leaders, else false.
- *
- * @sa rd_kafka_topic_partition_list_get_leaders_by_metadata
- *
- * @locks rd_kafka_*lock() MUST NOT be held
- */
-static rd_bool_t rd_kafka_topic_partition_list_get_leaders(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *leaders,
- rd_list_t *query_topics,
- rd_bool_t query_unknown,
- rd_kafka_enq_once_t *eonce) {
- rd_bool_t complete;
- int cnt = 0;
- int i;
-
- if (eonce)
- rd_kafka_wrlock(rk);
- else
- rd_kafka_rdlock(rk);
-
- for (i = 0; i < rktparlist->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
- rd_kafka_topic_partition_t *rktpar2;
- rd_kafka_broker_t *rkb = NULL;
- struct rd_kafka_partition_leader leader_skel;
- struct rd_kafka_partition_leader *leader;
- const rd_kafka_metadata_topic_t *mtopic;
- const rd_kafka_metadata_partition_t *mpart;
- rd_bool_t topic_wait_cache;
-
- rd_kafka_metadata_cache_topic_partition_get(
- rk, &mtopic, &mpart, rktpar->topic, rktpar->partition,
- 0 /*negative entries too*/);
-
- topic_wait_cache =
- !mtopic ||
- RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(mtopic->err);
-
- if (!topic_wait_cache && mtopic &&
- mtopic->err != RD_KAFKA_RESP_ERR_NO_ERROR &&
- mtopic->err != RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) {
- /* Topic permanently errored */
- rktpar->err = mtopic->err;
- continue;
- }
-
- if (mtopic && !mpart && mtopic->partition_cnt > 0) {
- /* Topic exists but partition doesn't.
- * This is a permanent error. */
- rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- continue;
- }
-
- if (mpart &&
- (mpart->leader == -1 ||
- !(rkb = rd_kafka_broker_find_by_nodeid0(
- rk, mpart->leader, -1 /*any state*/, rd_false)))) {
- /* Partition has no (valid) leader.
- * This is a permanent error. */
- rktpar->err =
- mtopic->err
- ? mtopic->err
- : RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE;
- continue;
- }
-
- if (topic_wait_cache || !rkb) {
- /* Topic unknown or no current leader for partition,
- * add topic to query list. */
- rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
- if (query_topics &&
- !rd_list_find(query_topics, rktpar->topic,
- (void *)strcmp))
- rd_list_add(query_topics,
- rd_strdup(rktpar->topic));
- continue;
- }
-
- /* Leader exists, add to leader list. */
-
- rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- memset(&leader_skel, 0, sizeof(leader_skel));
- leader_skel.rkb = rkb;
-
- leader = rd_list_find(leaders, &leader_skel,
- rd_kafka_partition_leader_cmp);
-
- if (!leader) {
- leader = rd_kafka_partition_leader_new(rkb);
- rd_list_add(leaders, leader);
- }
-
- rktpar2 = rd_kafka_topic_partition_list_find(
- leader->partitions, rktpar->topic, rktpar->partition);
- if (rktpar2) {
- /* Already exists in partitions list, just update. */
- rd_kafka_topic_partition_update(rktpar2, rktpar);
- } else {
- /* Make a copy of rktpar and add to partitions list */
- rd_kafka_topic_partition_list_add_copy(
- leader->partitions, rktpar);
- }
-
- rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rd_kafka_broker_destroy(rkb); /* lose refcount */
- cnt++;
- }
-
- complete = cnt == rktparlist->cnt;
-
- if (!complete && eonce)
- /* Add eonce to cache observers */
- rd_kafka_metadata_cache_wait_state_change_async(rk, eonce);
-
- if (eonce)
- rd_kafka_wrunlock(rk);
- else
- rd_kafka_rdunlock(rk);
-
- return complete;
-}
-
-
-/**
- * @brief Timer timeout callback for query_leaders_async rko's eonce object.
- */
-static void
-rd_kafka_partition_leader_query_eonce_timeout_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_enq_once_t *eonce = arg;
- rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT,
- "timeout timer");
-}
-
-
-/**
- * @brief Query timer callback for query_leaders_async rko's eonce object.
- */
-static void
-rd_kafka_partition_leader_query_eonce_timer_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_enq_once_t *eonce = arg;
- rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
- "query timer");
-}
-
-
-/**
- * @brief Query metadata cache for partition leaders, or trigger metadata
- * refresh if leaders not known.
- *
- * @locks_required none
- * @locality any
- */
-static rd_kafka_op_res_t
-rd_kafka_topic_partition_list_query_leaders_async_worker(rd_kafka_op_t *rko) {
- rd_kafka_t *rk = rko->rko_rk;
- rd_list_t query_topics, *leaders = NULL;
- rd_kafka_op_t *reply;
-
- RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_LEADERS);
-
- if (rko->rko_err)
- goto reply; /* Timeout or ERR__DESTROY */
-
- /* Since we're iterating over get_leaders() until all partition leaders
- * are known we need to re-enable the eonce to be triggered again (which
- * is not necessary the first time we get here, but there
- * is no harm doing it then either). */
- rd_kafka_enq_once_reenable(rko->rko_u.leaders.eonce, rko,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- /* Look up the leaders in the metadata cache, if not all leaders
- * are known the eonce is registered for metadata cache changes
- * which will cause our function to be called
- * again on (any) metadata cache change.
- *
- * When we are called again we perform the cache lookup again and
- * hopefully get all leaders, otherwise defer a new async wait.
- * Repeat until success or timeout. */
-
- rd_list_init(&query_topics, 4 + rko->rko_u.leaders.partitions->cnt / 2,
- rd_free);
-
- leaders = rd_list_new(1 + rko->rko_u.leaders.partitions->cnt / 2,
- rd_kafka_partition_leader_destroy_free);
-
- if (rd_kafka_topic_partition_list_get_leaders(
- rk, rko->rko_u.leaders.partitions, leaders, &query_topics,
- /* Add unknown topics to query_topics only on the
- * first query, after that we consider them permanently
- * non-existent */
- rko->rko_u.leaders.query_cnt == 0, rko->rko_u.leaders.eonce)) {
- /* All leaders now known (or failed), reply to caller */
- rd_list_destroy(&query_topics);
- goto reply;
- }
-
- if (rd_list_empty(&query_topics)) {
- /* Not all leaders known but no topics left to query,
- * reply to caller. */
- rd_list_destroy(&query_topics);
- goto reply;
- }
-
- /* Need to refresh topic metadata, but at most every interval. */
- if (!rd_kafka_timer_is_started(&rk->rk_timers,
- &rko->rko_u.leaders.query_tmr)) {
-
- rko->rko_u.leaders.query_cnt++;
-
- /* Add query interval timer. */
- rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce,
- "query timer");
- rd_kafka_timer_start_oneshot(
- &rk->rk_timers, &rko->rko_u.leaders.query_tmr, rd_true,
- 3 * 1000 * 1000 /* 3s */,
- rd_kafka_partition_leader_query_eonce_timer_cb,
- rko->rko_u.leaders.eonce);
-
- /* Request metadata refresh */
- rd_kafka_metadata_refresh_topics(
- rk, NULL, &query_topics, rd_true /*force*/,
- rd_false /*!allow_auto_create*/, rd_false /*!cgrp_update*/,
- "query partition leaders");
- }
-
- rd_list_destroy(leaders);
- rd_list_destroy(&query_topics);
-
- /* Wait for next eonce trigger */
- return RD_KAFKA_OP_RES_KEEP; /* rko is still used */
-
-reply:
- /* Decommission worker state and reply to caller */
-
- if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr,
- RD_DO_LOCK))
- rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce,
- "query timer");
- if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr,
- RD_DO_LOCK))
- rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce,
- "timeout timer");
-
- if (rko->rko_u.leaders.eonce) {
- rd_kafka_enq_once_disable(rko->rko_u.leaders.eonce);
- rko->rko_u.leaders.eonce = NULL;
- }
-
- /* No leaders found, set a request-level error */
- if (leaders && rd_list_cnt(leaders) == 0) {
- if (!rko->rko_err)
- rko->rko_err = RD_KAFKA_RESP_ERR__NOENT;
- rd_list_destroy(leaders);
- leaders = NULL;
- }
-
- /* Create and enqueue reply rko */
- if (rko->rko_u.leaders.replyq.q) {
- reply = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_LEADERS,
- rko->rko_u.leaders.cb);
- rd_kafka_op_get_reply_version(reply, rko);
- reply->rko_err = rko->rko_err;
- reply->rko_u.leaders.partitions =
- rko->rko_u.leaders.partitions; /* Transfer ownership for
- * partition list that
- * now contains
- * per-partition errors*/
- rko->rko_u.leaders.partitions = NULL;
- reply->rko_u.leaders.leaders = leaders; /* Possibly NULL */
- reply->rko_u.leaders.opaque = rko->rko_u.leaders.opaque;
-
- rd_kafka_replyq_enq(&rko->rko_u.leaders.replyq, reply, 0);
- }
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-static rd_kafka_op_res_t
-rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb(
- rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- return rd_kafka_topic_partition_list_query_leaders_async_worker(rko);
-}
-
-/**
- * @brief Async variant of rd_kafka_topic_partition_list_query_leaders().
- *
- * The reply rko op will contain:
- * - .leaders which is a list of leaders and their partitions, this may be
- * NULL for overall errors (such as no leaders are found), or a
- * partial or complete list of leaders.
- * - .partitions which is a copy of the input list of partitions with the
- * .err field set to the outcome of the leader query, typically ERR_NO_ERROR
- * or ERR_UNKNOWN_TOPIC_OR_PART.
- *
- * @locks_acquired rd_kafka_*lock()
- *
- * @remark rd_kafka_*lock() MUST NOT be held
- */
-void rd_kafka_topic_partition_list_query_leaders_async(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *rktparlist,
- int timeout_ms,
- rd_kafka_replyq_t replyq,
- rd_kafka_op_cb_t *cb,
- void *opaque) {
- rd_kafka_op_t *rko;
-
- rd_assert(rktparlist && rktparlist->cnt > 0);
- rd_assert(replyq.q);
-
- rko = rd_kafka_op_new_cb(
- rk, RD_KAFKA_OP_LEADERS,
- rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb);
- rko->rko_u.leaders.replyq = replyq;
- rko->rko_u.leaders.partitions =
- rd_kafka_topic_partition_list_copy(rktparlist);
- rko->rko_u.leaders.ts_timeout = rd_timeout_init(timeout_ms);
- rko->rko_u.leaders.cb = cb;
- rko->rko_u.leaders.opaque = opaque;
-
- /* Create an eonce to be triggered either by metadata cache update
- * (from refresh_topics()), query interval, or timeout. */
- rko->rko_u.leaders.eonce =
- rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, "timeout timer");
- rd_kafka_timer_start_oneshot(
- &rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, rd_true,
- rd_timeout_remains_us(rko->rko_u.leaders.ts_timeout),
- rd_kafka_partition_leader_query_eonce_timeout_cb,
- rko->rko_u.leaders.eonce);
-
- if (rd_kafka_topic_partition_list_query_leaders_async_worker(rko) ==
- RD_KAFKA_OP_RES_HANDLED)
- rd_kafka_op_destroy(rko); /* Reply queue already disabled */
-}
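-
-
-/**
- * Example (editor's sketch, not part of the original source): a reply
- * callback for the async leader query that counts per-partition failures.
- * `my_leaders_cb` is hypothetical; the rko fields are those documented
- * above.
- *
- * @code
- * static rd_kafka_op_res_t my_leaders_cb(rd_kafka_t *rk, rd_kafka_q_t *rkq,
- *                                        rd_kafka_op_t *rko) {
- *         const rd_kafka_topic_partition_list_t *parts =
- *             rko->rko_u.leaders.partitions;
- *         int i, failed = 0;
- *
- *         for (i = 0; parts && i < parts->cnt; i++)
- *                 if (parts->elems[i].err)
- *                         failed++;
- *
- *         // rko->rko_u.leaders.leaders (possibly NULL) maps each broker
- *         // to the partitions it leads.
- *         rd_kafka_dbg(rk, TOPIC, "LEADERS",
- *                      "%d partition(s) without leader", failed);
- *         return RD_KAFKA_OP_RES_HANDLED;
- * }
- * @endcode
- */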
-
-
-/**
- * @brief Get leaders for all partitions in \p rktparlist, querying metadata
- * if needed.
- *
- * @param leaders is a pre-initialized (empty) list which will be populated
- * with the leader brokers and their partitions
- * (struct rd_kafka_partition_leader *)
- *
- * @remark Will not trigger topic auto creation (unless configured).
- *
- * @returns an error code on error.
- *
- * @locks rd_kafka_*lock() MUST NOT be held
- */
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *leaders,
- int timeout_ms) {
- rd_ts_t ts_end = rd_timeout_init(timeout_ms);
- rd_ts_t ts_query = 0;
- rd_ts_t now;
- int query_cnt = 0;
- int i = 0;
-
- /* Get all the partition leaders, try multiple times:
- * if there are no leaders after the first run fire off a leader
- * query and wait for broker state update before trying again,
- * keep trying and re-querying at increasing intervals until
- * success or timeout. */
- do {
- rd_list_t query_topics;
- int query_intvl;
-
- rd_list_init(&query_topics, rktparlist->cnt, rd_free);
-
- rd_kafka_topic_partition_list_get_leaders(
- rk, rktparlist, leaders, &query_topics,
- /* Add unknown topics to query_topics only on the
- * first query, after that we consider them
- * permanently non-existent */
- query_cnt == 0, NULL);
-
- if (rd_list_empty(&query_topics)) {
- /* No remaining topics to query: leader-list complete. */
- rd_list_destroy(&query_topics);
-
- /* No leader(s) for partitions means all partitions
- * are unknown. */
- if (rd_list_empty(leaders))
- return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- now = rd_clock();
-
- /*
- * Missing leader for some partitions
- */
- query_intvl = (i + 1) * 100; /* add 100ms per iteration */
- if (query_intvl > 2 * 1000)
- query_intvl = 2 * 1000; /* Cap to 2s */
-
- if (now >= ts_query + (query_intvl * 1000)) {
- /* Query metadata for missing leaders,
- * possibly creating the topic. */
- rd_kafka_metadata_refresh_topics(
- rk, NULL, &query_topics, rd_true /*force*/,
- rd_false /*!allow_auto_create*/,
- rd_false /*!cgrp_update*/,
- "query partition leaders");
- ts_query = now;
- query_cnt++;
-
- } else {
- /* Wait for broker ids to be updated from
- * metadata refresh above. */
- int wait_ms =
- rd_timeout_remains_limit(ts_end, query_intvl);
- rd_kafka_metadata_cache_wait_change(rk, wait_ms);
- }
-
- rd_list_destroy(&query_topics);
-
- i++;
- } while (ts_end == RD_POLL_INFINITE ||
- now < ts_end); /* now is deliberately outdated here
- * since wait_change() will block.
- * This gives us one more chance to spin through. */
-
- if (rd_atomic32_get(&rk->rk_broker_up_cnt) == 0)
- return RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN;
-
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
-}
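-
-/* Standalone sketch (illustration only, not part of librdkafka) of the
- * re-query backoff schedule used above: 100ms is added per iteration and
- * the interval is capped at 2s, i.e. 100, 200, ..., 1900, 2000, 2000, ... */
-static RD_UNUSED int rd_example_query_intvl_ms(int i) {
-        int query_intvl = (i + 1) * 100; /* add 100ms per iteration */
-        if (query_intvl > 2 * 1000)
-                query_intvl = 2 * 1000; /* Cap to 2s */
-        return query_intvl;
-}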
-
-
-/**
- * @brief Populate \p rkts with the rd_kafka_topic_t objects for the
- * partitions in \p rktparlist. Duplicates are suppressed.
- *
- * @returns the number of topics added.
- */
-int rd_kafka_topic_partition_list_get_topics(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *rkts) {
- int cnt = 0;
-
- int i;
- for (i = 0; i < rktparlist->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
- rd_kafka_toppar_t *rktp;
-
- rktp =
- rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
- if (!rktp) {
- rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
- continue;
- }
-
- if (!rd_list_find(rkts, rktp->rktp_rkt,
- rd_kafka_topic_cmp_rkt)) {
- rd_list_add(rkts, rd_kafka_topic_keep(rktp->rktp_rkt));
- cnt++;
- }
-
- rd_kafka_toppar_destroy(rktp);
- }
-
- return cnt;
-}
-
-
-/**
- * @brief Populate \p topics with the strdupped topic names in \p rktparlist.
- * Duplicates are suppressed.
- *
- * @param include_regex Include regex topics.
- *
- * @returns the number of topics added.
- */
-int rd_kafka_topic_partition_list_get_topic_names(
- const rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *topics,
- int include_regex) {
- int cnt = 0;
- int i;
-
- for (i = 0; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rktparlist->elems[i];
-
- if (!include_regex && *rktpar->topic == '^')
- continue;
-
- if (!rd_list_find(topics, rktpar->topic, (void *)strcmp)) {
- rd_list_add(topics, rd_strdup(rktpar->topic));
- cnt++;
- }
- }
-
- return cnt;
-}
-
-
-/**
- * @brief Create a copy of \p rktparlist only containing the partitions
- * matched by \p match function.
- *
- * \p match shall return 1 for match, else 0.
- *
- * @returns a new list
- */
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match(
- const rd_kafka_topic_partition_list_t *rktparlist,
- int (*match)(const void *elem, const void *opaque),
- void *opaque) {
- rd_kafka_topic_partition_list_t *newlist;
- int i;
-
- newlist = rd_kafka_topic_partition_list_new(0);
-
- for (i = 0; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rktparlist->elems[i];
-
- if (!match(rktpar, opaque))
- continue;
-
- rd_kafka_topic_partition_list_add_copy(newlist, rktpar);
- }
-
- return newlist;
-}
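-
-/* Usage sketch (illustrative): keep only partitions with a valid offset by
- * passing the rd_kafka_topic_partition_match_valid_offset() predicate
- * declared in rdkafka_partition.h:
- *
- *   rd_kafka_topic_partition_list_t *valid =
- *       rd_kafka_topic_partition_list_match(
- *           offsets, rd_kafka_topic_partition_match_valid_offset, NULL);
- *   ...
- *   rd_kafka_topic_partition_list_destroy(valid);
- */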
-
-void rd_kafka_topic_partition_list_log(
- rd_kafka_t *rk,
- const char *fac,
- int dbg,
- const rd_kafka_topic_partition_list_t *rktparlist) {
- int i;
-
- rd_kafka_dbg(rk, NONE | dbg, fac,
- "List with %d partition(s):", rktparlist->cnt);
- for (i = 0; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rktparlist->elems[i];
- rd_kafka_dbg(rk, NONE | dbg, fac,
- " %s [%" PRId32 "] offset %s%s%s", rktpar->topic,
- rktpar->partition,
- rd_kafka_offset2str(rktpar->offset),
- rktpar->err ? ": error: " : "",
- rktpar->err ? rd_kafka_err2str(rktpar->err) : "");
- }
-}
-
-/**
- * @returns a comma-separated list of partitions.
- */
-const char *rd_kafka_topic_partition_list_str(
- const rd_kafka_topic_partition_list_t *rktparlist,
- char *dest,
- size_t dest_size,
- int fmt_flags) {
- int i;
- size_t of = 0;
-
- for (i = 0; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rktparlist->elems[i];
- char errstr[128];
- char offsetstr[32];
- int r;
-
- if (!rktpar->err && (fmt_flags & RD_KAFKA_FMT_F_ONLY_ERR))
- continue;
-
- if (rktpar->err && !(fmt_flags & RD_KAFKA_FMT_F_NO_ERR))
- rd_snprintf(errstr, sizeof(errstr), "(%s)",
- rd_kafka_err2str(rktpar->err));
- else
- errstr[0] = '\0';
-
- if (rktpar->offset != RD_KAFKA_OFFSET_INVALID)
- rd_snprintf(offsetstr, sizeof(offsetstr), "@%" PRId64,
- rktpar->offset);
- else
- offsetstr[0] = '\0';
-
- r = rd_snprintf(&dest[of], dest_size - of,
- "%s"
- "%s[%" PRId32
- "]"
- "%s"
- "%s",
- of == 0 ? "" : ", ", rktpar->topic,
- rktpar->partition, offsetstr, errstr);
-
- if ((size_t)r >= dest_size - of) {
- rd_snprintf(&dest[dest_size - 4], 4, "...");
- break;
- }
-
- of += r;
- }
-
- return dest;
-}
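-
-/* Example (illustrative) output with fmt_flags == 0:
- *   "mytopic[0]@1234, mytopic[1](Broker: Not leader for partition)"
- * The "@<offset>" suffix is omitted for RD_KAFKA_OFFSET_INVALID, the
- * parenthesized error string is suppressed by RD_KAFKA_FMT_F_NO_ERR, and
- * RD_KAFKA_FMT_F_ONLY_ERR limits the output to errored partitions. */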
-
-
-
-/**
- * @brief Update \p dst with info from \p src.
- *
- * Fields updated:
- * - metadata
- * - metadata_size
- * - offset
- * - offset leader epoch
- * - err
- *
- * Will only update partitions that are in both dst and src; other partitions
- * will remain unchanged.
- */
-void rd_kafka_topic_partition_list_update(
- rd_kafka_topic_partition_list_t *dst,
- const rd_kafka_topic_partition_list_t *src) {
- int i;
-
- for (i = 0; i < dst->cnt; i++) {
- rd_kafka_topic_partition_t *d = &dst->elems[i];
- rd_kafka_topic_partition_t *s;
- rd_kafka_topic_partition_private_t *s_priv, *d_priv;
-
- if (!(s = rd_kafka_topic_partition_list_find(
- (rd_kafka_topic_partition_list_t *)src, d->topic,
- d->partition)))
- continue;
-
- d->offset = s->offset;
- d->err = s->err;
- if (d->metadata) {
- rd_free(d->metadata);
- d->metadata = NULL;
- d->metadata_size = 0;
- }
- if (s->metadata_size > 0) {
- d->metadata = rd_malloc(s->metadata_size);
- d->metadata_size = s->metadata_size;
- memcpy((void *)d->metadata, s->metadata,
- s->metadata_size);
- }
-
- s_priv = rd_kafka_topic_partition_get_private(s);
- d_priv = rd_kafka_topic_partition_get_private(d);
- d_priv->leader_epoch = s_priv->leader_epoch;
- }
-}
-
-
-/**
- * @returns the sum of \p cb called for each element.
- */
-size_t rd_kafka_topic_partition_list_sum(
- const rd_kafka_topic_partition_list_t *rktparlist,
- size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque),
- void *opaque) {
- int i;
- size_t sum = 0;
-
- for (i = 0; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rktparlist->elems[i];
- sum += cb(rktpar, opaque);
- }
-
- return sum;
-}
-
-
-/**
- * @returns rd_true if there are duplicate topic/partitions in the list,
- * rd_false if not.
- *
- * @remark Sorts the elements of the list.
- */
-rd_bool_t rd_kafka_topic_partition_list_has_duplicates(
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_bool_t ignore_partition) {
-
- int i;
-
- if (rktparlist->cnt <= 1)
- return rd_false;
-
- rd_kafka_topic_partition_list_sort_by_topic(rktparlist);
-
- for (i = 1; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *p1 =
- &rktparlist->elems[i - 1];
- const rd_kafka_topic_partition_t *p2 = &rktparlist->elems[i];
-
- if (((p1->partition == p2->partition) || ignore_partition) &&
- !strcmp(p1->topic, p2->topic)) {
- return rd_true;
- }
- }
-
- return rd_false;
-}
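-
-/* E.g. (illustrative): a list containing ("t",0) and ("t",1) has no
- * duplicates by default, but with \p ignore_partition set the repeated
- * topic name alone counts as a duplicate. */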
-
-
-/**
- * @brief Set the \c .err field to \p err on all partitions in the list.
- */
-void rd_kafka_topic_partition_list_set_err(
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_kafka_resp_err_t err) {
- int i;
-
- for (i = 0; i < rktparlist->cnt; i++)
- rktparlist->elems[i].err = err;
-}
-
-/**
- * @brief Get the first set error in the partition list.
- */
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err(
- const rd_kafka_topic_partition_list_t *rktparlist) {
- int i;
-
- for (i = 0; i < rktparlist->cnt; i++)
- if (rktparlist->elems[i].err)
- return rktparlist->elems[i].err;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @returns the number of wildcard/regex topics
- */
-int rd_kafka_topic_partition_list_regex_cnt(
- const rd_kafka_topic_partition_list_t *rktparlist) {
- int i;
- int cnt = 0;
-
- for (i = 0; i < rktparlist->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &rktparlist->elems[i];
- cnt += *rktpar->topic == '^';
- }
- return cnt;
-}
-
-
-/**
- * @brief Reset base sequence for this toppar.
- *
- * See rd_kafka_toppar_pid_change() below.
- *
- * @warning Toppar must be completely drained.
- *
- * @locality toppar handler thread
- * @locks toppar_lock MUST be held.
- */
-static void rd_kafka_toppar_reset_base_msgid(rd_kafka_toppar_t *rktp,
- uint64_t new_base_msgid) {
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "RESETSEQ",
- "%.*s [%" PRId32
- "] "
- "resetting epoch base seq from %" PRIu64 " to %" PRIu64,
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
- rktp->rktp_eos.epoch_base_msgid, new_base_msgid);
-
- rktp->rktp_eos.next_ack_seq = 0;
- rktp->rktp_eos.next_err_seq = 0;
- rktp->rktp_eos.epoch_base_msgid = new_base_msgid;
-}
-
-
-/**
- * @brief Update/change the Producer ID for this toppar.
- *
- * Must only be called when pid is different from the current toppar pid.
- *
- * The epoch base sequence will be set to \p base_msgid, which must be the
- * msgid of the first message in the partition queue. However, if there are
- * outstanding messages in-flight to the broker we will need to wait for
- * these ProduceRequests to finish (most likely with failure) and have their
- * messages re-enqueued to maintain the original order.
- * In this case the pid will not be updated and this function should be
- * called again when there are no outstanding messages.
- *
- * @remark This function must only be called when rktp_xmitq is non-empty.
- *
- * @returns 1 if a new pid was set, else 0.
- *
- * @locality toppar handler thread
- * @locks none
- */
-int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp,
- rd_kafka_pid_t pid,
- uint64_t base_msgid) {
- int inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight);
-
- if (unlikely(inflight > 0)) {
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID",
- "%.*s [%" PRId32
- "] will not change %s -> %s yet: "
- "%d message(s) still in-flight from current "
- "epoch",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid),
- rd_kafka_pid2str(pid), inflight);
- return 0;
- }
-
- rd_assert(base_msgid != 0 &&
- *"BUG: pid_change() must only be called with "
- "non-empty xmitq");
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID",
- "%.*s [%" PRId32
- "] changed %s -> %s "
- "with base MsgId %" PRIu64,
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid),
- rd_kafka_pid2str(pid), base_msgid);
-
- rktp->rktp_eos.pid = pid;
- rd_kafka_toppar_reset_base_msgid(rktp, base_msgid);
-
- rd_kafka_toppar_unlock(rktp);
-
- return 1;
-}
-
-
-/**
- * @brief Purge messages in partition queues.
- * Delivery reports will be enqueued for all purged messages, with the error
- * code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE.
- *
- * @param include_xmit_msgq If executing from the rktp's current broker handler
- * thread, also include the xmit message queue.
- *
- * @warning Only to be used with the producer.
- *
- * @returns the number of messages purged
- *
- * @locality any thread.
- * @locks_acquired rd_kafka_toppar_lock()
- * @locks_required none
- */
-int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp,
- int purge_flags,
- rd_bool_t include_xmit_msgq) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
- rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
- int cnt;
-
- rd_assert(rk->rk_type == RD_KAFKA_PRODUCER);
-
- rd_kafka_dbg(rk, TOPIC, "PURGE",
- "%s [%" PRId32
- "]: purging queues "
- "(purge_flags 0x%x, %s xmit_msgq)",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- purge_flags, include_xmit_msgq ? "include" : "exclude");
-
- if (!(purge_flags & RD_KAFKA_PURGE_F_QUEUE))
- return 0;
-
- if (include_xmit_msgq) {
- /* xmit_msgq is owned by the toppar handler thread
- * (broker thread) and requires no locking. */
- rd_assert(rktp->rktp_broker);
- rd_assert(thrd_is_current(rktp->rktp_broker->rkb_thread));
- rd_kafka_msgq_concat(&rkmq, &rktp->rktp_xmit_msgq);
- }
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_msgq_concat(&rkmq, &rktp->rktp_msgq);
- cnt = rd_kafka_msgq_len(&rkmq);
-
- if (cnt > 0 && purge_flags & RD_KAFKA_PURGE_F_ABORT_TXN) {
- /* All messages in-queue are purged
- * on abort_transaction(). Since these messages
- * will not be produced (retried) we need to adjust the
- * idempotence epoch's base msgid to skip the messages. */
- rktp->rktp_eos.epoch_base_msgid += cnt;
- rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_EOS, "ADVBASE",
- "%.*s [%" PRId32
- "] "
- "advancing epoch base msgid to %" PRIu64
- " due to %d message(s) in aborted transaction",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rktp->rktp_eos.epoch_base_msgid, cnt);
- }
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_dr_msgq(rktp->rktp_rkt, &rkmq, RD_KAFKA_RESP_ERR__PURGE_QUEUE);
-
- return cnt;
-}
-
-
-/**
- * @brief Purge queues for the unassigned toppars of all known topics.
- *
- * @locality application thread
- * @locks none
- */
-void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk) {
- rd_kafka_topic_t *rkt;
- int msg_cnt = 0, part_cnt = 0;
-
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- rd_kafka_toppar_t *rktp;
- int r;
-
- rd_kafka_topic_rdlock(rkt);
- rktp = rkt->rkt_ua;
- if (rktp)
- rd_kafka_toppar_keep(rktp);
- rd_kafka_topic_rdunlock(rkt);
-
- if (unlikely(!rktp))
- continue;
-
-
- rd_kafka_toppar_lock(rktp);
-
- r = rd_kafka_msgq_len(&rktp->rktp_msgq);
- rd_kafka_dr_msgq(rkt, &rktp->rktp_msgq,
- RD_KAFKA_RESP_ERR__PURGE_QUEUE);
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_destroy(rktp);
-
- if (r > 0) {
- msg_cnt += r;
- part_cnt++;
- }
- }
- rd_kafka_rdunlock(rk);
-
- rd_kafka_dbg(rk, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
- "Purged %i message(s) from %d UA-partition(s)", msg_cnt,
- part_cnt);
-}
-
-
-void rd_kafka_partition_leader_destroy_free(void *ptr) {
- struct rd_kafka_partition_leader *leader = ptr;
- rd_kafka_partition_leader_destroy(leader);
-}
-
-
-const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos) {
- static RD_TLS char ret[2][64];
- static int idx;
-
- idx = (idx + 1) % 2;
-
- rd_snprintf(
- ret[idx], sizeof(ret[idx]), "offset %s (leader epoch %" PRId32 ")",
- rd_kafka_offset2str(fetchpos.offset), fetchpos.leader_epoch);
-
- return ret[idx];
-}
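-
-/* The two rotating thread-local return buffers above allow up to two calls
- * within a single expression, e.g. (illustrative):
- *
- *   rd_kafka_dbg(rk, TOPIC, "SEEK", "%s -> %s",
- *                rd_kafka_fetch_pos2str(old_pos),
- *                rd_kafka_fetch_pos2str(new_pos));
- */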
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h
deleted file mode 100644
index a1f1f47cd..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_PARTITION_H_
-#define _RDKAFKA_PARTITION_H_
-
-#include "rdkafka_topic.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_broker.h"
-
-extern const char *rd_kafka_fetch_states[];
-
-
-/**
- * @brief Offset statistics
- */
-struct offset_stats {
- rd_kafka_fetch_pos_t fetch_pos; /**< Next offset to fetch */
- int64_t eof_offset; /**< Last offset we reported EOF for */
-};
-
-/**
- * @brief Reset offset_stats struct to default values
- */
-static RD_UNUSED void rd_kafka_offset_stats_reset(struct offset_stats *offs) {
- offs->fetch_pos.offset = 0;
- offs->fetch_pos.leader_epoch = -1;
- offs->eof_offset = RD_KAFKA_OFFSET_INVALID;
-}
-
-
-/**
- * @brief Store information about a partition error for future use.
- */
-struct rd_kafka_toppar_err {
- rd_kafka_resp_err_t err; /**< Error code */
- int actions; /**< Request actions */
- rd_ts_t ts; /**< Timestamp */
- uint64_t base_msgid; /**< First msg msgid */
- int32_t base_seq; /**< Idempotent Producer:
- * first msg sequence */
- int32_t last_seq; /**< Idempotent Producer:
- * last msg sequence */
-};
-
-
-
-/**
- * @brief Fetchpos comparator, leader epoch has precedence.
- */
-static RD_UNUSED RD_INLINE int
-rd_kafka_fetch_pos_cmp(const rd_kafka_fetch_pos_t *a,
- const rd_kafka_fetch_pos_t *b) {
- if (a->leader_epoch < b->leader_epoch)
- return -1;
- else if (a->leader_epoch > b->leader_epoch)
- return 1;
- else if (a->offset < b->offset)
- return -1;
- else if (a->offset > b->offset)
- return 1;
- else
- return 0;
-}
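-
-/* E.g. (illustrative): {offset = 1000, leader_epoch = 3} sorts before
- * {offset = 10, leader_epoch = 4}, since the leader epoch is compared
- * before the offset. */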
-
-
-static RD_UNUSED RD_INLINE void
-rd_kafka_fetch_pos_init(rd_kafka_fetch_pos_t *fetchpos) {
- fetchpos->offset = RD_KAFKA_OFFSET_INVALID;
- fetchpos->leader_epoch = -1;
-}
-
-const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos);
-
-static RD_UNUSED RD_INLINE rd_kafka_fetch_pos_t
-rd_kafka_fetch_pos_make(int64_t offset,
- int32_t leader_epoch,
- rd_bool_t validated) {
- rd_kafka_fetch_pos_t fetchpos = {offset, leader_epoch, validated};
- return fetchpos;
-}
-
-#ifdef RD_HAS_STATEMENT_EXPRESSIONS
-#define RD_KAFKA_FETCH_POS0(offset, leader_epoch, validated) \
- ({ \
- rd_kafka_fetch_pos_t _fetchpos = {offset, leader_epoch, \
- validated}; \
- _fetchpos; \
- })
-#else
-#define RD_KAFKA_FETCH_POS0(offset, leader_epoch, validated) \
- rd_kafka_fetch_pos_make(offset, leader_epoch, validated)
-#endif
-
-#define RD_KAFKA_FETCH_POS(offset, leader_epoch) \
- RD_KAFKA_FETCH_POS0(offset, leader_epoch, rd_false)
-
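-/* Usage sketch (illustrative):
- *   rd_kafka_fetch_pos_t pos = RD_KAFKA_FETCH_POS(1234, 5);
- * yields an unvalidated (validated == rd_false) position at offset 1234
- * with leader epoch 5. */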
-
-
-typedef TAILQ_HEAD(rd_kafka_toppar_tqhead_s,
- rd_kafka_toppar_s) rd_kafka_toppar_tqhead_t;
-
-/**
- * Topic + Partition combination
- */
-struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */
- TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */
- TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/
- CIRCLEQ_ENTRY(rd_kafka_toppar_s)
- rktp_activelink; /* rkb_active_toppars */
- TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_topic_t link*/
- TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink; /* rd_kafka_cgrp_t link */
- TAILQ_ENTRY(rd_kafka_toppar_s)
- rktp_txnlink; /**< rd_kafka_t.rk_eos.
- * txn_pend_rktps
- * or txn_rktps */
- rd_kafka_topic_t *rktp_rkt; /**< This toppar's topic object */
- int32_t rktp_partition;
- // LOCK: toppar_lock() + topic_wrlock()
- // LOCK: .. in partition_available()
- int32_t rktp_leader_id; /**< Current leader id.
- * This is updated directly
- * from metadata. */
- int32_t rktp_broker_id; /**< Current broker id. */
- rd_kafka_broker_t *rktp_leader; /**< Current leader broker.
- * This is updated simultaneously
- * with rktp_leader_id. */
- rd_kafka_broker_t *rktp_broker; /**< Current preferred broker
- * (usually the leader).
- * This is updated asynchronously
- * by issuing JOIN op to
- * broker thread, so be careful
- * in using this since it
- * may lag. */
- rd_kafka_broker_t *rktp_next_broker; /**< Next preferred broker after
- * async migration op. */
- rd_refcnt_t rktp_refcnt;
- mtx_t rktp_lock;
-
- // LOCK: toppar_lock. toppar_insert_msg(), concat_msgq()
- // LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), toppar_retry_msgq()
- rd_kafka_q_t *rktp_msgq_wakeup_q; /**< Wake-up queue */
- rd_kafka_msgq_t rktp_msgq; /* application->rdkafka queue.
- * protected by rktp_lock */
- rd_kafka_msgq_t rktp_xmit_msgq; /* internal broker xmit queue.
- * local to broker thread. */
-
- int rktp_fetch; /* On rkb_active_toppars list */
-
- /* Consumer */
- rd_kafka_q_t *rktp_fetchq; /* Queue of fetched messages
- * from broker.
- * Broker thread -> App */
- rd_kafka_q_t *rktp_ops; /* * -> Main thread */
-
- rd_atomic32_t rktp_msgs_inflight; /**< Current number of
- * messages in-flight to/from
- * the broker. */
-
- uint64_t rktp_msgid; /**< Current/last message id.
- * Each message enqueued on a
- * non-UA partition will get a
- * partition-unique sequential
- * number assigned.
- * This number is used to
- * re-enqueue the message
- * on resends while making sure
- * the input ordering is still
- * maintained, and is used by
- * the idempotent producer.
- * Starts at 1.
- * Protected by toppar_lock */
- struct {
- rd_kafka_pid_t pid; /**< Partition's last known
- * Producer Id and epoch.
- * Protected by toppar lock.
- * Only updated in toppar
- * handler thread. */
- uint64_t acked_msgid; /**< Highest acknowledged message.
- * Protected by toppar lock. */
- uint64_t epoch_base_msgid; /**< This Producer epoch's
- * base msgid.
- * When a new epoch is
- * acquired, or on transaction
- * abort, the base_seq is set to
- * the current rktp_msgid so that
- * subsequent produce
- * requests will have
- * a sequence number series
- * starting at 0.
- * Protected by toppar_lock */
- int32_t next_ack_seq; /**< Next expected ack sequence.
- * Protected by toppar lock. */
- int32_t next_err_seq; /**< Next expected error sequence.
- * Used when draining outstanding
- * issues.
- * This value will be the same
- * as next_ack_seq until a
- * drainable error occurs,
- * in which case it
- * will advance past next_ack_seq.
- * next_ack_seq can never be larger
- * than next_err_seq.
- * Protected by toppar lock. */
- rd_bool_t wait_drain; /**< All inflight requests must
- * be drained/finish before
- * resuming producing.
- * This is set to true
- * when a leader change
- * happens so that the
- * in-flight messages for the
- * old brokers finish before
- * the new broker starts sending.
- * This is a step to ensure
- * consistency.
- * Only accessed from toppar
- * handler thread. */
- } rktp_eos;
-
- /**
- * rktp version barriers
- *
- * rktp_version is the application/controller side's
- * authoritative version; it depicts the most up-to-date state.
- * This is what q_filter() matches an rko_version to.
- *
- * rktp_op_version is the last/current received state handled
- * by the toppar in the broker thread. It is updated to rktp_version
- * when receiving a new op.
- *
- * rktp_fetch_version is the current fetcher decision version.
- * It is used in fetch_decide() to see if the fetch decision
- * needs to be updated by comparing to rktp_op_version.
- *
- * Example:
- * App thread : Send OP_START (v1 bump): rktp_version=1
- * Broker thread: Recv OP_START (v1): rktp_op_version=1
- * Broker thread: fetch_decide() detects that
- * rktp_op_version != rktp_fetch_version and
- * sets rktp_fetch_version=1.
- * Broker thread: next Fetch request has its tver state set to
- * rktp_fetch_version (v1).
- *
- * App thread : Send OP_SEEK (v2 bump): rktp_version=2
- * Broker thread: Recv OP_SEEK (v2): rktp_op_version=2
- * Broker thread: Recv IO FetchResponse with tver=1,
- * when enqueued on rktp_fetchq they're discarded
- * due to old version (tver<rktp_version).
- * Broker thread: fetch_decide() detects version change and
- * sets rktp_fetch_version=2.
- * Broker thread: next Fetch request has tver=2
- * Broker thread: Recv IO FetchResponse with tver=2 which
- * is same as rktp_version so message is forwarded
- * to app.
- */
- rd_atomic32_t rktp_version; /* Latest op version.
- * Authoritative (app thread)*/
- int32_t rktp_op_version; /* Op version of curr command
- * state.
- * (broker thread) */
- int32_t rktp_fetch_version; /* Op version of curr fetch.
- (broker thread) */
-
- enum { RD_KAFKA_TOPPAR_FETCH_NONE = 0,
- RD_KAFKA_TOPPAR_FETCH_STOPPING,
- RD_KAFKA_TOPPAR_FETCH_STOPPED,
- RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY,
- RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT,
- RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT,
- RD_KAFKA_TOPPAR_FETCH_ACTIVE,
- } rktp_fetch_state; /* Broker thread's state */
-
-#define RD_KAFKA_TOPPAR_FETCH_IS_STARTED(fetch_state) \
- ((fetch_state) >= RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
-
- int32_t rktp_leader_epoch; /**< Last known partition leader epoch,
- * or -1. */
-
- int32_t rktp_fetch_msg_max_bytes; /* Max number of bytes to
- * fetch.
- * Locality: broker thread
- */
-
- rd_ts_t rktp_ts_fetch_backoff; /* Back off fetcher for
- * this partition until this
- * absolute timestamp
- * expires. */
-
- /** Offset to query broker for. */
- rd_kafka_fetch_pos_t rktp_query_pos;
-
- /** Next fetch start position.
- * This is set by start, seek, resume, etc., to tell
- * the fetcher where to start fetching.
- * It is not updated for each fetch, see
- * rktp_offsets.fetch_pos for that.
- * @locality toppar thread */
- rd_kafka_fetch_pos_t rktp_next_fetch_start;
-
- /** The previous next fetch position.
- * @locality toppar thread */
- rd_kafka_fetch_pos_t rktp_last_next_fetch_start;
-
- /** Application's position.
- * This is the latest offset delivered to application + 1.
- * It is reset to INVALID_OFFSET when partition is
- * unassigned/stopped/seeked. */
- rd_kafka_fetch_pos_t rktp_app_pos;
-
- /** Last stored offset, but maybe not yet committed. */
- rd_kafka_fetch_pos_t rktp_stored_pos;
-
- /** Offset currently being committed */
- rd_kafka_fetch_pos_t rktp_committing_pos;
-
- /** Last (known) committed offset */
- rd_kafka_fetch_pos_t rktp_committed_pos;
-
- rd_ts_t rktp_ts_committed_offset; /**< Timestamp of last commit */
-
- struct offset_stats rktp_offsets; /* Current offsets.
- * Locality: broker thread*/
- struct offset_stats rktp_offsets_fin; /* Finalized offset for stats.
- * Updated periodically
- * by broker thread.
- * Locks: toppar_lock */
-
- int64_t rktp_ls_offset; /**< Current last stable offset
- * Locks: toppar_lock */
- int64_t rktp_hi_offset; /* Current high watermark offset.
- * Locks: toppar_lock */
- int64_t rktp_lo_offset; /* Current broker low offset.
- * This is outside of the stats
- * struct due to this field
- * being populated by the
- * toppar thread rather than
- * the broker thread.
- * Locality: toppar thread
- * Locks: toppar_lock */
-
- rd_ts_t rktp_ts_offset_lag;
-
- char *rktp_offset_path; /* Path to offset file */
- FILE *rktp_offset_fp; /* Offset file pointer */
-
- rd_kafka_resp_err_t rktp_last_error; /**< Last Fetch error.
- * Used for suppressing
- * recurring errors.
- * @locality broker thread */
-
- rd_kafka_cgrp_t *rktp_cgrp; /* Belongs to this cgrp */
-
- rd_bool_t rktp_started; /**< Fetcher is instructed to
- * start.
- * This is used by cgrp to keep
- * track of whether the toppar has
- * been started or not. */
-
- rd_kafka_replyq_t rktp_replyq; /* Current replyq+version
- * for propagating
- * major operations, e.g.,
- * FETCH_STOP. */
- // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_DESIRED
- // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_UNKNOWN
- int rktp_flags;
-#define RD_KAFKA_TOPPAR_F_DESIRED \
- 0x1 /* This partition is desired \
- * by a consumer. */
-#define RD_KAFKA_TOPPAR_F_UNKNOWN \
- 0x2 /* Topic is not yet or no longer \
- * seen on a broker. */
-#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4 /* Offset store is active */
-#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING \
- 0x8 /* Offset store stopping \
- */
-#define RD_KAFKA_TOPPAR_F_APP_PAUSE 0x10 /* App pause()d consumption */
-#define RD_KAFKA_TOPPAR_F_LIB_PAUSE 0x20 /* librdkafka paused consumption */
-#define RD_KAFKA_TOPPAR_F_REMOVE 0x40 /* partition removed from cluster */
-#define RD_KAFKA_TOPPAR_F_LEADER_ERR \
- 0x80 /* Operation failed: \
- * leader might be missing. \
- * Typically set from \
- * ProduceResponse failure. */
-#define RD_KAFKA_TOPPAR_F_PEND_TXN \
- 0x100 /* Partition is pending being added \
- * to a producer transaction. */
-#define RD_KAFKA_TOPPAR_F_IN_TXN \
- 0x200 /* Partition is part of \
- * a producer transaction. */
-#define RD_KAFKA_TOPPAR_F_ON_DESP 0x400 /**< On rkt_desp list */
-#define RD_KAFKA_TOPPAR_F_ON_CGRP 0x800 /**< On rkcg_toppars list */
-#define RD_KAFKA_TOPPAR_F_ON_RKB 0x1000 /**< On rkb_toppars list */
-#define RD_KAFKA_TOPPAR_F_ASSIGNED \
- 0x2000 /**< Toppar is part of the consumer \
- * assignment. */
-
- /*
- * Timers
- */
- rd_kafka_timer_t rktp_offset_query_tmr; /* Offset query timer */
- rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */
- rd_kafka_timer_t rktp_offset_sync_tmr; /* Offset file sync timer */
- rd_kafka_timer_t rktp_consumer_lag_tmr; /* Consumer lag monitoring
- * timer */
- rd_kafka_timer_t rktp_validate_tmr; /**< Offset and epoch
- * validation retry timer */
-
- rd_interval_t rktp_lease_intvl; /**< Preferred replica lease
- * period */
- rd_interval_t rktp_new_lease_intvl; /**< Controls max frequency
- * at which a new preferred
- * replica lease can be
- * created for a toppar.
- */
- rd_interval_t rktp_new_lease_log_intvl; /**< .. and how often
- * we log about it. */
- rd_interval_t rktp_metadata_intvl; /**< Controls max frequency
- * of metadata requests
- * in preferred replica
- * handler.
- */
-
- int rktp_wait_consumer_lag_resp; /* Waiting for consumer lag
- * response. */
-
- struct rd_kafka_toppar_err rktp_last_err; /**< Last produce error */
-
-
- struct {
- rd_atomic64_t tx_msgs; /**< Producer: sent messages */
- rd_atomic64_t tx_msg_bytes; /**< .. bytes */
- rd_atomic64_t rx_msgs; /**< Consumer: received messages */
- rd_atomic64_t rx_msg_bytes; /**< .. bytes */
- rd_atomic64_t producer_enq_msgs; /**< Producer: enqueued msgs */
- rd_atomic64_t rx_ver_drops; /**< Consumer: outdated message
- * drops. */
- } rktp_c;
-};
-
-/**
- * @struct This is a separately allocated glue object used in
- * rd_kafka_topic_partition_t._private to allow referencing
- * an rktp and/or a leader epoch. Both are optional.
- * The rktp, if non-NULL, owns a refcount.
- *
- * This glue object is not always set in ._private, but allocated on demand
- * as necessary.
- */
-typedef struct rd_kafka_topic_partition_private_s {
- /** Reference to a toppar. Optional, may be NULL. */
- rd_kafka_toppar_t *rktp;
- /** Current Leader epoch, if known, else -1.
- * This is set when the API needs to send the last epoch known
- * by the client. */
- int32_t current_leader_epoch;
- /** Leader epoch if known, else -1. */
- int32_t leader_epoch;
-} rd_kafka_topic_partition_private_t;
-
-
-/**
- * Check if toppar is paused (consumer).
- * Locks: toppar_lock() MUST be held.
- */
-#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp) \
- ((rktp)->rktp_flags & \
- (RD_KAFKA_TOPPAR_F_APP_PAUSE | RD_KAFKA_TOPPAR_F_LIB_PAUSE))
-
-
-
-/**
- * @brief Increase refcount and return rktp object.
- */
-#define rd_kafka_toppar_keep(RKTP) \
- rd_kafka_toppar_keep0(__FUNCTION__, __LINE__, RKTP)
-
-#define rd_kafka_toppar_keep_fl(FUNC, LINE, RKTP) \
- rd_kafka_toppar_keep0(FUNC, LINE, RKTP)
-
-static RD_UNUSED RD_INLINE rd_kafka_toppar_t *
-rd_kafka_toppar_keep0(const char *func, int line, rd_kafka_toppar_t *rktp) {
- rd_refcnt_add_fl(func, line, &rktp->rktp_refcnt);
- return rktp;
-}
-
-void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp);
-
-#define rd_kafka_toppar_destroy(RKTP) \
- do { \
- rd_kafka_toppar_t *_RKTP = (RKTP); \
- if (unlikely(rd_refcnt_sub(&_RKTP->rktp_refcnt) == 0)) \
- rd_kafka_toppar_destroy_final(_RKTP); \
- } while (0)
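-
-/* Usage sketch (illustrative): every rd_kafka_toppar_keep() must be paired
- * with exactly one rd_kafka_toppar_destroy(), which drops the reference and
- * frees the toppar on the last one:
- *
- *   rd_kafka_toppar_t *rktp = rd_kafka_toppar_keep(other_rktp);
- *   ... use rktp ...
- *   rd_kafka_toppar_destroy(rktp);
- */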
-
-
-
-#define rd_kafka_toppar_lock(rktp) mtx_lock(&(rktp)->rktp_lock)
-#define rd_kafka_toppar_unlock(rktp) mtx_unlock(&(rktp)->rktp_lock)
-
-static const char *
-rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) RD_UNUSED;
-static const char *rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) {
- static RD_TLS char ret[256];
-
- rd_snprintf(ret, sizeof(ret), "%.*s [%" PRId32 "]",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
-
- return ret;
-}
-rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt,
- int32_t partition,
- const char *func,
- int line);
-#define rd_kafka_toppar_new(rkt, partition) \
- rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__)
-void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state);
-void rd_kafka_toppar_insert_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
-void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
- rd_kafka_msg_t *rkm,
- rd_ts_t now);
-int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq,
- rd_kafka_msgq_t *srcq,
- int incr_retry,
- int max_retries,
- rd_ts_t backoff,
- rd_kafka_msg_status_t status,
- int (*cmp)(const void *a, const void *b));
-void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq,
- rd_kafka_msgq_t *srcq,
- int (*cmp)(const void *a, const void *b));
-int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq,
- int incr_retry,
- rd_kafka_msg_status_t status);
-void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp,
- rd_kafka_msgq_t *rkmq);
-void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err,
- const char *reason);
-rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func,
- int line,
- const rd_kafka_topic_t *rkt,
- int32_t partition,
- int ua_on_miss);
-#define rd_kafka_toppar_get(rkt, partition, ua_on_miss) \
- rd_kafka_toppar_get0(__FUNCTION__, __LINE__, rkt, partition, ua_on_miss)
-rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int ua_on_miss,
- int create_on_miss);
-rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt,
- int32_t partition,
- int ua_on_miss,
- rd_kafka_resp_err_t *errp);
-
-rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt,
- int32_t partition);
-void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp);
-rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt,
- int32_t partition);
-void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp);
-void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp);
-
-void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t next_pos);
-
-void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp,
- rd_kafka_broker_t *rkb);
-
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_q_t *fwdq,
- rd_kafka_replyq_t replyq);
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq);
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t pos,
- rd_kafka_replyq_t replyq);
-
-rd_kafka_resp_err_t
-rd_kafka_toppar_op_pause(rd_kafka_toppar_t *rktp, int pause, int flag);
-
-void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err);
-
-
-
-rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp);
-
-
-void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp,
- rd_kafka_replyq_t replyq);
-
-void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t query_pos,
- int backoff_ms);
-
-int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp,
- int purge_flags,
- rd_bool_t include_xmit_msgq);
-
-rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp,
- int proper_broker);
-void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp,
- const char *reason,
- rd_kafka_resp_err_t err);
-
-void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag);
-void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag);
-
-rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp,
- int pause,
- int flag,
- rd_kafka_replyq_t replyq);
-rd_kafka_resp_err_t
-rd_kafka_toppars_pause_resume(rd_kafka_t *rk,
- rd_bool_t pause,
- rd_async_t async,
- int flag,
- rd_kafka_topic_partition_list_t *partitions);
-
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic,
- int32_t partition);
-void rd_kafka_topic_partition_destroy_free(void *ptr);
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src);
-void *rd_kafka_topic_partition_copy_void(const void *src);
-void rd_kafka_topic_partition_destroy_free(void *ptr);
-rd_kafka_topic_partition_t *
-rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp);
-
-void rd_kafka_topic_partition_list_init(
- rd_kafka_topic_partition_list_t *rktparlist,
- int size);
-void rd_kafka_topic_partition_list_destroy_free(void *ptr);
-
-void rd_kafka_topic_partition_list_clear(
- rd_kafka_topic_partition_list_t *rktparlist);
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0(
- const char *func,
- int line,
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition,
- rd_kafka_toppar_t *rktp,
- const rd_kafka_topic_partition_private_t *parpriv);
-
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert(
- rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition);
-
-void rd_kafka_topic_partition_list_add_copy(
- rd_kafka_topic_partition_list_t *rktparlist,
- const rd_kafka_topic_partition_t *rktpar);
-
-
-void rd_kafka_topic_partition_list_add_list(
- rd_kafka_topic_partition_list_t *dst,
- const rd_kafka_topic_partition_list_t *src);
-
-/**
- * Traverse rd_kafka_topic_partition_list_t.
- *
- * @warning \p TPLIST modifications are not allowed.
- */
-#define RD_KAFKA_TPLIST_FOREACH(RKTPAR, TPLIST) \
- for (RKTPAR = &(TPLIST)->elems[0]; \
- (RKTPAR) < &(TPLIST)->elems[(TPLIST)->cnt]; RKTPAR++)
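-
-/* Usage sketch (illustrative):
- *
- *   rd_kafka_topic_partition_t *rktpar;
- *   RD_KAFKA_TPLIST_FOREACH(rktpar, rktparlist)
- *           rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
- */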
-
-/**
- * Traverse rd_kafka_topic_partition_list_t.
- *
- * @warning \p TPLIST modifications are not allowed, but removal of the
- * current \p RKTPAR element is allowed.
- */
-#define RD_KAFKA_TPLIST_FOREACH_REVERSE(RKTPAR, TPLIST) \
- for (RKTPAR = &(TPLIST)->elems[(TPLIST)->cnt - 1]; \
- (RKTPAR) >= &(TPLIST)->elems[0]; RKTPAR--)
-
-int rd_kafka_topic_partition_match(rd_kafka_t *rk,
- const rd_kafka_group_member_t *rkgm,
- const rd_kafka_topic_partition_t *rktpar,
- const char *topic,
- int *matched_by_regex);
-
-
-int rd_kafka_topic_partition_cmp(const void *_a, const void *_b);
-unsigned int rd_kafka_topic_partition_hash(const void *a);
-
-int rd_kafka_topic_partition_list_find_idx(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic,
- int32_t partition);
-rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic(
- const rd_kafka_topic_partition_list_t *rktparlist,
- const char *topic);
-
-void rd_kafka_topic_partition_list_sort_by_topic(
- rd_kafka_topic_partition_list_t *rktparlist);
-
-void rd_kafka_topic_partition_list_reset_offsets(
- rd_kafka_topic_partition_list_t *rktparlist,
- int64_t offset);
-
-int rd_kafka_topic_partition_list_set_offsets(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- int from_rktp,
- int64_t def_value,
- int is_commit);
-
-int rd_kafka_topic_partition_list_count_abs_offsets(
- const rd_kafka_topic_partition_list_t *rktparlist);
-
-int rd_kafka_topic_partition_list_cmp(const void *_a,
- const void *_b,
- int (*cmp)(const void *, const void *));
-
-/**
- * @returns (and creates if necessary) the ._private glue object.
- */
-static RD_UNUSED RD_INLINE rd_kafka_topic_partition_private_t *
-rd_kafka_topic_partition_get_private(rd_kafka_topic_partition_t *rktpar) {
- rd_kafka_topic_partition_private_t *parpriv;
-
- if (!(parpriv = rktpar->_private)) {
- parpriv = rd_calloc(1, sizeof(*parpriv));
- parpriv->leader_epoch = -1;
- rktpar->_private = parpriv;
- }
-
- return parpriv;
-}
-
-
-/**
- * @returns the partition leader current epoch, if relevant and known,
- * else -1.
- *
- * @param rktpar Partition object.
- *
- * @remark See KIP-320 for more information.
- */
-int32_t rd_kafka_topic_partition_get_current_leader_epoch(
- const rd_kafka_topic_partition_t *rktpar);
-
-
-/**
- * @brief Sets the partition leader current epoch (use -1 to clear).
- *
- * @param rktpar Partition object.
- * @param leader_epoch Partition leader current epoch, use -1 to reset.
- *
- * @remark See KIP-320 for more information.
- */
-void rd_kafka_topic_partition_set_current_leader_epoch(
- rd_kafka_topic_partition_t *rktpar,
- int32_t leader_epoch);
-
-
-/**
- * @returns the partition's rktp if set (no refcnt increase), else NULL.
- */
-static RD_INLINE RD_UNUSED rd_kafka_toppar_t *
-rd_kafka_topic_partition_toppar(rd_kafka_t *rk,
- const rd_kafka_topic_partition_t *rktpar) {
- const rd_kafka_topic_partition_private_t *parpriv;
-
- if ((parpriv = rktpar->_private))
- return parpriv->rktp;
-
- return NULL;
-}
-
-rd_kafka_toppar_t *
-rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk,
- rd_kafka_topic_partition_t *rktpar,
- rd_bool_t create_on_miss);
-
-/**
- * @returns (and sets if necessary) the \p rktpar's ._private.
- * @remark a new reference is returned.
- */
-static RD_INLINE RD_UNUSED rd_kafka_toppar_t *
-rd_kafka_topic_partition_get_toppar(rd_kafka_t *rk,
- rd_kafka_topic_partition_t *rktpar,
- rd_bool_t create_on_miss) {
- rd_kafka_toppar_t *rktp;
-
- rktp =
- rd_kafka_topic_partition_ensure_toppar(rk, rktpar, create_on_miss);
-
- if (rktp)
- rd_kafka_toppar_keep(rktp);
-
- return rktp;
-}
-
-
-
-void rd_kafka_topic_partition_list_update_toppars(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_bool_t create_on_miss);
-
-
-void rd_kafka_topic_partition_list_query_leaders_async(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *rktparlist,
- int timeout_ms,
- rd_kafka_replyq_t replyq,
- rd_kafka_op_cb_t *cb,
- void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *leaders,
- int timeout_ms);
-
-int rd_kafka_topic_partition_list_get_topics(
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *rkts);
-
-int rd_kafka_topic_partition_list_get_topic_names(
- const rd_kafka_topic_partition_list_t *rktparlist,
- rd_list_t *topics,
- int include_regex);
-
-void rd_kafka_topic_partition_list_log(
- rd_kafka_t *rk,
- const char *fac,
- int dbg,
- const rd_kafka_topic_partition_list_t *rktparlist);
-
-#define RD_KAFKA_FMT_F_OFFSET 0x1 /* Print offset */
-#define RD_KAFKA_FMT_F_ONLY_ERR 0x2 /* Only include errored entries */
-#define RD_KAFKA_FMT_F_NO_ERR 0x4 /* Dont print error string */
-const char *rd_kafka_topic_partition_list_str(
- const rd_kafka_topic_partition_list_t *rktparlist,
- char *dest,
- size_t dest_size,
- int fmt_flags);
-
-void rd_kafka_topic_partition_list_update(
- rd_kafka_topic_partition_list_t *dst,
- const rd_kafka_topic_partition_list_t *src);
-
-int rd_kafka_topic_partition_leader_cmp(const void *_a, const void *_b);
-
-void rd_kafka_topic_partition_set_from_fetch_pos(
- rd_kafka_topic_partition_t *rktpar,
- const rd_kafka_fetch_pos_t fetchpos);
-
-static RD_UNUSED rd_kafka_fetch_pos_t rd_kafka_topic_partition_get_fetch_pos(
- const rd_kafka_topic_partition_t *rktpar) {
- rd_kafka_fetch_pos_t fetchpos = {
- rktpar->offset, rd_kafka_topic_partition_get_leader_epoch(rktpar)};
-
- return fetchpos;
-}
-
-
-/**
- * @brief Match function that returns true if partition has a valid offset.
- */
-static RD_UNUSED int
-rd_kafka_topic_partition_match_valid_offset(const void *elem,
- const void *opaque) {
- const rd_kafka_topic_partition_t *rktpar = elem;
- return rktpar->offset >= 0;
-}
-
-rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match(
- const rd_kafka_topic_partition_list_t *rktparlist,
- int (*match)(const void *elem, const void *opaque),
- void *opaque);
-
-size_t rd_kafka_topic_partition_list_sum(
- const rd_kafka_topic_partition_list_t *rktparlist,
- size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque),
- void *opaque);
-
-rd_bool_t rd_kafka_topic_partition_list_has_duplicates(
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_bool_t ignore_partition);
-
-void rd_kafka_topic_partition_list_set_err(
- rd_kafka_topic_partition_list_t *rktparlist,
- rd_kafka_resp_err_t err);
-
-rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err(
- const rd_kafka_topic_partition_list_t *rktparlist);
-
-int rd_kafka_topic_partition_list_regex_cnt(
- const rd_kafka_topic_partition_list_t *rktparlist);
-
-void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque);
-
-/**
- * @brief Toppar + Op version tuple used for mapping Fetched partitions
- * back to their fetch versions.
- */
-struct rd_kafka_toppar_ver {
- rd_kafka_toppar_t *rktp;
- int32_t version;
-};
-
-
-/**
- * @brief Toppar + Op version comparator.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_toppar_ver_cmp(const void *_a,
- const void *_b) {
- const struct rd_kafka_toppar_ver *a = _a, *b = _b;
- const rd_kafka_toppar_t *rktp_a = a->rktp;
- const rd_kafka_toppar_t *rktp_b = b->rktp;
- int r;
-
- if (rktp_a->rktp_rkt != rktp_b->rktp_rkt &&
- (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic,
- rktp_b->rktp_rkt->rkt_topic)))
- return r;
-
- return RD_CMP(rktp_a->rktp_partition, rktp_b->rktp_partition);
-}
-
-/**
- * @brief Frees up resources for \p tver but not the \p tver itself.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_toppar_ver_destroy(struct rd_kafka_toppar_ver *tver) {
- rd_kafka_toppar_destroy(tver->rktp);
-}
-
-
-/**
- * @returns 1 if rko version is outdated, else 0.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_op_version_outdated(rd_kafka_op_t *rko,
- int version) {
- if (!rko->rko_version)
- return 0;
-
- if (version)
- return rko->rko_version < version;
-
- if (rko->rko_rktp)
- return rko->rko_version <
- rd_atomic32_get(&rko->rko_rktp->rktp_version);
- return 0;
-}
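-
-/* E.g. (illustrative): an rko carrying rko_version=1 becomes outdated as
- * soon as the application bumps the toppar's rktp_version to 2 (e.g. via a
- * seek), per the version-barrier scheme described in rd_kafka_toppar_s. */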
-
-void rd_kafka_toppar_offset_commit_result(
- rd_kafka_toppar_t *rktp,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets);
-
-void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp);
-
-
-/**
- * @brief Represents a leader and the partitions it is leader for.
- */
-struct rd_kafka_partition_leader {
- rd_kafka_broker_t *rkb;
- rd_kafka_topic_partition_list_t *partitions;
-};
-
-static RD_UNUSED void
-rd_kafka_partition_leader_destroy(struct rd_kafka_partition_leader *leader) {
- rd_kafka_broker_destroy(leader->rkb);
- rd_kafka_topic_partition_list_destroy(leader->partitions);
- rd_free(leader);
-}
-
-void rd_kafka_partition_leader_destroy_free(void *ptr);
-
-static RD_UNUSED struct rd_kafka_partition_leader *
-rd_kafka_partition_leader_new(rd_kafka_broker_t *rkb) {
- struct rd_kafka_partition_leader *leader = rd_malloc(sizeof(*leader));
- leader->rkb = rkb;
- rd_kafka_broker_keep(rkb);
- leader->partitions = rd_kafka_topic_partition_list_new(0);
- return leader;
-}
-
-static RD_UNUSED int rd_kafka_partition_leader_cmp(const void *_a,
- const void *_b) {
- const struct rd_kafka_partition_leader *a = _a, *b = _b;
- return rd_kafka_broker_cmp(a->rkb, b->rkb);
-}
-
-
-int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp,
- rd_kafka_pid_t pid,
- uint64_t base_msgid);
-
-int rd_kafka_toppar_handle_purge_queues(rd_kafka_toppar_t *rktp,
- rd_kafka_broker_t *rkb,
- int purge_flags);
-void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk);
-
-static RD_UNUSED int rd_kafka_toppar_topic_cmp(const void *_a, const void *_b) {
- const rd_kafka_toppar_t *a = _a, *b = _b;
- return strcmp(a->rktp_rkt->rkt_topic->str, b->rktp_rkt->rkt_topic->str);
-}
-
-
-/**
- * @brief Sets the partition's next fetch position, i.e., the next offset
- * to start fetching from.
- *
- * @locks_required rd_kafka_toppar_lock(rktp) MUST be held.
- */
-static RD_UNUSED RD_INLINE void
-rd_kafka_toppar_set_next_fetch_position(rd_kafka_toppar_t *rktp,
- rd_kafka_fetch_pos_t next_pos) {
- rktp->rktp_next_fetch_start = next_pos;
-}
-
-#endif /* _RDKAFKA_PARTITION_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c
deleted file mode 100644
index dfe3ef03e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_pattern.h"
-
-void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist,
- rd_kafka_pattern_t *rkpat) {
- TAILQ_REMOVE(&plist->rkpl_head, rkpat, rkpat_link);
- rd_regex_destroy(rkpat->rkpat_re);
- rd_free(rkpat->rkpat_orig);
- rd_free(rkpat);
-}
-
-void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist,
- rd_kafka_pattern_t *rkpat) {
- TAILQ_INSERT_TAIL(&plist->rkpl_head, rkpat, rkpat_link);
-}
-
-rd_kafka_pattern_t *
-rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size) {
- rd_kafka_pattern_t *rkpat;
-
- rkpat = rd_calloc(1, sizeof(*rkpat));
-
- /* Verify and precompile pattern */
- if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) {
- rd_free(rkpat);
- return NULL;
- }
-
- rkpat->rkpat_orig = rd_strdup(pattern);
-
- return rkpat;
-}
-
-
-
-int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str) {
- rd_kafka_pattern_t *rkpat;
-
- TAILQ_FOREACH(rkpat, &plist->rkpl_head, rkpat_link) {
- if (rd_regex_exec(rkpat->rkpat_re, str))
- return 1;
- }
-
- return 0;
-}
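-
-/* Usage sketch (illustrative): build a list from a pattern string and test
- * a topic name against it:
- *
- *   char errstr[256];
- *   rd_kafka_pattern_list_t *plist = rd_kafka_pattern_list_new(
- *       "^rdkafka_.*", errstr, (int)sizeof(errstr));
- *   if (plist && rd_kafka_pattern_match(plist, "rdkafka_topic1"))
- *           ... the topic name matched a pattern ...
- */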
-
-
-/**
- * Append pattern to list.
- */
-int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist,
- const char *pattern,
- char *errstr,
- int errstr_size) {
- rd_kafka_pattern_t *rkpat;
- rkpat = rd_kafka_pattern_new(pattern, errstr, errstr_size);
- if (!rkpat)
- return -1;
-
- rd_kafka_pattern_add(plist, rkpat);
- return 0;
-}
-
-/**
- * Remove matching patterns.
- * Returns the number of removed patterns.
- */
-int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist,
- const char *pattern) {
- rd_kafka_pattern_t *rkpat, *rkpat_tmp;
- int cnt = 0;
-
- TAILQ_FOREACH_SAFE(rkpat, &plist->rkpl_head, rkpat_link, rkpat_tmp) {
- if (!strcmp(rkpat->rkpat_orig, pattern)) {
- rd_kafka_pattern_destroy(plist, rkpat);
- cnt++;
- }
- }
- return cnt;
-}
-
-/**
- * Parse a patternlist and populate a list with it.
- */
-static int rd_kafka_pattern_list_parse(rd_kafka_pattern_list_t *plist,
- const char *patternlist,
- char *errstr,
- size_t errstr_size) {
- char *s;
- rd_strdupa(&s, patternlist);
-
- while (s && *s) {
- char *t = s;
- char re_errstr[256];
-
- /* Find separator */
- while ((t = strchr(t, ','))) {
- if (t > s && *(t - 1) == ',') {
- /* separator was escaped,
- remove escape and scan again. */
- memmove(t - 1, t, strlen(t) + 1);
- t++;
- } else {
- *t = '\0';
- t++;
- break;
- }
- }
-
- if (rd_kafka_pattern_list_append(plist, s, re_errstr,
- sizeof(re_errstr)) == -1) {
- rd_snprintf(errstr, errstr_size,
- "Failed to parse pattern \"%s\": "
- "%s",
- s, re_errstr);
- rd_kafka_pattern_list_clear(plist);
- return -1;
- }
-
- s = t;
- }
-
- return 0;
-}
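-
-/* E.g. (illustrative): the pattern list string "mytopic,^rdkafka_.*" parses
- * into the two patterns "mytopic" and "^rdkafka_.*". */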
-
-
-/**
- * Clear a pattern list.
- */
-void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist) {
- rd_kafka_pattern_t *rkpat;
-
- while ((rkpat = TAILQ_FIRST(&plist->rkpl_head)))
- rd_kafka_pattern_destroy(plist, rkpat);
-
- if (plist->rkpl_orig) {
- rd_free(plist->rkpl_orig);
- plist->rkpl_orig = NULL;
- }
-}
-
-
-/**
- * Free a pattern list previously created with list_new()
- */
-void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist) {
- rd_kafka_pattern_list_clear(plist);
- rd_free(plist);
-}
-
-/**
- * Initialize a pattern list, optionally populating it with the
- * comma-separated patterns in 'patternlist'.
- */
-int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist,
- const char *patternlist,
- char *errstr,
- size_t errstr_size) {
- TAILQ_INIT(&plist->rkpl_head);
- if (patternlist) {
- if (rd_kafka_pattern_list_parse(plist, patternlist, errstr,
- errstr_size) == -1)
- return -1;
- plist->rkpl_orig = rd_strdup(patternlist);
- } else
- plist->rkpl_orig = NULL;
-
- return 0;
-}
-
-
-/**
- * Allocate and initialize a new list.
- */
-rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist,
- char *errstr,
- int errstr_size) {
- rd_kafka_pattern_list_t *plist;
-
- plist = rd_calloc(1, sizeof(*plist));
-
- if (rd_kafka_pattern_list_init(plist, patternlist, errstr,
- errstr_size) == -1) {
- rd_free(plist);
- return NULL;
- }
-
- return plist;
-}
-
-
-/**
- * Make a copy of a pattern list.
- */
-rd_kafka_pattern_list_t *
-rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src) {
- char errstr[16];
- return rd_kafka_pattern_list_new(src->rkpl_orig, errstr,
- sizeof(errstr));
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h
deleted file mode 100644
index 88d183cd3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_PATTERN_H_
-#define _RDKAFKA_PATTERN_H_
-
-#include "rdregex.h"
-
-typedef struct rd_kafka_pattern_s {
- TAILQ_ENTRY(rd_kafka_pattern_s) rkpat_link;
-
- rd_regex_t *rkpat_re; /* Compiled regex */
- char *rkpat_orig; /* Original pattern */
-} rd_kafka_pattern_t;
-
-typedef struct rd_kafka_pattern_list_s {
- TAILQ_HEAD(, rd_kafka_pattern_s) rkpl_head;
- char *rkpl_orig;
-} rd_kafka_pattern_list_t;
-
-void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist,
- rd_kafka_pattern_t *rkpat);
-void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist,
- rd_kafka_pattern_t *rkpat);
-rd_kafka_pattern_t *
-rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size);
-int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str);
-int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist,
- const char *pattern,
- char *errstr,
- int errstr_size);
-int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist,
- const char *pattern);
-void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist);
-void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist);
-int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist,
- const char *patternlist,
- char *errstr,
- size_t errstr_size);
-rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist,
- char *errstr,
- int errstr_size);
-rd_kafka_pattern_list_t *
-rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src);
-
-#endif /* _RDKAFKA_PATTERN_H_ */
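
/* A minimal usage sketch of the pattern-list API declared above,
 * assuming compilation inside librdkafka (internal headers and rd_*
 * allocators); the topic names are hypothetical.  Patterns are
 * comma-separated and a leading '^' marks a regex pattern. */
static void pattern_list_example(void) {
        char errstr[256];
        rd_kafka_pattern_list_t *plist;

        plist = rd_kafka_pattern_list_new("^foo_.*,bar", errstr,
                                          (int)sizeof(errstr));
        if (!plist)
                return; /* errstr explains the failure */

        if (rd_kafka_pattern_match(plist, "foo_topic"))
                ; /* matched the ^foo_.* regex */

        rd_kafka_pattern_list_destroy(plist);
}
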
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c
deleted file mode 100644
index f58bc5060..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_plugin.h"
-#include "rddl.h"
-
-
-typedef struct rd_kafka_plugin_s {
- char *rkplug_path; /* Library path */
- rd_kafka_t *rkplug_rk; /* Backpointer to the rk handle */
- void *rkplug_handle; /* dlopen (or similar) handle */
- void *rkplug_opaque; /* Plugin's opaque */
-
-} rd_kafka_plugin_t;
-
-
-/**
- * @brief Plugin path comparator
- */
-static int rd_kafka_plugin_cmp(const void *_a, const void *_b) {
- const rd_kafka_plugin_t *a = _a, *b = _b;
-
- return strcmp(a->rkplug_path, b->rkplug_path);
-}
-
-
-/**
- * @brief Add a plugin (by library path) and call its conf_init() constructor
- *
- * @returns an error code on error.
- * @remark duplicate plugins are silently ignored.
- *
- * @remark Libraries are refcounted and thus not unloaded until all
- * plugins referencing the library have been destroyed.
- *         (dlopen() and LoadLibrary() do this for us)
- */
-static rd_kafka_resp_err_t rd_kafka_plugin_new(rd_kafka_conf_t *conf,
- const char *path,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_plugin_t *rkplug;
- const rd_kafka_plugin_t skel = {.rkplug_path = (char *)path};
- rd_kafka_plugin_f_conf_init_t *conf_init;
- rd_kafka_resp_err_t err;
- void *handle;
- void *plug_opaque = NULL;
-
- /* Avoid duplicates */
- if (rd_list_find(&conf->plugins, &skel, rd_kafka_plugin_cmp)) {
- rd_snprintf(errstr, errstr_size, "Ignoring duplicate plugin %s",
- path);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Loading plugin \"%s\"", path);
-
- /* Attempt to load library */
- if (!(handle = rd_dl_open(path, errstr, errstr_size))) {
- rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
- "Failed to load plugin \"%s\": %s", path, errstr);
- return RD_KAFKA_RESP_ERR__FS;
- }
-
- /* Find conf_init() function */
- if (!(conf_init =
- rd_dl_sym(handle, "conf_init", errstr, errstr_size))) {
- rd_dl_close(handle);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- /* Call conf_init() */
- rd_kafka_dbg0(conf, PLUGIN, "PLUGINIT",
- "Calling plugin \"%s\" conf_init()", path);
-
- if ((err = conf_init(conf, &plug_opaque, errstr, errstr_size))) {
- rd_dl_close(handle);
- return err;
- }
-
- rkplug = rd_calloc(1, sizeof(*rkplug));
- rkplug->rkplug_path = rd_strdup(path);
- rkplug->rkplug_handle = handle;
- rkplug->rkplug_opaque = plug_opaque;
-
- rd_list_add(&conf->plugins, rkplug);
-
- rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Plugin \"%s\" loaded", path);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Free the plugin; any conf_destroy() interceptors will have been
- * called prior to this call.
- * @remark plugin is not removed from any list (caller's responsibility)
- * @remark this relies on the actual library loader to refcount libraries,
- * especially in the config copy case.
- * This is true for POSIX dlopen() and Win32 LoadLibrary().
- * @locality application thread
- */
-static void rd_kafka_plugin_destroy(rd_kafka_plugin_t *rkplug) {
- rd_dl_close(rkplug->rkplug_handle);
- rd_free(rkplug->rkplug_path);
- rd_free(rkplug);
-}
-
-
-
-/**
- * @brief Initialize all configured plugins.
- *
- * @remark Any previously loaded plugins will be unloaded.
- *
- * @returns the error code of the first failing plugin.
- * @locality application thread calling rd_kafka_new().
- */
-static rd_kafka_conf_res_t rd_kafka_plugins_conf_set0(rd_kafka_conf_t *conf,
- const char *paths,
- char *errstr,
- size_t errstr_size) {
- char *s;
-
- rd_list_destroy(&conf->plugins);
- rd_list_init(&conf->plugins, 0, (void *)&rd_kafka_plugin_destroy);
-
- if (!paths || !*paths)
- return RD_KAFKA_CONF_OK;
-
- /* Split paths by ; */
- rd_strdupa(&s, paths);
-
- rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
- "Loading plugins from conf object %p: \"%s\"", conf,
- paths);
-
- while (s && *s) {
- char *path = s;
- char *t;
- rd_kafka_resp_err_t err;
-
- if ((t = strchr(s, ';'))) {
- *t = '\0';
- s = t + 1;
- } else {
- s = NULL;
- }
-
- if ((err = rd_kafka_plugin_new(conf, path, errstr,
- errstr_size))) {
- /* Failed to load plugin */
- size_t elen = errstr_size > 0 ? strlen(errstr) : 0;
-
- /* See if there is room for appending the
- * plugin path to the error message. */
- if (elen + strlen("(plugin )") + strlen(path) <
- errstr_size)
- rd_snprintf(errstr + elen, errstr_size - elen,
- " (plugin %s)", path);
-
- rd_list_destroy(&conf->plugins);
- return RD_KAFKA_CONF_INVALID;
- }
- }
-
- return RD_KAFKA_CONF_OK;
-}
-
-
-/**
- * @brief Conf setter for "plugin.library.paths"
- */
-rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope,
- void *pconf,
- const char *name,
- const char *value,
- void *dstptr,
- rd_kafka_conf_set_mode_t set_mode,
- char *errstr,
- size_t errstr_size) {
-
- assert(scope == _RK_GLOBAL);
- return rd_kafka_plugins_conf_set0(
- (rd_kafka_conf_t *)pconf,
- set_mode == _RK_CONF_PROP_SET_DEL ? NULL : value, errstr,
- errstr_size);
-}
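
/* A sketch of the conf_init() entry point a plugin shared object is
 * expected to export, matching the rd_dl_sym()/conf_init() call in
 * rd_kafka_plugin_new() above; the body is hypothetical.  Built as
 * e.g. myplugin.so, it would be loaded via the "plugin.library.paths"
 * configuration property, which takes ';'-separated paths. */
#include <librdkafka/rdkafka.h>

rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
                              void **plug_opaquep,
                              char *errstr,
                              size_t errstr_size) {
        /* A real plugin would typically register interceptors on
         * \p conf here. */
        *plug_opaquep = NULL; /* per-plugin state, if any */
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
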
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h
deleted file mode 100644
index 1783d5f53..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_PLUGIN_H
-#define _RDKAFKA_PLUGIN_H
-
-rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope,
- void *conf,
- const char *name,
- const char *value,
- void *dstptr,
- rd_kafka_conf_set_mode_t set_mode,
- char *errstr,
- size_t errstr_size);
-
-#endif /* _RDKAFKA_PLUGIN_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h
deleted file mode 100644
index 396765857..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h
+++ /dev/null
@@ -1,655 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_PROTO_H_
-#define _RDKAFKA_PROTO_H_
-
-
-#include "rdendian.h"
-#include "rdvarint.h"
-
-/* Protocol defines */
-#include "rdkafka_protocol.h"
-
-
-
-/** Default generic retry count for failed requests.
- * This may be overridden for specific request types. */
-#define RD_KAFKA_REQUEST_DEFAULT_RETRIES 2
-
-/** Max (practically infinite) retry count */
-#define RD_KAFKA_REQUEST_MAX_RETRIES INT_MAX
-
-/** Do not retry request */
-#define RD_KAFKA_REQUEST_NO_RETRIES 0
-
-
-/**
- * Request types
- */
-struct rd_kafkap_reqhdr {
- int32_t Size;
- int16_t ApiKey;
- int16_t ApiVersion;
- int32_t CorrId;
- /* ClientId follows */
-};
-
-#define RD_KAFKAP_REQHDR_SIZE (4 + 2 + 2 + 4)
-#define RD_KAFKAP_RESHDR_SIZE (4 + 4)
-
-/**
- * Response header
- */
-struct rd_kafkap_reshdr {
- int32_t Size;
- int32_t CorrId;
-};
-
-
-/**
- * Request type v1 (flexible version)
- *
- * i32 Size
- * i16 ApiKey
- * i16 ApiVersion
- * i32 CorrId
- * string ClientId (2-byte encoding, not compact string)
- * uvarint Tags
- * <Request payload>
- * uvarint EndTags
- *
- * Any struct-type (non-primitive or array type) field in the request payload
- * must also have a trailing tags list; this goes for structs in arrays as well.
- */
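
/* A standalone sketch of the flexible (v1) request header framing
 * described above: big-endian fixed-width fields, a 2-byte-length
 * ClientId (not a compact string) and an empty tag section encoded as
 * the single uvarint byte 0x00.  Illustrative only. */
#include <stdint.h>
#include <string.h>

static size_t write_reqhdr_v1(uint8_t *buf, int16_t ApiKey,
                              int16_t ApiVersion, int32_t CorrId,
                              const char *ClientId) {
        uint16_t clen = (uint16_t)strlen(ClientId);
        size_t of     = 4; /* Size is back-filled below */
        uint32_t Size;

        buf[of++] = (uint8_t)(ApiKey >> 8);
        buf[of++] = (uint8_t)ApiKey;
        buf[of++] = (uint8_t)(ApiVersion >> 8);
        buf[of++] = (uint8_t)ApiVersion;
        buf[of++] = (uint8_t)(CorrId >> 24);
        buf[of++] = (uint8_t)(CorrId >> 16);
        buf[of++] = (uint8_t)(CorrId >> 8);
        buf[of++] = (uint8_t)CorrId;
        buf[of++] = (uint8_t)(clen >> 8); /* ClientId: 2-byte length */
        buf[of++] = (uint8_t)clen;
        memcpy(buf + of, ClientId, clen);
        of += clen;
        buf[of++] = 0x00; /* empty header tag section (uvarint 0) */

        /* Back-fill Size: everything following the 4-byte Size field. */
        Size   = (uint32_t)(of - 4);
        buf[0] = (uint8_t)(Size >> 24);
        buf[1] = (uint8_t)(Size >> 16);
        buf[2] = (uint8_t)(Size >> 8);
        buf[3] = (uint8_t)Size;
        return of;
}
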
-
-/**
- * @brief Protocol request type (ApiKey) to name/string.
- *
- * Generate updates to this list with generate_proto.sh.
- */
-static RD_UNUSED const char *rd_kafka_ApiKey2str(int16_t ApiKey) {
- static const char *names[] = {
- [RD_KAFKAP_Produce] = "Produce",
- [RD_KAFKAP_Fetch] = "Fetch",
- [RD_KAFKAP_ListOffsets] = "ListOffsets",
- [RD_KAFKAP_Metadata] = "Metadata",
- [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr",
- [RD_KAFKAP_StopReplica] = "StopReplica",
- [RD_KAFKAP_UpdateMetadata] = "UpdateMetadata",
- [RD_KAFKAP_ControlledShutdown] = "ControlledShutdown",
- [RD_KAFKAP_OffsetCommit] = "OffsetCommit",
- [RD_KAFKAP_OffsetFetch] = "OffsetFetch",
- [RD_KAFKAP_FindCoordinator] = "FindCoordinator",
- [RD_KAFKAP_JoinGroup] = "JoinGroup",
- [RD_KAFKAP_Heartbeat] = "Heartbeat",
- [RD_KAFKAP_LeaveGroup] = "LeaveGroup",
- [RD_KAFKAP_SyncGroup] = "SyncGroup",
- [RD_KAFKAP_DescribeGroups] = "DescribeGroups",
- [RD_KAFKAP_ListGroups] = "ListGroups",
- [RD_KAFKAP_SaslHandshake] = "SaslHandshake",
- [RD_KAFKAP_ApiVersion] = "ApiVersion",
- [RD_KAFKAP_CreateTopics] = "CreateTopics",
- [RD_KAFKAP_DeleteTopics] = "DeleteTopics",
- [RD_KAFKAP_DeleteRecords] = "DeleteRecords",
- [RD_KAFKAP_InitProducerId] = "InitProducerId",
- [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch",
- [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn",
- [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn",
- [RD_KAFKAP_EndTxn] = "EndTxn",
- [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers",
- [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit",
- [RD_KAFKAP_DescribeAcls] = "DescribeAcls",
- [RD_KAFKAP_CreateAcls] = "CreateAcls",
- [RD_KAFKAP_DeleteAcls] = "DeleteAcls",
- [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs",
- [RD_KAFKAP_AlterConfigs] = "AlterConfigs",
- [RD_KAFKAP_AlterReplicaLogDirs] = "AlterReplicaLogDirs",
- [RD_KAFKAP_DescribeLogDirs] = "DescribeLogDirs",
- [RD_KAFKAP_SaslAuthenticate] = "SaslAuthenticate",
- [RD_KAFKAP_CreatePartitions] = "CreatePartitions",
- [RD_KAFKAP_CreateDelegationToken] = "CreateDelegationToken",
- [RD_KAFKAP_RenewDelegationToken] = "RenewDelegationToken",
- [RD_KAFKAP_ExpireDelegationToken] = "ExpireDelegationToken",
- [RD_KAFKAP_DescribeDelegationToken] = "DescribeDelegationToken",
- [RD_KAFKAP_DeleteGroups] = "DeleteGroups",
- [RD_KAFKAP_ElectLeaders] = "ElectLeadersRequest",
- [RD_KAFKAP_IncrementalAlterConfigs] =
- "IncrementalAlterConfigsRequest",
- [RD_KAFKAP_AlterPartitionReassignments] =
- "AlterPartitionReassignmentsRequest",
- [RD_KAFKAP_ListPartitionReassignments] =
- "ListPartitionReassignmentsRequest",
- [RD_KAFKAP_OffsetDelete] = "OffsetDeleteRequest",
- [RD_KAFKAP_DescribeClientQuotas] = "DescribeClientQuotasRequest",
- [RD_KAFKAP_AlterClientQuotas] = "AlterClientQuotasRequest",
- [RD_KAFKAP_DescribeUserScramCredentials] =
- "DescribeUserScramCredentialsRequest",
- [RD_KAFKAP_AlterUserScramCredentials] =
- "AlterUserScramCredentialsRequest",
- [RD_KAFKAP_Vote] = "VoteRequest",
- [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest",
- [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest",
- [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest",
- [RD_KAFKAP_AlterIsr] = "AlterIsrRequest",
- [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest",
- [RD_KAFKAP_Envelope] = "EnvelopeRequest",
- [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot",
- [RD_KAFKAP_DescribeCluster] = "DescribeCluster",
- [RD_KAFKAP_DescribeProducers] = "DescribeProducers",
- [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat",
- [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker",
- [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions",
- [RD_KAFKAP_ListTransactions] = "ListTransactions",
- [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds",
- };
- static RD_TLS char ret[64];
-
- if (ApiKey < 0 || ApiKey >= (int)RD_ARRAYSIZE(names) ||
- !names[ApiKey]) {
- rd_snprintf(ret, sizeof(ret), "Unknown-%hd?", ApiKey);
- return ret;
- }
-
- return names[ApiKey];
-}
-
-
-
-/**
- * @brief ApiKey version support tuple.
- */
-struct rd_kafka_ApiVersion {
- int16_t ApiKey;
- int16_t MinVer;
- int16_t MaxVer;
-};
-
-/**
- * @brief ApiVersion.ApiKey comparator.
- */
-static RD_UNUSED int rd_kafka_ApiVersion_key_cmp(const void *_a,
- const void *_b) {
- const struct rd_kafka_ApiVersion *a =
- (const struct rd_kafka_ApiVersion *)_a;
- const struct rd_kafka_ApiVersion *b =
- (const struct rd_kafka_ApiVersion *)_b;
- return RD_CMP(a->ApiKey, b->ApiKey);
-}
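
/* Sketch: the comparator above is bsearch(3)-compatible, so an
 * ApiKey-sorted version array (e.g. from an ApiVersion response) can
 * be probed like this.  The helper name is hypothetical and
 * <stdlib.h> is assumed to be included. */
static RD_UNUSED const struct rd_kafka_ApiVersion *
find_api_version(const struct rd_kafka_ApiVersion *apis, size_t cnt,
                 int16_t ApiKey) {
        const struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey};

        return (const struct rd_kafka_ApiVersion *)bsearch(
            &skel, apis, cnt, sizeof(*apis), rd_kafka_ApiVersion_key_cmp);
}
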
-
-
-
-typedef enum {
- RD_KAFKA_READ_UNCOMMITTED = 0,
- RD_KAFKA_READ_COMMITTED = 1
-} rd_kafka_isolation_level_t;
-
-
-
-#define RD_KAFKA_CTRL_MSG_ABORT 0
-#define RD_KAFKA_CTRL_MSG_COMMIT 1
-
-
-/**
- * @enum Coordinator type, used with FindCoordinatorRequest
- */
-typedef enum rd_kafka_coordtype_t {
- RD_KAFKA_COORD_GROUP = 0,
- RD_KAFKA_COORD_TXN = 1
-} rd_kafka_coordtype_t;
-
-
-/**
- *
- * Kafka protocol string representation prefixed with a convenience header
- *
- * Serialized format:
- * { uint16, data.. }
- *
- */
-typedef struct rd_kafkap_str_s {
- /* convenience header (aligned access, host endian) */
- int len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */
- const char *str; /* points into data[] or other memory,
- * not NULL-terminated */
-} rd_kafkap_str_t;
-
-
-#define RD_KAFKAP_STR_LEN_NULL -1
-#define RD_KAFKAP_STR_IS_NULL(kstr) ((kstr)->len == RD_KAFKAP_STR_LEN_NULL)
-
-/* Returns the length of the string of a kafka protocol string representation */
-#define RD_KAFKAP_STR_LEN0(len) ((len) == RD_KAFKAP_STR_LEN_NULL ? 0 : (len))
-#define RD_KAFKAP_STR_LEN(kstr) RD_KAFKAP_STR_LEN0((kstr)->len)
-
-/* Returns the actual size of a kafka protocol string representation. */
-#define RD_KAFKAP_STR_SIZE0(len) (2 + RD_KAFKAP_STR_LEN0(len))
-#define RD_KAFKAP_STR_SIZE(kstr) RD_KAFKAP_STR_SIZE0((kstr)->len)
-
-
-/** @returns true if kstr is pre-serialized through .._new() */
-#define RD_KAFKAP_STR_IS_SERIALIZED(kstr) \
- (((const char *)((kstr) + 1)) + 2 == (const char *)((kstr)->str))
-
-/* Serialized Kafka string: only works for _new() kstrs.
- * Check with RD_KAFKAP_STR_IS_SERIALIZED */
-#define RD_KAFKAP_STR_SER(kstr) ((kstr) + 1)
-
-/* Macro suitable for "%.*s" printing. */
-#define RD_KAFKAP_STR_PR(kstr) \
- (int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \
- (kstr)->str
-
-/* strndupa() a Kafka string */
-#define RD_KAFKAP_STR_DUPA(destptr, kstr) \
- rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr))
-
-/* strndup() a Kafka string */
-#define RD_KAFKAP_STR_DUP(kstr) rd_strndup((kstr)->str, RD_KAFKAP_STR_LEN(kstr))
-
-#define RD_KAFKAP_STR_INITIALIZER \
- { .len = RD_KAFKAP_STR_LEN_NULL, .str = NULL }
-
-/**
- * Frees a Kafka string previously allocated with `rd_kafkap_str_new()`
- */
-static RD_UNUSED void rd_kafkap_str_destroy(rd_kafkap_str_t *kstr) {
- rd_free(kstr);
-}
-
-
-
-/**
- * Allocate a new Kafka string and make a copy of 'str'.
- * If 'len' is -1 the length will be calculated.
- * Supports Kafka NULL strings.
- * Nul-terminates the string, but the trailing \0 is not part of
- * the serialized string.
- */
-static RD_INLINE RD_UNUSED rd_kafkap_str_t *rd_kafkap_str_new(const char *str,
- int len) {
- rd_kafkap_str_t *kstr;
- int16_t klen;
-
- if (!str)
- len = RD_KAFKAP_STR_LEN_NULL;
- else if (len == -1)
- len = (int)strlen(str);
-
- kstr = (rd_kafkap_str_t *)rd_malloc(
- sizeof(*kstr) + 2 + (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1));
- kstr->len = len;
-
- /* Serialised format: 16-bit string length */
- klen = htobe16(len);
- memcpy(kstr + 1, &klen, 2);
-
- /* Pre-Serialised format: non null-terminated string */
- if (len == RD_KAFKAP_STR_LEN_NULL)
- kstr->str = NULL;
- else {
- kstr->str = ((const char *)(kstr + 1)) + 2;
- memcpy((void *)kstr->str, str, len);
- ((char *)kstr->str)[len] = '\0';
- }
-
- return kstr;
-}
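
/* A standalone decode sketch for the on-wire form built by
 * rd_kafkap_str_new() above: a big-endian int16 length (-1 = NULL
 * string) followed by the non NUL-terminated character data.  Kafka
 * "bytes" (further below) use the same scheme with an int32 length. */
#include <stdint.h>
#include <stdio.h>

static void print_kafka_str(const uint8_t *wire) {
        int16_t len = (int16_t)(((uint16_t)wire[0] << 8) | wire[1]);

        if (len == -1) /* RD_KAFKAP_STR_LEN_NULL */
                printf("(null)\n");
        else
                printf("%.*s\n", (int)len, (const char *)(wire + 2));
}
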
-
-
-/**
- * Makes a copy of `src`. The copy will be fully allocated and should
- * be freed with rd_kafkap_str_destroy()
- */
-static RD_INLINE RD_UNUSED rd_kafkap_str_t *
-rd_kafkap_str_copy(const rd_kafkap_str_t *src) {
- return rd_kafkap_str_new(src->str, src->len);
-}
-
-static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp(const rd_kafkap_str_t *a,
- const rd_kafkap_str_t *b) {
- int minlen = RD_MIN(a->len, b->len);
- int r = memcmp(a->str, b->str, minlen);
- if (r)
- return r;
- else
- return RD_CMP(a->len, b->len);
-}
-
-static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str(const rd_kafkap_str_t *a,
- const char *str) {
- int len = (int)strlen(str);
- int minlen = RD_MIN(a->len, len);
- int r = memcmp(a->str, str, minlen);
- if (r)
- return r;
- else
- return RD_CMP(a->len, len);
-}
-
-static RD_INLINE RD_UNUSED int
-rd_kafkap_str_cmp_str2(const char *str, const rd_kafkap_str_t *b) {
- int len = (int)strlen(str);
- int minlen = RD_MIN(b->len, len);
- int r = memcmp(str, b->str, minlen);
- if (r)
- return r;
- else
- return RD_CMP(len, b->len);
-}
-
-
-
-/**
- *
- * Kafka protocol bytes array representation prefixed with a convenience header
- *
- * Serialized format:
- * { uint32, data.. }
- *
- */
-typedef struct rd_kafkap_bytes_s {
- /* convenience header (aligned access, host endian) */
- int32_t len; /* Kafka bytes length (-1=NULL, 0=empty, >0=data) */
- const void *data; /* points just past the struct, or other memory,
- * not NULL-terminated */
- const char _data[1]; /* Bytes following struct when new()ed */
-} rd_kafkap_bytes_t;
-
-
-#define RD_KAFKAP_BYTES_LEN_NULL -1
-#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \
- ((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL)
-
-/* Returns the length of the bytes of a kafka protocol bytes representation */
-#define RD_KAFKAP_BYTES_LEN0(len) \
- ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0 : (len))
-#define RD_KAFKAP_BYTES_LEN(kbytes) RD_KAFKAP_BYTES_LEN0((kbytes)->len)
-
-/* Returns the actual size of a kafka protocol bytes representation. */
-#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len))
-#define RD_KAFKAP_BYTES_SIZE(kbytes) RD_KAFKAP_BYTES_SIZE0((kbytes)->len)
-
-/** @returns true if kbytes is pre-serialized through .._new() */
-#define RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes)                                 \
-        (((const char *)((kbytes) + 1)) + 4 == (const char *)((kbytes)->data))
-
-/* Serialized Kafka bytes: only works for _new() kbytes */
-#define RD_KAFKAP_BYTES_SER(kbytes) ((kbytes) + 1)
-
-
-/**
- * Frees a Kafka bytes previously allocated with `rd_kafkap_bytes_new()`
- */
-static RD_UNUSED void rd_kafkap_bytes_destroy(rd_kafkap_bytes_t *kbytes) {
- rd_free(kbytes);
-}
-
-
-/**
- * @brief Allocate a new Kafka bytes and make a copy of 'bytes'.
- *        If \p len > 0 but \p bytes is NULL, no copying is performed but
- *        the bytes structure will be allocated to fit \p len bytes.
- *
- * Supports:
- * - Kafka NULL bytes (bytes==NULL,len==0),
- * - Empty bytes (bytes!=NULL,len==0)
- * - Copy data (bytes!=NULL,len>0)
- * - No-copy, just alloc (bytes==NULL,len>0)
- */
-static RD_INLINE RD_UNUSED rd_kafkap_bytes_t *
-rd_kafkap_bytes_new(const char *bytes, int32_t len) {
- rd_kafkap_bytes_t *kbytes;
- int32_t klen;
-
- if (!bytes && !len)
- len = RD_KAFKAP_BYTES_LEN_NULL;
-
- kbytes = (rd_kafkap_bytes_t *)rd_malloc(
- sizeof(*kbytes) + 4 + (len == RD_KAFKAP_BYTES_LEN_NULL ? 0 : len));
- kbytes->len = len;
-
- klen = htobe32(len);
- memcpy((void *)(kbytes + 1), &klen, 4);
-
- if (len == RD_KAFKAP_BYTES_LEN_NULL)
- kbytes->data = NULL;
- else {
- kbytes->data = ((const char *)(kbytes + 1)) + 4;
- if (bytes)
- memcpy((void *)kbytes->data, bytes, len);
- }
-
- return kbytes;
-}
-
-
-/**
- * Makes a copy of `src`. The copy will be fully allocated and should
- * be freed with rd_kafkap_bytes_destroy()
- */
-static RD_INLINE RD_UNUSED rd_kafkap_bytes_t *
-rd_kafkap_bytes_copy(const rd_kafkap_bytes_t *src) {
- return rd_kafkap_bytes_new((const char *)src->data, src->len);
-}
-
-
-static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp(const rd_kafkap_bytes_t *a,
- const rd_kafkap_bytes_t *b) {
- int minlen = RD_MIN(a->len, b->len);
- int r = memcmp(a->data, b->data, minlen);
- if (r)
- return r;
- else
- return RD_CMP(a->len, b->len);
-}
-
-static RD_INLINE RD_UNUSED int
-rd_kafkap_bytes_cmp_data(const rd_kafkap_bytes_t *a,
- const char *data,
- int len) {
- int minlen = RD_MIN(a->len, len);
- int r = memcmp(a->data, data, minlen);
- if (r)
- return r;
- else
- return RD_CMP(a->len, len);
-}
-
-
-
-typedef struct rd_kafka_buf_s rd_kafka_buf_t;
-
-
-#define RD_KAFKA_NODENAME_SIZE 256
-
-
-
-/**
- * @brief Message overheads (worst-case)
- */
-
-/**
- * MsgVersion v0..v1
- */
-/* Offset + MessageSize */
-#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8 + 4)
-/* CRC + Magic + Attr + KeyLen + ValueLen */
-#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE (4 + 1 + 1 + 4 + 4)
-/* CRC + Magic + Attr + Timestamp + KeyLen + ValueLen */
-#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE (4 + 1 + 1 + 8 + 4 + 4)
-/* Maximum per-message overhead */
-#define RD_KAFKAP_MESSAGE_V0_OVERHEAD \
- (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V0_HDR_SIZE)
-#define RD_KAFKAP_MESSAGE_V1_OVERHEAD \
- (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V1_HDR_SIZE)
-
-/**
- * MsgVersion v2
- */
-#define RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD \
- ( /* Length (varint) */ \
- RD_UVARINT_ENC_SIZEOF(int32_t) + /* Attributes */ \
- 1 + /* TimestampDelta (varint) */ \
- RD_UVARINT_ENC_SIZEOF(int64_t) + /* OffsetDelta (varint) */ \
- RD_UVARINT_ENC_SIZEOF(int32_t) + /* KeyLen (varint) */ \
- RD_UVARINT_ENC_SIZEOF(int32_t) + /* ValueLen (varint) */ \
- RD_UVARINT_ENC_SIZEOF(int32_t) + /* HeaderCnt (varint): */ \
- RD_UVARINT_ENC_SIZEOF(int32_t))
-
-#define RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD \
- ( /* Length (varint) */ \
- RD_UVARINT_ENC_SIZE_0() + /* Attributes */ \
- 1 + /* TimestampDelta (varint) */ \
- RD_UVARINT_ENC_SIZE_0() + /* OffsetDelta (varint) */ \
- RD_UVARINT_ENC_SIZE_0() + /* KeyLen (varint) */ \
- RD_UVARINT_ENC_SIZE_0() + /* ValueLen (varint) */ \
- RD_UVARINT_ENC_SIZE_0() + /* HeaderCnt (varint): */ \
- RD_UVARINT_ENC_SIZE_0())
-
-
-/**
- * @brief MessageSets are not explicitly versioned but depend on the
- * Produce/Fetch API version and the encompassed Message versions.
- * We use the Message version (MsgVersion, aka MagicByte) to describe
- * the MessageSet version, that is, MsgVersion <= 1 uses the old
- * MessageSet version (v0?) while MsgVersion 2 uses MessageSet version v2
- */
-
-/* Old MessageSet header: none */
-#define RD_KAFKAP_MSGSET_V0_SIZE 0
-
-/* MessageSet v2 header */
-#define RD_KAFKAP_MSGSET_V2_SIZE \
- (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4 + 4)
-
-/* Byte offsets for MessageSet fields */
-#define RD_KAFKAP_MSGSET_V2_OF_Length (8)
-#define RD_KAFKAP_MSGSET_V2_OF_MagicByte (8 + 4 + 4)
-#define RD_KAFKAP_MSGSET_V2_OF_CRC (8 + 4 + 4 + 1)
-#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8 + 4 + 4 + 1 + 4)
-#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8 + 4 + 4 + 1 + 4 + 2)
-#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4)
-#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8)
-#define RD_KAFKAP_MSGSET_V2_OF_ProducerId (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8)
-#define RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch \
- (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8)
-#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence \
- (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2)
-#define RD_KAFKAP_MSGSET_V2_OF_RecordCount \
- (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4)
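
/* Sketch: with the byte offsets above, fields can be read straight out
 * of a raw MessageSet v2 header (all fixed-width fields are
 * big-endian).  The helper names are illustrative. */
static RD_UNUSED uint8_t msgset_v2_magic(const uint8_t *hdr) {
        return hdr[RD_KAFKAP_MSGSET_V2_OF_MagicByte]; /* 2 for v2 */
}

static RD_UNUSED int32_t msgset_v2_record_count(const uint8_t *hdr) {
        const uint8_t *p = hdr + RD_KAFKAP_MSGSET_V2_OF_RecordCount;

        return (int32_t)(((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                         ((uint32_t)p[2] << 8) | (uint32_t)p[3]);
}
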
-
-
-
-/**
- * @name Producer ID and Epoch for the Idempotent Producer
- * @{
- *
- */
-
-/**
- * @brief Producer ID and Epoch
- */
-typedef struct rd_kafka_pid_s {
- int64_t id; /**< Producer Id */
- int16_t epoch; /**< Producer Epoch */
-} rd_kafka_pid_t;
-
-#define RD_KAFKA_PID_INITIALIZER \
- { -1, -1 }
-
-/**
- * @returns true if \p PID is valid
- */
-#define rd_kafka_pid_valid(PID) ((PID).id != -1)
-
-/**
- * @brief Check two pids for equality
- */
-static RD_UNUSED RD_INLINE int rd_kafka_pid_eq(const rd_kafka_pid_t a,
- const rd_kafka_pid_t b) {
- return a.id == b.id && a.epoch == b.epoch;
-}
-
-/**
- * @brief Pid+epoch comparator
- */
-static RD_UNUSED int rd_kafka_pid_cmp(const void *_a, const void *_b) {
- const rd_kafka_pid_t *a = _a, *b = _b;
-
- if (a->id < b->id)
- return -1;
- else if (a->id > b->id)
- return 1;
-
- return (int)a->epoch - (int)b->epoch;
-}
-
-
-/**
- * @returns the string representation of a PID in a thread-safe
- * static buffer.
- */
-static RD_UNUSED const char *rd_kafka_pid2str(const rd_kafka_pid_t pid) {
- static RD_TLS char buf[2][64];
- static RD_TLS int i;
-
- if (!rd_kafka_pid_valid(pid))
- return "PID{Invalid}";
-
- i = (i + 1) % 2;
-
- rd_snprintf(buf[i], sizeof(buf[i]), "PID{Id:%" PRId64 ",Epoch:%hd}",
- pid.id, pid.epoch);
-
- return buf[i];
-}
-
-/**
- * @brief Reset the PID to invalid/init state
- */
-static RD_UNUSED RD_INLINE void rd_kafka_pid_reset(rd_kafka_pid_t *pid) {
- pid->id = -1;
- pid->epoch = -1;
-}
-
-
-/**
- * @brief Bump the epoch of a valid PID
- */
-static RD_UNUSED RD_INLINE rd_kafka_pid_t
-rd_kafka_pid_bump(const rd_kafka_pid_t old) {
- rd_kafka_pid_t new_pid = {
- old.id, (int16_t)(((int)old.epoch + 1) & (int)INT16_MAX)};
- return new_pid;
-}
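
/* Illustration of the epoch mask above: bumping a PID whose epoch is
 * INT16_MAX wraps the epoch back to 0 while the Id is preserved. */
static RD_UNUSED void pid_bump_example(void) {
        rd_kafka_pid_t pid = {1234, INT16_MAX};

        pid = rd_kafka_pid_bump(pid);
        /* pid.id == 1234, pid.epoch == 0 */
}
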
-
-/**@}*/
-
-
-#endif /* _RDKAFKA_PROTO_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h
deleted file mode 100644
index 60c099986..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_PROTOCOL_H_
-#define _RDKAFKA_PROTOCOL_H_
-
-/**
- * Kafka protocol defines.
- *
- * The separation from rdkafka_proto.h is to provide the protocol defines
- * to C and C++ test code in tests/.
- */
-
-#define RD_KAFKA_PORT 9092
-#define RD_KAFKA_PORT_STR "9092"
-
-
-/**
- * Request types
- *
- * Generate updates to this list with generate_proto.sh.
- */
-#define RD_KAFKAP_None -1
-#define RD_KAFKAP_Produce 0
-#define RD_KAFKAP_Fetch 1
-#define RD_KAFKAP_ListOffsets 2
-#define RD_KAFKAP_Metadata 3
-#define RD_KAFKAP_LeaderAndIsr 4
-#define RD_KAFKAP_StopReplica 5
-#define RD_KAFKAP_UpdateMetadata 6
-#define RD_KAFKAP_ControlledShutdown 7
-#define RD_KAFKAP_OffsetCommit 8
-#define RD_KAFKAP_OffsetFetch 9
-#define RD_KAFKAP_FindCoordinator 10
-#define RD_KAFKAP_JoinGroup 11
-#define RD_KAFKAP_Heartbeat 12
-#define RD_KAFKAP_LeaveGroup 13
-#define RD_KAFKAP_SyncGroup 14
-#define RD_KAFKAP_DescribeGroups 15
-#define RD_KAFKAP_ListGroups 16
-#define RD_KAFKAP_SaslHandshake 17
-#define RD_KAFKAP_ApiVersion 18
-#define RD_KAFKAP_CreateTopics 19
-#define RD_KAFKAP_DeleteTopics 20
-#define RD_KAFKAP_DeleteRecords 21
-#define RD_KAFKAP_InitProducerId 22
-#define RD_KAFKAP_OffsetForLeaderEpoch 23
-#define RD_KAFKAP_AddPartitionsToTxn 24
-#define RD_KAFKAP_AddOffsetsToTxn 25
-#define RD_KAFKAP_EndTxn 26
-#define RD_KAFKAP_WriteTxnMarkers 27
-#define RD_KAFKAP_TxnOffsetCommit 28
-#define RD_KAFKAP_DescribeAcls 29
-#define RD_KAFKAP_CreateAcls 30
-#define RD_KAFKAP_DeleteAcls 31
-#define RD_KAFKAP_DescribeConfigs 32
-#define RD_KAFKAP_AlterConfigs 33
-#define RD_KAFKAP_AlterReplicaLogDirs 34
-#define RD_KAFKAP_DescribeLogDirs 35
-#define RD_KAFKAP_SaslAuthenticate 36
-#define RD_KAFKAP_CreatePartitions 37
-#define RD_KAFKAP_CreateDelegationToken 38
-#define RD_KAFKAP_RenewDelegationToken 39
-#define RD_KAFKAP_ExpireDelegationToken 40
-#define RD_KAFKAP_DescribeDelegationToken 41
-#define RD_KAFKAP_DeleteGroups 42
-#define RD_KAFKAP_ElectLeaders 43
-#define RD_KAFKAP_IncrementalAlterConfigs 44
-#define RD_KAFKAP_AlterPartitionReassignments 45
-#define RD_KAFKAP_ListPartitionReassignments 46
-#define RD_KAFKAP_OffsetDelete 47
-#define RD_KAFKAP_DescribeClientQuotas 48
-#define RD_KAFKAP_AlterClientQuotas 49
-#define RD_KAFKAP_DescribeUserScramCredentials 50
-#define RD_KAFKAP_AlterUserScramCredentials 51
-#define RD_KAFKAP_Vote 52
-#define RD_KAFKAP_BeginQuorumEpoch 53
-#define RD_KAFKAP_EndQuorumEpoch 54
-#define RD_KAFKAP_DescribeQuorum 55
-#define RD_KAFKAP_AlterIsr 56
-#define RD_KAFKAP_UpdateFeatures 57
-#define RD_KAFKAP_Envelope 58
-#define RD_KAFKAP_FetchSnapshot 59
-#define RD_KAFKAP_DescribeCluster 60
-#define RD_KAFKAP_DescribeProducers 61
-#define RD_KAFKAP_BrokerHeartbeat 63
-#define RD_KAFKAP_UnregisterBroker 64
-#define RD_KAFKAP_DescribeTransactions 65
-#define RD_KAFKAP_ListTransactions 66
-#define RD_KAFKAP_AllocateProducerIds 67
-
-#define RD_KAFKAP__NUM 68
-
-
-#endif /* _RDKAFKA_PROTOCOL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c
deleted file mode 100644
index 57fce36b8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_interceptor.h"
-
-int RD_TLS rd_kafka_yield_thread = 0;
-
-void rd_kafka_yield(rd_kafka_t *rk) {
- rd_kafka_yield_thread = 1;
-}
-
-
-/**
- * @brief Check and reset yield flag.
- * @returns rd_true if caller should yield, otherwise rd_false.
- * @remarks rkq_lock MUST be held
- */
-static RD_INLINE rd_bool_t rd_kafka_q_check_yield(rd_kafka_q_t *rkq) {
- if (!(rkq->rkq_flags & RD_KAFKA_Q_F_YIELD))
- return rd_false;
-
- rkq->rkq_flags &= ~RD_KAFKA_Q_F_YIELD;
- return rd_true;
-}
-/**
- * Destroy a queue. refcnt must be at zero.
- */
-void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq) {
-
- mtx_lock(&rkq->rkq_lock);
- if (unlikely(rkq->rkq_qio != NULL)) {
- rd_free(rkq->rkq_qio);
- rkq->rkq_qio = NULL;
- }
-        /* The queue must have been disabled prior to final destruction;
-         * this is to catch the case where the queue owner/poller does not
-         * use rd_kafka_q_destroy_owner(). */
- rd_dassert(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY));
- rd_kafka_q_disable0(rkq, 0 /*no-lock*/); /* for the non-devel case */
- rd_kafka_q_fwd_set0(rkq, NULL, 0 /*no-lock*/, 0 /*no-fwd-app*/);
- rd_kafka_q_purge0(rkq, 0 /*no-lock*/);
- assert(!rkq->rkq_fwdq);
- mtx_unlock(&rkq->rkq_lock);
- mtx_destroy(&rkq->rkq_lock);
- cnd_destroy(&rkq->rkq_cond);
-
- if (rkq->rkq_flags & RD_KAFKA_Q_F_ALLOCATED)
- rd_free(rkq);
-}
-
-
-
-/**
- * Initialize a queue.
- */
-void rd_kafka_q_init0(rd_kafka_q_t *rkq,
- rd_kafka_t *rk,
- const char *func,
- int line) {
- rd_kafka_q_reset(rkq);
- rkq->rkq_fwdq = NULL;
- rkq->rkq_refcnt = 1;
- rkq->rkq_flags = RD_KAFKA_Q_F_READY;
- rkq->rkq_rk = rk;
- rkq->rkq_qio = NULL;
- rkq->rkq_serve = NULL;
- rkq->rkq_opaque = NULL;
- mtx_init(&rkq->rkq_lock, mtx_plain);
- cnd_init(&rkq->rkq_cond);
-#if ENABLE_DEVEL
- rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line);
-#else
- rkq->rkq_name = func;
-#endif
-}
-
-
-/**
- * Allocate a new queue and initialize it.
- */
-rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, const char *func, int line) {
- rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq));
- rd_kafka_q_init(rkq, rk);
- rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED;
-#if ENABLE_DEVEL
- rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line);
-#else
- rkq->rkq_name = func;
-#endif
- return rkq;
-}
-
-/**
- * Set/clear forward queue.
- * Queue forwarding enables message routing inside rdkafka.
- * Typical use is to re-route all fetched messages for all partitions
- * to one single queue.
- *
- * All access to rkq_fwdq are protected by rkq_lock.
- */
-void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq,
- rd_kafka_q_t *destq,
- int do_lock,
- int fwd_app) {
- if (unlikely(srcq == destq))
- return;
-
- if (do_lock)
- mtx_lock(&srcq->rkq_lock);
- if (fwd_app)
- srcq->rkq_flags |= RD_KAFKA_Q_F_FWD_APP;
- if (srcq->rkq_fwdq) {
- rd_kafka_q_destroy(srcq->rkq_fwdq);
- srcq->rkq_fwdq = NULL;
- }
- if (destq) {
- rd_kafka_q_keep(destq);
-
-                /* If srcq already has ops in its queue, append them to
-                 * destq's queue. This is an irreversible operation. */
- if (srcq->rkq_qlen > 0) {
- rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY);
- rd_kafka_q_concat(destq, srcq);
- }
-
- srcq->rkq_fwdq = destq;
- }
- if (do_lock)
- mtx_unlock(&srcq->rkq_lock);
-}
-
-/**
- * Purge all entries from a queue.
- */
-int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock) {
- rd_kafka_op_t *rko, *next;
- TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
- rd_kafka_q_t *fwdq;
- int cnt = 0;
-
- if (do_lock)
- mtx_lock(&rkq->rkq_lock);
-
- if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
- cnt = rd_kafka_q_purge(fwdq);
- rd_kafka_q_destroy(fwdq);
- return cnt;
- }
-
-        /* Move the op queue to tmpq to avoid lock-order issues
-         * caused by locks taken in rd_kafka_op_destroy(). */
- TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link);
-
- rd_kafka_q_mark_served(rkq);
-
- /* Zero out queue */
- rd_kafka_q_reset(rkq);
-
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
-
- /* Destroy the ops */
- next = TAILQ_FIRST(&tmpq);
- while ((rko = next)) {
- next = TAILQ_NEXT(next, rko_link);
- rd_kafka_op_destroy(rko);
- cnt++;
- }
-
- return cnt;
-}
-
-
-/**
- * Purge all entries from a queue with a rktp version smaller than `version`.
- * This shaves off the head of the queue, up until the first rko with
- * a non-matching rktp or version.
- */
-void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq,
- rd_kafka_toppar_t *rktp,
- int version) {
- rd_kafka_op_t *rko, *next;
- TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
- int32_t cnt = 0;
- int64_t size = 0;
- rd_kafka_q_t *fwdq;
-
- mtx_lock(&rkq->rkq_lock);
-
- if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- mtx_unlock(&rkq->rkq_lock);
- rd_kafka_q_purge_toppar_version(fwdq, rktp, version);
- rd_kafka_q_destroy(fwdq);
- return;
- }
-
- /* Move ops to temporary queue and then destroy them from there
- * without locks to avoid lock-ordering problems in op_destroy() */
- while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp &&
- rko->rko_rktp == rktp && rko->rko_version < version) {
- TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
- TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
- cnt++;
- size += rko->rko_len;
- }
-
- rd_kafka_q_mark_served(rkq);
-
- rkq->rkq_qlen -= cnt;
- rkq->rkq_qsize -= size;
- mtx_unlock(&rkq->rkq_lock);
-
- next = TAILQ_FIRST(&tmpq);
- while ((rko = next)) {
- next = TAILQ_NEXT(next, rko_link);
- rd_kafka_op_destroy(rko);
- }
-}
-
-
-/**
- * Move 'cnt' entries from 'srcq' to 'dstq'.
- * If 'cnt' == -1 all entries will be moved.
- * Returns the number of entries moved.
- */
-int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq,
- rd_kafka_q_t *srcq,
- int cnt,
- int do_locks) {
- rd_kafka_op_t *rko;
- int mcnt = 0;
-
- if (do_locks) {
- mtx_lock(&srcq->rkq_lock);
- mtx_lock(&dstq->rkq_lock);
- }
-
- if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) {
- if (cnt > 0 && dstq->rkq_qlen == 0)
- rd_kafka_q_io_event(dstq);
-
- /* Optimization, if 'cnt' is equal/larger than all
- * items of 'srcq' we can move the entire queue. */
- if (cnt == -1 || cnt >= (int)srcq->rkq_qlen) {
- mcnt = srcq->rkq_qlen;
- rd_kafka_q_concat0(dstq, srcq, 0 /*no-lock*/);
- } else {
- while (mcnt < cnt &&
- (rko = TAILQ_FIRST(&srcq->rkq_q))) {
- TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
- if (likely(!rko->rko_prio))
- TAILQ_INSERT_TAIL(&dstq->rkq_q, rko,
- rko_link);
- else
- TAILQ_INSERT_SORTED(
- &dstq->rkq_q, rko, rd_kafka_op_t *,
- rko_link, rd_kafka_op_cmp_prio);
-
- srcq->rkq_qlen--;
- dstq->rkq_qlen++;
- srcq->rkq_qsize -= rko->rko_len;
- dstq->rkq_qsize += rko->rko_len;
- mcnt++;
- }
- }
-
- rd_kafka_q_mark_served(srcq);
-
- } else
- mcnt = rd_kafka_q_move_cnt(
- dstq->rkq_fwdq ? dstq->rkq_fwdq : dstq,
- srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq, cnt, do_locks);
-
- if (do_locks) {
- mtx_unlock(&dstq->rkq_lock);
- mtx_unlock(&srcq->rkq_lock);
- }
-
- return mcnt;
-}
-
-
-/**
- * Filters out outdated ops.
- */
-static RD_INLINE rd_kafka_op_t *
-rd_kafka_op_filter(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int version) {
- if (unlikely(!rko))
- return NULL;
-
- if (unlikely(rd_kafka_op_version_outdated(rko, version))) {
- rd_kafka_q_deq0(rkq, rko);
- rd_kafka_op_destroy(rko);
- return NULL;
- }
-
- return rko;
-}
-
-
-
-/**
- * Pop an op from a queue.
- *
- * Locality: any thread.
- */
-
-
-/**
- * Serve q like rd_kafka_q_serve() until an op is found that can be returned
- * as an event to the application.
- *
- * @returns the first event:able op, or NULL on timeout.
- *
- * Locality: any thread
- */
-rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq,
- rd_ts_t timeout_us,
- int32_t version,
- rd_kafka_q_cb_type_t cb_type,
- rd_kafka_q_serve_cb_t *callback,
- void *opaque) {
- rd_kafka_op_t *rko;
- rd_kafka_q_t *fwdq;
-
- rd_dassert(cb_type);
-
- mtx_lock(&rkq->rkq_lock);
-
- rd_kafka_yield_thread = 0;
- if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- struct timespec timeout_tspec;
-
- rd_timeout_init_timespec_us(&timeout_tspec, timeout_us);
-
- while (1) {
- rd_kafka_op_res_t res;
- /* Keep track of current lock status to avoid
- * unnecessary lock flapping in all the cases below. */
- rd_bool_t is_locked = rd_true;
-
- /* Filter out outdated ops */
- retry:
- while ((rko = TAILQ_FIRST(&rkq->rkq_q)) &&
- !(rko = rd_kafka_op_filter(rkq, rko, version)))
- ;
-
- rd_kafka_q_mark_served(rkq);
-
- if (rko) {
- /* Proper versioned op */
- rd_kafka_q_deq0(rkq, rko);
-
- /* Let op_handle() operate without lock
- * held to allow re-enqueuing, etc. */
- mtx_unlock(&rkq->rkq_lock);
- is_locked = rd_false;
-
- /* Ops with callbacks are considered handled
- * and we move on to the next op, if any.
- * Ops w/o callbacks are returned immediately */
- res = rd_kafka_op_handle(rkq->rkq_rk, rkq, rko,
- cb_type, opaque,
- callback);
-
- if (res == RD_KAFKA_OP_RES_HANDLED ||
- res == RD_KAFKA_OP_RES_KEEP) {
- mtx_lock(&rkq->rkq_lock);
- is_locked = rd_true;
- goto retry; /* Next op */
- } else if (unlikely(res ==
- RD_KAFKA_OP_RES_YIELD)) {
- /* Callback yielded, unroll */
- return NULL;
- } else
- break; /* Proper op, handle below. */
- }
-
- if (unlikely(rd_kafka_q_check_yield(rkq))) {
- if (is_locked)
- mtx_unlock(&rkq->rkq_lock);
- return NULL;
- }
-
- if (!is_locked)
- mtx_lock(&rkq->rkq_lock);
-
- if (cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock,
- &timeout_tspec) != thrd_success) {
- mtx_unlock(&rkq->rkq_lock);
- return NULL;
- }
- }
-
- } else {
- /* Since the q_pop may block we need to release the parent
- * queue's lock. */
- mtx_unlock(&rkq->rkq_lock);
- rko = rd_kafka_q_pop_serve(fwdq, timeout_us, version, cb_type,
- callback, opaque);
- rd_kafka_q_destroy(fwdq);
- }
-
-
- return rko;
-}
-
-rd_kafka_op_t *
-rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version) {
- return rd_kafka_q_pop_serve(rkq, timeout_us, version,
- RD_KAFKA_Q_CB_RETURN, NULL, NULL);
-}
-
-
-/**
- * Pop all available ops from a queue and call the provided
- * callback for each op.
- * `max_cnt` limits the number of ops served, 0 = no limit.
- *
- * Returns the number of ops served.
- *
- * Locality: any thread.
- */
-int rd_kafka_q_serve(rd_kafka_q_t *rkq,
- int timeout_ms,
- int max_cnt,
- rd_kafka_q_cb_type_t cb_type,
- rd_kafka_q_serve_cb_t *callback,
- void *opaque) {
- rd_kafka_t *rk = rkq->rkq_rk;
- rd_kafka_op_t *rko;
- rd_kafka_q_t localq;
- rd_kafka_q_t *fwdq;
- int cnt = 0;
- struct timespec timeout_tspec;
-
- rd_dassert(cb_type);
-
- mtx_lock(&rkq->rkq_lock);
-
- rd_dassert(TAILQ_EMPTY(&rkq->rkq_q) || rkq->rkq_qlen > 0);
- if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- int ret;
-                /* Since serving the forwarded queue may block we need
-                 * to release the parent queue's lock. */
- mtx_unlock(&rkq->rkq_lock);
- ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt, cb_type,
- callback, opaque);
- rd_kafka_q_destroy(fwdq);
- return ret;
- }
-
- rd_timeout_init_timespec(&timeout_tspec, timeout_ms);
-
- /* Wait for op */
- while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) &&
- !rd_kafka_q_check_yield(rkq) &&
- cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock,
- &timeout_tspec) == thrd_success)
- ;
-
- rd_kafka_q_mark_served(rkq);
-
- if (!rko) {
- mtx_unlock(&rkq->rkq_lock);
- return 0;
- }
-
- /* Move the first `max_cnt` ops. */
- rd_kafka_q_init(&localq, rkq->rkq_rk);
- rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1 /*all*/ : max_cnt,
- 0 /*no-locks*/);
-
- mtx_unlock(&rkq->rkq_lock);
-
- rd_kafka_yield_thread = 0;
-
- /* Call callback for each op */
- while ((rko = TAILQ_FIRST(&localq.rkq_q))) {
- rd_kafka_op_res_t res;
-
- rd_kafka_q_deq0(&localq, rko);
- res = rd_kafka_op_handle(rk, &localq, rko, cb_type, opaque,
- callback);
- /* op must have been handled */
- rd_kafka_assert(NULL, res != RD_KAFKA_OP_RES_PASS);
- cnt++;
-
- if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
- rd_kafka_yield_thread)) {
- /* Callback called rd_kafka_yield(), we must
- * stop our callback dispatching and put the
- * ops in localq back on the original queue head. */
- if (!TAILQ_EMPTY(&localq.rkq_q))
- rd_kafka_q_prepend(rkq, &localq);
- break;
- }
- }
-
- rd_kafka_q_destroy_owner(&localq);
-
- return cnt;
-}
-
-/**
- * @brief Filter out and destroy outdated messages.
- *
- * @returns the number of valid messages.
- *
- * @locality Any thread.
- */
-static size_t
-rd_kafka_purge_outdated_messages(rd_kafka_toppar_t *rktp,
- int32_t version,
- rd_kafka_message_t **rkmessages,
- size_t cnt,
- struct rd_kafka_op_tailq *ctrl_msg_q) {
- size_t valid_count = 0;
- size_t i;
- rd_kafka_op_t *rko, *next;
-
- for (i = 0; i < cnt; i++) {
- rko = rkmessages[i]->_private;
- if (rko->rko_rktp == rktp &&
- rd_kafka_op_version_outdated(rko, version)) {
- /* This also destroys the corresponding rkmessage. */
- rd_kafka_op_destroy(rko);
- } else if (i > valid_count) {
- rkmessages[valid_count++] = rkmessages[i];
- } else {
- valid_count++;
- }
- }
-
- /* Discard outdated control msgs ops */
- next = TAILQ_FIRST(ctrl_msg_q);
- while (next) {
- rko = next;
- next = TAILQ_NEXT(rko, rko_link);
- if (rko->rko_rktp == rktp &&
- rd_kafka_op_version_outdated(rko, version)) {
- TAILQ_REMOVE(ctrl_msg_q, rko, rko_link);
- rd_kafka_op_destroy(rko);
- }
- }
-
- return valid_count;
-}
-
-
-/**
- * Populate 'rkmessages' array with messages from 'rkq'.
- * Each returned message's offset is used to advance the application's
- * consume position for that toppar (see the update loop below).
- *
- * Returns the number of messages added.
- */
-
-int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size) {
- unsigned int cnt = 0;
- TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
- struct rd_kafka_op_tailq ctrl_msg_q =
- TAILQ_HEAD_INITIALIZER(ctrl_msg_q);
- rd_kafka_op_t *rko, *next;
- rd_kafka_t *rk = rkq->rkq_rk;
- rd_kafka_q_t *fwdq;
- struct timespec timeout_tspec;
- int i;
-
- mtx_lock(&rkq->rkq_lock);
- if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
-                /* Since serving the forwarded queue may block we need
-                 * to release the parent queue's lock. */
- mtx_unlock(&rkq->rkq_lock);
- cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, rkmessages,
- rkmessages_size);
- rd_kafka_q_destroy(fwdq);
- return cnt;
- }
- mtx_unlock(&rkq->rkq_lock);
-
- if (timeout_ms)
- rd_kafka_app_poll_blocking(rk);
-
- rd_timeout_init_timespec(&timeout_tspec, timeout_ms);
-
- rd_kafka_yield_thread = 0;
- while (cnt < rkmessages_size) {
- rd_kafka_op_res_t res;
-
- mtx_lock(&rkq->rkq_lock);
-
- while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) &&
- !rd_kafka_q_check_yield(rkq) &&
- cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock,
- &timeout_tspec) == thrd_success)
- ;
-
- rd_kafka_q_mark_served(rkq);
-
- if (!rko) {
- mtx_unlock(&rkq->rkq_lock);
- break; /* Timed out */
- }
-
- rd_kafka_q_deq0(rkq, rko);
-
- mtx_unlock(&rkq->rkq_lock);
-
- if (unlikely(rko->rko_type == RD_KAFKA_OP_BARRIER)) {
- cnt = (unsigned int)rd_kafka_purge_outdated_messages(
- rko->rko_rktp, rko->rko_version, rkmessages, cnt,
- &ctrl_msg_q);
- rd_kafka_op_destroy(rko);
- continue;
- }
-
- if (rd_kafka_op_version_outdated(rko, 0)) {
- /* Outdated op, put on discard queue */
- TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
- continue;
- }
-
- /* Serve non-FETCH callbacks */
- res =
- rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL);
- if (res == RD_KAFKA_OP_RES_KEEP ||
- res == RD_KAFKA_OP_RES_HANDLED) {
- /* Callback served, rko is destroyed (if HANDLED). */
- continue;
- } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
- rd_kafka_yield_thread)) {
- /* Yield. */
- break;
- }
- rd_dassert(res == RD_KAFKA_OP_RES_PASS);
-
-                /* If this is a control message, don't return it to the
-                 * application. Add it to a tmp queue from which we can store
-                 * the offset and destroy the op. */
- if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) {
- TAILQ_INSERT_TAIL(&ctrl_msg_q, rko, rko_link);
- continue;
- }
-
- /* Get rkmessage from rko and append to array. */
- rkmessages[cnt++] = rd_kafka_message_get(rko);
- }
-
- for (i = cnt - 1; i >= 0; i--) {
- rko = (rd_kafka_op_t *)rkmessages[i]->_private;
- rd_kafka_toppar_t *rktp = rko->rko_rktp;
- int64_t offset = rkmessages[i]->offset + 1;
- if (unlikely(rktp->rktp_app_pos.offset < offset))
- rd_kafka_update_app_pos(
- rk, rktp,
- RD_KAFKA_FETCH_POS(
- offset,
- rd_kafka_message_leader_epoch(rkmessages[i])),
- RD_DO_LOCK);
- }
-
- /* Discard non-desired and already handled ops */
- next = TAILQ_FIRST(&tmpq);
- while (next) {
- rko = next;
- next = TAILQ_NEXT(next, rko_link);
- rd_kafka_op_destroy(rko);
- }
-
- /* Discard ctrl msgs */
- next = TAILQ_FIRST(&ctrl_msg_q);
- while (next) {
- rko = next;
- next = TAILQ_NEXT(next, rko_link);
- rd_kafka_toppar_t *rktp = rko->rko_rktp;
- int64_t offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1;
- if (rktp->rktp_app_pos.offset < offset)
- rd_kafka_update_app_pos(
- rk, rktp,
- RD_KAFKA_FETCH_POS(
- offset,
- rd_kafka_message_leader_epoch(
- &rko->rko_u.fetch.rkm.rkm_rkmessage)),
- RD_DO_LOCK);
- rd_kafka_op_destroy(rko);
- }
-
- rd_kafka_app_polled(rk);
-
- return cnt;
-}
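
/* This function backs the public batch consume API.  A usage sketch,
 * assuming an initialized consumer and a queue handle obtained from
 * one of the rd_kafka_queue_get_*() calls below: */
static void batch_consume_example(rd_kafka_queue_t *rkqu) {
        rd_kafka_message_t *msgs[100];
        ssize_t i, n;

        n = rd_kafka_consume_batch_queue(rkqu, 1000 /*timeout ms*/,
                                         msgs, 100);
        for (i = 0; i < n; i++) {
                /* ... process msgs[i] ... */
                rd_kafka_message_destroy(msgs[i]);
        }
}
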
-
-
-
-void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu) {
- if (rkqu->rkqu_is_owner)
- rd_kafka_q_destroy_owner(rkqu->rkqu_q);
- else
- rd_kafka_q_destroy(rkqu->rkqu_q);
- rd_free(rkqu);
-}
-
-rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq) {
- rd_kafka_queue_t *rkqu;
-
- rkqu = rd_calloc(1, sizeof(*rkqu));
-
- rkqu->rkqu_q = rkq;
- rd_kafka_q_keep(rkq);
-
- rkqu->rkqu_rk = rk;
-
- return rkqu;
-}
-
-
-rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk) {
- rd_kafka_q_t *rkq;
- rd_kafka_queue_t *rkqu;
-
- rkq = rd_kafka_q_new(rk);
- rkqu = rd_kafka_queue_new0(rk, rkq);
-        rd_kafka_q_destroy(rkq); /* Lose the refcount from q_new; one is held
-                                  * by queue_new0 */
- rkqu->rkqu_is_owner = 1;
- return rkqu;
-}
-
-
-rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk) {
- return rd_kafka_queue_new0(rk, rk->rk_rep);
-}
-
-
-rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk) {
- if (!rk->rk_cgrp)
- return NULL;
- return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q);
-}
-
-rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk,
- const char *topic,
- int32_t partition) {
- rd_kafka_toppar_t *rktp;
- rd_kafka_queue_t *result;
-
- if (rk->rk_type == RD_KAFKA_PRODUCER)
- return NULL;
-
- rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, /* no ua_on_miss */
- 1 /* create_on_miss */);
-
- if (!rktp)
- return NULL;
-
- result = rd_kafka_queue_new0(rk, rktp->rktp_fetchq);
- rd_kafka_toppar_destroy(rktp);
-
- return result;
-}
-
-rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk) {
- rd_kafka_queue_t *rkqu;
-
- rd_kafka_wrlock(rk);
- if (!rk->rk_background.q) {
- char errstr[256];
-
- if (rd_kafka_background_thread_create(rk, errstr,
- sizeof(errstr))) {
- rd_kafka_log(rk, LOG_ERR, "BACKGROUND",
- "Failed to create background thread: %s",
- errstr);
- rd_kafka_wrunlock(rk);
- return NULL;
- }
- }
-
- rkqu = rd_kafka_queue_new0(rk, rk->rk_background.q);
- rd_kafka_wrunlock(rk);
- return rkqu;
-}
-
-
-rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk,
- rd_kafka_queue_t *rkqu) {
- rd_kafka_q_t *rkq;
-
- if (!rk->rk_logq)
- return RD_KAFKA_RESP_ERR__NOT_CONFIGURED;
-
- if (!rkqu)
- rkq = rk->rk_rep;
- else
- rkq = rkqu->rkqu_q;
- rd_kafka_q_fwd_set(rk->rk_logq, rkq);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst) {
- rd_kafka_q_fwd_set0(src->rkqu_q, dst ? dst->rkqu_q : NULL,
- 1, /* do_lock */
- 1 /* fwd_app */);
-}
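
/* Usage sketch for the forwarding API above: route one partition's
 * fetch queue onto an application queue so a single poll loop serves
 * it.  The topic name is hypothetical. */
static void forward_example(rd_kafka_t *rk) {
        rd_kafka_queue_t *q  = rd_kafka_queue_new(rk);
        rd_kafka_queue_t *pq = rd_kafka_queue_get_partition(rk, "mytopic", 0);

        if (pq) {
                rd_kafka_queue_forward(pq, q); /* pq now feeds q */
                /* ... poll q ...; undo with rd_kafka_queue_forward(pq, NULL) */
                rd_kafka_queue_destroy(pq);
        }
        rd_kafka_queue_destroy(q);
}
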
-
-
-size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu) {
- return (size_t)rd_kafka_q_len(rkqu->rkqu_q);
-}
-
-/**
- * @brief Enable or disable (fd==-1) fd-based wake-ups for the queue
- */
-void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq,
- rd_socket_t fd,
- const void *payload,
- size_t size) {
- struct rd_kafka_q_io *qio = NULL;
-
- if (fd != -1) {
- qio = rd_malloc(sizeof(*qio) + size);
- qio->fd = fd;
- qio->size = size;
- qio->payload = (void *)(qio + 1);
- qio->sent = rd_false;
- qio->event_cb = NULL;
- qio->event_cb_opaque = NULL;
- memcpy(qio->payload, payload, size);
- }
-
- mtx_lock(&rkq->rkq_lock);
- if (rkq->rkq_qio) {
- rd_free(rkq->rkq_qio);
- rkq->rkq_qio = NULL;
- }
-
- if (fd != -1) {
- rkq->rkq_qio = qio;
- }
-
- mtx_unlock(&rkq->rkq_lock);
-}
-
-void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu,
- int fd,
- const void *payload,
- size_t size) {
- rd_kafka_q_io_event_enable(rkqu->rkqu_q, fd, payload, size);
-}
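
/* Usage sketch for fd-based wake-ups: librdkafka writes the payload to
 * the fd when the queue goes from empty to non-empty, so a plain
 * poll(2) loop can multiplex queue activity with other I/O. */
#include <poll.h>
#include <unistd.h>

static void io_event_example(rd_kafka_queue_t *rkqu) {
        int fds[2];
        char buf[8];
        struct pollfd pfd;

        if (pipe(fds) == -1)
                return;
        rd_kafka_queue_io_event_enable(rkqu, fds[1], "1", 1);

        pfd.fd     = fds[0];
        pfd.events = POLLIN;
        if (poll(&pfd, 1, 1000 /*ms*/) > 0 && (pfd.revents & POLLIN)) {
                (void)read(fds[0], buf, sizeof(buf)); /* drain wake-up */
                /* ... now serve the queue, e.g. rd_kafka_queue_poll() ... */
        }
}
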
-
-
-void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu) {
- rd_kafka_q_yield(rkqu->rkqu_q);
-}
-
-
-/**
- * @brief Enable or disable (event_cb==NULL) callback-based wake-ups for the queue
- */
-void rd_kafka_q_cb_event_enable(rd_kafka_q_t *rkq,
- void (*event_cb)(rd_kafka_t *rk, void *opaque),
- void *opaque) {
- struct rd_kafka_q_io *qio = NULL;
-
- if (event_cb) {
- qio = rd_malloc(sizeof(*qio));
- qio->fd = -1;
- qio->size = 0;
- qio->payload = NULL;
- qio->event_cb = event_cb;
- qio->event_cb_opaque = opaque;
- }
-
- mtx_lock(&rkq->rkq_lock);
- if (rkq->rkq_qio) {
- rd_free(rkq->rkq_qio);
- rkq->rkq_qio = NULL;
- }
-
- if (event_cb) {
- rkq->rkq_qio = qio;
- }
-
- mtx_unlock(&rkq->rkq_lock);
-}
-
-void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu,
- void (*event_cb)(rd_kafka_t *rk,
- void *opaque),
- void *opaque) {
- rd_kafka_q_cb_event_enable(rkqu->rkqu_q, event_cb, opaque);
-}
-
-
-/**
- * Helper: wait for a single op on 'rkq' and return its error,
- * or .._TIMED_OUT on timeout.
- */
-rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms) {
- rd_kafka_op_t *rko;
- rd_kafka_resp_err_t err;
-
- rko = rd_kafka_q_pop(rkq, rd_timeout_us(timeout_ms), 0);
- if (!rko)
- err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- else {
- err = rko->rko_err;
- rd_kafka_op_destroy(rko);
- }
-
- return err;
-}
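-
-/* Illustrative sketch (not part of the original source): how this helper
- * is typically combined with a temporary queue to turn an async op into a
- * synchronous call. The function name is hypothetical; the op is assumed
- * to be served by the main thread via rk->rk_ops. */
-#if 0 /* example only */
-static rd_kafka_resp_err_t example_sync_op(rd_kafka_t *rk,
- rd_kafka_op_t *rko) {
- rd_kafka_q_t *tmpq = rd_kafka_q_new(rk);
- rd_kafka_resp_err_t err;
-
- rd_kafka_op_set_replyq(rko, tmpq, NULL);
- rd_kafka_q_enq(rk->rk_ops, rko);
-
- err = rd_kafka_q_wait_result(tmpq, 5000 /* 5s */);
- rd_kafka_q_destroy_owner(tmpq);
-
- return err;
-}
-#endif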
-
-
-/**
- * Apply \p callback on each op in queue.
- * If the callback wishes to remove the rko it must do so
- * using rd_kafka_q_deq0().
- *
- * @returns the sum of \p callback() return values.
- * @remark rkq will be locked, callers should take care not to
- * interact with \p rkq through other means from the callback to avoid
- * deadlocks.
- */
-int rd_kafka_q_apply(rd_kafka_q_t *rkq,
- int (*callback)(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- void *opaque),
- void *opaque) {
- rd_kafka_op_t *rko, *next;
- rd_kafka_q_t *fwdq;
- int cnt = 0;
-
- mtx_lock(&rkq->rkq_lock);
- if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- mtx_unlock(&rkq->rkq_lock);
- cnt = rd_kafka_q_apply(fwdq, callback, opaque);
- rd_kafka_q_destroy(fwdq);
- return cnt;
- }
-
- next = TAILQ_FIRST(&rkq->rkq_q);
- while ((rko = next)) {
- next = TAILQ_NEXT(next, rko_link);
- cnt += callback(rkq, rko, opaque);
- }
-
- rd_kafka_q_mark_served(rkq);
-
- mtx_unlock(&rkq->rkq_lock);
-
- return cnt;
-}
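-
-/* Illustrative sketch (not part of the original source): a q_apply
- * callback that removes and destroys all ops of a given type, using
- * rd_kafka_q_deq0() for the removal as the contract above requires.
- * The callback name is hypothetical. */
-#if 0 /* example only */
-static int example_purge_type_cb(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- void *opaque) {
- rd_kafka_op_type_t type = *(rd_kafka_op_type_t *)opaque;
-
- if (rko->rko_type != type)
- return 0;
-
- rd_kafka_q_deq0(rkq, rko); /* unlink from queue */
- rd_kafka_op_destroy(rko);
-
- return 1; /* counted in rd_kafka_q_apply()'s return value */
-}
-#endif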
-
-/**
- * @brief Convert relative to absolute offsets and also purge any messages
- * that are older than \p min_offset.
- * @remark Error ops with ERR__NOT_IMPLEMENTED will not be purged since
- * they are used to indicate unknown compression codecs, and compressed
- * messagesets may have a starting offset lower than what we requested.
- * @remark \p rkq locking is not performed (caller's responsibility)
- * @remark Must NOT be used on fwdq.
- */
-void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq,
- int64_t min_offset,
- int64_t base_offset) {
- rd_kafka_op_t *rko, *next;
- int adj_len = 0;
- int64_t adj_size = 0;
-
- rd_kafka_assert(NULL, !rkq->rkq_fwdq);
-
- next = TAILQ_FIRST(&rkq->rkq_q);
- while ((rko = next)) {
- next = TAILQ_NEXT(next, rko_link);
-
- if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH))
- continue;
-
- rko->rko_u.fetch.rkm.rkm_offset += base_offset;
-
- if (rko->rko_u.fetch.rkm.rkm_offset < min_offset &&
- rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) {
- adj_len++;
- adj_size += rko->rko_len;
- TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
- rd_kafka_op_destroy(rko);
- continue;
- }
- }
-
-
- rkq->rkq_qlen -= adj_len;
- rkq->rkq_qsize -= adj_size;
-}
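-
-/* Worked example (not part of the original source): with base_offset=100
- * and min_offset=102, fetch ops carrying relative offsets 0..4 become
- * absolute offsets 100..104; the ops now below 102 (100 and 101) are
- * removed and destroyed, unless they carry ERR__NOT_IMPLEMENTED as
- * described above. */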
-
-
-/**
- * @brief Print information and contents of queue
- */
-void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq) {
- mtx_lock(&rkq->rkq_lock);
- fprintf(fp,
- "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, "
- "%" PRId64 " bytes)\n",
- rkq, rkq->rkq_name, rkq->rkq_refcnt, rkq->rkq_flags,
- rkq->rkq_qlen, rkq->rkq_qsize);
-
- if (rkq->rkq_qio)
- fprintf(fp, " QIO fd %d\n", (int)rkq->rkq_qio->fd);
- if (rkq->rkq_serve)
- fprintf(fp, " Serve callback %p, opaque %p\n", rkq->rkq_serve,
- rkq->rkq_opaque);
-
- if (rkq->rkq_fwdq) {
- fprintf(fp, " Forwarded ->\n");
- rd_kafka_q_dump(fp, rkq->rkq_fwdq);
- } else {
- rd_kafka_op_t *rko;
-
- if (!TAILQ_EMPTY(&rkq->rkq_q))
- fprintf(fp, " Queued ops:\n");
- TAILQ_FOREACH(rko, &rkq->rkq_q, rko_link) {
- fprintf(fp,
- " %p %s (v%" PRId32
- ", flags 0x%x, "
- "prio %d, len %" PRId32
- ", source %s, "
- "replyq %p)\n",
- rko, rd_kafka_op2str(rko->rko_type),
- rko->rko_version, rko->rko_flags, rko->rko_prio,
- rko->rko_len,
-#if ENABLE_DEVEL
- rko->rko_source
-#else
- "-"
-#endif
- ,
- rko->rko_replyq.q);
- }
- }
-
- mtx_unlock(&rkq->rkq_lock);
-}
-
-
-void rd_kafka_enq_once_trigger_destroy(void *ptr) {
- rd_kafka_enq_once_t *eonce = ptr;
-
- rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__DESTROY, "destroy");
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h
deleted file mode 100644
index 0d50f5870..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h
+++ /dev/null
@@ -1,1171 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_QUEUE_H_
-#define _RDKAFKA_QUEUE_H_
-
-#include "rdkafka_op.h"
-#include "rdkafka_int.h"
-
-#ifdef _WIN32
-#include <io.h> /* for _write() */
-#endif
-
-/** @brief Queueing strategy */
-#define RD_KAFKA_QUEUE_FIFO 0
-#define RD_KAFKA_QUEUE_LIFO 1
-
-TAILQ_HEAD(rd_kafka_op_tailq, rd_kafka_op_s);
-
-/**
- * @struct Queue for rd_kafka_op_t*.
- *
- * @remark All readers of the queue must call rd_kafka_q_mark_served()
- * after reading the queue (while still holding the queue lock) to
- * clear the wakeup-sent flag.
- */
-struct rd_kafka_q_s {
- mtx_t rkq_lock;
- cnd_t rkq_cond;
- struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue.
- * Used in place of this queue
- * for all operations. */
-
- struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s) */
- int rkq_qlen; /* Number of entries in queue */
- int64_t rkq_qsize; /* Size of all entries in queue */
- int rkq_refcnt;
- int rkq_flags;
-#define RD_KAFKA_Q_F_ALLOCATED 0x1 /* Allocated: rd_free on destroy */
-#define RD_KAFKA_Q_F_READY \
- 0x2 /* Queue is ready to be used. \
- * Flag is cleared on destroy */
-#define RD_KAFKA_Q_F_FWD_APP \
- 0x4 /* Queue is being forwarded by a call \
- * to rd_kafka_queue_forward. */
-#define RD_KAFKA_Q_F_YIELD \
- 0x8 /* Have waiters return even if \
- * no rko was enqueued. \
- * This is used to wake up a waiter \
- * by triggering the cond-var \
- * but without having to enqueue \
- * an op. */
-
- rd_kafka_t *rkq_rk;
- struct rd_kafka_q_io *rkq_qio; /* FD-based application signalling */
-
- /* Op serve callback (optional).
- * Mainly used for forwarded queues to use the original queue's
- * serve function from the forwarded position.
- * Shall return 1 if op was handled, else 0. */
- rd_kafka_q_serve_cb_t *rkq_serve;
- void *rkq_opaque;
-
-#if ENABLE_DEVEL
- char rkq_name[64]; /* Debugging: queue name (FUNC:LINE) */
-#else
- const char *rkq_name; /* Debugging: queue name (FUNC) */
-#endif
-};
-
-
-/* Application signalling state holder. */
-struct rd_kafka_q_io {
- /* For FD-based signalling */
- rd_socket_t fd;
- void *payload;
- size_t size;
- rd_bool_t sent; /**< Wake-up has been sent.
- * This field is reset to false by the queue
- * reader, allowing a new wake-up to be sent by a
- * subsequent writer. */
- /* For callback-based signalling */
- void (*event_cb)(rd_kafka_t *rk, void *opaque);
- void *event_cb_opaque;
-};
-
-
-
-/**
- * @return true if queue is ready/enabled, else false.
- * @remark queue lock must be held by the caller (if applicable)
- */
-static RD_INLINE RD_UNUSED int rd_kafka_q_ready(rd_kafka_q_t *rkq) {
- return rkq->rkq_flags & RD_KAFKA_Q_F_READY;
-}
-
-
-
-void rd_kafka_q_init0(rd_kafka_q_t *rkq,
- rd_kafka_t *rk,
- const char *func,
- int line);
-#define rd_kafka_q_init(rkq, rk) \
- rd_kafka_q_init0(rkq, rk, __FUNCTION__, __LINE__)
-rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, const char *func, int line);
-#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk, __FUNCTION__, __LINE__)
-void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq);
-
-#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock)
-#define rd_kafka_q_unlock(rkqu) mtx_unlock(&(rkqu)->rkq_lock)
-
-static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_keep(rd_kafka_q_t *rkq) {
- mtx_lock(&rkq->rkq_lock);
- rkq->rkq_refcnt++;
- mtx_unlock(&rkq->rkq_lock);
- return rkq;
-}
-
-static RD_INLINE RD_UNUSED rd_kafka_q_t *
-rd_kafka_q_keep_nolock(rd_kafka_q_t *rkq) {
- rkq->rkq_refcnt++;
- return rkq;
-}
-
-
-/**
- * @returns the queue's name (used for debugging)
- */
-static RD_INLINE RD_UNUSED const char *rd_kafka_q_name(rd_kafka_q_t *rkq) {
- return rkq->rkq_name;
-}
-
-/**
- * @returns the final destination queue name (after forwarding)
- * @remark rkq MUST NOT be locked
- */
-static RD_INLINE RD_UNUSED const char *rd_kafka_q_dest_name(rd_kafka_q_t *rkq) {
- const char *ret;
- mtx_lock(&rkq->rkq_lock);
- if (rkq->rkq_fwdq)
- ret = rd_kafka_q_dest_name(rkq->rkq_fwdq);
- else
- ret = rd_kafka_q_name(rkq);
- mtx_unlock(&rkq->rkq_lock);
- return ret;
-}
-
-/**
- * @brief Disable a queue.
- * Attempting to enqueue ops to the queue will destroy the ops.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_disable0(rd_kafka_q_t *rkq,
- int do_lock) {
- if (do_lock)
- mtx_lock(&rkq->rkq_lock);
- rkq->rkq_flags &= ~RD_KAFKA_Q_F_READY;
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
-}
-#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1 /*lock*/)
-
-int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock);
-#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1 /*lock*/)
-void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq,
- rd_kafka_toppar_t *rktp,
- int version);
-
-/**
- * @brief Lose a reference to the queue; when the refcount reaches 0
- * the queue will be destroyed.
- *
- * @param disable Also disable the queue, to be used by owner of the queue.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_destroy0(rd_kafka_q_t *rkq,
- int disable) {
- int do_delete = 0;
-
- if (disable) {
- /* To avoid recursive locking (from ops being purged
- * that reference this queue somehow),
- * we disable the queue and purge it with individual
- * locking. */
- rd_kafka_q_disable0(rkq, 1 /*lock*/);
- rd_kafka_q_purge0(rkq, 1 /*lock*/);
- }
-
- mtx_lock(&rkq->rkq_lock);
- rd_kafka_assert(NULL, rkq->rkq_refcnt > 0);
- do_delete = !--rkq->rkq_refcnt;
- mtx_unlock(&rkq->rkq_lock);
-
- if (unlikely(do_delete))
- rd_kafka_q_destroy_final(rkq);
-}
-
-#define rd_kafka_q_destroy(rkq) rd_kafka_q_destroy0(rkq, 0 /*dont-disable*/)
-
-/**
- * @brief Queue destroy method to be used by the owner (poller) of
- * the queue. The only difference to q_destroy() is that this
- * method also disables the queue so that any q_enq() operations
- * will fail.
- * If the poller fails to disable the queue when destroying its
- * queue reference, ops may still be enqueued on the queue
- * but there is no one left to poll it, possibly resulting in a
- * hang on termination due to refcounts held by the ops.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_destroy_owner(rd_kafka_q_t *rkq) {
- rd_kafka_q_destroy0(rkq, 1 /*disable*/);
-}
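-
-/* Illustrative sketch (not part of the original source): the owner/poller
- * lifecycle the two destroy variants imply. The owner creates the queue,
- * serves it, and must use the _owner variant on destruction so late
- * enqueuers have their ops failed rather than stranded. The function name
- * is hypothetical. */
-#if 0 /* example only */
-static void example_owner_lifecycle(rd_kafka_t *rk) {
- rd_kafka_q_t *rkq = rd_kafka_q_new(rk);
- rd_kafka_op_t *rko;
-
- while ((rko = rd_kafka_q_pop(rkq, rd_timeout_us(100), 0)))
- rd_kafka_op_destroy(rko); /* serve the op here */
-
- rd_kafka_q_destroy_owner(rkq); /* disable + purge + unref */
-}
-#endif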
-
-
-/**
- * Reset a queue.
- * WARNING: All messages will be lost and leaked.
- * NOTE: No locking is performed.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_reset(rd_kafka_q_t *rkq) {
- TAILQ_INIT(&rkq->rkq_q);
- rd_dassert(TAILQ_EMPTY(&rkq->rkq_q));
- rkq->rkq_qlen = 0;
- rkq->rkq_qsize = 0;
-}
-
-
-
-/**
- * Forward 'srcq' to 'destq'
- */
-void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq,
- rd_kafka_q_t *destq,
- int do_lock,
- int fwd_app);
-#define rd_kafka_q_fwd_set(S, D) \
- rd_kafka_q_fwd_set0(S, D, 1 /*lock*/, 0 /*no fwd_app*/)
-
-/**
- * @returns the forward queue (if any) with its refcount increased.
- * @locks rd_kafka_q_lock(rkq) == !do_lock
- */
-static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_fwd_get(rd_kafka_q_t *rkq,
- int do_lock) {
- rd_kafka_q_t *fwdq;
- if (do_lock)
- mtx_lock(&rkq->rkq_lock);
-
- if ((fwdq = rkq->rkq_fwdq))
- rd_kafka_q_keep(fwdq);
-
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
-
- return fwdq;
-}
-
-
-/**
- * @returns true if queue is forwarded, else false.
- *
- * @remark Thread-safe.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded(rd_kafka_q_t *rkq) {
- int r;
- mtx_lock(&rkq->rkq_lock);
- r = rkq->rkq_fwdq ? 1 : 0;
- mtx_unlock(&rkq->rkq_lock);
- return r;
-}
-
-
-
-/**
- * @brief Trigger an IO event for this queue.
- *
- * @remark Queue MUST be locked
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_io_event(rd_kafka_q_t *rkq) {
-
- if (likely(!rkq->rkq_qio))
- return;
-
- if (rkq->rkq_qio->event_cb) {
- rkq->rkq_qio->event_cb(rkq->rkq_rk,
- rkq->rkq_qio->event_cb_opaque);
- return;
- }
-
-
- /* Only one wake-up event should be sent per non-polling period.
- * As the queue reader polls/reads the channel it calls
- * rd_kafka_q_mark_served() to reset the wakeup-sent flag, allowing
- * further wakeups in the next non-polling period. */
- if (rkq->rkq_qio->sent)
- return; /* Wake-up event already written */
-
- rkq->rkq_qio->sent = rd_true;
-
- /* Write wake-up event to socket.
- * Ignore errors, not much to do anyway. */
- if (rd_socket_write(rkq->rkq_qio->fd, rkq->rkq_qio->payload,
- (int)rkq->rkq_qio->size) == -1)
- ;
-}
-
-
-/**
- * @brief rko->rko_prio comparator
- * @remark Descending order: higher priority takes precedence.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_op_cmp_prio(const void *_a,
- const void *_b) {
- const rd_kafka_op_t *a = _a, *b = _b;
-
- return RD_CMP(b->rko_prio, a->rko_prio);
-}
-
-
-/**
- * @brief Wake up waiters without enqueuing an op.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_yield(rd_kafka_q_t *rkq) {
- rd_kafka_q_t *fwdq;
-
- mtx_lock(&rkq->rkq_lock);
-
- rd_dassert(rkq->rkq_refcnt > 0);
-
- if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
- /* Queue has been disabled */
- mtx_unlock(&rkq->rkq_lock);
- return;
- }
-
- if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- rkq->rkq_flags |= RD_KAFKA_Q_F_YIELD;
- cnd_broadcast(&rkq->rkq_cond);
- if (rkq->rkq_qlen == 0)
- rd_kafka_q_io_event(rkq);
-
- mtx_unlock(&rkq->rkq_lock);
- } else {
- mtx_unlock(&rkq->rkq_lock);
- rd_kafka_q_yield(fwdq);
- rd_kafka_q_destroy(fwdq);
- }
-}
-
-/**
- * @brief Low-level unprotected enqueue that only performs
- * the actual queue enqueue and counter updates.
- * @remark Will not perform locking, signaling, fwdq, READY checking, etc.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_q_enq0(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) {
- if (likely(!rko->rko_prio))
- TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link);
- else if (at_head)
- TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link);
- else
- TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, rko_link,
- rd_kafka_op_cmp_prio);
- rkq->rkq_qlen++;
- rkq->rkq_qsize += rko->rko_len;
-}
-
-
-/**
- * @brief Enqueue \p rko either at head or tail of \p rkq.
- *
- * The provided \p rko is either enqueued or destroyed.
- *
- * \p orig_destq is the original (outermost) dest queue for which
- * this op was enqueued, before any queue forwarding has kicked in.
- * The rko_serve callback from the orig_destq will be set on the rko
- * if there is no rko_serve callback already set, and the \p rko isn't
- * failed because the final queue is disabled.
- *
- * @returns 1 if op was enqueued or 0 if queue is disabled and
- * there was no replyq to enqueue on in which case the rko is destroyed.
- *
- * @locality any thread.
- */
-static RD_INLINE RD_UNUSED int rd_kafka_q_enq1(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_t *orig_destq,
- int at_head,
- int do_lock) {
- rd_kafka_q_t *fwdq;
-
- if (do_lock)
- mtx_lock(&rkq->rkq_lock);
-
- rd_dassert(rkq->rkq_refcnt > 0);
-
- if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
- /* Queue has been disabled, reply to and fail the rko. */
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
-
- return rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__DESTROY);
- }
-
- if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- if (!rko->rko_serve && orig_destq->rkq_serve) {
- /* Store original queue's serve callback and opaque
- * prior to forwarding. */
- rko->rko_serve = orig_destq->rkq_serve;
- rko->rko_serve_opaque = orig_destq->rkq_opaque;
- }
-
- rd_kafka_q_enq0(rkq, rko, at_head);
- cnd_signal(&rkq->rkq_cond);
- if (rkq->rkq_qlen == 1)
- rd_kafka_q_io_event(rkq);
-
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
- } else {
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
- rd_kafka_q_enq1(fwdq, rko, orig_destq, at_head, 1 /*do lock*/);
- rd_kafka_q_destroy(fwdq);
- }
-
- return 1;
-}
-
-/**
- * @brief Enqueue the 'rko' op at the tail of the queue 'rkq'.
- *
- * The provided 'rko' is either enqueued or destroyed.
- *
- * @returns 1 if op was enqueued or 0 if queue is disabled and
- * there was no replyq to enqueue on in which case the rko is destroyed.
- *
- * @locality any thread.
- * @locks rkq MUST NOT be locked
- */
-static RD_INLINE RD_UNUSED int rd_kafka_q_enq(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- return rd_kafka_q_enq1(rkq, rko, rkq, 0 /*at tail*/, 1 /*do lock*/);
-}
-
-
-/**
- * @brief Re-enqueue rko at head of rkq.
- *
- * The provided 'rko' is either enqueued or destroyed.
- *
- * @returns 1 if op was enqueued or 0 if queue is disabled and
- * there was no replyq to enqueue on in which case the rko is destroyed.
- *
- * @locality any thread
- * @locks rkq MUST BE locked
- */
-static RD_INLINE RD_UNUSED int rd_kafka_q_reenq(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- return rd_kafka_q_enq1(rkq, rko, rkq, 1 /*at head*/, 0 /*don't lock*/);
-}
-
-
-/**
- * Dequeue 'rko' from queue 'rkq'.
- *
- * NOTE: rkq_lock MUST be held
- * Locality: any thread
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_deq0(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_dassert(rkq->rkq_qlen > 0 &&
- rkq->rkq_qsize >= (int64_t)rko->rko_len);
-
- TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
- rkq->rkq_qlen--;
- rkq->rkq_qsize -= rko->rko_len;
-}
-
-
-/**
- * @brief Mark queue as served / read.
- *
- * This is currently used by the queue reader side to reset the io-event
- * wakeup flag.
- *
- * Should be called by all queue readers.
- *
- * @locks_required rkq must be locked.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_q_mark_served(rd_kafka_q_t *rkq) {
- if (rkq->rkq_qio)
- rkq->rkq_qio->sent = rd_false;
-}
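-
-/* Illustrative sketch (not part of the original source): the reader-side
- * wake-up protocol implied above. After being woken via the qio fd the
- * reader drains the wake-up byte(s), dequeues ops, and calls
- * rd_kafka_q_mark_served() while still holding the lock so the next
- * writer may send a fresh wake-up. The function name is hypothetical;
- * read() requires <unistd.h>. */
-#if 0 /* example only */
-static void example_reader_iteration(rd_kafka_q_t *rkq, int wakeup_rfd) {
- char buf[8];
-
- (void)read(wakeup_rfd, buf, sizeof(buf)); /* drain wake-up byte(s) */
-
- mtx_lock(&rkq->rkq_lock);
- /* ... dequeue ops with rd_kafka_q_deq0() here ... */
- rd_kafka_q_mark_served(rkq); /* allow the next wake-up */
- mtx_unlock(&rkq->rkq_lock);
-
- /* Serve/destroy the dequeued ops outside the lock. */
-}
-#endif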
-
-
-/**
- * Concat all elements of 'srcq' onto tail of 'rkq'.
- * 'rkq' will be locked (if 'do_lock'==1), but 'srcq' will not.
- * NOTE: 'srcq' will be reset.
- *
- * Locality: any thread.
- *
- * @returns 0 if operation was performed or -1 if rkq is disabled.
- */
-static RD_INLINE RD_UNUSED int
-rd_kafka_q_concat0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) {
- int r = 0;
-
- while (srcq->rkq_fwdq) /* Resolve source queue */
- srcq = srcq->rkq_fwdq;
- if (unlikely(srcq->rkq_qlen == 0))
- return 0; /* Don't do anything if source queue is empty */
-
- if (do_lock)
- mtx_lock(&rkq->rkq_lock);
- if (!rkq->rkq_fwdq) {
- rd_kafka_op_t *rko;
-
- rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) || srcq->rkq_qlen > 0);
- if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
- return -1;
- }
- /* First insert any prioritized ops from srcq
- * in the right position in rkq. */
- while ((rko = TAILQ_FIRST(&srcq->rkq_q)) && rko->rko_prio > 0) {
- TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
- TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *,
- rko_link, rd_kafka_op_cmp_prio);
- }
-
- TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link);
- if (rkq->rkq_qlen == 0)
- rd_kafka_q_io_event(rkq);
- rkq->rkq_qlen += srcq->rkq_qlen;
- rkq->rkq_qsize += srcq->rkq_qsize;
- cnd_signal(&rkq->rkq_cond);
-
- rd_kafka_q_mark_served(srcq);
- rd_kafka_q_reset(srcq);
- } else
- r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq,
- srcq, rkq->rkq_fwdq ? do_lock : 0);
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
-
- return r;
-}
-
-#define rd_kafka_q_concat(dstq, srcq) rd_kafka_q_concat0(dstq, srcq, 1 /*lock*/)
-
-
-/**
- * @brief Prepend all elements of 'srcq' onto head of 'rkq'.
- * 'rkq' will be locked (if 'do_lock'==1), but 'srcq' will not.
- * 'srcq' will be reset.
- *
- * @remark Will not respect priority of ops, srcq will be prepended in its
- * original form to rkq.
- *
- * @locality any thread.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_q_prepend0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) {
- if (do_lock)
- mtx_lock(&rkq->rkq_lock);
- if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) {
- /* FIXME: prio-aware */
- /* Concat rkq on srcq */
- TAILQ_CONCAT(&srcq->rkq_q, &rkq->rkq_q, rko_link);
- /* Move srcq to rkq */
- TAILQ_MOVE(&rkq->rkq_q, &srcq->rkq_q, rko_link);
- if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0)
- rd_kafka_q_io_event(rkq);
- rkq->rkq_qlen += srcq->rkq_qlen;
- rkq->rkq_qsize += srcq->rkq_qsize;
-
- rd_kafka_q_mark_served(srcq);
- rd_kafka_q_reset(srcq);
- } else
- rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq,
- srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq,
- rkq->rkq_fwdq ? do_lock : 0);
- if (do_lock)
- mtx_unlock(&rkq->rkq_lock);
-}
-
-#define rd_kafka_q_prepend(dstq, srcq) \
- rd_kafka_q_prepend0(dstq, srcq, 1 /*lock*/)
-
-
-/* Returns the number of elements in the queue */
-static RD_INLINE RD_UNUSED int rd_kafka_q_len(rd_kafka_q_t *rkq) {
- int qlen;
- rd_kafka_q_t *fwdq;
- mtx_lock(&rkq->rkq_lock);
- if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- qlen = rkq->rkq_qlen;
- mtx_unlock(&rkq->rkq_lock);
- } else {
- mtx_unlock(&rkq->rkq_lock);
- qlen = rd_kafka_q_len(fwdq);
- rd_kafka_q_destroy(fwdq);
- }
- return qlen;
-}
-
-/* Returns the total size of elements in the queue */
-static RD_INLINE RD_UNUSED uint64_t rd_kafka_q_size(rd_kafka_q_t *rkq) {
- uint64_t sz;
- rd_kafka_q_t *fwdq;
- mtx_lock(&rkq->rkq_lock);
- if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
- sz = rkq->rkq_qsize;
- mtx_unlock(&rkq->rkq_lock);
- } else {
- mtx_unlock(&rkq->rkq_lock);
- sz = rd_kafka_q_size(fwdq);
- rd_kafka_q_destroy(fwdq);
- }
- return sz;
-}
-
-/**
- * @brief Construct a temporary on-stack replyq with increased
- * \p rkq refcount (unless NULL), version, and debug id.
- */
-static RD_INLINE RD_UNUSED rd_kafka_replyq_t
-rd_kafka_replyq_make(rd_kafka_q_t *rkq, int version, const char *id) {
- rd_kafka_replyq_t replyq = RD_ZERO_INIT;
-
- if (rkq) {
- replyq.q = rd_kafka_q_keep(rkq);
- replyq.version = version;
-#if ENABLE_DEVEL
- replyq._id = rd_strdup(id);
-#endif
- }
-
- return replyq;
-}
-
-/* Construct temporary on-stack replyq with increased Q refcount and
- * optional VERSION. */
-#define RD_KAFKA_REPLYQ(Q, VERSION) \
- rd_kafka_replyq_make(Q, VERSION, __FUNCTION__)
-
-/* Construct temporary on-stack replyq for indicating no replyq. */
-#if ENABLE_DEVEL
-#define RD_KAFKA_NO_REPLYQ \
- (rd_kafka_replyq_t) { \
- NULL, 0, NULL \
- }
-#else
-#define RD_KAFKA_NO_REPLYQ \
- (rd_kafka_replyq_t) { \
- NULL, 0 \
- }
-#endif
-
-
-/**
- * @returns true if the replyq is valid, else false.
- */
-static RD_INLINE RD_UNUSED rd_bool_t
-rd_kafka_replyq_is_valid(rd_kafka_replyq_t *replyq) {
- rd_bool_t valid = rd_true;
-
- if (!replyq->q)
- return rd_false;
-
- rd_kafka_q_lock(replyq->q);
- valid = rd_kafka_q_ready(replyq->q);
- rd_kafka_q_unlock(replyq->q);
-
- return valid;
-}
-
-
-
-/**
- * Set up replyq.
- * Q refcnt is increased.
- */
-static RD_INLINE RD_UNUSED void rd_kafka_set_replyq(rd_kafka_replyq_t *replyq,
- rd_kafka_q_t *rkq,
- int32_t version) {
- replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL;
- replyq->version = version;
-#if ENABLE_DEVEL
- replyq->_id = rd_strdup(__FUNCTION__);
-#endif
-}
-
-/**
- * Set rko's replyq with an optional version (versionptr != NULL).
- * Q refcnt is increased.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_op_set_replyq(rd_kafka_op_t *rko,
- rd_kafka_q_t *rkq,
- rd_atomic32_t *versionptr) {
- rd_kafka_set_replyq(&rko->rko_replyq, rkq,
- versionptr ? rd_atomic32_get(versionptr) : 0);
-}
-
-/* Set reply rko's version from replyq's version */
-#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) \
- do { \
- (REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \
- } while (0)
-
-
-/* Clear replyq holder without decreasing any .q references. */
-static RD_INLINE RD_UNUSED void
-rd_kafka_replyq_clear(rd_kafka_replyq_t *replyq) {
- memset(replyq, 0, sizeof(*replyq));
-}
-
-/**
- * @brief Make a copy of \p src in \p dst, with its own queue reference
- */
-static RD_INLINE RD_UNUSED void rd_kafka_replyq_copy(rd_kafka_replyq_t *dst,
- rd_kafka_replyq_t *src) {
- dst->version = src->version;
- dst->q = src->q;
- if (dst->q)
- rd_kafka_q_keep(dst->q);
-#if ENABLE_DEVEL
- if (src->_id)
- dst->_id = rd_strdup(src->_id);
- else
- dst->_id = NULL;
-#endif
-}
-
-
-/**
- * Clear replyq holder and destroy any .q references.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_replyq_destroy(rd_kafka_replyq_t *replyq) {
- if (replyq->q)
- rd_kafka_q_destroy(replyq->q);
-#if ENABLE_DEVEL
- if (replyq->_id) {
- rd_free(replyq->_id);
- replyq->_id = NULL;
- }
-#endif
- rd_kafka_replyq_clear(replyq);
-}
-
-
-/**
- * @brief Wrapper for rd_kafka_q_enq() that takes a replyq,
- * steals its queue reference, enqueues the op with the replyq version,
- * and then destroys the queue reference.
- *
- * If \p version is non-zero it is used as the op's version,
- * else \p replyq->version is used.
- *
- * @returns Same as rd_kafka_q_enq()
- */
-static RD_INLINE RD_UNUSED int rd_kafka_replyq_enq(rd_kafka_replyq_t *replyq,
- rd_kafka_op_t *rko,
- int version) {
- rd_kafka_q_t *rkq = replyq->q;
- int r;
-
- if (version)
- rko->rko_version = version;
- else
- rko->rko_version = replyq->version;
-
- /* The replyq's queue reference is released after we've enqueued the
- * rko, so clear it here. */
- replyq->q = NULL; /* destroyed separately below */
-
-#if ENABLE_DEVEL
- if (replyq->_id) {
- rd_free(replyq->_id);
- replyq->_id = NULL;
- }
-#endif
-
- /* Retain replyq->version since it is used by buf_callback
- * when dispatching the callback. */
-
- r = rd_kafka_q_enq(rkq, rko);
-
- rd_kafka_q_destroy(rkq);
-
- return r;
-}
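-
-/* Illustrative sketch (not part of the original source): the
- * request/reply pattern built from the helpers above. The requester
- * attaches an on-stack replyq (with a version) to its op; the handler
- * later enqueues a reply with rd_kafka_replyq_enq(), which propagates
- * the version and consumes the queue reference. Function names are
- * hypothetical. */
-#if 0 /* example only */
-static void example_send_request(rd_kafka_t *rk,
- rd_kafka_q_t *myq,
- rd_kafka_op_type_t type) {
- rd_kafka_op_t *rko = rd_kafka_op_new(type);
-
- rko->rko_replyq = RD_KAFKA_REPLYQ(myq, 0);
- rd_kafka_q_enq(rk->rk_ops, rko);
-}
-
-static void example_handle_request(rd_kafka_op_t *rko) {
- rd_kafka_op_t *reply = rd_kafka_op_new(rko->rko_type);
-
- reply->rko_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- /* Propagates rko_replyq.version and consumes the queue reference */
- rd_kafka_replyq_enq(&rko->rko_replyq, reply, 0);
-}
-#endif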
-
-
-
-rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq,
- rd_ts_t timeout_us,
- int32_t version,
- rd_kafka_q_cb_type_t cb_type,
- rd_kafka_q_serve_cb_t *callback,
- void *opaque);
-rd_kafka_op_t *
-rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version);
-int rd_kafka_q_serve(rd_kafka_q_t *rkq,
- int timeout_ms,
- int max_cnt,
- rd_kafka_q_cb_type_t cb_type,
- rd_kafka_q_serve_cb_t *callback,
- void *opaque);
-
-
-int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq,
- rd_kafka_q_t *srcq,
- int cnt,
- int do_locks);
-
-int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq,
- int timeout_ms,
- rd_kafka_message_t **rkmessages,
- size_t rkmessages_size);
-rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms);
-
-int rd_kafka_q_apply(rd_kafka_q_t *rkq,
- int (*callback)(rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- void *opaque),
- void *opaque);
-
-void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq,
- int64_t min_offset,
- int64_t base_offset);
-
-/**
- * @returns the last op in the queue matching \p op_type and \p allow_err (bool)
- * @remark The \p rkq must be properly locked before this call, the returned rko
- * is not removed from the queue and may thus not be held for longer
- * than the lock is held.
- */
-static RD_INLINE RD_UNUSED rd_kafka_op_t *
-rd_kafka_q_last(rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type, int allow_err) {
- rd_kafka_op_t *rko;
- TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) {
- if (rko->rko_type == op_type && (allow_err || !rko->rko_err))
- return rko;
- }
-
- return NULL;
-}
-
-void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq,
- rd_socket_t fd,
- const void *payload,
- size_t size);
-
-/* Public interface */
-struct rd_kafka_queue_s {
- rd_kafka_q_t *rkqu_q;
- rd_kafka_t *rkqu_rk;
- int rkqu_is_owner; /**< Is owner/creator of rkqu_q */
-};
-
-
-rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq);
-
-void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq);
-
-extern int RD_TLS rd_kafka_yield_thread;
-
-
-
-/**
- * @name Enqueue op once
- * @{
- */
-
-/**
- * @brief Minimal rd_kafka_op_t wrapper that ensures that
- * the op is only enqueued on the provided queue once.
- *
- * Typical use-case is for an op to be triggered from multiple sources,
- * but at most once, such as from a timer and some other source.
- */
-typedef struct rd_kafka_enq_once_s {
- mtx_t lock;
- int refcnt;
- rd_kafka_op_t *rko;
- rd_kafka_replyq_t replyq;
-} rd_kafka_enq_once_t;
-
-
-/**
- * @brief Allocate and set up a new eonce and set the initial refcount to 1.
- * @remark This is to be called by the owner of the rko.
- */
-static RD_INLINE RD_UNUSED rd_kafka_enq_once_t *
-rd_kafka_enq_once_new(rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) {
- rd_kafka_enq_once_t *eonce = rd_calloc(1, sizeof(*eonce));
- mtx_init(&eonce->lock, mtx_plain);
- eonce->rko = rko;
- eonce->replyq = replyq; /* struct copy */
- eonce->refcnt = 1;
- return eonce;
-}
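-
-/* Illustrative sketch (not part of the original source): the
- * timer-plus-other-source pattern described above. The owner creates the
- * eonce and registers a timeout timer as an extra source; whichever
- * source fires first enqueues the rko, and the second trigger is a
- * no-op. Timer wiring is shown schematically; the function name is
- * hypothetical. */
-#if 0 /* example only */
-static rd_kafka_enq_once_t *example_eonce_setup(rd_kafka_t *rk,
- rd_kafka_op_t *rko) {
- rd_kafka_enq_once_t *eonce =
- rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
-
- rd_kafka_enq_once_add_source(eonce, "timeout timer");
- /* ... arrange for the timer to eventually call:
- * rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT,
- * "timeout timer");
- * and for the other source to call _trigger() with its own error
- * code; only the first trigger enqueues the rko. */
-
- return eonce;
-}
-#endif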
-
-/**
- * @brief Re-enable triggering of an eonce even after it has been triggered
- * once.
- *
- * @remark This is to be called by the owner.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_enq_once_reenable(rd_kafka_enq_once_t *eonce,
- rd_kafka_op_t *rko,
- rd_kafka_replyq_t replyq) {
- mtx_lock(&eonce->lock);
- eonce->rko = rko;
- rd_kafka_replyq_destroy(&eonce->replyq);
- eonce->replyq = replyq; /* struct copy */
- mtx_unlock(&eonce->lock);
-}
-
-
-/**
- * @brief Free eonce and its resources. Must only be called with refcnt==0
- * and eonce->lock NOT held.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_enq_once_destroy0(rd_kafka_enq_once_t *eonce) {
- /* This must not be called with the rko or replyq still set, which would
- * indicate that no enqueueing was performed and that the owner
- * did not clean up, which is a bug. */
- rd_assert(!eonce->rko);
- rd_assert(!eonce->replyq.q);
-#if ENABLE_DEVEL
- rd_assert(!eonce->replyq._id);
-#endif
- rd_assert(eonce->refcnt == 0);
-
- mtx_destroy(&eonce->lock);
- rd_free(eonce);
-}
-
-
-/**
- * @brief Increment refcount for source (non-owner), such as a timer.
- *
- * @param srcdesc a human-readable descriptive string of the source.
- * May be used for future debugging.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) {
- mtx_lock(&eonce->lock);
- eonce->refcnt++;
- mtx_unlock(&eonce->lock);
-}
-
-
-/**
- * @brief Decrement refcount for source (non-owner), such as a timer.
- *
- * @param srcdesc a human-readable descriptive string of the source.
- * May be used for future debugging.
- *
- * @remark Must only be called from the owner with the owner
- * still holding its own refcount.
- * This API is used to undo an add_source() from the
- * same code.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) {
- int do_destroy;
-
- mtx_lock(&eonce->lock);
- rd_assert(eonce->refcnt > 0);
- eonce->refcnt--;
- do_destroy = eonce->refcnt == 0;
- mtx_unlock(&eonce->lock);
-
- if (do_destroy) {
- /* We're the last refcount holder, clean up eonce. */
- rd_kafka_enq_once_destroy0(eonce);
- }
-}
-
-/**
- * @brief Trigger a source's reference where the eonce resides on
- * an rd_list_t. This is typically used as a free_cb for
- * rd_list_destroy() and the trigger error code is
- * always RD_KAFKA_RESP_ERR__DESTROY.
- */
-void rd_kafka_enq_once_trigger_destroy(void *ptr);
-
-
-/**
- * @brief Decrement refcount for source (non-owner) and return the rko
- * if still set.
- *
- * @remark Must only be called by sources (non-owner), and only on
- * the owner's thread, to make sure the rko is not freed.
- *
- * @remark The rko remains set on the eonce.
- */
-static RD_INLINE RD_UNUSED rd_kafka_op_t *
-rd_kafka_enq_once_del_source_return(rd_kafka_enq_once_t *eonce,
- const char *srcdesc) {
- rd_bool_t do_destroy;
- rd_kafka_op_t *rko;
-
- mtx_lock(&eonce->lock);
-
- rd_assert(eonce->refcnt > 0);
- /* Owner must still hold an eonce reference, or the eonce must
- * have been disabled by the owner (no rko) */
- rd_assert(eonce->refcnt > 1 || !eonce->rko);
- eonce->refcnt--;
- do_destroy = eonce->refcnt == 0;
-
- rko = eonce->rko;
- mtx_unlock(&eonce->lock);
-
- if (do_destroy) {
- /* We're the last refcount holder, clean up eonce. */
- rd_kafka_enq_once_destroy0(eonce);
- }
-
- return rko;
-}
-
-/**
- * @brief Trigger enqueuing of the rko (unless already enqueued)
- * and drops the source's refcount.
- *
- * @remark Must only be called by sources (non-owner).
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_enq_once_trigger(rd_kafka_enq_once_t *eonce,
- rd_kafka_resp_err_t err,
- const char *srcdesc) {
- int do_destroy;
- rd_kafka_op_t *rko = NULL;
- rd_kafka_replyq_t replyq = RD_ZERO_INIT;
-
- mtx_lock(&eonce->lock);
-
- rd_assert(eonce->refcnt > 0);
- eonce->refcnt--;
- do_destroy = eonce->refcnt == 0;
-
- if (eonce->rko) {
- /* Not already enqueued, do it.
- * Detach the rko and replyq from the eonce and unlock the eonce
- * before enqueuing the rko on the replyq to avoid recursive locks
- * if the replyq has been disabled and the op's
- * destructor is called (which might then access the eonce
- * to clean up). */
- rko = eonce->rko;
- replyq = eonce->replyq;
-
- eonce->rko = NULL;
- rd_kafka_replyq_clear(&eonce->replyq);
-
- /* Reply is enqueued at the end of this function */
- }
- mtx_unlock(&eonce->lock);
-
- if (do_destroy) {
- /* We're the last refcount holder, clean up eonce. */
- rd_kafka_enq_once_destroy0(eonce);
- }
-
- if (rko) {
- rko->rko_err = err;
- rd_kafka_replyq_enq(&replyq, rko, replyq.version);
- rd_kafka_replyq_destroy(&replyq);
- }
-}
-
-/**
- * @brief Destroy eonce, must only be called by the owner.
- * There may be outstanding refcounts held by non-owners after this call.
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_enq_once_destroy(rd_kafka_enq_once_t *eonce) {
- int do_destroy;
-
- mtx_lock(&eonce->lock);
- rd_assert(eonce->refcnt > 0);
- eonce->refcnt--;
- do_destroy = eonce->refcnt == 0;
-
- eonce->rko = NULL;
- rd_kafka_replyq_destroy(&eonce->replyq);
-
- mtx_unlock(&eonce->lock);
-
- if (do_destroy) {
- /* We're the last refcount holder, clean up eonce. */
- rd_kafka_enq_once_destroy0(eonce);
- }
-}
-
-
-/**
- * @brief Disable the owner's eonce, extracting, resetting and returning
- * the \c rko object.
- *
- * This is the same as rd_kafka_enq_once_destroy() but returning
- * the rko.
- *
- * Use this for owner-thread triggering where the enqueuing of the
- * rko on the replyq is not necessary.
- *
- * @returns the eonce's rko object, if still available, else NULL.
- */
-static RD_INLINE RD_UNUSED rd_kafka_op_t *
-rd_kafka_enq_once_disable(rd_kafka_enq_once_t *eonce) {
- int do_destroy;
- rd_kafka_op_t *rko;
-
- mtx_lock(&eonce->lock);
- rd_assert(eonce->refcnt > 0);
- eonce->refcnt--;
- do_destroy = eonce->refcnt == 0;
-
- /* May be NULL */
- rko = eonce->rko;
- eonce->rko = NULL;
- rd_kafka_replyq_destroy(&eonce->replyq);
-
- mtx_unlock(&eonce->lock);
-
- if (do_destroy) {
- /* We're the last refcount holder, clean up eonce. */
- rd_kafka_enq_once_destroy0(eonce);
- }
-
- return rko;
-}
-
-
-/**@}*/
-
-
-#endif /* _RDKAFKA_QUEUE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c
deleted file mode 100644
index c83f1f1a4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-
-
-
-/**
- * Source:
- * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java
- *
- * The range assignor works on a per-topic basis. For each topic, we lay out the
- * available partitions in numeric order and the consumers in lexicographic
- * order. We then divide the number of partitions by the total number of
- * consumers to determine the number of partitions to assign to each consumer.
- * If it does not evenly divide, then the first few consumers will have one
- * extra partition.
- *
- * For example, suppose there are two consumers C0 and C1, two topics t0 and t1,
- * and each topic has 3 partitions, resulting in partitions t0p0, t0p1, t0p2,
- * t1p0, t1p1, and t1p2.
- *
- * The assignment will be:
- * C0: [t0p0, t0p1, t1p0, t1p1]
- * C1: [t0p2, t1p2]
- */
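-
-/* Worked example (not part of the original source): for P partitions and
- * C (sorted) consumers, the code below assigns consumer i the slice
- * [start, start + length) where
- * start = (P / C) * i + min(i, P % C)
- * length = P / C + (i < P % C ? 1 : 0)
- * E.g. P=3, C=2: consumer 0 gets [0, 2) and consumer 1 gets [2, 3),
- * matching the t0/t1 example above. */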
-
-rd_kafka_resp_err_t
-rd_kafka_range_assignor_assign_cb(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas,
- const char *member_id,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- rd_kafka_assignor_topic_t **eligible_topics,
- size_t eligible_topic_cnt,
- char *errstr,
- size_t errstr_size,
- void *opaque) {
- unsigned int ti;
- int i;
-
- /* The range assignor works on a per-topic basis. */
- for (ti = 0; ti < eligible_topic_cnt; ti++) {
- rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti];
- int numPartitionsPerConsumer;
- int consumersWithExtraPartition;
-
- /* For each topic, we lay out the available partitions in
- * numeric order and the consumers in lexicographic order. */
- rd_list_sort(&eligible_topic->members,
- rd_kafka_group_member_cmp);
-
- /* We then divide the number of partitions by the total number
- * of consumers to determine the number of partitions to assign
- * to each consumer. */
- numPartitionsPerConsumer =
- eligible_topic->metadata->partition_cnt /
- rd_list_cnt(&eligible_topic->members);
-
- /* If it does not evenly divide, then the first few consumers
- * will have one extra partition. */
- consumersWithExtraPartition =
- eligible_topic->metadata->partition_cnt %
- rd_list_cnt(&eligible_topic->members);
-
- rd_kafka_dbg(rk, CGRP, "ASSIGN",
- "range: Topic %s with %d partition(s) and "
- "%d subscribing member(s)",
- eligible_topic->metadata->topic,
- eligible_topic->metadata->partition_cnt,
- rd_list_cnt(&eligible_topic->members));
-
- for (i = 0; i < rd_list_cnt(&eligible_topic->members); i++) {
- rd_kafka_group_member_t *rkgm =
- rd_list_elem(&eligible_topic->members, i);
- int start = numPartitionsPerConsumer * i +
- RD_MIN(i, consumersWithExtraPartition);
- int length =
- numPartitionsPerConsumer +
- (i + 1 > consumersWithExtraPartition ? 0 : 1);
-
- if (length == 0)
- continue;
-
- rd_kafka_dbg(rk, CGRP, "ASSIGN",
- "range: Member \"%s\": "
- "assigned topic %s partitions %d..%d",
- rkgm->rkgm_member_id->str,
- eligible_topic->metadata->topic, start,
- start + length - 1);
- rd_kafka_topic_partition_list_add_range(
- rkgm->rkgm_assignment,
- eligible_topic->metadata->topic, start,
- start + length - 1);
- }
- }
-
- return 0;
-}
-
-
-
-/**
- * @brief Initialize and add the range assignor.
- */
-rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk) {
- return rd_kafka_assignor_add(
- rk, "consumer", "range", RD_KAFKA_REBALANCE_PROTOCOL_EAGER,
- rd_kafka_range_assignor_assign_cb,
- rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL,
- NULL, NULL);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c
deleted file mode 100644
index 12d9eb30e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c
+++ /dev/null
@@ -1,5378 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdarg.h>
-
-#include "rdkafka_int.h"
-#include "rdkafka_request.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_offset.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_metadata.h"
-#include "rdkafka_msgset.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_txnmgr.h"
-#include "rdkafka_sasl.h"
-
-#include "rdrand.h"
-#include "rdstring.h"
-#include "rdunittest.h"
-
-
-/**
- * Kafka protocol request and response handling.
- * All of this code runs in the broker thread and uses op queues for
- * propagating results back to the various sub-systems operating in
- * other threads.
- */
-
-
-/* RD_KAFKA_ERR_ACTION_.. to string map */
-static const char *rd_kafka_actions_descs[] = {
- "Permanent", "Ignore", "Refresh", "Retry",
- "Inform", "Special", "MsgNotPersisted", "MsgPossiblyPersisted",
- "MsgPersisted", NULL,
-};
-
-const char *rd_kafka_actions2str(int actions) {
- static RD_TLS char actstr[128];
- return rd_flags2str(actstr, sizeof(actstr), rd_kafka_actions_descs,
- actions);
-}
-
-
-/**
- * @brief Decide action(s) to take based on the returned error code.
- *
- * The optional var-args is a .._ACTION_END terminated list
- * of action,error tuples which overrides the general behaviour.
- * It is to be read as: for \p error, return \p action(s).
- *
- * @warning \p request, \p rkbuf and \p rkb may be NULL.
- */
-int rd_kafka_err_action(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- const rd_kafka_buf_t *request,
- ...) {
- va_list ap;
- int actions = 0;
- int exp_act;
-
- if (!err)
- return 0;
-
- /* Match explicitly defined error mappings first. */
- va_start(ap, request);
- while ((exp_act = va_arg(ap, int))) {
- int exp_err = va_arg(ap, int);
-
- if (err == exp_err)
- actions |= exp_act;
- }
- va_end(ap);
-
- /* Explicit error match. */
- if (actions) {
- if (err && rkb && request)
- rd_rkb_dbg(
- rkb, BROKER, "REQERR",
- "%sRequest failed: %s: explicit actions %s",
- rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey),
- rd_kafka_err2str(err),
- rd_kafka_actions2str(actions));
-
- return actions;
- }
-
- /* Default error matching */
- switch (err) {
- case RD_KAFKA_RESP_ERR_NO_ERROR:
- break;
- case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION:
- case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
- case RD_KAFKA_RESP_ERR__WAIT_COORD:
- /* Request metadata information update */
- actions |= RD_KAFKA_ERR_ACTION_REFRESH |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
- break;
-
- case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR:
- /* Request metadata update and retry */
- actions |= RD_KAFKA_ERR_ACTION_REFRESH |
- RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
- break;
-
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- case RD_KAFKA_RESP_ERR__TIMED_OUT:
- case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
- case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND:
- actions |= RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED;
- break;
-
- case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS:
- /* Client-side wait-response/in-queue timeout */
- case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
- actions |= RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
- break;
-
- case RD_KAFKA_RESP_ERR__PURGE_INFLIGHT:
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED;
- break;
-
- case RD_KAFKA_RESP_ERR__BAD_MSG:
- /* Buffer parse failures are typically a client-side bug,
- * treat them as permanent failures. */
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED;
- break;
-
- case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
-
- case RD_KAFKA_RESP_ERR__DESTROY:
- case RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT:
- case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE:
- case RD_KAFKA_RESP_ERR__PURGE_QUEUE:
- default:
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
- break;
- }
-
- /* Fatal or permanent errors are not retriable */
- if (actions &
- (RD_KAFKA_ERR_ACTION_FATAL | RD_KAFKA_ERR_ACTION_PERMANENT))
- actions &= ~RD_KAFKA_ERR_ACTION_RETRY;
-
- /* If no request buffer was specified, which might be the case
- * in certain error call chains, mask out the retry action. */
- if (!request)
- actions &= ~RD_KAFKA_ERR_ACTION_RETRY;
- else if (request->rkbuf_reqhdr.ApiKey != RD_KAFKAP_Produce)
- /* Mask out message-related bits for non-Produce requests */
- actions &= ~RD_KAFKA_ERR_ACTION_MSG_FLAGS;
-
- if (err && actions && rkb && request)
- rd_rkb_dbg(
- rkb, BROKER, "REQERR", "%sRequest failed: %s: actions %s",
- rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey),
- rd_kafka_err2str(err), rd_kafka_actions2str(actions));
-
- return actions;
-}
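-
-/* Illustrative sketch (not part of the original source): the var-args
- * override convention described above, read as "for this error, return
- * these actions"; the tuple list must be terminated with
- * RD_KAFKA_ERR_ACTION_END:
- *
- * actions = rd_kafka_err_action(
- * rkb, err, request,
- * RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT,
- * RD_KAFKA_ERR_ACTION_END);
- */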
-
-
-/**
- * @brief Read a list of topic+partitions+extra from \p rkbuf.
- *
- * @param rkbuf buffer to read from
- * @param fields An array of fields to read from the buffer and set on
- * the rktpar object, in the specified order, must end
- * with RD_KAFKA_TOPIC_PARTITION_FIELD_END.
- *
- * @returns a newly allocated list on success, or NULL on parse error.
- */
-rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions(
- rd_kafka_buf_t *rkbuf,
- size_t estimated_part_cnt,
- const rd_kafka_topic_partition_field_t *fields) {
- const int log_decode_errors = LOG_ERR;
- int32_t TopicArrayCnt;
- rd_kafka_topic_partition_list_t *parts = NULL;
-
- rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX);
-
- parts = rd_kafka_topic_partition_list_new(
- RD_MAX(TopicArrayCnt * 4, (int)estimated_part_cnt));
-
- while (TopicArrayCnt-- > 0) {
- rd_kafkap_str_t kTopic;
- int32_t PartArrayCnt;
- char *topic;
-
- rd_kafka_buf_read_str(rkbuf, &kTopic);
- rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt,
- RD_KAFKAP_PARTITIONS_MAX);
-
- RD_KAFKAP_STR_DUPA(&topic, &kTopic);
-
- while (PartArrayCnt-- > 0) {
- int32_t Partition = -1, Epoch = -1234,
- CurrentLeaderEpoch = -1234;
- int64_t Offset = -1234;
- int16_t ErrorCode = 0;
- rd_kafka_topic_partition_t *rktpar;
- int fi;
-
- /*
- * Read requested fields
- */
- for (fi = 0;
- fields[fi] != RD_KAFKA_TOPIC_PARTITION_FIELD_END;
- fi++) {
- switch (fields[fi]) {
- case RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION:
- rd_kafka_buf_read_i32(rkbuf,
- &Partition);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET:
- rd_kafka_buf_read_i64(rkbuf, &Offset);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH:
- rd_kafka_buf_read_i32(
- rkbuf, &CurrentLeaderEpoch);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH:
- rd_kafka_buf_read_i32(rkbuf, &Epoch);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR:
- rd_kafka_buf_read_i16(rkbuf,
- &ErrorCode);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA:
- rd_assert(!*"metadata not implemented");
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP:
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_END:
- break;
- }
- }
-
- rktpar = rd_kafka_topic_partition_list_add(parts, topic,
- Partition);
- /* Use sentinel values that are unlikely to be
- * returned by the broker to determine whether
- * these fields were read and should be set. */
- if (Offset != -1234)
- rktpar->offset = Offset;
- if (Epoch != -1234)
- rd_kafka_topic_partition_set_leader_epoch(
- rktpar, Epoch);
- if (CurrentLeaderEpoch != -1234)
- rd_kafka_topic_partition_set_current_leader_epoch(
- rktpar, CurrentLeaderEpoch);
- rktpar->err = ErrorCode;
-
-
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- return parts;
-
-err_parse:
- if (parts)
- rd_kafka_topic_partition_list_destroy(parts);
-
- return NULL;
-}
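-
-/* Illustrative sketch (not part of the original source): a caller-side
- * fields array for the reader above (and the writer below), selecting
- * partition and offset and terminated with _FIELD_END as required. */
-#if 0 /* example only */
-static const rd_kafka_topic_partition_field_t example_fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END,
-};
-/* parts = rd_kafka_buf_read_topic_partitions(rkbuf, 0, example_fields); */
-#endif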
-
-
-/**
- * @brief Write a list of topic+partitions+offsets+extra to \p rkbuf
- *
- * @returns the number of partitions written to buffer.
- *
- * @remark The \p parts list MUST be sorted.
- */
-int rd_kafka_buf_write_topic_partitions(
- rd_kafka_buf_t *rkbuf,
- const rd_kafka_topic_partition_list_t *parts,
- rd_bool_t skip_invalid_offsets,
- rd_bool_t only_invalid_offsets,
- const rd_kafka_topic_partition_field_t *fields) {
- size_t of_TopicArrayCnt;
- size_t of_PartArrayCnt = 0;
- int TopicArrayCnt = 0, PartArrayCnt = 0;
- int i;
- const char *prev_topic = NULL;
- int cnt = 0;
-
- rd_assert(!only_invalid_offsets ||
- (only_invalid_offsets != skip_invalid_offsets));
-
- /* TopicArrayCnt */
- of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
-
- for (i = 0; i < parts->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar = &parts->elems[i];
- int fi;
-
- if (rktpar->offset < 0) {
- if (skip_invalid_offsets)
- continue;
- } else if (only_invalid_offsets)
- continue;
-
- if (!prev_topic || strcmp(rktpar->topic, prev_topic)) {
- /* Finish previous topic, if any. */
- if (of_PartArrayCnt > 0) {
- rd_kafka_buf_finalize_arraycnt(
- rkbuf, of_PartArrayCnt, PartArrayCnt);
- /* Tags for previous topic struct */
- rd_kafka_buf_write_tags(rkbuf);
- }
-
-
- /* Topic */
- rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
- TopicArrayCnt++;
- prev_topic = rktpar->topic;
- /* New topic so reset partition count */
- PartArrayCnt = 0;
-
- /* PartitionArrayCnt: updated later */
- of_PartArrayCnt =
- rd_kafka_buf_write_arraycnt_pos(rkbuf);
- }
-
-
- /*
- * Write requested fields
- */
- for (fi = 0; fields[fi] != RD_KAFKA_TOPIC_PARTITION_FIELD_END;
- fi++) {
- switch (fields[fi]) {
- case RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION:
- rd_kafka_buf_write_i32(rkbuf,
- rktpar->partition);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET:
- rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH:
- rd_kafka_buf_write_i32(
- rkbuf,
- rd_kafka_topic_partition_get_current_leader_epoch(
- rktpar));
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH:
- rd_kafka_buf_write_i32(
- rkbuf,
- rd_kafka_topic_partition_get_leader_epoch(
- rktpar));
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR:
- rd_kafka_buf_write_i16(rkbuf, rktpar->err);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA:
- /* Java client 0.9.0 and broker <0.10.0 can't
- * parse Null metadata fields, so as a
- * workaround we send an empty string if
- * it's Null. */
- if (!rktpar->metadata)
- rd_kafka_buf_write_str(rkbuf, "", 0);
- else
- rd_kafka_buf_write_str(
- rkbuf, rktpar->metadata,
- rktpar->metadata_size);
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP:
- break;
- case RD_KAFKA_TOPIC_PARTITION_FIELD_END:
- break;
- }
- }
-
-
- if (fi > 1)
- /* If there was more than one field written
- * then this was a struct and thus needs the
- * struct suffix tags written. */
- rd_kafka_buf_write_tags(rkbuf);
-
- PartArrayCnt++;
- cnt++;
- }
-
- if (of_PartArrayCnt > 0) {
- rd_kafka_buf_finalize_arraycnt(rkbuf, of_PartArrayCnt,
- PartArrayCnt);
- /* Tags for topic struct */
- rd_kafka_buf_write_tags(rkbuf);
- }
-
- rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, TopicArrayCnt);
-
- return cnt;
-}
-
-
-/**
- * @brief Send FindCoordinatorRequest.
- *
- * @param coordkey is the group.id for RD_KAFKA_COORD_GROUP,
- * and the transactional.id for RD_KAFKA_COORD_TXN
- */
-rd_kafka_resp_err_t
-rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_FindCoordinator, 0, 2, NULL);
-
- if (coordtype != RD_KAFKA_COORD_GROUP && ApiVersion < 1)
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_FindCoordinator, 1,
- 1 + 2 + strlen(coordkey));
-
- rd_kafka_buf_write_str(rkbuf, coordkey, -1);
-
- if (ApiVersion >= 1)
- rd_kafka_buf_write_i8(rkbuf, (int8_t)coordtype);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief Parses a ListOffsets reply.
- *
- * Returns the parsed offsets (and errors) in \p offsets which must have been
- * initialized by caller.
- *
- * @returns 0 on success, else an error (\p offsets may be completely or
- * partially updated, depending on the nature of the error, and per
- * partition error codes should be checked by the caller).
- */
-static rd_kafka_resp_err_t
-rd_kafka_parse_ListOffsets(rd_kafka_buf_t *rkbuf,
- rd_kafka_topic_partition_list_t *offsets) {
- const int log_decode_errors = LOG_ERR;
- int32_t TopicArrayCnt;
- int16_t api_version;
- rd_kafka_resp_err_t all_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- api_version = rkbuf->rkbuf_reqhdr.ApiVersion;
-
- if (api_version >= 2)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- /* NOTE:
- * The broker may return offsets in a different arrangement than
- * in the original request. */
-
- rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
- while (TopicArrayCnt-- > 0) {
- rd_kafkap_str_t ktopic;
- int32_t PartArrayCnt;
- char *topic_name;
-
- rd_kafka_buf_read_str(rkbuf, &ktopic);
- rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
-
- RD_KAFKAP_STR_DUPA(&topic_name, &ktopic);
-
- while (PartArrayCnt-- > 0) {
- int32_t kpartition;
- int16_t ErrorCode;
- int32_t OffsetArrayCnt;
- int64_t Offset = -1;
- int32_t LeaderEpoch = -1;
- rd_kafka_topic_partition_t *rktpar;
-
- rd_kafka_buf_read_i32(rkbuf, &kpartition);
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- if (api_version >= 1) {
- int64_t Timestamp;
- rd_kafka_buf_read_i64(rkbuf, &Timestamp);
- rd_kafka_buf_read_i64(rkbuf, &Offset);
- if (api_version >= 4)
- rd_kafka_buf_read_i32(rkbuf,
- &LeaderEpoch);
- } else if (api_version == 0) {
- rd_kafka_buf_read_i32(rkbuf, &OffsetArrayCnt);
- /* We only request one offset so just grab
- * the first one. */
- while (OffsetArrayCnt-- > 0)
- rd_kafka_buf_read_i64(rkbuf, &Offset);
- } else {
- RD_NOTREACHED();
- }
-
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, topic_name, kpartition);
- rktpar->err = ErrorCode;
- rktpar->offset = Offset;
- rd_kafka_topic_partition_set_leader_epoch(rktpar,
- LeaderEpoch);
-
- if (ErrorCode && !all_err)
- all_err = ErrorCode;
- }
- }
-
- return all_err;
-
-err_parse:
- return rkbuf->rkbuf_err;
-}
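-
-
-/*
- * Illustrative sketch (not part of librdkafka): since the parser above may
- * leave \p offsets only partially updated on error, callers are expected
- * to walk the list and inspect each partition's error code themselves.
- * The reporting callback `report` is an assumption for the example.
- */
-static void
-example_check_offsets(const rd_kafka_topic_partition_list_t *offsets,
- void (*report)(const char *fmt, ...)) {
- int i;
-
- for (i = 0; i < offsets->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
-
- if (rktpar->err)
- report("%s [%" PRId32 "]: %s", rktpar->topic,
- rktpar->partition,
- rd_kafka_err2str(rktpar->err));
- else
- report("%s [%" PRId32 "]: offset %" PRId64,
- rktpar->topic, rktpar->partition,
- rktpar->offset);
- }
-}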
-
-
-
-/**
- * @brief Parses and handles ListOffsets replies.
- *
- * Returns the parsed offsets (and errors) in \p offsets.
- * \p offsets must be initialized by the caller.
- *
- * @returns 0 on success, else an error. \p offsets may be populated on error,
- * depending on the nature of the error.
- * On error \p actionsp (unless NULL) is updated with the recommended
- * error actions.
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_ListOffsets(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t *offsets,
- int *actionsp) {
-
- int actions;
-
- if (!err)
- err = rd_kafka_parse_ListOffsets(rkbuf, offsets);
- if (!err)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- actions = rd_kafka_err_action(
- rkb, err, request, RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
-
- RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
-
- RD_KAFKA_ERR_ACTION_REFRESH,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE,
-
- RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
-
- RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
-
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE,
-
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
-
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
-
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT,
-
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
-
-
- RD_KAFKA_ERR_ACTION_END);
-
- if (actionsp)
- *actionsp = actions;
-
- if (rkb)
- rd_rkb_dbg(
- rkb, TOPIC, "OFFSET", "OffsetRequest failed: %s (%s)",
- rd_kafka_err2str(err), rd_kafka_actions2str(actions));
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- char tmp[256];
- /* Re-query for leader */
- rd_snprintf(tmp, sizeof(tmp), "ListOffsetsRequest failed: %s",
- rd_kafka_err2str(err));
- rd_kafka_metadata_refresh_known_topics(rk, NULL,
- rd_true /*force*/, tmp);
- }
-
- if ((actions & RD_KAFKA_ERR_ACTION_RETRY) &&
- rd_kafka_buf_retry(rkb, request))
- return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-
- return err;
-}
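-
-
-/*
- * Illustrative sketch (not part of librdkafka): rd_kafka_err_action(),
- * used above, takes a variable list of (action flags, error code) pairs
- * terminated by RD_KAFKA_ERR_ACTION_END; a matching error code yields
- * the paired action flags, with built-in defaults for unlisted errors.
- */
-static int example_classify_error(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *request) {
- return rd_kafka_err_action(rkb, err, request,
- /* Retry on transport-level failures.. */
- RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR__TRANSPORT,
- /* ..and terminate the pair list. */
- RD_KAFKA_ERR_ACTION_END);
-}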
-
-
-
-/**
- * @brief Async maker for ListOffsetsRequest.
- */
-static rd_kafka_resp_err_t
-rd_kafka_make_ListOffsetsRequest(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- void *make_opaque) {
- const rd_kafka_topic_partition_list_t *partitions =
- (const rd_kafka_topic_partition_list_t *)make_opaque;
- int i;
- size_t of_TopicArrayCnt = 0, of_PartArrayCnt = 0;
- const char *last_topic = "";
- int32_t topic_cnt = 0, part_cnt = 0;
- int16_t ApiVersion;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_ListOffsets, 0, 5, NULL);
- if (ApiVersion == -1)
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
-
- /* ReplicaId */
- rd_kafka_buf_write_i32(rkbuf, -1);
-
- /* IsolationLevel */
- if (ApiVersion >= 2)
- rd_kafka_buf_write_i8(rkbuf,
- rkb->rkb_rk->rk_conf.isolation_level);
-
- /* TopicArrayCnt */
- of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* updated later */
-
- for (i = 0; i < partitions->cnt; i++) {
- const rd_kafka_topic_partition_t *rktpar =
- &partitions->elems[i];
-
- if (strcmp(rktpar->topic, last_topic)) {
- /* Finish last topic, if any. */
- if (of_PartArrayCnt > 0)
- rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt,
- part_cnt);
-
- /* Topic */
- rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
- topic_cnt++;
- last_topic = rktpar->topic;
- /* New topic so reset partition count */
- part_cnt = 0;
-
- /* PartitionArrayCnt: updated later */
- of_PartArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
- }
-
- /* Partition */
- rd_kafka_buf_write_i32(rkbuf, rktpar->partition);
- part_cnt++;
-
- if (ApiVersion >= 4)
- /* CurrentLeaderEpoch */
- rd_kafka_buf_write_i32(
- rkbuf,
- rd_kafka_topic_partition_get_current_leader_epoch(
- rktpar));
-
- /* Time/Offset */
- rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
-
- if (ApiVersion == 0) {
- /* MaxNumberOfOffsets */
- rd_kafka_buf_write_i32(rkbuf, 1);
- }
- }
-
- if (of_PartArrayCnt > 0) {
- rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt, part_cnt);
- rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, topic_cnt);
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_rkb_dbg(rkb, TOPIC, "OFFSET",
- "ListOffsetsRequest (v%hd, opv %d) "
- "for %" PRId32 " topic(s) and %" PRId32 " partition(s)",
- ApiVersion, rkbuf->rkbuf_replyq.version, topic_cnt,
- partitions->cnt);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Send ListOffsetsRequest for partitions in \p partitions.
- */
-void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb,
- rd_kafka_topic_partition_list_t *partitions,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- rd_kafka_topic_partition_list_t *make_parts;
-
- make_parts = rd_kafka_topic_partition_list_copy(partitions);
- rd_kafka_topic_partition_list_sort_by_topic(make_parts);
-
- rkbuf = rd_kafka_buf_new_request(
- rkb, RD_KAFKAP_ListOffsets, 1,
- /* ReplicaId+IsolationLevel+TopicArrayCnt+Topic */
- 4 + 1 + 4 + 100 +
- /* PartArrayCnt */
- 4 +
- /* partition_cnt * Partition+Time+MaxNumOffs */
- (make_parts->cnt * (4 + 8 + 4)));
-
- /* Postpone creating the request contents until time to send,
- * at which time the ApiVersion is known. */
- rd_kafka_buf_set_maker(rkbuf, rd_kafka_make_ListOffsetsRequest,
- make_parts,
- rd_kafka_topic_partition_list_destroy_free);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
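-
-
-/*
- * Illustrative sketch (not part of librdkafka): the "maker" pattern used
- * above defers serialization until the broker thread is about to transmit
- * the buffer, at which point the supported ApiVersion is known. A
- * hypothetical request would follow the same shape; passing NULL as the
- * opaque destructor is assumed to be acceptable here.
- */
-static rd_kafka_resp_err_t example_make_cb(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- void *make_opaque) {
- /* Write the request fields here, according to
- * rd_kafka_broker_ApiVersion_supported(). */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static void example_enqueue_deferred(rd_kafka_buf_t *rkbuf,
- void *make_opaque) {
- /* Contents are produced by example_make_cb just before
- * transmission rather than at enqueue time. */
- rd_kafka_buf_set_maker(rkbuf, example_make_cb, make_opaque, NULL);
-}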
-
-
-/**
- * @brief OffsetForLeaderEpochResponse handler.
- */
-rd_kafka_resp_err_t rd_kafka_handle_OffsetForLeaderEpoch(
- rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t **offsets) {
- const int log_decode_errors = LOG_ERR;
- int16_t ApiVersion;
-
- if (err)
- goto err;
-
- ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion;
-
- if (ApiVersion >= 2)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- ApiVersion >= 1 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH
- : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP,
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- *offsets = rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields);
- if (!*offsets)
- goto err_parse;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-err:
- return err;
-
-err_parse:
- err = rkbuf->rkbuf_err;
- goto err;
-}
-
-
-/**
- * @brief Send OffsetForLeaderEpochRequest for partition(s).
- *
- */
-void rd_kafka_OffsetForLeaderEpochRequest(
- rd_kafka_broker_t *rkb,
- rd_kafka_topic_partition_list_t *parts,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_OffsetForLeaderEpoch, 2, 2, NULL);
- /* If the supported ApiVersions are not yet known,
- * or this broker doesn't support it, we let this request
- * succeed or fail later from the broker thread where the
- * version is checked again. */
- if (ApiVersion == -1)
- ApiVersion = 2;
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_OffsetForLeaderEpoch, 1, 4 + (parts->cnt * 64),
- ApiVersion >= 4 /*flexver*/);
-
- /* Sort partitions by topic */
- rd_kafka_topic_partition_list_sort_by_topic(parts);
-
- /* Write partition list */
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- /* CurrentLeaderEpoch */
- RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH,
- /* LeaderEpoch */
- RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- rd_kafka_buf_write_topic_partitions(
- rkbuf, parts, rd_false /*include invalid offsets*/,
- rd_false /*skip valid offsets*/, fields);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- /* Let caller perform retries */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
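-
-
-/*
- * Illustrative sketch (not part of librdkafka): the field array given to
- * rd_kafka_buf_write_topic_partitions() above is a small table-driven
- * serializer: each entry selects one per-partition field to write, in
- * order, terminated by .._FIELD_END. E.g. to write only partition ids
- * and offsets:
- */
-static const rd_kafka_topic_partition_field_t example_fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};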
-
-
-
-/**
- * Generic handler for OffsetFetch responses.
- * Offsets for included partitions will be propagated through the passed
- * 'offsets' list.
- *
- * @param rkbuf response buffer, may be NULL if \p err is set.
- * @param update_toppar update toppar's committed_offset
- * @param add_part if true add partitions from the response to \p *offsets,
- * else just update the partitions that are already
- * in \p *offsets.
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetFetch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t **offsets,
- rd_bool_t update_toppar,
- rd_bool_t add_part,
- rd_bool_t allow_retry) {
- const int log_decode_errors = LOG_ERR;
- int32_t TopicArrayCnt;
- int64_t offset = RD_KAFKA_OFFSET_INVALID;
- int16_t ApiVersion;
- rd_kafkap_str_t metadata;
- int retry_unstable = 0;
- int i;
- int actions;
- int seen_cnt = 0;
-
- if (err)
- goto err;
-
- ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion;
-
- if (ApiVersion >= 3)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- if (!*offsets)
- *offsets = rd_kafka_topic_partition_list_new(16);
-
- /* Set default offset for all partitions. */
- rd_kafka_topic_partition_list_set_offsets(rkb->rkb_rk, *offsets, 0,
- RD_KAFKA_OFFSET_INVALID,
- 0 /* !is commit */);
-
- rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX);
- for (i = 0; i < TopicArrayCnt; i++) {
- rd_kafkap_str_t topic;
- int32_t PartArrayCnt;
- char *topic_name;
- int j;
-
- rd_kafka_buf_read_str(rkbuf, &topic);
-
- rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt,
- RD_KAFKAP_PARTITIONS_MAX);
-
- RD_KAFKAP_STR_DUPA(&topic_name, &topic);
-
- for (j = 0; j < PartArrayCnt; j++) {
- int32_t partition;
- rd_kafka_toppar_t *rktp;
- rd_kafka_topic_partition_t *rktpar;
- int32_t LeaderEpoch = -1;
- int16_t err2;
-
- rd_kafka_buf_read_i32(rkbuf, &partition);
- rd_kafka_buf_read_i64(rkbuf, &offset);
- if (ApiVersion >= 5)
- rd_kafka_buf_read_i32(rkbuf, &LeaderEpoch);
- rd_kafka_buf_read_str(rkbuf, &metadata);
- rd_kafka_buf_read_i16(rkbuf, &err2);
- rd_kafka_buf_skip_tags(rkbuf);
-
- rktpar = rd_kafka_topic_partition_list_find(
- *offsets, topic_name, partition);
- if (!rktpar && add_part)
- rktpar = rd_kafka_topic_partition_list_add(
- *offsets, topic_name, partition);
- else if (!rktpar) {
- rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH",
- "OffsetFetchResponse: %s [%" PRId32
- "] "
- "not found in local list: ignoring",
- topic_name, partition);
- continue;
- }
-
- seen_cnt++;
-
- rktp = rd_kafka_topic_partition_get_toppar(
- rk, rktpar, rd_false /*no create on miss*/);
-
- /* broker reports invalid offset as -1 */
- if (offset == -1)
- rktpar->offset = RD_KAFKA_OFFSET_INVALID;
- else
- rktpar->offset = offset;
-
- rd_kafka_topic_partition_set_leader_epoch(rktpar,
- LeaderEpoch);
- rktpar->err = err2;
-
- rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH",
- "OffsetFetchResponse: %s [%" PRId32
- "] "
- "offset %" PRId64 ", leader epoch %" PRId32
- ", metadata %d byte(s): %s",
- topic_name, partition, offset, LeaderEpoch,
- RD_KAFKAP_STR_LEN(&metadata),
- rd_kafka_err2name(rktpar->err));
-
- if (update_toppar && !err2 && rktp) {
- /* Update toppar's committed offset */
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_committed_pos =
- rd_kafka_topic_partition_get_fetch_pos(
- rktpar);
- rd_kafka_toppar_unlock(rktp);
- }
-
- if (rktpar->err ==
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
- retry_unstable++;
-
-
- if (rktpar->metadata)
- rd_free(rktpar->metadata);
-
- if (RD_KAFKAP_STR_IS_NULL(&metadata)) {
- rktpar->metadata = NULL;
- rktpar->metadata_size = 0;
- } else {
- rktpar->metadata = RD_KAFKAP_STR_DUP(&metadata);
- rktpar->metadata_size =
- RD_KAFKAP_STR_LEN(&metadata);
- }
-
- /* Lose the reference from get_toppar() */
- if (rktp)
- rd_kafka_toppar_destroy(rktp);
- }
-
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- if (ApiVersion >= 2) {
- int16_t ErrorCode;
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- if (ErrorCode) {
- err = ErrorCode;
- goto err;
- }
- }
-
-
-err:
- if (!*offsets)
- rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", "OffsetFetch returned %s",
- rd_kafka_err2str(err));
- else
- rd_rkb_dbg(rkb, TOPIC, "OFFFETCH",
- "OffsetFetch for %d/%d partition(s) "
- "(%d unstable partition(s)) returned %s",
- seen_cnt, (*offsets)->cnt, retry_unstable,
- rd_kafka_err2str(err));
-
- actions =
- rd_kafka_err_action(rkb, err, request, RD_KAFKA_ERR_ACTION_END);
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- /* Re-query for coordinator */
- rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL, RD_KAFKA_NO_REPLYQ,
- RD_KAFKA_OP_COORD_QUERY, err);
- }
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY || retry_unstable) {
- if (allow_retry && rd_kafka_buf_retry(rkb, request))
- return RD_KAFKA_RESP_ERR__IN_PROGRESS;
- /* FALLTHRU */
- }
-
- return err;
-
-err_parse:
- err = rkbuf->rkbuf_err;
- goto err;
-}
-
-
-
-/**
- * @brief Handle OffsetFetch response based on an RD_KAFKA_OP_OFFSET_FETCH
- * rko in \p opaque.
- *
- * @param opaque rko wrapper for handle_OffsetFetch.
- *
- * The \c rko->rko_u.offset_fetch.partitions list will be filled in with
- * the fetched offsets.
- *
- * A reply will be sent on 'rko->rko_replyq' with type RD_KAFKA_OP_OFFSET_FETCH.
- *
- * @remark \p rkb, \p rkbuf and \p request are optional.
- *
- * @remark The \p request buffer may be retried on error.
- *
- * @locality cgrp's broker thread
- */
-void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_op_t *rko = opaque;
- rd_kafka_op_t *rko_reply;
- rd_kafka_topic_partition_list_t *offsets;
-
- RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_OFFSET_FETCH);
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* Termination, quick cleanup. */
- rd_kafka_op_destroy(rko);
- return;
- }
-
- offsets = rd_kafka_topic_partition_list_copy(
- rko->rko_u.offset_fetch.partitions);
-
- /* If all partitions already had usable offsets then there
- * was no request sent and thus no reply; the offsets list is
- * good to go.. */
- if (rkbuf) {
- /* ..else parse the response (or handle the error) */
- err = rd_kafka_handle_OffsetFetch(
- rkb->rkb_rk, rkb, err, rkbuf, request, &offsets,
- rd_false /*don't update rktp*/, rd_false /*don't add part*/,
- /* Allow retries if replyq is valid */
- rd_kafka_op_replyq_is_valid(rko));
- if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
- if (offsets)
- rd_kafka_topic_partition_list_destroy(offsets);
- return; /* Retrying */
- }
- }
-
- rko_reply =
- rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY);
- rko_reply->rko_err = err;
- rko_reply->rko_u.offset_fetch.partitions = offsets;
- rko_reply->rko_u.offset_fetch.do_free = 1;
- if (rko->rko_rktp)
- rko_reply->rko_rktp = rd_kafka_toppar_keep(rko->rko_rktp);
-
- rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0);
-
- rd_kafka_op_destroy(rko);
-}
-
-/**
- * Send OffsetFetchRequest for a consumer group id.
- *
- * Any partition with a usable offset will be ignored, if all partitions
- * have usable offsets then no request is sent at all but an empty
- * reply is enqueued on the replyq.
- *
- * @param group_id Request offset for this group id.
- * @param parts (optional) List of topic partitions to request,
- * or NULL to return all topic partitions associated with the
- * group.
- * @param require_stable_offsets Whether broker should return stable offsets
- * (transaction-committed).
- * @param timeout Optional timeout to set to the buffer.
- */
-void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb,
- const char *group_id,
- rd_kafka_topic_partition_list_t *parts,
- rd_bool_t require_stable_offsets,
- int timeout,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion;
- size_t parts_size = 0;
- int PartCnt = -1;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_OffsetFetch, 0, 7, NULL);
-
- if (parts) {
- parts_size = parts->cnt * 32;
- }
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_OffsetFetch, 1,
- /* GroupId + rd_kafka_buf_write_arraycnt_pos +
- * Topics + RequireStable */
- 32 + 4 + parts_size + 1, ApiVersion >= 6 /*flexver*/);
-
- /* ConsumerGroup */
- rd_kafka_buf_write_str(rkbuf, group_id, -1);
-
- if (parts) {
- /* Sort partitions by topic */
- rd_kafka_topic_partition_list_sort_by_topic(parts);
-
- /* Write partition list, filtering out partitions with valid
- * offsets */
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- PartCnt = rd_kafka_buf_write_topic_partitions(
- rkbuf, parts, rd_false /*include invalid offsets*/,
- rd_false /*skip valid offsets*/, fields);
- } else {
- rd_kafka_buf_write_arraycnt_pos(rkbuf);
- }
-
- if (ApiVersion >= 7) {
- /* RequireStable */
- rd_kafka_buf_write_i8(rkbuf, require_stable_offsets);
- }
-
- if (PartCnt == 0) {
- /* No partitions need OffsetFetch, enqueue an empty
- * response right away. */
- rkbuf->rkbuf_replyq = replyq;
- rkbuf->rkbuf_cb = resp_cb;
- rkbuf->rkbuf_opaque = opaque;
- rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf);
- return;
- }
-
- if (timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- if (parts) {
- rd_rkb_dbg(
- rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER,
- "OFFSET",
- "Group %s OffsetFetchRequest(v%d) for %d/%d partition(s)",
- group_id, ApiVersion, PartCnt, parts->cnt);
- } else {
- rd_rkb_dbg(
- rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER,
- "OFFSET",
- "Group %s OffsetFetchRequest(v%d) for all partitions",
- group_id, ApiVersion);
- }
-
- /* Let handler decide if retries should be performed */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
-
- if (parts) {
- rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET",
- "Fetch committed offsets for %d/%d partition(s)",
- PartCnt, parts->cnt);
- } else {
- rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET",
- "Fetch committed offsets all the partitions");
- }
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
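-
-
-/*
- * Illustrative sketch (not part of librdkafka): fetching the committed
- * offsets for every partition associated with a group by passing a NULL
- * partition list. The reply queue `rkq`, callback `my_cb` and group name
- * "mygroup" are assumptions for the example.
- */
-static void example_fetch_all_committed(rd_kafka_broker_t *rkb,
- rd_kafka_q_t *rkq,
- rd_kafka_resp_cb_t *my_cb) {
- rd_kafka_OffsetFetchRequest(rkb, "mygroup",
- NULL /* all partitions of the group */,
- rd_true /* require stable offsets */,
- 0 /* no extra timeout */,
- RD_KAFKA_REPLYQ(rkq, 0), my_cb, NULL);
-}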
-
-
-
-/**
- * @brief Handle per-partition OffsetCommit errors and returns actions flags.
- */
-static int
-rd_kafka_handle_OffsetCommit_error(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *request,
- const rd_kafka_topic_partition_t *rktpar) {
-
- /* These actions are mimicking AK's ConsumerCoordinator.java */
-
- return rd_kafka_err_action(
- rkb, rktpar->err, request,
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
-
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
-
-
- RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
-
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
-
-
- /* .._SPECIAL: mark coordinator dead, refresh and retry */
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_SPECIAL,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
-
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_SPECIAL,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
-
- /* Replicas possibly unavailable:
- * Refresh coordinator (but don't mark as dead (!.._SPECIAL)),
- * and retry */
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
-
-
- /* FIXME: There are some cases in the Java code where
- * this is not treated as a fatal error. */
- RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_FATAL,
- RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
-
-
- RD_KAFKA_ERR_ACTION_PERMANENT,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
-
-
- RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
-
- RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
-
- RD_KAFKA_ERR_ACTION_END);
-}
-
-
-/**
- * @brief Handle OffsetCommit response.
- *
- * @remark \p offsets may be NULL if \p err is set
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if all partitions were successfully
- * committed,
- * RD_KAFKA_RESP_ERR__IN_PROGRESS if a retry was scheduled,
- * or any other error code if the request was not retried.
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetCommit(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t *offsets,
- rd_bool_t ignore_cgrp) {
- const int log_decode_errors = LOG_ERR;
- int32_t TopicArrayCnt;
- int errcnt = 0;
- int partcnt = 0;
- int i;
- int actions = 0;
-
- if (err)
- goto err;
-
- if (rd_kafka_buf_ApiVersion(rkbuf) >= 3)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
- for (i = 0; i < TopicArrayCnt; i++) {
- rd_kafkap_str_t topic;
- char *topic_str;
- int32_t PartArrayCnt;
- int j;
-
- rd_kafka_buf_read_str(rkbuf, &topic);
- rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
-
- RD_KAFKAP_STR_DUPA(&topic_str, &topic);
-
- for (j = 0; j < PartArrayCnt; j++) {
- int32_t partition;
- int16_t ErrorCode;
- rd_kafka_topic_partition_t *rktpar;
-
- rd_kafka_buf_read_i32(rkbuf, &partition);
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- rktpar = rd_kafka_topic_partition_list_find(
- offsets, topic_str, partition);
-
- if (!rktpar) {
- /* Received offset for topic/partition we didn't
- * ask for; this shouldn't really happen. */
- continue;
- }
-
- rktpar->err = ErrorCode;
- if (ErrorCode) {
- err = ErrorCode;
- errcnt++;
-
- /* Accumulate actions for per-partition
- * errors. */
- actions |= rd_kafka_handle_OffsetCommit_error(
- rkb, request, rktpar);
- }
-
- partcnt++;
- }
- }
-
- /* If all partitions failed use error code
- * from last partition as the global error. */
- if (offsets && err && errcnt == partcnt)
- goto err;
-
- goto done;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-
-err:
- if (!actions) /* Transport/Request-level error */
- actions = rd_kafka_err_action(rkb, err, request,
-
- RD_KAFKA_ERR_ACTION_REFRESH |
- RD_KAFKA_ERR_ACTION_SPECIAL |
- RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR__TRANSPORT,
-
- RD_KAFKA_ERR_ACTION_END);
-
- if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_FATAL)) {
- rd_kafka_set_fatal_error(rk, err, "OffsetCommit failed: %s",
- rd_kafka_err2str(err));
- return err;
- }
-
- if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_REFRESH) &&
- rk->rk_cgrp) {
- /* Mark coordinator dead or re-query for coordinator.
- * ..dead() will trigger a re-query. */
- if (actions & RD_KAFKA_ERR_ACTION_SPECIAL)
- rd_kafka_cgrp_coord_dead(rk->rk_cgrp, err,
- "OffsetCommitRequest failed");
- else
- rd_kafka_cgrp_coord_query(rk->rk_cgrp,
- "OffsetCommitRequest failed");
- }
-
- if (!ignore_cgrp && actions & RD_KAFKA_ERR_ACTION_RETRY &&
- !(actions & RD_KAFKA_ERR_ACTION_PERMANENT) &&
- rd_kafka_buf_retry(rkb, request))
- return RD_KAFKA_RESP_ERR__IN_PROGRESS;
-
-done:
- return err;
-}
-
-/**
- * @brief Send OffsetCommitRequest for a list of partitions.
- *
- * @param cgmetadata consumer group metadata.
- *
- * @param offsets - offsets to commit for each topic-partition.
- *
- * @returns 0 if none of the partitions in \p offsets had valid offsets,
- * else 1.
- */
-int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb,
- rd_kafka_consumer_group_metadata_t *cgmetadata,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque,
- const char *reason) {
- rd_kafka_buf_t *rkbuf;
- ssize_t of_TopicCnt = -1;
- int TopicCnt = 0;
- const char *last_topic = NULL;
- ssize_t of_PartCnt = -1;
- int PartCnt = 0;
- int tot_PartCnt = 0;
- int i;
- int16_t ApiVersion;
- int features;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_OffsetCommit, 0, 7, &features);
-
- rd_kafka_assert(NULL, offsets != NULL);
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit, 1,
- 100 + (offsets->cnt * 128));
-
- /* ConsumerGroup */
- rd_kafka_buf_write_str(rkbuf, cgmetadata->group_id, -1);
-
- /* v1,v2 */
- if (ApiVersion >= 1) {
- /* ConsumerGroupGenerationId */
- rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id);
- /* ConsumerId */
- rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1);
- }
-
- /* v7: GroupInstanceId */
- if (ApiVersion >= 7)
- rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id,
- -1);
-
- /* v2-4: RetentionTime */
- if (ApiVersion >= 2 && ApiVersion <= 4)
- rd_kafka_buf_write_i64(rkbuf, -1);
-
- /* Sort offsets by topic */
- rd_kafka_topic_partition_list_sort_by_topic(offsets);
-
- /* TopicArrayCnt: Will be updated when we know the number of topics. */
- of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-
- for (i = 0; i < offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
-
- /* Skip partitions with invalid offset. */
- if (rktpar->offset < 0)
- continue;
-
- if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) {
- /* New topic */
-
- /* Finalize previous PartitionCnt */
- if (PartCnt > 0)
- rd_kafka_buf_update_u32(rkbuf, of_PartCnt,
- PartCnt);
-
- /* TopicName */
- rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
- /* PartitionCnt, finalized later */
- of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
- PartCnt = 0;
- last_topic = rktpar->topic;
- TopicCnt++;
- }
-
- /* Partition */
- rd_kafka_buf_write_i32(rkbuf, rktpar->partition);
- PartCnt++;
- tot_PartCnt++;
-
- /* Offset */
- rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
-
- /* v6: KIP-320 CommittedLeaderEpoch */
- if (ApiVersion >= 6)
- rd_kafka_buf_write_i32(
- rkbuf,
- rd_kafka_topic_partition_get_leader_epoch(rktpar));
-
- /* v1: TimeStamp */
- if (ApiVersion == 1)
- rd_kafka_buf_write_i64(rkbuf, -1);
-
- /* Metadata */
- /* Java client 0.9.0 and broker <0.10.0 can't parse
- * Null metadata fields, so as a workaround we send an
- * empty string if it's Null. */
- if (!rktpar->metadata)
- rd_kafka_buf_write_str(rkbuf, "", 0);
- else
- rd_kafka_buf_write_str(rkbuf, rktpar->metadata,
- rktpar->metadata_size);
- }
-
- if (tot_PartCnt == 0) {
- /* No topic+partitions had valid offsets to commit. */
- rd_kafka_replyq_destroy(&replyq);
- rd_kafka_buf_destroy(rkbuf);
- return 0;
- }
-
- /* Finalize previous PartitionCnt */
- if (PartCnt > 0)
- rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt);
-
- /* Finalize TopicCnt */
- rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_rkb_dbg(rkb, TOPIC, "OFFSET",
- "Enqueue OffsetCommitRequest(v%d, %d/%d partition(s))): %s",
- ApiVersion, tot_PartCnt, offsets->cnt, reason);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return 1;
-}
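-
-/*
- * Illustrative sketch (not part of librdkafka): callers must handle the
- * 0 return from rd_kafka_OffsetCommitRequest(), which means no request
- * was enqueued and no response callback will fire (the replyq has
- * already been destroyed). `rkq` and `my_cb` are assumptions.
- */
-static void example_commit(rd_kafka_broker_t *rkb,
- rd_kafka_consumer_group_metadata_t *cgmetadata,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_q_t *rkq,
- rd_kafka_resp_cb_t *my_cb) {
- if (!rd_kafka_OffsetCommitRequest(rkb, cgmetadata, offsets,
- RD_KAFKA_REPLYQ(rkq, 0), my_cb,
- NULL, "example commit"))
- return; /* No valid offsets: nothing to wait for. */
-}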
-
-/**
- * @brief Construct and send OffsetDeleteRequest to \p rkb
- * with the partitions in del_grpoffsets (DeleteConsumerGroupOffsets_t*)
- * using \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @remark Only one del_grpoffsets element is supported.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb,
- /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */
- const rd_list_t *del_grpoffsets,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
- const rd_kafka_DeleteConsumerGroupOffsets_t *grpoffsets =
- rd_list_elem(del_grpoffsets, 0);
-
- rd_assert(rd_list_cnt(del_grpoffsets) == 1);
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_OffsetDelete, 0, 0, &features);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "OffsetDelete API (KIP-496) not supported "
- "by broker, requires broker version >= 2.4.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(
- rkb, RD_KAFKAP_OffsetDelete, 1,
- 2 + strlen(grpoffsets->group) + (64 * grpoffsets->partitions->cnt));
-
- /* GroupId */
- rd_kafka_buf_write_str(rkbuf, grpoffsets->group, -1);
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- rd_kafka_buf_write_topic_partitions(
- rkbuf, grpoffsets->partitions,
- rd_false /*don't skip invalid offsets*/, rd_false /*any offset*/,
- fields);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
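-
-/*
- * Illustrative sketch (not part of librdkafka): on synchronous failure
- * the errstr buffer passed to rd_kafka_OffsetDeleteRequest() above is
- * filled in with a human readable reason. Remember that exactly one
- * del_grpoffsets element is supported. `my_cb` is an assumption.
- */
-static rd_kafka_resp_err_t
-example_offset_delete(rd_kafka_broker_t *rkb,
- const rd_list_t *del_grpoffsets,
- rd_kafka_AdminOptions_t *options,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *my_cb) {
- char errstr[256];
- rd_kafka_resp_err_t err;
-
- err = rd_kafka_OffsetDeleteRequest(rkb, del_grpoffsets, options,
- errstr, sizeof(errstr), replyq,
- my_cb, NULL);
- if (err)
- rd_rkb_dbg(rkb, ADMIN, "OFFSETDELETE",
- "OffsetDelete not sent: %s", errstr);
- return err;
-}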
-
-
-
-/**
- * @brief Write "consumer" protocol type MemberState for SyncGroupRequest to
- * enveloping buffer \p env_rkbuf.
- */
-static void
-rd_kafka_group_MemberState_consumer_write(rd_kafka_buf_t *env_rkbuf,
- const rd_kafka_group_member_t *rkgm) {
- rd_kafka_buf_t *rkbuf;
- rd_slice_t slice;
-
- rkbuf = rd_kafka_buf_new(1, 100);
- rd_kafka_buf_write_i16(rkbuf, 0); /* Version */
- rd_assert(rkgm->rkgm_assignment);
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- rd_kafka_buf_write_topic_partitions(
- rkbuf, rkgm->rkgm_assignment,
- rd_false /*don't skip invalid offsets*/, rd_false /* any offset */,
- fields);
- rd_kafka_buf_write_kbytes(rkbuf, rkgm->rkgm_userdata);
-
- /* Get pointer to binary buffer */
- rd_slice_init_full(&slice, &rkbuf->rkbuf_buf);
-
- /* Write binary buffer as Kafka Bytes to enveloping buffer. */
- rd_kafka_buf_write_i32(env_rkbuf, (int32_t)rd_slice_remains(&slice));
- rd_buf_write_slice(&env_rkbuf->rkbuf_buf, &slice);
-
- rd_kafka_buf_destroy(rkbuf);
-}
-
-/**
- * Send SyncGroupRequest
- */
-void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *group_id,
- int32_t generation_id,
- const rd_kafkap_str_t *member_id,
- const rd_kafkap_str_t *group_instance_id,
- const rd_kafka_group_member_t *assignments,
- int assignment_cnt,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int i;
- int16_t ApiVersion;
- int features;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_SyncGroup, 0, 3, &features);
-
- rkbuf = rd_kafka_buf_new_request(
- rkb, RD_KAFKAP_SyncGroup, 1,
- RD_KAFKAP_STR_SIZE(group_id) + 4 /* GenerationId */ +
- RD_KAFKAP_STR_SIZE(member_id) +
- RD_KAFKAP_STR_SIZE(group_instance_id) +
- 4 /* array size group_assignment */ +
- (assignment_cnt * 100 /*guess*/));
- rd_kafka_buf_write_kstr(rkbuf, group_id);
- rd_kafka_buf_write_i32(rkbuf, generation_id);
- rd_kafka_buf_write_kstr(rkbuf, member_id);
- if (ApiVersion >= 3)
- rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
- rd_kafka_buf_write_i32(rkbuf, assignment_cnt);
-
- for (i = 0; i < assignment_cnt; i++) {
- const rd_kafka_group_member_t *rkgm = &assignments[i];
-
- rd_kafka_buf_write_kstr(rkbuf, rkgm->rkgm_member_id);
- rd_kafka_group_MemberState_consumer_write(rkbuf, rkgm);
- }
-
- /* This is a blocking request */
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
- rd_kafka_buf_set_abs_timeout(
- rkbuf,
- rkb->rkb_rk->rk_conf.group_session_timeout_ms +
- 3000 /* 3s grace period*/,
- 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-/**
- * Send JoinGroupRequest
- */
-void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *group_id,
- const rd_kafkap_str_t *member_id,
- const rd_kafkap_str_t *group_instance_id,
- const rd_kafkap_str_t *protocol_type,
- const rd_list_t *topics,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_assignor_t *rkas;
- int i;
- int16_t ApiVersion = 0;
- int features;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_JoinGroup, 0, 5, &features);
-
-
- rkbuf = rd_kafka_buf_new_request(
- rkb, RD_KAFKAP_JoinGroup, 1,
- RD_KAFKAP_STR_SIZE(group_id) + 4 /* sessionTimeoutMs */ +
- 4 /* rebalanceTimeoutMs */ + RD_KAFKAP_STR_SIZE(member_id) +
- RD_KAFKAP_STR_SIZE(group_instance_id) +
- RD_KAFKAP_STR_SIZE(protocol_type) +
- 4 /* array count GroupProtocols */ +
- (rd_list_cnt(topics) * 100));
- rd_kafka_buf_write_kstr(rkbuf, group_id);
- rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.group_session_timeout_ms);
- if (ApiVersion >= 1)
- rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.max_poll_interval_ms);
- rd_kafka_buf_write_kstr(rkbuf, member_id);
- if (ApiVersion >= 5)
- rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
- rd_kafka_buf_write_kstr(rkbuf, protocol_type);
- rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.enabled_assignor_cnt);
-
- RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) {
- rd_kafkap_bytes_t *member_metadata;
- if (!rkas->rkas_enabled)
- continue;
- rd_kafka_buf_write_kstr(rkbuf, rkas->rkas_protocol_name);
- member_metadata = rkas->rkas_get_metadata_cb(
- rkas, rk->rk_cgrp->rkcg_assignor_state, topics,
- rk->rk_cgrp->rkcg_group_assignment);
- rd_kafka_buf_write_kbytes(rkbuf, member_metadata);
- rd_kafkap_bytes_destroy(member_metadata);
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- if (ApiVersion < 1 &&
- rk->rk_conf.max_poll_interval_ms >
- rk->rk_conf.group_session_timeout_ms &&
- rd_interval(&rkb->rkb_suppress.unsupported_kip62,
- /* at most once per day */
- (rd_ts_t)86400 * 1000 * 1000, 0) > 0)
- rd_rkb_log(rkb, LOG_NOTICE, "MAXPOLL",
- "Broker does not support KIP-62 "
- "(requires Apache Kafka >= v0.10.1.0): "
- "consumer configuration "
- "`max.poll.interval.ms` (%d) "
- "is effectively limited "
- "by `session.timeout.ms` (%d) "
- "with this broker version",
- rk->rk_conf.max_poll_interval_ms,
- rk->rk_conf.group_session_timeout_ms);
-
-
- if (ApiVersion < 5 && rk->rk_conf.group_instance_id &&
- rd_interval(&rkb->rkb_suppress.unsupported_kip345,
- /* at most once per day */
- (rd_ts_t)86400 * 1000 * 1000, 0) > 0)
- rd_rkb_log(rkb, LOG_NOTICE, "STATICMEMBER",
- "Broker does not support KIP-345 "
- "(requires Apache Kafka >= v2.3.0): "
- "consumer configuration "
- "`group.instance.id` (%s) "
- "will not take effect",
- rk->rk_conf.group_instance_id);
-
- /* Absolute timeout */
- rd_kafka_buf_set_abs_timeout_force(
- rkbuf,
- /* Request timeout is max.poll.interval.ms + grace
- * if the broker supports it, else
- * session.timeout.ms + grace. */
- (ApiVersion >= 1 ? rk->rk_conf.max_poll_interval_ms
- : rk->rk_conf.group_session_timeout_ms) +
- 3000 /* 3s grace period*/,
- 0);
-
- /* This is a blocking request */
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-/**
- * Send LeaveGroupRequest
- */
-void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb,
- const char *group_id,
- const char *member_id,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_LeaveGroup, 0, 1, &features);
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup, 1, 300);
-
- rd_kafka_buf_write_str(rkbuf, group_id, -1);
- rd_kafka_buf_write_str(rkbuf, member_id, -1);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- /* LeaveGroupRequests are best-effort, the local consumer
- * does not care if it succeeds or not, so the request timeout
- * is shortened.
- * Retries are not needed. */
- rd_kafka_buf_set_abs_timeout(rkbuf, 5000, 0);
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-/**
- * Handler for LeaveGroup responses
- * opaque must be the cgrp handle.
- */
-void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_cgrp_t *rkcg = opaque;
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode = 0;
- int actions;
-
- if (err) {
- ErrorCode = err;
- goto err;
- }
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
-err:
- actions = rd_kafka_err_action(rkb, ErrorCode, request,
- RD_KAFKA_ERR_ACTION_END);
-
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- /* Re-query for coordinator */
- rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
- RD_KAFKA_OP_COORD_QUERY, ErrorCode);
- }
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- if (rd_kafka_buf_retry(rkb, request))
- return;
- /* FALLTHRU */
- }
-
- if (ErrorCode)
- rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
- "LeaveGroup response: %s",
- rd_kafka_err2str(ErrorCode));
-
- return;
-
-err_parse:
- ErrorCode = rkbuf->rkbuf_err;
- goto err;
-}
-
-
-
-/**
- * Send HeartbeatRequest
- */
-void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *group_id,
- int32_t generation_id,
- const rd_kafkap_str_t *member_id,
- const rd_kafkap_str_t *group_instance_id,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_Heartbeat, 0, 3, &features);
-
- rd_rkb_dbg(rkb, CGRP, "HEARTBEAT",
- "Heartbeat for group \"%s\" generation id %" PRId32,
- group_id->str, generation_id);
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat, 1,
- RD_KAFKAP_STR_SIZE(group_id) +
- 4 /* GenerationId */ +
- RD_KAFKAP_STR_SIZE(member_id));
-
- rd_kafka_buf_write_kstr(rkbuf, group_id);
- rd_kafka_buf_write_i32(rkbuf, generation_id);
- rd_kafka_buf_write_kstr(rkbuf, member_id);
- if (ApiVersion >= 3)
- rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_buf_set_abs_timeout(
- rkbuf, rkb->rkb_rk->rk_conf.group_session_timeout_ms, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-}
-
-
-
-/**
- * @brief Construct and send ListGroupsRequest to \p rkb
- * with the states (const char *) in \p states.
- * Uses \p max_ApiVersion as maximum API version,
- * pass -1 to use the maximum available version.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @return NULL on success, a new error instance that must be
- * released with rd_kafka_error_destroy() in case of error.
- */
-rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb,
- int16_t max_ApiVersion,
- const char **states,
- size_t states_cnt,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- size_t i;
-
- if (max_ApiVersion < 0)
- max_ApiVersion = 4;
-
- if (max_ApiVersion > ApiVersion) {
- /* Remark: max_ApiVersion being zero is deliberately not
- * checked for here, as rd_kafka_broker_ApiVersion_supported()
- * cannot be queried reliably from the application thread. */
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_ListGroups, 0, max_ApiVersion, NULL);
- }
-
- if (ApiVersion == -1) {
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "ListGroupsRequest not supported by broker");
- }
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_ListGroups, 1,
- /* rd_kafka_buf_write_arraycnt_pos + tags + StatesFilter */
- 4 + 1 + 32 * states_cnt, ApiVersion >= 3 /* is_flexver */);
-
- if (ApiVersion >= 4) {
- size_t of_GroupsArrayCnt =
- rd_kafka_buf_write_arraycnt_pos(rkbuf);
- for (i = 0; i < states_cnt; i++) {
- rd_kafka_buf_write_str(rkbuf, states[i], -1);
- }
- rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, i);
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
- return NULL;
-}
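-
-/*
- * Illustrative sketch (not part of librdkafka): listing only groups in
- * particular states. The filter is only serialized for ApiVersion >= 4
- * above; on older brokers all groups are returned. `rkq` and `my_cb`
- * are assumptions for the example.
- */
-static rd_kafka_error_t *example_list_groups(rd_kafka_broker_t *rkb,
- rd_kafka_q_t *rkq,
- rd_kafka_resp_cb_t *my_cb) {
- const char *states[] = {"Stable", "Empty"};
-
- return rd_kafka_ListGroupsRequest(rkb, -1, states,
- RD_ARRAYSIZE(states),
- RD_KAFKA_REPLYQ(rkq, 0),
- my_cb, NULL);
-}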
-
-/**
- * @brief Construct and send DescribeGroupsRequest to \p rkb
- * with the groups (const char *) in \p groups.
- * Uses \p max_ApiVersion as maximum API version,
- * pass -1 to use the maximum available version.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @return NULL on success, a new error instance that must be
- * released with rd_kafka_error_destroy() in case of error.
- */
-rd_kafka_error_t *rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb,
- int16_t max_ApiVersion,
- char **groups,
- size_t group_cnt,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- size_t of_GroupsArrayCnt;
-
- if (max_ApiVersion < 0)
- max_ApiVersion = 4;
-
- if (max_ApiVersion > ApiVersion) {
- /* Remark: max_ApiVersion being zero is deliberately not
- * checked for here, as rd_kafka_broker_ApiVersion_supported()
- * cannot be queried reliably from the application thread. */
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DescribeGroups, 0, max_ApiVersion, NULL);
- }
-
- if (ApiVersion == -1) {
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "DescribeGroupsRequest not supported by broker");
- }
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_DescribeGroups, 1,
- 4 /* rd_kafka_buf_write_arraycnt_pos */ +
- 1 /* IncludeAuthorizedOperations */ + 1 /* tags */ +
- 32 * group_cnt /* Groups */,
- rd_false);
-
- /* write Groups */
- of_GroupsArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
- rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, group_cnt);
- while (group_cnt-- > 0)
- rd_kafka_buf_write_str(rkbuf, groups[group_cnt], -1);
-
- /* write IncludeAuthorizedOperations */
- if (ApiVersion >= 3) {
- /* TODO: implement KIP-430 */
- rd_kafka_buf_write_bool(rkbuf, rd_false);
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
- return NULL;
-}
-
-/**
- * @brief Generic handler for Metadata responses
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_handle_Metadata(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_op_t *rko = opaque; /* Possibly NULL */
- struct rd_kafka_metadata *md = NULL;
- const rd_list_t *topics = request->rkbuf_u.Metadata.topics;
- int actions;
-
- rd_kafka_assert(NULL, err == RD_KAFKA_RESP_ERR__DESTROY ||
- thrd_is_current(rk->rk_thread));
-
- /* Avoid metadata updates when we're terminating. */
- if (rd_kafka_terminating(rkb->rkb_rk) ||
- err == RD_KAFKA_RESP_ERR__DESTROY) {
- /* Terminating */
- goto done;
- }
-
- if (err)
- goto err;
-
- if (!topics)
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "===== Received metadata: %s =====",
- request->rkbuf_u.Metadata.reason);
- else
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "===== Received metadata "
- "(for %d requested topics): %s =====",
- rd_list_cnt(topics),
- request->rkbuf_u.Metadata.reason);
-
- err = rd_kafka_parse_Metadata(rkb, request, rkbuf, &md);
- if (err)
- goto err;
-
- if (rko && rko->rko_replyq.q) {
- /* Reply to metadata requester, passing on the metadata.
- * Reuse requesting rko for the reply. */
- rko->rko_err = err;
- rko->rko_u.metadata.md = md;
-
- rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
- rko = NULL;
- } else {
- if (md)
- rd_free(md);
- }
-
- goto done;
-
-err:
- actions = rd_kafka_err_action(rkb, err, request,
-
- RD_KAFKA_ERR_ACTION_RETRY,
- RD_KAFKA_RESP_ERR__PARTIAL,
-
- RD_KAFKA_ERR_ACTION_END);
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- if (rd_kafka_buf_retry(rkb, request))
- return;
- /* FALLTHRU */
- } else {
- rd_rkb_log(rkb, LOG_WARNING, "METADATA",
- "Metadata request failed: %s: %s (%dms): %s",
- request->rkbuf_u.Metadata.reason,
- rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000),
- rd_kafka_actions2str(actions));
- /* Respond back to caller on non-retriable errors */
- if (rko && rko->rko_replyq.q) {
- rko->rko_err = err;
- rko->rko_u.metadata.md = NULL;
- rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
- rko = NULL;
- }
- }
-
-
-
- /* FALLTHRU */
-
-done:
- if (rko)
- rd_kafka_op_destroy(rko);
-}
-
-
-/**
- * @brief Construct MetadataRequest (does not send)
- *
- * \p topics is a list of topic names (char *) to request.
- *
- * !topics - only request brokers (if supported by broker, else
- * all topics)
- * topics.cnt==0 - all topics in cluster are requested
- * topics.cnt >0 - only specified topics are requested
- *
- * @param reason - metadata request reason
- * @param allow_auto_create_topics - allow broker-side auto topic creation.
- * This is best-effort, depending on broker
- * config and version.
- * @param cgrp_update - Update cgrp in parse_Metadata (see comment there).
- * @param rko - (optional) rko with replyq for handling response.
- * Specifying an rko forces a metadata request even if
- * there is already a matching one in-transit.
- *
- * If full metadata for all topics is requested (or all brokers, which
- * results in all-topics on older brokers) and there is already a full request
- * in transit then this function will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
- * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. If \p rko is non-NULL the request
- * is sent regardless.
- */
-rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *topics,
- const char *reason,
- rd_bool_t allow_auto_create_topics,
- rd_bool_t cgrp_update,
- rd_kafka_op_t *rko) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- size_t of_TopicArrayCnt;
- int features;
- int topic_cnt = topics ? rd_list_cnt(topics) : 0;
- int *full_incr = NULL;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_Metadata, 0, 9, &features);
-
- rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_Metadata, 1,
- 4 + (50 * topic_cnt) + 1,
- ApiVersion >= 9);
-
- if (!reason)
- reason = "";
-
- rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason);
- rkbuf->rkbuf_u.Metadata.cgrp_update = cgrp_update;
-
- /* TopicArrayCnt */
- of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
-
- if (!topics) {
- /* v0: keep 0, brokers only not available,
- * request all topics */
- /* v1-8: 0 means empty array, brokers only */
- if (ApiVersion >= 9) {
- /* v9+: varint encoded empty array (1), brokers only */
- rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt,
- topic_cnt);
- }
-
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "Request metadata for brokers only: %s", reason);
- full_incr =
- &rkb->rkb_rk->rk_metadata_cache.rkmc_full_brokers_sent;
-
- } else if (topic_cnt == 0) {
- /* v0: keep 0, request all topics */
- if (ApiVersion >= 1 && ApiVersion < 9) {
- /* v1-8: update to -1, all topics */
- rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, -1);
- }
- /* v9+: keep 0, varint encoded null, all topics */
-
- rkbuf->rkbuf_u.Metadata.all_topics = 1;
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "Request metadata for all topics: "
- "%s",
- reason);
-
- if (!rko)
- full_incr = &rkb->rkb_rk->rk_metadata_cache
- .rkmc_full_topics_sent;
-
- } else {
- /* request cnt topics */
- rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt,
- topic_cnt);
-
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "Request metadata for %d topic(s): "
- "%s",
- topic_cnt, reason);
- }
-
- if (full_incr) {
- /* Avoid multiple outstanding full requests
- * (since they are redundant and side-effect-less).
- * Forced requests (app using metadata() API) are passed
- * through regardless. */
-
- mtx_lock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
- if (*full_incr > 0 && (!rko || !rko->rko_u.metadata.force)) {
- mtx_unlock(
- &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
- rd_rkb_dbg(rkb, METADATA, "METADATA",
- "Skipping metadata request: %s: "
- "full request already in-transit",
- reason);
- rd_kafka_buf_destroy(rkbuf);
- return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
- }
-
- (*full_incr)++;
- mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
- rkbuf->rkbuf_u.Metadata.decr = full_incr;
- rkbuf->rkbuf_u.Metadata.decr_lock =
- &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock;
- }
-
-
- if (topic_cnt > 0) {
- char *topic;
- int i;
-
- /* Maintain a copy of the topics list so we can purge
- * hints from the metadata cache on error. */
- rkbuf->rkbuf_u.Metadata.topics =
- rd_list_copy(topics, rd_list_string_copy, NULL);
-
- RD_LIST_FOREACH(topic, topics, i) {
- rd_kafka_buf_write_str(rkbuf, topic, -1);
- /* Tags for previous topic */
- rd_kafka_buf_write_tags(rkbuf);
- }
- }
-
- if (ApiVersion >= 4) {
- /* AllowAutoTopicCreation */
- rd_kafka_buf_write_bool(rkbuf, allow_auto_create_topics);
-
- } else if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER &&
- !rkb->rkb_rk->rk_conf.allow_auto_create_topics &&
- rd_kafka_conf_is_modified(&rkb->rkb_rk->rk_conf,
- "allow.auto.create.topics") &&
- rd_interval(
- &rkb->rkb_rk->rk_suppress.allow_auto_create_topics,
- 30 * 60 * 1000 /* every 30 minutes */, 0) >= 0) {
- /* Let user know we can't obey allow.auto.create.topics */
- rd_rkb_log(rkb, LOG_WARNING, "AUTOCREATE",
- "allow.auto.create.topics=false not supported "
- "by broker: requires broker version >= 0.11.0.0: "
- "requested topic(s) may be auto created depending "
- "on broker auto.create.topics.enable configuration");
- }
-
- if (ApiVersion >= 8 && ApiVersion < 10) {
- /* TODO: implement KIP-430 */
- /* IncludeClusterAuthorizedOperations */
- rd_kafka_buf_write_bool(rkbuf, rd_false);
- }
-
- if (ApiVersion >= 8) {
- /* TODO: implement KIP-430 */
- /* IncludeTopicAuthorizedOperations */
- rd_kafka_buf_write_bool(rkbuf, rd_false);
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- /* Metadata requests are part of the important control plane
- * and should go before most other requests (Produce, Fetch, etc). */
- rkbuf->rkbuf_prio = RD_KAFKA_PRIO_HIGH;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf,
- /* Handle response thru rk_ops,
- * but forward parsed result to
- * rko's replyq when done. */
- RD_KAFKA_REPLYQ(rkb->rkb_rk->rk_ops, 0),
- rd_kafka_handle_Metadata, rko);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
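-
-
-/*
- * Illustrative sketch (not part of librdkafka): the topic-selection modes
- * described above map onto the \p topics argument as follows. `mytopics`
- * is assumed to be an initialized, non-empty rd_list_t of char * names.
- */
-static void example_metadata_modes(rd_kafka_broker_t *rkb,
- rd_list_t *mytopics) {
- /* Brokers only (all topics on brokers without v1+ support): */
- rd_kafka_MetadataRequest(rkb, NULL, "brokers only", rd_false,
- rd_false /* no cgrp update */, NULL);
-
- /* Only the listed topics: */
- rd_kafka_MetadataRequest(rkb, mytopics, "example topics", rd_false,
- rd_false /* no cgrp update */, NULL);
-}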
-
-
-
-/**
- * @brief Parses and handles ApiVersion reply.
- *
- * @param apis will be allocated, populated and sorted
- * with broker's supported APIs, or set to NULL.
- * @param api_cnt will be set to the number of elements in \p *apis
- *
- * @returns 0 on success, else an error.
- *
- * @remark A valid \p apis might be returned even if an error is returned.
- */
-rd_kafka_resp_err_t
-rd_kafka_handle_ApiVersion(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- struct rd_kafka_ApiVersion **apis,
- size_t *api_cnt) {
- const int log_decode_errors = LOG_DEBUG;
- int32_t ApiArrayCnt;
- int16_t ErrorCode;
- int i = 0;
-
- *apis = NULL;
- *api_cnt = 0;
-
- if (err)
- goto err;
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- err = ErrorCode;
-
- rd_kafka_buf_read_arraycnt(rkbuf, &ApiArrayCnt, 1000);
- if (err && ApiArrayCnt < 1) {
- /* Version >=3 returns the ApiVersions array if the error
- * code is ERR_UNSUPPORTED_VERSION; previous versions don't. */
- goto err;
- }
-
- rd_rkb_dbg(rkb, FEATURE, "APIVERSION", "Broker API support:");
-
- *apis = rd_malloc(sizeof(**apis) * ApiArrayCnt);
-
- for (i = 0; i < ApiArrayCnt; i++) {
- struct rd_kafka_ApiVersion *api = &(*apis)[i];
-
- rd_kafka_buf_read_i16(rkbuf, &api->ApiKey);
- rd_kafka_buf_read_i16(rkbuf, &api->MinVer);
- rd_kafka_buf_read_i16(rkbuf, &api->MaxVer);
-
- rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
- " ApiKey %s (%hd) Versions %hd..%hd",
- rd_kafka_ApiKey2str(api->ApiKey), api->ApiKey,
- api->MinVer, api->MaxVer);
-
- /* Discard struct tags */
- rd_kafka_buf_skip_tags(rkbuf);
- }
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- /* Discard end tags */
- rd_kafka_buf_skip_tags(rkbuf);
-
- *api_cnt = ApiArrayCnt;
- qsort(*apis, *api_cnt, sizeof(**apis), rd_kafka_ApiVersion_key_cmp);
-
- goto done;
-
-err_parse:
- /* If the broker does not support our ApiVersionRequest version it
- * will respond with a version 0 response, which will most likely
- * fail parsing. Instead of propagating the parse error we
- * propagate the original error, unless there isn't one in which case
- * we use the parse error. */
- if (!err)
- err = rkbuf->rkbuf_err;
-err:
- /* There are no retryable errors. */
-
- if (*apis)
- rd_free(*apis);
-
- *apis = NULL;
- *api_cnt = 0;
-
-done:
- return err;
-}
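-
-
-/*
- * Illustrative sketch (not part of librdkafka): because \p *apis is
- * qsort()ed with rd_kafka_ApiVersion_key_cmp above, a later lookup for a
- * given ApiKey can bsearch() (from <stdlib.h>) with the same comparator:
- */
-static const struct rd_kafka_ApiVersion *
-example_find_api(const struct rd_kafka_ApiVersion *apis,
- size_t api_cnt,
- int16_t ApiKey) {
- struct rd_kafka_ApiVersion skel = {0};
-
- skel.ApiKey = ApiKey;
- return bsearch(&skel, apis, api_cnt, sizeof(*apis),
- rd_kafka_ApiVersion_key_cmp);
-}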
-
-
-
-/**
- * @brief Send ApiVersionRequest (KIP-35)
- *
- * @param ApiVersion If -1 use the highest supported version, else use the
- * specified value.
- */
-void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb,
- int16_t ApiVersion,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
-
- if (ApiVersion == -1)
- ApiVersion = 3;
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_ApiVersion, 1, 3, ApiVersion >= 3 /*flexver*/);
-
- if (ApiVersion >= 3) {
- /* KIP-511 adds software name and version through the optional
- * protocol fields defined in KIP-482. */
-
- /* ClientSoftwareName */
- rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_name, -1);
-
- /* ClientSoftwareVersion */
- rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_version,
- -1);
- }
-
- /* Should be sent before any other requests since it is part of
- * the initial connection handshake. */
- rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;
-
- /* Non-supporting brokers will tear down the connection when they
- * receive an unknown API request, so don't retry the request on failure. */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- /* 0.9.0.x brokers will not close the connection on unsupported
- * API requests, so we minimize the timeout for the request.
- * This is a regression on the broker part. */
- rd_kafka_buf_set_abs_timeout(
- rkbuf, rkb->rkb_rk->rk_conf.api_version_request_timeout_ms, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- if (replyq.q)
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
- opaque);
- else /* in broker thread */
- rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
-}
-
-
-/**
- * Send SaslHandshakeRequest (KIP-43)
- */
-void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb,
- const char *mechanism,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int mechlen = (int)strlen(mechanism);
- int16_t ApiVersion;
- int features;
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake, 1,
- RD_KAFKAP_STR_SIZE0(mechlen));
-
- /* Should be sent before any other requests since it is part of
- * the initial connection handshake. */
- rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;
-
- rd_kafka_buf_write_str(rkbuf, mechanism, mechlen);
-
- /* Non-supporting brokers will tear down the connection when they
- * receive an unknown API request or when the SASL GSSAPI
- * token type is not recognized, so don't retry the request on failure. */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- /* 0.9.0.x brokers will not close the connection on unsupported
- * API requests, so we minimize the timeout of the request.
- * This is a regression on the broker part. */
- if (!rkb->rkb_rk->rk_conf.api_version_request &&
- rkb->rkb_rk->rk_conf.socket_timeout_ms > 10 * 1000)
- rd_kafka_buf_set_abs_timeout(rkbuf, 10 * 1000 /*10s*/, 0);
-
- /* ApiVersion 1 / RD_KAFKA_FEATURE_SASL_REQ enables
- * the SaslAuthenticateRequest */
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_SaslHandshake, 0, 1, &features);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- if (replyq.q)
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
- opaque);
- else /* in broker thread */
- rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
-}
-
-
-/**
- * @brief Parses and handles a SaslAuthenticate reply.
- *
- * On failure the broker connection is failed via rd_kafka_broker_fail().
- *
- * @locality broker thread
- * @locks none
- */
-void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int16_t error_code;
- rd_kafkap_str_t error_str;
- rd_kafkap_bytes_t auth_data;
- char errstr[512];
-
- if (err) {
- rd_snprintf(errstr, sizeof(errstr),
- "SaslAuthenticateRequest failed: %s",
- rd_kafka_err2str(err));
- goto err;
- }
-
- rd_kafka_buf_read_i16(rkbuf, &error_code);
- rd_kafka_buf_read_str(rkbuf, &error_str);
-
- if (error_code) {
- /* Authentication failed */
-
- /* For backwards compatibility translate the
- * new broker-side auth error code to our local error code. */
- if (error_code == RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
- err = RD_KAFKA_RESP_ERR__AUTHENTICATION;
- else
- err = error_code;
-
- rd_snprintf(errstr, sizeof(errstr), "%.*s",
- RD_KAFKAP_STR_PR(&error_str));
- goto err;
- }
-
- rd_kafka_buf_read_bytes(rkbuf, &auth_data);
-
- /* Pass SASL auth frame to SASL handler */
- if (rd_kafka_sasl_recv(rkb->rkb_transport, auth_data.data,
- (size_t)RD_KAFKAP_BYTES_LEN(&auth_data), errstr,
- sizeof(errstr)) == -1) {
- err = RD_KAFKA_RESP_ERR__AUTHENTICATION;
- goto err;
- }
-
- return;
-
-
-err_parse:
- err = rkbuf->rkbuf_err;
- rd_snprintf(errstr, sizeof(errstr),
- "SaslAuthenticateResponse parsing failed: %s",
- rd_kafka_err2str(err));
-
-err:
- rd_kafka_broker_fail(rkb, LOG_ERR, err, "SASL authentication error: %s",
- errstr);
-}
-
-
-/**
- * @brief Send SaslAuthenticateRequest (KIP-152)
- */
-void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb,
- const void *buf,
- size_t size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslAuthenticate, 0, 0);
-
- /* Should be sent before any other requests since it is part of
- * the initial connection handshake. */
- rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;
-
- /* Broker does not support -1 (Null) for this field */
- rd_kafka_buf_write_bytes(rkbuf, buf ? buf : "", size);
-
- /* There are no errors that can be retried; instead,
- * close down the connection and reconnect on failure. */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- if (replyq.q)
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
- opaque);
- else /* in broker thread */
- rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
-}
-
-
-
-/**
- * @struct Hold temporary result and return values from ProduceResponse
- */
-struct rd_kafka_Produce_result {
- int64_t offset; /**< Assigned offset of first message */
- int64_t timestamp; /**< (Possibly assigned) timestamp of first message */
-};
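-
-/* Illustrative initialization sketch (mirrors the usage further down in
- * this file, not an additional definition): callers pre-set sentinel
- * values so that acks=0 and error paths report sane metadata:
- *
- * struct rd_kafka_Produce_result result = {
- * .offset = RD_KAFKA_OFFSET_INVALID, .timestamp = -1};
- */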
-
-/**
- * @brief Parses a Produce reply.
- * @returns 0 on success or an error code on failure.
- * @locality broker thread
- */
-static rd_kafka_resp_err_t
-rd_kafka_handle_Produce_parse(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- struct rd_kafka_Produce_result *result) {
- int32_t TopicArrayCnt;
- int32_t PartitionArrayCnt;
- struct {
- int32_t Partition;
- int16_t ErrorCode;
- int64_t Offset;
- } hdr;
- const int log_decode_errors = LOG_ERR;
- int64_t log_start_offset = -1;
-
- rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
- if (TopicArrayCnt != 1)
- goto err;
-
- /* Since we only produce to a single topic+partition in each
- * request we assume that the reply only contains one topic+partition
- * and that it is the same one we requested.
- * If not, the broker is buggy. */
- rd_kafka_buf_skip_str(rkbuf);
- rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt);
-
- if (PartitionArrayCnt != 1)
- goto err;
-
- rd_kafka_buf_read_i32(rkbuf, &hdr.Partition);
- rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode);
- rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
-
- result->offset = hdr.Offset;
-
- result->timestamp = -1;
- if (request->rkbuf_reqhdr.ApiVersion >= 2)
- rd_kafka_buf_read_i64(rkbuf, &result->timestamp);
-
- if (request->rkbuf_reqhdr.ApiVersion >= 5)
- rd_kafka_buf_read_i64(rkbuf, &log_start_offset);
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1) {
- int32_t Throttle_Time;
- rd_kafka_buf_read_i32(rkbuf, &Throttle_Time);
-
- rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
- Throttle_Time);
- }
-
-
- return hdr.ErrorCode;
-
-err_parse:
- return rkbuf->rkbuf_err;
-err:
- return RD_KAFKA_RESP_ERR__BAD_MSG;
-}
-
-
-/**
- * @struct Hold temporary Produce error state
- */
-struct rd_kafka_Produce_err {
- rd_kafka_resp_err_t err; /**< Error code */
- int actions; /**< Actions to take */
- int incr_retry; /**< Increase per-message retry cnt */
- rd_kafka_msg_status_t status; /**< Message persistence status */
-
- /* Idempotent Producer */
- int32_t next_ack_seq; /**< Next expected sequence to ack */
- int32_t next_err_seq; /**< Next expected error sequence */
- rd_bool_t update_next_ack; /**< Update next_ack_seq */
- rd_bool_t update_next_err; /**< Update next_err_seq */
- rd_kafka_pid_t rktp_pid; /**< Partition's current PID */
- int32_t last_seq; /**< Last sequence in current batch */
-};
-
-
-/**
- * @brief Error-handling for Idempotent Producer-specific Produce errors.
- *
- * May update \p perr->err, \p perr->actions and \p perr->incr_retry.
- *
- * The resulting \p actionsp are handled by the caller.
- *
- * @warning May be called on the old leader thread. Lock rktp appropriately!
- *
- * @locality broker thread (but not necessarily the leader broker)
- * @locks none
- */
-static void
-rd_kafka_handle_idempotent_Produce_error(rd_kafka_broker_t *rkb,
- rd_kafka_msgbatch_t *batch,
- struct rd_kafka_Produce_err *perr) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_toppar_t *rktp = batch->rktp;
- rd_kafka_msg_t *firstmsg, *lastmsg;
- int r;
- rd_ts_t now = rd_clock(), state_age;
- struct rd_kafka_toppar_err last_err;
-
- rd_kafka_rdlock(rkb->rkb_rk);
- state_age = now - rkb->rkb_rk->rk_eos.ts_idemp_state;
- rd_kafka_rdunlock(rkb->rkb_rk);
-
- firstmsg = rd_kafka_msgq_first(&batch->msgq);
- lastmsg = rd_kafka_msgq_last(&batch->msgq);
- rd_assert(firstmsg && lastmsg);
-
- /* Store the last msgid of the batch
- * on the first message in case we need to retry
- * and thus reconstruct the entire batch. */
- if (firstmsg->rkm_u.producer.last_msgid) {
- /* last_msgid already set, make sure it
- * actually points to the last message. */
- rd_assert(firstmsg->rkm_u.producer.last_msgid ==
- lastmsg->rkm_u.producer.msgid);
- } else {
- firstmsg->rkm_u.producer.last_msgid =
- lastmsg->rkm_u.producer.msgid;
- }
-
- if (!rd_kafka_pid_eq(batch->pid, perr->rktp_pid)) {
- /* Don't retry if PID changed since we can't
- * guarantee correctness across PID sessions. */
- perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
- perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
-
- rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "ERRPID",
- "%.*s [%" PRId32
- "] PID mismatch: "
- "request %s != partition %s: "
- "failing messages with error %s",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_pid2str(batch->pid),
- rd_kafka_pid2str(perr->rktp_pid),
- rd_kafka_err2str(perr->err));
- return;
- }
-
- /*
- * Special error handling
- */
- switch (perr->err) {
- case RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER:
- /* Compare request's sequence to expected next
- * acked sequence.
- *
- * Example requests in flight:
- * R1(base_seq:5) R2(10) R3(15) R4(20)
- */
-
- /* Acquire the last partition error to help
- * troubleshoot this problem. */
- rd_kafka_toppar_lock(rktp);
- last_err = rktp->rktp_last_err;
- rd_kafka_toppar_unlock(rktp);
-
- r = batch->first_seq - perr->next_ack_seq;
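-
- /* Worked example (illustrative numbers): with the in-flight
- * requests above and next_ack_seq = 5, a failing R1 gives
- * r = 5 - 5 = 0 (head of line: fatal desynchronization),
- * a failing R2 gives r = 10 - 5 > 0 (skipped sequence numbers:
- * safe to retry after an epoch bump), and r < 0 means a rewound
- * sequence number (fatal). These are the three cases below. */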
-
- if (r == 0) {
- /* R1 failed:
- * If this was the head-of-line request in-flight it
- * means there is a state desynchronization between the
- * producer and broker (a bug), in which case
- * we'll raise a fatal error since we can no longer
- * reason about the state of messages and thus
- * cannot guarantee ordering or once-ness for R1,
- * nor give the user a chance to opt out of sending
- * R2 to R4 which would be retried automatically. */
-
- rd_kafka_idemp_set_fatal_error(
- rk, perr->err,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to sequence desynchronization with "
- "broker %" PRId32 " (%s, base seq %" PRId32
- ", "
- "idemp state change %" PRId64
- "ms ago, "
- "last partition error %s (actions %s, "
- "base seq %" PRId32 "..%" PRId32
- ", base msgid %" PRIu64 ", %" PRId64 "ms ago)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid,
- rd_kafka_pid2str(batch->pid), batch->first_seq,
- state_age / 1000, rd_kafka_err2name(last_err.err),
- rd_kafka_actions2str(last_err.actions),
- last_err.base_seq, last_err.last_seq,
- last_err.base_msgid,
- last_err.ts ? (now - last_err.ts) / 1000 : -1);
-
- perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
- perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_true;
-
- } else if (r > 0) {
- /* R2 failed:
- * With max.in.flight > 1 we can have a situation
- * where the first request in-flight (R1) to the broker
- * fails, which causes the subsequent requests
- * that are in-flight to have a non-sequential
- * sequence number and thus fail.
- * But these subsequent requests (R2 to R4) are not at
- * risk of being duplicated, so we bump the epoch and
- * re-enqueue the messages for later retry
- * (without incrementing retries).
- */
- rd_rkb_dbg(
- rkb, MSG | RD_KAFKA_DBG_EOS, "ERRSEQ",
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to skipped sequence numbers "
- "(%s, base seq %" PRId32
- " > "
- "next seq %" PRId32
- ") "
- "caused by previous failed request "
- "(%s, actions %s, "
- "base seq %" PRId32 "..%" PRId32
- ", base msgid %" PRIu64 ", %" PRId64
- "ms ago): "
- "recovering and retrying",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_pid2str(batch->pid), batch->first_seq,
- perr->next_ack_seq, rd_kafka_err2name(last_err.err),
- rd_kafka_actions2str(last_err.actions),
- last_err.base_seq, last_err.last_seq,
- last_err.base_msgid,
- last_err.ts ? (now - last_err.ts) / 1000 : -1);
-
- perr->incr_retry = 0;
- perr->actions = RD_KAFKA_ERR_ACTION_RETRY;
- perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_true;
-
- rd_kafka_idemp_drain_epoch_bump(
- rk, perr->err, "skipped sequence numbers");
-
- } else {
- /* The request's sequence is less than the next ack;
- * this should never happen unless we have a
- * local bug or the broker did not respond
- * to the requests in order. */
- rd_kafka_idemp_set_fatal_error(
- rk, perr->err,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "with rewound sequence number on "
- "broker %" PRId32
- " (%s, "
- "base seq %" PRId32 " < next seq %" PRId32
- "): "
- "last error %s (actions %s, "
- "base seq %" PRId32 "..%" PRId32
- ", base msgid %" PRIu64 ", %" PRId64 "ms ago)",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid,
- rd_kafka_pid2str(batch->pid), batch->first_seq,
- perr->next_ack_seq, rd_kafka_err2name(last_err.err),
- rd_kafka_actions2str(last_err.actions),
- last_err.base_seq, last_err.last_seq,
- last_err.base_msgid,
- last_err.ts ? (now - last_err.ts) / 1000 : -1);
-
- perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
- perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_false;
- }
- break;
-
- case RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER:
- /* This error indicates that we successfully produced
- * this set of messages before but this (supposed) retry failed.
- *
- * Treat as success; however, the offset and timestamp
- * will be invalid. */
-
- /* Future improvement/FIXME:
- * But first make sure the first message has actually
- * been retried; getting this error for a non-retried message
- * indicates a synchronization issue or bug. */
- rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "DUPSEQ",
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to duplicate sequence number: "
- "previous send succeeded but was not acknowledged "
- "(%s, base seq %" PRId32
- "): "
- "marking the messages successfully delivered",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_pid2str(batch->pid), batch->first_seq);
-
- /* Void error, delivery succeeded */
- perr->err = RD_KAFKA_RESP_ERR_NO_ERROR;
- perr->actions = 0;
- perr->status = RD_KAFKA_MSG_STATUS_PERSISTED;
- perr->update_next_ack = rd_true;
- perr->update_next_err = rd_true;
- break;
-
- case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
- /* The broker/cluster lost track of our PID because
- * the last message we produced has now been deleted
- * (by DeleteRecords, compaction, or topic retention policy).
- *
- * If all previous messages are accounted for and this is not
- * a retry we can simply bump the epoch and reset the sequence
- * number and then retry the message(s) again.
- *
- * If there are outstanding messages not yet acknowledged
- * then there is no safe way to carry on without risking
- * duplication or reordering, in which case we fail
- * the producer.
- *
- * In case of the transactional producer and a transaction
- * coordinator that supports KIP-360 (>= AK 2.5, checked from
- * the txnmgr, not here) we'll raise an abortable error and
- * flag that the epoch needs to be bumped on the coordinator. */
- if (rd_kafka_is_transactional(rk)) {
- rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID",
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to unknown producer id "
- "(%s, base seq %" PRId32
- ", %d retries): "
- "failing the current transaction",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_pid2str(batch->pid),
- batch->first_seq,
- firstmsg->rkm_u.producer.retries);
-
- /* Drain outstanding requests and bump epoch. */
- rd_kafka_idemp_drain_epoch_bump(rk, perr->err,
- "unknown producer id");
-
- rd_kafka_txn_set_abortable_error_with_bump(
- rk, RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to unknown producer id",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq));
-
- perr->incr_retry = 0;
- perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
- perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_true;
- break;
-
- } else if (!firstmsg->rkm_u.producer.retries &&
- perr->next_err_seq == batch->first_seq) {
- rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID",
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to unknown producer id "
- "(%s, base seq %" PRId32
- ", %d retries): "
- "no risk of duplication/reordering: "
- "resetting PID and retrying",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_pid2str(batch->pid),
- batch->first_seq,
- firstmsg->rkm_u.producer.retries);
-
- /* Drain outstanding requests and bump epoch. */
- rd_kafka_idemp_drain_epoch_bump(rk, perr->err,
- "unknown producer id");
-
- perr->incr_retry = 0;
- perr->actions = RD_KAFKA_ERR_ACTION_RETRY;
- perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_true;
- break;
- }
-
- rd_kafka_idemp_set_fatal_error(
- rk, perr->err,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed "
- "due to unknown producer id ("
- "broker %" PRId32 " %s, base seq %" PRId32
- ", %d retries): "
- "unable to retry without risking "
- "duplication/reordering",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, rd_kafka_msgq_len(&batch->msgq),
- rkb->rkb_nodeid, rd_kafka_pid2str(batch->pid),
- batch->first_seq, firstmsg->rkm_u.producer.retries);
-
- perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
- perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_true;
- break;
-
- default:
- /* All other errors are handled in the standard
- * error Produce handler, which will set
- * update_next_ack|err accordingly. */
- break;
- }
-}
-
-
-
-/**
- * @brief Error-handling for failed ProduceRequests
- *
- * @param perr Produce error state holding the input and output error;
- * it may be changed by this function.
- *
- * @returns 0 if no further processing of the request (such as
- * triggering delivery reports) should be performed, else 1.
- *
- * @warning May be called on the old leader thread. Lock rktp appropriately!
- *
- * @warning \p request may be NULL.
- *
- * @locality broker thread (but not necessarily the leader broker)
- * @locks none
- */
-static int rd_kafka_handle_Produce_error(rd_kafka_broker_t *rkb,
- const rd_kafka_buf_t *request,
- rd_kafka_msgbatch_t *batch,
- struct rd_kafka_Produce_err *perr) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_toppar_t *rktp = batch->rktp;
- int is_leader;
-
- if (unlikely(perr->err == RD_KAFKA_RESP_ERR__DESTROY))
- return 0; /* Terminating */
-
- /* When there is a partition leader change any outstanding
- * requests to the old broker will be handled by the old
- * broker thread when the responses are received/timeout:
- * in this case we need to be careful with locking:
- * check once if we're the leader (which allows relaxed
- * locking), and cache the current rktp's eos state vars. */
- rd_kafka_toppar_lock(rktp);
- is_leader = rktp->rktp_broker == rkb;
- perr->rktp_pid = rktp->rktp_eos.pid;
- perr->next_ack_seq = rktp->rktp_eos.next_ack_seq;
- perr->next_err_seq = rktp->rktp_eos.next_err_seq;
- rd_kafka_toppar_unlock(rktp);
-
- /* All failures are initially treated as if the message
- * was not persisted, but the status may be changed later
- * for specific errors and actions. */
- perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
-
- /* Set actions for known errors (may be overridden later),
- * all other errors are considered permanent failures.
- * (also see rd_kafka_err_action() for the default actions). */
- perr->actions = rd_kafka_err_action(
- rkb, perr->err, request,
-
- RD_KAFKA_ERR_ACTION_REFRESH |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
- RD_KAFKA_RESP_ERR__TRANSPORT,
-
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
-
- RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
-
- RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
-
- RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS,
-
- RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
- RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND,
-
- RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE,
-
- RD_KAFKA_ERR_ACTION_RETRY |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
- RD_KAFKA_RESP_ERR__TIMED_OUT,
-
- RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
- RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
-
- /* All Idempotent Producer-specific errors are
- * initially set as permanent errors;
- * special handling may change the actions. */
- RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
- RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
-
- RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
- RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER,
-
- RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
-
- RD_KAFKA_ERR_ACTION_PERMANENT |
- RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
- RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH,
-
- /* Message was purged from out-queue due to
- * Idempotent Producer Id change */
- RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__RETRY,
-
- RD_KAFKA_ERR_ACTION_END);
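-
- /* Reading one mapping above as an example: a request failing
- * with RD_KAFKA_RESP_ERR__TRANSPORT yields
- * REFRESH|MSG_POSSIBLY_PERSISTED, i.e. refresh metadata and
- * retry, while reporting the messages as possibly persisted
- * since the broker may have received the request. */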
-
- rd_rkb_dbg(rkb, MSG, "MSGSET",
- "%s [%" PRId32
- "]: MessageSet with %i message(s) "
- "(MsgId %" PRIu64 ", BaseSeq %" PRId32
- ") "
- "encountered error: %s (actions %s)%s",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq), batch->first_msgid,
- batch->first_seq, rd_kafka_err2str(perr->err),
- rd_kafka_actions2str(perr->actions),
- is_leader ? "" : " [NOT LEADER]");
-
-
- /*
- * Special handling for Idempotent Producer
- *
- * Note: Idempotent Producer-specific errors received
- * on a non-idempotent producer will be passed through
- * directly to the application.
- */
- if (rd_kafka_is_idempotent(rk))
- rd_kafka_handle_idempotent_Produce_error(rkb, batch, perr);
-
- /* Update message persistence status based on action flags.
- * None of these are typically set after an idempotent error,
- * which sets the status explicitly. */
- if (perr->actions & RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED)
- perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- else if (perr->actions & RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED)
- perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- else if (perr->actions & RD_KAFKA_ERR_ACTION_MSG_PERSISTED)
- perr->status = RD_KAFKA_MSG_STATUS_PERSISTED;
-
- /* Save the last error for debugging subsequent errors,
- * useful for Idempotent Producer troubleshooting. */
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_last_err.err = perr->err;
- rktp->rktp_last_err.actions = perr->actions;
- rktp->rktp_last_err.ts = rd_clock();
- rktp->rktp_last_err.base_seq = batch->first_seq;
- rktp->rktp_last_err.last_seq = perr->last_seq;
- rktp->rktp_last_err.base_msgid = batch->first_msgid;
- rd_kafka_toppar_unlock(rktp);
-
- /*
- * Handle actions
- */
- if (perr->actions &
- (RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY)) {
- /* Retry (refresh also implies retry) */
-
- if (perr->actions & RD_KAFKA_ERR_ACTION_REFRESH) {
- /* Request metadata information update.
- * These errors imply that we have stale
- * information and the request was
- * either rejected or not sent -
- * we don't need to increment the retry count
- * when we perform a retry since:
- * - it is a temporary error (hopefully)
- * - there is no chance of duplicate delivery
- */
- rd_kafka_toppar_leader_unavailable(rktp, "produce",
- perr->err);
-
- /* We can't be certain the request wasn't
- * sent in case of transport failure,
- * so the ERR__TRANSPORT case will need
- * the retry count to be increased.
- * In case of certain other errors we want to
- * avoid retrying for the duration of
- * message.timeout.ms to speed up error propagation. */
- if (perr->err != RD_KAFKA_RESP_ERR__TRANSPORT &&
- perr->err != RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
- perr->incr_retry = 0;
- }
-
- /* If the message timed out in queue, not in transit,
- * we will retry at a later time but not increment
- * the retry count since there is no risk
- * of duplicates. */
- if (!rd_kafka_buf_was_sent(request))
- perr->incr_retry = 0;
-
- if (!perr->incr_retry) {
- /* If retries are not to be incremented then
- * there is no chance of duplicates on retry, which
- * means these messages were not persisted. */
- perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- }
-
- if (rd_kafka_is_idempotent(rk)) {
- /* Any currently in-flight requests will
- * fail with ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
- * which should not be treated as a fatal error
- * since this request and subsequent requests
- * will be retried and thus return to order.
- * Unless the error was a timeout, or similar,
- * in which case the request might have made it
- * and the messages are considered possibly persisted:
- * in this case we allow the next in-flight response
- * to be successful, in which case we mark
- * this request's messages as successfully delivered. */
- if (perr->status &
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED)
- perr->update_next_ack = rd_true;
- else
- perr->update_next_ack = rd_false;
- perr->update_next_err = rd_true;
-
- /* Drain outstanding requests so that retries
- * are attempted with proper state knowledge and
- * without any in-flight requests. */
- rd_kafka_toppar_lock(rktp);
- rd_kafka_idemp_drain_toppar(rktp,
- "drain before retrying");
- rd_kafka_toppar_unlock(rktp);
- }
-
- /* Since requests are specific to a broker
- * we move the retryable messages from the request
- * back to the partition queue (prepend) and then
- * let the new broker construct a new request.
- * While doing this we also make sure the retry count
- * for each message is honoured, any messages that
- * would exceeded the retry count will not be
- * moved but instead fail below. */
- rd_kafka_toppar_retry_msgq(rktp, &batch->msgq, perr->incr_retry,
- perr->status);
-
- if (rd_kafka_msgq_len(&batch->msgq) == 0) {
- /* No need to do anything more with the request
- * here since the request no longer has any
- * messages associated with it. */
- return 0;
- }
- }
-
- if (perr->actions & RD_KAFKA_ERR_ACTION_PERMANENT &&
- rd_kafka_is_idempotent(rk)) {
- if (rd_kafka_is_transactional(rk) &&
- perr->err == RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) {
- /* Producer was fenced by new transactional producer
- * with the same transactional.id */
- rd_kafka_txn_set_fatal_error(
- rk, RD_DO_LOCK, RD_KAFKA_RESP_ERR__FENCED,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed: %s "
- "(broker %" PRId32 " %s, base seq %" PRId32
- "): "
- "transactional producer fenced by newer "
- "producer instance",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_err2str(perr->err), rkb->rkb_nodeid,
- rd_kafka_pid2str(batch->pid), batch->first_seq);
-
- /* Drain outstanding requests and reset PID. */
- rd_kafka_idemp_drain_reset(
- rk, "fenced by new transactional producer");
-
- } else if (rd_kafka_is_transactional(rk)) {
- /* When transactional any permanent produce failure
- * would lead to an incomplete transaction, so raise
- * an abortable transaction error. */
- rd_kafka_txn_set_abortable_error(
- rk, perr->err,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed: %s "
- "(broker %" PRId32 " %s, base seq %" PRId32
- "): "
- "current transaction must be aborted",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_err2str(perr->err), rkb->rkb_nodeid,
- rd_kafka_pid2str(batch->pid), batch->first_seq);
-
- } else if (rk->rk_conf.eos.gapless) {
- /* A permanent non-idempotent error will lead to
- * gaps in the message series, the next request
- * will fail with ...ERR_OUT_OF_ORDER_SEQUENCE_NUMBER.
- * To satisfy the gapless guarantee we need to raise
- * a fatal error here. */
- rd_kafka_idemp_set_fatal_error(
- rk, RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE,
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) failed: "
- "%s (broker %" PRId32 " %s, base seq %" PRId32
- "): "
- "unable to satisfy gap-less guarantee",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq),
- rd_kafka_err2str(perr->err), rkb->rkb_nodeid,
- rd_kafka_pid2str(batch->pid), batch->first_seq);
-
- /* Drain outstanding requests and reset PID. */
- rd_kafka_idemp_drain_reset(
- rk, "unable to satisfy gap-less guarantee");
-
- } else {
- /* If gapless is not set we bump the Epoch and
- * renumber the messages to send. */
-
- /* Drain outstanding requests and bump the epoch. */
- rd_kafka_idemp_drain_epoch_bump(rk, perr->err,
- "message sequence gap");
- }
-
- perr->update_next_ack = rd_false;
- /* Make sure the next error will not raise a fatal error. */
- perr->update_next_err = rd_true;
- }
-
- if (perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT ||
- perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) {
- /* Translate request-level timeout error code
- * to message-level timeout error code. */
- perr->err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
- } else if (perr->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) {
- /* If we're no longer authorized to access the topic mark
- * it as errored to deny further produce requests. */
- rd_kafka_topic_wrlock(rktp->rktp_rkt);
- rd_kafka_topic_set_error(rktp->rktp_rkt, perr->err);
- rd_kafka_topic_wrunlock(rktp->rktp_rkt);
- }
-
- return 1;
-}
-
-/**
- * @brief Handle ProduceResponse success for idempotent producer
- *
- * @warning May be called on the old leader thread. Lock rktp appropriately!
- *
- * @locks none
- * @locality broker thread (but not necessarily the leader broker thread)
- */
-static void
-rd_kafka_handle_idempotent_Produce_success(rd_kafka_broker_t *rkb,
- rd_kafka_msgbatch_t *batch,
- int32_t next_seq) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_toppar_t *rktp = batch->rktp;
- char fatal_err[512];
- uint64_t first_msgid, last_msgid;
-
- *fatal_err = '\0';
-
- first_msgid = rd_kafka_msgq_first(&batch->msgq)->rkm_u.producer.msgid;
- last_msgid = rd_kafka_msgq_last(&batch->msgq)->rkm_u.producer.msgid;
-
- rd_kafka_toppar_lock(rktp);
-
- /* If the last acked msgid is higher than
- * the next message to (re)transmit in the message queue
- * it means a previous series of R1,R2 ProduceRequests
- * had R1 fail with uncertain persistence status,
- * such as timeout or transport error, but R2 succeeded,
- * which means the messages in R1 were in fact persisted.
- * In this case trigger delivery reports for all messages
- * in queue until we hit a non-acked message msgid. */
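-
- /* Worked example (illustrative msgids): if R1 carried msgids 1..5
- * and timed out while this batch R2 with msgids 6..10 succeeded,
- * acked_msgid is still 0 < first_msgid - 1 = 5, so the implicit
- * ack below triggers delivery reports for R1's messages too. */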
- if (unlikely(rktp->rktp_eos.acked_msgid < first_msgid - 1)) {
- rd_kafka_dr_implicit_ack(rkb, rktp, last_msgid);
-
- } else if (unlikely(batch->first_seq != rktp->rktp_eos.next_ack_seq &&
- batch->first_seq == rktp->rktp_eos.next_err_seq)) {
- /* Out-of-order responses are typically not a concern
- * (and do not happen with current broker versions),
- * unless we're expecting an error to be returned at
- * this sequence rather than a success ack, in which
- * case we raise a fatal error. */
-
- /* Can't call set_fatal_error() while
- * holding the toppar lock, so construct
- * the error string here and call
- * set_fatal_error() below after
- * the toppar lock has been released. */
- rd_snprintf(fatal_err, sizeof(fatal_err),
- "ProduceRequest for %.*s [%" PRId32
- "] "
- "with %d message(s) "
- "succeeded when expecting failure "
- "(broker %" PRId32
- " %s, "
- "base seq %" PRId32
- ", "
- "next ack seq %" PRId32
- ", "
- "next err seq %" PRId32
- ": "
- "unable to retry without risking "
- "duplication/reordering",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid,
- rd_kafka_pid2str(batch->pid), batch->first_seq,
- rktp->rktp_eos.next_ack_seq,
- rktp->rktp_eos.next_err_seq);
-
- rktp->rktp_eos.next_err_seq = next_seq;
- }
-
- if (likely(!*fatal_err)) {
- /* Advance next expected err and/or ack sequence */
-
- /* Only step err seq if it hasn't diverged. */
- if (rktp->rktp_eos.next_err_seq == rktp->rktp_eos.next_ack_seq)
- rktp->rktp_eos.next_err_seq = next_seq;
-
- rktp->rktp_eos.next_ack_seq = next_seq;
- }
-
- /* Store the last acked message sequence:
- * since retries within the broker cache window (5 requests)
- * will succeed for older messages, we must only update the
- * acked msgid if it is higher than the last acked. */
- if (last_msgid > rktp->rktp_eos.acked_msgid)
- rktp->rktp_eos.acked_msgid = last_msgid;
-
- rd_kafka_toppar_unlock(rktp);
-
- /* Must call set_fatal_error() after releasing
- * the toppar lock. */
- if (unlikely(*fatal_err))
- rd_kafka_idemp_set_fatal_error(
- rk, RD_KAFKA_RESP_ERR__INCONSISTENT, "%s", fatal_err);
-}
-
-
-/**
- * @brief Handle ProduceRequest result for a message batch.
- *
- * @warning \p request may be NULL.
- *
- * @locality broker thread (but not necessarily the toppar's handler thread)
- * @locks none
- */
-static void rd_kafka_msgbatch_handle_Produce_result(
- rd_kafka_broker_t *rkb,
- rd_kafka_msgbatch_t *batch,
- rd_kafka_resp_err_t err,
- const struct rd_kafka_Produce_result *presult,
- const rd_kafka_buf_t *request) {
-
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_toppar_t *rktp = batch->rktp;
- rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- rd_bool_t last_inflight;
- int32_t next_seq;
-
- /* Decrease partition's messages in-flight counter */
- rd_assert(rd_atomic32_get(&rktp->rktp_msgs_inflight) >=
- rd_kafka_msgq_len(&batch->msgq));
- last_inflight = !rd_atomic32_sub(&rktp->rktp_msgs_inflight,
- rd_kafka_msgq_len(&batch->msgq));
-
- /* Next expected sequence (and handle wrap) */
- next_seq = rd_kafka_seq_wrap(batch->first_seq +
- rd_kafka_msgq_len(&batch->msgq));
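-
- /* Worked example (illustrative numbers, assuming rd_kafka_seq_wrap()
- * masks the sum into the non-negative int32 range): first_seq =
- * 2147483645 with a 5-message batch yields next_seq = 2 rather
- * than a negative overflowed value. */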
-
- if (likely(!err)) {
- rd_rkb_dbg(rkb, MSG, "MSGSET",
- "%s [%" PRId32
- "]: MessageSet with %i message(s) "
- "(MsgId %" PRIu64 ", BaseSeq %" PRId32 ") delivered",
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_msgq_len(&batch->msgq), batch->first_msgid,
- batch->first_seq);
-
- if (rktp->rktp_rkt->rkt_conf.required_acks != 0)
- status = RD_KAFKA_MSG_STATUS_PERSISTED;
-
- if (rd_kafka_is_idempotent(rk))
- rd_kafka_handle_idempotent_Produce_success(rkb, batch,
- next_seq);
- } else {
- /* Error handling */
- struct rd_kafka_Produce_err perr = {
- .err = err,
- .incr_retry = 1,
- .status = status,
- .update_next_ack = rd_true,
- .update_next_err = rd_true,
- .last_seq = (batch->first_seq +
- rd_kafka_msgq_len(&batch->msgq) - 1)};
-
- rd_kafka_handle_Produce_error(rkb, request, batch, &perr);
-
- /* Update next expected acked and/or err sequence. */
- if (perr.update_next_ack || perr.update_next_err) {
- rd_kafka_toppar_lock(rktp);
- if (perr.update_next_ack)
- rktp->rktp_eos.next_ack_seq = next_seq;
- if (perr.update_next_err)
- rktp->rktp_eos.next_err_seq = next_seq;
- rd_kafka_toppar_unlock(rktp);
- }
-
- err = perr.err;
- status = perr.status;
- }
-
-
- /* Messages to retry will have been removed from the request's queue */
- if (likely(rd_kafka_msgq_len(&batch->msgq) > 0)) {
- /* Set offset, timestamp and status for each message. */
- rd_kafka_msgq_set_metadata(&batch->msgq, rkb->rkb_nodeid,
- presult->offset, presult->timestamp,
- status);
-
- /* Enqueue messages for delivery report. */
- rd_kafka_dr_msgq(rktp->rktp_rkt, &batch->msgq, err);
- }
-
- if (rd_kafka_is_idempotent(rk) && last_inflight)
- rd_kafka_idemp_inflight_toppar_sub(rk, rktp);
-}
-
-
-/**
- * @brief Handle ProduceResponse
- *
- * @param reply is NULL when `acks=0` and on various local errors.
- *
- * @remark ProduceRequests are never retried; retriable errors are
- * instead handled by re-enqueuing the request's messages back
- * on the partition queue to have a new ProduceRequest constructed
- * eventually.
- *
- * @warning May be called on the old leader thread. Lock rktp appropriately!
- *
- * @locality broker thread (but not necessarily the leader broker thread)
- */
-static void rd_kafka_handle_Produce(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *reply,
- rd_kafka_buf_t *request,
- void *opaque) {
- rd_kafka_msgbatch_t *batch = &request->rkbuf_batch;
- rd_kafka_toppar_t *rktp = batch->rktp;
- struct rd_kafka_Produce_result result = {
- .offset = RD_KAFKA_OFFSET_INVALID, .timestamp = -1};
-
- /* Unit test interface: inject errors */
- if (unlikely(rk->rk_conf.ut.handle_ProduceResponse != NULL)) {
- err = rk->rk_conf.ut.handle_ProduceResponse(
- rkb->rkb_rk, rkb->rkb_nodeid, batch->first_msgid, err);
- }
-
- /* Parse Produce reply (unless the request errored) */
- if (!err && reply)
- err = rd_kafka_handle_Produce_parse(rkb, rktp, reply, request,
- &result);
-
- rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, &result,
- request);
-}
-
-
-/**
- * @brief Send ProduceRequest for messages in toppar queue.
- *
- * @returns the number of messages included, or 0 on error / no messages.
- *
- * @locality broker thread
- */
-int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid) {
- rd_kafka_buf_t *rkbuf;
- rd_kafka_topic_t *rkt = rktp->rktp_rkt;
- size_t MessageSetSize = 0;
- int cnt;
- rd_ts_t now;
- int64_t first_msg_timeout;
- int tmout;
-
- /**
- * Create ProduceRequest with as many messages from the toppar
- * transmit queue as possible.
- */
- rkbuf = rd_kafka_msgset_create_ProduceRequest(
- rkb, rktp, &rktp->rktp_xmit_msgq, pid, epoch_base_msgid,
- &MessageSetSize);
- if (unlikely(!rkbuf))
- return 0;
-
- cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq);
- rd_dassert(cnt > 0);
-
- rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchcnt, (int64_t)cnt);
- rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchsize, (int64_t)MessageSetSize);
-
- if (!rkt->rkt_conf.required_acks)
- rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NO_RESPONSE;
-
- /* Use timeout from first message in batch */
- now = rd_clock();
- first_msg_timeout =
- (rd_kafka_msgq_first(&rkbuf->rkbuf_batch.msgq)->rkm_ts_timeout -
- now) /
- 1000;
-
- if (unlikely(first_msg_timeout <= 0)) {
- /* Message has already timed out, allow 100 ms
- * to produce anyway */
- tmout = 100;
- } else {
- tmout = (int)RD_MIN(INT_MAX, first_msg_timeout);
- }
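-
- /* Worked example (illustrative values): if the first message's
- * rkm_ts_timeout lies 2500 ms in the future, tmout becomes 2500;
- * if it has already expired, the request still gets the 100 ms
- * grace period above. */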
-
- /* Set absolute timeout (including retries), the
- * effective timeout for this specific request will be
- * capped by socket.timeout.ms */
- rd_kafka_buf_set_abs_timeout(rkbuf, tmout, now);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, RD_KAFKA_NO_REPLYQ,
- rd_kafka_handle_Produce, NULL);
-
- return cnt;
-}
-
-
-/**
- * @brief Construct and send CreateTopicsRequest to \p rkb
- * with the topics (NewTopic_t*) in \p new_topics, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *new_topics /*(NewTopic_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
- int i = 0;
- rd_kafka_NewTopic_t *newt;
- int op_timeout;
-
- if (rd_list_cnt(new_topics) == 0) {
- rd_snprintf(errstr, errstr_size, "No topics to create");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_CreateTopics, 0, 4, &features);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "Topic Admin API (KIP-4) not supported "
- "by broker, requires broker version >= 0.10.2.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- if (rd_kafka_confval_get_int(&options->validate_only) &&
- ApiVersion < 1) {
- rd_snprintf(errstr, errstr_size,
- "CreateTopics.validate_only=true not "
- "supported by broker");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateTopics, 1,
- 4 + (rd_list_cnt(new_topics) * 200) +
- 4 + 1);
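-
- /* The size estimate above (~200 bytes per topic plus the fixed
- * fields) is only a preallocation hint; the buffer grows as
- * needed when written to. */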
-
- /* #topics */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_topics));
-
- while ((newt = rd_list_elem(new_topics, i++))) {
- int partition;
- int ei = 0;
- const rd_kafka_ConfigEntry_t *entry;
-
- if (ApiVersion < 4) {
- if (newt->num_partitions == -1) {
- rd_snprintf(errstr, errstr_size,
- "Default partition count (KIP-464) "
- "not supported by broker, "
- "requires broker version <= 2.4.0");
- rd_kafka_replyq_destroy(&replyq);
- rd_kafka_buf_destroy(rkbuf);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- if (newt->replication_factor == -1 &&
- rd_list_empty(&newt->replicas)) {
- rd_snprintf(errstr, errstr_size,
- "Default replication factor "
- "(KIP-464) "
- "not supported by broker, "
- "requires broker version <= 2.4.0");
- rd_kafka_replyq_destroy(&replyq);
- rd_kafka_buf_destroy(rkbuf);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
-
- /* topic */
- rd_kafka_buf_write_str(rkbuf, newt->topic, -1);
-
- if (rd_list_cnt(&newt->replicas)) {
- /* num_partitions and replication_factor must be
- * set to -1 if a replica assignment is sent. */
- /* num_partitions */
- rd_kafka_buf_write_i32(rkbuf, -1);
- /* replication_factor */
- rd_kafka_buf_write_i16(rkbuf, -1);
- } else {
- /* num_partitions */
- rd_kafka_buf_write_i32(rkbuf, newt->num_partitions);
- /* replication_factor */
- rd_kafka_buf_write_i16(
- rkbuf, (int16_t)newt->replication_factor);
- }
-
- /* #replica_assignment */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&newt->replicas));
-
- /* Replicas per partition, see rdkafka_admin.[ch]
- * for how these are constructed. */
- for (partition = 0; partition < rd_list_cnt(&newt->replicas);
- partition++) {
- const rd_list_t *replicas;
- int ri = 0;
-
- replicas = rd_list_elem(&newt->replicas, partition);
- if (!replicas)
- continue;
-
- /* partition */
- rd_kafka_buf_write_i32(rkbuf, partition);
- /* #replicas */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(replicas));
-
- for (ri = 0; ri < rd_list_cnt(replicas); ri++) {
- /* replica */
- rd_kafka_buf_write_i32(
- rkbuf, rd_list_get_int32(replicas, ri));
- }
- }
-
- /* #config_entries */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&newt->config));
-
- RD_LIST_FOREACH(entry, &newt->config, ei) {
- /* config_name */
- rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1);
- /* config_value (nullable) */
- rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1);
- }
- }
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- rd_kafka_buf_write_i32(rkbuf, op_timeout);
-
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- if (ApiVersion >= 1) {
- /* validate_only */
- rd_kafka_buf_write_i8(
- rkbuf, rd_kafka_confval_get_int(&options->validate_only));
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Construct and send DeleteTopicsRequest to \p rkb
- * with the topics (DeleteTopic_t *) in \p del_topics, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *del_topics /*(DeleteTopic_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
- int i = 0;
- rd_kafka_DeleteTopic_t *delt;
- int op_timeout;
-
- if (rd_list_cnt(del_topics) == 0) {
- rd_snprintf(errstr, errstr_size, "No topics to delete");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DeleteTopics, 0, 1, &features);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "Topic Admin API (KIP-4) not supported "
- "by broker, requires broker version >= 0.10.2.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf =
- rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteTopics, 1,
- /* FIXME */
- 4 + (rd_list_cnt(del_topics) * 100) + 4);
-
- /* #topics */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_topics));
-
- while ((delt = rd_list_elem(del_topics, i++)))
- rd_kafka_buf_write_str(rkbuf, delt->topic, -1);
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- rd_kafka_buf_write_i32(rkbuf, op_timeout);
-
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
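-
-/* Illustrative wire layout produced by the request above (v0/v1),
- * derived from the writes and not a normative protocol reference:
- *
- * TopicCount int32
- * TopicName string (repeated TopicCount times)
- * timeout_ms int32
- */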
-
-
-/**
- * @brief Construct and send DeleteRecordsRequest to \p rkb
- * with the offsets to delete (rd_kafka_topic_partition_list_t *) in
- * \p offsets_list, using \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @remark The rd_kafka_topic_partition_list_t in \p offsets_list must already
- * be sorted.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb,
- /*(rd_kafka_topic_partition_list_t*)*/
- const rd_list_t *offsets_list,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
- const rd_kafka_topic_partition_list_t *partitions;
- int op_timeout;
-
- partitions = rd_list_elem(offsets_list, 0);
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DeleteRecords, 0, 1, &features);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "DeleteRecords Admin API (KIP-107) not supported "
- "by broker, requires broker version >= 0.11.0");
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteRecords, 1,
- 4 + (partitions->cnt * 100) + 4);
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- rd_kafka_buf_write_topic_partitions(
- rkbuf, partitions, rd_false /*don't skip invalid offsets*/,
- rd_false /*any offset*/, fields);
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- rd_kafka_buf_write_i32(rkbuf, op_timeout);
-
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Construct and send CreatePartitionsRequest to \p rkb
- * with the topics (NewPartitions_t*) in \p new_parts, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_CreatePartitionsRequest(rd_kafka_broker_t *rkb,
- /*(NewPartitions_t*)*/
- const rd_list_t *new_parts,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int i = 0;
- rd_kafka_NewPartitions_t *newp;
- int op_timeout;
-
- if (rd_list_cnt(new_parts) == 0) {
- rd_snprintf(errstr, errstr_size, "No partitions to create");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_CreatePartitions, 0, 0, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "CreatePartitions (KIP-195) not supported "
- "by broker, requires broker version >= 1.0.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreatePartitions, 1,
- 4 + (rd_list_cnt(new_parts) * 200) +
- 4 + 1);
-
- /* #topics */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_parts));
-
- while ((newp = rd_list_elem(new_parts, i++))) {
- /* topic */
- rd_kafka_buf_write_str(rkbuf, newp->topic, -1);
-
- /* New partition count */
- rd_kafka_buf_write_i32(rkbuf, (int32_t)newp->total_cnt);
-
- /* #replica_assignment */
- if (rd_list_empty(&newp->replicas)) {
- rd_kafka_buf_write_i32(rkbuf, -1);
- } else {
- const rd_list_t *replicas;
- int pi = -1;
-
- rd_kafka_buf_write_i32(rkbuf,
- rd_list_cnt(&newp->replicas));
-
- while (
- (replicas = rd_list_elem(&newp->replicas, ++pi))) {
- int ri = 0;
-
- /* replica count */
- rd_kafka_buf_write_i32(rkbuf,
- rd_list_cnt(replicas));
-
- /* replica */
- for (ri = 0; ri < rd_list_cnt(replicas); ri++) {
- rd_kafka_buf_write_i32(
- rkbuf,
- rd_list_get_int32(replicas, ri));
- }
- }
- }
- }
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- rd_kafka_buf_write_i32(rkbuf, op_timeout);
-
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- /* validate_only */
- rd_kafka_buf_write_i8(
- rkbuf, rd_kafka_confval_get_int(&options->validate_only));
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Construct and send AlterConfigsRequest to \p rkb
- * with the configs (ConfigResource_t*) in \p configs, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *configs /*(ConfigResource_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int i;
- const rd_kafka_ConfigResource_t *config;
- int op_timeout;
-
- if (rd_list_cnt(configs) == 0) {
- rd_snprintf(errstr, errstr_size,
- "No config resources specified");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_AlterConfigs, 0, 1, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "AlterConfigs (KIP-133) not supported "
- "by broker, requires broker version >= 0.11.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- /* Incremental requires IncrementalAlterConfigs */
- if (rd_kafka_confval_get_int(&options->incremental)) {
- rd_snprintf(errstr, errstr_size,
- "AlterConfigs.incremental=true (KIP-248) "
- "not supported by broker, "
- "replaced by IncrementalAlterConfigs");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_AlterConfigs, 1,
- rd_list_cnt(configs) * 200);
-
- /* #resources */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs));
-
- RD_LIST_FOREACH(config, configs, i) {
- const rd_kafka_ConfigEntry_t *entry;
- int ei;
-
- /* resource_type */
- rd_kafka_buf_write_i8(rkbuf, config->restype);
-
- /* resource_name */
- rd_kafka_buf_write_str(rkbuf, config->name, -1);
-
- /* #config */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&config->config));
-
- RD_LIST_FOREACH(entry, &config->config, ei) {
- /* config_name */
- rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1);
- /* config_value (nullable) */
- rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1);
-
- if (entry->a.operation != RD_KAFKA_ALTER_OP_SET) {
- rd_snprintf(errstr, errstr_size,
- "IncrementalAlterConfigs required "
- "for add/delete config "
- "entries: only set supported "
- "by this operation");
- rd_kafka_buf_destroy(rkbuf);
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
- }
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- /* validate_only */
- rd_kafka_buf_write_i8(
- rkbuf, rd_kafka_confval_get_int(&options->validate_only));
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Construct and send DescribeConfigsRequest to \p rkb
- * with the configs (ConfigResource_t*) in \p configs, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest(
- rd_kafka_broker_t *rkb,
- const rd_list_t *configs /*(ConfigResource_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int i;
- const rd_kafka_ConfigResource_t *config;
- int op_timeout;
-
- if (rd_list_cnt(configs) == 0) {
- rd_snprintf(errstr, errstr_size,
- "No config resources specified");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DescribeConfigs, 0, 1, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "DescribeConfigs (KIP-133) not supported "
- "by broker, requires broker version >= 0.11.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeConfigs, 1,
- rd_list_cnt(configs) * 200);
-
- /* #resources */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs));
-
- RD_LIST_FOREACH(config, configs, i) {
- const rd_kafka_ConfigEntry_t *entry;
- int ei;
-
- /* resource_type */
- rd_kafka_buf_write_i8(rkbuf, config->restype);
-
- /* resource_name */
- rd_kafka_buf_write_str(rkbuf, config->name, -1);
-
- /* #config */
- if (rd_list_empty(&config->config)) {
- /* Get all configs */
- rd_kafka_buf_write_i32(rkbuf, -1);
- } else {
- /* Get requested configs only */
- rd_kafka_buf_write_i32(rkbuf,
- rd_list_cnt(&config->config));
- }
-
- RD_LIST_FOREACH(entry, &config->config, ei) {
- /* config_name */
- rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1);
- }
- }
-
-
- if (ApiVersion == 1) {
- /* include_synonyms */
- rd_kafka_buf_write_i8(rkbuf, 1);
- }
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Construct and send DeleteGroupsRequest to \p rkb
- * with the groups (DeleteGroup_t *) in \p del_groups, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *del_groups /*(DeleteGroup_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- int features;
- int i = 0;
- rd_kafka_DeleteGroup_t *delt;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DeleteGroups, 0, 1, &features);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "DeleteGroups Admin API (KIP-229) not supported "
- "by broker, requires broker version >= 1.1.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf =
- rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteGroups, 1,
- 4 + (rd_list_cnt(del_groups) * 100) + 4);
-
- /* #groups */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_groups));
-
- while ((delt = rd_list_elem(del_groups, i++)))
- rd_kafka_buf_write_str(rkbuf, delt->group, -1);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Returns the request size needed to send a specific AclBinding
- * specified in \p acl, using the ApiVersion provided in
- * \p ApiVersion.
- *
- * @returns a size_t with the request size in bytes.
- */
-static RD_INLINE size_t
-rd_kafka_AclBinding_request_size(const rd_kafka_AclBinding_t *acl,
- int ApiVersion) {
- return 1 + 2 + (acl->name ? strlen(acl->name) : 0) + 2 +
- (acl->principal ? strlen(acl->principal) : 0) + 2 +
- (acl->host ? strlen(acl->host) : 0) + 1 + 1 +
- (ApiVersion > 0 ? 1 : 0);
-}
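-
-/*
- * For reference, the size expression above corresponds to this wire
- * layout (an editorial breakdown, derived from the expression itself):
- *   1                      resource_type (int8)
- *   2 + strlen(name)       resource_name (int16 length + bytes)
- *   2 + strlen(principal)  principal (int16 length + bytes)
- *   2 + strlen(host)       host (int16 length + bytes)
- *   1                      operation (int8)
- *   1                      permission_type (int8)
- *   1 if ApiVersion > 0    resource_pattern_type (int8, v1+)
- */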
-
-/**
- * @brief Construct and send CreateAclsRequest to \p rkb
- * with the acls (AclBinding_t*) in \p new_acls, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *new_acls /*(AclBinding_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion;
- int i;
- size_t len;
- int op_timeout;
- rd_kafka_AclBinding_t *new_acl;
-
- if (rd_list_cnt(new_acls) == 0) {
- rd_snprintf(errstr, errstr_size, "No acls to create");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_CreateAcls, 0, 1, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "ACLs Admin API (KIP-140) not supported "
- "by broker, requires broker version >= 0.11.0.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- if (ApiVersion == 0) {
- RD_LIST_FOREACH(new_acl, new_acls, i) {
- if (new_acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_LITERAL) {
- rd_snprintf(errstr, errstr_size,
- "Broker only supports LITERAL "
- "resource pattern types");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
- } else {
- RD_LIST_FOREACH(new_acl, new_acls, i) {
- if (new_acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
- new_acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_PREFIXED) {
- rd_snprintf(errstr, errstr_size,
- "Only LITERAL and PREFIXED "
- "resource patterns are supported "
- "when creating ACLs");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
- }
-
- len = 4;
- RD_LIST_FOREACH(new_acl, new_acls, i) {
- len += rd_kafka_AclBinding_request_size(new_acl, ApiVersion);
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateAcls, 1, len);
-
- /* #acls */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_acls));
-
- RD_LIST_FOREACH(new_acl, new_acls, i) {
- rd_kafka_buf_write_i8(rkbuf, new_acl->restype);
-
- rd_kafka_buf_write_str(rkbuf, new_acl->name, -1);
-
- if (ApiVersion >= 1) {
- rd_kafka_buf_write_i8(rkbuf,
- new_acl->resource_pattern_type);
- }
-
- rd_kafka_buf_write_str(rkbuf, new_acl->principal, -1);
-
- rd_kafka_buf_write_str(rkbuf, new_acl->host, -1);
-
- rd_kafka_buf_write_i8(rkbuf, new_acl->operation);
-
- rd_kafka_buf_write_i8(rkbuf, new_acl->permission_type);
- }
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
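-
-/**
- * Illustrative sketch (editorial addition, not part of the original
- * file): building an ACL binding with the public Admin API, which
- * funnels into the request constructor above. The principal, host and
- * topic name are assumptions for the example.
- */
-static void example_create_acl(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
- char errstr[256];
- rd_kafka_AclBinding_t *acl;
-
- /* ALLOW User:alice to READ topic "mytopic" from any host */
- acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, "mytopic",
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
- RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- errstr, sizeof(errstr));
-
- rd_kafka_CreateAcls(rk, &acl, 1, NULL /*default options*/, rkqu);
-
- rd_kafka_AclBinding_destroy(acl);
-}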
-
-/**
- * @brief Construct and send DescribeAclsRequest to \p rkb
- * with the acls (AclBinding_t*) in \p acls, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t rd_kafka_DescribeAclsRequest(
- rd_kafka_broker_t *rkb,
- const rd_list_t *acls /*(rd_kafka_AclBindingFilter_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- const rd_kafka_AclBindingFilter_t *acl;
- int op_timeout;
-
- if (rd_list_cnt(acls) == 0) {
- rd_snprintf(errstr, errstr_size,
- "No acl binding filters specified");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
- if (rd_list_cnt(acls) > 1) {
- rd_snprintf(errstr, errstr_size,
- "Too many acl binding filters specified");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- acl = rd_list_elem(acls, 0);
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DescribeAcls, 0, 1, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "ACLs Admin API (KIP-140) not supported "
- "by broker, requires broker version >= 0.11.0.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- if (ApiVersion == 0) {
- if (acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
- acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_ANY) {
- rd_snprintf(errstr, errstr_size,
- "Broker only supports LITERAL and ANY "
- "resource pattern types");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- } else {
- if (acl->resource_pattern_type ==
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) {
- rd_snprintf(errstr, errstr_size,
- "Filter contains UNKNOWN elements");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
-
- rkbuf = rd_kafka_buf_new_request(
- rkb, RD_KAFKAP_DescribeAcls, 1,
- rd_kafka_AclBinding_request_size(acl, ApiVersion));
-
- /* resource_type */
- rd_kafka_buf_write_i8(rkbuf, acl->restype);
-
- /* resource_name filter */
- rd_kafka_buf_write_str(rkbuf, acl->name, -1);
-
- if (ApiVersion > 0) {
- /* resource_pattern_type (rd_kafka_ResourcePatternType_t) */
- rd_kafka_buf_write_i8(rkbuf, acl->resource_pattern_type);
- }
-
- /* principal filter */
- rd_kafka_buf_write_str(rkbuf, acl->principal, -1);
-
- /* host filter */
- rd_kafka_buf_write_str(rkbuf, acl->host, -1);
-
- /* operation (rd_kafka_AclOperation_t) */
- rd_kafka_buf_write_i8(rkbuf, acl->operation);
-
- /* permission type (rd_kafka_AclPermissionType_t) */
- rd_kafka_buf_write_i8(rkbuf, acl->permission_type);
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Construct and send DeleteAclsRequest to \p rkb
- * with the acl filters (AclBindingFilter_t*) in \p del_acls, using
- * \p options.
- *
- * The response (unparsed) will be enqueued on \p replyq
- * for handling by \p resp_cb (with \p opaque passed).
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *del_acls /*(AclBindingFilter_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- const rd_kafka_AclBindingFilter_t *acl;
- int op_timeout;
- int i;
- size_t len;
-
- if (rd_list_cnt(del_acls) == 0) {
- rd_snprintf(errstr, errstr_size,
- "No acl binding filters specified");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_DeleteAcls, 0, 1, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "ACLs Admin API (KIP-140) not supported "
- "by broker, requires broker version >= 0.11.0.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- len = 4;
-
- RD_LIST_FOREACH(acl, del_acls, i) {
- if (ApiVersion == 0) {
- if (acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
- acl->resource_pattern_type !=
- RD_KAFKA_RESOURCE_PATTERN_ANY) {
- rd_snprintf(errstr, errstr_size,
- "Broker only supports LITERAL "
- "and ANY resource pattern types");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- } else {
- if (acl->resource_pattern_type ==
- RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) {
- rd_snprintf(errstr, errstr_size,
- "Filter contains UNKNOWN elements");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
-
- len += rd_kafka_AclBinding_request_size(acl, ApiVersion);
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteAcls, 1, len);
-
- /* #acls */
- rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_acls));
-
- RD_LIST_FOREACH(acl, del_acls, i) {
- /* resource_type */
- rd_kafka_buf_write_i8(rkbuf, acl->restype);
-
- /* resource_name filter */
- rd_kafka_buf_write_str(rkbuf, acl->name, -1);
-
- if (ApiVersion > 0) {
- /* resource_pattern_type
- * (rd_kafka_ResourcePatternType_t) */
- rd_kafka_buf_write_i8(rkbuf,
- acl->resource_pattern_type);
- }
-
- /* principal filter */
- rd_kafka_buf_write_str(rkbuf, acl->principal, -1);
-
- /* host filter */
- rd_kafka_buf_write_str(rkbuf, acl->host, -1);
-
- /* operation (rd_kafka_AclOperation_t) */
- rd_kafka_buf_write_i8(rkbuf, acl->operation);
-
- /* permission type (rd_kafka_AclPermissionType_t) */
- rd_kafka_buf_write_i8(rkbuf, acl->permission_type);
- }
-
- /* timeout */
- op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
- if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
- rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Parses and handles an InitProducerId reply.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_handle_InitProducerId(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int16_t error_code;
- rd_kafka_pid_t pid;
-
- if (err)
- goto err;
-
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &error_code);
- if ((err = error_code))
- goto err;
-
- rd_kafka_buf_read_i64(rkbuf, &pid.id);
- rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
-
- rd_kafka_idemp_pid_update(rkb, pid);
-
- return;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return;
-
- /* Retries are performed by idempotence state handler */
- rd_kafka_idemp_request_pid_failed(rkb, err);
-}
-
-/**
- * @brief Construct and send InitProducerIdRequest to \p rkb.
- *
- * @param transactional_id may be NULL.
- * @param transaction_timeout_ms may be set to -1.
- * @param current_pid the current PID to reset, requires KIP-360. If not NULL
- * and KIP-360 is not supported by the broker this function
- * will return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE.
- *
- * The response (unparsed) will be handled by \p resp_cb served
- * by queue \p replyq.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code and errstr will be
- * updated with a human readable error string.
- */
-rd_kafka_resp_err_t
-rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- int transaction_timeout_ms,
- const rd_kafka_pid_t *current_pid,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion;
-
- if (current_pid) {
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_InitProducerId, 3, 4, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "InitProducerId (KIP-360) not supported by "
- "broker, requires broker version >= 2.5.0: "
- "unable to recover from previous "
- "transactional error");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- } else {
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_InitProducerId, 0, 4, NULL);
-
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "InitProducerId (KIP-98) not supported by "
- "broker, requires broker "
- "version >= 0.11.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
- }
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_InitProducerId, 1,
- 2 + (transactional_id ? strlen(transactional_id) : 0) + 4 + 8 + 4,
- ApiVersion >= 2 /*flexver*/);
-
- /* transactional_id */
- rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
-
- /* transaction_timeout_ms */
- rd_kafka_buf_write_i32(rkbuf, transaction_timeout_ms);
-
- if (ApiVersion >= 3) {
- /* Current PID */
- rd_kafka_buf_write_i64(rkbuf,
- current_pid ? current_pid->id : -1);
- /* Current Epoch */
- rd_kafka_buf_write_i16(rkbuf,
- current_pid ? current_pid->epoch : -1);
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- /* Let the idempotence state handler perform retries */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Construct and send AddPartitionsToTxnRequest to \p rkb.
- *
- * The response (unparsed) will be handled by \p resp_cb served
- * by queue \p replyq.
- *
- * @param rktps MUST be sorted by topic name.
- *
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code.
- */
-rd_kafka_resp_err_t
-rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- rd_kafka_pid_t pid,
- const rd_kafka_toppar_tqhead_t *rktps,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
- rd_kafka_toppar_t *rktp;
- rd_kafka_topic_t *last_rkt = NULL;
- size_t of_TopicCnt;
- ssize_t of_PartCnt = -1;
- int TopicCnt = 0, PartCnt = 0;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_AddPartitionsToTxn, 0, 0, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "AddPartitionsToTxnRequest (KIP-98) not supported "
- "by broker, requires broker version >= 0.11.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf =
- rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddPartitionsToTxn, 1, 500);
-
- /* transactional_id */
- rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
-
- /* PID */
- rd_kafka_buf_write_i64(rkbuf, pid.id);
- rd_kafka_buf_write_i16(rkbuf, pid.epoch);
-
- /* Topics/partitions array (count updated later) */
- of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-
- TAILQ_FOREACH(rktp, rktps, rktp_txnlink) {
- if (last_rkt != rktp->rktp_rkt) {
-
- if (last_rkt) {
- /* Update last topic's partition count field */
- rd_kafka_buf_update_i32(rkbuf, of_PartCnt,
- PartCnt);
- of_PartCnt = -1;
- }
-
- /* Topic name */
- rd_kafka_buf_write_kstr(rkbuf,
- rktp->rktp_rkt->rkt_topic);
- /* Partition count, updated later */
- of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
-
- PartCnt = 0;
- TopicCnt++;
- last_rkt = rktp->rktp_rkt;
- }
-
- /* Partition id */
- rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition);
- PartCnt++;
- }
-
- /* Update last partition and topic count fields */
- if (of_PartCnt != -1)
- rd_kafka_buf_update_i32(rkbuf, (size_t)of_PartCnt, PartCnt);
- rd_kafka_buf_update_i32(rkbuf, of_TopicCnt, TopicCnt);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- /* Let the handler perform retries so that it can pick
- * up more added partitions. */
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
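-
-/*
- * Editorial note on the pattern above: the topic and partition counts
- * are not known up front while iterating the sorted toppar list, so a
- * zero is written first and its buffer offset (of_TopicCnt/of_PartCnt,
- * returned by rd_kafka_buf_write_i32()) is kept so that the real count
- * can be back-patched with rd_kafka_buf_update_i32() once known.
- */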
-
-
-/**
- * @brief Construct and send AddOffsetsToTxnRequest to \p rkb.
- *
- * The response (unparsed) will be handled by \p resp_cb served
- * by queue \p replyq.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code.
- */
-rd_kafka_resp_err_t
-rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- rd_kafka_pid_t pid,
- const char *group_id,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_AddOffsetsToTxn, 0, 0, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "AddOffsetsToTxnRequest (KIP-98) not supported "
- "by broker, requires broker version >= 0.11.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf =
- rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddOffsetsToTxn, 1, 100);
-
- /* transactional_id */
- rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
-
- /* PID */
- rd_kafka_buf_write_i64(rkbuf, pid.id);
- rd_kafka_buf_write_i16(rkbuf, pid.epoch);
-
- /* Group Id */
- rd_kafka_buf_write_str(rkbuf, group_id, -1);
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief Construct and send EndTxnRequest to \p rkb.
- *
- * The response (unparsed) will be handled by \p resp_cb served
- * by queue \p replyq.
- *
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
- * transmission, otherwise an error code.
- */
-rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- rd_kafka_pid_t pid,
- rd_bool_t committed,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque) {
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion = 0;
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_EndTxn,
- 0, 1, NULL);
- if (ApiVersion == -1) {
- rd_snprintf(errstr, errstr_size,
- "EndTxnRequest (KIP-98) not supported "
- "by broker, requires broker version >= 0.11.0");
- rd_kafka_replyq_destroy(&replyq);
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_EndTxn, 1, 500);
-
- /* transactional_id */
- rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
-
- /* PID */
- rd_kafka_buf_write_i64(rkbuf, pid.id);
- rd_kafka_buf_write_i16(rkbuf, pid.epoch);
-
- /* Committed */
- rd_kafka_buf_write_bool(rkbuf, committed);
- rkbuf->rkbuf_u.EndTxn.commit = committed;
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
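-
-/*
- * For orientation (editorial note, derived from the request
- * constructors above): a transactional producer session issues these
- * requests in order:
- *   1. InitProducerIdRequest     - acquire a PID/epoch
- *   2. AddPartitionsToTxnRequest - register the produced-to partitions
- *   3. AddOffsetsToTxnRequest    - register the consumer group when
- *                                  offsets are committed transactionally
- *   4. EndTxnRequest             - commit or abort the transaction
- */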
-
-
-
-/**
- * @name Unit tests
- * @{
- *
- *
- *
- *
- */
-
-/**
- * @brief Create \p cnt messages, starting at \p msgid, and add them
- * to \p rkmq.
- *
- * @returns the number of messages added.
- */
-static int ut_create_msgs(rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) {
- int i;
-
- for (i = 0; i < cnt; i++) {
- rd_kafka_msg_t *rkm;
-
- rkm = ut_rd_kafka_msg_new(0);
- rkm->rkm_u.producer.msgid = msgid++;
- rkm->rkm_ts_enq = rd_clock();
- rkm->rkm_ts_timeout = rkm->rkm_ts_enq + (900 * 1000 * 1000);
-
- rd_kafka_msgq_enq(rkmq, rkm);
- }
-
- return cnt;
-}
-
-/**
- * @brief Idempotent Producer request/response unit tests
- *
- * The current test verifies proper handling of the following case:
- * Batch 0 succeeds
- *   Batch 1 fails with a temporary error
- *   Batches 2 and 3 fail with an out-of-order sequence error
- *   Retrying batches 1-3 should then succeed.
- */
-static int unittest_idempotent_producer(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_broker_t *rkb;
-#define _BATCH_CNT 4
-#define _MSGS_PER_BATCH 3
- const int msgcnt = _BATCH_CNT * _MSGS_PER_BATCH;
- int remaining_batches;
- uint64_t msgid = 1;
- rd_kafka_toppar_t *rktp;
- rd_kafka_pid_t pid = {.id = 1000, .epoch = 0};
- struct rd_kafka_Produce_result result = {.offset = 1,
- .timestamp = 1000};
- rd_kafka_queue_t *rkqu;
- rd_kafka_event_t *rkev;
- rd_kafka_buf_t *request[_BATCH_CNT];
- int rcnt = 0;
- int retry_msg_cnt = 0;
- int drcnt = 0;
- rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
- const char *tmp;
- int i, r;
-
- RD_UT_SAY("Verifying idempotent producer error handling");
-
- conf = rd_kafka_conf_new();
- rd_kafka_conf_set(conf, "batch.num.messages", "3", NULL, 0);
- rd_kafka_conf_set(conf, "retry.backoff.ms", "1", NULL, 0);
- if ((tmp = rd_getenv("TEST_DEBUG", NULL)))
- rd_kafka_conf_set(conf, "debug", tmp, NULL, 0);
- if (rd_kafka_conf_set(conf, "enable.idempotence", "true", NULL, 0) !=
- RD_KAFKA_CONF_OK)
- RD_UT_FAIL("Failed to enable idempotence");
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, NULL, 0);
- RD_UT_ASSERT(rk, "failed to create producer");
-
- rkqu = rd_kafka_queue_get_main(rk);
-
- /* We need a broker handle; use a logical broker to avoid
- * any connection attempts. */
- rkb = rd_kafka_broker_add_logical(rk, "unittest");
-
- /* Have the broker support everything so msgset_writer selects
- * the most up-to-date output features. */
- rd_kafka_broker_lock(rkb);
- rkb->rkb_features = RD_KAFKA_FEATURE_UNITTEST | RD_KAFKA_FEATURE_ALL;
- rd_kafka_broker_unlock(rkb);
-
- /* Get toppar */
- rktp = rd_kafka_toppar_get2(rk, "uttopic", 0, rd_false, rd_true);
- RD_UT_ASSERT(rktp, "failed to get toppar");
-
- /* Mark the topic as existing so messages are enqueued on
- * the desired rktp right away (otherwise they would end up
- * on the UA (unassigned) partition) */
- rd_ut_kafka_topic_set_topic_exists(rktp->rktp_rkt, 1, -1);
-
- /* Produce messages */
- ut_create_msgs(&rkmq, 1, msgcnt);
-
- /* Set the pid */
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_WAIT_PID);
- rd_kafka_idemp_pid_update(rkb, pid);
- pid = rd_kafka_idemp_get_pid(rk);
- RD_UT_ASSERT(rd_kafka_pid_valid(pid), "PID is invalid");
- rd_kafka_toppar_pid_change(rktp, pid, msgid);
-
- remaining_batches = _BATCH_CNT;
-
- /* Create a ProduceRequest for each batch */
- for (rcnt = 0; rcnt < remaining_batches; rcnt++) {
- size_t msize;
- request[rcnt] = rd_kafka_msgset_create_ProduceRequest(
- rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize);
- RD_UT_ASSERT(request[rcnt], "request #%d failed", rcnt);
- }
-
- RD_UT_ASSERT(rd_kafka_msgq_len(&rkmq) == 0,
- "expected input message queue to be empty, "
- "but still has %d message(s)",
- rd_kafka_msgq_len(&rkmq));
-
- /*
- * Mock handling of each request
- */
-
- /* Batch 0: accepted */
- i = 0;
- r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
- RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
- rd_kafka_msgbatch_handle_Produce_result(rkb, &request[i]->rkbuf_batch,
- RD_KAFKA_RESP_ERR_NO_ERROR,
- &result, request[i]);
- result.offset += r;
- RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == 0,
- "batch %d: expected no messages in rktp_msgq, not %d", i,
- rd_kafka_msgq_len(&rktp->rktp_msgq));
- rd_kafka_buf_destroy(request[i]);
- remaining_batches--;
-
- /* Batch 1: fail, triggering retry (re-enq on rktp_msgq) */
- i = 1;
- r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
- RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
- rd_kafka_msgbatch_handle_Produce_result(
- rkb, &request[i]->rkbuf_batch,
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, &result, request[i]);
- retry_msg_cnt += r;
- RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
- "batch %d: expected %d messages in rktp_msgq, not %d", i,
- retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
- rd_kafka_buf_destroy(request[i]);
-
- /* Batch 2: OUT_OF_ORDER, triggering retry .. */
- i = 2;
- r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
- RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
- rd_kafka_msgbatch_handle_Produce_result(
- rkb, &request[i]->rkbuf_batch,
- RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result,
- request[i]);
- retry_msg_cnt += r;
- RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
- "batch %d: expected %d messages in rktp_xmit_msgq, not %d",
- i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
- rd_kafka_buf_destroy(request[i]);
-
- /* Batch 3: OUT_OF_ORDER, triggering retry .. */
- i = 3;
- r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
- rd_kafka_msgbatch_handle_Produce_result(
- rkb, &request[i]->rkbuf_batch,
- RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result,
- request[i]);
- retry_msg_cnt += r;
- RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
- "batch %d: expected %d messages in rktp_xmit_msgq, not %d",
- i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
- rd_kafka_buf_destroy(request[i]);
-
-
- /* Retried messages will have been moved to rktp_msgq;
- * move them back to our local queue. */
- rd_kafka_toppar_lock(rktp);
- rd_kafka_msgq_move(&rkmq, &rktp->rktp_msgq);
- rd_kafka_toppar_unlock(rktp);
-
- RD_UT_ASSERT(rd_kafka_msgq_len(&rkmq) == retry_msg_cnt,
- "Expected %d messages in retry queue, not %d",
- retry_msg_cnt, rd_kafka_msgq_len(&rkmq));
-
- /* Sleep a short while to make sure the retry backoff expires. */
- rd_usleep(5 * 1000, NULL); /* 5ms */
-
- /*
- * Create requests for remaining batches.
- */
- for (rcnt = 0; rcnt < remaining_batches; rcnt++) {
- size_t msize;
- request[rcnt] = rd_kafka_msgset_create_ProduceRequest(
- rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize);
- RD_UT_ASSERT(request[rcnt],
- "Failed to create retry #%d (%d msgs in queue)",
- rcnt, rd_kafka_msgq_len(&rkmq));
- }
-
- /*
- * Mock handling of each request, they will now succeed.
- */
- for (i = 0; i < rcnt; i++) {
- r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
- rd_kafka_msgbatch_handle_Produce_result(
- rkb, &request[i]->rkbuf_batch, RD_KAFKA_RESP_ERR_NO_ERROR,
- &result, request[i]);
- result.offset += r;
- rd_kafka_buf_destroy(request[i]);
- }
-
- retry_msg_cnt = 0;
- RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
- "batch %d: expected %d messages in rktp_xmit_msgq, not %d",
- i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
-
- /*
- * Wait for delivery reports, they should all be successful.
- */
- while ((rkev = rd_kafka_queue_poll(rkqu, 1000))) {
- const rd_kafka_message_t *rkmessage;
-
- RD_UT_SAY("Got %s event with %d message(s)",
- rd_kafka_event_name(rkev),
- (int)rd_kafka_event_message_count(rkev));
-
- while ((rkmessage = rd_kafka_event_message_next(rkev))) {
- RD_UT_SAY(" DR for message: %s: (persistence=%d)",
- rd_kafka_err2str(rkmessage->err),
- rd_kafka_message_status(rkmessage));
- if (rkmessage->err)
- RD_UT_WARN(" ^ Should not have failed");
- else
- drcnt++;
- }
- rd_kafka_event_destroy(rkev);
- }
-
- /* Should be no more messages in queues */
- r = rd_kafka_outq_len(rk);
- RD_UT_ASSERT(r == 0, "expected outq to return 0, not %d", r);
-
- /* Verify the expected number of good delivery reports were seen */
- RD_UT_ASSERT(drcnt == msgcnt, "expected %d DRs, not %d", msgcnt, drcnt);
-
- rd_kafka_queue_destroy(rkqu);
- rd_kafka_toppar_destroy(rktp);
- rd_kafka_broker_destroy(rkb);
- rd_kafka_destroy(rk);
-
- RD_UT_PASS();
- return 0;
-}
-
-/**
- * @brief Request/response unit tests
- */
-int unittest_request(void) {
- int fails = 0;
-
- fails += unittest_idempotent_producer();
-
- return fails;
-}
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h
deleted file mode 100644
index 3eda6be61..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_REQUEST_H_
-#define _RDKAFKA_REQUEST_H_
-
-#include "rdkafka_cgrp.h"
-#include "rdkafka_feature.h"
-
-
-#define RD_KAFKA_ERR_ACTION_PERMANENT 0x1 /* Permanent error */
-#define RD_KAFKA_ERR_ACTION_IGNORE 0x2 /* Error can be ignored */
-#define RD_KAFKA_ERR_ACTION_REFRESH 0x4 /* Refresh state (e.g., metadata) */
-#define RD_KAFKA_ERR_ACTION_RETRY 0x8 /* Retry request after backoff */
-#define RD_KAFKA_ERR_ACTION_INFORM 0x10 /* Inform application about err */
-#define RD_KAFKA_ERR_ACTION_SPECIAL \
- 0x20 /* Special-purpose, depends on context */
-#define RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED 0x40 /* ProduceReq msg status */
-#define RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED \
- 0x80 /* ProduceReq msg status */
-#define RD_KAFKA_ERR_ACTION_MSG_PERSISTED 0x100 /* ProduceReq msg status */
-#define RD_KAFKA_ERR_ACTION_FATAL 0x200 /**< Fatal error */
-#define RD_KAFKA_ERR_ACTION_END 0 /* var-arg sentinel */
-
-/** Bitmask of the message persistence flags */
-#define RD_KAFKA_ERR_ACTION_MSG_FLAGS \
- (RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED | \
- RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED | \
- RD_KAFKA_ERR_ACTION_MSG_PERSISTED)
-
-int rd_kafka_err_action(rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- const rd_kafka_buf_t *request,
- ...);
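-
-/**
- * Usage sketch (illustrative): the var-arg list pairs an action flag
- * set with the response error(s) it applies to and must be terminated
- * with RD_KAFKA_ERR_ACTION_END, e.g.:
- *
- *   actions = rd_kafka_err_action(
- *       rkb, err, request,
- *       RD_KAFKA_ERR_ACTION_RETRY,
- *       RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
- *       RD_KAFKA_ERR_ACTION_PERMANENT,
- *       RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
- *       RD_KAFKA_ERR_ACTION_END);
- */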
-
-
-const char *rd_kafka_actions2str(int actions);
-
-
-typedef enum {
- /** Array end sentinel */
- RD_KAFKA_TOPIC_PARTITION_FIELD_END = 0,
- /** Read/write int32_t for partition */
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- /** Read/write int64_t for offset */
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- /** Read/write int32_t for offset leader_epoch */
- RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH,
- /** Read/write int32_t for current leader_epoch */
- RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH,
- /** Read/write int16_t for error code */
- RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
- /** Read/write str for metadata */
- RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA,
- /** Noop, useful for ternary ifs */
- RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP,
-} rd_kafka_topic_partition_field_t;
-
-rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions(
- rd_kafka_buf_t *rkbuf,
- size_t estimated_part_cnt,
- const rd_kafka_topic_partition_field_t *fields);
-
-int rd_kafka_buf_write_topic_partitions(
- rd_kafka_buf_t *rkbuf,
- const rd_kafka_topic_partition_list_t *parts,
- rd_bool_t skip_invalid_offsets,
- rd_bool_t only_invalid_offsets,
- const rd_kafka_topic_partition_field_t *fields);
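-
-/**
- * Usage sketch (illustrative): \p fields selects the per-partition
- * fields to serialize and is terminated with
- * RD_KAFKA_TOPIC_PARTITION_FIELD_END, e.g. partition + offset:
- *
- *   const rd_kafka_topic_partition_field_t fields[] = {
- *       RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- *       RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- *       RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- *   rd_kafka_buf_write_topic_partitions(rkbuf, parts, rd_false,
- *                                       rd_false, fields);
- */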
-
-rd_kafka_resp_err_t
-rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb,
- rd_kafka_coordtype_t coordtype,
- const char *coordkey,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_handle_ListOffsets(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t *offsets,
- int *actionsp);
-
-void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetForLeaderEpoch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t **offsets);
-void rd_kafka_OffsetForLeaderEpochRequest(
- rd_kafka_broker_t *rkb,
- rd_kafka_topic_partition_list_t *parts,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetFetch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t **offsets,
- rd_bool_t update_toppar,
- rd_bool_t add_part,
- rd_bool_t allow_retry);
-
-void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque);
-
-void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb,
- const char *group_id,
- rd_kafka_topic_partition_list_t *parts,
- rd_bool_t require_stable_offsets,
- int timeout,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_handle_OffsetCommit(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- rd_kafka_topic_partition_list_t *offsets,
- rd_bool_t ignore_cgrp);
-
-int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb,
- rd_kafka_consumer_group_metadata_t *cgmetadata,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque,
- const char *reason);
-
-rd_kafka_resp_err_t
-rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb,
- /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */
- const rd_list_t *del_grpoffsets,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-
-void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *group_id,
- const rd_kafkap_str_t *member_id,
- const rd_kafkap_str_t *group_instance_id,
- const rd_kafkap_str_t *protocol_type,
- const rd_list_t *topics,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-
-void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb,
- const char *group_id,
- const char *member_id,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque);
-
-void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *group_id,
- int32_t generation_id,
- const rd_kafkap_str_t *member_id,
- const rd_kafkap_str_t *group_instance_id,
- const rd_kafka_group_member_t *assignments,
- int assignment_cnt,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-void rd_kafka_handle_SyncGroup(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque);
-
-rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb,
- int16_t max_ApiVersion,
- const char **states,
- size_t states_cnt,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_error_t *rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb,
- int16_t max_ApiVersion,
- char **groups,
- size_t group_cnt,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-
-void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb,
- const rd_kafkap_str_t *group_id,
- int32_t generation_id,
- const rd_kafkap_str_t *member_id,
- const rd_kafkap_str_t *group_instance_id,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *topics,
- const char *reason,
- rd_bool_t allow_auto_create_topics,
- rd_bool_t cgrp_update,
- rd_kafka_op_t *rko);
-
-rd_kafka_resp_err_t
-rd_kafka_handle_ApiVersion(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- struct rd_kafka_ApiVersion **apis,
- size_t *api_cnt);
-void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb,
- int16_t ApiVersion,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb,
- const char *mechanism,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque);
-void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb,
- const void *buf,
- size_t size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const rd_kafka_pid_t pid,
- uint64_t epoch_base_msgid);
-
-rd_kafka_resp_err_t
-rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *new_topics /*(NewTopic_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *del_topics /*(DeleteTopic_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_CreatePartitionsRequest(
- rd_kafka_broker_t *rkb,
- const rd_list_t *new_parts /*(NewPartitions_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *configs /*(ConfigResource_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest(
- rd_kafka_broker_t *rkb,
- const rd_list_t *configs /*(ConfigResource_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *del_groups /*(DeleteGroup_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-void rd_kafka_handle_InitProducerId(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- int transaction_timeout_ms,
- const rd_kafka_pid_t *current_pid,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- rd_kafka_pid_t pid,
- const rd_kafka_toppar_tqhead_t *rktps,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- rd_kafka_pid_t pid,
- const char *group_id,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb,
- const char *transactional_id,
- rd_kafka_pid_t pid,
- rd_bool_t committed,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-int unittest_request(void);
-
-
-rd_kafka_resp_err_t
-rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb,
- /*(rd_kafka_topic_partition_list_t*)*/
- const rd_list_t *offsets_list,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *new_acls /*(AclBinding_t*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_DescribeAclsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *acls /*(AclBindingFilter*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb,
- const rd_list_t *del_acls /*(AclBindingFilter*)*/,
- rd_kafka_AdminOptions_t *options,
- char *errstr,
- size_t errstr_size,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
-
-
-#endif /* _RDKAFKA_REQUEST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c
deleted file mode 100644
index 6cb919364..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-
-
-/**
- * Source:
- * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
- *
- * The roundrobin assignor lays out all the available partitions and all the
- * available consumers. It then proceeds to do a roundrobin assignment from
- * partition to consumer. If the subscriptions of all consumer instances are
- * identical, then the partitions will be uniformly distributed. (i.e., the
- * partition ownership counts will be within a delta of exactly one across all
- * consumers.)
- *
- * For example, suppose there are two consumers C0 and C1, two topics t0 and
- * t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1,
- * t0p2, t1p0, t1p1, and t1p2.
- *
- * The assignment will be:
- * C0: [t0p0, t0p2, t1p1]
- * C1: [t0p1, t1p0, t1p2]
- */
-
-rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_assign_cb(
- rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas,
- const char *member_id,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- rd_kafka_assignor_topic_t **eligible_topics,
- size_t eligible_topic_cnt,
- char *errstr,
- size_t errstr_size,
- void *opaque) {
- unsigned int ti;
- int next = -1; /* Next member id */
-
- /* Sort topics by name */
- qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics),
- rd_kafka_assignor_topic_cmp);
-
- /* Sort members by name */
- qsort(members, member_cnt, sizeof(*members), rd_kafka_group_member_cmp);
-
- for (ti = 0; ti < eligible_topic_cnt; ti++) {
- rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti];
- int partition;
-
- /* For each partition of the topic, assign one member
- * (in a cyclic iteration) until the partitions are exhausted. */
- for (partition = 0;
- partition < eligible_topic->metadata->partition_cnt;
- partition++) {
- rd_kafka_group_member_t *rkgm;
-
- /* Scan through members until we find one with a
- * subscription to this topic. */
- do {
- next = (next + 1) % member_cnt;
- } while (!rd_kafka_group_member_find_subscription(
- rk, &members[next],
- eligible_topic->metadata->topic));
-
- rkgm = &members[next];
-
- rd_kafka_dbg(rk, CGRP, "ASSIGN",
- "roundrobin: Member \"%s\": "
- "assigned topic %s partition %d",
- rkgm->rkgm_member_id->str,
- eligible_topic->metadata->topic,
- partition);
-
- rd_kafka_topic_partition_list_add(
- rkgm->rkgm_assignment,
- eligible_topic->metadata->topic, partition);
- }
- }
-
-
- return 0;
-}
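-
-/*
- * Toy model (editorial illustration in standalone C, not librdkafka
- * code): it reproduces the cyclic scan above for the C0/C1 example in
- * the comment at the top of this file, assuming both consumers
- * subscribe to every topic:
- *
- *   #include <stdio.h>
- *
- *   int main(void) {
- *       const char *parts[] = {"t0p0", "t0p1", "t0p2",
- *                              "t1p0", "t1p1", "t1p2"};
- *       int member_cnt = 2, next = -1, i;
- *       for (i = 0; i < 6; i++) {
- *           next = (next + 1) % member_cnt;
- *           printf("C%d: %s\n", next, parts[i]);
- *       }
- *       return 0;
- *   }
- *
- * This prints C0: [t0p0, t0p2, t1p1] and C1: [t0p1, t1p0, t1p2],
- * matching the expected assignment documented above.
- */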
-
-
-
-/**
- * @brief Initialize and add the roundrobin assignor.
- */
-rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk) {
- return rd_kafka_assignor_add(
- rk, "consumer", "roundrobin", RD_KAFKA_REBALANCE_PROTOCOL_EAGER,
- rd_kafka_roundrobin_assignor_assign_cb,
- rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL,
- NULL, NULL);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c
deleted file mode 100644
index cab67f241..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_request.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-#include "rdkafka_request.h"
-#include "rdkafka_queue.h"
-
-/**
- * @brief Send SASL auth data using legacy framing directly on the socket.
- *
- * @warning This is a blocking call.
- */
-static int rd_kafka_sasl_send_legacy(rd_kafka_transport_t *rktrans,
- const void *payload,
- int len,
- char *errstr,
- size_t errstr_size) {
- rd_buf_t buf;
- rd_slice_t slice;
- int32_t hdr;
-
- rd_buf_init(&buf, 1 + 1, sizeof(hdr));
-
- hdr = htobe32(len);
- rd_buf_write(&buf, &hdr, sizeof(hdr));
- if (payload)
- rd_buf_push(&buf, payload, len, NULL);
-
- rd_slice_init_full(&slice, &buf);
-
- /* Simulate blocking behaviour on a non-blocking socket.
- * FIXME: This isn't optimal but is highly unlikely to stall since
- * the socket buffer will most likely not be exceeded. */
- do {
- int r;
-
- r = (int)rd_kafka_transport_send(rktrans, &slice, errstr,
- errstr_size);
- if (r == -1) {
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
- "SASL send failed: %s", errstr);
- rd_buf_destroy(&buf);
- return -1;
- }
-
- if (rd_slice_remains(&slice) == 0)
- break;
-
- /* Avoid busy-looping */
- rd_usleep(10 * 1000, NULL);
-
- } while (1);
-
- rd_buf_destroy(&buf);
-
- return 0;
-}
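-
-/*
- * For reference (editorial note): a legacy SASL frame as built above
- * is simply a 4-byte big-endian length followed by the raw payload:
- *
- *   [ int32 len (network byte order) ][ len bytes of SASL auth data ]
- *
- * A NULL payload results in an empty frame (length header only).
- */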
-
-/**
- * @brief Send auth message with framing (either legacy or Kafka framing).
- *
- * @warning This is a blocking call when used with the legacy framing.
- */
-int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans,
- const void *payload,
- int len,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
- rd_rkb_dbg(
- rkb, SECURITY, "SASL", "Send SASL %s frame to broker (%d bytes)",
- (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) ? "Kafka"
- : "legacy",
- len);
-
- /* Blocking legacy framed send directly on the socket */
- if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ))
- return rd_kafka_sasl_send_legacy(rktrans, payload, len, errstr,
- errstr_size);
-
- /* Kafka-framed asynchronous send */
- rd_kafka_SaslAuthenticateRequest(
- rkb, payload, (size_t)len, RD_KAFKA_NO_REPLYQ,
- rd_kafka_handle_SaslAuthenticate, NULL);
-
- return 0;
-}
-
-
-/**
- * @brief Authentication successful.
- *
- * Transition to next connect state.
- */
-void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans) {
- /* Authenticated */
- rd_kafka_broker_connect_up(rktrans->rktrans_rkb);
-}
-
-
-/**
- * @brief Handle SASL auth data from broker.
- *
- * @locality broker thread
- *
- * @returns -1 on error, else 0.
- */
-int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans,
- const void *buf,
- size_t len,
- char *errstr,
- size_t errstr_size) {
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
- "Received SASL frame from broker (%" PRIusz " bytes)", len);
-
- return rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider->recv(
- rktrans, buf, len, errstr, errstr_size);
-}
-
-/**
- * @brief Receive event for non-Kafka-protocol framed SASL auth data.
- *
- * @locality broker thread
- *
- * @returns -1 on error, else 0.
- */
-int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans,
- int events,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_buf_t *rkbuf;
- int r;
- const void *buf;
- size_t len;
-
- if (!(events & POLLIN))
- return 0;
-
- r = rd_kafka_transport_framed_recv(rktrans, &rkbuf, errstr,
- errstr_size);
- if (r == -1) {
- if (!strcmp(errstr, "Disconnected"))
- rd_snprintf(errstr, errstr_size,
- "Disconnected: check client %s credentials "
- "and broker logs",
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl
- .mechanisms);
- return -1;
- } else if (r == 0) /* not fully received yet */
- return 0;
-
- if (rkbuf) {
- rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
- /* Seek past framing header */
- rd_slice_seek(&rkbuf->rkbuf_reader, 4);
- len = rd_slice_remains(&rkbuf->rkbuf_reader);
- buf = rd_slice_ensure_contig(&rkbuf->rkbuf_reader, len);
- } else {
- buf = NULL;
- len = 0;
- }
-
- r = rd_kafka_sasl_recv(rktrans, buf, len, errstr, errstr_size);
-
- if (rkbuf)
- rd_kafka_buf_destroy(rkbuf);
-
- return r;
-}
-
-
-/**
- * @brief Close SASL session (from transport code)
- * @remark May be called on non-SASL transports (no-op)
- */
-void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans) {
- const struct rd_kafka_sasl_provider *provider =
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider;
-
- if (provider && provider->close)
- provider->close(rktrans);
-}
-
-
-
-/**
- * Initialize and start SASL authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * Locality: broker thread
- */
-int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size) {
- int r;
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- char *hostname, *t;
- const struct rd_kafka_sasl_provider *provider =
- rk->rk_conf.sasl.provider;
-
- /* Verify broker support:
- * - RD_KAFKA_FEATURE_SASL_GSSAPI - GSSAPI supported
- * - RD_KAFKA_FEATURE_SASL_HANDSHAKE - GSSAPI, PLAIN and possibly
- * other mechanisms supported. */
- if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
- if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_GSSAPI)) {
- rd_snprintf(errstr, errstr_size,
- "SASL GSSAPI authentication not supported "
- "by broker");
- return -1;
- }
- } else if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) {
- rd_snprintf(errstr, errstr_size,
- "SASL Handshake not supported by broker "
- "(required by mechanism %s)%s",
- rk->rk_conf.sasl.mechanisms,
- rk->rk_conf.api_version_request
- ? ""
- : ": try api.version.request=true");
- return -1;
- }
-
- rd_kafka_broker_lock(rktrans->rktrans_rkb);
- rd_strdupa(&hostname, rktrans->rktrans_rkb->rkb_nodename);
- rd_kafka_broker_unlock(rktrans->rktrans_rkb);
-
- if ((t = strchr(hostname, ':')))
- *t = '\0'; /* remove ":port" */
-
- rd_rkb_dbg(rkb, SECURITY, "SASL",
- "Initializing SASL client: service name %s, "
- "hostname %s, mechanisms %s, provider %s",
- rk->rk_conf.sasl.service_name, hostname,
- rk->rk_conf.sasl.mechanisms, provider->name);
-
- r = provider->client_new(rktrans, hostname, errstr, errstr_size);
- if (r != -1)
- rd_kafka_transport_poll_set(rktrans, POLLIN);
-
- return r;
-}
-
-
-
-rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk) {
- if (!rk->rk_sasl.callback_q)
- return NULL;
-
- return rd_kafka_queue_new0(rk, rk->rk_sasl.callback_q);
-}
-
-
-/**
- * Per-broker SASL termination.
- *
- * Locality: broker thread
- */
-void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb) {
- const struct rd_kafka_sasl_provider *provider =
- rkb->rkb_rk->rk_conf.sasl.provider;
- if (provider->broker_term)
- provider->broker_term(rkb);
-}
-
-/**
- * Broker SASL init.
- *
- * Locality: broker thread
- */
-void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb) {
- const struct rd_kafka_sasl_provider *provider =
- rkb->rkb_rk->rk_conf.sasl.provider;
- if (provider->broker_init)
- provider->broker_init(rkb);
-}
-
-
-/**
- * @brief Per-instance initializer using the selected provider
- *
- * @returns 0 on success or -1 on error.
- *
- * @locality app thread (from rd_kafka_new())
- */
-int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
- const struct rd_kafka_sasl_provider *provider =
- rk->rk_conf.sasl.provider;
-
- if (provider && provider->init)
- return provider->init(rk, errstr, errstr_size);
-
- return 0;
-}
-
-
-/**
- * @brief Per-instance destructor for the selected provider
- *
- * @locality app thread (from rd_kafka_new()) or rdkafka main thread
- */
-void rd_kafka_sasl_term(rd_kafka_t *rk) {
- const struct rd_kafka_sasl_provider *provider =
- rk->rk_conf.sasl.provider;
-
- if (provider && provider->term)
- provider->term(rk);
-
- RD_IF_FREE(rk->rk_sasl.callback_q, rd_kafka_q_destroy_owner);
-}
-
-
-/**
- * @returns rd_true if the provider is ready to be used, or if SASL is not
- *          configured, else rd_false.
- *
- * @locks none
- * @locality any thread
- */
-rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk) {
- const struct rd_kafka_sasl_provider *provider =
- rk->rk_conf.sasl.provider;
-
- if (provider && provider->ready)
- return provider->ready(rk);
-
- return rd_true;
-}
-
-
-/**
- * @brief Select the SASL provider for the configured mechanism (only a
- *        single mechanism may be configured)
- * @returns 0 on success or -1 on failure.
- */
-int rd_kafka_sasl_select_provider(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
- const struct rd_kafka_sasl_provider *provider = NULL;
-
- if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
- /* GSSAPI / Kerberos */
-#ifdef _WIN32
- provider = &rd_kafka_sasl_win32_provider;
-#elif WITH_SASL_CYRUS
- provider = &rd_kafka_sasl_cyrus_provider;
-#endif
-
- } else if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
- /* SASL PLAIN */
- provider = &rd_kafka_sasl_plain_provider;
-
- } else if (!strncmp(rk->rk_conf.sasl.mechanisms, "SCRAM-SHA-",
- strlen("SCRAM-SHA-"))) {
- /* SASL SCRAM */
-#if WITH_SASL_SCRAM
- provider = &rd_kafka_sasl_scram_provider;
-#endif
-
- } else if (!strcmp(rk->rk_conf.sasl.mechanisms, "OAUTHBEARER")) {
- /* SASL OAUTHBEARER */
-#if WITH_SASL_OAUTHBEARER
- provider = &rd_kafka_sasl_oauthbearer_provider;
-#endif
- } else {
- /* Unsupported mechanism */
- rd_snprintf(errstr, errstr_size,
- "Unsupported SASL mechanism: %s",
- rk->rk_conf.sasl.mechanisms);
- return -1;
- }
-
- if (!provider) {
- rd_snprintf(errstr, errstr_size,
- "No provider for SASL mechanism %s"
- ": recompile librdkafka with "
-#ifndef _WIN32
- "libsasl2 or "
-#endif
- "openssl support. "
- "Current build options:"
- " PLAIN"
-#ifdef _WIN32
- " WindowsSSPI(GSSAPI)"
-#endif
-#if WITH_SASL_CYRUS
- " SASL_CYRUS"
-#endif
-#if WITH_SASL_SCRAM
- " SASL_SCRAM"
-#endif
-#if WITH_SASL_OAUTHBEARER
- " OAUTHBEARER"
-#endif
- ,
- rk->rk_conf.sasl.mechanisms);
- return -1;
- }
-
- rd_kafka_dbg(rk, SECURITY, "SASL",
- "Selected provider %s for SASL mechanism %s",
- provider->name, rk->rk_conf.sasl.mechanisms);
-
- /* Validate SASL config */
- if (provider->conf_validate &&
- provider->conf_validate(rk, errstr, errstr_size) == -1)
- return -1;
-
- rk->rk_conf.sasl.provider = provider;
-
- return 0;
-}
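
For context, the mechanism string matched above comes directly from the `sasl.mechanisms` configuration property. A minimal application-side sketch of driving this selection (helper name and all values invented for illustration; assumes a librdkafka build with SCRAM support):

    #include <librdkafka/rdkafka.h>

    static rd_kafka_t *new_scram_client(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* rd_kafka_sasl_select_provider() matches on this string. */
            rd_kafka_conf_set(conf, "security.protocol", "sasl_ssl",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.mechanisms", "SCRAM-SHA-256",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.username", "user",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.password", "secret",
                              errstr, sizeof(errstr));

            /* Provider selection and conf_validate() run during client
             * creation; an unsupported mechanism is reported via errstr. */
            return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr));
    }
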
-
-
-rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk) {
- rd_kafka_queue_t *saslq, *bgq;
-
- if (!(saslq = rd_kafka_queue_get_sasl(rk)))
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
- "No SASL mechanism using callbacks is configured");
-
- if (!(bgq = rd_kafka_queue_get_background(rk))) {
- rd_kafka_queue_destroy(saslq);
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
- "The background thread is not available");
- }
-
- rd_kafka_queue_forward(saslq, bgq);
-
- rd_kafka_queue_destroy(saslq);
- rd_kafka_queue_destroy(bgq);
-
- return NULL;
-}
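
A short application-side sketch of the call above (helper name invented, error handling minimal); this forwards e.g. OAUTHBEARER token refresh events to librdkafka's background thread so the application does not need to poll for them itself:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void enable_background_sasl(rd_kafka_t *rk) {
            rd_kafka_error_t *error =
                rd_kafka_sasl_background_callbacks_enable(rk);

            if (error) {
                    fprintf(stderr, "SASL background callbacks: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
            }
    }
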
-
-
-/**
- * Global SASL termination.
- */
-void rd_kafka_sasl_global_term(void) {
-#if WITH_SASL_CYRUS
- rd_kafka_sasl_cyrus_global_term();
-#endif
-}
-
-
-/**
- * Global SASL init, called once per runtime.
- */
-int rd_kafka_sasl_global_init(void) {
-#if WITH_SASL_CYRUS
- return rd_kafka_sasl_cyrus_global_init();
-#else
- return 0;
-#endif
-}
-
-/**
- * Sets or resets the SASL (PLAIN or SCRAM) credentials used by this
- * client when making new connections to brokers.
- *
- * @returns NULL on success or an error object on error.
- */
-rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk,
- const char *username,
- const char *password) {
-
- if (!username || !password)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Username and password are required");
-
- mtx_lock(&rk->rk_conf.sasl.lock);
-
- if (rk->rk_conf.sasl.username)
- rd_free(rk->rk_conf.sasl.username);
- rk->rk_conf.sasl.username = rd_strdup(username);
-
- if (rk->rk_conf.sasl.password)
- rd_free(rk->rk_conf.sasl.password);
- rk->rk_conf.sasl.password = rd_strdup(password);
-
- mtx_unlock(&rk->rk_conf.sasl.lock);
-
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
- "SASL credentials updated");
-
- return NULL;
-}
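
A usage sketch (helper name and credentials invented). Per the broker wake-up above, the new credentials apply to connection attempts made after the call; connections that are already established are not torn down:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void rotate_sasl_credentials(rd_kafka_t *rk) {
            rd_kafka_error_t *error = rd_kafka_sasl_set_credentials(
                rk, "new-username", "new-password");

            if (error) {
                    fprintf(stderr, "Credential rotation failed: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
            }
    }
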
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h
deleted file mode 100644
index d0dd01b8b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_SASL_H_
-#define _RDKAFKA_SASL_H_
-
-
-
-int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans,
- const void *buf,
- size_t len,
- char *errstr,
- size_t errstr_size);
-int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans,
- int events,
- char *errstr,
- size_t errstr_size);
-void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans);
-int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size);
-
-void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb);
-void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb);
-
-int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
-void rd_kafka_sasl_term(rd_kafka_t *rk);
-
-rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk);
-
-void rd_kafka_sasl_global_term(void);
-int rd_kafka_sasl_global_init(void);
-
-int rd_kafka_sasl_select_provider(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size);
-
-#endif /* _RDKAFKA_SASL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c
deleted file mode 100644
index 41452a336..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c
+++ /dev/null
@@ -1,720 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-#include "rdstring.h"
-
-#if defined(__FreeBSD__) || defined(__OpenBSD__)
-#include <sys/wait.h> /* For WIF.. */
-#endif
-
-#ifdef __APPLE__
-/* Apple has deprecated most of the SASL API for unknown reasons;
- * silence those warnings. */
-#pragma clang diagnostic ignored "-Wdeprecated-declarations"
-#endif
-
-#include <sasl/sasl.h>
-
-/**
- * @brief Process-global lock to avoid simultaneous invocations of the
- *        kinit command when refreshing the tickets, which could lead to
- *        Kerberos credential cache corruption.
- */
-static mtx_t rd_kafka_sasl_cyrus_kinit_lock;
-
-/**
- * @struct Per-client-instance handle
- */
-typedef struct rd_kafka_sasl_cyrus_handle_s {
- rd_kafka_timer_t kinit_refresh_tmr;
- rd_atomic32_t ready; /**< First kinit command has finished, or there
- * is no kinit command. */
-} rd_kafka_sasl_cyrus_handle_t;
-
-/**
- * @struct Per-connection state
- */
-typedef struct rd_kafka_sasl_cyrus_state_s {
- sasl_conn_t *conn;
- sasl_callback_t callbacks[16];
-} rd_kafka_sasl_cyrus_state_t;
-
-
-
-/**
- * Handle received frame from broker.
- */
-static int rd_kafka_sasl_cyrus_recv(struct rd_kafka_transport_s *rktrans,
- const void *buf,
- size_t size,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state;
- int r;
- int sendcnt = 0;
-
- if (rktrans->rktrans_sasl.complete && size == 0)
- goto auth_successful;
-
- do {
- sasl_interact_t *interact = NULL;
- const char *out;
- unsigned int outlen;
-
- mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- r = sasl_client_step(state->conn, size > 0 ? buf : NULL, size,
- &interact, &out, &outlen);
- mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
-
- if (r >= 0) {
- /* Note: outlen may be 0 here for an empty response */
- if (rd_kafka_sasl_send(rktrans, out, outlen, errstr,
- errstr_size) == -1)
- return -1;
- sendcnt++;
- }
-
- if (r == SASL_INTERACT)
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
- "SASL_INTERACT: %lu %s, %s, %s, %p",
- interact->id, interact->challenge,
- interact->prompt, interact->defresult,
- interact->result);
-
- } while (r == SASL_INTERACT);
-
- if (r == SASL_CONTINUE)
- return 0; /* Wait for more data from broker */
- else if (r != SASL_OK) {
- rd_snprintf(errstr, errstr_size,
- "SASL handshake failed (step): %s",
- sasl_errdetail(state->conn));
- return -1;
- }
-
- if (!rktrans->rktrans_sasl.complete && sendcnt > 0) {
- /* With SaslAuthenticateRequest Kafka protocol framing
- * we'll get a Response back after authentication is done,
- * which should not be processed by Cyrus, but we still
-                 * need to wait for the response to propagate its error,
- * if any, before authentication is considered done.
- *
- * The legacy framing does not have a final broker->client
- * response. */
- rktrans->rktrans_sasl.complete = 1;
-
- if (rktrans->rktrans_rkb->rkb_features &
- RD_KAFKA_FEATURE_SASL_AUTH_REQ) {
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
- "%s authentication complete but awaiting "
- "final response from broker",
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl
- .mechanisms);
- return 0;
- }
- }
-
- /* Authentication successful */
-auth_successful:
- if (rktrans->rktrans_rkb->rkb_rk->rk_conf.debug &
- RD_KAFKA_DBG_SECURITY) {
- const char *user, *mech, *authsrc;
-
- mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- if (sasl_getprop(state->conn, SASL_USERNAME,
- (const void **)&user) != SASL_OK)
- user = "(unknown)";
- mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
-
- if (sasl_getprop(state->conn, SASL_MECHNAME,
- (const void **)&mech) != SASL_OK)
- mech = "(unknown)";
-
- if (sasl_getprop(state->conn, SASL_AUTHSOURCE,
- (const void **)&authsrc) != SASL_OK)
- authsrc = "(unknown)";
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
- "Authenticated as %s using %s (%s)", user, mech,
- authsrc);
- }
-
- rd_kafka_sasl_auth_done(rktrans);
-
- return 0;
-}
-
-
-
-static ssize_t
-render_callback(const char *key, char *buf, size_t size, void *opaque) {
- rd_kafka_t *rk = opaque;
- rd_kafka_conf_res_t res;
- size_t destsize = size;
-
- /* Try config lookup. */
- res = rd_kafka_conf_get(&rk->rk_conf, key, buf, &destsize);
- if (res != RD_KAFKA_CONF_OK)
- return -1;
-
-        /* Don't include the trailing \0 in the returned size */
- return (destsize > 0 ? destsize - 1 : destsize);
-}
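
The template being rendered here comes from sasl.kerberos.kinit.cmd, where each %{...} token is a configuration property looked up through this callback. A sketch (helper name invented) setting what librdkafka's configuration docs give as the default value:

    static void set_default_kinit_cmd(rd_kafka_conf_t *conf) {
            char errstr[512];

            /* %{sasl.kerberos.keytab} and %{sasl.kerberos.principal}
             * are resolved against the configuration by
             * render_callback() at render time. */
            rd_kafka_conf_set(conf, "sasl.kerberos.kinit.cmd",
                              "kinit -R -t \"%{sasl.kerberos.keytab}\" "
                              "-k %{sasl.kerberos.principal} || "
                              "kinit -t \"%{sasl.kerberos.keytab}\" "
                              "-k %{sasl.kerberos.principal}",
                              errstr, sizeof(errstr));
    }
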
-
-
-/**
- * @brief Execute kinit to refresh ticket.
- *
- * @returns 0 on success, -1 on error.
- *
- * @locality rdkafka main thread
- */
-static int rd_kafka_sasl_cyrus_kinit_refresh(rd_kafka_t *rk) {
- rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle;
- int r;
- char *cmd;
- char errstr[128];
- rd_ts_t ts_start;
- int duration;
-
- /* Build kinit refresh command line using string rendering and config */
- cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, errstr,
- sizeof(errstr), render_callback, rk);
- if (!cmd) {
- rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
- "Failed to construct kinit command "
- "from sasl.kerberos.kinit.cmd template: %s",
- errstr);
- return -1;
- }
-
- /* Execute kinit */
- rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
- "Refreshing Kerberos ticket with command: %s", cmd);
-
- ts_start = rd_clock();
-
- /* Prevent multiple simultaneous refreshes by the same process to
- * avoid Kerberos credential cache corruption. */
- mtx_lock(&rd_kafka_sasl_cyrus_kinit_lock);
- r = system(cmd);
- mtx_unlock(&rd_kafka_sasl_cyrus_kinit_lock);
-
- duration = (int)((rd_clock() - ts_start) / 1000);
- if (duration > 5000)
- rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH",
- "Slow Kerberos ticket refresh: %dms: %s", duration,
- cmd);
-
- /* Regardless of outcome from the kinit command (it can fail
- * even if the ticket is available), we now allow broker connections. */
- if (rd_atomic32_add(&handle->ready, 1) == 1) {
- rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
- "First kinit command finished: waking up "
- "broker threads");
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
- "Kerberos ticket refresh");
- }
-
- if (r == -1) {
- if (errno == ECHILD) {
- rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH",
- "Kerberos ticket refresh command "
- "returned ECHILD: %s: exit status "
- "unknown, assuming success",
- cmd);
- } else {
- rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
- "Kerberos ticket refresh failed: %s: %s",
- cmd, rd_strerror(errno));
- rd_free(cmd);
- return -1;
- }
- } else if (WIFSIGNALED(r)) {
- rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
- "Kerberos ticket refresh failed: %s: "
- "received signal %d",
- cmd, WTERMSIG(r));
- rd_free(cmd);
- return -1;
- } else if (WIFEXITED(r) && WEXITSTATUS(r) != 0) {
- rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
- "Kerberos ticket refresh failed: %s: "
- "exited with code %d",
- cmd, WEXITSTATUS(r));
- rd_free(cmd);
- return -1;
- }
-
- rd_free(cmd);
-
- rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
- "Kerberos ticket refreshed in %dms", duration);
- return 0;
-}
-
-
-/**
- * @brief Refresh timer callback
- *
- * @locality rdkafka main thread
- */
-static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_t *rk = arg;
-
- rd_kafka_sasl_cyrus_kinit_refresh(rk);
-}
-
-
-
-/**
- *
- * libsasl callbacks
- *
- */
-static RD_UNUSED int rd_kafka_sasl_cyrus_cb_getopt(void *context,
- const char *plugin_name,
- const char *option,
- const char **result,
- unsigned *len) {
- rd_kafka_transport_t *rktrans = context;
-
- if (!strcmp(option, "client_mech_list"))
- *result = "GSSAPI";
- if (!strcmp(option, "canon_user_plugin"))
- *result = "INTERNAL";
-
- if (*result && len)
- *len = strlen(*result);
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
- "CB_GETOPT: plugin %s, option %s: returning %s", plugin_name,
- option, *result);
-
- return SASL_OK;
-}
-
-static int
-rd_kafka_sasl_cyrus_cb_log(void *context, int level, const char *message) {
- rd_kafka_transport_t *rktrans = context;
-
- /* Provide a more helpful error message in case Kerberos
- * plugins are missing. */
- if (strstr(message, "No worthy mechs found") &&
- strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms,
- "GSSAPI"))
- message =
- "Cyrus/libsasl2 is missing a GSSAPI module: "
- "make sure the libsasl2-modules-gssapi-mit or "
- "cyrus-sasl-gssapi packages are installed";
-
- /* Treat the "client step" log messages as debug. */
- if (level >= LOG_DEBUG || !strncmp(message, "GSSAPI client step ", 19))
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", "%s",
- message);
- else
- rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL", "%s",
- message);
-
- return SASL_OK;
-}
-
-
-static int rd_kafka_sasl_cyrus_cb_getsimple(void *context,
- int id,
- const char **result,
- unsigned *len) {
- rd_kafka_transport_t *rktrans = context;
-
- switch (id) {
- case SASL_CB_USER:
- case SASL_CB_AUTHNAME:
-                /* Since cyrus expects the returned pointer to be stable
-                 * and not have its content changed, but the username
-                 * and password may be updated at any time by the application
-                 * calling rd_kafka_sasl_set_credentials(), we need to lock
-                 * rk_conf.sasl.lock before each call into cyrus-sasl.
-                 * So when we get here the lock is already held. */
- *result = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.username;
- break;
-
- default:
- *result = NULL;
- break;
- }
-
- if (len)
- *len = *result ? strlen(*result) : 0;
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
- "CB_GETSIMPLE: id 0x%x: returning %s", id, *result);
-
- return *result ? SASL_OK : SASL_FAIL;
-}
-
-
-static int rd_kafka_sasl_cyrus_cb_getsecret(sasl_conn_t *conn,
- void *context,
- int id,
- sasl_secret_t **psecret) {
- rd_kafka_transport_t *rktrans = context;
- const char *password;
-
- /* rk_conf.sasl.lock is already locked */
- password = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.password;
-
- if (!password) {
- *psecret = NULL;
- } else {
- size_t passlen = strlen(password);
- *psecret = rd_realloc(*psecret, sizeof(**psecret) + passlen);
- (*psecret)->len = passlen;
- memcpy((*psecret)->data, password, passlen);
- }
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
- "CB_GETSECRET: id 0x%x: returning %s", id,
- *psecret ? "(hidden)" : "NULL");
-
- return SASL_OK;
-}
-
-static int rd_kafka_sasl_cyrus_cb_chalprompt(void *context,
- int id,
- const char *challenge,
- const char *prompt,
- const char *defres,
- const char **result,
- unsigned *len) {
- rd_kafka_transport_t *rktrans = context;
-
- *result = "min_chalprompt";
- *len = strlen(*result);
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
- "CB_CHALPROMPT: id 0x%x, challenge %s, prompt %s, "
- "default %s: returning %s",
- id, challenge, prompt, defres, *result);
-
- return SASL_OK;
-}
-
-static int rd_kafka_sasl_cyrus_cb_getrealm(void *context,
- int id,
- const char **availrealms,
- const char **result) {
- rd_kafka_transport_t *rktrans = context;
-
- *result = *availrealms;
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
- "CB_GETREALM: id 0x%x: returning %s", id, *result);
-
- return SASL_OK;
-}
-
-
-static RD_UNUSED int rd_kafka_sasl_cyrus_cb_canon(sasl_conn_t *conn,
- void *context,
- const char *in,
- unsigned inlen,
- unsigned flags,
- const char *user_realm,
- char *out,
- unsigned out_max,
- unsigned *out_len) {
- rd_kafka_transport_t *rktrans = context;
-
- if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms,
- "GSSAPI")) {
- *out_len = rd_snprintf(
- out, out_max, "%s",
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.principal);
- } else if (!strcmp(
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms,
- "PLAIN")) {
- *out_len = rd_snprintf(out, out_max, "%.*s", inlen, in);
- } else
- out = NULL;
-
- rd_rkb_dbg(
- rktrans->rktrans_rkb, SECURITY, "LIBSASL",
- "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"",
- flags, (int)inlen, in, user_realm, (int)(*out_len), out);
-
- return out ? SASL_OK : SASL_FAIL;
-}
-
-
-static void rd_kafka_sasl_cyrus_close(struct rd_kafka_transport_s *rktrans) {
- rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state;
-
- if (!state)
- return;
-
- if (state->conn) {
- mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- sasl_dispose(&state->conn);
- mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- }
- rd_free(state);
-}
-
-
-/**
- * Initialize and start SASL authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * Locality: broker thread
- */
-static int rd_kafka_sasl_cyrus_client_new(rd_kafka_transport_t *rktrans,
- const char *hostname,
- char *errstr,
- size_t errstr_size) {
- int r;
- rd_kafka_sasl_cyrus_state_t *state;
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- sasl_callback_t callbacks[16] = {
- // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans
- // },
- {SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans},
- {SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple,
- rktrans},
- {SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans},
- {SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cyrus_cb_chalprompt,
- rktrans},
- {SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm,
- rktrans},
- {SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans},
- {SASL_CB_LIST_END}};
-
- state = rd_calloc(1, sizeof(*state));
- rktrans->rktrans_sasl.state = state;
-
- /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI */
- if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
- int endidx;
- /* Find end of callbacks array */
- for (endidx = 0; callbacks[endidx].id != SASL_CB_LIST_END;
- endidx++)
- ;
-
- callbacks[endidx].id = SASL_CB_USER;
- callbacks[endidx].proc =
- (void *)rd_kafka_sasl_cyrus_cb_getsimple;
- callbacks[endidx].context = rktrans;
- endidx++;
- callbacks[endidx].id = SASL_CB_LIST_END;
- }
-
- memcpy(state->callbacks, callbacks, sizeof(callbacks));
-
- mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, NULL,
- NULL, /* no local & remote IP checks */
- state->callbacks, 0, &state->conn);
- mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- if (r != SASL_OK) {
- rd_snprintf(errstr, errstr_size, "%s",
- sasl_errstring(r, NULL, NULL));
- return -1;
- }
-
- if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) {
- const char *avail_mechs;
- sasl_listmech(state->conn, NULL, NULL, " ", NULL, &avail_mechs,
- NULL, NULL);
- rd_rkb_dbg(rkb, SECURITY, "SASL",
- "My supported SASL mechanisms: %s", avail_mechs);
- }
-
- do {
- const char *out;
- unsigned int outlen;
- const char *mech = NULL;
-
- mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
- r = sasl_client_start(state->conn, rk->rk_conf.sasl.mechanisms,
- NULL, &out, &outlen, &mech);
- mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
-
- if (r >= 0)
- if (rd_kafka_sasl_send(rktrans, out, outlen, errstr,
- errstr_size))
- return -1;
- } while (r == SASL_INTERACT);
-
- if (r == SASL_OK) {
-                /* PLAIN is apparently done here, but we still need to make
- * sure the PLAIN frame is sent and we get a response back (but
- * we must not pass the response to libsasl or it will fail). */
- rktrans->rktrans_sasl.complete = 1;
- return 0;
-
- } else if (r != SASL_CONTINUE) {
- rd_snprintf(errstr, errstr_size,
- "SASL handshake failed (start (%d)): %s", r,
- sasl_errdetail(state->conn));
- return -1;
- }
-
- return 0;
-}
-
-
-/**
- * @brief SASL/GSSAPI is ready when at least one kinit command has been
- * executed (regardless of exit status).
- */
-static rd_bool_t rd_kafka_sasl_cyrus_ready(rd_kafka_t *rk) {
- rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle;
- if (!rk->rk_conf.sasl.relogin_min_time)
- return rd_true;
- if (!handle)
- return rd_false;
-
- return rd_atomic32_get(&handle->ready) > 0;
-}
-
-/**
- * @brief Per-client-instance initializer
- */
-static int
-rd_kafka_sasl_cyrus_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
- rd_kafka_sasl_cyrus_handle_t *handle;
-
- if (!rk->rk_conf.sasl.relogin_min_time || !rk->rk_conf.sasl.kinit_cmd ||
- strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
- return 0; /* kinit not configured, no need to start timer */
-
- handle = rd_calloc(1, sizeof(*handle));
- rk->rk_sasl.handle = handle;
-
- rd_kafka_timer_start(&rk->rk_timers, &handle->kinit_refresh_tmr,
- rk->rk_conf.sasl.relogin_min_time * 1000ll,
- rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb, rk);
-
- /* Kick off the timer immediately to refresh the ticket.
- * (Timer is triggered from the main loop). */
- rd_kafka_timer_override_once(&rk->rk_timers, &handle->kinit_refresh_tmr,
- 0 /*immediately*/);
-
- return 0;
-}
-
-
-/**
- * @brief Per-client-instance destructor
- */
-static void rd_kafka_sasl_cyrus_term(rd_kafka_t *rk) {
- rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle;
-
- if (!handle)
- return;
-
- rd_kafka_timer_stop(&rk->rk_timers, &handle->kinit_refresh_tmr, 1);
- rd_free(handle);
- rk->rk_sasl.handle = NULL;
-}
-
-
-static int rd_kafka_sasl_cyrus_conf_validate(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
-
- if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
- return 0;
-
- if (rk->rk_conf.sasl.relogin_min_time && rk->rk_conf.sasl.kinit_cmd) {
- char *cmd;
- char tmperr[128];
-
- cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, tmperr,
- sizeof(tmperr), render_callback, rk);
-
- if (!cmd) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.kerberos.kinit.cmd value: %s",
- tmperr);
- return -1;
- }
-
- rd_free(cmd);
- }
-
- return 0;
-}
-
-
-/**
- * Global SASL termination.
- */
-void rd_kafka_sasl_cyrus_global_term(void) {
-        /* NOTE: Should not be called since the application may be
-         * using SASL too. */
- /* sasl_done(); */
- mtx_destroy(&rd_kafka_sasl_cyrus_kinit_lock);
-}
-
-
-/**
- * Global SASL init, called once per runtime.
- */
-int rd_kafka_sasl_cyrus_global_init(void) {
- int r;
-
- mtx_init(&rd_kafka_sasl_cyrus_kinit_lock, mtx_plain);
-
- r = sasl_client_init(NULL);
- if (r != SASL_OK) {
- fprintf(stderr, "librdkafka: sasl_client_init() failed: %s\n",
- sasl_errstring(r, NULL, NULL));
- return -1;
- }
-
- return 0;
-}
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider = {
- .name = "Cyrus",
- .init = rd_kafka_sasl_cyrus_init,
- .term = rd_kafka_sasl_cyrus_term,
- .client_new = rd_kafka_sasl_cyrus_client_new,
- .recv = rd_kafka_sasl_cyrus_recv,
- .close = rd_kafka_sasl_cyrus_close,
- .ready = rd_kafka_sasl_cyrus_ready,
- .conf_validate = rd_kafka_sasl_cyrus_conf_validate};
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h
deleted file mode 100644
index 33e3bdd05..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_SASL_INT_H_
-#define _RDKAFKA_SASL_INT_H_
-
-struct rd_kafka_sasl_provider {
- const char *name;
-
- /** Per client-instance (rk) initializer */
- int (*init)(rd_kafka_t *rk, char *errstr, size_t errstr_size);
-
- /** Per client-instance (rk) destructor */
- void (*term)(rd_kafka_t *rk);
-
- /** Returns rd_true if provider is ready to be used, else rd_false */
- rd_bool_t (*ready)(rd_kafka_t *rk);
-
- int (*client_new)(rd_kafka_transport_t *rktrans,
- const char *hostname,
- char *errstr,
- size_t errstr_size);
-
- int (*recv)(struct rd_kafka_transport_s *s,
- const void *buf,
- size_t size,
- char *errstr,
- size_t errstr_size);
- void (*close)(struct rd_kafka_transport_s *);
-
- void (*broker_init)(rd_kafka_broker_t *rkb);
- void (*broker_term)(rd_kafka_broker_t *rkb);
-
- int (*conf_validate)(rd_kafka_t *rk, char *errstr, size_t errstr_size);
-};
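
For orientation, a hypothetical skeleton provider (all names invented). Judging from the NULL checks in the rdkafka_sasl.c wrappers above, only name, client_new and recv appear to be mandatory; the remaining callbacks may be left unset:

    static int my_mech_client_new(rd_kafka_transport_t *rktrans,
                                  const char *hostname,
                                  char *errstr, size_t errstr_size) {
            return 0; /* Handshake started; wait for broker data */
    }

    static int my_mech_recv(struct rd_kafka_transport_s *rktrans,
                            const void *buf, size_t size,
                            char *errstr, size_t errstr_size) {
            /* Single round-trip mechanism: done on first response */
            rd_kafka_sasl_auth_done(rktrans);
            return 0;
    }

    static const struct rd_kafka_sasl_provider my_mech_provider = {
            .name       = "MyMech",
            .client_new = my_mech_client_new,
            .recv       = my_mech_recv,
    };
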
-
-#ifdef _WIN32
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider;
-#endif
-
-#if WITH_SASL_CYRUS
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider;
-void rd_kafka_sasl_cyrus_global_term(void);
-int rd_kafka_sasl_cyrus_global_init(void);
-#endif
-
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider;
-
-#if WITH_SASL_SCRAM
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider;
-#endif
-
-#if WITH_SASL_OAUTHBEARER
-extern const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider;
-#endif
-
-void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans);
-int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans,
- const void *payload,
- int len,
- char *errstr,
- size_t errstr_size);
-
-#endif /* _RDKAFKA_SASL_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c
deleted file mode 100644
index 39b165a7d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c
+++ /dev/null
@@ -1,1825 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Builtin SASL OAUTHBEARER support
- */
-#include "rdkafka_int.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl_int.h"
-#include <openssl/evp.h>
-#include "rdunittest.h"
-
-#if WITH_OAUTHBEARER_OIDC
-#include "rdkafka_sasl_oauthbearer_oidc.h"
-#endif
-
-
-/**
- * @struct Per-client-instance SASL/OAUTHBEARER handle.
- */
-typedef struct rd_kafka_sasl_oauthbearer_handle_s {
- /**< Read-write lock for fields in the handle. */
- rwlock_t lock;
-
- /**< The b64token value as defined in RFC 6750 Section 2.1
- * https://tools.ietf.org/html/rfc6750#section-2.1
- */
- char *token_value;
-
- /**< When the token expires, in terms of the number of
- * milliseconds since the epoch. Wall clock time.
- */
- rd_ts_t wts_md_lifetime;
-
- /**< The point after which this token should be replaced with a
- * new one, in terms of the number of milliseconds since the
- * epoch. Wall clock time.
- */
- rd_ts_t wts_refresh_after;
-
-        /**< When the last token refresh was enqueued (0 = never)
- * in terms of the number of milliseconds since the epoch.
- * Wall clock time.
- */
- rd_ts_t wts_enqueued_refresh;
-
- /**< The name of the principal to which this token applies. */
- char *md_principal_name;
-
- /**< The SASL extensions, as per RFC 7628 Section 3.1
- * https://tools.ietf.org/html/rfc7628#section-3.1
- */
- rd_list_t extensions; /* rd_strtup_t list */
-
- /**< Error message for validation and/or token retrieval problems. */
- char *errstr;
-
- /**< Back-pointer to client instance. */
- rd_kafka_t *rk;
-
- /**< Token refresh timer */
- rd_kafka_timer_t token_refresh_tmr;
-
- /** Queue to enqueue token_refresh_cb ops on. */
- rd_kafka_q_t *callback_q;
-
- /** Using internal refresh callback (sasl.oauthbearer.method=oidc) */
- rd_bool_t internal_refresh;
-
-} rd_kafka_sasl_oauthbearer_handle_t;
-
-
-/**
- * @struct Unsecured JWS info populated when sasl.oauthbearer.config is parsed
- */
-struct rd_kafka_sasl_oauthbearer_parsed_ujws {
- char *principal_claim_name;
- char *principal;
- char *scope_claim_name;
- char *scope_csv_text;
- int life_seconds;
- rd_list_t extensions; /* rd_strtup_t list */
-};
-
-/**
- * @struct Unsecured JWS token to be set on the client handle
- */
-struct rd_kafka_sasl_oauthbearer_token {
- char *token_value;
- int64_t md_lifetime_ms;
- char *md_principal_name;
- char **extensions;
- size_t extension_size;
-};
-
-/**
- * @brief Per-connection state
- */
-struct rd_kafka_sasl_oauthbearer_state {
- enum { RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE,
- RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG,
- RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL,
- } state;
- char *server_error_msg;
-
- /*
- * A place to store a consistent view of the token and extensions
- * throughout the authentication process -- even if it is refreshed
- * midway through this particular authentication.
- */
- char *token_value;
- char *md_principal_name;
- rd_list_t extensions; /* rd_strtup_t list */
-};
-
-
-
-/**
- * @brief free memory inside the given token
- */
-static void rd_kafka_sasl_oauthbearer_token_free(
- struct rd_kafka_sasl_oauthbearer_token *token) {
- size_t i;
-
- RD_IF_FREE(token->token_value, rd_free);
- RD_IF_FREE(token->md_principal_name, rd_free);
-
- for (i = 0; i < token->extension_size; i++)
- rd_free(token->extensions[i]);
-
- RD_IF_FREE(token->extensions, rd_free);
-
- memset(token, 0, sizeof(*token));
-}
-
-
-/**
- * @brief Op callback for RD_KAFKA_OP_OAUTHBEARER_REFRESH
- *
- * @locality Application thread
- */
-static rd_kafka_op_res_t rd_kafka_oauthbearer_refresh_op(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- /* The op callback is invoked when the op is destroyed via
- * rd_kafka_op_destroy() or rd_kafka_event_destroy(), so
- * make sure we don't refresh upon destruction since
- * the op has already been handled by this point.
- */
- if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY &&
- rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
- rk->rk_conf.sasl.oauthbearer.token_refresh_cb(
- rk, rk->rk_conf.sasl.oauthbearer_config,
- rk->rk_conf.opaque);
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-/**
- * @brief Enqueue a token refresh.
- * @locks rwlock_wrlock(&handle->lock) MUST be held
- */
-static void rd_kafka_oauthbearer_enqueue_token_refresh(
- rd_kafka_sasl_oauthbearer_handle_t *handle) {
- rd_kafka_op_t *rko;
-
- rko = rd_kafka_op_new_cb(handle->rk, RD_KAFKA_OP_OAUTHBEARER_REFRESH,
- rd_kafka_oauthbearer_refresh_op);
- rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
-
- /* For internal OIDC refresh callback:
- * Force op to be handled by internal callback on the
- * receiving queue, rather than being passed as an event to
- * the application. */
- if (handle->internal_refresh)
- rko->rko_flags |= RD_KAFKA_OP_F_FORCE_CB;
-
- handle->wts_enqueued_refresh = rd_uclock();
- rd_kafka_q_enq(handle->callback_q, rko);
-}
-
-/**
- * @brief Enqueue a token refresh if necessary.
- *
- * The method rd_kafka_oauthbearer_enqueue_token_refresh() is invoked
- * if necessary; the required lock is acquired and released. This method
- * returns immediately when SASL/OAUTHBEARER is not in use by the client.
- */
-static void rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary(
- rd_kafka_sasl_oauthbearer_handle_t *handle) {
- rd_ts_t now_wallclock;
-
- now_wallclock = rd_uclock();
-
- rwlock_wrlock(&handle->lock);
- if (handle->wts_refresh_after < now_wallclock &&
- handle->wts_enqueued_refresh <= handle->wts_refresh_after)
- /* Refresh required and not yet scheduled; refresh it */
- rd_kafka_oauthbearer_enqueue_token_refresh(handle);
- rwlock_wrunlock(&handle->lock);
-}
-
-/**
- * @returns \c rd_true if SASL/OAUTHBEARER is the configured authentication
- * mechanism and a token is available, otherwise \c rd_false.
- *
- * @locks none
- * @locality any
- */
-static rd_bool_t
-rd_kafka_oauthbearer_has_token(rd_kafka_sasl_oauthbearer_handle_t *handle) {
- rd_bool_t retval_has_token;
-
- rwlock_rdlock(&handle->lock);
- retval_has_token = handle->token_value != NULL;
- rwlock_rdunlock(&handle->lock);
-
- return retval_has_token;
-}
-
-/**
- * @brief Verify that the provided \p key is valid.
- * @returns 0 on success or -1 if \p key is invalid.
- */
-static int check_oauthbearer_extension_key(const char *key,
- char *errstr,
- size_t errstr_size) {
- const char *c;
-
- if (!strcmp(key, "auth")) {
- rd_snprintf(errstr, errstr_size,
- "Cannot explicitly set the reserved `auth` "
- "SASL/OAUTHBEARER extension key");
- return -1;
- }
-
- /*
- * https://tools.ietf.org/html/rfc7628#section-3.1
- * key = 1*(ALPHA)
- *
- * https://tools.ietf.org/html/rfc5234#appendix-B.1
- * ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
- */
- if (!*key) {
- rd_snprintf(errstr, errstr_size,
- "SASL/OAUTHBEARER extension keys "
- "must not be empty");
- return -1;
- }
-
- for (c = key; *c; c++) {
- if (!(*c >= 'A' && *c <= 'Z') && !(*c >= 'a' && *c <= 'z')) {
- rd_snprintf(errstr, errstr_size,
- "SASL/OAUTHBEARER extension keys must "
- "only consist of A-Z or "
- "a-z characters: %s (%c)",
- key, *c);
- return -1;
- }
- }
-
- return 0;
-}
-
-/**
- * @brief Verify that the provided \p value is valid.
- * @returns 0 on success or -1 if \p value is invalid.
- */
-static int check_oauthbearer_extension_value(const char *value,
- char *errstr,
- size_t errstr_size) {
- const char *c;
-
- /*
- * https://tools.ietf.org/html/rfc7628#section-3.1
- * value = *(VCHAR / SP / HTAB / CR / LF )
- *
- * https://tools.ietf.org/html/rfc5234#appendix-B.1
- * VCHAR = %x21-7E ; visible (printing) characters
- * SP = %x20 ; space
- * HTAB = %x09 ; horizontal tab
- * CR = %x0D ; carriage return
- * LF = %x0A ; linefeed
- */
- for (c = value; *c; c++) {
- if (!(*c >= '\x21' && *c <= '\x7E') && *c != '\x20' &&
- *c != '\x09' && *c != '\x0D' && *c != '\x0A') {
- rd_snprintf(errstr, errstr_size,
- "SASL/OAUTHBEARER extension values must "
- "only consist of space, horizontal tab, "
- "CR, LF, and "
- "visible characters (%%x21-7E): %s (%c)",
- value, *c);
- return -1;
- }
- }
-
- return 0;
-}
-
-/**
- * @brief Set SASL/OAUTHBEARER token and metadata
- *
- * @param rk Client instance.
- * @param token_value the mandatory token value to set, often (but not
- * necessarily) a JWS compact serialization as per
- * https://tools.ietf.org/html/rfc7515#section-3.1.
- * Use rd_kafka_sasl_oauthbearer_token_free() to free members if
- * return value is not -1.
- * @param md_lifetime_ms when the token expires, in terms of the number of
- * milliseconds since the epoch. See https://currentmillis.com/.
- * @param md_principal_name the mandatory Kafka principal name associated
- * with the token.
- * @param extensions optional SASL extensions key-value array with
- * \p extension_size elements (number of keys * 2), where [i] is the key and
- * [i+1] is the key's value, to be communicated to the broker
- * as additional key-value pairs during the initial client response as per
- * https://tools.ietf.org/html/rfc7628#section-3.1.
- * @param extension_size the number of SASL extension keys plus values,
- * which should be a non-negative multiple of 2.
- *
- * The SASL/OAUTHBEARER token refresh callback or event handler should cause
- * this method to be invoked upon success, via
- * rd_kafka_oauthbearer_set_token(). The extension keys must not include the
- * reserved key "`auth`", and all extension keys and values must conform to the
- * required format as per https://tools.ietf.org/html/rfc7628#section-3.1:
- *
- * key = 1*(ALPHA)
- * value = *(VCHAR / SP / HTAB / CR / LF )
- *
- * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise errstr set and:
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are
- * invalid;
- * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is not configured as
- * the client's authentication mechanism.
- *
- * @sa rd_kafka_oauthbearer_set_token_failure0
- */
-rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk,
- const char *token_value,
- int64_t md_lifetime_ms,
- const char *md_principal_name,
- const char **extensions,
- size_t extension_size,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
- size_t i;
- rd_ts_t now_wallclock;
- rd_ts_t wts_md_lifetime = md_lifetime_ms * 1000;
-
- /* Check if SASL/OAUTHBEARER is the configured auth mechanism */
- if (rk->rk_conf.sasl.provider != &rd_kafka_sasl_oauthbearer_provider ||
- !handle) {
- rd_snprintf(errstr, errstr_size,
- "SASL/OAUTHBEARER is not the "
- "configured authentication mechanism");
- return RD_KAFKA_RESP_ERR__STATE;
- }
-
- /* Check if there is an odd number of extension keys + values */
- if (extension_size & 1) {
- rd_snprintf(errstr, errstr_size,
- "Incorrect extension size "
- "(must be a non-negative multiple of 2): %" PRIusz,
- extension_size);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- /* Check args for correct format/value */
- now_wallclock = rd_uclock();
- if (wts_md_lifetime <= now_wallclock) {
- rd_snprintf(errstr, errstr_size,
- "Must supply an unexpired token: "
- "now=%" PRId64 "ms, exp=%" PRId64 "ms",
- now_wallclock / 1000, wts_md_lifetime / 1000);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- if (check_oauthbearer_extension_value(token_value, errstr,
- errstr_size) == -1)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- for (i = 0; i + 1 < extension_size; i += 2) {
- if (check_oauthbearer_extension_key(extensions[i], errstr,
- errstr_size) == -1 ||
- check_oauthbearer_extension_value(extensions[i + 1], errstr,
- errstr_size) == -1)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- rwlock_wrlock(&handle->lock);
-
- RD_IF_FREE(handle->md_principal_name, rd_free);
- handle->md_principal_name = rd_strdup(md_principal_name);
-
- RD_IF_FREE(handle->token_value, rd_free);
- handle->token_value = rd_strdup(token_value);
-
- handle->wts_md_lifetime = wts_md_lifetime;
-
- /* Schedule a refresh 80% through its remaining lifetime */
- handle->wts_refresh_after =
- (rd_ts_t)(now_wallclock + 0.8 * (wts_md_lifetime - now_wallclock));
-
- rd_list_clear(&handle->extensions);
- for (i = 0; i + 1 < extension_size; i += 2)
- rd_list_add(&handle->extensions,
- rd_strtup_new(extensions[i], extensions[i + 1]));
-
- RD_IF_FREE(handle->errstr, rd_free);
- handle->errstr = NULL;
-
- rwlock_wrunlock(&handle->lock);
-
- rd_kafka_dbg(rk, SECURITY, "BRKMAIN",
- "Waking up waiting broker threads after "
- "setting OAUTHBEARER token");
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT,
- "OAUTHBEARER token update");
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief SASL/OAUTHBEARER token refresh failure indicator.
- *
- * @param rk Client instance.
- * @param errstr mandatory human readable error reason for failing to acquire
- * a token.
- *
- * The SASL/OAUTHBEARER token refresh callback or event handler should cause
- * this method to be invoked upon failure, via
- * rd_kafka_oauthbearer_set_token_failure().
- *
- * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise
- * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is enabled but is
- * not configured to be the client's authentication mechanism,
- * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
-
- * @sa rd_kafka_oauthbearer_set_token0
- */
-rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk, const char *errstr) {
- rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
- rd_bool_t error_changed;
-
- /* Check if SASL/OAUTHBEARER is the configured auth mechanism */
- if (rk->rk_conf.sasl.provider != &rd_kafka_sasl_oauthbearer_provider ||
- !handle)
- return RD_KAFKA_RESP_ERR__STATE;
-
- if (!errstr || !*errstr)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- rwlock_wrlock(&handle->lock);
- error_changed = !handle->errstr || strcmp(handle->errstr, errstr);
- RD_IF_FREE(handle->errstr, rd_free);
- handle->errstr = rd_strdup(errstr);
- /* Leave any existing token because it may have some life left,
- * schedule a refresh for 10 seconds later. */
- handle->wts_refresh_after = rd_uclock() + (10 * 1000 * 1000);
- rwlock_wrunlock(&handle->lock);
-
- /* Trigger an ERR__AUTHENTICATION error if the error changed. */
- if (error_changed)
- rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__AUTHENTICATION,
- "Failed to acquire SASL OAUTHBEARER token: %s",
- errstr);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
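
Both setters above are reached from the application's token refresh callback through the public wrappers rd_kafka_oauthbearer_set_token() and rd_kafka_oauthbearer_set_token_failure(). A hedged sketch, where fetch_my_token(), the principal name and the fixed one-hour lifetime are hypothetical placeholders:

    #include <stdlib.h>
    #include <time.h>
    #include <librdkafka/rdkafka.h>

    /* Hypothetical placeholder for the application's own token
     * acquisition, e.g. an HTTPS request to the identity provider. */
    extern char *fetch_my_token(void);

    static void my_refresh_cb(rd_kafka_t *rk,
                              const char *oauthbearer_config,
                              void *opaque) {
            char errstr[512];
            char *token = fetch_my_token();
            /* Illustrative only: assume the token lives one hour. */
            int64_t lifetime_ms = ((int64_t)time(NULL) + 3600) * 1000;

            if (!token) {
                    rd_kafka_oauthbearer_set_token_failure(
                        rk, "token acquisition failed");
                    return;
            }

            if (rd_kafka_oauthbearer_set_token(rk, token, lifetime_ms,
                                               "admin", NULL, 0, errstr,
                                               sizeof(errstr)) !=
                RD_KAFKA_RESP_ERR_NO_ERROR)
                    rd_kafka_oauthbearer_set_token_failure(rk, errstr);

            free(token);
    }

The callback would be registered before client creation with rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, my_refresh_cb).
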
-
-/**
- * @brief Parse a config value from the string pointed to by \p loc and starting
- * with the given \p prefix and ending with the given \p value_end_char, storing
- * the newly-allocated memory result in the string pointed to by \p value.
- * @returns -1 if the string pointed to by \p value is already non-empty
- * (\p errstr set, no memory allocated), else 0 (the caller must free the
- * allocated memory).
- */
-static int parse_ujws_config_value_for_prefix(char **loc,
- const char *prefix,
- const char value_end_char,
- char **value,
- char *errstr,
- size_t errstr_size) {
- if (*value) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "multiple '%s' entries",
- prefix);
- return -1;
- }
-
- *loc += strlen(prefix);
- *value = *loc;
- while (**loc != '\0' && **loc != value_end_char)
- ++*loc;
-
- if (**loc == value_end_char) {
- /* End the string and skip the character */
- **loc = '\0';
- ++*loc;
- }
-
- /* return new allocated memory */
- *value = rd_strdup(*value);
-
- return 0;
-}
-
-/**
- * @brief Parse Unsecured JWS config, allocates strings that must be freed
- * @param cfg the config to parse (typically from `sasl.oauthbearer.config`)
- * @param parsed holds the parsed output; it must be all zeros to start.
- * @returns -1 on failure (\p errstr set), else 0.
- */
-static int
-parse_ujws_config(const char *cfg,
- struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed,
- char *errstr,
- size_t errstr_size) {
- /*
- * Extensions:
- *
- * https://tools.ietf.org/html/rfc7628#section-3.1
- * key = 1*(ALPHA)
- * value = *(VCHAR / SP / HTAB / CR / LF )
- *
- * https://tools.ietf.org/html/rfc5234#appendix-B.1
- * ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
- * VCHAR = %x21-7E ; visible (printing) characters
- * SP = %x20 ; space
- * HTAB = %x09 ; horizontal tab
- * CR = %x0D ; carriage return
- * LF = %x0A ; linefeed
- */
-
- static const char *prefix_principal_claim_name = "principalClaimName=";
- static const char *prefix_principal = "principal=";
- static const char *prefix_scope_claim_name = "scopeClaimName=";
- static const char *prefix_scope = "scope=";
- static const char *prefix_life_seconds = "lifeSeconds=";
- static const char *prefix_extension = "extension_";
-
- char *cfg_copy = rd_strdup(cfg);
- char *loc = cfg_copy;
- int r = 0;
-
- while (*loc != '\0' && !r) {
- if (*loc == ' ')
- ++loc;
- else if (!strncmp(prefix_principal_claim_name, loc,
- strlen(prefix_principal_claim_name))) {
- r = parse_ujws_config_value_for_prefix(
- &loc, prefix_principal_claim_name, ' ',
- &parsed->principal_claim_name, errstr, errstr_size);
-
- if (!r && !*parsed->principal_claim_name) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "empty '%s'",
- prefix_principal_claim_name);
- r = -1;
- }
-
- } else if (!strncmp(prefix_principal, loc,
- strlen(prefix_principal))) {
- r = parse_ujws_config_value_for_prefix(
- &loc, prefix_principal, ' ', &parsed->principal,
- errstr, errstr_size);
-
- if (!r && !*parsed->principal) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "empty '%s'",
- prefix_principal);
- r = -1;
- }
-
- } else if (!strncmp(prefix_scope_claim_name, loc,
- strlen(prefix_scope_claim_name))) {
- r = parse_ujws_config_value_for_prefix(
- &loc, prefix_scope_claim_name, ' ',
- &parsed->scope_claim_name, errstr, errstr_size);
-
- if (!r && !*parsed->scope_claim_name) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "empty '%s'",
- prefix_scope_claim_name);
- r = -1;
- }
-
- } else if (!strncmp(prefix_scope, loc, strlen(prefix_scope))) {
- r = parse_ujws_config_value_for_prefix(
- &loc, prefix_scope, ' ', &parsed->scope_csv_text,
- errstr, errstr_size);
-
- if (!r && !*parsed->scope_csv_text) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "empty '%s'",
- prefix_scope);
- r = -1;
- }
-
- } else if (!strncmp(prefix_life_seconds, loc,
- strlen(prefix_life_seconds))) {
- char *life_seconds_text = NULL;
-
- r = parse_ujws_config_value_for_prefix(
- &loc, prefix_life_seconds, ' ', &life_seconds_text,
- errstr, errstr_size);
-
- if (!r && !*life_seconds_text) {
- rd_snprintf(errstr, errstr_size,
- "Invalid "
- "sasl.oauthbearer.config: "
- "empty '%s'",
- prefix_life_seconds);
- r = -1;
- } else if (!r) {
- long long life_seconds_long;
- char *end_ptr;
- life_seconds_long =
- strtoll(life_seconds_text, &end_ptr, 10);
- if (*end_ptr != '\0') {
- rd_snprintf(errstr, errstr_size,
- "Invalid "
- "sasl.oauthbearer.config: "
- "non-integral '%s': %s",
- prefix_life_seconds,
- life_seconds_text);
- r = -1;
- } else if (life_seconds_long <= 0 ||
- life_seconds_long > INT_MAX) {
- rd_snprintf(errstr, errstr_size,
- "Invalid "
- "sasl.oauthbearer.config: "
- "value out of range of "
- "positive int '%s': %s",
- prefix_life_seconds,
- life_seconds_text);
- r = -1;
- } else {
- parsed->life_seconds =
- (int)life_seconds_long;
- }
- }
-
- RD_IF_FREE(life_seconds_text, rd_free);
-
- } else if (!strncmp(prefix_extension, loc,
- strlen(prefix_extension))) {
- char *extension_key = NULL;
-
- r = parse_ujws_config_value_for_prefix(
- &loc, prefix_extension, '=', &extension_key, errstr,
- errstr_size);
-
- if (!r && !*extension_key) {
- rd_snprintf(errstr, errstr_size,
- "Invalid "
- "sasl.oauthbearer.config: "
- "empty '%s' key",
- prefix_extension);
- r = -1;
- } else if (!r) {
- char *extension_value = NULL;
- r = parse_ujws_config_value_for_prefix(
- &loc, "", ' ', &extension_value, errstr,
- errstr_size);
- if (!r) {
- rd_list_add(
- &parsed->extensions,
- rd_strtup_new(extension_key,
- extension_value));
- rd_free(extension_value);
- }
- }
-
- RD_IF_FREE(extension_key, rd_free);
-
- } else {
- rd_snprintf(errstr, errstr_size,
- "Unrecognized sasl.oauthbearer.config "
- "beginning at: %s",
- loc);
- r = -1;
- }
- }
-
- rd_free(cfg_copy);
-
- return r;
-}
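
To make the accepted grammar concrete, a sketch (helper name and all values invented) of a sasl.oauthbearer.config string exercising every prefix the parser recognizes:

    static void set_ujws_config(rd_kafka_conf_t *conf) {
            char errstr[512];

            /* Space-separated name=value pairs, parsed by
             * parse_ujws_config() above. */
            rd_kafka_conf_set(conf, "sasl.oauthbearer.config",
                              "principalClaimName=sub principal=admin "
                              "scopeClaimName=scope scope=test.scope "
                              "lifeSeconds=3600 extension_traceId=abc",
                              errstr, sizeof(errstr));
    }
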
-
-/**
- * @brief Create unsecured JWS compact serialization
- * from the given information.
- * @returns allocated memory that the caller must free.
- */
-static char *create_jws_compact_serialization(
- const struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed,
- rd_ts_t now_wallclock) {
- static const char *jose_header_encoded =
- "eyJhbGciOiJub25lIn0"; // {"alg":"none"}
- int scope_json_length = 0;
- int max_json_length;
- double now_wallclock_seconds;
- char *scope_json;
- char *scope_curr;
- int i;
- char *claims_json;
- char *jws_claims;
- size_t encode_len;
- char *jws_last_char;
- char *jws_maybe_non_url_char;
- char *retval_jws;
- size_t retval_size;
- rd_list_t scope;
-
- rd_list_init(&scope, 0, rd_free);
- if (parsed->scope_csv_text) {
- /* Convert from csv to rd_list_t and
- * calculate json length. */
- char *start = parsed->scope_csv_text;
- char *curr = start;
-
- while (*curr != '\0') {
- /* Ignore empty elements (e.g. ",,") */
- while (*curr == ',') {
- ++curr;
- ++start;
- }
-
- while (*curr != '\0' && *curr != ',')
- ++curr;
-
- if (curr == start)
- continue;
-
- if (*curr == ',') {
- *curr = '\0';
- ++curr;
- }
-
- if (!rd_list_find(&scope, start, (void *)strcmp))
- rd_list_add(&scope, rd_strdup(start));
-
- if (scope_json_length == 0) {
- scope_json_length =
- 2 + // ,"
- (int)strlen(parsed->scope_claim_name) +
- 4 + // ":["
- (int)strlen(start) + 1 + // "
- 1; // ]
- } else {
- scope_json_length += 2; // ,"
- scope_json_length += (int)strlen(start);
- scope_json_length += 1; // "
- }
-
- start = curr;
- }
- }
-
- now_wallclock_seconds = now_wallclock / 1000000.0;
-
- /* Generate json */
- max_json_length = 2 + // {"
- (int)strlen(parsed->principal_claim_name) +
- 3 + // ":"
- (int)strlen(parsed->principal) + 8 + // ","iat":
- 14 + // iat NumericDate (e.g. 1549251467.546)
- 7 + // ,"exp":
- 14 + // exp NumericDate (e.g. 1549252067.546)
- scope_json_length + 1; // }
-
- /* Generate scope portion of json */
- scope_json = rd_malloc(scope_json_length + 1);
- *scope_json = '\0';
- scope_curr = scope_json;
-
- for (i = 0; i < rd_list_cnt(&scope); i++) {
- if (i == 0)
- scope_curr += rd_snprintf(
- scope_curr,
- (size_t)(scope_json + scope_json_length + 1 -
- scope_curr),
- ",\"%s\":[\"", parsed->scope_claim_name);
- else
- scope_curr += sprintf(scope_curr, "%s", ",\"");
- scope_curr += sprintf(scope_curr, "%s\"",
- (const char *)rd_list_elem(&scope, i));
- if (i == rd_list_cnt(&scope) - 1)
- scope_curr += sprintf(scope_curr, "%s", "]");
- }
-
- claims_json = rd_malloc(max_json_length + 1);
- rd_snprintf(claims_json, max_json_length + 1,
- "{\"%s\":\"%s\",\"iat\":%.3f,\"exp\":%.3f%s}",
- parsed->principal_claim_name, parsed->principal,
- now_wallclock_seconds,
- now_wallclock_seconds + parsed->life_seconds, scope_json);
- rd_free(scope_json);
-
- /* Convert to base64URL format, first to base64, then to base64URL */
- retval_size = strlen(jose_header_encoded) + 1 +
- (((max_json_length + 2) / 3) * 4) + 1 + 1;
- retval_jws = rd_malloc(retval_size);
- rd_snprintf(retval_jws, retval_size, "%s.", jose_header_encoded);
- jws_claims = retval_jws + strlen(retval_jws);
- encode_len =
- EVP_EncodeBlock((uint8_t *)jws_claims, (uint8_t *)claims_json,
- (int)strlen(claims_json));
- rd_free(claims_json);
- jws_last_char = jws_claims + encode_len - 1;
-
- /* Convert from padded base64 to unpadded base64URL
- * and eliminate any padding. */
- while (jws_last_char >= jws_claims && *jws_last_char == '=')
- --jws_last_char;
- *(++jws_last_char) = '.';
- *(jws_last_char + 1) = '\0';
-
- /* Convert the 2 differing encode characters */
- for (jws_maybe_non_url_char = retval_jws; *jws_maybe_non_url_char;
- jws_maybe_non_url_char++)
- if (*jws_maybe_non_url_char == '+')
- *jws_maybe_non_url_char = '-';
- else if (*jws_maybe_non_url_char == '/')
- *jws_maybe_non_url_char = '_';
-
- rd_list_destroy(&scope);
-
- return retval_jws;
-}
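
Given the constants above, the returned serialization has the shape

    eyJhbGciOiJub25lIn0.<unpadded base64url claims JSON>.

i.e. the fixed {"alg":"none"} header, the claims, and an empty signature part after the trailing dot, matching the unsecured JWS layout of RFC 7515.
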
-
-/**
- * @brief Same as rd_kafka_oauthbearer_unsecured_token() except it takes
- *        additional explicit arguments and returns a status code along with
- * the token to set in order to facilitate unit testing.
- * @param token output defining the token to set
- * @param cfg the config to parse (typically from `sasl.oauthbearer.config`)
- * @param now_wallclock_ms the value to be used for the `iat` claim
- * (and by implication, the `exp` claim)
- * @returns -1 on failure (\p errstr set), else 0.
- */
-static int rd_kafka_oauthbearer_unsecured_token0(
- struct rd_kafka_sasl_oauthbearer_token *token,
- const char *cfg,
- int64_t now_wallclock_ms,
- char *errstr,
- size_t errstr_size) {
- struct rd_kafka_sasl_oauthbearer_parsed_ujws parsed = RD_ZERO_INIT;
- int r;
- int i;
-
- if (!cfg || !*cfg) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "must not be empty");
- return -1;
- }
-
- memset(token, 0, sizeof(*token));
-
- rd_list_init(&parsed.extensions, 0,
- (void (*)(void *))rd_strtup_destroy);
-
- if (!(r = parse_ujws_config(cfg, &parsed, errstr, errstr_size))) {
- /* Make sure we have required and valid info */
- if (!parsed.principal_claim_name)
- parsed.principal_claim_name = rd_strdup("sub");
- if (!parsed.scope_claim_name)
- parsed.scope_claim_name = rd_strdup("scope");
- if (!parsed.life_seconds)
- parsed.life_seconds = 3600;
- if (!parsed.principal) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "no principal=<value>");
- r = -1;
- } else if (strchr(parsed.principal, '"')) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "'\"' cannot appear in principal: %s",
- parsed.principal);
- r = -1;
- } else if (strchr(parsed.principal_claim_name, '"')) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "'\"' cannot appear in "
- "principalClaimName: %s",
- parsed.principal_claim_name);
- r = -1;
- } else if (strchr(parsed.scope_claim_name, '"')) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "'\"' cannot appear in scopeClaimName: %s",
- parsed.scope_claim_name);
- r = -1;
- } else if (parsed.scope_csv_text &&
- strchr(parsed.scope_csv_text, '"')) {
- rd_snprintf(errstr, errstr_size,
- "Invalid sasl.oauthbearer.config: "
- "'\"' cannot appear in scope: %s",
- parsed.scope_csv_text);
- r = -1;
- } else {
- char **extensionv;
- int extension_pair_count;
- char *jws = create_jws_compact_serialization(
- &parsed, now_wallclock_ms * 1000);
-
- extension_pair_count = rd_list_cnt(&parsed.extensions);
- extensionv = rd_malloc(sizeof(*extensionv) * 2 *
- extension_pair_count);
- for (i = 0; i < extension_pair_count; ++i) {
- rd_strtup_t *strtup =
- (rd_strtup_t *)rd_list_elem(
- &parsed.extensions, i);
- extensionv[2 * i] = rd_strdup(strtup->name);
- extensionv[2 * i + 1] =
- rd_strdup(strtup->value);
- }
- token->token_value = jws;
- token->md_lifetime_ms =
- now_wallclock_ms + parsed.life_seconds * 1000;
- token->md_principal_name = rd_strdup(parsed.principal);
- token->extensions = extensionv;
- token->extension_size = 2 * extension_pair_count;
- }
- }
- RD_IF_FREE(parsed.principal_claim_name, rd_free);
- RD_IF_FREE(parsed.principal, rd_free);
- RD_IF_FREE(parsed.scope_claim_name, rd_free);
- RD_IF_FREE(parsed.scope_csv_text, rd_free);
- rd_list_destroy(&parsed.extensions);
-
- if (r == -1)
- rd_kafka_sasl_oauthbearer_token_free(token);
-
- return r;
-}
-
-/**
- * @brief Default SASL/OAUTHBEARER token refresh callback that generates an
- * unsecured JWS as per https://tools.ietf.org/html/rfc7515#appendix-A.5.
- *
- * This method interprets `sasl.oauthbearer.config` as space-separated
- * name=value pairs with valid names including principalClaimName,
- * principal, scopeClaimName, scope, and lifeSeconds. The default
- * value for principalClaimName is "sub". The principal must be specified.
- * The default value for scopeClaimName is "scope", and the default value
- * for lifeSeconds is 3600. The scope value is CSV format with the
- * default value being no/empty scope. For example:
- * "principalClaimName=azp principal=admin scopeClaimName=roles
- * scope=role1,role2 lifeSeconds=600".
- *
- * SASL extensions can be communicated to the broker via
- * extension_NAME=value. For example:
- * "principal=admin extension_traceId=123". Extension names and values
- * must conform to the required syntax as per
- * https://tools.ietf.org/html/rfc7628#section-3.1
- *
- * All values -- whether extensions, claim names, or scope elements -- must not
- * include a quote (") character. The parsing rules also imply that names
- * and values cannot include a space character, and scope elements cannot
- * include a comma (,) character.
- *
- * The existence of any kind of parsing problem -- an unrecognized name,
- * a quote character in a value, an empty value, etc. -- raises the
- * \c RD_KAFKA_RESP_ERR__AUTHENTICATION event.
- *
- * Unsecured tokens are not to be used in production -- they are only good for
- * testing and development purposes -- so while the inflexibility of the
- * parsing rules is acknowledged, it is assumed that this is not problematic.
- */
-void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque) {
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT;
-
- rd_kafka_dbg(rk, SECURITY, "OAUTHBEARER", "Creating unsecured token");
-
- if (rd_kafka_oauthbearer_unsecured_token0(&token, oauthbearer_config,
- rd_uclock() / 1000, errstr,
- sizeof(errstr)) == -1 ||
- rd_kafka_oauthbearer_set_token(
- rk, token.token_value, token.md_lifetime_ms,
- token.md_principal_name, (const char **)token.extensions,
- token.extension_size, errstr, sizeof(errstr)) == -1) {
- rd_kafka_oauthbearer_set_token_failure(rk, errstr);
- }
-
- rd_kafka_sasl_oauthbearer_token_free(&token);
-}
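For illustration, the refresh callback above is driven entirely by the `sasl.oauthbearer.config` property; a configuration sketch using the example from the doc comment (property values are made up, and the unsecured JWS is for testing/development only):

#include <librdkafka/rdkafka.h>

static rd_kafka_conf_t *make_test_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        rd_kafka_conf_set(conf, "security.protocol", "SASL_PLAINTEXT",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.mechanism", "OAUTHBEARER",
                          errstr, sizeof(errstr));
        rd_kafka_conf_set(conf, "sasl.oauthbearer.config",
                          "principalClaimName=azp principal=admin "
                          "scopeClaimName=roles scope=role1,role2 "
                          "lifeSeconds=600",
                          errstr, sizeof(errstr));
        return conf;
}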
-
-/**
- * @brief Close and free authentication state
- */
-static void rd_kafka_sasl_oauthbearer_close(rd_kafka_transport_t *rktrans) {
- struct rd_kafka_sasl_oauthbearer_state *state =
- rktrans->rktrans_sasl.state;
-
- if (!state)
- return;
-
- RD_IF_FREE(state->server_error_msg, rd_free);
- rd_free(state->token_value);
- rd_free(state->md_principal_name);
- rd_list_destroy(&state->extensions);
- rd_free(state);
-}
-
-
-
-/**
- * @brief Build client-first-message
- */
-static void rd_kafka_sasl_oauthbearer_build_client_first_message(
- rd_kafka_transport_t *rktrans,
- rd_chariov_t *out) {
- struct rd_kafka_sasl_oauthbearer_state *state =
- rktrans->rktrans_sasl.state;
-
- /*
- * https://tools.ietf.org/html/rfc7628#section-3.1
- * kvsep = %x01
- * key = 1*(ALPHA)
- * value = *(VCHAR / SP / HTAB / CR / LF )
- * kvpair = key "=" value kvsep
- * ;;gs2-header = See RFC 5801
- * client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
- */
-
- static const char *gs2_header = "n,,";
- static const char *kvsep = "\x01";
- const int kvsep_size = (int)strlen(kvsep);
- int extension_size = 0;
- int i;
- char *buf;
- int size_written;
- unsigned long r;
-
- for (i = 0; i < rd_list_cnt(&state->extensions); i++) {
- rd_strtup_t *extension = rd_list_elem(&state->extensions, i);
- // kvpair = key "=" value kvsep
- extension_size += (int)strlen(extension->name) + 1 // "="
- + (int)strlen(extension->value) + kvsep_size;
- }
-
- // client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
- out->size = strlen(gs2_header) + kvsep_size + strlen("auth=Bearer ") +
- strlen(state->token_value) + kvsep_size + extension_size +
- kvsep_size;
- out->ptr = rd_malloc(out->size + 1);
-
- buf = out->ptr;
- size_written = 0;
- r = rd_snprintf(buf, out->size + 1 - size_written,
- "%s%sauth=Bearer %s%s", gs2_header, kvsep,
- state->token_value, kvsep);
- rd_assert(r < out->size + 1 - size_written);
- size_written += r;
- buf = out->ptr + size_written;
-
- for (i = 0; i < rd_list_cnt(&state->extensions); i++) {
- rd_strtup_t *extension = rd_list_elem(&state->extensions, i);
- r = rd_snprintf(buf, out->size + 1 - size_written, "%s=%s%s",
- extension->name, extension->value, kvsep);
- rd_assert(r < out->size + 1 - size_written);
- size_written += r;
- buf = out->ptr + size_written;
- }
-
- r = rd_snprintf(buf, out->size + 1 - size_written, "%s", kvsep);
- rd_assert(r < out->size + 1 - size_written);
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER",
- "Built client first message");
-}
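For a hypothetical token "abc" and a single extension traceId=123, the message assembled above is, byte for byte:

static const char example_client_first[] =
    "n,,"             /* gs2-header */
    "\x01"            /* kvsep */
    "auth=Bearer abc" /* auth kvpair... */
    "\x01"            /* ...terminated by kvsep */
    "traceId=123"     /* extension kvpair... */
    "\x01"            /* ...terminated by kvsep */
    "\x01";           /* final kvsep closing the client-resp */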
-
-
-
-/**
- * @brief SASL OAUTHBEARER client state machine
- * @returns -1 on failure (\p errstr set), else 0.
- */
-static int rd_kafka_sasl_oauthbearer_fsm(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *in,
- char *errstr,
- size_t errstr_size) {
- static const char *state_names[] = {
- "client-first-message",
- "server-first-message",
- "server-failure-message",
- };
- struct rd_kafka_sasl_oauthbearer_state *state =
- rktrans->rktrans_sasl.state;
- rd_chariov_t out = RD_ZERO_INIT;
- int r = -1;
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER",
- "SASL OAUTHBEARER client in state %s",
- state_names[state->state]);
-
- switch (state->state) {
- case RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE:
- rd_dassert(!in); /* Not expecting any server-input */
-
- rd_kafka_sasl_oauthbearer_build_client_first_message(rktrans,
- &out);
- state->state = RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG;
- break;
-
-
- case RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG:
- if (!in->size || !*in->ptr) {
- /* Success */
- rd_rkb_dbg(rktrans->rktrans_rkb,
- SECURITY | RD_KAFKA_DBG_BROKER,
- "OAUTHBEARER",
- "SASL OAUTHBEARER authentication "
- "successful (principal=%s)",
- state->md_principal_name);
- rd_kafka_sasl_auth_done(rktrans);
- r = 0;
- break;
- }
-
- /* Failure; save error message for later */
- state->server_error_msg = rd_strndup(in->ptr, in->size);
-
- /*
- * https://tools.ietf.org/html/rfc7628#section-3.1
- * kvsep = %x01
- * client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
- *
- * Send final kvsep (CTRL-A) character
- */
- out.size = 1;
- out.ptr = rd_malloc(out.size + 1);
- rd_snprintf(out.ptr, out.size + 1, "\x01");
- state->state =
- RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL;
- r = 0; // Will fail later in next state after sending response
- break;
-
- case RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL:
-        /* Failure as previously communicated by server first message */
- rd_snprintf(errstr, errstr_size,
- "SASL OAUTHBEARER authentication failed "
- "(principal=%s): %s",
- state->md_principal_name, state->server_error_msg);
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
- "OAUTHBEARER", "%s", errstr);
- r = -1;
- break;
- }
-
- if (out.ptr) {
- r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr,
- errstr_size);
- rd_free(out.ptr);
- }
-
- return r;
-}
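Read from the switch cases above, the two possible state progressions are sketched here:

/* Success: SEND_CLIENT_FIRST_MESSAGE -> RECV_SERVER_FIRST_MSG
 *          -> empty server reply -> rd_kafka_sasl_auth_done()
 *
 * Failure: SEND_CLIENT_FIRST_MESSAGE -> RECV_SERVER_FIRST_MSG
 *          -> non-empty server reply (error message saved) -> send kvsep
 *          -> RECV_SERVER_MSG_AFTER_FAIL -> return -1 with errstr set */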
-
-
-/**
- * @brief Handle received frame from broker.
- */
-static int rd_kafka_sasl_oauthbearer_recv(rd_kafka_transport_t *rktrans,
- const void *buf,
- size_t size,
- char *errstr,
- size_t errstr_size) {
- const rd_chariov_t in = {.ptr = (char *)buf, .size = size};
- return rd_kafka_sasl_oauthbearer_fsm(rktrans, &in, errstr, errstr_size);
-}
-
-
-/**
- * @brief Initialize and start SASL OAUTHBEARER (builtin) authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * @locality broker thread
- */
-static int rd_kafka_sasl_oauthbearer_client_new(rd_kafka_transport_t *rktrans,
- const char *hostname,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_oauthbearer_handle_t *handle =
- rktrans->rktrans_rkb->rkb_rk->rk_sasl.handle;
- struct rd_kafka_sasl_oauthbearer_state *state;
-
- state = rd_calloc(1, sizeof(*state));
- state->state = RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE;
-
- /*
- * Save off the state structure now, before any possibility of
- * returning, so that we will always free up the allocated memory in
- * rd_kafka_sasl_oauthbearer_close().
- */
- rktrans->rktrans_sasl.state = state;
-
- /*
- * Make sure we have a consistent view of the token and extensions
- * throughout the authentication process -- even if it is refreshed
- * midway through this particular authentication.
- */
- rwlock_rdlock(&handle->lock);
- if (!handle->token_value) {
- rd_snprintf(errstr, errstr_size,
- "OAUTHBEARER cannot log in because there "
- "is no token available; last error: %s",
- handle->errstr ? handle->errstr
- : "(not available)");
- rwlock_rdunlock(&handle->lock);
- return -1;
- }
-
- state->token_value = rd_strdup(handle->token_value);
- state->md_principal_name = rd_strdup(handle->md_principal_name);
- rd_list_copy_to(&state->extensions, &handle->extensions,
- rd_strtup_list_copy, NULL);
-
- rwlock_rdunlock(&handle->lock);
-
- /* Kick off the FSM */
- return rd_kafka_sasl_oauthbearer_fsm(rktrans, NULL, errstr,
- errstr_size);
-}
-
-
-/**
- * @brief Token refresh timer callback.
- *
- * @locality rdkafka main thread
- */
-static void
-rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_t *rk = arg;
- rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
-
- /* Enqueue a token refresh if necessary */
- rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary(handle);
-}
-
-
-/**
- * @brief Per-client-instance initializer
- */
-static int rd_kafka_sasl_oauthbearer_init(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_oauthbearer_handle_t *handle;
-
- handle = rd_calloc(1, sizeof(*handle));
- rk->rk_sasl.handle = handle;
-
- rwlock_init(&handle->lock);
-
- handle->rk = rk;
-
- rd_list_init(&handle->extensions, 0,
- (void (*)(void *))rd_strtup_destroy);
-
- rd_kafka_timer_start(
- &rk->rk_timers, &handle->token_refresh_tmr, 1 * 1000 * 1000,
- rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb, rk);
-
- /* Automatically refresh the token if using the builtin
-         * unsecured JWS token refresher, to avoid an initial connection
- * stall as we wait for the application to call poll(). */
- if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb ==
- rd_kafka_oauthbearer_unsecured_token) {
- rk->rk_conf.sasl.oauthbearer.token_refresh_cb(
- rk, rk->rk_conf.sasl.oauthbearer_config,
- rk->rk_conf.opaque);
-
- return 0;
- }
-
- if (rk->rk_conf.sasl.enable_callback_queue) {
- /* SASL specific callback queue enabled */
- rk->rk_sasl.callback_q = rd_kafka_q_new(rk);
- handle->callback_q = rd_kafka_q_keep(rk->rk_sasl.callback_q);
- } else {
- /* Use main queue */
- handle->callback_q = rd_kafka_q_keep(rk->rk_rep);
- }
-
-#if WITH_OAUTHBEARER_OIDC
- if (rk->rk_conf.sasl.oauthbearer.method ==
- RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
- rk->rk_conf.sasl.oauthbearer.token_refresh_cb ==
- rd_kafka_oidc_token_refresh_cb) {
- handle->internal_refresh = rd_true;
- rd_kafka_sasl_background_callbacks_enable(rk);
- }
-#endif
-
- /* Otherwise enqueue a refresh callback for the application. */
- rd_kafka_oauthbearer_enqueue_token_refresh(handle);
-
- return 0;
-}
-
-
-/**
- * @brief Per-client-instance destructor
- */
-static void rd_kafka_sasl_oauthbearer_term(rd_kafka_t *rk) {
- rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
-
- if (!handle)
- return;
-
- rk->rk_sasl.handle = NULL;
-
- rd_kafka_timer_stop(&rk->rk_timers, &handle->token_refresh_tmr, 1);
-
- RD_IF_FREE(handle->md_principal_name, rd_free);
- RD_IF_FREE(handle->token_value, rd_free);
- rd_list_destroy(&handle->extensions);
- RD_IF_FREE(handle->errstr, rd_free);
- RD_IF_FREE(handle->callback_q, rd_kafka_q_destroy);
-
- rwlock_destroy(&handle->lock);
-
- rd_free(handle);
-}
-
-
-/**
- * @brief SASL/OAUTHBEARER is unable to connect unless a valid
- * token is available, and a valid token CANNOT be
- * available unless/until an initial token retrieval
- * succeeds, so wait for this precondition if necessary.
- */
-static rd_bool_t rd_kafka_sasl_oauthbearer_ready(rd_kafka_t *rk) {
- rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
-
- if (!handle)
- return rd_false;
-
- return rd_kafka_oauthbearer_has_token(handle);
-}
-
-
-/**
- * @brief Validate OAUTHBEARER config, which is a no-op
- * (we rely on initial token retrieval)
- */
-static int rd_kafka_sasl_oauthbearer_conf_validate(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
- /*
- * We must rely on the initial token retrieval as a proxy
- * for configuration validation because the configuration is
- * implementation-dependent, and it is not necessarily the case
- * that the config reflects the default unsecured JWS config
- * that we know how to parse.
- */
- return 0;
-}
-
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider = {
- .name = "OAUTHBEARER (builtin)",
- .init = rd_kafka_sasl_oauthbearer_init,
- .term = rd_kafka_sasl_oauthbearer_term,
- .ready = rd_kafka_sasl_oauthbearer_ready,
- .client_new = rd_kafka_sasl_oauthbearer_client_new,
- .recv = rd_kafka_sasl_oauthbearer_recv,
- .close = rd_kafka_sasl_oauthbearer_close,
- .conf_validate = rd_kafka_sasl_oauthbearer_conf_validate,
-};
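The provider struct wires these callbacks into librdkafka's generic SASL framework; as far as can be read from the functions and doc comments above, the call order for one client instance is:

/* init()       - once per rd_kafka_t instance
 * ready()      - polled until an initial token has been retrieved
 * client_new() - per broker connection; kicks off the FSM
 * recv()       - per SASL frame received from the broker
 * close()      - when a connection's SASL state is torn down
 * term()       - once per rd_kafka_t instance */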
-
-
-
-/**
- * @name Unit tests
- *
- *
- */
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should generate correct default values.
- */
-static int do_unittest_config_defaults(void) {
- static const char *sasl_oauthbearer_config =
- "principal=fubar "
- "scopeClaimName=whatever";
- // default scope is empty, default lifetime is 3600 seconds
- // {"alg":"none"}
- // .
- // {"sub":"fubar","iat":1.000,"exp":3601.000}
- //
- static const char *expected_token_value =
- "eyJhbGciOiJub25lIn0"
- "."
- "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6MzYwMS4wMDB9"
- ".";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
- if (r == -1)
- RD_UT_FAIL("Failed to create a token: %s: %s",
- sasl_oauthbearer_config, errstr);
-
- RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 3600 * 1000,
- "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms);
- RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"),
- "Invalid md_principal_name %s", token.md_principal_name);
- RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value),
- "Invalid token_value %s, expected %s", token.token_value,
- expected_token_value);
-
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should generate correct token for explicit scope and lifeSeconds values.
- */
-static int do_unittest_config_explicit_scope_and_life(void) {
- static const char *sasl_oauthbearer_config =
- "principal=fubar "
- "scope=role1,role2 lifeSeconds=60";
- // {"alg":"none"}
- // .
- // {"sub":"fubar","iat":1.000,"exp":61.000,"scope":["role1","role2"]}
- //
- static const char *expected_token_value =
- "eyJhbGciOiJub25lIn0"
- "."
- "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJzY29wZ"
- "SI6WyJyb2xlMSIsInJvbGUyIl19"
- ".";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
- if (r == -1)
- RD_UT_FAIL("Failed to create a token: %s: %s",
- sasl_oauthbearer_config, errstr);
-
- RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000,
- "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms);
- RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"),
- "Invalid md_principal_name %s", token.md_principal_name);
- RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value),
- "Invalid token_value %s, expected %s", token.token_value,
- expected_token_value);
-
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should generate correct token when all values are provided explicitly.
- */
-static int do_unittest_config_all_explicit_values(void) {
- static const char *sasl_oauthbearer_config =
- "principal=fubar "
- "principalClaimName=azp scope=role1,role2 "
- "scopeClaimName=roles lifeSeconds=60";
- // {"alg":"none"}
- // .
- // {"azp":"fubar","iat":1.000,"exp":61.000,"roles":["role1","role2"]}
- //
- static const char *expected_token_value =
- "eyJhbGciOiJub25lIn0"
- "."
- "eyJhenAiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJyb2xlc"
- "yI6WyJyb2xlMSIsInJvbGUyIl19"
- ".";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
- if (r == -1)
- RD_UT_FAIL("Failed to create a token: %s: %s",
- sasl_oauthbearer_config, errstr);
-
- RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000,
- "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms);
- RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"),
- "Invalid md_principal_name %s", token.md_principal_name);
- RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value),
- "Invalid token_value %s, expected %s", token.token_value,
- expected_token_value);
-
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should fail when no principal specified.
- */
-static int do_unittest_config_no_principal_should_fail(void) {
- static const char *expected_msg =
- "Invalid sasl.oauthbearer.config: "
- "no principal=<value>";
- static const char *sasl_oauthbearer_config =
- "extension_notaprincipal=hi";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
- if (r != -1)
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_ASSERT(r == -1, "Did not fail despite missing principal");
-
- RD_UT_ASSERT(!strcmp(errstr, expected_msg),
- "Incorrect error message when no principal: "
- "expected=%s received=%s",
- expected_msg, errstr);
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should fail when no sasl.oauthbearer.config is specified.
- */
-static int do_unittest_config_empty_should_fail(void) {
- static const char *expected_msg =
- "Invalid sasl.oauthbearer.config: "
- "must not be empty";
- static const char *sasl_oauthbearer_config = "";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
- if (r != -1)
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_ASSERT(r == -1, "Did not fail despite empty config");
-
- RD_UT_ASSERT(!strcmp(errstr, expected_msg),
- "Incorrect error message with empty config: "
- "expected=%s received=%s",
- expected_msg, errstr);
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should fail when something unrecognized is specified.
- */
-static int do_unittest_config_unrecognized_should_fail(void) {
- static const char *expected_msg =
- "Unrecognized "
- "sasl.oauthbearer.config beginning at: unrecognized";
- static const char *sasl_oauthbearer_config =
- "principal=fubar unrecognized";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
- if (r != -1)
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_ASSERT(r == -1, "Did not fail with something unrecognized");
-
- RD_UT_ASSERT(!strcmp(errstr, expected_msg),
- "Incorrect error message with something unrecognized: "
- "expected=%s received=%s",
- expected_msg, errstr);
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should fail when empty values are specified.
- */
-static int do_unittest_config_empty_value_should_fail(void) {
- static const char *sasl_oauthbearer_configs[] = {
- "principal=", "principal=fubar principalClaimName=",
- "principal=fubar scope=", "principal=fubar scopeClaimName=",
- "principal=fubar lifeSeconds="};
- static const char *expected_prefix =
- "Invalid sasl.oauthbearer.config: empty";
- size_t i;
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- int r;
-
- for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *);
- i++) {
- struct rd_kafka_sasl_oauthbearer_token token;
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_configs[i], now_wallclock_ms,
- errstr, sizeof(errstr));
- if (r != -1)
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_ASSERT(r == -1, "Did not fail with an empty value: %s",
- sasl_oauthbearer_configs[i]);
-
- RD_UT_ASSERT(
- !strncmp(expected_prefix, errstr, strlen(expected_prefix)),
- "Incorrect error message prefix when empty "
- "(%s): expected=%s received=%s",
- sasl_oauthbearer_configs[i], expected_prefix, errstr);
- }
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should fail when value with embedded quote is specified.
- */
-static int do_unittest_config_value_with_quote_should_fail(void) {
- static const char *sasl_oauthbearer_configs[] = {
- "principal=\"fu", "principal=fubar principalClaimName=\"bar",
- "principal=fubar scope=\"a,b,c",
- "principal=fubar scopeClaimName=\"baz"};
- static const char *expected_prefix =
- "Invalid "
- "sasl.oauthbearer.config: '\"' cannot appear in ";
- size_t i;
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- int r;
-
- for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *);
- i++) {
- struct rd_kafka_sasl_oauthbearer_token token;
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_configs[i], now_wallclock_ms,
- errstr, sizeof(errstr));
- if (r != -1)
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_ASSERT(r == -1, "Did not fail with embedded quote: %s",
- sasl_oauthbearer_configs[i]);
-
- RD_UT_ASSERT(
- !strncmp(expected_prefix, errstr, strlen(expected_prefix)),
- "Incorrect error message prefix with "
- "embedded quote (%s): expected=%s received=%s",
- sasl_oauthbearer_configs[i], expected_prefix, errstr);
- }
- RD_UT_PASS();
-}
-
-/**
- * @brief `sasl.oauthbearer.config` test:
- * should generate correct extensions.
- */
-static int do_unittest_config_extensions(void) {
- static const char *sasl_oauthbearer_config =
- "principal=fubar "
- "extension_a=b extension_yz=yzval";
- rd_ts_t now_wallclock_ms = 1000;
- char errstr[512];
- struct rd_kafka_sasl_oauthbearer_token token;
- int r;
-
- r = rd_kafka_oauthbearer_unsecured_token0(
- &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
- sizeof(errstr));
-
- if (r == -1)
- RD_UT_FAIL("Failed to create a token: %s: %s",
- sasl_oauthbearer_config, errstr);
-
- RD_UT_ASSERT(token.extension_size == 4,
- "Incorrect extensions: expected 4, received %" PRIusz,
- token.extension_size);
-
- RD_UT_ASSERT(!strcmp(token.extensions[0], "a") &&
- !strcmp(token.extensions[1], "b") &&
- !strcmp(token.extensions[2], "yz") &&
- !strcmp(token.extensions[3], "yzval"),
- "Incorrect extensions: expected a=b and "
- "yz=yzval but received %s=%s and %s=%s",
- token.extensions[0], token.extensions[1],
- token.extensions[2], token.extensions[3]);
-
- rd_kafka_sasl_oauthbearer_token_free(&token);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief Make sure illegal extension keys are rejected
- */
-static int do_unittest_illegal_extension_keys_should_fail(void) {
- static const char *illegal_keys[] = {"", "auth", "a1", " a"};
- size_t i;
- char errstr[512];
- int r;
-
- for (i = 0; i < sizeof(illegal_keys) / sizeof(const char *); i++) {
- r = check_oauthbearer_extension_key(illegal_keys[i], errstr,
- sizeof(errstr));
- RD_UT_ASSERT(r == -1,
- "Did not recognize illegal extension key: %s",
- illegal_keys[i]);
- }
- RD_UT_PASS();
-}
-
-/**
- * @brief Make sure an odd (non-even) extension size is rejected
- */
-static int do_unittest_odd_extension_size_should_fail(void) {
- static const char *expected_errstr =
- "Incorrect extension size "
- "(must be a non-negative multiple of 2): 1";
- char errstr[512];
- rd_kafka_resp_err_t err;
- rd_kafka_t rk = RD_ZERO_INIT;
- rd_kafka_sasl_oauthbearer_handle_t handle = RD_ZERO_INIT;
-
- rk.rk_conf.sasl.provider = &rd_kafka_sasl_oauthbearer_provider;
- rk.rk_sasl.handle = &handle;
-
- rwlock_init(&handle.lock);
-
- err = rd_kafka_oauthbearer_set_token0(&rk, "abcd", 1000, "fubar", NULL,
- 1, errstr, sizeof(errstr));
-
- rwlock_destroy(&handle.lock);
-
- RD_UT_ASSERT(err, "Did not recognize illegal extension size");
- RD_UT_ASSERT(!strcmp(errstr, expected_errstr),
- "Incorrect error message for illegal "
- "extension size: expected=%s; received=%s",
- expected_errstr, errstr);
- RD_UT_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "Expected ErrInvalidArg, not %s", rd_kafka_err2name(err));
-
- RD_UT_PASS();
-}
-
-int unittest_sasl_oauthbearer(void) {
- int fails = 0;
-
- fails += do_unittest_config_no_principal_should_fail();
- fails += do_unittest_config_empty_should_fail();
- fails += do_unittest_config_empty_value_should_fail();
- fails += do_unittest_config_value_with_quote_should_fail();
- fails += do_unittest_config_unrecognized_should_fail();
- fails += do_unittest_config_defaults();
- fails += do_unittest_config_explicit_scope_and_life();
- fails += do_unittest_config_all_explicit_values();
- fails += do_unittest_config_extensions();
- fails += do_unittest_illegal_extension_keys_should_fail();
- fails += do_unittest_odd_extension_size_should_fail();
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h
deleted file mode 100644
index 75ab51d02..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_SASL_OAUTHBEARER_H_
-#define _RDKAFKA_SASL_OAUTHBEARER_H_
-
-void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque);
-
-rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk,
- const char *token_value,
- int64_t md_lifetime_ms,
- const char *md_principal_name,
- const char **extensions,
- size_t extension_size,
- char *errstr,
- size_t errstr_size);
-
-rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk,
- const char *errstr);
-
-int unittest_sasl_oauthbearer(void);
-
-
-#endif /* _RDKAFKA_SASL_OAUTHBEARER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c
deleted file mode 100644
index 6c2773b02..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2021 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Builtin SASL OAUTHBEARER OIDC support
- */
-#include "rdkafka_int.h"
-#include "rdkafka_sasl_int.h"
-#include "rdunittest.h"
-#include "cJSON.h"
-#include <curl/curl.h>
-#include "rdhttp.h"
-#include "rdkafka_sasl_oauthbearer_oidc.h"
-
-
-/**
- * @brief Base64-encode binary input \p in, and write the base64-encoded
- *        string and its size to \p out
- */
-static void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out) {
- size_t max_len;
-
- max_len = (((in->size + 2) / 3) * 4) + 1;
- out->ptr = rd_malloc(max_len);
- rd_assert(out->ptr);
-
- out->size = EVP_EncodeBlock((uint8_t *)out->ptr, (uint8_t *)in->ptr,
- (int)in->size);
-
- rd_assert(out->size <= max_len);
- out->ptr[out->size] = 0;
-}
-
-
-/**
- * @brief Generate the Authorization field for the HTTP header.
- *        The field contains a base64-encoded string generated
- *        from \p client_id and \p client_secret.
- *
- * @returns The Authorization header field.
- *
- * @locality Any thread.
- */
-static char *rd_kafka_oidc_build_auth_header(const char *client_id,
- const char *client_secret) {
-
- rd_chariov_t client_authorization_in;
- rd_chariov_t client_authorization_out;
-
- size_t authorization_base64_header_size;
- char *authorization_base64_header;
-
- client_authorization_in.size =
- strlen(client_id) + strlen(client_secret) + 2;
- client_authorization_in.ptr = rd_malloc(client_authorization_in.size);
- rd_snprintf(client_authorization_in.ptr, client_authorization_in.size,
- "%s:%s", client_id, client_secret);
-
- client_authorization_in.size--;
- rd_base64_encode(&client_authorization_in, &client_authorization_out);
-
- authorization_base64_header_size =
- strlen("Authorization: Basic ") + client_authorization_out.size + 1;
- authorization_base64_header =
- rd_malloc(authorization_base64_header_size);
- rd_snprintf(authorization_base64_header,
- authorization_base64_header_size, "Authorization: Basic %s",
- client_authorization_out.ptr);
-
- rd_free(client_authorization_in.ptr);
- rd_free(client_authorization_out.ptr);
- return authorization_base64_header;
-}
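A worked example with made-up credentials: client_id "client" and client_secret "secret" are joined as "client:secret", whose base64 encoding is "Y2xpZW50OnNlY3JldA==", so the function returns:

/* "Authorization: Basic Y2xpZW50OnNlY3JldA==" */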
-
-
-/**
- * @brief Build headers for HTTP(S) requests based on \p client_id
- * and \p client_secret. The result will be returned in \p *headersp.
- *
- * @locality Any thread.
- */
-static void rd_kafka_oidc_build_headers(const char *client_id,
- const char *client_secret,
- struct curl_slist **headersp) {
- char *authorization_base64_header;
-
- authorization_base64_header =
- rd_kafka_oidc_build_auth_header(client_id, client_secret);
-
- *headersp = curl_slist_append(*headersp, "Accept: application/json");
- *headersp = curl_slist_append(*headersp, authorization_base64_header);
-
- *headersp = curl_slist_append(
- *headersp, "Content-Type: application/x-www-form-urlencoded");
-
- rd_free(authorization_base64_header);
-}
-
-/**
- * @brief A JWT has the format Header.Payload.Signature.
- *        Extract and decode the payload from JWT \p src.
- *        The decoded payload will be returned in \p *bufplainp.
- *
- * @returns An error message if decoding fails, else NULL.
- */
-static const char *rd_kafka_jwt_b64_decode_payload(const char *src,
- char **bufplainp) {
- char *converted_src;
- char *payload = NULL;
-
- const char *errstr = NULL;
-
- int i, padding, len;
-
- int payload_len;
- int nbytesdecoded;
-
- int payloads_start = 0;
- int payloads_end = 0;
-
- len = (int)strlen(src);
- converted_src = rd_malloc(len + 4);
-
- for (i = 0; i < len; i++) {
- switch (src[i]) {
- case '-':
- converted_src[i] = '+';
- break;
-
- case '_':
- converted_src[i] = '/';
- break;
-
- case '.':
- if (payloads_start == 0)
- payloads_start = i + 1;
- else {
- if (payloads_end > 0) {
- errstr =
- "The token is invalid with more "
- "than 2 delimiters";
- goto done;
- }
- payloads_end = i;
- }
- /* FALLTHRU */
-
- default:
- converted_src[i] = src[i];
- }
- }
-
- if (payloads_start == 0 || payloads_end == 0) {
- errstr = "The token is invalid with less than 2 delimiters";
- goto done;
- }
-
- payload_len = payloads_end - payloads_start;
- payload = rd_malloc(payload_len + 4);
- strncpy(payload, (converted_src + payloads_start), payload_len);
-
- padding = 4 - (payload_len % 4);
- if (padding < 4) {
- while (padding--)
- payload[payload_len++] = '=';
- }
-
- nbytesdecoded = ((payload_len + 3) / 4) * 3;
- *bufplainp = rd_malloc(nbytesdecoded + 1);
-
- if (EVP_DecodeBlock((uint8_t *)(*bufplainp), (uint8_t *)payload,
- (int)payload_len) == -1) {
- errstr = "Failed to decode base64 payload";
- }
-
-done:
- RD_IF_FREE(payload, rd_free);
- RD_IF_FREE(converted_src, rd_free);
- return errstr;
-}
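A worked example with a hypothetical unsecured JWS: for src = "eyJhbGciOiJub25lIn0.eyJzdWIiOiJzdWIifQ." the function locates the payload between the two '.' delimiters, re-pads it to "eyJzdWIiOiJzdWIifQ==", base64-decodes it, and returns NULL (success) with:

/* *bufplainp == "{\"sub\":\"sub\"}" */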
-
-/**
- * @brief Build post_fields with \p scope.
- * The format of the post_fields is
- * `grant_type=client_credentials&scope=scope`
- * The post_fields will be returned in \p *post_fields.
- * The post_fields_size will be returned in \p post_fields_size.
- *
- */
-static void rd_kafka_oidc_build_post_fields(const char *scope,
- char **post_fields,
- size_t *post_fields_size) {
- size_t scope_size = 0;
-
- if (scope)
- scope_size = strlen(scope);
- if (scope_size == 0) {
- *post_fields = rd_strdup("grant_type=client_credentials");
- *post_fields_size = strlen("grant_type=client_credentials");
- } else {
- *post_fields_size =
- strlen("grant_type=client_credentials&scope=") + scope_size;
- *post_fields = rd_malloc(*post_fields_size + 1);
- rd_snprintf(*post_fields, *post_fields_size + 1,
- "grant_type=client_credentials&scope=%s", scope);
- }
-}
-
-
-/**
- * @brief Implementation of the OAuth/OIDC token refresh callback. It
- *        receives the JSON response from the HTTP call to the token
- *        provider, extracts the JWT from the response, and forwards it
- *        to the broker.
- */
-void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque) {
- const int timeout_s = 20;
- const int retry = 4;
- const int retry_ms = 5 * 1000;
-
- double exp;
-
- cJSON *json = NULL;
- cJSON *payloads = NULL;
- cJSON *parsed_token, *jwt_exp, *jwt_sub;
-
- rd_http_error_t *herr;
-
- char *jwt_token;
- char *post_fields;
- char *decoded_payloads = NULL;
-
- struct curl_slist *headers = NULL;
-
- const char *token_url;
- const char *sub;
- const char *errstr;
-
- size_t post_fields_size;
- size_t extension_cnt;
- size_t extension_key_value_cnt = 0;
-
- char set_token_errstr[512];
- char decode_payload_errstr[512];
-
- char **extensions = NULL;
- char **extension_key_value = NULL;
-
- if (rd_kafka_terminating(rk))
- return;
-
- rd_kafka_oidc_build_headers(rk->rk_conf.sasl.oauthbearer.client_id,
- rk->rk_conf.sasl.oauthbearer.client_secret,
- &headers);
-
- /* Build post fields */
- rd_kafka_oidc_build_post_fields(rk->rk_conf.sasl.oauthbearer.scope,
- &post_fields, &post_fields_size);
-
- token_url = rk->rk_conf.sasl.oauthbearer.token_endpoint_url;
-
- herr = rd_http_post_expect_json(rk, token_url, headers, post_fields,
- post_fields_size, timeout_s, retry,
- retry_ms, &json);
-
- if (unlikely(herr != NULL)) {
- rd_kafka_log(rk, LOG_ERR, "OIDC",
- "Failed to retrieve OIDC "
- "token from \"%s\": %s (%d)",
- token_url, herr->errstr, herr->code);
- rd_kafka_oauthbearer_set_token_failure(rk, herr->errstr);
- rd_http_error_destroy(herr);
- goto done;
- }
-
- parsed_token = cJSON_GetObjectItem(json, "access_token");
-
- if (parsed_token == NULL) {
- rd_kafka_oauthbearer_set_token_failure(
- rk,
- "Expected JSON JWT response with "
- "\"access_token\" field");
- goto done;
- }
-
- jwt_token = cJSON_GetStringValue(parsed_token);
- if (jwt_token == NULL) {
- rd_kafka_oauthbearer_set_token_failure(
- rk,
-                    "Expected \"access_token\" JSON "
-                    "value to be a string");
- goto done;
- }
-
- errstr = rd_kafka_jwt_b64_decode_payload(jwt_token, &decoded_payloads);
- if (errstr != NULL) {
- rd_snprintf(decode_payload_errstr,
- sizeof(decode_payload_errstr),
- "Failed to decode JWT payload: %s", errstr);
- rd_kafka_oauthbearer_set_token_failure(rk,
- decode_payload_errstr);
- goto done;
- }
-
- payloads = cJSON_Parse(decoded_payloads);
- if (payloads == NULL) {
- rd_kafka_oauthbearer_set_token_failure(
- rk, "Failed to parse JSON JWT payload");
- goto done;
- }
-
- jwt_exp = cJSON_GetObjectItem(payloads, "exp");
- if (jwt_exp == NULL) {
- rd_kafka_oauthbearer_set_token_failure(
- rk,
- "Expected JSON JWT response with "
- "\"exp\" field");
- goto done;
- }
-
- exp = cJSON_GetNumberValue(jwt_exp);
- if (exp <= 0) {
- rd_kafka_oauthbearer_set_token_failure(
- rk,
- "Expected JSON JWT response with "
- "valid \"exp\" field");
- goto done;
- }
-
- jwt_sub = cJSON_GetObjectItem(payloads, "sub");
- if (jwt_sub == NULL) {
- rd_kafka_oauthbearer_set_token_failure(
- rk,
- "Expected JSON JWT response with "
- "\"sub\" field");
- goto done;
- }
-
- sub = cJSON_GetStringValue(jwt_sub);
- if (sub == NULL) {
- rd_kafka_oauthbearer_set_token_failure(
- rk,
- "Expected JSON JWT response with "
- "valid \"sub\" field");
- goto done;
- }
-
- if (rk->rk_conf.sasl.oauthbearer.extensions_str) {
- extensions =
- rd_string_split(rk->rk_conf.sasl.oauthbearer.extensions_str,
- ',', rd_true, &extension_cnt);
-
- extension_key_value = rd_kafka_conf_kv_split(
- (const char **)extensions, extension_cnt,
- &extension_key_value_cnt);
- }
-
- if (rd_kafka_oauthbearer_set_token(
- rk, jwt_token, (int64_t)exp * 1000, sub,
- (const char **)extension_key_value, extension_key_value_cnt,
- set_token_errstr,
- sizeof(set_token_errstr)) != RD_KAFKA_RESP_ERR_NO_ERROR)
- rd_kafka_oauthbearer_set_token_failure(rk, set_token_errstr);
-
-done:
- RD_IF_FREE(decoded_payloads, rd_free);
- RD_IF_FREE(post_fields, rd_free);
- RD_IF_FREE(json, cJSON_Delete);
- RD_IF_FREE(headers, curl_slist_free_all);
- RD_IF_FREE(extensions, rd_free);
- RD_IF_FREE(extension_key_value, rd_free);
- RD_IF_FREE(payloads, cJSON_Delete);
-}
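For illustration, the extension handling near the end of the callback splits a comma-separated key=value list; with a made-up configuration value

  sasl.oauthbearer.extensions = "logicalCluster=lkc-1,identityPoolId=pool-1"

the rd_string_split()/rd_kafka_conf_kv_split() calls above produce:

/* extensions          = { "logicalCluster=lkc-1", "identityPoolId=pool-1" }
 * extension_key_value = { "logicalCluster", "lkc-1",
 *                         "identityPoolId", "pool-1" }  (cnt == 4) */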
-
-
-/**
- * @brief Make sure the JWT can be extracted from the HTTP(S) response.
- *        The JSON response from the HTTP(S) call to the token provider is
- *        stored in rd_http_req_t.hreq_buf, and the JWT is the value of the
- *        "access_token" field, i.e. {"access_token":"*******"}.
- *        This test mocks up rd_http_req_t.hreq_buf with a dummy JWT, lets
- *        rd_http_parse_json() extract it, and verifies that the extracted
- *        JWT matches the dummy one.
- */
-static int ut_sasl_oauthbearer_oidc_should_succeed(void) {
- /* Generate a token in the https://jwt.io/ website by using the
- * following steps:
- * 1. Select the algorithm RS256 from the Algorithm drop-down menu.
- * 2. Enter the header and the payload.
- * payload should contains "exp", "iat", "sub", for example:
- * payloads = {"exp": 1636532769,
- "iat": 1516239022,
- "sub": "sub"}
- header should contains "kid", for example:
- headers={"kid": "abcedfg"} */
- static const char *expected_jwt_token =
- "eyJhbGciOiJIUzI1NiIsInR5"
- "cCI6IkpXVCIsImtpZCI6ImFiY2VkZmcifQ"
- "."
- "eyJpYXQiOjE2MzIzNzUzMjAsInN1YiI6InN"
- "1YiIsImV4cCI6MTYzMjM3NTYyMH0"
- "."
- "bT5oY8K-rS2gQ7Awc40844bK3zhzBhZb7sputErqQHY";
- char *expected_token_value;
- size_t token_len;
- rd_http_req_t hreq;
- rd_http_error_t *herr;
- cJSON *json = NULL;
- char *token;
- cJSON *parsed_token;
-
- RD_UT_BEGIN();
-
- herr = rd_http_req_init(&hreq, "");
-
- RD_UT_ASSERT(!herr,
- "Expected initialize to succeed, "
- "but failed with error code: %d, error string: %s",
- herr->code, herr->errstr);
-
- token_len = strlen("access_token") + strlen(expected_jwt_token) + 8;
-
- expected_token_value = rd_malloc(token_len);
- rd_snprintf(expected_token_value, token_len, "{\"%s\":\"%s\"}",
- "access_token", expected_jwt_token);
- rd_buf_write(hreq.hreq_buf, expected_token_value, token_len);
-
- herr = rd_http_parse_json(&hreq, &json);
- RD_UT_ASSERT(!herr,
- "Failed to parse JSON token: error code: %d, "
- "error string: %s",
- herr->code, herr->errstr);
-
- RD_UT_ASSERT(json, "Expected non-empty json.");
-
- parsed_token = cJSON_GetObjectItem(json, "access_token");
-
- RD_UT_ASSERT(parsed_token, "Expected access_token in JSON response.");
- token = parsed_token->valuestring;
-
- RD_UT_ASSERT(!strcmp(expected_jwt_token, token),
- "Incorrect token received: "
- "expected=%s; received=%s",
- expected_jwt_token, token);
-
- rd_free(expected_token_value);
- rd_http_error_destroy(herr);
- rd_http_req_destroy(&hreq);
- cJSON_Delete(json);
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Make sure that when the JSON does not include the "access_token"
- *        key, parsing succeeds but no token is extracted.
- */
-static int ut_sasl_oauthbearer_oidc_with_empty_key(void) {
- static const char *empty_token_format = "{}";
- size_t token_len;
- rd_http_req_t hreq;
- rd_http_error_t *herr;
- cJSON *json = NULL;
- cJSON *parsed_token;
-
- RD_UT_BEGIN();
-
- herr = rd_http_req_init(&hreq, "");
- RD_UT_ASSERT(!herr,
- "Expected initialization to succeed, "
- "but it failed with error code: %d, error string: %s",
- herr->code, herr->errstr);
-
- token_len = strlen(empty_token_format);
-
- rd_buf_write(hreq.hreq_buf, empty_token_format, token_len);
-
- herr = rd_http_parse_json(&hreq, &json);
-
- RD_UT_ASSERT(!herr,
- "Expected JSON token parsing to succeed, "
- "but it failed with error code: %d, error string: %s",
- herr->code, herr->errstr);
-
- RD_UT_ASSERT(json, "Expected non-empty json.");
-
- parsed_token = cJSON_GetObjectItem(json, "access_token");
-
- RD_UT_ASSERT(!parsed_token,
-                     "Did not expect access_token in JSON response");
-
- rd_http_req_destroy(&hreq);
- rd_http_error_destroy(herr);
- cJSON_Delete(json);
- cJSON_Delete(parsed_token);
- RD_UT_PASS();
-}
-
-/**
- * @brief Make sure the post_fields are built correctly when a scope is given.
- */
-static int ut_sasl_oauthbearer_oidc_post_fields(void) {
- static const char *scope = "test-scope";
- static const char *expected_post_fields =
- "grant_type=client_credentials&scope=test-scope";
-
- size_t expected_post_fields_size = strlen(expected_post_fields);
-
- size_t post_fields_size;
-
- char *post_fields;
-
- RD_UT_BEGIN();
-
- rd_kafka_oidc_build_post_fields(scope, &post_fields, &post_fields_size);
-
-        RD_UT_ASSERT(expected_post_fields_size == post_fields_size,
-                     "Expected post_fields_size %" PRIusz
-                     ", received %" PRIusz,
-                     expected_post_fields_size, post_fields_size);
-        RD_UT_ASSERT(!strcmp(expected_post_fields, post_fields),
-                     "Expected post_fields \"%s\", received \"%s\"",
-                     expected_post_fields, post_fields);
-
- rd_free(post_fields);
-
- RD_UT_PASS();
-}
-
-/**
- * @brief Make sure the post_fields are built correctly with an empty scope.
- */
-static int ut_sasl_oauthbearer_oidc_post_fields_with_empty_scope(void) {
- static const char *scope = NULL;
- static const char *expected_post_fields =
- "grant_type=client_credentials";
-
- size_t expected_post_fields_size = strlen(expected_post_fields);
-
- size_t post_fields_size;
-
- char *post_fields;
-
- RD_UT_BEGIN();
-
- rd_kafka_oidc_build_post_fields(scope, &post_fields, &post_fields_size);
-
-        RD_UT_ASSERT(expected_post_fields_size == post_fields_size,
-                     "Expected post_fields_size %" PRIusz
-                     ", received %" PRIusz,
-                     expected_post_fields_size, post_fields_size);
-        RD_UT_ASSERT(!strcmp(expected_post_fields, post_fields),
-                     "Expected post_fields \"%s\", received \"%s\"",
-                     expected_post_fields, post_fields);
-
- rd_free(post_fields);
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Make sure the JWT can be extracted from HTTP(S) responses,
- *        or fails as expected.
- */
-int unittest_sasl_oauthbearer_oidc(void) {
- int fails = 0;
- fails += ut_sasl_oauthbearer_oidc_should_succeed();
- fails += ut_sasl_oauthbearer_oidc_with_empty_key();
- fails += ut_sasl_oauthbearer_oidc_post_fields();
- fails += ut_sasl_oauthbearer_oidc_post_fields_with_empty_scope();
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h
deleted file mode 100644
index a944f2efa..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2021 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_
-#define _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_
-void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk,
- const char *oauthbearer_config,
- void *opaque);
-
-int unittest_sasl_oauthbearer_oidc(void);
-
-#endif /* _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c
deleted file mode 100644
index 1e715cfba..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Builtin SASL PLAIN support when Cyrus SASL is not available
- */
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-
-
-/**
- * @brief Handle received frame from broker.
- */
-static int rd_kafka_sasl_plain_recv(struct rd_kafka_transport_s *rktrans,
- const void *buf,
- size_t size,
- char *errstr,
- size_t errstr_size) {
- if (size)
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLPLAIN",
- "Received non-empty SASL PLAIN (builtin) "
- "response from broker (%" PRIusz " bytes)",
- size);
-
- rd_kafka_sasl_auth_done(rktrans);
-
- return 0;
-}
-
-
-/**
- * @brief Initialize and start SASL PLAIN (builtin) authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * @locality broker thread
- */
-int rd_kafka_sasl_plain_client_new(rd_kafka_transport_t *rktrans,
- const char *hostname,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- rd_kafka_t *rk = rkb->rkb_rk;
- /* [authzid] UTF8NUL authcid UTF8NUL passwd */
- char *buf;
- int of = 0;
- int zidlen = 0;
- int cidlen, pwlen;
-
- mtx_lock(&rk->rk_conf.sasl.lock);
-
- cidlen = rk->rk_conf.sasl.username
- ? (int)strlen(rk->rk_conf.sasl.username)
- : 0;
- pwlen = rk->rk_conf.sasl.password
- ? (int)strlen(rk->rk_conf.sasl.password)
- : 0;
-
- buf = rd_alloca(zidlen + 1 + cidlen + 1 + pwlen + 1);
-
- /* authzid: none (empty) */
- /* UTF8NUL */
- buf[of++] = 0;
- /* authcid */
- memcpy(&buf[of], rk->rk_conf.sasl.username, cidlen);
- of += cidlen;
- /* UTF8NUL */
- buf[of++] = 0;
- /* passwd */
- memcpy(&buf[of], rk->rk_conf.sasl.password, pwlen);
- of += pwlen;
- mtx_unlock(&rk->rk_conf.sasl.lock);
-
- rd_rkb_dbg(rkb, SECURITY, "SASLPLAIN",
- "Sending SASL PLAIN (builtin) authentication token");
-
- if (rd_kafka_sasl_send(rktrans, buf, of, errstr, errstr_size))
- return -1;
-
-        /* PLAIN is apparently done here, but we still need to make sure
- * the PLAIN frame is sent and we get a response back (empty) */
- rktrans->rktrans_sasl.complete = 1;
- return 0;
-}
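A worked example with made-up credentials: sasl.username "user" and sasl.password "pass" yield the 10-byte SASL PLAIN token with an empty authzid:

/* buf = "\0user\0pass"  (of == 10; the NULs are the UTF8NUL separators) */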
-
-
-/**
- * @brief Validate PLAIN config
- */
-static int rd_kafka_sasl_plain_conf_validate(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
- rd_bool_t both_set;
-
- mtx_lock(&rk->rk_conf.sasl.lock);
- both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password;
- mtx_unlock(&rk->rk_conf.sasl.lock);
-
- if (!both_set) {
- rd_snprintf(errstr, errstr_size,
- "sasl.username and sasl.password must be set");
- return -1;
- }
-
- return 0;
-}
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider = {
- .name = "PLAIN (builtin)",
- .client_new = rd_kafka_sasl_plain_client_new,
- .recv = rd_kafka_sasl_plain_recv,
- .conf_validate = rd_kafka_sasl_plain_conf_validate};
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c
deleted file mode 100644
index 7d5db5649..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c
+++ /dev/null
@@ -1,973 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Builtin SASL SCRAM support when Cyrus SASL is not available
- */
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-#include "rdrand.h"
-#include "rdunittest.h"
-
-
-#if WITH_SSL
-#include <openssl/hmac.h>
-#include <openssl/evp.h>
-#include <openssl/sha.h>
-#else
-#error "WITH_SSL (OpenSSL) is required for SASL SCRAM"
-#endif
-
-
-/**
- * @brief Per-connection state
- */
-struct rd_kafka_sasl_scram_state {
- enum { RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE,
- RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE,
- RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE,
- } state;
- rd_chariov_t cnonce; /* client c-nonce */
- rd_chariov_t first_msg_bare; /* client-first-message-bare */
- char *ServerSignatureB64; /* ServerSignature in Base64 */
- const EVP_MD *evp; /* Hash function pointer */
-};
-
-
-/**
- * @brief Close and free authentication state
- */
-static void rd_kafka_sasl_scram_close(rd_kafka_transport_t *rktrans) {
- struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
-
- if (!state)
- return;
-
- RD_IF_FREE(state->cnonce.ptr, rd_free);
- RD_IF_FREE(state->first_msg_bare.ptr, rd_free);
- RD_IF_FREE(state->ServerSignatureB64, rd_free);
- rd_free(state);
-}
-
-
-
-/**
- * @brief Generates a nonce string (a random printable string)
- * @remark dst->ptr will be allocated and must be freed.
- */
-static void rd_kafka_sasl_scram_generate_nonce(rd_chariov_t *dst) {
- int i;
- dst->size = 32;
- dst->ptr = rd_malloc(dst->size + 1);
- for (i = 0; i < (int)dst->size; i++)
- dst->ptr[i] = (char)rd_jitter(0x2d /*-*/, 0x7e /*~*/);
- dst->ptr[i] = 0;
-}
-
-
-/**
- * @brief Parses \p inbuf for SCRAM attribute \p attr (e.g., 's')
- * @returns a newly allocated copy of the value, or NULL
- * on failure in which case an error is written to \p errstr
- * prefixed by \p description.
- */
-static char *rd_kafka_sasl_scram_get_attr(const rd_chariov_t *inbuf,
- char attr,
- const char *description,
- char *errstr,
- size_t errstr_size) {
- size_t of = 0;
-
- for (of = 0; of < inbuf->size;) {
- const char *td;
- size_t len;
-
- /* Find next delimiter , (if any) */
- td = memchr(&inbuf->ptr[of], ',', inbuf->size - of);
- if (td)
- len = (size_t)(td - &inbuf->ptr[of]);
- else
- len = inbuf->size - of;
-
- /* Check if attr "x=" matches */
- if (inbuf->ptr[of] == attr && inbuf->size > of + 1 &&
- inbuf->ptr[of + 1] == '=') {
- char *ret;
- of += 2; /* past = */
- ret = rd_malloc(len - 2 + 1);
- memcpy(ret, &inbuf->ptr[of], len - 2);
- ret[len - 2] = '\0';
- return ret;
- }
-
- /* Not the attr we are looking for, skip
- * past the next delimiter and continue looking. */
- of += len + 1;
- }
-
- rd_snprintf(errstr, errstr_size, "%s: could not find attribute (%c)",
- description, attr);
- return NULL;
-}
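
A usage sketch for the parser above, against a representative
server-first-message (attribute values hypothetical); the returned string is
newly allocated and must be released with rd_free():

    const char *msg = "r=abc123,s=QSXCR+Q6sek8bf92,i=4096";
    rd_chariov_t in  = {.ptr = (char *)msg, .size = strlen(msg)};
    char errstr[128];
    char *itcnt = rd_kafka_sasl_scram_get_attr(
        &in, 'i', "server-first-message", errstr, sizeof(errstr));
    /* itcnt == "4096" on success, or NULL with errstr set */
    if (itcnt)
            rd_free(itcnt);
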
-
-
-/**
- * @brief Base64 encode binary input \p in
- * @returns a newly allocated, base64-encoded string or NULL on error.
- */
-static char *rd_base64_encode(const rd_chariov_t *in) {
- char *ret;
- size_t ret_len, max_len;
-
- /* OpenSSL takes an |int| argument so the input cannot exceed that. */
- if (in->size > INT_MAX) {
- return NULL;
- }
-
- /* This does not overflow given the |INT_MAX| bound, above. */
- max_len = (((in->size + 2) / 3) * 4) + 1;
- ret = rd_malloc(max_len);
- if (ret == NULL) {
- return NULL;
- }
-
- ret_len =
- EVP_EncodeBlock((uint8_t *)ret, (uint8_t *)in->ptr, (int)in->size);
- assert(ret_len < max_len);
- ret[ret_len] = 0;
-
- return ret;
-}
-
-
-/**
- * @brief Base64 decode input string \p in. Ignores leading and trailing
- * whitespace.
- * @returns -1 on invalid Base64, or 0 on success, in which case a
- * newly allocated binary string is set in \p out (and its size).
- */
-static int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out) {
- size_t ret_len;
-
- /* OpenSSL takes an |int| argument, so |in->size| must not exceed
- * that. */
- if (in->size % 4 != 0 || in->size > INT_MAX) {
- return -1;
- }
-
- ret_len = ((in->size / 4) * 3);
- out->ptr = rd_malloc(ret_len + 1);
-
- if (EVP_DecodeBlock((uint8_t *)out->ptr, (uint8_t *)in->ptr,
- (int)in->size) == -1) {
- rd_free(out->ptr);
- out->ptr = NULL;
- return -1;
- }
-
- /* EVP_DecodeBlock will pad the output with trailing NULs and count
- * them in the return value. */
- if (in->size > 1 && in->ptr[in->size - 1] == '=') {
- if (in->size > 2 && in->ptr[in->size - 2] == '=') {
- ret_len -= 2;
- } else {
- ret_len -= 1;
- }
- }
-
- out->ptr[ret_len] = 0;
- out->size = ret_len;
-
- return 0;
-}
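
A worked example of the padding arithmetic above, assuming only this file's
helpers: decoding the 8-byte input "Zm9vYg==" reserves (8 / 4) * 3 = 6 bytes,
then subtracts 2 for the trailing "==":

    rd_chariov_t in = {.ptr = (char *)"Zm9vYg==", .size = 8};
    rd_chariov_t out;

    if (rd_base64_decode(&in, &out) == 0) {
            /* out.ptr == "foob", out.size == 4 */
            rd_free(out.ptr);
    }
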
-
-
-/**
- * @brief Perform H(str) hash function and stores the result in \p out
- * which must be at least EVP_MAX_MD_SIZE.
- * @returns 0 on success, else -1
- */
-static int rd_kafka_sasl_scram_H(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *str,
- rd_chariov_t *out) {
-
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H(
- (const unsigned char *)str->ptr, str->size,
- (unsigned char *)out->ptr);
-
- out->size = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H_size;
- return 0;
-}
-
-/**
- * @brief Perform HMAC(key,str) and stores the result in \p out
- * which must be at least EVP_MAX_MD_SIZE.
- * @returns 0 on success, else -1
- */
-static int rd_kafka_sasl_scram_HMAC(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *key,
- const rd_chariov_t *str,
- rd_chariov_t *out) {
- const EVP_MD *evp =
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp;
- unsigned int outsize;
-
- if (!HMAC(evp, (const unsigned char *)key->ptr, (int)key->size,
- (const unsigned char *)str->ptr, (int)str->size,
- (unsigned char *)out->ptr, &outsize)) {
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
- "HMAC failed");
- return -1;
- }
-
- out->size = outsize;
-
- return 0;
-}
-
-
-
-/**
- * @brief Perform \p itcnt iterations of HMAC() on the given buffer \p in
- * using \p salt, writing the output into \p out which must be
- * at least EVP_MAX_MD_SIZE. Actual size is updated in \p *outsize.
- * @returns 0 on success, else -1
- */
-static int rd_kafka_sasl_scram_Hi(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *in,
- const rd_chariov_t *salt,
- int itcnt,
- rd_chariov_t *out) {
- const EVP_MD *evp =
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp;
- unsigned int ressize = 0;
- unsigned char tempres[EVP_MAX_MD_SIZE];
- unsigned char *saltplus;
- int i;
-
- /* U1 := HMAC(str, salt + INT(1)) */
- saltplus = rd_alloca(salt->size + 4);
- memcpy(saltplus, salt->ptr, salt->size);
- saltplus[salt->size] = 0;
- saltplus[salt->size + 1] = 0;
- saltplus[salt->size + 2] = 0;
- saltplus[salt->size + 3] = 1;
-
- /* U1 := HMAC(str, salt + INT(1)) */
- if (!HMAC(evp, (const unsigned char *)in->ptr, (int)in->size, saltplus,
- salt->size + 4, tempres, &ressize)) {
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
- "HMAC priming failed");
- return -1;
- }
-
- memcpy(out->ptr, tempres, ressize);
-
- /* Ui-1 := HMAC(str, Ui-2) .. */
- for (i = 1; i < itcnt; i++) {
- unsigned char tempdest[EVP_MAX_MD_SIZE];
- int j;
-
- if (unlikely(!HMAC(evp, (const unsigned char *)in->ptr,
- (int)in->size, tempres, ressize, tempdest,
- NULL))) {
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
- "Hi() HMAC #%d/%d failed", i, itcnt);
- return -1;
- }
-
- /* U1 XOR U2 .. */
- for (j = 0; j < (int)ressize; j++) {
- out->ptr[j] ^= tempdest[j];
- tempres[j] = tempdest[j];
- }
- }
-
- out->size = ressize;
-
- return 0;
-}
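
Hi() above is the PBKDF2 key-derivation function (RFC 2898) with HMAC as the
PRF, as RFC 5802 points out, so its output can be cross-checked against
OpenSSL's stock implementation; a minimal sketch assuming only OpenSSL:

    #include <string.h>
    #include <openssl/evp.h>

    /* SaltedPassword := Hi(password, salt, itcnt), one digest in length.
     * Returns 1 on success (OpenSSL convention). */
    static int salted_password(const char *pass, const unsigned char *salt,
                               int saltlen, int itcnt, const EVP_MD *md,
                               unsigned char *out /* EVP_MD_size(md) bytes */) {
            return PKCS5_PBKDF2_HMAC(pass, (int)strlen(pass), salt, saltlen,
                                     itcnt, md, EVP_MD_size(md), out);
    }
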
-
-
-/**
- * @returns a SASL value-safe-char encoded string, replacing "," and "="
- * with their escaped counterparts in a newly allocated string.
- */
-static char *rd_kafka_sasl_safe_string(const char *str) {
- char *safe = NULL, *d = NULL /*avoid warning*/;
- int pass;
- size_t len = 0;
-
- /* Pass #1: scan for needed length and allocate.
- * Pass #2: encode string */
- for (pass = 0; pass < 2; pass++) {
- const char *s;
- for (s = str; *s; s++) {
- if (pass == 0) {
- /* If this byte needs to be escaped then
- * 3 output bytes are needed instead of 1. */
- len += (*s == ',' || *s == '=') ? 3 : 1;
- continue;
- }
-
- if (*s == ',') {
- *(d++) = '=';
- *(d++) = '2';
- *(d++) = 'C';
- } else if (*s == '=') {
- *(d++) = '=';
- *(d++) = '3';
- *(d++) = 'D';
- } else
- *(d++) = *s;
- }
-
- if (pass == 0)
- d = safe = rd_malloc(len + 1);
- }
-
- rd_assert(d == safe + (int)len);
- *d = '\0';
-
- return safe;
-}
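
RFC 5802 forbids literal ',' and '=' in a saslname, hence the =2C and =3D
escapes produced above; a usage sketch:

    char *safe = rd_kafka_sasl_safe_string("user,name=x");
    /* safe == "user=2Cname=3Dx"; release with rd_free() */
    rd_free(safe);
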
-
-
-/**
- * @brief Build client-final-message-without-proof
- * @remark out->ptr will be allocated and must be freed.
- */
-static void rd_kafka_sasl_scram_build_client_final_message_wo_proof(
- struct rd_kafka_sasl_scram_state *state,
- const char *snonce,
- rd_chariov_t *out) {
- const char *attr_c = "biws"; /* base64 encode of "n,," */
-
- /*
- * client-final-message-without-proof =
- * channel-binding "," nonce [","
- * extensions]
- */
- out->size = strlen("c=,r=") + strlen(attr_c) + state->cnonce.size +
- strlen(snonce);
- out->ptr = rd_malloc(out->size + 1);
- rd_snprintf(out->ptr, out->size + 1, "c=%s,r=%.*s%s", attr_c,
- (int)state->cnonce.size, state->cnonce.ptr, snonce);
-}
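
With a hypothetical client nonce "fyko+d2lbbFgONRv" and server nonce
extension "3rfcNHYJY1ZVvWVs7j", the message built above is:

    c=biws,r=fyko+d2lbbFgONRv3rfcNHYJY1ZVvWVs7j

where c=biws is the Base64 of the gs2-header "n,," (no channel binding) and
r= carries the concatenated client and server nonces.
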
-
-
-/**
- * @brief Build client-final-message
- * @returns -1 on error.
- */
-static int rd_kafka_sasl_scram_build_client_final_message(
- rd_kafka_transport_t *rktrans,
- const rd_chariov_t *salt,
- const char *server_nonce,
- const rd_chariov_t *server_first_msg,
- int itcnt,
- rd_chariov_t *out) {
- struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
- rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
- rd_chariov_t SaslPassword = RD_ZERO_INIT;
- rd_chariov_t SaltedPassword = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- rd_chariov_t ClientKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- rd_chariov_t ServerKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- rd_chariov_t StoredKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- rd_chariov_t AuthMessage = RD_ZERO_INIT;
- rd_chariov_t ClientSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- rd_chariov_t ServerSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- const rd_chariov_t ClientKeyVerbatim = {.ptr = "Client Key",
- .size = 10};
- const rd_chariov_t ServerKeyVerbatim = {.ptr = "Server Key",
- .size = 10};
- rd_chariov_t ClientProof = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
- rd_chariov_t client_final_msg_wo_proof;
- char *ClientProofB64;
- int i;
-
- mtx_lock(&conf->sasl.lock);
- rd_strdupa(&SaslPassword.ptr, conf->sasl.password);
- mtx_unlock(&conf->sasl.lock);
- SaslPassword.size = strlen(SaslPassword.ptr);
-
- /* Constructing the ClientProof attribute (p):
- *
- * p = Base64-encoded ClientProof
- * SaltedPassword := Hi(Normalize(password), salt, i)
- * ClientKey := HMAC(SaltedPassword, "Client Key")
- * StoredKey := H(ClientKey)
- * AuthMessage := client-first-message-bare + "," +
- * server-first-message + "," +
- * client-final-message-without-proof
- * ClientSignature := HMAC(StoredKey, AuthMessage)
- * ClientProof := ClientKey XOR ClientSignature
- * ServerKey := HMAC(SaltedPassword, "Server Key")
- * ServerSignature := HMAC(ServerKey, AuthMessage)
- */
-
- /* SaltedPassword := Hi(Normalize(password), salt, i) */
- if (rd_kafka_sasl_scram_Hi(rktrans, &SaslPassword, salt, itcnt,
- &SaltedPassword) == -1)
- return -1;
-
- /* ClientKey := HMAC(SaltedPassword, "Client Key") */
- if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword,
- &ClientKeyVerbatim, &ClientKey) == -1)
- return -1;
-
- /* StoredKey := H(ClientKey) */
- if (rd_kafka_sasl_scram_H(rktrans, &ClientKey, &StoredKey) == -1)
- return -1;
-
- /* client-final-message-without-proof */
- rd_kafka_sasl_scram_build_client_final_message_wo_proof(
- state, server_nonce, &client_final_msg_wo_proof);
-
- /* AuthMessage := client-first-message-bare + "," +
- * server-first-message + "," +
- * client-final-message-without-proof */
- AuthMessage.size = state->first_msg_bare.size + 1 +
- server_first_msg->size + 1 +
- client_final_msg_wo_proof.size;
- AuthMessage.ptr = rd_alloca(AuthMessage.size + 1);
- rd_snprintf(AuthMessage.ptr, AuthMessage.size + 1, "%.*s,%.*s,%.*s",
- (int)state->first_msg_bare.size, state->first_msg_bare.ptr,
- (int)server_first_msg->size, server_first_msg->ptr,
- (int)client_final_msg_wo_proof.size,
- client_final_msg_wo_proof.ptr);
-
- /*
- * Calculate ServerSignature for later verification when
- * server-final-message is received.
- */
-
- /* ServerKey := HMAC(SaltedPassword, "Server Key") */
- if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword,
- &ServerKeyVerbatim, &ServerKey) == -1) {
- rd_free(client_final_msg_wo_proof.ptr);
- return -1;
- }
-
- /* ServerSignature := HMAC(ServerKey, AuthMessage) */
- if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey, &AuthMessage,
- &ServerSignature) == -1) {
- rd_free(client_final_msg_wo_proof.ptr);
- return -1;
- }
-
- /* Store the Base64 encoded ServerSignature for quick comparison */
- state->ServerSignatureB64 = rd_base64_encode(&ServerSignature);
- if (state->ServerSignatureB64 == NULL) {
- rd_free(client_final_msg_wo_proof.ptr);
- return -1;
- }
-
- /*
- * Continue with client-final-message
- */
-
- /* ClientSignature := HMAC(StoredKey, AuthMessage) */
- if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey, &AuthMessage,
- &ClientSignature) == -1) {
- rd_free(client_final_msg_wo_proof.ptr);
- return -1;
- }
-
- /* ClientProof := ClientKey XOR ClientSignature */
- assert(ClientKey.size == ClientSignature.size);
- for (i = 0; i < (int)ClientKey.size; i++)
- ClientProof.ptr[i] = ClientKey.ptr[i] ^ ClientSignature.ptr[i];
- ClientProof.size = ClientKey.size;
-
-
- /* Base64 encoded ClientProof */
- ClientProofB64 = rd_base64_encode(&ClientProof);
- if (ClientProofB64 == NULL) {
- rd_free(client_final_msg_wo_proof.ptr);
- return -1;
- }
-
- /* Construct client-final-message */
- out->size = client_final_msg_wo_proof.size + strlen(",p=") +
- strlen(ClientProofB64);
- out->ptr = rd_malloc(out->size + 1);
-
- rd_snprintf(out->ptr, out->size + 1, "%.*s,p=%s",
- (int)client_final_msg_wo_proof.size,
- client_final_msg_wo_proof.ptr, ClientProofB64);
- rd_free(ClientProofB64);
- rd_free(client_final_msg_wo_proof.ptr);
-
- return 0;
-}
-
-
-/**
- * @brief Handle first message from server
- *
- * Parse server response which looks something like:
- * "r=fyko+d2lbbFgONR....,s=QSXCR+Q6sek8bf92,i=4096"
- *
- * @returns -1 on error.
- */
-static int
-rd_kafka_sasl_scram_handle_server_first_message(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *in,
- rd_chariov_t *out,
- char *errstr,
- size_t errstr_size) {
- struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
- char *server_nonce;
- rd_chariov_t salt_b64, salt;
- char *itcntstr;
- const char *endptr;
- int itcnt;
- char *attr_m;
-
- /* Mandatory future extension check */
- if ((attr_m = rd_kafka_sasl_scram_get_attr(in, 'm', NULL, NULL, 0))) {
- rd_snprintf(errstr, errstr_size,
- "Unsupported mandatory SCRAM extension");
- rd_free(attr_m);
- return -1;
- }
-
- /* Server nonce */
- if (!(server_nonce = rd_kafka_sasl_scram_get_attr(
- in, 'r', "Server nonce in server-first-message", errstr,
- errstr_size)))
- return -1;
-
- if (strlen(server_nonce) <= state->cnonce.size ||
- strncmp(state->cnonce.ptr, server_nonce, state->cnonce.size)) {
- rd_snprintf(errstr, errstr_size,
- "Server/client nonce mismatch in "
- "server-first-message");
- rd_free(server_nonce);
- return -1;
- }
-
- /* Salt (Base64) */
- if (!(salt_b64.ptr = rd_kafka_sasl_scram_get_attr(
- in, 's', "Salt in server-first-message", errstr,
- errstr_size))) {
- rd_free(server_nonce);
- return -1;
- }
- salt_b64.size = strlen(salt_b64.ptr);
-
- /* Convert Salt to binary */
- if (rd_base64_decode(&salt_b64, &salt) == -1) {
- rd_snprintf(errstr, errstr_size,
- "Invalid Base64 Salt in server-first-message");
- rd_free(server_nonce);
- rd_free(salt_b64.ptr);
- return -1;
- }
- rd_free(salt_b64.ptr);
-
- /* Iteration count (as string) */
- if (!(itcntstr = rd_kafka_sasl_scram_get_attr(
- in, 'i', "Iteration count in server-first-message", errstr,
- errstr_size))) {
- rd_free(server_nonce);
- rd_free(salt.ptr);
- return -1;
- }
-
- /* Iteration count (as int) */
- errno = 0;
- itcnt = (int)strtoul(itcntstr, (char **)&endptr, 10);
- if (itcntstr == endptr || *endptr != '\0' || errno != 0 ||
- itcnt > 1000000) {
- rd_snprintf(errstr, errstr_size,
- "Invalid value (not integer or too large) "
- "for Iteration count in server-first-message");
- rd_free(server_nonce);
- rd_free(salt.ptr);
- rd_free(itcntstr);
- return -1;
- }
- rd_free(itcntstr);
-
- /* Build client-final-message */
- if (rd_kafka_sasl_scram_build_client_final_message(
- rktrans, &salt, server_nonce, in, itcnt, out) == -1) {
- rd_snprintf(errstr, errstr_size,
- "Failed to build SCRAM client-final-message");
- rd_free(salt.ptr);
- rd_free(server_nonce);
- return -1;
- }
-
- rd_free(server_nonce);
- rd_free(salt.ptr);
-
- return 0;
-}
-
-/**
- * @brief Handle server-final-message
- *
- * This is the end of authentication and the SCRAM state
- * will be freed at the end of this function regardless of
- * authentication outcome.
- *
- * @returns -1 on failure
- */
-static int
-rd_kafka_sasl_scram_handle_server_final_message(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *in,
- char *errstr,
- size_t errstr_size) {
- struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
- char *attr_v, *attr_e;
-
- if ((attr_e = rd_kafka_sasl_scram_get_attr(
- in, 'e', "server-error in server-final-message", errstr,
- errstr_size))) {
- /* Authentication failed */
-
- rd_snprintf(errstr, errstr_size,
- "SASL SCRAM authentication failed: "
- "broker responded with %s",
- attr_e);
- rd_free(attr_e);
- return -1;
-
- } else if ((attr_v = rd_kafka_sasl_scram_get_attr(
- in, 'v', "verifier in server-final-message", errstr,
- errstr_size))) {
- rd_kafka_conf_t *conf;
-
-                /* Authentication successful on server,
- * but we need to verify the ServerSignature too. */
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
- "SCRAMAUTH",
- "SASL SCRAM authentication successful on server: "
- "verifying ServerSignature");
-
- if (strcmp(attr_v, state->ServerSignatureB64)) {
- rd_snprintf(errstr, errstr_size,
- "SASL SCRAM authentication failed: "
- "ServerSignature mismatch "
- "(server's %s != ours %s)",
- attr_v, state->ServerSignatureB64);
- rd_free(attr_v);
- return -1;
- }
- rd_free(attr_v);
-
- conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
-
- mtx_lock(&conf->sasl.lock);
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
- "SCRAMAUTH", "Authenticated as %s using %s",
- conf->sasl.username, conf->sasl.mechanisms);
- mtx_unlock(&conf->sasl.lock);
-
- rd_kafka_sasl_auth_done(rktrans);
- return 0;
-
- } else {
- rd_snprintf(errstr, errstr_size,
- "SASL SCRAM authentication failed: "
- "no verifier or server-error returned from broker");
- return -1;
- }
-}
-
-
-
-/**
- * @brief Build client-first-message
- */
-static void
-rd_kafka_sasl_scram_build_client_first_message(rd_kafka_transport_t *rktrans,
- rd_chariov_t *out) {
- char *sasl_username;
- struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
- rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
-
- rd_kafka_sasl_scram_generate_nonce(&state->cnonce);
-
- mtx_lock(&conf->sasl.lock);
- sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username);
- mtx_unlock(&conf->sasl.lock);
-
- out->size =
- strlen("n,,n=,r=") + strlen(sasl_username) + state->cnonce.size;
- out->ptr = rd_malloc(out->size + 1);
-
- rd_snprintf(out->ptr, out->size + 1, "n,,n=%s,r=%.*s", sasl_username,
- (int)state->cnonce.size, state->cnonce.ptr);
- rd_free(sasl_username);
-
- /* Save client-first-message-bare (skip gs2-header) */
- state->first_msg_bare.size = out->size - 3;
- state->first_msg_bare.ptr =
- rd_memdup(out->ptr + 3, state->first_msg_bare.size);
-}
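
For a hypothetical username "alice" and client nonce "fyko+d2lbbFgONRv", the
message built above is:

    n,,n=alice,r=fyko+d2lbbFgONRv

and client-first-message-bare is everything after the three-byte gs2-header
"n,,", i.e. "n=alice,r=fyko+d2lbbFgONRv".
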
-
-
-
-/**
- * @brief SASL SCRAM client state machine
- * @returns -1 on failure (errstr set), else 0.
- */
-static int rd_kafka_sasl_scram_fsm(rd_kafka_transport_t *rktrans,
- const rd_chariov_t *in,
- char *errstr,
- size_t errstr_size) {
- static const char *state_names[] = {
- "client-first-message",
- "server-first-message",
- "client-final-message",
- };
- struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
- rd_chariov_t out = RD_ZERO_INIT;
- int r = -1;
- rd_ts_t ts_start = rd_clock();
- int prev_state = state->state;
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLSCRAM",
- "SASL SCRAM client in state %s", state_names[state->state]);
-
- switch (state->state) {
- case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE:
- rd_dassert(!in); /* Not expecting any server-input */
-
- rd_kafka_sasl_scram_build_client_first_message(rktrans, &out);
- state->state = RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE;
- break;
-
-
- case RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE:
- rd_dassert(in); /* Requires server-input */
-
- if (rd_kafka_sasl_scram_handle_server_first_message(
- rktrans, in, &out, errstr, errstr_size) == -1)
- return -1;
-
- state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE;
- break;
-
- case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE:
- rd_dassert(in); /* Requires server-input */
-
- r = rd_kafka_sasl_scram_handle_server_final_message(
- rktrans, in, errstr, errstr_size);
- break;
- }
-
- if (out.ptr) {
- r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr,
- errstr_size);
- rd_free(out.ptr);
- }
-
- ts_start = (rd_clock() - ts_start) / 1000;
- if (ts_start >= 100)
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
- "SASL SCRAM state %s handled in %" PRId64 "ms",
- state_names[prev_state], ts_start);
-
-
- return r;
-}
-
-
-/**
- * @brief Handle received frame from broker.
- */
-static int rd_kafka_sasl_scram_recv(rd_kafka_transport_t *rktrans,
- const void *buf,
- size_t size,
- char *errstr,
- size_t errstr_size) {
- const rd_chariov_t in = {.ptr = (char *)buf, .size = size};
- return rd_kafka_sasl_scram_fsm(rktrans, &in, errstr, errstr_size);
-}
-
-
-/**
- * @brief Initialize and start SASL SCRAM (builtin) authentication.
- *
- * Returns 0 on successful init and -1 on error.
- *
- * @locality broker thread
- */
-static int rd_kafka_sasl_scram_client_new(rd_kafka_transport_t *rktrans,
- const char *hostname,
- char *errstr,
- size_t errstr_size) {
- struct rd_kafka_sasl_scram_state *state;
-
- state = rd_calloc(1, sizeof(*state));
- state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE;
- rktrans->rktrans_sasl.state = state;
-
- /* Kick off the FSM */
- return rd_kafka_sasl_scram_fsm(rktrans, NULL, errstr, errstr_size);
-}
-
-
-
-/**
- * @brief Validate SCRAM config and look up the hash function
- */
-static int rd_kafka_sasl_scram_conf_validate(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
- const char *mech = rk->rk_conf.sasl.mechanisms;
- rd_bool_t both_set;
-
- mtx_lock(&rk->rk_conf.sasl.lock);
- both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password;
- mtx_unlock(&rk->rk_conf.sasl.lock);
-
- if (!both_set) {
- rd_snprintf(errstr, errstr_size,
- "sasl.username and sasl.password must be set");
- return -1;
- }
-
- if (!strcmp(mech, "SCRAM-SHA-1")) {
- rk->rk_conf.sasl.scram_evp = EVP_sha1();
- rk->rk_conf.sasl.scram_H = SHA1;
- rk->rk_conf.sasl.scram_H_size = SHA_DIGEST_LENGTH;
- } else if (!strcmp(mech, "SCRAM-SHA-256")) {
- rk->rk_conf.sasl.scram_evp = EVP_sha256();
- rk->rk_conf.sasl.scram_H = SHA256;
- rk->rk_conf.sasl.scram_H_size = SHA256_DIGEST_LENGTH;
- } else if (!strcmp(mech, "SCRAM-SHA-512")) {
- rk->rk_conf.sasl.scram_evp = EVP_sha512();
- rk->rk_conf.sasl.scram_H = SHA512;
- rk->rk_conf.sasl.scram_H_size = SHA512_DIGEST_LENGTH;
- } else {
- rd_snprintf(errstr, errstr_size,
- "Unsupported hash function: %s "
- "(try SCRAM-SHA-512)",
- mech);
- return -1;
- }
-
- return 0;
-}
-
-
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = {
- .name = "SCRAM (builtin)",
- .client_new = rd_kafka_sasl_scram_client_new,
- .recv = rd_kafka_sasl_scram_recv,
- .close = rd_kafka_sasl_scram_close,
- .conf_validate = rd_kafka_sasl_scram_conf_validate,
-};
-
-
-
-/**
- * @name Unit tests
- */
-
-/**
- * @brief Verify that a random nonce is generated.
- */
-static int unittest_scram_nonce(void) {
- rd_chariov_t out1 = RD_ZERO_INIT;
- rd_chariov_t out2 = RD_ZERO_INIT;
-
- rd_kafka_sasl_scram_generate_nonce(&out1);
- RD_UT_ASSERT(out1.size == 32, "Wrong size %d", (int)out1.size);
-
- rd_kafka_sasl_scram_generate_nonce(&out2);
-        RD_UT_ASSERT(out2.size == 32, "Wrong size %d", (int)out2.size);
-
- RD_UT_ASSERT(memcmp(out1.ptr, out2.ptr, out1.size) != 0,
- "Expected generate_nonce() to return a random nonce");
-
- rd_free(out1.ptr);
- rd_free(out2.ptr);
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Verify that the safe string function does not overwrite memory.
- * Needs to be run with ASAN (which is done in release-tests) for
- * proper verification.
- */
-static int unittest_scram_safe(void) {
- const char *inout[] = {
- "just a string",
- "just a string",
-
- "another,one,that,needs=escaping!",
- "another=2Cone=2Cthat=2Cneeds=3Descaping!",
-
- "overflow?============================",
- "overflow?=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D"
- "=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D",
-
- "=3D=3D=3D the mind boggles",
- "=3D3D=3D3D=3D3D the mind boggles",
-
- NULL,
- NULL};
- int i;
-
- for (i = 0; inout[i]; i += 2) {
- char *out = rd_kafka_sasl_safe_string(inout[i]);
- const char *expected = inout[i + 1];
-
- RD_UT_ASSERT(!strcmp(out, expected),
- "Expected sasl_safe_string(%s) => %s, not %s\n",
- inout[i], expected, out);
-
- rd_free(out);
- }
-
- RD_UT_PASS();
-}
-
-
-int unittest_scram(void) {
- int fails = 0;
-
- fails += unittest_scram_nonce();
- fails += unittest_scram_safe();
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c
deleted file mode 100644
index b07e1808d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Implements a SASL Kerberos GSSAPI authentication client
- * using the native Win32 SSPI.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_sasl.h"
-#include "rdkafka_sasl_int.h"
-
-
-#include <stdio.h>
-#include <windows.h>
-#include <ntsecapi.h>
-
-#define SECURITY_WIN32
-#pragma comment(lib, "secur32.lib")
-#include <sspi.h>
-
-
-#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \
- (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \
- ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION)
-
-
-/* Default maximum kerberos token size for newer versions of Windows */
-#define RD_KAFKA_SSPI_MAX_TOKEN_SIZE 48000
-
-
-/**
- * @brief Per-connection SASL state
- */
-typedef struct rd_kafka_sasl_win32_state_s {
- CredHandle *cred;
- CtxtHandle *ctx;
- wchar_t principal[512]; /* Broker service principal and hostname */
-} rd_kafka_sasl_win32_state_t;
-
-
-/**
- * @returns the string representation of a SECURITY_STATUS error code
- */
-static const char *rd_kafka_sasl_sspi_err2str(SECURITY_STATUS sr) {
- switch (sr) {
- case SEC_E_INSUFFICIENT_MEMORY:
- return "Insufficient memory";
- case SEC_E_INTERNAL_ERROR:
- return "Internal error";
- case SEC_E_INVALID_HANDLE:
- return "Invalid handle";
- case SEC_E_INVALID_TOKEN:
- return "Invalid token";
- case SEC_E_LOGON_DENIED:
- return "Logon denied";
- case SEC_E_NO_AUTHENTICATING_AUTHORITY:
- return "No authority could be contacted for authentication.";
- case SEC_E_NO_CREDENTIALS:
- return "No credentials";
- case SEC_E_TARGET_UNKNOWN:
- return "Target unknown";
- case SEC_E_UNSUPPORTED_FUNCTION:
- return "Unsupported functionality";
- case SEC_E_WRONG_CREDENTIAL_HANDLE:
- return "The principal that received the authentication "
- "request is not the same as the one passed "
- "into the pszTargetName parameter. "
- "This indicates a failure in mutual "
- "authentication.";
- default:
- return "(no string representation)";
- }
-}
-
-
-/**
- * @brief Create new CredHandle
- */
-static CredHandle *rd_kafka_sasl_sspi_cred_new(rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size) {
- TimeStamp expiry = {0, 0};
- SECURITY_STATUS sr;
- CredHandle *cred = rd_calloc(1, sizeof(*cred));
-
- sr = AcquireCredentialsHandle(NULL, __TEXT("Kerberos"),
- SECPKG_CRED_OUTBOUND, NULL, NULL, NULL,
- NULL, cred, &expiry);
-
- if (sr != SEC_E_OK) {
- rd_free(cred);
- rd_snprintf(errstr, errstr_size,
- "Failed to acquire CredentialsHandle: "
- "error code %d",
- sr);
- return NULL;
- }
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
- "Acquired Kerberos credentials handle (expiry in %d.%ds)",
- expiry.u.HighPart, expiry.u.LowPart);
-
- return cred;
-}
-
-
-/**
- * @brief Start or continue SSPI-based authentication processing.
- */
-static int rd_kafka_sasl_sspi_continue(rd_kafka_transport_t *rktrans,
- const void *inbuf,
- size_t insize,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
- SecBufferDesc outbufdesc, inbufdesc;
- SecBuffer outsecbuf, insecbuf;
- BYTE outbuf[RD_KAFKA_SSPI_MAX_TOKEN_SIZE];
- TimeStamp lifespan = {0, 0};
- ULONG ret_ctxattrs;
- CtxtHandle *ctx;
- SECURITY_STATUS sr;
-
- if (inbuf) {
- if (insize > ULONG_MAX) {
- rd_snprintf(errstr, errstr_size,
- "Input buffer length too large (%" PRIusz
- ") "
- "and would overflow",
- insize);
- return -1;
- }
-
- inbufdesc.ulVersion = SECBUFFER_VERSION;
- inbufdesc.cBuffers = 1;
- inbufdesc.pBuffers = &insecbuf;
-
- insecbuf.cbBuffer = (unsigned long)insize;
- insecbuf.BufferType = SECBUFFER_TOKEN;
- insecbuf.pvBuffer = (void *)inbuf;
- }
-
- outbufdesc.ulVersion = SECBUFFER_VERSION;
- outbufdesc.cBuffers = 1;
- outbufdesc.pBuffers = &outsecbuf;
-
- outsecbuf.cbBuffer = sizeof(outbuf);
- outsecbuf.BufferType = SECBUFFER_TOKEN;
- outsecbuf.pvBuffer = outbuf;
-
- if (!(ctx = state->ctx)) {
- /* First time: allocate context handle
- * which will be filled in by Initialize..() */
- ctx = rd_calloc(1, sizeof(*ctx));
- }
-
- sr = InitializeSecurityContext(
- state->cred, state->ctx, state->principal,
- RD_KAFKA_SASL_SSPI_CTX_ATTRS |
- (state->ctx ? 0 : ISC_REQ_MUTUAL_AUTH | ISC_REQ_IDENTIFY),
- 0, SECURITY_NATIVE_DREP, inbuf ? &inbufdesc : NULL, 0, ctx,
- &outbufdesc, &ret_ctxattrs, &lifespan);
-
- if (!state->ctx)
- state->ctx = ctx;
-
- switch (sr) {
- case SEC_E_OK:
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
- "Initialized security context");
-
- rktrans->rktrans_sasl.complete = 1;
- break;
- case SEC_I_CONTINUE_NEEDED:
- break;
- case SEC_I_COMPLETE_NEEDED:
- case SEC_I_COMPLETE_AND_CONTINUE:
- rd_snprintf(errstr, errstr_size,
- "CompleteAuthToken (Digest auth, %d) "
- "not implemented",
- sr);
- return -1;
- case SEC_I_INCOMPLETE_CREDENTIALS:
- rd_snprintf(errstr, errstr_size,
- "Incomplete credentials: "
- "invalid or untrusted certificate");
- return -1;
- default:
- rd_snprintf(errstr, errstr_size,
- "InitializeSecurityContext "
- "failed: %s (0x%x)",
- rd_kafka_sasl_sspi_err2str(sr), sr);
- return -1;
- }
-
- if (rd_kafka_sasl_send(rktrans, outsecbuf.pvBuffer, outsecbuf.cbBuffer,
- errstr, errstr_size) == -1)
- return -1;
-
- return 0;
-}
-
-
-/**
- * @brief Sends the token response to the broker
- */
-static int rd_kafka_sasl_win32_send_response(rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size,
- SecBuffer *server_token) {
- rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
- SECURITY_STATUS sr;
- SecBuffer in_buffer;
- SecBuffer out_buffer;
- SecBuffer buffers[4];
- SecBufferDesc buffer_desc;
- SecPkgContext_Sizes sizes;
- SecPkgCredentials_NamesA names;
- int send_response;
- size_t namelen;
-
- sr = QueryContextAttributes(state->ctx, SECPKG_ATTR_SIZES, &sizes);
- if (sr != SEC_E_OK) {
- rd_snprintf(errstr, errstr_size,
- "Send response failed: %s (0x%x)",
- rd_kafka_sasl_sspi_err2str(sr), sr);
- return -1;
- }
-
- RD_MEMZERO(names);
- sr = QueryCredentialsAttributesA(state->cred, SECPKG_CRED_ATTR_NAMES,
- &names);
-
- if (sr != SEC_E_OK) {
- rd_snprintf(errstr, errstr_size,
- "Query credentials failed: %s (0x%x)",
- rd_kafka_sasl_sspi_err2str(sr), sr);
- return -1;
- }
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
- "Sending response message for user: %s", names.sUserName);
-
- namelen = strlen(names.sUserName) + 1;
- if (namelen > ULONG_MAX) {
-                rd_snprintf(errstr, errstr_size,
-                            "User name length too large (%" PRIusz
-                            ") "
-                            "and would overflow",
-                            namelen);
- return -1;
- }
-
- in_buffer.pvBuffer = (char *)names.sUserName;
- in_buffer.cbBuffer = (unsigned long)namelen;
-
- buffer_desc.cBuffers = 4;
- buffer_desc.pBuffers = buffers;
- buffer_desc.ulVersion = SECBUFFER_VERSION;
-
- /* security trailer */
- buffers[0].cbBuffer = sizes.cbSecurityTrailer;
- buffers[0].BufferType = SECBUFFER_TOKEN;
- buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer);
-
- /* protection level and buffer size received from the server */
- buffers[1].cbBuffer = server_token->cbBuffer;
- buffers[1].BufferType = SECBUFFER_DATA;
- buffers[1].pvBuffer = rd_calloc(1, server_token->cbBuffer);
- memcpy(buffers[1].pvBuffer, server_token->pvBuffer,
- server_token->cbBuffer);
-
- /* user principal */
- buffers[2].cbBuffer = in_buffer.cbBuffer;
- buffers[2].BufferType = SECBUFFER_DATA;
- buffers[2].pvBuffer = rd_calloc(1, buffers[2].cbBuffer);
- memcpy(buffers[2].pvBuffer, in_buffer.pvBuffer, in_buffer.cbBuffer);
-
- /* padding */
- buffers[3].cbBuffer = sizes.cbBlockSize;
- buffers[3].BufferType = SECBUFFER_PADDING;
-        buffers[3].pvBuffer = rd_calloc(1, buffers[3].cbBuffer);
-
- sr = EncryptMessage(state->ctx, KERB_WRAP_NO_ENCRYPT, &buffer_desc, 0);
- if (sr != SEC_E_OK) {
- rd_snprintf(errstr, errstr_size,
- "Encrypt message failed: %s (0x%x)",
- rd_kafka_sasl_sspi_err2str(sr), sr);
-
- FreeContextBuffer(in_buffer.pvBuffer);
- rd_free(buffers[0].pvBuffer);
- rd_free(buffers[1].pvBuffer);
- rd_free(buffers[2].pvBuffer);
- rd_free(buffers[3].pvBuffer);
- return -1;
- }
-
- out_buffer.cbBuffer = buffers[0].cbBuffer + buffers[1].cbBuffer +
- buffers[2].cbBuffer + buffers[3].cbBuffer;
-
- out_buffer.pvBuffer =
- rd_calloc(1, buffers[0].cbBuffer + buffers[1].cbBuffer +
- buffers[2].cbBuffer + buffers[3].cbBuffer);
-
- memcpy(out_buffer.pvBuffer, buffers[0].pvBuffer, buffers[0].cbBuffer);
-
- memcpy((unsigned char *)out_buffer.pvBuffer + (int)buffers[0].cbBuffer,
- buffers[1].pvBuffer, buffers[1].cbBuffer);
-
- memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer +
- buffers[1].cbBuffer,
- buffers[2].pvBuffer, buffers[2].cbBuffer);
-
- memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer +
- buffers[1].cbBuffer + buffers[2].cbBuffer,
- buffers[3].pvBuffer, buffers[3].cbBuffer);
-
- send_response =
- rd_kafka_sasl_send(rktrans, out_buffer.pvBuffer,
- out_buffer.cbBuffer, errstr, errstr_size);
-
- FreeContextBuffer(in_buffer.pvBuffer);
- rd_free(out_buffer.pvBuffer);
- rd_free(buffers[0].pvBuffer);
- rd_free(buffers[1].pvBuffer);
- rd_free(buffers[2].pvBuffer);
- rd_free(buffers[3].pvBuffer);
-
- return send_response;
-}
-
-
-/**
- * @brief Unwrap and validate token response from broker.
- */
-static int rd_kafka_sasl_win32_validate_token(rd_kafka_transport_t *rktrans,
- const void *inbuf,
- size_t insize,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
- SecBuffer buffers[2];
- SecBufferDesc buffer_desc;
- SECURITY_STATUS sr;
- char supported;
-
- if (insize > ULONG_MAX) {
-                rd_snprintf(errstr, errstr_size,
-                            "Input buffer length too large (%" PRIusz
-                            ") "
-                            "and would overflow",
-                            insize);
- return -1;
- }
-
- buffer_desc.cBuffers = 2;
- buffer_desc.pBuffers = buffers;
- buffer_desc.ulVersion = SECBUFFER_VERSION;
-
- buffers[0].cbBuffer = (unsigned long)insize;
- buffers[0].BufferType = SECBUFFER_STREAM;
- buffers[0].pvBuffer = (void *)inbuf;
-
- buffers[1].cbBuffer = 0;
- buffers[1].BufferType = SECBUFFER_DATA;
- buffers[1].pvBuffer = NULL;
-
- sr = DecryptMessage(state->ctx, &buffer_desc, 0, NULL);
- if (sr != SEC_E_OK) {
- rd_snprintf(errstr, errstr_size,
- "Decrypt message failed: %s (0x%x)",
- rd_kafka_sasl_sspi_err2str(sr), sr);
- return -1;
- }
-
- if (buffers[1].cbBuffer < 4) {
- rd_snprintf(errstr, errstr_size,
- "Validate token: "
- "invalid message");
- return -1;
- }
-
- supported = ((char *)buffers[1].pvBuffer)[0];
- if (!(supported & 1)) {
- rd_snprintf(errstr, errstr_size,
- "Validate token: "
- "server does not support layer");
- return -1;
- }
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
- "Validated server token");
-
- return rd_kafka_sasl_win32_send_response(rktrans, errstr, errstr_size,
- &buffers[1]);
-}
-
-
-/**
- * @brief Handle SASL frame received from broker.
- */
-static int rd_kafka_sasl_win32_recv(struct rd_kafka_transport_s *rktrans,
- const void *buf,
- size_t size,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-
- if (rktrans->rktrans_sasl.complete) {
-
- if (size > 0) {
- /* After authentication is done the broker will send
- * back its token for us to verify.
- * The client responds to the broker which will
- * return an empty (size==0) frame that
- * completes the authentication handshake.
- * With legacy SASL framing the final empty token
- * is not sent. */
- int r;
-
- r = rd_kafka_sasl_win32_validate_token(
- rktrans, buf, size, errstr, errstr_size);
-
- if (r == -1) {
- rktrans->rktrans_sasl.complete = 0;
- return r;
- } else if (rktrans->rktrans_rkb->rkb_features &
- RD_KAFKA_FEATURE_SASL_AUTH_REQ) {
- /* Kafka-framed handshake requires
- * one more back and forth. */
- return r;
- }
-
- /* Legacy-framed handshake is done here */
- }
-
- /* Final ack from broker. */
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
- "Authenticated");
- rd_kafka_sasl_auth_done(rktrans);
- return 0;
- }
-
- return rd_kafka_sasl_sspi_continue(rktrans, buf, size, errstr,
- errstr_size);
-}
-
-
-/**
- * @brief Decommission SSPI state
- */
-static void rd_kafka_sasl_win32_close(rd_kafka_transport_t *rktrans) {
- rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
-
- if (!state)
- return;
-
- if (state->ctx) {
- DeleteSecurityContext(state->ctx);
- rd_free(state->ctx);
- }
- if (state->cred) {
- FreeCredentialsHandle(state->cred);
- rd_free(state->cred);
- }
- rd_free(state);
-}
-
-
-static int rd_kafka_sasl_win32_client_new(rd_kafka_transport_t *rktrans,
- const char *hostname,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_t *rk = rktrans->rktrans_rkb->rkb_rk;
- rd_kafka_sasl_win32_state_t *state;
-
- if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
- rd_snprintf(errstr, errstr_size,
- "SASL mechanism \"%s\" not supported on platform",
- rk->rk_conf.sasl.mechanisms);
- return -1;
- }
-
- state = rd_calloc(1, sizeof(*state));
- rktrans->rktrans_sasl.state = state;
-
- _snwprintf(state->principal, RD_ARRAYSIZE(state->principal), L"%hs/%hs",
- rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.service_name,
- hostname);
-
- state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr, errstr_size);
- if (!state->cred)
- return -1;
-
- if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0, errstr,
- errstr_size) == -1)
- return -1;
-
- return 0;
-}
-
-/**
- * @brief Validate config
- */
-static int rd_kafka_sasl_win32_conf_validate(rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
- if (!rk->rk_conf.sasl.service_name) {
- rd_snprintf(errstr, errstr_size,
- "sasl.kerberos.service.name must be set");
- return -1;
- }
-
- return 0;
-}
-
-const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider = {
- .name = "Win32 SSPI",
- .client_new = rd_kafka_sasl_win32_client_new,
- .recv = rd_kafka_sasl_win32_recv,
- .close = rd_kafka_sasl_win32_close,
- .conf_validate = rd_kafka_sasl_win32_conf_validate};
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c
deleted file mode 100644
index 9961a240f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c
+++ /dev/null
@@ -1,1841 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name OpenSSL integration
- *
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_cert.h"
-
-#ifdef _WIN32
-#include <wincrypt.h>
-#pragma comment(lib, "crypt32.lib")
-#pragma comment(lib, "libcrypto.lib")
-#pragma comment(lib, "libssl.lib")
-#endif
-
-#include <openssl/x509.h>
-#include <openssl/x509_vfy.h>
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
-#include <openssl/provider.h>
-#endif
-
-#include <ctype.h>
-
-#if !_WIN32
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#endif
-
-
-#if WITH_VALGRIND
-/* OpenSSL relies on uninitialized memory, which Valgrind will whine about.
- * We use in-code Valgrind macros to suppress those warnings. */
-#include <valgrind/memcheck.h>
-#else
-#define VALGRIND_MAKE_MEM_DEFINED(A, B)
-#endif
-
-
-#if OPENSSL_VERSION_NUMBER < 0x10100000L
-static mtx_t *rd_kafka_ssl_locks;
-static int rd_kafka_ssl_locks_cnt;
-#endif
-
-
-/**
- * @brief Close and destroy SSL session
- */
-void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans) {
- SSL_shutdown(rktrans->rktrans_ssl);
- SSL_free(rktrans->rktrans_ssl);
- rktrans->rktrans_ssl = NULL;
-}
-
-
-/**
- * @brief Clear the OpenSSL error queue to get proper error reporting in
- *        case the next SSL_*() operation fails.
- */
-static RD_INLINE void
-rd_kafka_transport_ssl_clear_error(rd_kafka_transport_t *rktrans) {
- ERR_clear_error();
-#ifdef _WIN32
- WSASetLastError(0);
-#else
- rd_set_errno(0);
-#endif
-}
-
-/**
- * @returns a thread-local, single-invocation-use error string for
- *          the last error in this thread's OpenSSL error queue,
- *          or an empty string if there is no error.
- */
-const char *rd_kafka_ssl_last_error_str(void) {
- static RD_TLS char errstr[256];
- unsigned long l;
- const char *file, *data, *func;
- int line, flags;
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
- l = ERR_peek_last_error_all(&file, &line, &func, &data, &flags);
-#else
- l = ERR_peek_last_error_line_data(&file, &line, &data, &flags);
- func = ERR_func_error_string(l);
-#endif
-
- if (!l)
- return "";
-
- rd_snprintf(errstr, sizeof(errstr), "%lu:%s:%s:%s:%d: %s", l,
- ERR_lib_error_string(l), func, file, line,
- ((flags & ERR_TXT_STRING) && data && *data)
- ? data
- : ERR_reason_error_string(l));
-
- return errstr;
-}
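
A standalone sketch of the underlying OpenSSL pattern, draining the
thread-local error queue oldest-first (real OpenSSL APIs, no librdkafka
types):

    #include <stdio.h>
    #include <openssl/err.h>

    static void dump_ssl_errors(void) {
            unsigned long l;
            char buf[256];

            while ((l = ERR_get_error())) { /* pops the oldest error */
                    ERR_error_string_n(l, buf, sizeof(buf));
                    fprintf(stderr, "SSL: %s\n", buf);
            }
    }
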
-
-/**
- * Drains the entire OpenSSL error queue and logs each error.
- * The last error is not logged but returned in 'errstr'.
- *
- * If 'rkb' is non-NULL broker-specific logging will be used,
- * else it will fall back on global 'rk' debugging.
- */
-static char *rd_kafka_ssl_error(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- char *errstr,
- size_t errstr_size) {
- unsigned long l;
- const char *file, *data, *func;
- int line, flags;
- int cnt = 0;
-
- if (!rk) {
- rd_assert(rkb);
- rk = rkb->rkb_rk;
- }
-
- while (
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
- (l = ERR_get_error_all(&file, &line, &func, &data, &flags))
-#else
- (l = ERR_get_error_line_data(&file, &line, &data, &flags))
-#endif
- ) {
- char buf[256];
-
-#if OPENSSL_VERSION_NUMBER < 0x30000000
- func = ERR_func_error_string(l);
-#endif
-
- if (cnt++ > 0) {
- /* Log last message */
- if (rkb)
- rd_rkb_log(rkb, LOG_ERR, "SSL", "%s", errstr);
- else
- rd_kafka_log(rk, LOG_ERR, "SSL", "%s", errstr);
- }
-
- ERR_error_string_n(l, buf, sizeof(buf));
-
- if (!(flags & ERR_TXT_STRING) || !data || !*data)
- data = NULL;
-
- /* Include openssl file:line:func if debugging is enabled */
- if (rk->rk_conf.log_level >= LOG_DEBUG)
- rd_snprintf(errstr, errstr_size, "%s:%d:%s %s%s%s",
- file, line, func, buf, data ? ": " : "",
- data ? data : "");
- else
- rd_snprintf(errstr, errstr_size, "%s%s%s", buf,
- data ? ": " : "", data ? data : "");
- }
-
- if (cnt == 0)
- rd_snprintf(errstr, errstr_size,
- "No further error information available");
-
- return errstr;
-}
-
-
-
-/**
- * Set transport IO event polling based on SSL error.
- *
- * Returns -1 on permanent errors.
- *
- * Locality: broker thread
- */
-static RD_INLINE int
-rd_kafka_transport_ssl_io_update(rd_kafka_transport_t *rktrans,
- int ret,
- char *errstr,
- size_t errstr_size) {
- int serr = SSL_get_error(rktrans->rktrans_ssl, ret);
- int serr2;
-
- switch (serr) {
- case SSL_ERROR_WANT_READ:
- rd_kafka_transport_poll_set(rktrans, POLLIN);
- break;
-
- case SSL_ERROR_WANT_WRITE:
- rd_kafka_transport_set_blocked(rktrans, rd_true);
- rd_kafka_transport_poll_set(rktrans, POLLOUT);
- break;
-
- case SSL_ERROR_SYSCALL:
- serr2 = ERR_peek_error();
- if (serr2)
- rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr,
- errstr_size);
- else if (!rd_socket_errno || rd_socket_errno == ECONNRESET)
- rd_snprintf(errstr, errstr_size, "Disconnected");
- else
- rd_snprintf(errstr, errstr_size,
- "SSL transport error: %s",
- rd_strerror(rd_socket_errno));
- return -1;
-
- case SSL_ERROR_ZERO_RETURN:
- rd_snprintf(errstr, errstr_size, "Disconnected");
- return -1;
-
- default:
- rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr,
- errstr_size);
- return -1;
- }
-
- return 0;
-}
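
The classification above is the standard non-blocking OpenSSL pattern:
WANT_READ/WANT_WRITE are retryable and only adjust the poll set, everything
else is fatal. A minimal sketch of the same idea against a plain pollfd
(assumes a non-blocking socket already attached to the SSL object):

    #include <poll.h>
    #include <openssl/ssl.h>

    /* Returns bytes read, 0 to retry after polling *events, -1 on error. */
    static int ssl_read_nb(SSL *ssl, void *buf, int len, short *events) {
            int r = SSL_read(ssl, buf, len);
            if (r > 0)
                    return r;
            switch (SSL_get_error(ssl, r)) {
            case SSL_ERROR_WANT_READ:
                    *events = POLLIN;
                    return 0;
            case SSL_ERROR_WANT_WRITE: /* e.g. renegotiation in progress */
                    *events = POLLOUT;
                    return 0;
            default:
                    return -1; /* fatal */
            }
    }
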
-
-ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size) {
- ssize_t sum = 0;
- const void *p;
- size_t rlen;
-
- rd_kafka_transport_ssl_clear_error(rktrans);
-
- while ((rlen = rd_slice_peeker(slice, &p))) {
- int r;
- size_t r2;
-
- r = SSL_write(rktrans->rktrans_ssl, p, (int)rlen);
-
- if (unlikely(r <= 0)) {
- if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
- errstr_size) == -1)
- return -1;
- else
- return sum;
- }
-
- /* Update buffer read position */
- r2 = rd_slice_read(slice, NULL, (size_t)r);
- rd_assert((size_t)r == r2 &&
- *"BUG: wrote more bytes than available in slice");
-
-
- sum += r;
- /* FIXME: remove this and try again immediately and let
- * the next SSL_write() call fail instead? */
- if ((size_t)r < rlen)
- break;
- }
- return sum;
-}
-
-ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans,
- rd_buf_t *rbuf,
- char *errstr,
- size_t errstr_size) {
- ssize_t sum = 0;
- void *p;
- size_t len;
-
- while ((len = rd_buf_get_writable(rbuf, &p))) {
- int r;
-
- rd_kafka_transport_ssl_clear_error(rktrans);
-
- r = SSL_read(rktrans->rktrans_ssl, p, (int)len);
-
- if (unlikely(r <= 0)) {
- if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
- errstr_size) == -1)
- return -1;
- else
- return sum;
- }
-
- VALGRIND_MAKE_MEM_DEFINED(p, r);
-
- /* Update buffer write position */
- rd_buf_write(rbuf, NULL, (size_t)r);
-
- sum += r;
-
- /* FIXME: remove this and try again immediately and let
- * the next SSL_read() call fail instead? */
- if ((size_t)r < len)
- break;
- }
- return sum;
-}
-
-
-/**
- * OpenSSL password query callback
- *
- * Locality: application thread
- */
-static int rd_kafka_transport_ssl_passwd_cb(char *buf,
- int size,
- int rwflag,
- void *userdata) {
- rd_kafka_t *rk = userdata;
- int pwlen;
-
- rd_kafka_dbg(rk, SECURITY, "SSLPASSWD",
- "Private key requires password");
-
- if (!rk->rk_conf.ssl.key_password) {
- rd_kafka_log(rk, LOG_WARNING, "SSLPASSWD",
- "Private key requires password but "
- "no password configured (ssl.key.password)");
- return -1;
- }
-
-
- pwlen = (int)strlen(rk->rk_conf.ssl.key_password);
- memcpy(buf, rk->rk_conf.ssl.key_password, RD_MIN(pwlen, size));
-
- return pwlen;
-}
-
-
-/**
- * @brief OpenSSL callback to perform additional broker certificate
- * verification and validation.
- *
- * @return 1 on success when the broker certificate
- * is valid and 0 when the certificate is not valid.
- *
- * @sa SSL_CTX_set_verify()
- */
-static int rd_kafka_transport_ssl_cert_verify_cb(int preverify_ok,
- X509_STORE_CTX *x509_ctx) {
- rd_kafka_transport_t *rktrans = rd_kafka_curr_transport;
- rd_kafka_broker_t *rkb;
- rd_kafka_t *rk;
- X509 *cert;
- char *buf = NULL;
- int buf_size;
- int depth;
- int x509_orig_error, x509_error;
- char errstr[512];
- int ok;
-
- rd_assert(rktrans != NULL);
- rkb = rktrans->rktrans_rkb;
- rk = rkb->rkb_rk;
-
- cert = X509_STORE_CTX_get_current_cert(x509_ctx);
- if (!cert) {
- rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY",
- "Failed to get current certificate to verify");
- return 0;
- }
-
- depth = X509_STORE_CTX_get_error_depth(x509_ctx);
-
- x509_orig_error = x509_error = X509_STORE_CTX_get_error(x509_ctx);
-
- buf_size = i2d_X509(cert, (unsigned char **)&buf);
- if (buf_size < 0 || !buf) {
- rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY",
- "Unable to convert certificate to X509 format");
- return 0;
- }
-
- *errstr = '\0';
-
- /* Call application's verification callback. */
- ok = rk->rk_conf.ssl.cert_verify_cb(
- rk, rkb->rkb_nodename, rkb->rkb_nodeid, &x509_error, depth, buf,
- (size_t)buf_size, errstr, sizeof(errstr), rk->rk_conf.opaque);
-
- OPENSSL_free(buf);
-
- if (!ok) {
- char subject[128];
- char issuer[128];
-
- X509_NAME_oneline(X509_get_subject_name(cert), subject,
- sizeof(subject));
- X509_NAME_oneline(X509_get_issuer_name(cert), issuer,
- sizeof(issuer));
- rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY",
- "Certificate (subject=%s, issuer=%s) verification "
- "callback failed: %s",
- subject, issuer, errstr);
-
- X509_STORE_CTX_set_error(x509_ctx, x509_error);
-
- return 0; /* verification failed */
- }
-
- /* Clear error */
- if (x509_orig_error != 0 && x509_error == 0)
- X509_STORE_CTX_set_error(x509_ctx, 0);
-
- return 1; /* verification successful */
-}
-
-/**
- * @brief Set TLSEXT hostname for SNI and optionally enable
- * SSL endpoint identification verification.
- *
- * @returns 0 on success or -1 on error.
- */
-static int rd_kafka_transport_ssl_set_endpoint_id(rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size) {
- char name[RD_KAFKA_NODENAME_SIZE];
- char *t;
-
- rd_kafka_broker_lock(rktrans->rktrans_rkb);
- rd_snprintf(name, sizeof(name), "%s",
- rktrans->rktrans_rkb->rkb_nodename);
- rd_kafka_broker_unlock(rktrans->rktrans_rkb);
-
- /* Remove ":9092" port suffix from nodename */
- if ((t = strrchr(name, ':')))
- *t = '\0';
-
-#if (OPENSSL_VERSION_NUMBER >= 0x0090806fL) && !defined(OPENSSL_NO_TLSEXT)
- /* If non-numerical hostname, send it for SNI */
- if (!(/*ipv6*/ (strchr(name, ':') &&
- strspn(name, "0123456789abcdefABCDEF:.[]%") ==
- strlen(name)) ||
- /*ipv4*/ strspn(name, "0123456789.") == strlen(name)) &&
- !SSL_set_tlsext_host_name(rktrans->rktrans_ssl, name))
- goto fail;
-#endif
-
- if (rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.endpoint_identification ==
- RD_KAFKA_SSL_ENDPOINT_ID_NONE)
- return 0;
-
-#if OPENSSL_VERSION_NUMBER >= 0x10100000 && !defined(OPENSSL_IS_BORINGSSL)
- if (!SSL_set1_host(rktrans->rktrans_ssl, name))
- goto fail;
-#elif OPENSSL_VERSION_NUMBER >= 0x1000200fL /* 1.0.2 */
- {
- X509_VERIFY_PARAM *param;
-
- param = SSL_get0_param(rktrans->rktrans_ssl);
-
- if (!X509_VERIFY_PARAM_set1_host(param, name, 0))
- goto fail;
- }
-#else
- rd_snprintf(errstr, errstr_size,
- "Endpoint identification not supported on this "
- "OpenSSL version (0x%lx)",
- OPENSSL_VERSION_NUMBER);
- return -1;
-#endif
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "ENDPOINT",
- "Enabled endpoint identification using hostname %s", name);
-
- return 0;
-
-fail:
- rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, errstr_size);
- return -1;
-}
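
A standalone sketch of the two OpenSSL calls doing the work here:
SSL_set_tlsext_host_name() sends the hostname for SNI and SSL_set1_host()
(OpenSSL 1.1.0+) pins it for certificate verification:

    #include <openssl/ssl.h>

    static int setup_endpoint_id(SSL *ssl, const char *hostname) {
            /* Send the hostname in the TLS ClientHello (SNI) */
            if (!SSL_set_tlsext_host_name(ssl, hostname))
                    return -1;
            /* Require the peer certificate to match this hostname */
            if (!SSL_set1_host(ssl, hostname))
                    return -1;
            return 0;
    }
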
-
-
-/**
- * @brief Set up SSL for a newly connected connection
- *
- * @returns -1 on failure, else 0.
- */
-int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb,
- rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size) {
- int r;
-
- rktrans->rktrans_ssl = SSL_new(rkb->rkb_rk->rk_conf.ssl.ctx);
- if (!rktrans->rktrans_ssl)
- goto fail;
-
- if (!SSL_set_fd(rktrans->rktrans_ssl, (int)rktrans->rktrans_s))
- goto fail;
-
- if (rd_kafka_transport_ssl_set_endpoint_id(rktrans, errstr,
- errstr_size) == -1)
- return -1;
-
- rd_kafka_transport_ssl_clear_error(rktrans);
-
- r = SSL_connect(rktrans->rktrans_ssl);
- if (r == 1) {
- /* Connected, highly unlikely since this is a
- * non-blocking operation. */
- rd_kafka_transport_connect_done(rktrans, NULL);
- return 0;
- }
-
- if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) ==
- -1)
- return -1;
-
- return 0;
-
-fail:
- rd_kafka_ssl_error(NULL, rkb, errstr, errstr_size);
- return -1;
-}
-
-
-static RD_UNUSED int
-rd_kafka_transport_ssl_io_event(rd_kafka_transport_t *rktrans, int events) {
- int r;
- char errstr[512];
-
- if (events & POLLOUT) {
- rd_kafka_transport_ssl_clear_error(rktrans);
-
- r = SSL_write(rktrans->rktrans_ssl, NULL, 0);
- if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
- sizeof(errstr)) == -1)
- goto fail;
- }
-
- return 0;
-
-fail:
- /* Permanent error */
- rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
- RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr);
- return -1;
-}
-
-
-/**
- * @brief Verify SSL handshake was valid.
- */
-static int rd_kafka_transport_ssl_verify(rd_kafka_transport_t *rktrans) {
- long int rl;
- X509 *cert;
-
- if (!rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.enable_verify)
- return 0;
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
- cert = SSL_get1_peer_certificate(rktrans->rktrans_ssl);
-#else
- cert = SSL_get_peer_certificate(rktrans->rktrans_ssl);
-#endif
-        /* Only the certificate's presence is checked below; release
-         * the reference right away (X509_free(NULL) is a no-op). */
-        X509_free(cert);
- if (!cert) {
- rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
- RD_KAFKA_RESP_ERR__SSL,
- "Broker did not provide a certificate");
- return -1;
- }
-
- if ((rl = SSL_get_verify_result(rktrans->rktrans_ssl)) != X509_V_OK) {
- rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
- RD_KAFKA_RESP_ERR__SSL,
- "Failed to verify broker certificate: %s",
- X509_verify_cert_error_string(rl));
- return -1;
- }
-
- rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SSLVERIFY",
- "Broker SSL certificate verified");
- return 0;
-}
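-
-/* Illustrative example (not part of the original source): applications
- * can take over certificate verification with
- * rd_kafka_conf_set_ssl_cert_verify_cb(); the exact callback signature
- * should be checked against rdkafka.h. A minimal sketch:
- *
- *   static int my_verify_cb(rd_kafka_t *rk, const char *broker_name,
- *                           int32_t broker_id, int *x509_error,
- *                           int depth, const char *buf, size_t size,
- *                           char *errstr, size_t errstr_size,
- *                           void *opaque) {
- *       return 1; // 1 = certificate accepted, 0 = rejected
- *   }
- *
- *   rd_kafka_conf_set_ssl_cert_verify_cb(conf, my_verify_cb);
- */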
-
-/**
- * @brief SSL handshake handling.
- * Call repeatedly (based on IO events) until handshake is done.
- *
- * @returns -1 on error, 0 if handshake is still in progress,
- * or 1 on completion.
- */
-int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- char errstr[512];
- int r;
-
- r = SSL_do_handshake(rktrans->rktrans_ssl);
- if (r == 1) {
- /* SSL handshake done. Verify. */
- if (rd_kafka_transport_ssl_verify(rktrans) == -1)
- return -1;
-
- rd_kafka_transport_connect_done(rktrans, NULL);
- return 1;
-
- } else if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
- sizeof(errstr)) == -1) {
- const char *extra = "";
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__SSL;
-
- if (strstr(errstr, "unexpected message"))
- extra =
- ": client SSL authentication might be "
- "required (see ssl.key.location and "
- "ssl.certificate.location and consult the "
- "broker logs for more information)";
- else if (strstr(errstr,
- "tls_process_server_certificate:"
- "certificate verify failed") ||
- strstr(errstr, "error:0A000086") /*openssl3*/ ||
- strstr(errstr,
- "get_server_certificate:"
- "certificate verify failed"))
- extra =
- ": broker certificate could not be verified, "
- "verify that ssl.ca.location is correctly "
- "configured or root CA certificates are "
- "installed"
-#ifdef __APPLE__
- " (brew install openssl)"
-#elif defined(_WIN32)
- " (add broker's CA certificate to the Windows "
- "Root certificate store)"
-#else
- " (install ca-certificates package)"
-#endif
- ;
- else if (!strcmp(errstr, "Disconnected")) {
- extra = ": connecting to a PLAINTEXT broker listener?";
- /* Disconnects during handshake are most likely
- * not due to SSL, but rather at the transport level */
- err = RD_KAFKA_RESP_ERR__TRANSPORT;
- }
-
- rd_kafka_broker_fail(rkb, LOG_ERR, err,
- "SSL handshake failed: %s%s", errstr,
- extra);
- return -1;
- }
-
- return 0;
-}
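-
-/* Illustrative sketch (not part of the original source): how a caller
- * might drive the handshake off IO events; wait_for_io() is a
- * hypothetical readiness helper, the real driver being librdkafka's
- * transport IO event loop:
- *
- *   int r;
- *   while ((r = rd_kafka_transport_ssl_handshake(rktrans)) == 0)
- *       wait_for_io(rktrans); // hypothetical: block for POLLIN/POLLOUT
- *   // r == 1: handshake done and verified; r == -1: broker failed
- */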
-
-
-
-/**
- * @brief Parse a PEM-formatted string into an EVP_PKEY (PrivateKey) object.
- *
- * @param str Input PEM string, nul-terminated
- *
- * @remark The input string must already contain the PEM header and
- * footer; they are not added automatically.
- *
- * @returns a new EVP_PKEY on success or NULL on error.
- */
-static EVP_PKEY *rd_kafka_ssl_PKEY_from_string(rd_kafka_t *rk,
- const char *str) {
- BIO *bio = BIO_new_mem_buf((void *)str, -1);
- EVP_PKEY *pkey;
-
- pkey = PEM_read_bio_PrivateKey(bio, NULL,
- rd_kafka_transport_ssl_passwd_cb, rk);
-
- BIO_free(bio);
-
- return pkey;
-}
-
-/**
- * @brief Parse a PEM-formatted string into an X509 object.
- *
- * @param str Input PEM string, nul-terminated
- *
- * @returns a new X509 on success or NULL on error.
- */
-static X509 *rd_kafka_ssl_X509_from_string(rd_kafka_t *rk, const char *str) {
- BIO *bio = BIO_new_mem_buf((void *)str, -1);
- X509 *x509;
-
- x509 =
- PEM_read_bio_X509(bio, NULL, rd_kafka_transport_ssl_passwd_cb, rk);
-
- BIO_free(bio);
-
- return x509;
-}
-
-
-#ifdef _WIN32
-
-/**
- * @brief Attempt to load CA certificates from a Windows Certificate store.
- */
-static int rd_kafka_ssl_win_load_cert_store(rd_kafka_t *rk,
- SSL_CTX *ctx,
- const char *store_name) {
- HCERTSTORE w_store;
- PCCERT_CONTEXT w_cctx = NULL;
- X509_STORE *store;
- int fail_cnt = 0, cnt = 0;
- char errstr[256];
- wchar_t *wstore_name;
- size_t wsize = 0;
- errno_t werr;
-
- /* Convert store_name to wide-char */
- werr = mbstowcs_s(&wsize, NULL, 0, store_name, strlen(store_name));
- if (werr || wsize < 2 || wsize > 1000) {
- rd_kafka_log(rk, LOG_ERR, "CERTSTORE",
- "Invalid Windows certificate store name: %.*s%s",
- 30, store_name,
- wsize < 2 ? " (empty)" : " (truncated)");
- return -1;
- }
- wstore_name = rd_alloca(sizeof(*wstore_name) * wsize);
- werr = mbstowcs_s(NULL, wstore_name, wsize, store_name,
- strlen(store_name));
- rd_assert(!werr);
-
- w_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, 0,
- CERT_SYSTEM_STORE_CURRENT_USER |
- CERT_STORE_READONLY_FLAG |
- CERT_STORE_OPEN_EXISTING_FLAG,
- wstore_name);
- if (!w_store) {
- rd_kafka_log(
- rk, LOG_ERR, "CERTSTORE",
- "Failed to open Windows certificate "
- "%s store: %s",
- store_name,
- rd_strerror_w32(GetLastError(), errstr, sizeof(errstr)));
- return -1;
- }
-
- /* Get the OpenSSL trust store */
- store = SSL_CTX_get_cert_store(ctx);
-
- /* Enumerate the Windows certs */
- while ((w_cctx = CertEnumCertificatesInStore(w_store, w_cctx))) {
- X509 *x509;
-
- /* Parse Windows cert: DER -> X.509 */
- x509 = d2i_X509(NULL,
- (const unsigned char **)&w_cctx->pbCertEncoded,
- (long)w_cctx->cbCertEncoded);
- if (!x509) {
- fail_cnt++;
- continue;
- }
-
- /* Add cert to OpenSSL's trust store */
- if (!X509_STORE_add_cert(store, x509))
- fail_cnt++;
- else
- cnt++;
-
- X509_free(x509);
- }
-
- if (w_cctx)
- CertFreeCertificateContext(w_cctx);
-
- CertCloseStore(w_store, 0);
-
- rd_kafka_dbg(rk, SECURITY, "CERTSTORE",
- "%d certificate(s) successfully added from "
- "Windows Certificate %s store, %d failed",
- cnt, store_name, fail_cnt);
-
- if (cnt == 0 && fail_cnt > 0)
- return -1;
-
- return cnt;
-}
-
-/**
- * @brief Load certs from the configured CSV list of Windows Cert stores.
- *
- * @returns the number of successfully loaded certificates, or -1 on error.
- */
-static int rd_kafka_ssl_win_load_cert_stores(rd_kafka_t *rk,
- SSL_CTX *ctx,
- const char *store_names) {
- char *s;
- int cert_cnt = 0, fail_cnt = 0;
-
- if (!store_names || !*store_names)
- return 0;
-
- rd_strdupa(&s, store_names);
-
- /* Parse CSV list ("Root,CA, , ,Something") and load
- * each store in order. */
- while (*s) {
- char *t;
- const char *store_name;
- int r;
-
- while (isspace((int)*s) || *s == ',')
- s++;
-
- if (!*s)
- break;
-
- store_name = s;
-
- t = strchr(s, (int)',');
- if (t) {
- *t = '\0';
- s = t + 1;
- /* Trim trailing whitespace preceding the comma.
- * Start at the character before the now-NUL comma,
- * since isspace('\0') is always false. */
- for (t--; t >= store_name && isspace((int)*t); t--)
- *t = '\0';
- } else {
- s = "";
- }
-
- r = rd_kafka_ssl_win_load_cert_store(rk, ctx, store_name);
- if (r != -1)
- cert_cnt += r;
- else
- fail_cnt++;
- }
-
- if (cert_cnt == 0 && fail_cnt > 0)
- return -1;
-
- return cert_cnt;
-}
-#endif /* _WIN32 */
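-
-/* Illustrative example (not part of the original source): the stores
- * scanned above come from the ssl.ca.certificate.stores configuration
- * property, a comma-separated list defaulting to "Root":
- *
- *   rd_kafka_conf_set(conf, "ssl.ca.certificate.stores", "Root,CA",
- *                     errstr, sizeof(errstr));
- */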
-
-
-
-/**
- * @brief Probe for the system's CA certificate location and, if found,
- * set it on the \p ctx.
- *
- * @returns 0 if CA location was set, else -1.
- */
-static int rd_kafka_ssl_probe_and_set_default_ca_location(rd_kafka_t *rk,
- SSL_CTX *ctx) {
-#if _WIN32
- /* No standard location on Windows, CA certs are in the ROOT store. */
- return -1;
-#else
- /* The probe paths are based on:
- * https://www.happyassassin.net/posts/2015/01/12/a-note-about-ssltls-trusted-certificate-stores-and-platforms/
- * Golang's crypto probing paths:
- * https://golang.org/search?q=certFiles and certDirectories
- */
- static const char *paths[] = {
- "/etc/pki/tls/certs/ca-bundle.crt",
- "/etc/ssl/certs/ca-bundle.crt",
- "/etc/pki/tls/certs/ca-bundle.trust.crt",
- "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
-
- "/etc/ssl/ca-bundle.pem",
- "/etc/pki/tls/cacert.pem",
- "/etc/ssl/cert.pem",
- "/etc/ssl/cacert.pem",
-
- "/etc/certs/ca-certificates.crt",
- "/etc/ssl/certs/ca-certificates.crt",
-
- "/etc/ssl/certs",
-
- "/usr/local/etc/ssl/cert.pem",
- "/usr/local/etc/ssl/cacert.pem",
-
- "/usr/local/etc/ssl/certs/cert.pem",
- "/usr/local/etc/ssl/certs/cacert.pem",
-
- /* BSD */
- "/usr/local/share/certs/ca-root-nss.crt",
- "/etc/openssl/certs/ca-certificates.crt",
-#ifdef __APPLE__
- "/private/etc/ssl/cert.pem",
- "/private/etc/ssl/certs",
- "/usr/local/etc/openssl@1.1/cert.pem",
- "/usr/local/etc/openssl@1.0/cert.pem",
- "/usr/local/etc/openssl/certs",
- "/System/Library/OpenSSL",
-#endif
-#ifdef _AIX
- "/var/ssl/certs/ca-bundle.crt",
-#endif
- NULL,
- };
- const char *path = NULL;
- int i;
-
- for (i = 0; (path = paths[i]); i++) {
- struct stat st;
- rd_bool_t is_dir;
- int r;
-
- if (stat(path, &st) != 0)
- continue;
-
- is_dir = S_ISDIR(st.st_mode);
-
- if (is_dir && rd_kafka_dir_is_empty(path))
- continue;
-
- rd_kafka_dbg(rk, SECURITY, "CACERTS",
- "Setting default CA certificate location "
- "to %s, override with ssl.ca.location",
- path);
-
- r = SSL_CTX_load_verify_locations(ctx, is_dir ? NULL : path,
- is_dir ? path : NULL);
- if (r != 1) {
- char errstr[512];
- /* Read error and clear the error stack */
- rd_kafka_ssl_error(rk, NULL, errstr, sizeof(errstr));
- rd_kafka_dbg(rk, SECURITY, "CACERTS",
- "Failed to set default CA certificate "
- "location to %s %s: %s: skipping",
- is_dir ? "directory" : "file", path,
- errstr);
- continue;
- }
-
- return 0;
- }
-
- rd_kafka_dbg(rk, SECURITY, "CACERTS",
- "Unable to find any standard CA certificate"
- "paths: is the ca-certificates package installed?");
- return -1;
-#endif
-}
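-
-/* Illustrative example (not part of the original source): this probing
- * can be requested explicitly by setting ssl.ca.location to the
- * special value "probe":
- *
- *   rd_kafka_conf_set(conf, "ssl.ca.location", "probe",
- *                     errstr, sizeof(errstr));
- */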
-
-
-/**
- * @brief Registers certificates, keys, etc, on the SSL_CTX
- *
- * @returns -1 on error, or 0 on success.
- */
-static int rd_kafka_ssl_set_certs(rd_kafka_t *rk,
- SSL_CTX *ctx,
- char *errstr,
- size_t errstr_size) {
- rd_bool_t ca_probe = rd_true;
- rd_bool_t check_pkey = rd_false;
- int r;
-
- /*
- * ssl_ca, ssl.ca.location, or Windows cert root store,
- * or default paths.
- */
- if (rk->rk_conf.ssl.ca) {
- /* CA certificate chain set with conf_set_ssl_cert() */
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading CA certificate(s) from memory");
-
- SSL_CTX_set_cert_store(ctx, rk->rk_conf.ssl.ca->store);
-
- /* OpenSSL takes ownership of the store */
- rk->rk_conf.ssl.ca->store = NULL;
-
- ca_probe = rd_false;
-
- } else {
-
- if (rk->rk_conf.ssl.ca_location &&
- strcmp(rk->rk_conf.ssl.ca_location, "probe")) {
- /* CA certificate location, either file or directory. */
- int is_dir =
- rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location);
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading CA certificate(s) from %s %s",
- is_dir ? "directory" : "file",
- rk->rk_conf.ssl.ca_location);
-
- r = SSL_CTX_load_verify_locations(
- ctx, !is_dir ? rk->rk_conf.ssl.ca_location : NULL,
- is_dir ? rk->rk_conf.ssl.ca_location : NULL);
-
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl.ca.location failed: ");
- return -1;
- }
-
- ca_probe = rd_false;
- }
-
- if (rk->rk_conf.ssl.ca_pem) {
- /* CA as PEM string */
- X509 *x509;
- X509_STORE *store;
- BIO *bio;
- int cnt = 0;
-
- /* Get the OpenSSL trust store */
- store = SSL_CTX_get_cert_store(ctx);
- rd_assert(store != NULL);
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading CA certificate(s) from string");
-
- bio =
- BIO_new_mem_buf((void *)rk->rk_conf.ssl.ca_pem, -1);
- rd_assert(bio != NULL);
-
- /* Add all certificates to cert store */
- while ((x509 = PEM_read_bio_X509(
- bio, NULL, rd_kafka_transport_ssl_passwd_cb,
- rk))) {
- if (!X509_STORE_add_cert(store, x509)) {
- rd_snprintf(errstr, errstr_size,
- "failed to add ssl.ca.pem "
- "certificate "
- "#%d to CA cert store: ",
- cnt);
- X509_free(x509);
- BIO_free(bio);
- return -1;
- }
-
- X509_free(x509);
- cnt++;
- }
-
- if (!BIO_eof(bio) || !cnt) {
- rd_snprintf(errstr, errstr_size,
- "failed to read certificate #%d "
- "from ssl.ca.pem: "
- "not in PEM format?: ",
- cnt);
- BIO_free(bio);
- return -1;
- }
-
- BIO_free(bio);
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loaded %d CA certificate(s) from string",
- cnt);
-
-
- ca_probe = rd_false;
- }
- }
-
- if (ca_probe) {
-#ifdef _WIN32
- /* Attempt to load CA root certificates from the
- * configured Windows certificate stores. */
- r = rd_kafka_ssl_win_load_cert_stores(
- rk, ctx, rk->rk_conf.ssl.ca_cert_stores);
- if (r == 0) {
- rd_kafka_log(
- rk, LOG_NOTICE, "CERTSTORE",
- "No CA certificates loaded from "
- "Windows certificate stores: "
- "falling back to default OpenSSL CA paths");
- r = -1;
- } else if (r == -1)
- rd_kafka_log(
- rk, LOG_NOTICE, "CERTSTORE",
- "Failed to load CA certificates from "
- "Windows certificate stores: "
- "falling back to default OpenSSL CA paths");
-#else
- r = -1;
-#endif
-
- if ((rk->rk_conf.ssl.ca_location &&
- !strcmp(rk->rk_conf.ssl.ca_location, "probe"))
-#if WITH_STATIC_LIB_libcrypto
- || r == -1
-#endif
- ) {
- /* If OpenSSL was linked statically there is a risk
- * that the system installed CA certificate path
- * doesn't match the cert path of OpenSSL.
- * To circumvent this we check for the existence
- * of standard CA certificate paths and use the
- * first one that is found.
- * Ignore failures. */
- r = rd_kafka_ssl_probe_and_set_default_ca_location(rk,
- ctx);
- }
-
- if (r == -1) {
- /* Use default CA certificate paths from linked OpenSSL:
- * ignore failures */
-
- r = SSL_CTX_set_default_verify_paths(ctx);
- if (r != 1) {
- char errstr2[512];
- /* Read error and clear the error stack. */
- rd_kafka_ssl_error(rk, NULL, errstr2,
- sizeof(errstr2));
- rd_kafka_dbg(
- rk, SECURITY, "SSL",
- "SSL_CTX_set_default_verify_paths() "
- "failed: %s: ignoring",
- errstr2);
- }
- r = 0;
- }
- }
-
- if (rk->rk_conf.ssl.crl_location) {
- rd_kafka_dbg(rk, SECURITY, "SSL", "Loading CRL from file %s",
- rk->rk_conf.ssl.crl_location);
-
- r = SSL_CTX_load_verify_locations(
- ctx, rk->rk_conf.ssl.crl_location, NULL);
-
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl.crl.location failed: ");
- return -1;
- }
-
-
- rd_kafka_dbg(rk, SECURITY, "SSL", "Enabling CRL checks");
-
- X509_STORE_set_flags(SSL_CTX_get_cert_store(ctx),
- X509_V_FLAG_CRL_CHECK);
- }
-
-
- /*
- * ssl_cert, ssl.certificate.location and ssl.certificate.pem
- */
- if (rk->rk_conf.ssl.cert) {
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading public key from memory");
-
- rd_assert(rk->rk_conf.ssl.cert->x509);
- r = SSL_CTX_use_certificate(ctx, rk->rk_conf.ssl.cert->x509);
- if (r != 1) {
- rd_snprintf(errstr, errstr_size, "ssl_cert failed: ");
- return -1;
- }
- }
-
- if (rk->rk_conf.ssl.cert_location) {
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading public key from file %s",
- rk->rk_conf.ssl.cert_location);
-
- r = SSL_CTX_use_certificate_chain_file(
- ctx, rk->rk_conf.ssl.cert_location);
-
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl.certificate.location failed: ");
- return -1;
- }
- }
-
- if (rk->rk_conf.ssl.cert_pem) {
- X509 *x509;
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading public key from string");
-
- x509 =
- rd_kafka_ssl_X509_from_string(rk, rk->rk_conf.ssl.cert_pem);
- if (!x509) {
- rd_snprintf(errstr, errstr_size,
- "ssl.certificate.pem failed: "
- "not in PEM format?: ");
- return -1;
- }
-
- r = SSL_CTX_use_certificate(ctx, x509);
-
- X509_free(x509);
-
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl.certificate.pem failed: ");
- return -1;
- }
- }
-
-
- /*
- * ssl_key, ssl.key.location and ssl.key.pem
- */
- if (rk->rk_conf.ssl.key) {
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading private key file from memory");
-
- rd_assert(rk->rk_conf.ssl.key->pkey);
- r = SSL_CTX_use_PrivateKey(ctx, rk->rk_conf.ssl.key->pkey);
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl_key (in-memory) failed: ");
- return -1;
- }
-
- check_pkey = rd_true;
- }
-
- if (rk->rk_conf.ssl.key_location) {
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading private key file from %s",
- rk->rk_conf.ssl.key_location);
-
- r = SSL_CTX_use_PrivateKey_file(
- ctx, rk->rk_conf.ssl.key_location, SSL_FILETYPE_PEM);
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl.key.location failed: ");
- return -1;
- }
-
- check_pkey = rd_true;
- }
-
- if (rk->rk_conf.ssl.key_pem) {
- EVP_PKEY *pkey;
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading private key from string");
-
- pkey =
- rd_kafka_ssl_PKEY_from_string(rk, rk->rk_conf.ssl.key_pem);
- if (!pkey) {
- rd_snprintf(errstr, errstr_size,
- "ssl.key.pem failed: "
- "not in PEM format?: ");
- return -1;
- }
-
- r = SSL_CTX_use_PrivateKey(ctx, pkey);
-
- EVP_PKEY_free(pkey);
-
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "ssl.key.pem failed: ");
- return -1;
- }
-
- /* We no longer need the PEM key (it is cached in the CTX),
- * clear its memory. */
- rd_kafka_desensitize_str(rk->rk_conf.ssl.key_pem);
-
- check_pkey = rd_true;
- }
-
-
- /*
- * ssl.keystore.location
- */
- if (rk->rk_conf.ssl.keystore_location) {
- EVP_PKEY *pkey;
- X509 *cert;
- STACK_OF(X509) *ca = NULL;
- BIO *bio;
- PKCS12 *p12;
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading client's keystore file from %s",
- rk->rk_conf.ssl.keystore_location);
-
- bio = BIO_new_file(rk->rk_conf.ssl.keystore_location, "rb");
- if (!bio) {
- rd_snprintf(errstr, errstr_size,
- "Failed to open ssl.keystore.location: "
- "%s: ",
- rk->rk_conf.ssl.keystore_location);
- return -1;
- }
-
- p12 = d2i_PKCS12_bio(bio, NULL);
- if (!p12) {
- BIO_free(bio);
- rd_snprintf(errstr, errstr_size,
- "Error reading ssl.keystore.location "
- "PKCS#12 file: %s: ",
- rk->rk_conf.ssl.keystore_location);
- return -1;
- }
-
- /* PKCS12_parse() allocates fresh objects into these pointers,
- * so start them out as NULL rather than preallocating objects
- * that would be leaked when overwritten. */
- pkey = NULL;
- cert = NULL;
- if (!PKCS12_parse(p12, rk->rk_conf.ssl.keystore_password, &pkey,
- &cert, &ca)) {
- EVP_PKEY_free(pkey);
- X509_free(cert);
- PKCS12_free(p12);
- BIO_free(bio);
- if (ca != NULL)
- sk_X509_pop_free(ca, X509_free);
- rd_snprintf(errstr, errstr_size,
- "Failed to parse ssl.keystore.location "
- "PKCS#12 file: %s: ",
- rk->rk_conf.ssl.keystore_location);
- return -1;
- }
-
- if (ca != NULL)
- sk_X509_pop_free(ca, X509_free);
-
- PKCS12_free(p12);
- BIO_free(bio);
-
- r = SSL_CTX_use_certificate(ctx, cert);
- X509_free(cert);
- if (r != 1) {
- EVP_PKEY_free(pkey);
- rd_snprintf(errstr, errstr_size,
- "Failed to use ssl.keystore.location "
- "certificate: ");
- return -1;
- }
-
- r = SSL_CTX_use_PrivateKey(ctx, pkey);
- EVP_PKEY_free(pkey);
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "Failed to use ssl.keystore.location "
- "private key: ");
- return -1;
- }
-
- check_pkey = rd_true;
- }
-
-#if WITH_SSL_ENGINE
- /*
- * If applicable, use OpenSSL engine to fetch SSL certificate.
- */
- if (rk->rk_conf.ssl.engine) {
- STACK_OF(X509_NAME) *cert_names = sk_X509_NAME_new_null();
- STACK_OF(X509_OBJECT) *roots =
- X509_STORE_get0_objects(SSL_CTX_get_cert_store(ctx));
- X509 *x509 = NULL;
- EVP_PKEY *pkey = NULL;
- int i = 0;
- for (i = 0; i < sk_X509_OBJECT_num(roots); i++) {
- x509 = X509_OBJECT_get0_X509(
- sk_X509_OBJECT_value(roots, i));
-
- if (x509)
- sk_X509_NAME_push(cert_names,
- X509_get_subject_name(x509));
- }
-
- x509 = NULL;
- r = ENGINE_load_ssl_client_cert(
- rk->rk_conf.ssl.engine, NULL, cert_names, &x509, &pkey,
- NULL, NULL, rk->rk_conf.ssl.engine_callback_data);
-
- /* cert_names must stay valid for the engine call above and is
- * freed exactly once here; freeing it before the call would be
- * a use-after-free and a double free. */
- sk_X509_NAME_free(cert_names);
- if (r == -1 || !x509 || !pkey) {
- X509_free(x509);
- EVP_PKEY_free(pkey);
- if (r == -1)
- rd_snprintf(errstr, errstr_size,
- "OpenSSL "
- "ENGINE_load_ssl_client_cert "
- "failed: ");
- else if (!x509)
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine failed to "
- "load certificate: ");
- else
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine failed to "
- "load private key: ");
-
- return -1;
- }
-
- r = SSL_CTX_use_certificate(ctx, x509);
- X509_free(x509);
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "Failed to use SSL_CTX_use_certificate "
- "with engine: ");
- EVP_PKEY_free(pkey);
- return -1;
- }
-
- r = SSL_CTX_use_PrivateKey(ctx, pkey);
- EVP_PKEY_free(pkey);
- if (r != 1) {
- rd_snprintf(errstr, errstr_size,
- "Failed to use SSL_CTX_use_PrivateKey "
- "with engine: ");
- return -1;
- }
-
- check_pkey = rd_true;
- }
-#endif /*WITH_SSL_ENGINE*/
-
- /* Check that a valid private/public key combo was set. */
- if (check_pkey && SSL_CTX_check_private_key(ctx) != 1) {
- rd_snprintf(errstr, errstr_size, "Private key check failed: ");
- return -1;
- }
-
- return 0;
-}
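-
-/* Illustrative example (not part of the original source): the in-memory
- * certificates handled above (rk_conf.ssl.ca/cert/key) are populated
- * through rd_kafka_conf_set_ssl_cert(). A sketch, where pem_ca,
- * pem_cert and pem_key are assumed to be NUL-terminated PEM strings:
- *
- *   rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_CA,
- *                              RD_KAFKA_CERT_ENC_PEM, pem_ca,
- *                              strlen(pem_ca), errstr, sizeof(errstr));
- *   rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PUBLIC_KEY,
- *                              RD_KAFKA_CERT_ENC_PEM, pem_cert,
- *                              strlen(pem_cert), errstr, sizeof(errstr));
- *   rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PRIVATE_KEY,
- *                              RD_KAFKA_CERT_ENC_PEM, pem_key,
- *                              strlen(pem_key), errstr, sizeof(errstr));
- */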
-
-
-/**
- * @brief Once per rd_kafka_t handle cleanup of OpenSSL
- *
- * @locality any thread
- *
- * @locks rd_kafka_wrlock() MUST be held
- */
-void rd_kafka_ssl_ctx_term(rd_kafka_t *rk) {
- SSL_CTX_free(rk->rk_conf.ssl.ctx);
- rk->rk_conf.ssl.ctx = NULL;
-
-#if WITH_SSL_ENGINE
- RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free);
-#endif
-}
-
-
-#if WITH_SSL_ENGINE
-/**
- * @brief Initialize and load OpenSSL engine, if configured.
- *
- * @returns true on success, false on error.
- */
-static rd_bool_t
-rd_kafka_ssl_ctx_init_engine(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
- ENGINE *engine;
-
- /* OpenSSL loads the engine under the "dynamic" id and stores
- * it in its internal engine list (see the LIST_ADD command
- * below). If the engine already exists in that list it is
- * fetched by its engine id instead.
- */
- engine = ENGINE_by_id(rk->rk_conf.ssl.engine_id);
- if (!engine) {
- engine = ENGINE_by_id("dynamic");
- if (!engine) {
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine initialization failed in"
- " ENGINE_by_id: ");
- return rd_false;
- }
- }
-
- if (!ENGINE_ctrl_cmd_string(engine, "SO_PATH",
- rk->rk_conf.ssl.engine_location, 0)) {
- ENGINE_free(engine);
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine initialization failed in"
- " ENGINE_ctrl_cmd_string SO_PATH: ");
- return rd_false;
- }
-
- if (!ENGINE_ctrl_cmd_string(engine, "LIST_ADD", "1", 0)) {
- ENGINE_free(engine);
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine initialization failed in"
- " ENGINE_ctrl_cmd_string LIST_ADD: ");
- return rd_false;
- }
-
- if (!ENGINE_ctrl_cmd_string(engine, "LOAD", NULL, 0)) {
- ENGINE_free(engine);
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine initialization failed in"
- " ENGINE_ctrl_cmd_string LOAD: ");
- return rd_false;
- }
-
- if (!ENGINE_init(engine)) {
- ENGINE_free(engine);
- rd_snprintf(errstr, errstr_size,
- "OpenSSL engine initialization failed in"
- " ENGINE_init: ");
- return rd_false;
- }
-
- rk->rk_conf.ssl.engine = engine;
-
- return rd_true;
-}
-#endif
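-
-/* Illustrative example (not part of the original source): engine
- * loading is driven by the ssl.engine.location (path to the engine
- * shared object) and ssl.engine.id (default "dynamic") configuration
- * properties:
- *
- *   rd_kafka_conf_set(conf, "ssl.engine.location", "/path/to/engine.so",
- *                     errstr, sizeof(errstr));
- */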
-
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
-/**
- * @brief Wrapper around OSSL_PROVIDER_unload() to expose a free(void*) API
- * suitable for rd_list_t's free_cb.
- */
-static void rd_kafka_ssl_OSSL_PROVIDER_free(void *ptr) {
- OSSL_PROVIDER *prov = ptr;
- (void)OSSL_PROVIDER_unload(prov);
-}
-
-
-/**
- * @brief Load OpenSSL 3.0.x providers specified in comma-separated string.
- *
- * @remark Only the error preamble/prefix is written here, the actual
- * OpenSSL error is retrieved from the OpenSSL error stack by
- * the caller.
- *
- * @returns rd_false on failure (errstr will be written to), or rd_true
- * on success.
- */
-static rd_bool_t rd_kafka_ssl_ctx_load_providers(rd_kafka_t *rk,
- const char *providers_csv,
- char *errstr,
- size_t errstr_size) {
- size_t provider_cnt, i;
- char **providers = rd_string_split(
- providers_csv, ',', rd_true /*skip empty*/, &provider_cnt);
-
-
- if (!providers || !provider_cnt) {
- rd_snprintf(errstr, errstr_size,
- "ssl.providers expects a comma-separated "
- "list of OpenSSL 3.0.x providers");
- if (providers)
- rd_free(providers);
- return rd_false;
- }
-
- rd_list_init(&rk->rk_conf.ssl.loaded_providers, (int)provider_cnt,
- rd_kafka_ssl_OSSL_PROVIDER_free);
-
- for (i = 0; i < provider_cnt; i++) {
- const char *provider = providers[i];
- OSSL_PROVIDER *prov;
- const char *buildinfo = NULL;
- OSSL_PARAM request[] = {{"buildinfo", OSSL_PARAM_UTF8_PTR,
- (void *)&buildinfo, 0, 0},
- {NULL, 0, NULL, 0, 0}};
-
- prov = OSSL_PROVIDER_load(NULL, provider);
- if (!prov) {
- rd_snprintf(errstr, errstr_size,
- "Failed to load OpenSSL provider \"%s\": ",
- provider);
- rd_free(providers);
- return rd_false;
- }
-
- if (!OSSL_PROVIDER_get_params(prov, request))
- buildinfo = "no buildinfo";
-
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "OpenSSL provider \"%s\" loaded (%s)", provider,
- buildinfo);
-
- rd_list_add(&rk->rk_conf.ssl.loaded_providers, prov);
- }
-
- rd_free(providers);
-
- return rd_true;
-}
-#endif
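-
-/* Illustrative example (not part of the original source): with
- * OpenSSL 3.x builds the providers loaded above are requested through
- * the ssl.providers configuration property, e.g.:
- *
- *   rd_kafka_conf_set(conf, "ssl.providers", "default,legacy",
- *                     errstr, sizeof(errstr));
- */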
-
-
-
-/**
- * @brief Once per rd_kafka_t handle initialization of OpenSSL
- *
- * @locality application thread
- *
- * @locks rd_kafka_wrlock() MUST be held
- */
-int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
- int r;
- SSL_CTX *ctx = NULL;
- const char *linking =
-#if WITH_STATIC_LIB_libcrypto
- "statically linked "
-#else
- ""
-#endif
- ;
-
-#if OPENSSL_VERSION_NUMBER >= 0x10100000
- rd_kafka_dbg(rk, SECURITY, "OPENSSL",
- "Using %sOpenSSL version %s "
- "(0x%lx, librdkafka built with 0x%lx)",
- linking, OpenSSL_version(OPENSSL_VERSION),
- OpenSSL_version_num(), OPENSSL_VERSION_NUMBER);
-#else
- rd_kafka_dbg(rk, SECURITY, "OPENSSL",
- "librdkafka built with %sOpenSSL version 0x%lx", linking,
- OPENSSL_VERSION_NUMBER);
-#endif
-
- if (errstr_size > 0)
- errstr[0] = '\0';
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000
- if (rk->rk_conf.ssl.providers &&
- !rd_kafka_ssl_ctx_load_providers(rk, rk->rk_conf.ssl.providers,
- errstr, errstr_size))
- goto fail;
-#endif
-
-#if WITH_SSL_ENGINE
- if (rk->rk_conf.ssl.engine_location && !rk->rk_conf.ssl.engine) {
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Loading OpenSSL engine from \"%s\"",
- rk->rk_conf.ssl.engine_location);
- if (!rd_kafka_ssl_ctx_init_engine(rk, errstr, errstr_size))
- goto fail;
- }
-#endif
-
-#if OPENSSL_VERSION_NUMBER >= 0x10100000
- ctx = SSL_CTX_new(TLS_client_method());
-#else
- ctx = SSL_CTX_new(SSLv23_client_method());
-#endif
- if (!ctx) {
- rd_snprintf(errstr, errstr_size, "SSL_CTX_new() failed: ");
- goto fail;
- }
-
-#ifdef SSL_OP_NO_SSLv3
- /* Disable SSLv3 (unsafe) */
- SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3);
-#endif
-
- /* Key file password callback */
- SSL_CTX_set_default_passwd_cb(ctx, rd_kafka_transport_ssl_passwd_cb);
- SSL_CTX_set_default_passwd_cb_userdata(ctx, rk);
-
- /* Ciphers */
- if (rk->rk_conf.ssl.cipher_suites) {
- rd_kafka_dbg(rk, SECURITY, "SSL", "Setting cipher list: %s",
- rk->rk_conf.ssl.cipher_suites);
- if (!SSL_CTX_set_cipher_list(ctx,
- rk->rk_conf.ssl.cipher_suites)) {
- /* Set a string that will prefix the OpenSSL
- * error message (which on its own is not very
- * meaningful). */
- rd_snprintf(errstr, errstr_size,
- "ssl.cipher.suites failed: ");
- goto fail;
- }
- }
-
- /* Set up broker certificate verification. */
- SSL_CTX_set_verify(ctx,
- rk->rk_conf.ssl.enable_verify ? SSL_VERIFY_PEER
- : SSL_VERIFY_NONE,
- rk->rk_conf.ssl.cert_verify_cb
- ? rd_kafka_transport_ssl_cert_verify_cb
- : NULL);
-
-#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(LIBRESSL_VERSION_NUMBER)
- /* Curves */
- if (rk->rk_conf.ssl.curves_list) {
- rd_kafka_dbg(rk, SECURITY, "SSL", "Setting curves list: %s",
- rk->rk_conf.ssl.curves_list);
- if (!SSL_CTX_set1_curves_list(ctx,
- rk->rk_conf.ssl.curves_list)) {
- rd_snprintf(errstr, errstr_size,
- "ssl.curves.list failed: ");
- goto fail;
- }
- }
-
- /* Certificate signature algorithms */
- if (rk->rk_conf.ssl.sigalgs_list) {
- rd_kafka_dbg(rk, SECURITY, "SSL",
- "Setting signature algorithms list: %s",
- rk->rk_conf.ssl.sigalgs_list);
- if (!SSL_CTX_set1_sigalgs_list(ctx,
- rk->rk_conf.ssl.sigalgs_list)) {
- rd_snprintf(errstr, errstr_size,
- "ssl.sigalgs.list failed: ");
- goto fail;
- }
- }
-#endif
-
- /* Register certificates, keys, etc. */
- if (rd_kafka_ssl_set_certs(rk, ctx, errstr, errstr_size) == -1)
- goto fail;
-
-
- SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
-
- rk->rk_conf.ssl.ctx = ctx;
-
- return 0;
-
-fail:
- r = (int)strlen(errstr);
- /* If errstr only contains an error preamble ending with ": ",
- * retrieve the last error from the OpenSSL error stack and
- * append it, else treat errstr as complete. */
- if (r > 2 && !strcmp(&errstr[r - 2], ": "))
- rd_kafka_ssl_error(rk, NULL, errstr + r,
- (int)errstr_size > r ? (int)errstr_size - r
- : 0);
- RD_IF_FREE(ctx, SSL_CTX_free);
-#if WITH_SSL_ENGINE
- RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free);
-#endif
- rd_list_destroy(&rk->rk_conf.ssl.loaded_providers);
-
- return -1;
-}
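-
-/* Illustrative sketch (not part of the original source) of the error
- * preamble convention used by the fail path above: helpers write only
- * a prefix ending in ": " into errstr, and the actual OpenSSL error
- * text is then fetched from the error stack and appended to it:
- *
- *   char errstr[512];
- *   rd_snprintf(errstr, sizeof(errstr), "ssl.cipher.suites failed: ");
- *   rd_kafka_ssl_error(rk, NULL, errstr + strlen(errstr),
- *                      sizeof(errstr) - strlen(errstr));
- */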
-
-
-#if OPENSSL_VERSION_NUMBER < 0x10100000L
-static RD_UNUSED void
-rd_kafka_transport_ssl_lock_cb(int mode, int i, const char *file, int line) {
- if (mode & CRYPTO_LOCK)
- mtx_lock(&rd_kafka_ssl_locks[i]);
- else
- mtx_unlock(&rd_kafka_ssl_locks[i]);
-}
-#endif
-
-static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb(void) {
-#ifdef _WIN32
- /* Windows makes a distinction between thread handle
- * and thread id, which means we can't use the
- * thrd_current() API that returns the handle. */
- return (unsigned long)GetCurrentThreadId();
-#else
- return (unsigned long)(intptr_t)thrd_current();
-#endif
-}
-
-#ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK
-static void
-rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) {
- unsigned long thread_id = rd_kafka_transport_ssl_threadid_cb();
-
- CRYPTO_THREADID_set_numeric(id, thread_id);
-}
-#endif
-
-/**
- * @brief Global OpenSSL cleanup.
- */
-void rd_kafka_ssl_term(void) {
-#if OPENSSL_VERSION_NUMBER < 0x10100000L
- int i;
-
- if (CRYPTO_get_locking_callback() == &rd_kafka_transport_ssl_lock_cb) {
- CRYPTO_set_locking_callback(NULL);
-#ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK
- CRYPTO_THREADID_set_callback(NULL);
-#else
- CRYPTO_set_id_callback(NULL);
-#endif
-
- for (i = 0; i < rd_kafka_ssl_locks_cnt; i++)
- mtx_destroy(&rd_kafka_ssl_locks[i]);
-
- rd_free(rd_kafka_ssl_locks);
- }
-#endif
-}
-
-
-/**
- * @brief Global (once per process) OpenSSL init.
- */
-void rd_kafka_ssl_init(void) {
-#if OPENSSL_VERSION_NUMBER < 0x10100000L
- int i;
-
- if (!CRYPTO_get_locking_callback()) {
- rd_kafka_ssl_locks_cnt = CRYPTO_num_locks();
- rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt *
- sizeof(*rd_kafka_ssl_locks));
- for (i = 0; i < rd_kafka_ssl_locks_cnt; i++)
- mtx_init(&rd_kafka_ssl_locks[i], mtx_plain);
-
- CRYPTO_set_locking_callback(rd_kafka_transport_ssl_lock_cb);
-
-#ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK
- CRYPTO_THREADID_set_callback(
- rd_kafka_transport_libcrypto_THREADID_callback);
-#else
- CRYPTO_set_id_callback(rd_kafka_transport_ssl_threadid_cb);
-#endif
- }
-
- /* OPENSSL_init_ssl(3) and OPENSSL_init_crypto(3) say:
- * "As of version 1.1.0 OpenSSL will automatically allocate
- * all resources that it needs so no explicit initialisation
- * is required. Similarly it will also automatically
- * deinitialise as required."
- */
- SSL_load_error_strings();
- SSL_library_init();
-
- ERR_load_BIO_strings();
- ERR_load_crypto_strings();
- OpenSSL_add_all_algorithms();
-#endif
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h
deleted file mode 100644
index 325abbe1d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDKAFKA_SSL_H_
-#define _RDKAFKA_SSL_H_
-
-void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans);
-int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb,
- rd_kafka_transport_t *rktrans,
- char *errstr,
- size_t errstr_size);
-int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans);
-ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size);
-ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans,
- rd_buf_t *rbuf,
- char *errstr,
- size_t errstr_size);
-
-
-void rd_kafka_ssl_ctx_term(rd_kafka_t *rk);
-int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
-
-void rd_kafka_ssl_term(void);
-void rd_kafka_ssl_init(void);
-
-const char *rd_kafka_ssl_last_error_str(void);
-
-#endif /* _RDKAFKA_SSL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c
deleted file mode 100644
index 8e76ddb14..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c
+++ /dev/null
@@ -1,3428 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdkafka_int.h"
-#include "rdkafka_assignor.h"
-#include "rdkafka_request.h"
-#include "rdmap.h"
-#include "rdunittest.h"
-
-#include <stdarg.h>
-#include <math.h> /* abs() */
-
-/**
- * @name KIP-54 and KIP-341 Sticky assignor.
- *
- * Closely mimicking the official Apache Kafka AbstractStickyAssignor
- * implementation.
- */
-
-/** FIXME
- * Remaining:
- * isSticky() -- used by tests
- */
-
-
-/** @brief Assignor state from last rebalance */
-typedef struct rd_kafka_sticky_assignor_state_s {
- rd_kafka_topic_partition_list_t *prev_assignment;
- int32_t generation_id;
-} rd_kafka_sticky_assignor_state_t;
-
-
-
-/**
- * Auxiliary glue types
- */
-
-/**
- * @struct ConsumerPair_t represents a pair of consumer member ids involved in
- * a partition reassignment, indicating the source consumer a partition
- * is moving from and the destination consumer the same partition is
- * moving to.
- *
- * @sa PartitionMovements_t
- */
-typedef struct ConsumerPair_s {
- const char *src; /**< Source member id */
- const char *dst; /**< Destination member id */
-} ConsumerPair_t;
-
-
-static ConsumerPair_t *ConsumerPair_new(const char *src, const char *dst) {
- ConsumerPair_t *cpair;
-
- cpair = rd_malloc(sizeof(*cpair));
- cpair->src = src ? rd_strdup(src) : NULL;
- cpair->dst = dst ? rd_strdup(dst) : NULL;
-
- return cpair;
-}
-
-
-static void ConsumerPair_free(void *p) {
- ConsumerPair_t *cpair = p;
- if (cpair->src)
- rd_free((void *)cpair->src);
- if (cpair->dst)
- rd_free((void *)cpair->dst);
- rd_free(cpair);
-}
-
-static int ConsumerPair_cmp(const void *_a, const void *_b) {
- const ConsumerPair_t *a = _a, *b = _b;
- int r = strcmp(a->src ? a->src : "", b->src ? b->src : "");
- if (r)
- return r;
- return strcmp(a->dst ? a->dst : "", b->dst ? b->dst : "");
-}
-
-
-static unsigned int ConsumerPair_hash(const void *_a) {
- const ConsumerPair_t *a = _a;
- return 31 * (a->src ? rd_map_str_hash(a->src) : 1) +
- (a->dst ? rd_map_str_hash(a->dst) : 1);
-}
-
-
-
-typedef struct ConsumerGenerationPair_s {
- const char *consumer; /**< Memory owned by caller */
- int generation;
-} ConsumerGenerationPair_t;
-
-static void ConsumerGenerationPair_destroy(void *ptr) {
- ConsumerGenerationPair_t *cgpair = ptr;
- rd_free(cgpair);
-}
-
-/**
- * @param consumer This memory will be referenced, not copied, and thus must
- * outlive the ConsumerGenerationPair_t object.
- */
-static ConsumerGenerationPair_t *
-ConsumerGenerationPair_new(const char *consumer, int generation) {
- ConsumerGenerationPair_t *cgpair = rd_malloc(sizeof(*cgpair));
- cgpair->consumer = consumer;
- cgpair->generation = generation;
- return cgpair;
-}
-
-static int ConsumerGenerationPair_cmp_generation(const void *_a,
- const void *_b) {
- const ConsumerGenerationPair_t *a = _a, *b = _b;
- return a->generation - b->generation;
-}
-
-
-
-/**
- * Hash map types.
- *
- * Naming convention is:
- * map_<keytype>_<valuetype>_t
- *
- * Where the keytype and valuetype are spoken names of the types and
- * not the specific C types (since that'd be too long).
- */
-typedef RD_MAP_TYPE(const char *,
- rd_kafka_topic_partition_list_t *) map_str_toppar_list_t;
-
-typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
- const char *) map_toppar_str_t;
-
-typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
- rd_list_t *) map_toppar_list_t;
-
-typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
- ConsumerGenerationPair_t *) map_toppar_cgpair_t;
-
-typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
- ConsumerPair_t *) map_toppar_cpair_t;
-
-typedef RD_MAP_TYPE(const ConsumerPair_t *,
- rd_kafka_topic_partition_list_t *) map_cpair_toppar_list_t;
-
-/* map<string, map<ConsumerPair*, topic_partition_list_t*>> */
-typedef RD_MAP_TYPE(const char *,
- map_cpair_toppar_list_t *) map_str_map_cpair_toppar_list_t;
-
-
-
-/** Glue type helpers */
-
-static map_cpair_toppar_list_t *map_cpair_toppar_list_t_new(void) {
- map_cpair_toppar_list_t *map = rd_calloc(1, sizeof(*map));
-
- RD_MAP_INIT(map, 0, ConsumerPair_cmp, ConsumerPair_hash, NULL,
- rd_kafka_topic_partition_list_destroy_free);
-
- return map;
-}
-
-static void map_cpair_toppar_list_t_free(void *ptr) {
- map_cpair_toppar_list_t *map = ptr;
- RD_MAP_DESTROY(map);
- rd_free(map);
-}
-
-
-
-/**
- * @struct Provides current state of partition movements between consumers
- * for each topic, and possible movements for each partition.
- */
-typedef struct PartitionMovements_s {
- map_toppar_cpair_t partitionMovements;
- map_str_map_cpair_toppar_list_t partitionMovementsByTopic;
-} PartitionMovements_t;
-
-
-static void PartitionMovements_init(PartitionMovements_t *pmov,
- size_t topic_cnt) {
- RD_MAP_INIT(&pmov->partitionMovements, topic_cnt * 3,
- rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- NULL, ConsumerPair_free);
-
- RD_MAP_INIT(&pmov->partitionMovementsByTopic, topic_cnt, rd_map_str_cmp,
- rd_map_str_hash, NULL, map_cpair_toppar_list_t_free);
-}
-
-static void PartitionMovements_destroy(PartitionMovements_t *pmov) {
- RD_MAP_DESTROY(&pmov->partitionMovementsByTopic);
- RD_MAP_DESTROY(&pmov->partitionMovements);
-}
-
-
-static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition(
- PartitionMovements_t *pmov,
- const rd_kafka_topic_partition_t *toppar) {
-
- ConsumerPair_t *cpair;
- map_cpair_toppar_list_t *partitionMovementsForThisTopic;
- rd_kafka_topic_partition_list_t *plist;
-
- cpair = RD_MAP_GET(&pmov->partitionMovements, toppar);
- rd_assert(cpair);
-
- partitionMovementsForThisTopic =
- RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic);
-
- plist = RD_MAP_GET(partitionMovementsForThisTopic, cpair);
- rd_assert(plist);
-
- rd_kafka_topic_partition_list_del(plist, toppar->topic,
- toppar->partition);
- if (plist->cnt == 0)
- RD_MAP_DELETE(partitionMovementsForThisTopic, cpair);
- if (RD_MAP_IS_EMPTY(partitionMovementsForThisTopic))
- RD_MAP_DELETE(&pmov->partitionMovementsByTopic, toppar->topic);
-
- return cpair;
-}
-
-static void PartitionMovements_addPartitionMovementRecord(
- PartitionMovements_t *pmov,
- const rd_kafka_topic_partition_t *toppar,
- ConsumerPair_t *cpair) {
- map_cpair_toppar_list_t *partitionMovementsForThisTopic;
- rd_kafka_topic_partition_list_t *plist;
-
- RD_MAP_SET(&pmov->partitionMovements, toppar, cpair);
-
- partitionMovementsForThisTopic =
- RD_MAP_GET_OR_SET(&pmov->partitionMovementsByTopic, toppar->topic,
- map_cpair_toppar_list_t_new());
-
- plist = RD_MAP_GET_OR_SET(partitionMovementsForThisTopic, cpair,
- rd_kafka_topic_partition_list_new(16));
-
- rd_kafka_topic_partition_list_add(plist, toppar->topic,
- toppar->partition);
-}
-
-static void
-PartitionMovements_movePartition(PartitionMovements_t *pmov,
- const rd_kafka_topic_partition_t *toppar,
- const char *old_consumer,
- const char *new_consumer) {
-
- if (RD_MAP_GET(&pmov->partitionMovements, toppar)) {
- /* This partition has previously moved */
- ConsumerPair_t *existing_cpair;
-
- existing_cpair =
- PartitionMovements_removeMovementRecordOfPartition(pmov,
- toppar);
-
- rd_assert(!rd_strcmp(existing_cpair->dst, old_consumer));
-
- if (rd_strcmp(existing_cpair->src, new_consumer)) {
- /* Partition is not moving back to its
- * previous consumer */
- PartitionMovements_addPartitionMovementRecord(
- pmov, toppar,
- ConsumerPair_new(existing_cpair->src,
- new_consumer));
- }
- } else {
- PartitionMovements_addPartitionMovementRecord(
- pmov, toppar, ConsumerPair_new(old_consumer, new_consumer));
- }
-}
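-
-/* Illustrative example (not part of the original source): movement
- * records collapse transitive moves. If partition P first moves
- * c1 -> c2 and later c2 -> c3, the stored record becomes c1 -> c3,
- * and if P subsequently moves back c3 -> c1 the record is removed
- * entirely, since P has returned to its original consumer. */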
-
-static const rd_kafka_topic_partition_t *
-PartitionMovements_getTheActualPartitionToBeMoved(
- PartitionMovements_t *pmov,
- const rd_kafka_topic_partition_t *toppar,
- const char *oldConsumer,
- const char *newConsumer) {
-
- ConsumerPair_t *cpair;
- ConsumerPair_t reverse_cpair = {.src = newConsumer, .dst = oldConsumer};
- map_cpair_toppar_list_t *partitionMovementsForThisTopic;
- rd_kafka_topic_partition_list_t *plist;
-
- if (!RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic))
- return toppar;
-
- cpair = RD_MAP_GET(&pmov->partitionMovements, toppar);
- if (cpair) {
- /* This partition has previously moved */
- rd_assert(!rd_strcmp(oldConsumer, cpair->dst));
-
- oldConsumer = cpair->src;
- }
-
- partitionMovementsForThisTopic =
- RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic);
-
- plist = RD_MAP_GET(partitionMovementsForThisTopic, &reverse_cpair);
- if (!plist)
- return toppar;
-
- return &plist->elems[0];
-}
-
-#if FIXME
-
-static rd_bool_t hasCycles(map_cpair_toppar_list_t *pairs) {
- return rd_true; // FIXME
-}
-
-/**
- * @remark This method is only used by the AbstractStickyAssignorTest
- * in the Java client.
- */
-static rd_bool_t PartitionMovements_isSticky(rd_kafka_t *rk,
- PartitionMovements_t *pmov) {
- const char *topic;
- map_cpair_toppar_list_t *topicMovementPairs;
-
- RD_MAP_FOREACH(topic, topicMovementPairs,
- &pmov->partitionMovementsByTopic) {
- if (hasCycles(topicMovementPairs)) {
- const ConsumerPair_t *cpair;
- const rd_kafka_topic_partition_list_t *partitions;
-
- rd_kafka_log(
- rk, LOG_ERR, "STICKY",
- "Sticky assignor: Stickiness is violated for "
- "topic %s: partition movements for this topic "
- "occurred among the following consumers: ",
- topic);
- RD_MAP_FOREACH(cpair, partitions, topicMovementPairs) {
- rd_kafka_log(rk, LOG_ERR, "STICKY", " %s -> %s",
- cpair->src, cpair->dst);
- }
-
- if (partitions)
- ; /* Avoid unused warning */
-
- return rd_false;
- }
- }
-
- return rd_true;
-}
-#endif
-
-
-/**
- * @brief Comparator to sort ascendingly by rd_map_elem_t object value as
- * topic partition list count, or by member id if the list count is
- * identical.
- * Used to sort sortedCurrentSubscriptions list.
- *
- * elem.key is the consumer member id string,
- * elem.value is the partition list.
- */
-static int sort_by_map_elem_val_toppar_list_cnt(const void *_a,
- const void *_b) {
- const rd_map_elem_t *a = _a, *b = _b;
- const rd_kafka_topic_partition_list_t *al = a->value, *bl = b->value;
- int r = al->cnt - bl->cnt;
- if (r)
- return r;
- return strcmp((const char *)a->key, (const char *)b->key);
-}
-
-
-/**
- * @brief Assign partition to the most eligible consumer.
- *
- * The assignment should improve the overall balance of the partition
- * assignments to consumers.
- */
-static void
-assignPartition(const rd_kafka_topic_partition_t *partition,
- rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_str_toppar_list_t *currentAssignment,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- map_toppar_str_t *currentPartitionConsumer) {
- const rd_map_elem_t *elem;
- int i;
-
- RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
- const char *consumer = (const char *)elem->key;
- const rd_kafka_topic_partition_list_t *partitions;
-
- partitions =
- RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
- if (!rd_kafka_topic_partition_list_find(
- partitions, partition->topic, partition->partition))
- continue;
-
- rd_kafka_topic_partition_list_add(
- RD_MAP_GET(currentAssignment, consumer), partition->topic,
- partition->partition);
-
- RD_MAP_SET(currentPartitionConsumer,
- rd_kafka_topic_partition_copy(partition), consumer);
-
- /* Re-sort sortedCurrentSubscriptions since this consumer's
- * assignment count has increased.
- * This is an O(N) operation since it is a single shuffle. */
- rd_list_sort(sortedCurrentSubscriptions,
- sort_by_map_elem_val_toppar_list_cnt);
- return;
- }
-}
-
-/**
- * @returns true if the partition has two or more potential consumers.
- */
-static RD_INLINE rd_bool_t partitionCanParticipateInReassignment(
- const rd_kafka_topic_partition_t *partition,
- map_toppar_list_t *partition2AllPotentialConsumers) {
- rd_list_t *consumers;
-
- if (!(consumers =
- RD_MAP_GET(partition2AllPotentialConsumers, partition)))
- return rd_false;
-
- return rd_list_cnt(consumers) >= 2;
-}
-
-
-/**
- * @returns true if consumer can participate in reassignment based on
- * its current assignment.
- */
-static RD_INLINE rd_bool_t consumerCanParticipateInReassignment(
- rd_kafka_t *rk,
- const char *consumer,
- map_str_toppar_list_t *currentAssignment,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- map_toppar_list_t *partition2AllPotentialConsumers) {
- const rd_kafka_topic_partition_list_t *currentPartitions =
- RD_MAP_GET(currentAssignment, consumer);
- int currentAssignmentSize = currentPartitions->cnt;
- int maxAssignmentSize =
- RD_MAP_GET(consumer2AllPotentialPartitions, consumer)->cnt;
- int i;
-
- /* FIXME: And then what? Is this a local error? If so, assert. */
- if (currentAssignmentSize > maxAssignmentSize)
- rd_kafka_log(rk, LOG_ERR, "STICKY",
- "Sticky assignor error: "
- "Consumer %s is assigned more partitions (%d) "
- "than the maximum possible (%d)",
- consumer, currentAssignmentSize,
- maxAssignmentSize);
-
- /* If a consumer is not assigned all its potential partitions it is
- * subject to reassignment. */
- if (currentAssignmentSize < maxAssignmentSize)
- return rd_true;
-
- /* If any of the partitions assigned to a consumer is subject to
- * reassignment the consumer itself is subject to reassignment. */
- for (i = 0; i < currentPartitions->cnt; i++) {
- const rd_kafka_topic_partition_t *partition =
- &currentPartitions->elems[i];
-
- if (partitionCanParticipateInReassignment(
- partition, partition2AllPotentialConsumers))
- return rd_true;
- }
-
- return rd_false;
-}
-
-
-/**
- * @brief Process moving partition from old consumer to new consumer.
- */
-static void processPartitionMovement(
- rd_kafka_t *rk,
- PartitionMovements_t *partitionMovements,
- const rd_kafka_topic_partition_t *partition,
- const char *newConsumer,
- map_str_toppar_list_t *currentAssignment,
- rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_toppar_str_t *currentPartitionConsumer) {
-
- const char *oldConsumer =
- RD_MAP_GET(currentPartitionConsumer, partition);
-
- PartitionMovements_movePartition(partitionMovements, partition,
- oldConsumer, newConsumer);
-
- rd_kafka_topic_partition_list_add(
- RD_MAP_GET(currentAssignment, newConsumer), partition->topic,
- partition->partition);
-
- rd_kafka_topic_partition_list_del(
- RD_MAP_GET(currentAssignment, oldConsumer), partition->topic,
- partition->partition);
-
- RD_MAP_SET(currentPartitionConsumer,
- rd_kafka_topic_partition_copy(partition), newConsumer);
-
- /* Re-sort after assignment count has changed. */
- rd_list_sort(sortedCurrentSubscriptions,
- sort_by_map_elem_val_toppar_list_cnt);
-
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "%s [%" PRId32 "] %sassigned to %s (from %s)",
- partition->topic, partition->partition,
- oldConsumer ? "re" : "", newConsumer,
- oldConsumer ? oldConsumer : "(none)");
-}
-
-
-/**
- * @brief Reassign \p partition to \p newConsumer
- */
-static void reassignPartitionToConsumer(
- rd_kafka_t *rk,
- PartitionMovements_t *partitionMovements,
- const rd_kafka_topic_partition_t *partition,
- map_str_toppar_list_t *currentAssignment,
- rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_toppar_str_t *currentPartitionConsumer,
- const char *newConsumer) {
-
- const char *consumer = RD_MAP_GET(currentPartitionConsumer, partition);
- const rd_kafka_topic_partition_t *partitionToBeMoved;
-
- /* Find the correct partition movement considering
- * the stickiness requirement. */
- partitionToBeMoved = PartitionMovements_getTheActualPartitionToBeMoved(
- partitionMovements, partition, consumer, newConsumer);
-
- processPartitionMovement(rk, partitionMovements, partitionToBeMoved,
- newConsumer, currentAssignment,
- sortedCurrentSubscriptions,
- currentPartitionConsumer);
-}
-
-/**
- * @brief Reassign \p partition to an eligible new consumer.
- */
-static void
-reassignPartition(rd_kafka_t *rk,
- PartitionMovements_t *partitionMovements,
- const rd_kafka_topic_partition_t *partition,
- map_str_toppar_list_t *currentAssignment,
- rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_toppar_str_t *currentPartitionConsumer,
- map_str_toppar_list_t *consumer2AllPotentialPartitions) {
-
- const rd_map_elem_t *elem;
- int i;
-
- /* Find the new consumer */
- RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
- const char *newConsumer = (const char *)elem->key;
-
- if (rd_kafka_topic_partition_list_find(
- RD_MAP_GET(consumer2AllPotentialPartitions,
- newConsumer),
- partition->topic, partition->partition)) {
- reassignPartitionToConsumer(
- rk, partitionMovements, partition,
- currentAssignment, sortedCurrentSubscriptions,
- currentPartitionConsumer, newConsumer);
-
- return;
- }
- }
-
- rd_assert(!*"reassignPartition(): no new consumer found");
-}
-
-
-
-/**
- * @brief Determine if the current assignment is balanced.
- *
- * @param currentAssignment the assignment whose balance needs to be checked
- * @param sortedCurrentSubscriptions an ascending sorted set of consumers based
- * on how many topic partitions are already
- * assigned to them
- * @param consumer2AllPotentialPartitions a mapping of all consumers to all
- * potential topic partitions that can be
- * assigned to them.
- * This parameter is called
- * allSubscriptions in the Java
- * implementation, but we choose this
- * name to be more consistent with its
- * use elsewhere in the code.
- * @param partition2AllPotentialConsumers a mapping of all partitions to
- * all potential consumers.
- *
- * @returns true if the given assignment is balanced; false otherwise
- */
-static rd_bool_t
-isBalanced(rd_kafka_t *rk,
- map_str_toppar_list_t *currentAssignment,
- const rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- map_toppar_list_t *partition2AllPotentialConsumers) {
-
- int minimum = ((const rd_kafka_topic_partition_list_t
- *)((const rd_map_elem_t *)rd_list_first(
- sortedCurrentSubscriptions))
- ->value)
- ->cnt;
- int maximum = ((const rd_kafka_topic_partition_list_t
- *)((const rd_map_elem_t *)rd_list_last(
- sortedCurrentSubscriptions))
- ->value)
- ->cnt;
-
- /* Mapping from partitions to the consumer assigned to them */
- /* FIXME: don't create this map prior to the min/max check below */
- map_toppar_str_t allPartitions = RD_MAP_INITIALIZER(
- RD_MAP_CNT(partition2AllPotentialConsumers),
- rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- NULL /* references currentAssignment */,
- NULL /* references currentAssignment */);
-
- /* Iterators */
- const rd_kafka_topic_partition_list_t *partitions;
- const char *consumer;
- const rd_map_elem_t *elem;
- int i;
-
- /* The assignment is balanced if minimum and maximum numbers of
- * partitions assigned to consumers differ by at most one. */
- if (minimum >= maximum - 1) {
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "Assignment is balanced: "
- "minimum %d and maximum %d partitions assigned "
- "to each consumer",
- minimum, maximum);
- RD_MAP_DESTROY(&allPartitions);
- return rd_true;
- }
-
- /* Create a mapping from partitions to the consumer assigned to them */
- RD_MAP_FOREACH(consumer, partitions, currentAssignment) {
-
- for (i = 0; i < partitions->cnt; i++) {
- const rd_kafka_topic_partition_t *partition =
- &partitions->elems[i];
- const char *existing;
- if ((existing = RD_MAP_GET(&allPartitions, partition)))
- rd_kafka_log(rk, LOG_ERR, "STICKY",
- "Sticky assignor: %s [%" PRId32
- "] "
- "is assigned to more than one "
- "consumer (%s and %s)",
- partition->topic,
- partition->partition, existing,
- consumer);
-
- RD_MAP_SET(&allPartitions, partition, consumer);
- }
- }
-
-
- /* For each consumer that does not have all the topic partitions it
- * can get, make sure that none of the topic partitions it could get
- * but did not get can be moved to it, as that would break the balance.
- *
- * Note: Since sortedCurrentSubscriptions elements are pointers to
- * currentAssignment's element we get both the consumer
- * and partition list in elem here. */
- RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
- const char *consumer = (const char *)elem->key;
- const rd_kafka_topic_partition_list_t *potentialTopicPartitions;
- const rd_kafka_topic_partition_list_t *consumerPartitions;
-
- consumerPartitions =
- (const rd_kafka_topic_partition_list_t *)elem->value;
-
- potentialTopicPartitions =
- RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
-
- /* Skip if this consumer already has all the topic partitions
- * it can get. */
- if (consumerPartitions->cnt == potentialTopicPartitions->cnt)
- continue;
-
- /* Otherwise make sure it can't get any more partitions */
-
- for (i = 0; i < potentialTopicPartitions->cnt; i++) {
- const rd_kafka_topic_partition_t *partition =
- &potentialTopicPartitions->elems[i];
- const char *otherConsumer;
- int otherConsumerPartitionCount;
-
- if (rd_kafka_topic_partition_list_find(
- consumerPartitions, partition->topic,
- partition->partition))
- continue;
-
- otherConsumer = RD_MAP_GET(&allPartitions, partition);
- otherConsumerPartitionCount =
- RD_MAP_GET(currentAssignment, otherConsumer)->cnt;
-
- if (consumerPartitions->cnt <
- otherConsumerPartitionCount) {
- rd_kafka_dbg(
- rk, ASSIGNOR, "STICKY",
- "%s [%" PRId32
- "] can be moved from "
- "consumer %s (%d partition(s)) to "
- "consumer %s (%d partition(s)) "
- "for a more balanced assignment",
- partition->topic, partition->partition,
- otherConsumer, otherConsumerPartitionCount,
- consumer, consumerPartitions->cnt);
- RD_MAP_DESTROY(&allPartitions);
- return rd_false;
- }
- }
- }
-
- RD_MAP_DESTROY(&allPartitions);
- return rd_true;
-}
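-
-/* Illustrative example (not part of the original source): with three
- * consumers holding {2, 3, 3} partitions, minimum=2 and maximum=3,
- * so minimum >= maximum - 1 holds and the assignment is balanced.
- * With {1, 3, 3} that quick check fails and the per-consumer scan
- * above decides whether a partition can still be moved. */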
-
-
-/**
- * @brief Perform reassignment.
- *
- * @returns true if reassignment was performed.
- */
-static rd_bool_t
-performReassignments(rd_kafka_t *rk,
- PartitionMovements_t *partitionMovements,
- rd_kafka_topic_partition_list_t *reassignablePartitions,
- map_str_toppar_list_t *currentAssignment,
- map_toppar_cgpair_t *prevAssignment,
- rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- map_toppar_list_t *partition2AllPotentialConsumers,
- map_toppar_str_t *currentPartitionConsumer) {
- rd_bool_t reassignmentPerformed = rd_false;
- rd_bool_t modified, saveIsBalanced = rd_false;
- int iterations = 0;
-
- /* Repeat reassignment until no partition can be moved to
- * improve the balance. */
- do {
- int i;
-
- iterations++;
-
- modified = rd_false;
-
- /* Reassign the reassignable partitions, starting with the
- * partition with the fewest potential consumers, until the
- * full list is processed or a balance is achieved. */
-
- for (i = 0; i < reassignablePartitions->cnt &&
- !isBalanced(rk, currentAssignment,
- sortedCurrentSubscriptions,
- consumer2AllPotentialPartitions,
- partition2AllPotentialConsumers);
- i++) {
- const rd_kafka_topic_partition_t *partition =
- &reassignablePartitions->elems[i];
- const rd_list_t *consumers = RD_MAP_GET(
- partition2AllPotentialConsumers, partition);
- const char *consumer, *otherConsumer;
- const ConsumerGenerationPair_t *prevcgp;
- const rd_kafka_topic_partition_list_t *currAssignment;
- int j;
-
- /* FIXME: Is this a local error/bug? If so, assert */
- if (rd_list_cnt(consumers) <= 1)
- rd_kafka_log(
- rk, LOG_ERR, "STICKY",
- "Sticky assignor: expected more than "
- "one potential consumer for partition "
- "%s [%" PRId32 "]",
- partition->topic, partition->partition);
-
- /* The partition must have a current consumer */
- consumer =
- RD_MAP_GET(currentPartitionConsumer, partition);
- rd_assert(consumer);
-
- currAssignment =
- RD_MAP_GET(currentAssignment, consumer);
- prevcgp = RD_MAP_GET(prevAssignment, partition);
-
- if (prevcgp &&
- currAssignment->cnt >
- RD_MAP_GET(currentAssignment, prevcgp->consumer)
- ->cnt +
- 1) {
- reassignPartitionToConsumer(
- rk, partitionMovements, partition,
- currentAssignment,
- sortedCurrentSubscriptions,
- currentPartitionConsumer,
- prevcgp->consumer);
- reassignmentPerformed = rd_true;
- modified = rd_true;
- continue;
- }
-
- /* Check if a better-suited consumer exists for the
- * partition; if so, reassign it. */
- RD_LIST_FOREACH(otherConsumer, consumers, j) {
- if (consumer == otherConsumer)
- continue;
-
- if (currAssignment->cnt <=
- RD_MAP_GET(currentAssignment, otherConsumer)
- ->cnt +
- 1)
- continue;
-
- reassignPartition(
- rk, partitionMovements, partition,
- currentAssignment,
- sortedCurrentSubscriptions,
- currentPartitionConsumer,
- consumer2AllPotentialPartitions);
-
- reassignmentPerformed = rd_true;
- modified = rd_true;
- break;
- }
- }
-
- if (i < reassignablePartitions->cnt)
- saveIsBalanced = rd_true;
-
- } while (modified);
-
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "Reassignment %sperformed after %d iteration(s) of %d "
- "reassignable partition(s)%s",
- reassignmentPerformed ? "" : "not ", iterations,
- reassignablePartitions->cnt,
- saveIsBalanced ? ": assignment is balanced" : "");
-
- return reassignmentPerformed;
-}
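-
-/* Worked example of the two move rules above (hypothetical data):
- * - stickiness rule: t1-0 is owned by A (3 partitions) but was owned
- *   by B (1 partition) in the previous generation; since 3 > 1 + 1
- *   the partition is moved back to B.
- * - balance rule: t1-0 is owned by A (3 partitions) while C
- *   (1 partition) may also consume it; since 3 > 1 + 1 the partition
- *   is reassigned (to the least-loaded eligible consumer, here C). */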
-
-
-/**
- * @returns the balance score of the given assignment, calculated as
- * the sum of the assigned partition count differences over
- * all consumer pairs.
- *
- * A perfectly balanced assignment (with all consumers getting the same number
- * of partitions) has a balance score of 0.
- *
- * Lower balance score indicates a more balanced assignment.
- * FIXME: should be called imbalance score then?
- */
-static int getBalanceScore(map_str_toppar_list_t *assignment) {
- const char *consumer;
- const rd_kafka_topic_partition_list_t *partitions;
- int *sizes;
- int cnt = 0;
- int score = 0;
- int i, next;
-
- /* If there is just a single consumer the assignment will be balanced */
- if (RD_MAP_CNT(assignment) < 2)
- return 0;
-
- sizes = rd_malloc(sizeof(*sizes) * RD_MAP_CNT(assignment));
-
- RD_MAP_FOREACH(consumer, partitions, assignment)
- sizes[cnt++] = partitions->cnt;
-
- for (next = 0; next < cnt; next++)
- for (i = next + 1; i < cnt; i++)
- score += abs(sizes[next] - sizes[i]);
-
- rd_free(sizes);
-
- if (consumer)
- ; /* Avoid unused warning */
-
- return score;
-}
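-
-/* Worked example (hypothetical sizes): for assignment sizes {5, 3, 1}
- * the score is |5-3| + |5-1| + |3-1| = 2 + 4 + 2 = 8, while a fully
- * balanced {3, 3, 3} scores 0. */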
-
-
-
-/**
- * @brief Balance the current assignment using the data structures
- * created in assign_cb(). */
-static void balance(rd_kafka_t *rk,
- PartitionMovements_t *partitionMovements,
- map_str_toppar_list_t *currentAssignment,
- map_toppar_cgpair_t *prevAssignment,
- rd_kafka_topic_partition_list_t *sortedPartitions,
- rd_kafka_topic_partition_list_t *unassignedPartitions,
- rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- map_toppar_list_t *partition2AllPotentialConsumers,
- map_toppar_str_t *currentPartitionConsumer,
- rd_bool_t revocationRequired) {
-
- /* If the consumer with the most assignments (i.e. the last element
- * in the ascending-ordered sortedCurrentSubscriptions list) has
- * zero partitions assigned, it means there is no current assignment
- * for any consumer and the group is thus initializing for the
- * first time. */
- rd_bool_t initializing = ((const rd_kafka_topic_partition_list_t
- *)((const rd_map_elem_t *)rd_list_last(
- sortedCurrentSubscriptions))
- ->value)
- ->cnt == 0;
- rd_bool_t reassignmentPerformed = rd_false;
-
- map_str_toppar_list_t fixedAssignments =
- RD_MAP_INITIALIZER(RD_MAP_CNT(partition2AllPotentialConsumers),
- rd_map_str_cmp,
- rd_map_str_hash,
- NULL,
- NULL /* Will transfer ownership of the list
- * to currentAssignment at the end of
- * this function. */);
-
- map_str_toppar_list_t preBalanceAssignment = RD_MAP_INITIALIZER(
- RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash,
- NULL /* references currentAssignment */,
- rd_kafka_topic_partition_list_destroy_free);
- map_toppar_str_t preBalancePartitionConsumers = RD_MAP_INITIALIZER(
- RD_MAP_CNT(partition2AllPotentialConsumers),
- rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free,
- NULL /* refs currentPartitionConsumer */);
- int newScore, oldScore;
- /* Iterator variables */
- const rd_kafka_topic_partition_t *partition;
- const void *ignore;
- const rd_map_elem_t *elem;
- int i;
-
- /* Assign all unassigned partitions */
- for (i = 0; i < unassignedPartitions->cnt; i++) {
- partition = &unassignedPartitions->elems[i];
-
- /* Skip if there is no potential consumer for the partition.
- * FIXME: How could this be? */
- if (rd_list_empty(RD_MAP_GET(partition2AllPotentialConsumers,
- partition))) {
- rd_dassert(!*"sticky assignor bug");
- continue;
- }
-
- assignPartition(
- partition, sortedCurrentSubscriptions, currentAssignment,
- consumer2AllPotentialPartitions, currentPartitionConsumer);
- }
-
-
- /* Narrow down the reassignment scope to only those partitions that can
- * actually be reassigned. */
- RD_MAP_FOREACH(partition, ignore, partition2AllPotentialConsumers) {
- if (partitionCanParticipateInReassignment(
- partition, partition2AllPotentialConsumers))
- continue;
-
- rd_kafka_topic_partition_list_del(
- sortedPartitions, partition->topic, partition->partition);
- rd_kafka_topic_partition_list_del(unassignedPartitions,
- partition->topic,
- partition->partition);
- }
-
- if (ignore)
- ; /* Avoid unused warning */
-
-
- /* Narrow down the reassignment scope to only those consumers that are
- * subject to reassignment. */
- RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
- const char *consumer = (const char *)elem->key;
- rd_kafka_topic_partition_list_t *partitions;
-
- if (consumerCanParticipateInReassignment(
- rk, consumer, currentAssignment,
- consumer2AllPotentialPartitions,
- partition2AllPotentialConsumers))
- continue;
-
- rd_list_remove_elem(sortedCurrentSubscriptions, i);
- i--; /* Since the current element is removed we need
- * to rewind the iterator. */
-
- partitions = rd_kafka_topic_partition_list_copy(
- RD_MAP_GET(currentAssignment, consumer));
- RD_MAP_DELETE(currentAssignment, consumer);
-
- RD_MAP_SET(&fixedAssignments, consumer, partitions);
- }
-
-
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "Prepared balanced reassignment for %d consumers, "
- "%d available partition(s) where of %d are unassigned "
- "(initializing=%s, revocationRequired=%s, "
- "%d fixed assignments)",
- (int)RD_MAP_CNT(consumer2AllPotentialPartitions),
- sortedPartitions->cnt, unassignedPartitions->cnt,
- initializing ? "true" : "false",
- revocationRequired ? "true" : "false",
- (int)RD_MAP_CNT(&fixedAssignments));
-
- /* Create a deep copy of the current assignment so we can revert to it
- * if we do not get a more balanced assignment later. */
- RD_MAP_COPY(&preBalanceAssignment, currentAssignment,
- NULL /* just reference the key */,
- (rd_map_copy_t *)rd_kafka_topic_partition_list_copy);
- RD_MAP_COPY(&preBalancePartitionConsumers, currentPartitionConsumer,
- rd_kafka_topic_partition_copy_void,
- NULL /* references assign_cb(members) fields */);
-
-
- /* If we don't already need to revoke something due to subscription
- * changes, first try to balance by only moving newly added partitions.
- */
- if (!revocationRequired && unassignedPartitions->cnt > 0)
- performReassignments(
- rk, partitionMovements, unassignedPartitions,
- currentAssignment, prevAssignment,
- sortedCurrentSubscriptions, consumer2AllPotentialPartitions,
- partition2AllPotentialConsumers, currentPartitionConsumer);
-
- reassignmentPerformed = performReassignments(
- rk, partitionMovements, sortedPartitions, currentAssignment,
- prevAssignment, sortedCurrentSubscriptions,
- consumer2AllPotentialPartitions, partition2AllPotentialConsumers,
- currentPartitionConsumer);
-
- /* If we are not preserving existing assignments and we have made
- * changes to the current assignment, make sure the result is more
- * balanced; otherwise, revert to the previous assignment. */
-
- if (!initializing && reassignmentPerformed &&
- (newScore = getBalanceScore(currentAssignment)) >=
- (oldScore = getBalanceScore(&preBalanceAssignment))) {
-
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "Reassignment performed but keeping previous "
- "assignment since balance score did not improve: "
- "new score %d (%d consumers) vs "
- "old score %d (%d consumers): "
- "lower score is better",
- newScore, (int)RD_MAP_CNT(currentAssignment),
- oldScore, (int)RD_MAP_CNT(&preBalanceAssignment));
-
- RD_MAP_COPY(
- currentAssignment, &preBalanceAssignment,
- NULL /* just reference the key */,
- (rd_map_copy_t *)rd_kafka_topic_partition_list_copy);
-
- RD_MAP_CLEAR(currentPartitionConsumer);
- RD_MAP_COPY(currentPartitionConsumer,
- &preBalancePartitionConsumers,
- rd_kafka_topic_partition_copy_void,
- NULL /* references assign_cb(members) fields */);
- }
-
- RD_MAP_DESTROY(&preBalancePartitionConsumers);
- RD_MAP_DESTROY(&preBalanceAssignment);
-
- /* Add the fixed assignments (those that could not change) back. */
- if (!RD_MAP_IS_EMPTY(&fixedAssignments)) {
- const rd_map_elem_t *elem;
-
- RD_MAP_FOREACH_ELEM(elem, &fixedAssignments.rmap) {
- const char *consumer = elem->key;
- rd_kafka_topic_partition_list_t *partitions =
- (rd_kafka_topic_partition_list_t *)elem->value;
-
- RD_MAP_SET(currentAssignment, consumer, partitions);
-
- rd_list_add(sortedCurrentSubscriptions, (void *)elem);
- }
-
- /* Re-sort */
- rd_list_sort(sortedCurrentSubscriptions,
- sort_by_map_elem_val_toppar_list_cnt);
- }
-
- RD_MAP_DESTROY(&fixedAssignments);
-}
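-
-/* Example of the revert rule above (hypothetical scores): if the
- * pre-balance assignment scores 2 and the reassigned one scores 4,
- * the reassignment made the balance no better (lower is better), so
- * currentAssignment and currentPartitionConsumer are restored from
- * the preBalance* copies. */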
-
-
-
-/**
- * @brief Populate subscriptions, current and previous assignments based on the
- * \p members assignments.
- */
-static void prepopulateCurrentAssignments(
- rd_kafka_t *rk,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- map_str_toppar_list_t *subscriptions,
- map_str_toppar_list_t *currentAssignment,
- map_toppar_cgpair_t *prevAssignment,
- map_toppar_str_t *currentPartitionConsumer,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- size_t estimated_partition_cnt) {
-
- /* We need to process each subscription's user data with the
- * consumer's reported generation in mind:
- * in case of a conflict a higher generation overwrites a lower one,
- * while conflicting claims within the same generation are logged
- * and skipped (see below).
- */
-
- /* For each partition we create a sorted list (by generation) of
- * its consumers. */
- RD_MAP_LOCAL_INITIALIZER(
- sortedPartitionConsumersByGeneration, member_cnt * 10 /* FIXME */,
- const rd_kafka_topic_partition_t *,
- /* List of ConsumerGenerationPair_t */
- rd_list_t *, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash, NULL, rd_list_destroy_free);
- const rd_kafka_topic_partition_t *partition;
- rd_list_t *consumers;
- int i;
-
- /* For each partition that is currently assigned to the group members
- * add the member and its generation to
- * sortedPartitionConsumersByGeneration (which is sorted afterwards)
- * indexed by the partition. */
- for (i = 0; i < (int)member_cnt; i++) {
- rd_kafka_group_member_t *consumer = &members[i];
- int j;
-
- RD_MAP_SET(subscriptions, consumer->rkgm_member_id->str,
- consumer->rkgm_subscription);
-
- RD_MAP_SET(currentAssignment, consumer->rkgm_member_id->str,
- rd_kafka_topic_partition_list_new(10));
-
- RD_MAP_SET(consumer2AllPotentialPartitions,
- consumer->rkgm_member_id->str,
- rd_kafka_topic_partition_list_new(
- (int)estimated_partition_cnt));
-
- if (!consumer->rkgm_owned)
- continue;
-
- for (j = 0; j < (int)consumer->rkgm_owned->cnt; j++) {
- partition = &consumer->rkgm_owned->elems[j];
-
- consumers = RD_MAP_GET_OR_SET(
- &sortedPartitionConsumersByGeneration, partition,
- rd_list_new(10, ConsumerGenerationPair_destroy));
-
- if (consumer->rkgm_generation != -1 &&
- rd_list_find(
- consumers, &consumer->rkgm_generation,
- ConsumerGenerationPair_cmp_generation)) {
- rd_kafka_log(
- rk, LOG_WARNING, "STICKY",
- "Sticky assignor: "
- "%s [%" PRId32
- "] is assigned to "
- "multiple consumers with same "
- "generation %d: "
- "skipping member %.*s",
- partition->topic, partition->partition,
- consumer->rkgm_generation,
- RD_KAFKAP_STR_PR(consumer->rkgm_member_id));
- continue;
- }
-
- rd_list_add(consumers,
- ConsumerGenerationPair_new(
- consumer->rkgm_member_id->str,
- consumer->rkgm_generation));
-
- RD_MAP_SET(currentPartitionConsumer,
- rd_kafka_topic_partition_copy(partition),
- consumer->rkgm_member_id->str);
- }
- }
-
- /* Populate currentAssignment and prevAssignment.
- * prevAssignment holds the prior ConsumerGenerationPair_t
- * (before current) of each partition. */
- RD_MAP_FOREACH(partition, consumers,
- &sortedPartitionConsumersByGeneration) {
- /* current and previous are the last two consumers
- * of each partition. */
- ConsumerGenerationPair_t *current, *previous;
- rd_kafka_topic_partition_list_t *partitions;
-
- /* Sort the per-partition consumers list by generation */
- rd_list_sort(consumers, ConsumerGenerationPair_cmp_generation);
-
- /* Add current (highest generation) consumer
- * to currentAssignment. */
- current = rd_list_elem(consumers, 0);
- partitions = RD_MAP_GET(currentAssignment, current->consumer);
- rd_kafka_topic_partition_list_add(partitions, partition->topic,
- partition->partition);
-
- /* Add previous (next highest generation) consumer, if any,
- * to prevAssignment. */
- previous = rd_list_elem(consumers, 1);
- if (previous)
- RD_MAP_SET(
- prevAssignment,
- rd_kafka_topic_partition_copy(partition),
- ConsumerGenerationPair_new(previous->consumer,
- previous->generation));
- }
-
- RD_MAP_DESTROY(&sortedPartitionConsumersByGeneration);
-}
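-
-/* Illustrative example (hypothetical members): if member A reports
- * owning t1-0 with generation 7 and member B reports owning t1-0 with
- * generation 6, the per-partition list sorts to [A(7), B(6)]:
- * currentAssignment[A] gains t1-0 and prevAssignment[t1-0] is set to
- * (B, 6). Had B also reported generation 7, its claim would have been
- * logged and skipped. */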
-
-
-/**
- * @brief Populate maps for potential partitions per consumer and vice-versa.
- */
-static void
-populatePotentialMaps(const rd_kafka_assignor_topic_t *atopic,
- map_toppar_list_t *partition2AllPotentialConsumers,
- map_str_toppar_list_t *consumer2AllPotentialPartitions,
- size_t estimated_partition_cnt) {
- int i;
- const rd_kafka_group_member_t *rkgm;
-
- /* for each eligible (subscribed and available) topic (\p atopic):
- * for each member subscribing to that topic:
- * and for each partition of that topic:
- * add consumer and partition to:
- * partition2AllPotentialConsumers
- * consumer2AllPotentialPartitions
- */
-
- RD_LIST_FOREACH(rkgm, &atopic->members, i) {
- const char *consumer = rkgm->rkgm_member_id->str;
- rd_kafka_topic_partition_list_t *partitions =
- RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
- int j;
-
- rd_assert(partitions != NULL);
-
- for (j = 0; j < atopic->metadata->partition_cnt; j++) {
- rd_kafka_topic_partition_t *partition;
- rd_list_t *consumers;
-
- /* consumer2AllPotentialPartitions[consumer] += part */
- partition = rd_kafka_topic_partition_list_add(
- partitions, atopic->metadata->topic,
- atopic->metadata->partitions[j].id);
-
- /* partition2AllPotentialConsumers[part] += consumer */
- if (!(consumers =
- RD_MAP_GET(partition2AllPotentialConsumers,
- partition))) {
- consumers = rd_list_new(
- RD_MAX(2, (int)estimated_partition_cnt / 2),
- NULL);
- RD_MAP_SET(
- partition2AllPotentialConsumers,
- rd_kafka_topic_partition_copy(partition),
- consumers);
- }
- rd_list_add(consumers, (void *)consumer);
- }
- }
-}
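-
-/* Illustrative example (hypothetical input): for topic t1 with
- * partitions {0, 1} and subscribing members A and B this yields
- * consumer2AllPotentialPartitions =
- * { A: [t1-0, t1-1], B: [t1-0, t1-1] } and
- * partition2AllPotentialConsumers =
- * { t1-0: [A, B], t1-1: [A, B] }. */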
-
-
-/**
- * @returns true if all consumers have identical subscriptions based on
- * the currently available topics and partitions.
- *
- * @remark The Java code checks both partition2AllPotentialConsumers
- * and consumer2AllPotentialPartitions; since these maps are
- * symmetrical, checking one of them would suffice.
- * FIXME: we currently check both anyway.
- */
-static rd_bool_t areSubscriptionsIdentical(
- map_toppar_list_t *partition2AllPotentialConsumers,
- map_str_toppar_list_t *consumer2AllPotentialPartitions) {
- const void *ignore;
- const rd_list_t *lcurr, *lprev = NULL;
- const rd_kafka_topic_partition_list_t *pcurr, *pprev = NULL;
-
- RD_MAP_FOREACH(ignore, lcurr, partition2AllPotentialConsumers) {
- if (lprev && rd_list_cmp(lcurr, lprev, rd_map_str_cmp))
- return rd_false;
- lprev = lcurr;
- }
-
- RD_MAP_FOREACH(ignore, pcurr, consumer2AllPotentialPartitions) {
- if (pprev && rd_kafka_topic_partition_list_cmp(
- pcurr, pprev, rd_kafka_topic_partition_cmp))
- return rd_false;
- pprev = pcurr;
- }
-
- if (ignore) /* Avoid unused warning */
- ;
-
- return rd_true;
-}
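-
-/* Example (hypothetical): with subscriptions A -> {t1}, B -> {t1, t2}
- * the potential-consumer lists differ between t1 ([A, B]) and
- * t2 ([B]), so rd_false is returned; with A -> {t1, t2} and
- * B -> {t1, t2} all per-partition lists are equal and rd_true is
- * returned. */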
-
-
-/**
- * @brief Comparator to sort an rd_kafka_topic_partition_list_t in ascending
- * order by the number of list elements in the .opaque field, or
- * secondarily by the topic name.
- * Used by sortPartitions().
- */
-static int
-toppar_sort_by_list_cnt(const void *_a, const void *_b, void *opaque) {
- const rd_kafka_topic_partition_t *a = _a, *b = _b;
- const rd_list_t *al = a->opaque, *bl = b->opaque;
- int r = rd_list_cnt(al) - rd_list_cnt(bl); /* ascending order */
- if (r)
- return r;
- return rd_kafka_topic_partition_cmp(a, b);
-}
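-
-/* Example (hypothetical): partitions whose potential-consumer lists
- * have lengths t2-0: 1, t1-0: 2, t1-1: 2 sort as t2-0, t1-0, t1-1:
- * fewest potential consumers first, ties broken by topic+partition. */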
-
-
-/**
- * @brief Sort the valid partitions so that they are processed in the
- * potential-reassignment phase in an order that causes minimal
- * partition movement among consumers (hence honouring maximal
- * stickiness).
- *
- * @returns The sorted partition list.
- */
-static rd_kafka_topic_partition_list_t *
-sortPartitions(rd_kafka_t *rk,
- map_str_toppar_list_t *currentAssignment,
- map_toppar_cgpair_t *prevAssignment,
- rd_bool_t isFreshAssignment,
- map_toppar_list_t *partition2AllPotentialConsumers,
- map_str_toppar_list_t *consumer2AllPotentialPartitions) {
-
- rd_kafka_topic_partition_list_t *sortedPartitions;
- map_str_toppar_list_t assignments = RD_MAP_INITIALIZER(
- RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash,
- NULL, rd_kafka_topic_partition_list_destroy_free);
- rd_kafka_topic_partition_list_t *partitions;
- const rd_kafka_topic_partition_t *partition;
- const rd_list_t *consumers;
- const char *consumer;
- rd_list_t sortedConsumers; /* element is the (rd_map_elem_t *) from
- * assignments. */
- const rd_map_elem_t *elem;
- rd_bool_t wasEmpty;
- int i;
-
- sortedPartitions = rd_kafka_topic_partition_list_new(
- (int)RD_MAP_CNT(partition2AllPotentialConsumers));
-
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "Sort %d partitions in %s assignment",
- (int)RD_MAP_CNT(partition2AllPotentialConsumers),
- isFreshAssignment ? "fresh" : "existing");
-
- if (isFreshAssignment ||
- !areSubscriptionsIdentical(partition2AllPotentialConsumers,
- consumer2AllPotentialPartitions)) {
- /* Create an ascending sorted list of partitions based on
- * how many consumers can potentially use them. */
- RD_MAP_FOREACH(partition, consumers,
- partition2AllPotentialConsumers) {
- rd_kafka_topic_partition_list_add(sortedPartitions,
- partition->topic,
- partition->partition)
- ->opaque = (void *)consumers;
- }
-
- rd_kafka_topic_partition_list_sort(
- sortedPartitions, toppar_sort_by_list_cnt, NULL);
-
- RD_MAP_DESTROY(&assignments);
-
- return sortedPartitions;
- }
-
- /* If this is a reassignment and the subscriptions are identical
- * then we just need to list partitions in a round robin fashion
- * (from consumers with most assigned partitions to those
- * with least assigned partitions). */
-
- /* Create an ascending sorted list of consumers by valid
- * partition count. The list element is the `rd_map_elem_t *`
- * of the assignments map. This allows us to get a sorted list
- * of consumers without too much data duplication. */
- rd_list_init(&sortedConsumers, (int)RD_MAP_CNT(currentAssignment),
- NULL);
-
- RD_MAP_FOREACH(consumer, partitions, currentAssignment) {
- rd_kafka_topic_partition_list_t *partitions2;
-
- /* Sort assigned partitions for consistency (during tests) */
- rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
-
- partitions2 =
- rd_kafka_topic_partition_list_new(partitions->cnt);
-
- for (i = 0; i < partitions->cnt; i++) {
- partition = &partitions->elems[i];
-
- /* Only add partitions from the current assignment
- * that still exist. */
- if (RD_MAP_GET(partition2AllPotentialConsumers,
- partition))
- rd_kafka_topic_partition_list_add(
- partitions2, partition->topic,
- partition->partition);
- }
-
- if (partitions2->cnt > 0) {
- elem = RD_MAP_SET(&assignments, consumer, partitions2);
- rd_list_add(&sortedConsumers, (void *)elem);
- } else
- rd_kafka_topic_partition_list_destroy(partitions2);
- }
-
- /* Sort consumers */
- rd_list_sort(&sortedConsumers, sort_by_map_elem_val_toppar_list_cnt);
-
- /* At this point sortedConsumers contains an ascending-sorted list
- * of consumers based on how many valid partitions are currently
- * assigned to them. */
-
- while (!rd_list_empty(&sortedConsumers)) {
- /* Take consumer with most partitions */
- const rd_map_elem_t *elem = rd_list_last(&sortedConsumers);
- const char *consumer = (const char *)elem->key;
- /* Currently assigned partitions to this consumer */
- rd_kafka_topic_partition_list_t *remainingPartitions =
- RD_MAP_GET(&assignments, consumer);
- /* Partitions that were assigned to a different consumer
- * last time */
- rd_kafka_topic_partition_list_t *prevPartitions =
- rd_kafka_topic_partition_list_new(
- (int)RD_MAP_CNT(prevAssignment));
- rd_bool_t reSort = rd_true;
-
- /* From the partitions that had a different consumer before,
- * keep only those that are assigned to this consumer now. */
- for (i = 0; i < remainingPartitions->cnt; i++) {
- partition = &remainingPartitions->elems[i];
- if (RD_MAP_GET(prevAssignment, partition))
- rd_kafka_topic_partition_list_add(
- prevPartitions, partition->topic,
- partition->partition);
- }
-
- if (prevPartitions->cnt > 0) {
- /* If there is a partition of this consumer that was
- * assigned to another consumer before, then mark
- * it as a good option for reassignment. */
- partition = &prevPartitions->elems[0];
-
- rd_kafka_topic_partition_list_del(remainingPartitions,
- partition->topic,
- partition->partition);
-
- rd_kafka_topic_partition_list_add(sortedPartitions,
- partition->topic,
- partition->partition);
-
- rd_kafka_topic_partition_list_del_by_idx(prevPartitions,
- 0);
-
- } else if (remainingPartitions->cnt > 0) {
- /* Otherwise mark any other one of the current
- * partitions as a reassignment candidate. */
- partition = &remainingPartitions->elems[0];
-
- rd_kafka_topic_partition_list_add(sortedPartitions,
- partition->topic,
- partition->partition);
-
- rd_kafka_topic_partition_list_del_by_idx(
- remainingPartitions, 0);
- } else {
- rd_list_remove_elem(&sortedConsumers,
- rd_list_cnt(&sortedConsumers) - 1);
- /* No need to re-sort the list (below) */
- reSort = rd_false;
- }
-
- rd_kafka_topic_partition_list_destroy(prevPartitions);
-
- if (reSort) {
- /* Re-sort the list to keep the consumer with the most
- * partitions at the end of the list.
- * This should be an O(N) operation given it is at most
- * a single shuffle. */
- rd_list_sort(&sortedConsumers,
- sort_by_map_elem_val_toppar_list_cnt);
- }
- }
-
-
- wasEmpty = !sortedPartitions->cnt;
-
- RD_MAP_FOREACH(partition, consumers, partition2AllPotentialConsumers)
- rd_kafka_topic_partition_list_upsert(sortedPartitions, partition->topic,
- partition->partition);
-
- /* If all partitions were added in the foreach loop just above
- * it means there is no order to retain from the sortedConsumers loop
- * above, so we sort the partitions by topic+partition
- * to get consistent results (mainly in tests). */
- if (wasEmpty)
- rd_kafka_topic_partition_list_sort(sortedPartitions, NULL,
- NULL);
-
- rd_list_destroy(&sortedConsumers);
- RD_MAP_DESTROY(&assignments);
-
- return sortedPartitions;
-}
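-
-/* Illustrative example (hypothetical): with identical subscriptions
- * and currentAssignment = { A: [t1-0, t1-1, t1-2], B: [t1-3] } the
- * loop above repeatedly takes one partition from the currently
- * most-loaded consumer (preferring partitions that belonged to a
- * different consumer in the previous generation), so A's surplus
- * partitions are listed, and thus considered for reassignment,
- * first. */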
-
-
-/**
- * @brief Transfer currentAssignment to members array.
- */
-static void assignToMembers(map_str_toppar_list_t *currentAssignment,
- rd_kafka_group_member_t *members,
- size_t member_cnt) {
- size_t i;
-
- for (i = 0; i < member_cnt; i++) {
- rd_kafka_group_member_t *rkgm = &members[i];
- const rd_kafka_topic_partition_list_t *partitions =
- RD_MAP_GET(currentAssignment, rkgm->rkgm_member_id->str);
- if (rkgm->rkgm_assignment)
- rd_kafka_topic_partition_list_destroy(
- rkgm->rkgm_assignment);
- rkgm->rkgm_assignment =
- rd_kafka_topic_partition_list_copy(partitions);
- }
-}
-
-
-/**
- * @brief KIP-54 and KIP-341/FIXME sticky assignor.
- *
- * This code is closely mimicking the AK Java AbstractStickyAssignor.assign().
- */
-rd_kafka_resp_err_t
-rd_kafka_sticky_assignor_assign_cb(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas,
- const char *member_id,
- const rd_kafka_metadata_t *metadata,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- rd_kafka_assignor_topic_t **eligible_topics,
- size_t eligible_topic_cnt,
- char *errstr,
- size_t errstr_size,
- void *opaque) {
- /* FIXME: Let the cgrp pass the actual eligible partition count */
- size_t partition_cnt = member_cnt * 10; /* FIXME */
-
- /* Map of subscriptions. This is \p members turned into a map. */
- map_str_toppar_list_t subscriptions =
- RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
- NULL /* refs members.rkgm_member_id */,
- NULL /* refs members.rkgm_subscription */);
-
- /* Map member to current assignment */
- map_str_toppar_list_t currentAssignment =
- RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
- NULL /* refs members.rkgm_member_id */,
- rd_kafka_topic_partition_list_destroy_free);
-
- /* Map partition to ConsumerGenerationPair */
- map_toppar_cgpair_t prevAssignment =
- RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free,
- ConsumerGenerationPair_destroy);
-
- /* Partition assignment movements between consumers */
- PartitionMovements_t partitionMovements;
-
- rd_bool_t isFreshAssignment;
-
- /* Mapping of all topic partitions to all consumers that can be
- * assigned to them.
- * Value is an rd_list_t* with elements referencing the \p members
- * \c rkgm_member_id->str. */
- map_toppar_list_t partition2AllPotentialConsumers = RD_MAP_INITIALIZER(
- partition_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free, rd_list_destroy_free);
-
- /* Mapping of all consumers to all potential topic partitions that
- * can be assigned to them. */
- map_str_toppar_list_t consumer2AllPotentialPartitions =
- RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
- NULL,
- rd_kafka_topic_partition_list_destroy_free);
-
- /* Mapping of partition to current consumer. */
- map_toppar_str_t currentPartitionConsumer =
- RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp,
- rd_kafka_topic_partition_hash,
- rd_kafka_topic_partition_destroy_free,
- NULL /* refs members.rkgm_member_id->str */);
-
- rd_kafka_topic_partition_list_t *sortedPartitions;
- rd_kafka_topic_partition_list_t *unassignedPartitions;
- rd_list_t sortedCurrentSubscriptions;
-
- rd_bool_t revocationRequired = rd_false;
-
- /* Iteration variables */
- const char *consumer;
- rd_kafka_topic_partition_list_t *partitions;
- const rd_map_elem_t *elem;
- int i;
-
- /* Initialize PartitionMovements */
- PartitionMovements_init(&partitionMovements, eligible_topic_cnt);
-
- /* Prepopulate current and previous assignments */
- prepopulateCurrentAssignments(
- rk, members, member_cnt, &subscriptions, &currentAssignment,
- &prevAssignment, &currentPartitionConsumer,
- &consumer2AllPotentialPartitions, partition_cnt);
-
- isFreshAssignment = RD_MAP_IS_EMPTY(&currentAssignment);
-
- /* Populate partition2AllPotentialConsumers and
- * consumer2AllPotentialPartitions maps by each eligible topic. */
- for (i = 0; i < (int)eligible_topic_cnt; i++)
- populatePotentialMaps(
- eligible_topics[i], &partition2AllPotentialConsumers,
- &consumer2AllPotentialPartitions, partition_cnt);
-
-
- /* Sort valid partitions to minimize partition movements. */
- sortedPartitions = sortPartitions(
- rk, &currentAssignment, &prevAssignment, isFreshAssignment,
- &partition2AllPotentialConsumers, &consumer2AllPotentialPartitions);
-
-
- /* All partitions that need to be assigned (initially set to all
- * partitions but adjusted in the following loop) */
- unassignedPartitions =
- rd_kafka_topic_partition_list_copy(sortedPartitions);
-
- RD_MAP_FOREACH(consumer, partitions, &currentAssignment) {
- if (!RD_MAP_GET(&subscriptions, consumer)) {
- /* If a consumer that existed before
- * (and had some partition assignments) is now removed,
- * remove it from currentAssignment and its
- * partitions from currentPartitionConsumer */
-
- rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
- "Removing now non-existent consumer %s "
- "with %d previously assigned partitions",
- consumer, partitions->cnt);
-
-
- for (i = 0; i < partitions->cnt; i++) {
- const rd_kafka_topic_partition_t *partition =
- &partitions->elems[i];
- RD_MAP_DELETE(&currentPartitionConsumer,
- partition);
- }
-
- /* FIXME: The delete could be optimized by passing the
- * underlying elem_t. */
- RD_MAP_DELETE(&currentAssignment, consumer);
-
- } else {
- /* Otherwise (the consumer still exists) */
-
- for (i = 0; i < partitions->cnt; i++) {
- const rd_kafka_topic_partition_t *partition =
- &partitions->elems[i];
- rd_bool_t remove_part = rd_false;
-
- if (!RD_MAP_GET(
- &partition2AllPotentialConsumers,
- partition)) {
- /* If this partition of this consumer
- * no longer exists remove it from
- * currentAssignment of the consumer */
- remove_part = rd_true;
- RD_MAP_DELETE(&currentPartitionConsumer,
- partition);
-
- } else if (!rd_kafka_topic_partition_list_find(
- RD_MAP_GET(&subscriptions,
- consumer),
- partition->topic,
- RD_KAFKA_PARTITION_UA)) {
- /* If this partition cannot remain
- * assigned to its current consumer
- * because the consumer is no longer
- * subscribed to its topic, remove it
- * from the currentAssignment of the
- * consumer. */
- remove_part = rd_true;
- revocationRequired = rd_true;
- } else {
- /* Otherwise, remove the topic partition
- * from those that need to be assigned
- * only if its current consumer is still
- * subscribed to its topic (because it
- * is already assigned and we would want
- * to preserve that assignment as much
- * as possible). */
- rd_kafka_topic_partition_list_del(
- unassignedPartitions,
- partition->topic,
- partition->partition);
- }
-
- if (remove_part) {
- rd_kafka_topic_partition_list_del_by_idx(
- partitions, i);
- i--; /* Since the current element was
- * removed we need the next for
- * loop iteration to stay at the
- * same index. */
- }
- }
- }
- }
-
-
- /* At this point we have preserved all valid topic partition to consumer
- * assignments and removed all invalid topic partitions and invalid
- * consumers.
- * Now we need to assign unassignedPartitions to consumers so that the
- * topic partition assignments are as balanced as possible. */
-
- /* An ascending sorted list of consumers based on how many topic
- * partitions are already assigned to them. Each list element
- * references the rd_map_elem_t* from the currentAssignment map. */
- rd_list_init(&sortedCurrentSubscriptions,
- (int)RD_MAP_CNT(&currentAssignment), NULL);
-
- RD_MAP_FOREACH_ELEM(elem, &currentAssignment.rmap)
- rd_list_add(&sortedCurrentSubscriptions, (void *)elem);
-
- rd_list_sort(&sortedCurrentSubscriptions,
- sort_by_map_elem_val_toppar_list_cnt);
-
- /* Balance the available partitions across consumers */
- balance(rk, &partitionMovements, &currentAssignment, &prevAssignment,
- sortedPartitions, unassignedPartitions,
- &sortedCurrentSubscriptions, &consumer2AllPotentialPartitions,
- &partition2AllPotentialConsumers, &currentPartitionConsumer,
- revocationRequired);
-
- /* Transfer currentAssignment (now updated) to each member's
- * assignment. */
- assignToMembers(&currentAssignment, members, member_cnt);
-
-
- rd_list_destroy(&sortedCurrentSubscriptions);
-
- PartitionMovements_destroy(&partitionMovements);
-
- rd_kafka_topic_partition_list_destroy(unassignedPartitions);
- rd_kafka_topic_partition_list_destroy(sortedPartitions);
-
- RD_MAP_DESTROY(&currentPartitionConsumer);
- RD_MAP_DESTROY(&consumer2AllPotentialPartitions);
- RD_MAP_DESTROY(&partition2AllPotentialConsumers);
- RD_MAP_DESTROY(&prevAssignment);
- RD_MAP_DESTROY(&currentAssignment);
- RD_MAP_DESTROY(&subscriptions);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
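-
-/* Summary of the flow above, mirroring the Java
- * AbstractStickyAssignor: 1) prepopulate current/previous assignments
- * from the members' owned partitions, 2) build the potential
- * partition/consumer maps, 3) sort partitions for minimal movement,
- * 4) drop removed consumers and stale or revoked partitions,
- * 5) balance(), 6) copy the result back to the members. */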
-
-
-
-/**
- * @brief Sticky assignor on_assignment callback: stores the member's
- * new assignment and generation-id in the assignor state for
- * use as the previous assignment in the next rebalance.
- */
-static void rd_kafka_sticky_assignor_on_assignment_cb(
- const rd_kafka_assignor_t *rkas,
- void **assignor_state,
- const rd_kafka_topic_partition_list_t *partitions,
- const rd_kafkap_bytes_t *assignment_userdata,
- const rd_kafka_consumer_group_metadata_t *rkcgm) {
- rd_kafka_sticky_assignor_state_t *state =
- (rd_kafka_sticky_assignor_state_t *)*assignor_state;
-
- if (!state)
- state = rd_calloc(1, sizeof(*state));
- else
- rd_kafka_topic_partition_list_destroy(state->prev_assignment);
-
- state->prev_assignment = rd_kafka_topic_partition_list_copy(partitions);
- state->generation_id = rkcgm->generation_id;
-
- *assignor_state = state;
-}
-
-/**
- * @brief Serialize the assignor state (previous assignment and
- * generation-id) as UserData in the consumer protocol member
- * metadata.
- */
-static rd_kafkap_bytes_t *rd_kafka_sticky_assignor_get_metadata(
- const rd_kafka_assignor_t *rkas,
- void *assignor_state,
- const rd_list_t *topics,
- const rd_kafka_topic_partition_list_t *owned_partitions) {
- rd_kafka_sticky_assignor_state_t *state;
- rd_kafka_buf_t *rkbuf;
- rd_kafkap_bytes_t *metadata;
- rd_kafkap_bytes_t *kbytes;
- size_t len;
-
- /*
- * UserData (Version: 1) => [previous_assignment] generation
- * previous_assignment => topic [partitions]
- * topic => STRING
- * partitions => partition
- * partition => INT32
- * generation => INT32
- *
- * If there is no previous assignment, UserData is NULL.
- */
-
- if (!assignor_state) {
- return rd_kafka_consumer_protocol_member_metadata_new(
- topics, NULL, 0, owned_partitions);
- }
-
- state = (rd_kafka_sticky_assignor_state_t *)assignor_state;
-
- rkbuf = rd_kafka_buf_new(1, 100);
- rd_assert(state->prev_assignment != NULL);
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- rd_kafka_buf_write_topic_partitions(rkbuf, state->prev_assignment,
- rd_false /*skip invalid offsets*/,
- rd_false /*any offset*/, fields);
- rd_kafka_buf_write_i32(rkbuf, state->generation_id);
-
- /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
- rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
- len = rd_slice_remains(&rkbuf->rkbuf_reader);
- kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len);
- rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len);
- rd_kafka_buf_destroy(rkbuf);
-
- metadata = rd_kafka_consumer_protocol_member_metadata_new(
- topics, kbytes->data, kbytes->len, owned_partitions);
-
- rd_kafkap_bytes_destroy(kbytes);
-
- return metadata;
-}
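-
-/* Illustrative encoding example (hypothetical state): for
- * prev_assignment = { t1: [0, 1] } and generation_id = 5 the UserData
- * is, roughly, one assignment entry (STRING "t1" followed by an INT32
- * array [0, 1]) followed by INT32 5; the exact layout is produced by
- * rd_kafka_buf_write_topic_partitions() above. */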
-
-
-/**
- * @brief Destroy assignor state
- */
-static void rd_kafka_sticky_assignor_state_destroy(void *assignor_state) {
- rd_kafka_sticky_assignor_state_t *state =
- (rd_kafka_sticky_assignor_state_t *)assignor_state;
-
- rd_assert(assignor_state);
-
- rd_kafka_topic_partition_list_destroy(state->prev_assignment);
- rd_free(state);
-}
-
-
-
-/**
- * @name Sticky assignor unit tests
- *
- *
- * These are based on AbstractStickyAssignorTest.java
- *
- *
- *
- */
-
-
-
-/**
- * @brief Set a member's owned partitions based on its assignment.
- *
- * For use between assignor_run() calls. This mimics a consumer
- * receiving its new assignment and including it in the next rebalance
- * as its owned partitions.
- */
-static void ut_set_owned(rd_kafka_group_member_t *rkgm) {
- if (rkgm->rkgm_owned)
- rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
-
- rkgm->rkgm_owned =
- rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment);
-}
-
-
-/**
- * @brief Verify assignment validity and balance.
- *
- * @remark Also updates the members owned partitions to the assignment.
- */
-
-static int verifyValidityAndBalance0(const char *func,
- int line,
- rd_kafka_group_member_t *members,
- size_t member_cnt,
- const rd_kafka_metadata_t *metadata) {
- int fails = 0;
- int i;
- rd_bool_t verbose = rd_false; /* Enable for troubleshooting */
-
- RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", func, line,
- (int)member_cnt);
-
- for (i = 0; i < (int)member_cnt; i++) {
- const char *consumer = members[i].rkgm_member_id->str;
- const rd_kafka_topic_partition_list_t *partitions =
- members[i].rkgm_assignment;
- int p, j;
-
- if (verbose)
- RD_UT_SAY(
- "%s:%d: "
- "consumer \"%s\", %d subscribed topic(s), "
- "%d assigned partition(s):",
- func, line, consumer,
- members[i].rkgm_subscription->cnt, partitions->cnt);
-
- for (p = 0; p < partitions->cnt; p++) {
- const rd_kafka_topic_partition_t *partition =
- &partitions->elems[p];
-
- if (verbose)
- RD_UT_SAY("%s:%d: %s [%" PRId32 "]", func,
- line, partition->topic,
- partition->partition);
-
- if (!rd_kafka_topic_partition_list_find(
- members[i].rkgm_subscription, partition->topic,
- RD_KAFKA_PARTITION_UA)) {
- RD_UT_WARN("%s [%" PRId32
- "] is assigned to "
- "%s but it is not subscribed to "
- "that topic",
- partition->topic,
- partition->partition, consumer);
- fails++;
- }
- }
-
- /* Update the member's owned partitions to match
- * the assignment. */
- ut_set_owned(&members[i]);
-
- if (i == (int)member_cnt - 1)
- continue;
-
- for (j = i + 1; j < (int)member_cnt; j++) {
- const char *otherConsumer =
- members[j].rkgm_member_id->str;
- const rd_kafka_topic_partition_list_t *otherPartitions =
- members[j].rkgm_assignment;
- rd_bool_t balanced =
- abs(partitions->cnt - otherPartitions->cnt) <= 1;
-
- for (p = 0; p < partitions->cnt; p++) {
- const rd_kafka_topic_partition_t *partition =
- &partitions->elems[p];
-
- if (rd_kafka_topic_partition_list_find(
- otherPartitions, partition->topic,
- partition->partition)) {
- RD_UT_WARN(
- "Consumer %s and %s are both "
- "assigned %s [%" PRId32 "]",
- consumer, otherConsumer,
- partition->topic,
- partition->partition);
- fails++;
- }
-
-
- /* If assignment is imbalanced and this topic
- * is also subscribed by the other consumer
- * it means the assignment strategy failed to
- * properly balance the partitions. */
- if (!balanced &&
- rd_kafka_topic_partition_list_find_topic(
- otherPartitions, partition->topic)) {
- RD_UT_WARN(
- "Some %s partition(s) can be "
- "moved from "
- "%s (%d partition(s)) to "
- "%s (%d partition(s)) to "
- "achieve a better balance",
- partition->topic, consumer,
- partitions->cnt, otherConsumer,
- otherPartitions->cnt);
- fails++;
- }
- }
- }
- }
-
- RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", func, line,
- fails);
-
- return 0;
-}
-
-
-#define verifyValidityAndBalance(members, member_cnt, metadata) \
- do { \
- if (verifyValidityAndBalance0(__FUNCTION__, __LINE__, members, \
- member_cnt, metadata)) \
- return 1; \
- } while (0)
-
-
-/**
- * @brief Checks that all assigned partitions are fully balanced.
- *
- * Only works for symmetrical subscriptions.
- */
-static int isFullyBalanced0(const char *function,
- int line,
- const rd_kafka_group_member_t *members,
- size_t member_cnt) {
- int min_assignment = INT_MAX;
- int max_assignment = -1;
- size_t i;
-
- for (i = 0; i < member_cnt; i++) {
- int size = members[i].rkgm_assignment->cnt;
- if (size < min_assignment)
- min_assignment = size;
- if (size > max_assignment)
- max_assignment = size;
- }
-
- RD_UT_ASSERT(max_assignment - min_assignment <= 1,
- "%s:%d: Assignment not balanced: min %d, max %d", function,
- line, min_assignment, max_assignment);
-
- return 0;
-}
-
-#define isFullyBalanced(members, member_cnt) \
- do { \
- if (isFullyBalanced0(__FUNCTION__, __LINE__, members, \
- member_cnt)) \
- return 1; \
- } while (0)
-
-
-static void
-ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions) {
- int i;
-
- for (i = 0; i < partitions->cnt; i++)
- RD_UT_SAY(" %s [%" PRId32 "]", partitions->elems[i].topic,
- partitions->elems[i].partition);
-}
-
-
-
-/**
- * @brief Verify that member's assignment matches the expected partitions.
- *
- * The va-list is a NULL-terminated list of (const char *topic, int partition)
- * tuples.
- *
- * @returns 0 on success, else raises a unittest error and returns 1.
- */
-static int verifyAssignment0(const char *function,
- int line,
- rd_kafka_group_member_t *rkgm,
- ...) {
- va_list ap;
- int cnt = 0;
- const char *topic;
- int fails = 0;
-
- va_start(ap, rkgm);
- while ((topic = va_arg(ap, const char *))) {
- int partition = va_arg(ap, int);
- cnt++;
-
- if (!rd_kafka_topic_partition_list_find(rkgm->rkgm_assignment,
- topic, partition)) {
- RD_UT_WARN(
- "%s:%d: Expected %s [%d] not found in %s's "
- "assignment (%d partition(s))",
- function, line, topic, partition,
- rkgm->rkgm_member_id->str,
- rkgm->rkgm_assignment->cnt);
- fails++;
- }
- }
- va_end(ap);
-
- if (cnt != rkgm->rkgm_assignment->cnt) {
- RD_UT_WARN(
- "%s:%d: "
- "Expected %d assigned partition(s) for %s, not %d",
- function, line, cnt, rkgm->rkgm_member_id->str,
- rkgm->rkgm_assignment->cnt);
- fails++;
- }
-
- if (fails)
- ut_print_toppar_list(rkgm->rkgm_assignment);
-
- RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line);
-
- return 0;
-}
-
-#define verifyAssignment(rkgm, ...) \
- do { \
- if (verifyAssignment0(__FUNCTION__, __LINE__, rkgm, \
- __VA_ARGS__)) \
- return 1; \
- } while (0)
-
-
-
-/**
- * @brief Initialize group member struct for testing.
- *
- * va-args is a NULL-terminated list of (const char *) topics.
- *
- * Use rd_kafka_group_member_clear() to free fields.
- */
-static void
-ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...) {
- va_list ap;
- const char *topic;
-
- memset(rkgm, 0, sizeof(*rkgm));
-
- rkgm->rkgm_member_id = rd_kafkap_str_new(member_id, -1);
- rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
- rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
-
- rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);
-
- va_start(ap, member_id);
- while ((topic = va_arg(ap, const char *)))
- rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription,
- topic, RD_KAFKA_PARTITION_UA);
- va_end(ap);
-
- rkgm->rkgm_assignment =
- rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
-}
-
-
-
-static int ut_testOneConsumerNoTopic(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata = rd_kafka_metadata_new_topic_mock(NULL, 0);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], NULL);
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testOneConsumerNonexistentTopic(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 0);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], NULL);
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-
-static int ut_testOneConsumerOneTopic(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 3,
- "expected assignment of 3 partitions, got %d partition(s)",
- members[0].rkgm_assignment->cnt);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
- NULL);
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testOnlyAssignsPartitionsFromSubscribedTopics(
- rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
-
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata =
- rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
- NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testOneConsumerMultipleTopics(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata =
- rd_kafka_metadata_new_topic_mockv(2, "topic1", 1, "topic2", 2);
- ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic2", 0, "topic2", 1,
- NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-static int
-ut_testTwoConsumersOneTopicOnePartition(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[2];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 1);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, NULL);
- verifyAssignment(&members[1], NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int
-ut_testTwoConsumersOneTopicTwoPartitions(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[2];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, NULL);
- verifyAssignment(&members[1], "topic1", 1, NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testMultipleConsumersMixedTopicSubscriptions(
- rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
-
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[3];
-
- metadata =
- rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 2);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL);
- ut_init_member(&members[2], "consumer3", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
- verifyAssignment(&members[1], "topic2", 0, "topic2", 1, NULL);
- verifyAssignment(&members[2], "topic1", 1, NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_group_member_clear(&members[2]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int
-ut_testTwoConsumersTwoTopicsSixPartitions(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[2];
-
- metadata =
- rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3);
- ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
- NULL);
- verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2,
- NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testAddRemoveConsumerOneTopic(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[2];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1,
- errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
- NULL);
-
- verifyValidityAndBalance(members, 1, metadata);
- isFullyBalanced(members, 1);
-
- /* Add consumer2 */
- ut_init_member(&members[1], "consumer2", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 1, "topic1", 2, NULL);
- verifyAssignment(&members[1], "topic1", 0, NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
- // FIXME: isSticky();
-
-
- /* Remove consumer1 */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 1,
- errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2,
- NULL);
-
- verifyValidityAndBalance(&members[1], 1, metadata);
- isFullyBalanced(&members[1], 1);
- // FIXME: isSticky();
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-/**
- * This unit test performs sticky assignment for a scenario that the
- * round robin assignor handles poorly.
- * Topics (partitions per topic):
- * - topic1 (2), topic2 (1), topic3 (2), topic4 (1), topic5 (2)
- * Subscriptions:
- * - consumer1: topic1, topic2, topic3, topic4, topic5
- * - consumer2: topic1, topic3, topic5
- * - consumer3: topic1, topic3, topic5
- * - consumer4: topic1, topic2, topic3, topic4, topic5
- * Round Robin Assignment Result:
- * - consumer1: topic1-0, topic3-0, topic5-0
- * - consumer2: topic1-1, topic3-1, topic5-1
- * - consumer3:
- * - consumer4: topic2-0, topic4-0
- * Sticky Assignment Result:
- * - consumer1: topic2-0, topic3-0
- * - consumer2: topic1-0, topic3-1
- * - consumer3: topic1-1, topic5-0
- * - consumer4: topic4-0, topic5-1
- */
-static int
-ut_testPoorRoundRobinAssignmentScenario(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[4];
-
- metadata = rd_kafka_metadata_new_topic_mockv(
- 5, "topic1", 2, "topic2", 1, "topic3", 2, "topic4", 1, "topic5", 2);
-
- ut_init_member(&members[0], "consumer1", "topic1", "topic2", "topic3",
- "topic4", "topic5", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", "topic3", "topic5",
- NULL);
- ut_init_member(&members[2], "consumer3", "topic1", "topic3", "topic5",
- NULL);
- ut_init_member(&members[3], "consumer4", "topic1", "topic2", "topic3",
- "topic4", "topic5", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic2", 0, "topic3", 0, NULL);
- verifyAssignment(&members[1], "topic1", 0, "topic3", 1, NULL);
- verifyAssignment(&members[2], "topic1", 1, "topic5", 0, NULL);
- verifyAssignment(&members[3], "topic4", 0, "topic5", 1, NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_group_member_clear(&members[2]);
- rd_kafka_group_member_clear(&members[3]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-
-static int ut_testAddRemoveTopicTwoConsumers(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[2];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
- ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
- verifyAssignment(&members[1], "topic1", 1, NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- /*
- * Add topic2
- */
- RD_UT_SAY("Adding topic2");
- rd_kafka_metadata_destroy(metadata);
- metadata =
- rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
- NULL);
- verifyAssignment(&members[1], "topic1", 1, "topic2", 2, "topic2", 0,
- NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
- // FIXME: isSticky();
-
-
- /*
- * Remove topic1
- */
- RD_UT_SAY("Removing topic1");
- rd_kafka_metadata_destroy(metadata);
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic2", 3);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyAssignment(&members[0], "topic2", 1, NULL);
- verifyAssignment(&members[1], "topic2", 0, "topic2", 2, NULL);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
- // FIXME: isSticky();
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_group_member_clear(&members[1]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int
-ut_testReassignmentAfterOneConsumerLeaves(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[19];
- int member_cnt = RD_ARRAYSIZE(members);
- rd_kafka_metadata_topic_t mt[19];
- int topic_cnt = RD_ARRAYSIZE(mt);
- int i;
-
- for (i = 0; i < topic_cnt; i++) {
- char topic[10];
- rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
- rd_strdupa(&mt[i].topic, topic);
- mt[i].partition_cnt = i + 1;
- }
-
- metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt);
-
-
- for (i = 1; i <= member_cnt; i++) {
- char name[20];
- rd_kafka_topic_partition_list_t *subscription =
- rd_kafka_topic_partition_list_new(i);
- int j;
- for (j = 1; j <= i; j++) {
- char topic[16];
- rd_snprintf(topic, sizeof(topic), "topic%d", j);
- rd_kafka_topic_partition_list_add(
- subscription, topic, RD_KAFKA_PARTITION_UA);
- }
- rd_snprintf(name, sizeof(name), "consumer%d", i);
- ut_init_member(&members[i - 1], name, NULL);
- rd_kafka_topic_partition_list_destroy(
- members[i - 1].rkgm_subscription);
- members[i - 1].rkgm_subscription = subscription;
- }
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
-
-
- /*
- * Remove consumer10.
- */
- rd_kafka_group_member_clear(&members[9]);
- memmove(&members[9], &members[10],
- sizeof(*members) * (member_cnt - 10));
- member_cnt--;
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
- // FIXME: isSticky();
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int
-ut_testReassignmentAfterOneConsumerAdded(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[9];
- int member_cnt = RD_ARRAYSIZE(members);
- int i;
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 20);
-
- for (i = 1; i <= member_cnt; i++) {
- char name[20];
- rd_kafka_topic_partition_list_t *subscription =
- rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(subscription, "topic1",
- RD_KAFKA_PARTITION_UA);
- rd_snprintf(name, sizeof(name), "consumer%d", i);
- ut_init_member(&members[i - 1], name, NULL);
- rd_kafka_topic_partition_list_destroy(
- members[i - 1].rkgm_subscription);
- members[i - 1].rkgm_subscription = subscription;
- }
-
- member_cnt--; /* Skip one consumer */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
-
-
- /*
- * Add consumer.
- */
- member_cnt++;
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
- // FIXME: isSticky();
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testSameSubscriptions(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[9];
- int member_cnt = RD_ARRAYSIZE(members);
- rd_kafka_metadata_topic_t mt[15];
- int topic_cnt = RD_ARRAYSIZE(mt);
- rd_kafka_topic_partition_list_t *subscription =
- rd_kafka_topic_partition_list_new(topic_cnt);
- int i;
-
- for (i = 0; i < topic_cnt; i++) {
- char topic[10];
- rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
- rd_strdupa(&mt[i].topic, topic);
- mt[i].partition_cnt = i + 1;
- rd_kafka_topic_partition_list_add(subscription, topic,
- RD_KAFKA_PARTITION_UA);
- }
-
- metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt);
-
- for (i = 1; i <= member_cnt; i++) {
- char name[16];
- rd_snprintf(name, sizeof(name), "consumer%d", i);
- ut_init_member(&members[i - 1], name, NULL);
- rd_kafka_topic_partition_list_destroy(
- members[i - 1].rkgm_subscription);
- members[i - 1].rkgm_subscription =
- rd_kafka_topic_partition_list_copy(subscription);
- }
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
-
- /*
- * Remove consumer5
- */
- rd_kafka_group_member_clear(&members[5]);
- memmove(&members[5], &members[6], sizeof(*members) * (member_cnt - 6));
- member_cnt--;
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
- // FIXME: isSticky();
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
- rd_kafka_topic_partition_list_destroy(subscription);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testLargeAssignmentWithMultipleConsumersLeaving(
- rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
-
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[200];
- int member_cnt = RD_ARRAYSIZE(members);
- rd_kafka_metadata_topic_t mt[40];
- int topic_cnt = RD_ARRAYSIZE(mt);
- int i;
-
- for (i = 0; i < topic_cnt; i++) {
- char topic[10];
- rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
- rd_strdupa(&mt[i].topic, topic);
- mt[i].partition_cnt = i + 1;
- }
-
- metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt);
-
- for (i = 0; i < member_cnt; i++) {
- /* Java tests use a random set; this is more deterministic. */
- int sub_cnt = ((i + 1) * 17) % topic_cnt;
- rd_kafka_topic_partition_list_t *subscription =
- rd_kafka_topic_partition_list_new(sub_cnt);
- char name[16];
- int j;
-
- /* Subscribe to a subset of topics */
- for (j = 0; j < sub_cnt; j++)
- rd_kafka_topic_partition_list_add(
- subscription, metadata->topics[j].topic,
- RD_KAFKA_PARTITION_UA);
-
- rd_snprintf(name, sizeof(name), "consumer%d", i + 1);
- ut_init_member(&members[i], name, NULL);
- rd_kafka_topic_partition_list_destroy(
- members[i].rkgm_subscription);
- members[i].rkgm_subscription = subscription;
- }
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
-
- /*
- * Remove every 4th consumer (~50)
- */
- for (i = member_cnt - 1; i >= 0; i -= 4) {
- rd_kafka_group_member_clear(&members[i]);
- memmove(&members[i], &members[i + 1],
- sizeof(*members) * (member_cnt - (i + 1)));
- member_cnt--;
- }
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
- // FIXME: isSticky();
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testNewSubscription(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[3];
- int member_cnt = RD_ARRAYSIZE(members);
- int i;
-
- metadata = rd_kafka_metadata_new_topic_mockv(
- 5, "topic1", 1, "topic2", 2, "topic3", 3, "topic4", 4, "topic5", 5);
-
- for (i = 0; i < member_cnt; i++) {
- char name[16];
- int j;
-
- rd_snprintf(name, sizeof(name), "consumer%d", i);
- ut_init_member(&members[i], name, NULL);
-
- rd_kafka_topic_partition_list_destroy(
- members[i].rkgm_subscription);
- members[i].rkgm_subscription =
- rd_kafka_topic_partition_list_new(5);
-
- for (j = metadata->topic_cnt - (1 + i); j >= 0; j--)
- rd_kafka_topic_partition_list_add(
- members[i].rkgm_subscription,
- metadata->topics[j].topic, RD_KAFKA_PARTITION_UA);
- }
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- /*
- * Add topic1 to consumer1's subscription
- */
- RD_UT_SAY("Adding topic1 to consumer1");
- rd_kafka_topic_partition_list_add(members[0].rkgm_subscription,
- "topic1", RD_KAFKA_PARTITION_UA);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
- // FIXME: isSticky();
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testMoveExistingAssignments(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[4];
- int member_cnt = RD_ARRAYSIZE(members);
- rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
- int i;
- int fails = 0;
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
-
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", NULL);
- ut_init_member(&members[2], "consumer3", "topic1", NULL);
- ut_init_member(&members[3], "consumer4", "topic1", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, member_cnt, metadata);
-
- for (i = 0; i < member_cnt; i++) {
- if (members[i].rkgm_assignment->cnt > 1) {
- RD_UT_WARN("%s assigned %d partitions, expected <= 1",
- members[i].rkgm_member_id->str,
- members[i].rkgm_assignment->cnt);
- fails++;
- } else if (members[i].rkgm_assignment->cnt == 1) {
- assignments[i] = rd_kafka_topic_partition_list_copy(
- members[i].rkgm_assignment);
- }
- }
-
- /*
- * Remove potential group leader consumer1
- */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1],
- member_cnt - 1, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(&members[1], member_cnt - 1, metadata);
- // FIXME: isSticky()
-
- for (i = 1; i < member_cnt; i++) {
- if (members[i].rkgm_assignment->cnt != 1) {
- RD_UT_WARN("%s assigned %d partitions, expected 1",
- members[i].rkgm_member_id->str,
- members[i].rkgm_assignment->cnt);
- fails++;
- } else if (assignments[i] &&
- !rd_kafka_topic_partition_list_find(
- assignments[i],
- members[i].rkgm_assignment->elems[0].topic,
- members[i]
- .rkgm_assignment->elems[0]
- .partition)) {
- RD_UT_WARN(
- "Stickiness was not honored for %s, "
- "%s [%" PRId32 "] not in previous assignment",
- members[i].rkgm_member_id->str,
- members[i].rkgm_assignment->elems[0].topic,
- members[i].rkgm_assignment->elems[0].partition);
- fails++;
- }
- }
-
- RD_UT_ASSERT(!fails, "See previous errors");
-
-
- for (i = 0; i < member_cnt; i++) {
- rd_kafka_group_member_clear(&members[i]);
- if (assignments[i])
- rd_kafka_topic_partition_list_destroy(assignments[i]);
- }
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-
-static int ut_testStickiness(rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[3];
- int member_cnt = RD_ARRAYSIZE(members);
- int i;
-
- metadata = rd_kafka_metadata_new_topic_mockv(
- 6, "topic1", 1, "topic2", 1, "topic3", 1, "topic4", 1, "topic5", 1,
- "topic6", 1);
-
- ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
- rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
- members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
- 0);
-
- ut_init_member(&members[1], "consumer2", "topic1", "topic2", "topic3",
- "topic4", NULL);
- rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
- members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic2",
- 0);
- rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic3",
- 0);
-
- ut_init_member(&members[2], "consumer3", "topic4", "topic5", "topic6",
- NULL);
- rd_kafka_topic_partition_list_destroy(members[2].rkgm_assignment);
- members[2].rkgm_assignment = rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic4",
- 0);
- rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic5",
- 0);
- rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic6",
- 0);
-
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
-
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-/**
- * @brief Verify stickiness across three rebalances.
- */
-static int ut_testStickiness2(rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[3];
- int member_cnt = RD_ARRAYSIZE(members);
- int i;
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 6);
-
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
- ut_init_member(&members[1], "consumer2", "topic1", NULL);
- ut_init_member(&members[2], "consumer3", "topic1", NULL);
-
- /* Just consumer1 */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1,
- errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, 1, metadata);
- isFullyBalanced(members, 1);
- verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
- "topic1", 3, "topic1", 4, "topic1", 5, NULL);
-
- /* consumer1 and consumer2 */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 2,
- errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, 2, metadata);
- isFullyBalanced(members, 2);
- verifyAssignment(&members[0], "topic1", 3, "topic1", 4, "topic1", 5,
- NULL);
- verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2,
- NULL);
-
- /* Run it twice, should be stable. */
- for (i = 0; i < 2; i++) {
- /* consumer1, consumer2, and consumer3 */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata,
- members, 3, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, 3, metadata);
- isFullyBalanced(members, 3);
- verifyAssignment(&members[0], "topic1", 4, "topic1", 5, NULL);
- verifyAssignment(&members[1], "topic1", 1, "topic1", 2, NULL);
- verifyAssignment(&members[2], "topic1", 0, "topic1", 3, NULL);
- }
-
- /* Remove consumer1 */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 2,
- errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(&members[1], 2, metadata);
- isFullyBalanced(&members[1], 2);
- verifyAssignment(&members[1], "topic1", 1, "topic1", 2, "topic1", 5,
- NULL);
- verifyAssignment(&members[2], "topic1", 0, "topic1", 3, "topic1", 4,
- NULL);
-
- /* Remove consumer2 */
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[2], 1,
- errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(&members[2], 1, metadata);
- isFullyBalanced(&members[2], 1);
- verifyAssignment(&members[2], "topic1", 0, "topic1", 1, "topic1", 2,
- "topic1", 3, "topic1", 4, "topic1", 5, NULL);
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int
-ut_testAssignmentUpdatedForDeletedTopic(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata =
- rd_kafka_metadata_new_topic_mockv(2, "topic1", 1, "topic3", 100);
- ut_init_member(&members[0], "consumer1", "topic1", "topic2", "topic3",
- NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 + 100,
- "Expected %d assigned partitions, not %d", 1 + 100,
- members[0].rkgm_assignment->cnt);
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted(
- rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
-
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[1];
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
-
- ut_init_member(&members[0], "consumer1", "topic", NULL);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- /*
- * Remove topic
- */
- rd_kafka_metadata_destroy(metadata);
- metadata = rd_kafka_metadata_new_topic_mock(NULL, 0);
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- RD_ARRAYSIZE(members), errstr,
- sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
-
- rd_kafka_group_member_clear(&members[0]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-
-static int
-ut_testConflictingPreviousAssignments(rd_kafka_t *rk,
- const rd_kafka_assignor_t *rkas) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_metadata_t *metadata;
- rd_kafka_group_member_t members[2];
- int member_cnt = RD_ARRAYSIZE(members);
- int i;
-
- // FIXME: removed from the Java test suite, and fails for us; unclear why.
- RD_UT_PASS();
-
- metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2);
-
- /* Both consumer and consumer2 have both partitions assigned */
- ut_init_member(&members[0], "consumer1", "topic1", NULL);
- rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
- members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
- 0);
- rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
- 1);
-
- ut_init_member(&members[1], "consumer2", "topic1", NULL);
- rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
- members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
- 0);
- rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
- 1);
-
-
- err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
- member_cnt, errstr, sizeof(errstr));
- RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
-
- RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 &&
- members[1].rkgm_assignment->cnt == 1,
- "Expected consumers to have 1 partition each, "
- "not %d and %d",
- members[0].rkgm_assignment->cnt,
- members[1].rkgm_assignment->cnt);
- RD_UT_ASSERT(members[0].rkgm_assignment->elems[0].partition !=
- members[1].rkgm_assignment->elems[0].partition,
- "Expected consumers to have different partitions "
- "assigned, not same partition %" PRId32,
- members[0].rkgm_assignment->elems[0].partition);
-
- verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
- isFullyBalanced(members, RD_ARRAYSIZE(members));
- /* FIXME: isSticky() */
-
- for (i = 0; i < member_cnt; i++)
- rd_kafka_group_member_clear(&members[i]);
- rd_kafka_metadata_destroy(metadata);
-
- RD_UT_PASS();
-}
-
-/* testReassignmentWithRandomSubscriptionsAndChanges is not ported
- * from Java since random tests don't provide meaningful test coverage. */
-
-
-static int rd_kafka_sticky_assignor_unittest(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- int fails = 0;
- char errstr[256];
- rd_kafka_assignor_t *rkas;
- static int (*tests[])(rd_kafka_t *, const rd_kafka_assignor_t *) = {
- ut_testOneConsumerNoTopic,
- ut_testOneConsumerNonexistentTopic,
- ut_testOneConsumerOneTopic,
- ut_testOnlyAssignsPartitionsFromSubscribedTopics,
- ut_testOneConsumerMultipleTopics,
- ut_testTwoConsumersOneTopicOnePartition,
- ut_testTwoConsumersOneTopicTwoPartitions,
- ut_testMultipleConsumersMixedTopicSubscriptions,
- ut_testTwoConsumersTwoTopicsSixPartitions,
- ut_testAddRemoveConsumerOneTopic,
- ut_testPoorRoundRobinAssignmentScenario,
- ut_testAddRemoveTopicTwoConsumers,
- ut_testReassignmentAfterOneConsumerLeaves,
- ut_testReassignmentAfterOneConsumerAdded,
- ut_testSameSubscriptions,
- ut_testLargeAssignmentWithMultipleConsumersLeaving,
- ut_testNewSubscription,
- ut_testMoveExistingAssignments,
- ut_testStickiness,
- ut_testStickiness2,
- ut_testAssignmentUpdatedForDeletedTopic,
- ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted,
- ut_testConflictingPreviousAssignments,
- NULL,
- };
- int i;
-
-
- conf = rd_kafka_conf_new();
- if (rd_kafka_conf_set(conf, "group.id", "test", errstr,
- sizeof(errstr)) ||
- rd_kafka_conf_set(conf, "partition.assignment.strategy",
- "cooperative-sticky", errstr, sizeof(errstr)))
- RD_UT_FAIL("sticky assignor conf failed: %s", errstr);
-
- rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL,
- 0);
-
- rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
- RD_UT_ASSERT(rk, "sticky assignor client instantiation failed: %s",
- errstr);
-
- rkas = rd_kafka_assignor_find(rk, "cooperative-sticky");
- RD_UT_ASSERT(rkas, "sticky assignor not found");
-
- for (i = 0; tests[i]; i++) {
- rd_ts_t ts = rd_clock();
- int r;
-
- RD_UT_SAY("[ Test #%d ]", i);
- r = tests[i](rk, rkas);
- RD_UT_SAY("[ Test #%d ran for %.3fms ]", i,
- (double)(rd_clock() - ts) / 1000.0);
-
- RD_UT_ASSERT(!r, "^ failed");
-
- fails += r;
- }
-
- rd_kafka_destroy(rk);
-
- return fails;
-}
-
-
-/**
- * @brief Initialize and add the sticky assignor.
- */
-rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk) {
- return rd_kafka_assignor_add(rk, "consumer", "cooperative-sticky",
- RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE,
- rd_kafka_sticky_assignor_assign_cb,
- rd_kafka_sticky_assignor_get_metadata,
- rd_kafka_sticky_assignor_on_assignment_cb,
- rd_kafka_sticky_assignor_state_destroy,
- rd_kafka_sticky_assignor_unittest, NULL);
-}
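-
-/*
- * Illustrative sketch (not part of the original file): how an application
- * selects this assignor, mirroring the unit test harness above. Error
- * handling is elided and the group name is hypothetical.
- */
-#if 0 /* example only */
-static rd_kafka_t *example_new_cooperative_consumer(void) {
- char errstr[512];
- rd_kafka_conf_t *conf = rd_kafka_conf_new();
-
- rd_kafka_conf_set(conf, "group.id", "example-group", errstr,
- sizeof(errstr));
- /* Enable the cooperative incremental sticky assignor. */
- rd_kafka_conf_set(conf, "partition.assignment.strategy",
- "cooperative-sticky", errstr, sizeof(errstr));
-
- return rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
-}
-#endif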
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c
deleted file mode 100644
index 080589358..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * This is the high-level consumer API, which is mutually exclusive
- * with the legacy simple consumer.
- * Only one of these interfaces may be used on a given rd_kafka_t handle.
- */
-
-#include "rdkafka_int.h"
-
-
-rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk) {
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_SUBSCRIBE));
-}
-
-
-/** @returns 1 if the topic is invalid (bad regex, empty), else 0 if valid. */
-static size_t _invalid_topic_cb(const rd_kafka_topic_partition_t *rktpar,
- void *opaque) {
- rd_regex_t *re;
- char errstr[1];
-
- if (!*rktpar->topic)
- return 1;
-
- if (*rktpar->topic != '^')
- return 0;
-
- if (!(re = rd_regex_comp(rktpar->topic, errstr, sizeof(errstr))))
- return 1;
-
- rd_regex_destroy(re);
-
- return 0;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_subscribe(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *topics) {
-
- rd_kafka_op_t *rko;
- rd_kafka_cgrp_t *rkcg;
- rd_kafka_topic_partition_list_t *topics_cpy;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- /* Validate topics */
- if (topics->cnt == 0 || rd_kafka_topic_partition_list_sum(
- topics, _invalid_topic_cb, NULL) > 0)
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- topics_cpy = rd_kafka_topic_partition_list_copy(topics);
- if (rd_kafka_topic_partition_list_has_duplicates(
- topics_cpy, rd_true /*ignore partition field*/)) {
- rd_kafka_topic_partition_list_destroy(topics_cpy);
- return RD_KAFKA_RESP_ERR__INVALID_ARG;
- }
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE);
- rko->rko_u.subscribe.topics = topics_cpy;
-
- return rd_kafka_op_err_destroy(
- rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
-}
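-
-/*
- * Usage sketch (illustrative only, not part of the original file): topic
- * names beginning with '^' are treated as regular expressions by the
- * validator above. Error handling is elided; topic names are hypothetical.
- */
-#if 0 /* example only */
-static rd_kafka_resp_err_t example_subscribe(rd_kafka_t *rk) {
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *topics =
- rd_kafka_topic_partition_list_new(2);
-
- /* Literal topic name. */
- rd_kafka_topic_partition_list_add(topics, "orders",
- RD_KAFKA_PARTITION_UA);
- /* Regex subscription: the leading '^' marks a pattern. */
- rd_kafka_topic_partition_list_add(topics, "^metrics\\..*",
- RD_KAFKA_PARTITION_UA);
-
- err = rd_kafka_subscribe(rk, topics);
- rd_kafka_topic_partition_list_destroy(topics);
- return err;
-}
-#endif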
-
-
-rd_kafka_error_t *
-rd_kafka_assign0(rd_kafka_t *rk,
- rd_kafka_assign_method_t assign_method,
- const rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_op_t *rko;
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP,
- "Requires a consumer with group.id "
- "configured");
-
- rko = rd_kafka_op_new(RD_KAFKA_OP_ASSIGN);
-
- rko->rko_u.assign.method = assign_method;
-
- if (partitions)
- rko->rko_u.assign.partitions =
- rd_kafka_topic_partition_list_copy(partitions);
-
- return rd_kafka_op_error_destroy(
- rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_assign(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
-
- error = rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_ASSIGN, partitions);
-
- if (!error)
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
- else {
- err = rd_kafka_error_code(error);
- rd_kafka_error_destroy(error);
- }
-
- return err;
-}
-
-
-rd_kafka_error_t *
-rd_kafka_incremental_assign(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *partitions) {
- if (!partitions)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "partitions must not be NULL");
-
- return rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN,
- partitions);
-}
-
-
-rd_kafka_error_t *rd_kafka_incremental_unassign(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *partitions) {
- if (!partitions)
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
- "partitions must not be NULL");
-
- return rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN,
- partitions);
-}
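-
-/*
- * Rebalance callback sketch (illustrative only, not part of the original
- * file): shows the intended pairing of rd_kafka_rebalance_protocol() with
- * the incremental assign/unassign calls above for a consumer configured
- * with a rebalance_cb. Error reporting is elided.
- */
-#if 0 /* example only */
-static void example_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque) {
- const char *proto = rd_kafka_rebalance_protocol(rk);
- rd_bool_t cooperative = proto && !strcmp(proto, "COOPERATIVE");
- rd_kafka_error_t *error = NULL;
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- if (cooperative)
- error = rd_kafka_incremental_assign(rk, partitions);
- else
- rd_kafka_assign(rk, partitions);
- } else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
- if (cooperative)
- error = rd_kafka_incremental_unassign(rk, partitions);
- else
- rd_kafka_assign(rk, NULL);
- }
-
- if (error)
- rd_kafka_error_destroy(error);
-}
-#endif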
-
-
-int rd_kafka_assignment_lost(rd_kafka_t *rk) {
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return 0;
-
- return rd_kafka_cgrp_assignment_is_lost(rkcg) == rd_true;
-}
-
-
-const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk) {
- rd_kafka_op_t *rko;
- rd_kafka_cgrp_t *rkcg;
- const char *result;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return NULL;
-
- rko = rd_kafka_op_req2(rkcg->rkcg_ops,
- RD_KAFKA_OP_GET_REBALANCE_PROTOCOL);
-
- if (!rko)
- return NULL;
- else if (rko->rko_err) {
- rd_kafka_op_destroy(rko);
- return NULL;
- }
-
- result = rko->rko_u.rebalance_protocol.str;
-
- rd_kafka_op_destroy(rko);
-
- return result;
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_assignment(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t **partitions) {
- rd_kafka_op_t *rko;
- rd_kafka_resp_err_t err;
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_ASSIGNMENT);
- if (!rko)
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- err = rko->rko_err;
-
- *partitions = rko->rko_u.assign.partitions;
- rko->rko_u.assign.partitions = NULL;
- rd_kafka_op_destroy(rko);
-
- if (!*partitions && !err) {
- /* Create an empty list for convenience of the caller */
- *partitions = rd_kafka_topic_partition_list_new(0);
- }
-
- return err;
-}
-
-rd_kafka_resp_err_t
-rd_kafka_subscription(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t **topics) {
- rd_kafka_op_t *rko;
- rd_kafka_resp_err_t err;
- rd_kafka_cgrp_t *rkcg;
-
- if (!(rkcg = rd_kafka_cgrp_get(rk)))
- return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
-
- rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_SUBSCRIPTION);
- if (!rko)
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- err = rko->rko_err;
-
- *topics = rko->rko_u.subscribe.topics;
- rko->rko_u.subscribe.topics = NULL;
- rd_kafka_op_destroy(rko);
-
- if (!*topics && !err) {
- /* Create an empty list for convenience of the caller */
- *topics = rd_kafka_topic_partition_list_new(0);
- }
-
- return err;
-}
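-
-/*
- * Query sketch (illustrative only, not part of the original file): both
- * getters above hand ownership of the returned list to the caller, which
- * must destroy it when done.
- */
-#if 0 /* example only */
-static void example_dump_assignment(rd_kafka_t *rk) {
- int i;
- rd_kafka_topic_partition_list_t *assignment = NULL;
-
- if (rd_kafka_assignment(rk, &assignment))
- return; /* error */
-
- for (i = 0; i < assignment->cnt; i++)
- printf("%s [%d]\n", assignment->elems[i].topic,
- (int)assignment->elems[i].partition);
-
- rd_kafka_topic_partition_list_destroy(assignment);
-}
-#endif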
-
-
-rd_kafka_resp_err_t
-rd_kafka_pause_partitions(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- return rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_SYNC,
- RD_KAFKA_TOPPAR_F_APP_PAUSE,
- partitions);
-}
-
-
-rd_kafka_resp_err_t
-rd_kafka_resume_partitions(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- return rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_SYNC,
- RD_KAFKA_TOPPAR_F_APP_PAUSE,
- partitions);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c
deleted file mode 100644
index 5240af785..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rd.h"
-#include "rdtime.h"
-#include "rdsysqueue.h"
-
-#include "rdkafka_queue.h"
-
-static RD_INLINE void rd_kafka_timers_lock(rd_kafka_timers_t *rkts) {
- mtx_lock(&rkts->rkts_lock);
-}
-
-static RD_INLINE void rd_kafka_timers_unlock(rd_kafka_timers_t *rkts) {
- mtx_unlock(&rkts->rkts_lock);
-}
-
-
-static RD_INLINE int rd_kafka_timer_started(const rd_kafka_timer_t *rtmr) {
- return rtmr->rtmr_interval ? 1 : 0;
-}
-
-
-static RD_INLINE int rd_kafka_timer_scheduled(const rd_kafka_timer_t *rtmr) {
- return rtmr->rtmr_next ? 1 : 0;
-}
-
-
-static int rd_kafka_timer_cmp(const void *_a, const void *_b) {
- const rd_kafka_timer_t *a = _a, *b = _b;
- return RD_CMP(a->rtmr_next, b->rtmr_next);
-}
-
-static void rd_kafka_timer_unschedule(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr) {
- TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link);
- rtmr->rtmr_next = 0;
-}
-
-
-/**
- * @brief Schedule the next firing of the timer at \p abs_time.
- *
- * @remark Will not update rtmr_interval, only rtmr_next.
- *
- * @locks_required timers_lock()
- */
-static void rd_kafka_timer_schedule_next(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- rd_ts_t abs_time) {
- rd_kafka_timer_t *first;
-
- rtmr->rtmr_next = abs_time;
-
- if (!(first = TAILQ_FIRST(&rkts->rkts_timers)) ||
- first->rtmr_next > rtmr->rtmr_next) {
- TAILQ_INSERT_HEAD(&rkts->rkts_timers, rtmr, rtmr_link);
- cnd_signal(&rkts->rkts_cond);
- if (rkts->rkts_wakeq)
- rd_kafka_q_yield(rkts->rkts_wakeq);
- } else
- TAILQ_INSERT_SORTED(&rkts->rkts_timers, rtmr,
- rd_kafka_timer_t *, rtmr_link,
- rd_kafka_timer_cmp);
-}
-
-
-/**
- * @brief Schedule the next firing of the timer according to the timer's
- * interval plus an optional \p extra_us.
- *
- * @locks_required timers_lock()
- */
-static void rd_kafka_timer_schedule(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- int extra_us) {
-
- /* Timer has been stopped */
- if (!rtmr->rtmr_interval)
- return;
-
- /* Timers framework is terminating */
- if (unlikely(!rkts->rkts_enabled))
- return;
-
- rd_kafka_timer_schedule_next(
- rkts, rtmr, rd_clock() + rtmr->rtmr_interval + extra_us);
-}
-
-/**
- * @brief Stop a timer, which may or may not have been started.
- * If called from inside a timer callback, 'lock' must be 0, else 1.
- *
- * @returns 1 if the timer was started (before being stopped), else 0.
- */
-int rd_kafka_timer_stop(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- int lock) {
- if (lock)
- rd_kafka_timers_lock(rkts);
-
- if (!rd_kafka_timer_started(rtmr)) {
- if (lock)
- rd_kafka_timers_unlock(rkts);
- return 0;
- }
-
- if (rd_kafka_timer_scheduled(rtmr))
- rd_kafka_timer_unschedule(rkts, rtmr);
-
- rtmr->rtmr_interval = 0;
-
- if (lock)
- rd_kafka_timers_unlock(rkts);
-
- return 1;
-}
-
-
-/**
- * @returns true if timer is started, else false.
- */
-rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts,
- const rd_kafka_timer_t *rtmr) {
- rd_bool_t ret;
- rd_kafka_timers_lock(rkts);
- ret = rtmr->rtmr_interval != 0;
- rd_kafka_timers_unlock(rkts);
- return ret;
-}
-
-
-/**
- * @brief Start the provided timer with the given interval.
- *
- * Upon expiration of the interval (in microseconds) the callback will be
- * called in the main rdkafka thread; after the callback returns the timer
- * will be restarted.
- *
- * @param oneshot just fire the timer once.
- * @param restart if timer is already started, restart it.
- *
- * Use rd_kafka_timer_stop() to stop a timer.
- */
-void rd_kafka_timer_start0(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- rd_ts_t interval,
- rd_bool_t oneshot,
- rd_bool_t restart,
- void (*callback)(rd_kafka_timers_t *rkts, void *arg),
- void *arg) {
- rd_kafka_timers_lock(rkts);
-
- if (!restart && rd_kafka_timer_scheduled(rtmr)) {
- rd_kafka_timers_unlock(rkts);
- return;
- }
-
- rd_kafka_timer_stop(rkts, rtmr, 0 /*!lock*/);
-
- /* Make sure the timer interval is non-zero or the timer
- * won't be scheduled, which is not what the caller of .._start*()
- * would expect. */
- rtmr->rtmr_interval = interval == 0 ? 1 : interval;
- rtmr->rtmr_callback = callback;
- rtmr->rtmr_arg = arg;
- rtmr->rtmr_oneshot = oneshot;
-
- rd_kafka_timer_schedule(rkts, rtmr, 0);
-
- rd_kafka_timers_unlock(rkts);
-}
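-
-/*
- * Internal usage sketch (illustrative only, not part of the original file):
- * starting a periodic and a oneshot timer with the function above. The
- * timers handle, timer objects and intervals are hypothetical.
- */
-#if 0 /* example only */
-static void example_timer_cb(rd_kafka_timers_t *rkts, void *arg) {
- /* Runs in the timers thread; a periodic timer is rescheduled
- * automatically after this callback returns. */
-}
-
-static void example_start_timers(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *periodic,
- rd_kafka_timer_t *oneshot) {
- /* Fire every second, restarting if already started. */
- rd_kafka_timer_start0(rkts, periodic, 1000 * 1000 /*us*/,
- rd_false /*!oneshot*/, rd_true /*restart*/,
- example_timer_cb, NULL);
- /* Fire once after 100ms, don't restart if already scheduled. */
- rd_kafka_timer_start0(rkts, oneshot, 100 * 1000 /*us*/,
- rd_true /*oneshot*/, rd_false /*!restart*/,
- example_timer_cb, NULL);
-}
-#endif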
-
-/**
- * Exponential backoff: double the timer's interval and reschedule the
- * next invocation accordingly.
- */
-void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr) {
- rd_kafka_timers_lock(rkts);
- if (rd_kafka_timer_scheduled(rtmr)) {
- rtmr->rtmr_interval *= 2;
- rd_kafka_timer_unschedule(rkts, rtmr);
- }
- rd_kafka_timer_schedule(rkts, rtmr, 0);
- rd_kafka_timers_unlock(rkts);
-}
-
-/**
- * @brief Override the interval once for the next firing of the timer.
- *
- * @locks_required none
- * @locks_acquired timers_lock
- */
-void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- rd_ts_t interval) {
- rd_kafka_timers_lock(rkts);
- if (rd_kafka_timer_scheduled(rtmr))
- rd_kafka_timer_unschedule(rkts, rtmr);
- rd_kafka_timer_schedule_next(rkts, rtmr, rd_clock() + interval);
- rd_kafka_timers_unlock(rkts);
-}
-
-
-/**
- * @returns the delta time (>= 0) until this timer next fires, or -1
- * if the timer is not scheduled.
- */
-rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- int do_lock) {
- rd_ts_t now = rd_clock();
- rd_ts_t delta = -1;
-
- if (do_lock)
- rd_kafka_timers_lock(rkts);
-
- if (rd_kafka_timer_scheduled(rtmr)) {
- delta = rtmr->rtmr_next - now;
- if (delta < 0)
- delta = 0;
- }
-
- if (do_lock)
- rd_kafka_timers_unlock(rkts);
-
- return delta;
-}
-
-
-/**
- * Interrupt rd_kafka_timers_run().
- * Used for termination.
- */
-void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts) {
- rd_kafka_timers_lock(rkts);
- cnd_signal(&rkts->rkts_cond);
- rd_kafka_timers_unlock(rkts);
-}
-
-
-/**
- * Returns the delta time until the next timer fires, capped by 'timeout_us'.
- */
-rd_ts_t
-rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_us, int do_lock) {
- rd_ts_t now = rd_clock();
- rd_ts_t sleeptime = 0;
- rd_kafka_timer_t *rtmr;
-
- if (do_lock)
- rd_kafka_timers_lock(rkts);
-
- if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) {
- sleeptime = rtmr->rtmr_next - now;
- if (sleeptime < 0)
- sleeptime = 0;
- else if (sleeptime > (rd_ts_t)timeout_us)
- sleeptime = (rd_ts_t)timeout_us;
- } else
- sleeptime = (rd_ts_t)timeout_us;
-
- if (do_lock)
- rd_kafka_timers_unlock(rkts);
-
- return sleeptime;
-}
-
-
-/**
- * Dispatch timers.
- * Will block for up to 'timeout_us' microseconds before returning.
- */
-void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us) {
- rd_ts_t now = rd_clock();
- rd_ts_t end = now + timeout_us;
-
- rd_kafka_timers_lock(rkts);
-
- while (!rd_kafka_terminating(rkts->rkts_rk) && now <= end) {
- int64_t sleeptime;
- rd_kafka_timer_t *rtmr;
-
- if (timeout_us != RD_POLL_NOWAIT) {
- sleeptime = rd_kafka_timers_next(rkts, timeout_us,
- 0 /*no-lock*/);
-
- if (sleeptime > 0) {
- cnd_timedwait_ms(&rkts->rkts_cond,
- &rkts->rkts_lock,
- (int)(sleeptime / 1000));
- }
- }
-
- now = rd_clock();
-
- while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) &&
- rtmr->rtmr_next <= now) {
- rd_bool_t oneshot;
-
- rd_kafka_timer_unschedule(rkts, rtmr);
-
- /* If timer must only be fired once,
- * disable it now prior to callback.
- *
- * NOTE: Oneshot timers are never touched again after
- * the callback has been called to avoid use-after-free.
- */
- if ((oneshot = rtmr->rtmr_oneshot))
- rtmr->rtmr_interval = 0;
-
- rd_kafka_timers_unlock(rkts);
-
- rtmr->rtmr_callback(rkts, rtmr->rtmr_arg);
-
- rd_kafka_timers_lock(rkts);
-
- /* Restart timer, unless it has been stopped, or
- * already rescheduled (start()ed) from the callback. */
- if (!oneshot && rd_kafka_timer_started(rtmr) &&
- !rd_kafka_timer_scheduled(rtmr))
- rd_kafka_timer_schedule(rkts, rtmr, 0);
- }
-
- if (timeout_us == RD_POLL_NOWAIT) {
- /* Only iterate once, even if rd_clock doesn't change */
- break;
- }
- }
-
- rd_kafka_timers_unlock(rkts);
-}
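-
-/*
- * Dispatch-loop sketch (illustrative only, not part of the original file):
- * how a thread might drive the timer engine, assuming a timers handle set
- * up with rd_kafka_timers_init() below.
- */
-#if 0 /* example only */
-static void example_timers_thread_main(rd_kafka_timers_t *rkts) {
- while (!rd_kafka_terminating(rkts->rkts_rk)) {
- /* Block up to 100ms waiting for the next timer,
- * then dispatch any timers that are due. */
- rd_kafka_timers_run(rkts, 100 * 1000 /*us*/);
- }
-}
-#endif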
-
-
-void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts) {
- rd_kafka_timer_t *rtmr;
-
- rd_kafka_timers_lock(rkts);
- rkts->rkts_enabled = 0;
- while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)))
- rd_kafka_timer_stop(rkts, rtmr, 0);
- rd_kafka_assert(rkts->rkts_rk, TAILQ_EMPTY(&rkts->rkts_timers));
- rd_kafka_timers_unlock(rkts);
-
- cnd_destroy(&rkts->rkts_cond);
- mtx_destroy(&rkts->rkts_lock);
-}
-
-void rd_kafka_timers_init(rd_kafka_timers_t *rkts,
- rd_kafka_t *rk,
- struct rd_kafka_q_s *wakeq) {
- memset(rkts, 0, sizeof(*rkts));
- rkts->rkts_rk = rk;
- TAILQ_INIT(&rkts->rkts_timers);
- mtx_init(&rkts->rkts_lock, mtx_plain);
- cnd_init(&rkts->rkts_cond);
- rkts->rkts_enabled = 1;
- rkts->rkts_wakeq = wakeq;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h
deleted file mode 100644
index e3cadd7b9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_TIMER_H_
-#define _RDKAFKA_TIMER_H_
-
-#include "rd.h"
-
-struct rd_kafka_q_s; /**< Forward decl */
-
-/* A timer engine. */
-typedef struct rd_kafka_timers_s {
-
- TAILQ_HEAD(, rd_kafka_timer_s) rkts_timers;
-
- struct rd_kafka_s *rkts_rk;
-
- mtx_t rkts_lock;
- cnd_t rkts_cond;
-
- /** Optional wake-up queue that is q_yield()'d when a new timer
- * is scheduled that will fire prior to any existing timers.
- * This is used to wake up blocking IO or queue polls that run
- * in the same loop as timers_run(). */
- struct rd_kafka_q_s *rkts_wakeq;
-
- int rkts_enabled;
-} rd_kafka_timers_t;
-
-
-typedef struct rd_kafka_timer_s {
- TAILQ_ENTRY(rd_kafka_timer_s) rtmr_link;
-
- rd_ts_t rtmr_next;
- rd_ts_t rtmr_interval; /* interval in microseconds */
- rd_bool_t rtmr_oneshot; /**< Only fire once. */
-
- void (*rtmr_callback)(rd_kafka_timers_t *rkts, void *arg);
- void *rtmr_arg;
-} rd_kafka_timer_t;
-
-
-
-int rd_kafka_timer_stop(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- int lock);
-void rd_kafka_timer_start0(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- rd_ts_t interval,
- rd_bool_t oneshot,
- rd_bool_t restart,
- void (*callback)(rd_kafka_timers_t *rkts, void *arg),
- void *arg);
-#define rd_kafka_timer_start(rkts, rtmr, interval, callback, arg) \
- rd_kafka_timer_start0(rkts, rtmr, interval, rd_false, rd_true, \
- callback, arg)
-#define rd_kafka_timer_start_oneshot(rkts, rtmr, restart, interval, callback, \
- arg) \
- rd_kafka_timer_start0(rkts, rtmr, interval, rd_true, restart, \
- callback, arg)
-
-void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr);
-rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- int do_lock);
-
-void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts,
- rd_kafka_timer_t *rtmr,
- rd_ts_t interval);
-
-/**
- * @returns true if timer is started.
- *
- * @remark Must only be called in the timer's thread (not thread-safe)
- */
-rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts,
- const rd_kafka_timer_t *rtmr);
-
-void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts);
-rd_ts_t
-rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_us, int do_lock);
-void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us);
-void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts);
-void rd_kafka_timers_init(rd_kafka_timers_t *rkts,
- rd_kafka_t *rk,
- struct rd_kafka_q_s *wakeq);
-
-#endif /* _RDKAFKA_TIMER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c
deleted file mode 100644
index 89bfa092d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c
+++ /dev/null
@@ -1,1900 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_msg.h"
-#include "rdkafka_topic.h"
-#include "rdkafka_partition.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_cgrp.h"
-#include "rdkafka_metadata.h"
-#include "rdkafka_offset.h"
-#include "rdlog.h"
-#include "rdsysqueue.h"
-#include "rdtime.h"
-#include "rdregex.h"
-
-#if WITH_ZSTD
-#include <zstd.h>
-#endif
-
-
-const char *rd_kafka_topic_state_names[] = {"unknown", "exists", "notexists",
- "error"};
-
-
-static int rd_kafka_topic_metadata_update(
- rd_kafka_topic_t *rkt,
- const struct rd_kafka_metadata_topic *mdt,
- const rd_kafka_partition_leader_epoch_t *leader_epochs,
- rd_ts_t ts_age);
-
-
-/**
- * @brief Increases the app's topic reference count.
- *
- * The app refcounts are implemented separately from the librdkafka refcounts;
- * they are increased/decreased in a separate rkt_app_refcnt to keep track of
- * the application's use of the topic.
- *
- * This only covers topic_new() & topic_destroy().
- * The topic_t exposed in rd_kafka_message_t is NOT covered and is handled
- * like a standard internal -> app pointer conversion (keep_a()).
- */
-static void rd_kafka_topic_keep_app(rd_kafka_topic_t *rkt) {
- if (rd_refcnt_add(&rkt->rkt_app_refcnt) == 1)
- rd_kafka_topic_keep(rkt);
-}
-
-/**
- * @brief drop rkt app reference
- */
-static void rd_kafka_topic_destroy_app(rd_kafka_topic_t *app_rkt) {
- rd_kafka_topic_t *rkt = app_rkt;
-
- rd_assert(!rd_kafka_rkt_is_lw(app_rkt));
-
- if (unlikely(rd_refcnt_sub(&rkt->rkt_app_refcnt) == 0))
- rd_kafka_topic_destroy0(rkt); /* final app reference lost,
- * lose the reference from
- * keep_app() */
-}
-
-
-/**
- * Final destructor for topic. Refcnt must be 0.
- */
-void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt) {
- rd_kafka_partition_msgid_t *partmsgid, *partmsgid_tmp;
-
- rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0);
-
- rd_kafka_wrlock(rkt->rkt_rk);
- TAILQ_REMOVE(&rkt->rkt_rk->rk_topics, rkt, rkt_link);
- rkt->rkt_rk->rk_topic_cnt--;
- rd_kafka_wrunlock(rkt->rkt_rk);
-
- TAILQ_FOREACH_SAFE(partmsgid, &rkt->rkt_saved_partmsgids, link,
- partmsgid_tmp) {
- rd_free(partmsgid);
- }
-
- rd_kafka_assert(rkt->rkt_rk, rd_list_empty(&rkt->rkt_desp));
- rd_list_destroy(&rkt->rkt_desp);
-
- rd_avg_destroy(&rkt->rkt_avg_batchsize);
- rd_avg_destroy(&rkt->rkt_avg_batchcnt);
-
- if (rkt->rkt_topic)
- rd_kafkap_str_destroy(rkt->rkt_topic);
-
- rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf);
-
- rwlock_destroy(&rkt->rkt_lock);
- rd_refcnt_destroy(&rkt->rkt_app_refcnt);
- rd_refcnt_destroy(&rkt->rkt_refcnt);
-
- rd_free(rkt);
-}
-
-/**
- * @brief Application topic object destroy.
- * @warning MUST ONLY BE CALLED BY THE APPLICATION.
- * Use rd_kafka_topic_destroy0() for all internal use.
- */
-void rd_kafka_topic_destroy(rd_kafka_topic_t *app_rkt) {
- rd_kafka_lwtopic_t *lrkt;
- if (unlikely((lrkt = rd_kafka_rkt_get_lw(app_rkt)) != NULL))
- rd_kafka_lwtopic_destroy(lrkt);
- else
- rd_kafka_topic_destroy_app(app_rkt);
-}
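-
-/*
- * Application-level sketch (illustrative only, not part of the original
- * file): the app refcount described above covers exactly this new/destroy
- * pairing. Configuration and error handling are elided; the topic name is
- * hypothetical.
- */
-#if 0 /* example only */
-static void example_topic_lifecycle(rd_kafka_t *rk) {
- /* Takes an app reference (see rd_kafka_topic_keep_app()). */
- rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "example-topic", NULL);
-
- if (!rkt)
- return;
-
- /* ... use the topic handle ... */
-
- /* Drops the app reference; the object is freed once all
- * internal references are gone as well. */
- rd_kafka_topic_destroy(rkt);
-}
-#endif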
-
-
-/**
- * Finds and returns a topic based on its name, or NULL if not found.
- * The 'rkt' refcount is increased by one and the caller must call
- * rd_kafka_topic_destroy() when it is done with the topic to decrease
- * the refcount.
- *
- * Locality: any thread
- */
-rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func,
- int line,
- rd_kafka_t *rk,
- const char *topic,
- int do_lock) {
- rd_kafka_topic_t *rkt;
-
- if (do_lock)
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) {
- rd_kafka_topic_keep(rkt);
- break;
- }
- }
- if (do_lock)
- rd_kafka_rdunlock(rk);
-
- return rkt;
-}
-
-/**
- * Same semantics as ..find() but takes a Kafka protocol string instead.
- */
-rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func,
- int line,
- rd_kafka_t *rk,
- const rd_kafkap_str_t *topic) {
- rd_kafka_topic_t *rkt;
-
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) {
- rd_kafka_topic_keep(rkt);
- break;
- }
- }
- rd_kafka_rdunlock(rk);
-
- return rkt;
-}
-
-
-/**
- * @brief rd_kafka_topic_t comparator.
- */
-int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b) {
- rd_kafka_topic_t *rkt_a = (void *)_a, *rkt_b = (void *)_b;
-
- if (rkt_a == rkt_b)
- return 0;
-
- return rd_kafkap_str_cmp(rkt_a->rkt_topic, rkt_b->rkt_topic);
-}
-
-
-/**
- * @brief Destroy/free a light-weight topic object.
- */
-void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt) {
- rd_assert(rd_kafka_rkt_is_lw((const rd_kafka_topic_t *)lrkt));
- if (rd_refcnt_sub(&lrkt->lrkt_refcnt) > 0)
- return;
-
- rd_refcnt_destroy(&lrkt->lrkt_refcnt);
- rd_free(lrkt);
-}
-
-
-/**
- * @brief Create a new light-weight topic name-only handle.
- *
- * This type of object is a light-weight non-linked alternative
- * to the proper rd_kafka_itopic_t for outgoing APIs
- * (such as rd_kafka_message_t) when there is no full topic object available.
- */
-rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic) {
- rd_kafka_lwtopic_t *lrkt;
- size_t topic_len = strlen(topic);
-
- lrkt = rd_malloc(sizeof(*lrkt) + topic_len + 1);
-
- memcpy(lrkt->lrkt_magic, "LRKT", 4);
- lrkt->lrkt_rk = rk;
- rd_refcnt_init(&lrkt->lrkt_refcnt, 1);
- lrkt->lrkt_topic = (char *)(lrkt + 1);
- memcpy(lrkt->lrkt_topic, topic, topic_len + 1);
-
- return lrkt;
-}
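-
-/*
- * Internal sketch (illustrative only, not part of the original file): a
- * light-weight topic is a name-only handle and is released with
- * rd_kafka_lwtopic_destroy() rather than the full topic destructor.
- */
-#if 0 /* example only */
-static void example_lwtopic(rd_kafka_t *rk) {
- rd_kafka_lwtopic_t *lrkt = rd_kafka_lwtopic_new(rk, "example-topic");
- /* ... embed in an outgoing object such as rd_kafka_message_t ... */
- rd_kafka_lwtopic_destroy(lrkt);
-}
-#endif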
-
-
-/**
- * @returns a proper rd_kafka_topic_t object (not light-weight)
- * based on the input rd_kafka_topic_t app object which may
- * either be a proper topic (which is then returned) or a light-weight
- * topic in which case it will look up or create the proper topic
- * object.
- *
- * This allows the application to (unknowingly) pass a light-weight
- * topic object to any proper-aware public API.
- */
-rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt) {
- rd_kafka_lwtopic_t *lrkt;
-
- if (likely(!(lrkt = rd_kafka_rkt_get_lw(app_rkt))))
- return app_rkt;
-
- /* Create proper topic object */
- return rd_kafka_topic_new0(lrkt->lrkt_rk, lrkt->lrkt_topic, NULL, NULL,
- 0);
-}
-
-
-/**
- * @brief Create new topic handle.
- *
- * @locality any
- */
-rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk,
- const char *topic,
- rd_kafka_topic_conf_t *conf,
- int *existing,
- int do_lock) {
- rd_kafka_topic_t *rkt;
- const struct rd_kafka_metadata_cache_entry *rkmce;
- const char *conf_err;
- const char *used_conf_str;
-
- /* Verify configuration.
- * Maximum topic name size + headers must never exceed message.max.bytes
- * which is min-capped to 1000.
- * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */
- if (!topic || strlen(topic) > 512) {
- if (conf)
- rd_kafka_topic_conf_destroy(conf);
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
- return NULL;
- }
-
- if (do_lock)
- rd_kafka_wrlock(rk);
- if ((rkt = rd_kafka_topic_find(rk, topic, 0 /*no lock*/))) {
- if (do_lock)
- rd_kafka_wrunlock(rk);
- if (conf)
- rd_kafka_topic_conf_destroy(conf);
- if (existing)
- *existing = 1;
- return rkt;
- }
-
- if (!conf) {
- if (rk->rk_conf.topic_conf) {
- conf = rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf);
- used_conf_str = "default_topic_conf";
- } else {
- conf = rd_kafka_topic_conf_new();
- used_conf_str = "empty";
- }
- } else {
- used_conf_str = "user-supplied";
- }
-
-
- /* Verify and finalize topic configuration */
- if ((conf_err = rd_kafka_topic_conf_finalize(rk->rk_type, &rk->rk_conf,
- conf))) {
- if (do_lock)
- rd_kafka_wrunlock(rk);
- /* Incompatible configuration settings */
- rd_kafka_log(rk, LOG_ERR, "TOPICCONF",
- "Incompatible configuration settings "
- "for topic \"%s\": %s",
- topic, conf_err);
- rd_kafka_topic_conf_destroy(conf);
- rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
- return NULL;
- }
-
- if (existing)
- *existing = 0;
-
- rkt = rd_calloc(1, sizeof(*rkt));
-
- memcpy(rkt->rkt_magic, "IRKT", 4);
-
- rkt->rkt_topic = rd_kafkap_str_new(topic, -1);
- rkt->rkt_rk = rk;
-
- rkt->rkt_ts_create = rd_clock();
-
- rkt->rkt_conf = *conf;
- rd_free(conf); /* explicitly not rd_kafka_topic_destroy()
- * since we don't want to rd_free internal members,
- * just the placeholder. The internal members
- * were copied on the line above. */
-
- /* Partitioner */
- if (!rkt->rkt_conf.partitioner) {
- const struct {
- const char *str;
- void *part;
- } part_map[] = {
- {"random", (void *)rd_kafka_msg_partitioner_random},
- {"consistent", (void *)rd_kafka_msg_partitioner_consistent},
- {"consistent_random",
- (void *)rd_kafka_msg_partitioner_consistent_random},
- {"murmur2", (void *)rd_kafka_msg_partitioner_murmur2},
- {"murmur2_random",
- (void *)rd_kafka_msg_partitioner_murmur2_random},
- {"fnv1a", (void *)rd_kafka_msg_partitioner_fnv1a},
- {"fnv1a_random",
- (void *)rd_kafka_msg_partitioner_fnv1a_random},
- {NULL}};
- int i;
-
- /* Use "partitioner" configuration property string, if set */
- for (i = 0; rkt->rkt_conf.partitioner_str && part_map[i].str;
- i++) {
- if (!strcmp(rkt->rkt_conf.partitioner_str,
- part_map[i].str)) {
- rkt->rkt_conf.partitioner = part_map[i].part;
- break;
- }
- }
-
- /* Default partitioner: consistent_random */
- if (!rkt->rkt_conf.partitioner) {
- /* Make sure part_map matched something, otherwise
- * there is a discrepancy between this code
- * and the validator in rdkafka_conf.c */
- assert(!rkt->rkt_conf.partitioner_str);
-
- rkt->rkt_conf.partitioner =
- rd_kafka_msg_partitioner_consistent_random;
- }
- }
-
- if (rkt->rkt_rk->rk_conf.sticky_partition_linger_ms > 0 &&
- rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_consistent &&
- rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_murmur2 &&
- rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_fnv1a) {
- rkt->rkt_conf.random_partitioner = rd_false;
- } else {
- rkt->rkt_conf.random_partitioner = rd_true;
- }
-
- /* Sticky partition assignment interval */
- rd_interval_init(&rkt->rkt_sticky_intvl);
-
- if (rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO)
- rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid;
- else
- rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid_lifo;
-
- if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT)
- rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec;
-
- /* Translate compression level to library-specific level and check
- * upper bound */
- switch (rkt->rkt_conf.compression_codec) {
-#if WITH_ZLIB
- case RD_KAFKA_COMPRESSION_GZIP:
- if (rkt->rkt_conf.compression_level ==
- RD_KAFKA_COMPLEVEL_DEFAULT)
- rkt->rkt_conf.compression_level = Z_DEFAULT_COMPRESSION;
- else if (rkt->rkt_conf.compression_level >
- RD_KAFKA_COMPLEVEL_GZIP_MAX)
- rkt->rkt_conf.compression_level =
- RD_KAFKA_COMPLEVEL_GZIP_MAX;
- break;
-#endif
- case RD_KAFKA_COMPRESSION_LZ4:
- if (rkt->rkt_conf.compression_level ==
- RD_KAFKA_COMPLEVEL_DEFAULT)
- /* LZ4 has no notion of system-wide default compression
- * level, use zero in this case */
- rkt->rkt_conf.compression_level = 0;
- else if (rkt->rkt_conf.compression_level >
- RD_KAFKA_COMPLEVEL_LZ4_MAX)
- rkt->rkt_conf.compression_level =
- RD_KAFKA_COMPLEVEL_LZ4_MAX;
- break;
-#if WITH_ZSTD
- case RD_KAFKA_COMPRESSION_ZSTD:
- if (rkt->rkt_conf.compression_level ==
- RD_KAFKA_COMPLEVEL_DEFAULT)
- rkt->rkt_conf.compression_level = 3;
- else if (rkt->rkt_conf.compression_level >
- RD_KAFKA_COMPLEVEL_ZSTD_MAX)
- rkt->rkt_conf.compression_level =
- RD_KAFKA_COMPLEVEL_ZSTD_MAX;
- break;
-#endif
- case RD_KAFKA_COMPRESSION_SNAPPY:
- default:
- /* Compression level has no effect in this case */
- rkt->rkt_conf.compression_level = RD_KAFKA_COMPLEVEL_DEFAULT;
- }
-
- rd_avg_init(&rkt->rkt_avg_batchsize, RD_AVG_GAUGE, 0,
- rk->rk_conf.max_msg_size, 2,
- rk->rk_conf.stats_interval_ms ? 1 : 0);
- rd_avg_init(&rkt->rkt_avg_batchcnt, RD_AVG_GAUGE, 0,
- rk->rk_conf.batch_num_messages, 2,
- rk->rk_conf.stats_interval_ms ? 1 : 0);
-
- rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s",
- RD_KAFKAP_STR_PR(rkt->rkt_topic));
-
- rd_list_init(&rkt->rkt_desp, 16, NULL);
- rd_interval_init(&rkt->rkt_desp_refresh_intvl);
- TAILQ_INIT(&rkt->rkt_saved_partmsgids);
- rd_refcnt_init(&rkt->rkt_refcnt, 0);
- rd_refcnt_init(&rkt->rkt_app_refcnt, 0);
-
- rd_kafka_topic_keep(rkt);
-
- rwlock_init(&rkt->rkt_lock);
-
- /* Create unassigned partition */
- rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA);
-
- TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link);
- rk->rk_topic_cnt++;
-
- /* Populate from metadata cache. */
- if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1 /*valid*/)) &&
- !rkmce->rkmce_mtopic.err) {
- if (existing)
- *existing = 1;
-
- rd_kafka_topic_metadata_update(rkt, &rkmce->rkmce_mtopic, NULL,
- rkmce->rkmce_ts_insert);
- }
-
- if (do_lock)
- rd_kafka_wrunlock(rk);
-
- if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) {
- char desc[256];
- rd_snprintf(desc, sizeof(desc),
- "Topic \"%s\" configuration (%s)", topic,
- used_conf_str);
- rd_kafka_anyconf_dump_dbg(rk, _RK_TOPIC, &rkt->rkt_conf, desc);
- }
-
- return rkt;
-}
-
-
-
-/**
- * @brief Create new app topic handle.
- *
- * @locality application thread
- */
-rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk,
- const char *topic,
- rd_kafka_topic_conf_t *conf) {
- rd_kafka_topic_t *rkt;
- int existing;
-
- rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1 /*lock*/);
- if (!rkt)
- return NULL;
-
- /* Increase application refcount. */
- rd_kafka_topic_keep_app(rkt);
-
- /* Query for the topic leader (async) */
- if (!existing)
- rd_kafka_topic_leader_query(rk, rkt);
-
- /* Drop our reference since there is already/now an app refcnt */
- rd_kafka_topic_destroy0(rkt);
-
- return rkt;
-}
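-
-/* A hedged usage sketch of the public API above: one rd_kafka_topic_new()
- * per topic handle, paired with one rd_kafka_topic_destroy() that drops
- * the app refcount taken via rd_kafka_topic_keep_app(). The function name
- * and payload below are hypothetical; error handling is elided. */
-#if 0
-static void example_produce_one(rd_kafka_t *rk, const char *topic_name) {
- /* NULL conf: the topic inherits rk's default_topic_conf,
- * as handled in rd_kafka_topic_new0() above. */
- rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic_name, NULL);
-
- if (!rkt)
- return;
-
- rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
- (void *)"hello", 5, NULL, 0, NULL);
-
- rd_kafka_topic_destroy(rkt); /* drops the app refcount */
-}
-#endif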
-
-
-
-/**
- * Sets the state for topic.
- * NOTE: rd_kafka_topic_wrlock(rkt) MUST be held
- */
-static void rd_kafka_topic_set_state(rd_kafka_topic_t *rkt, int state) {
-
- if ((int)rkt->rkt_state == state)
- return;
-
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "STATE",
- "Topic %s changed state %s -> %s", rkt->rkt_topic->str,
- rd_kafka_topic_state_names[rkt->rkt_state],
- rd_kafka_topic_state_names[state]);
-
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR)
- rkt->rkt_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rkt->rkt_state = state;
-}
-
-/**
- * Returns the name of a topic.
- * NOTE:
- * The topic's Kafka String representation is crafted with an extra byte
- * at the end for the NUL terminator, which is not included in the length;
- * this way the topic's String can be used directly.
- * This is not true for Kafka Strings read from the network.
- */
-const char *rd_kafka_topic_name(const rd_kafka_topic_t *app_rkt) {
- if (rd_kafka_rkt_is_lw(app_rkt))
- return rd_kafka_rkt_lw_const(app_rkt)->lrkt_topic;
- else
- return app_rkt->rkt_topic->str;
-}
-
-
-/**
- * @brief Update the broker that a topic+partition is delegated to.
- *
- * @param broker_id The id of the broker to associate the toppar with.
- * @param rkb A reference to the broker to delegate to (must match
- * broker_id) or NULL if the toppar should be undelegated for
- * any reason.
- * @param reason Human-readable reason for the update, included in debug log.
- *
- * @returns 1 if the broker delegation was changed, -1 if the broker
- * delegation was changed and is now undelegated, else 0.
- *
- * @locks caller must have rd_kafka_toppar_lock(rktp)
- * @locality any
- */
-int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp,
- int32_t broker_id,
- rd_kafka_broker_t *rkb,
- const char *reason) {
-
- rktp->rktp_broker_id = broker_id;
-
- if (!rkb) {
- int had_broker = rktp->rktp_broker ? 1 : 0;
- rd_kafka_toppar_broker_delegate(rktp, NULL);
- return had_broker ? -1 : 0;
- }
-
- if (rktp->rktp_broker) {
- if (rktp->rktp_broker == rkb) {
- /* No change in broker */
- return 0;
- }
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_FETCH,
- "TOPICUPD",
- "Topic %s [%" PRId32
- "]: migrating from "
- "broker %" PRId32 " to %" PRId32
- " (leader is "
- "%" PRId32 "): %s",
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition,
- rktp->rktp_broker->rkb_nodeid, rkb->rkb_nodeid,
- rktp->rktp_leader_id, reason);
- }
-
- rd_kafka_toppar_broker_delegate(rktp, rkb);
-
- return 1;
-}
-
-
-/**
- * @brief Update a topic+partition for a new leader.
- *
- * @remark If a toppar is currently delegated to a preferred replica,
- * it will not be delegated to the leader broker unless there
- * has been a leader change.
- *
- * @param leader_id The id of the new leader broker.
- * @param leader A reference to the leader broker or NULL if the
- * toppar should be undelegated for any reason.
- * @param leader_epoch Partition leader's epoch (KIP-320), or -1 if not known.
- *
- * @returns 1 if the broker delegation was changed, -1 if the broker
- * delegation was changed and is now undelegated, else 0.
- *
- * @locks caller must have rd_kafka_topic_wrlock(rkt)
- * AND NOT rd_kafka_toppar_lock(rktp)
- * @locality any
- */
-static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt,
- int32_t partition,
- int32_t leader_id,
- rd_kafka_broker_t *leader,
- int32_t leader_epoch) {
- rd_kafka_toppar_t *rktp;
- rd_bool_t fetching_from_follower, need_epoch_validation = rd_false;
- int r = 0;
-
- rktp = rd_kafka_toppar_get(rkt, partition, 0);
- if (unlikely(!rktp)) {
- /* Have only seen this in issue #132.
- * Probably caused by corrupt broker state. */
- rd_kafka_log(rkt->rkt_rk, LOG_WARNING, "BROKER",
- "%s [%" PRId32
- "] is unknown "
- "(partition_cnt %i): "
- "ignoring leader (%" PRId32 ") update",
- rkt->rkt_topic->str, partition,
- rkt->rkt_partition_cnt, leader_id);
- return -1;
- }
-
- rd_kafka_toppar_lock(rktp);
-
- if (leader_epoch < rktp->rktp_leader_epoch) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
- "%s [%" PRId32
- "]: ignoring outdated metadata update with "
- "leader epoch %" PRId32
- " which is older than "
- "our cached epoch %" PRId32,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, leader_epoch,
- rktp->rktp_leader_epoch);
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE) {
- rd_kafka_toppar_unlock(rktp);
- return 0;
- }
- }
-
- if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT)
- need_epoch_validation = rd_true;
- else if (leader_epoch > rktp->rktp_leader_epoch) {
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
- "%s [%" PRId32 "]: leader %" PRId32
- " epoch %" PRId32 " -> leader %" PRId32
- " epoch %" PRId32,
- rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rktp->rktp_leader_id,
- rktp->rktp_leader_epoch, leader_id, leader_epoch);
- rktp->rktp_leader_epoch = leader_epoch;
- need_epoch_validation = rd_true;
- }
-
- fetching_from_follower =
- leader != NULL && rktp->rktp_broker != NULL &&
- rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL &&
- rktp->rktp_broker != leader;
-
- if (fetching_from_follower && rktp->rktp_leader_id == leader_id) {
- rd_kafka_dbg(
- rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
- "Topic %s [%" PRId32 "]: leader %" PRId32
- " unchanged, "
- "not migrating away from preferred replica %" PRId32,
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- leader_id, rktp->rktp_broker_id);
- r = 0;
-
- } else {
-
- if (rktp->rktp_leader_id != leader_id ||
- rktp->rktp_leader != leader) {
- /* Update leader if it has changed */
- rktp->rktp_leader_id = leader_id;
- if (rktp->rktp_leader)
- rd_kafka_broker_destroy(rktp->rktp_leader);
- if (leader)
- rd_kafka_broker_keep(leader);
- rktp->rktp_leader = leader;
- }
-
- /* Update handling broker */
- r = rd_kafka_toppar_broker_update(rktp, leader_id, leader,
- "leader updated");
- }
-
- if (need_epoch_validation) {
- /* Update the next fetch position, which could be stale since the
- * last fetch start. Only do this if the app position is real. */
- if (rktp->rktp_app_pos.offset > 0) {
- rd_kafka_toppar_set_next_fetch_position(
- rktp, rktp->rktp_app_pos);
- }
- rd_kafka_offset_validate(rktp, "epoch updated from metadata");
- }
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp); /* from get() */
-
- return r;
-}
-
-
-/**
- * @brief Revert the topic+partition delegation to the leader from
- * a preferred replica.
- *
- * @returns 1 if the broker delegation was changed, -1 if the broker
- * delegation was changed and is now undelegated, else 0.
- *
- * @locks none
- * @locality any
- */
-int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp) {
- rd_kafka_broker_t *leader;
- int r;
-
- rd_kafka_rdlock(rktp->rktp_rkt->rkt_rk);
- rd_kafka_toppar_lock(rktp);
-
- rd_assert(rktp->rktp_leader_id != rktp->rktp_broker_id);
-
- rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
- "Topic %s [%" PRId32
- "]: Reverting from preferred "
- "replica %" PRId32 " to leader %" PRId32,
- rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rktp->rktp_broker_id, rktp->rktp_leader_id);
-
- leader = rd_kafka_broker_find_by_nodeid(rktp->rktp_rkt->rkt_rk,
- rktp->rktp_leader_id);
-
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_rdunlock(rktp->rktp_rkt->rkt_rk);
-
- rd_kafka_toppar_lock(rktp);
- r = rd_kafka_toppar_broker_update(
- rktp, rktp->rktp_leader_id, leader,
- "reverting from preferred replica to leader");
- rd_kafka_toppar_unlock(rktp);
-
- if (leader)
- rd_kafka_broker_destroy(leader);
-
- return r;
-}
-
-
-
-/**
- * @brief Save idempotent producer state for a partition that is about to
- * be removed.
- *
- * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp)
- */
-static void rd_kafka_toppar_idemp_msgid_save(rd_kafka_topic_t *rkt,
- const rd_kafka_toppar_t *rktp) {
- rd_kafka_partition_msgid_t *partmsgid = rd_malloc(sizeof(*partmsgid));
- partmsgid->partition = rktp->rktp_partition;
- partmsgid->msgid = rktp->rktp_msgid;
- partmsgid->pid = rktp->rktp_eos.pid;
- partmsgid->epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid;
- partmsgid->ts = rd_clock();
-
- TAILQ_INSERT_TAIL(&rkt->rkt_saved_partmsgids, partmsgid, link);
-}
-
-
-/**
- * @brief Restore idempotent producer state for a new/resurfacing partition.
- *
- * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp)
- */
-static void rd_kafka_toppar_idemp_msgid_restore(rd_kafka_topic_t *rkt,
- rd_kafka_toppar_t *rktp) {
- rd_kafka_partition_msgid_t *partmsgid;
-
- TAILQ_FOREACH(partmsgid, &rkt->rkt_saved_partmsgids, link) {
- if (partmsgid->partition == rktp->rktp_partition)
- break;
- }
-
- if (!partmsgid)
- return;
-
- rktp->rktp_msgid = partmsgid->msgid;
- rktp->rktp_eos.pid = partmsgid->pid;
- rktp->rktp_eos.epoch_base_msgid = partmsgid->epoch_base_msgid;
-
- rd_kafka_dbg(rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "MSGID",
- "Topic %s [%" PRId32 "]: restored %s with MsgId %" PRIu64
- " and "
- "epoch base MsgId %" PRIu64
- " that was saved upon removal %dms ago",
- rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_pid2str(partmsgid->pid), partmsgid->msgid,
- partmsgid->epoch_base_msgid,
- (int)((rd_clock() - partmsgid->ts) / 1000));
-
- TAILQ_REMOVE(&rkt->rkt_saved_partmsgids, partmsgid, link);
- rd_free(partmsgid);
-}
-
-
-/**
- * @brief Update the number of partitions for a topic and take actions
- * accordingly.
- *
- * @returns 1 if the partition count changed, else 0.
- *
- * @locks rd_kafka_topic_wrlock(rkt) MUST be held.
- */
-static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt,
- int32_t partition_cnt) {
- rd_kafka_t *rk = rkt->rkt_rk;
- rd_kafka_toppar_t **rktps;
- rd_kafka_toppar_t *rktp;
- rd_bool_t is_idempotent = rd_kafka_is_idempotent(rk);
- int32_t i;
-
- if (likely(rkt->rkt_partition_cnt == partition_cnt))
- return 0; /* No change in partition count */
-
- if (unlikely(rkt->rkt_partition_cnt != 0 &&
- !rd_kafka_terminating(rkt->rkt_rk)))
- rd_kafka_log(rk, LOG_NOTICE, "PARTCNT",
- "Topic %s partition count changed "
- "from %" PRId32 " to %" PRId32,
- rkt->rkt_topic->str, rkt->rkt_partition_cnt,
- partition_cnt);
- else
- rd_kafka_dbg(rk, TOPIC, "PARTCNT",
- "Topic %s partition count changed "
- "from %" PRId32 " to %" PRId32,
- rkt->rkt_topic->str, rkt->rkt_partition_cnt,
- partition_cnt);
-
-
- /* Create and assign new partition list */
- if (partition_cnt > 0)
- rktps = rd_calloc(partition_cnt, sizeof(*rktps));
- else
- rktps = NULL;
-
- for (i = 0; i < partition_cnt; i++) {
- if (i >= rkt->rkt_partition_cnt) {
- /* New partition. Check if it's in the list of
- * desired partitions first. */
-
- rktp = rd_kafka_toppar_desired_get(rkt, i);
- if (rktp) {
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_flags &=
- ~(RD_KAFKA_TOPPAR_F_UNKNOWN |
- RD_KAFKA_TOPPAR_F_REMOVE);
-
- /* Remove from desp list since the
- * partition is now known. */
- rd_kafka_toppar_desired_unlink(rktp);
- } else {
- rktp = rd_kafka_toppar_new(rkt, i);
-
- rd_kafka_toppar_lock(rktp);
- rktp->rktp_flags &=
- ~(RD_KAFKA_TOPPAR_F_UNKNOWN |
- RD_KAFKA_TOPPAR_F_REMOVE);
- }
- rktps[i] = rktp;
-
- if (is_idempotent)
- /* Restore idempotent producer state for
- * this partition, if any. */
- rd_kafka_toppar_idemp_msgid_restore(rkt, rktp);
-
- rd_kafka_toppar_unlock(rktp);
-
- } else {
- /* Existing partition, grab our own reference. */
- rktps[i] = rd_kafka_toppar_keep(rkt->rkt_p[i]);
- /* Lose previous ref */
- rd_kafka_toppar_destroy(rkt->rkt_p[i]);
- }
- }
-
- /* Propagate notexist errors for desired partitions */
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) {
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED",
- "%s [%" PRId32
- "]: "
- "desired partition does not exist in cluster",
- rkt->rkt_topic->str, rktp->rktp_partition);
- rd_kafka_toppar_enq_error(
- rktp,
- rkt->rkt_err ? rkt->rkt_err
- : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- "desired partition is not available");
- }
-
- /* Remove excessive partitions */
- for (i = partition_cnt; i < rkt->rkt_partition_cnt; i++) {
- rktp = rkt->rkt_p[i];
-
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE",
- "%s [%" PRId32 "] no longer reported in metadata",
- rkt->rkt_topic->str, rktp->rktp_partition);
-
- rd_kafka_toppar_lock(rktp);
-
- /* Idempotent/Transactional producer:
- * We need to save each removed partition's base msgid for
- * the (rare) chance the partition comes back,
- * in which case we must continue with the correct msgid
- * in future ProduceRequests.
- *
- * These base msgids are restored (above) if/when partitions
- * come back and the PID,Epoch hasn't changed.
- *
- * One situation where this might happen is if a broker goes
- * out of sync and starts to wrongfully report an existing
- * topic as non-existent, triggering the removal of partitions
- * on the producer client. When metadata is eventually correct
- * again and the topic is "re-created" on the producer, it
- * must continue with the next msgid/baseseq. */
- if (is_idempotent && rd_kafka_pid_valid(rktp->rktp_eos.pid))
- rd_kafka_toppar_idemp_msgid_save(rkt, rktp);
-
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
-
- if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) {
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED",
- "Topic %s [%" PRId32
- "] is desired "
- "but no longer known: "
- "moving back on desired list",
- rkt->rkt_topic->str, rktp->rktp_partition);
-
- /* If this is a desired partition, move it back onto
- * the desired list since the partition is no longer known */
- rd_kafka_toppar_desired_link(rktp);
-
- if (!rd_kafka_terminating(rkt->rkt_rk))
- rd_kafka_toppar_enq_error(
- rktp,
- rkt->rkt_err
- ? rkt->rkt_err
- : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
- "desired partition is no longer "
- "available");
-
- rd_kafka_toppar_broker_delegate(rktp, NULL);
-
- } else {
- /* Tell handling broker to let go of the toppar */
- rd_kafka_toppar_broker_leave_for_remove(rktp);
- }
-
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp);
- }
-
- if (rkt->rkt_p)
- rd_free(rkt->rkt_p);
-
- rkt->rkt_p = rktps;
-
- rkt->rkt_partition_cnt = partition_cnt;
-
- return 1;
-}
-
-
-
-/**
- * Topic 'rkt' does not exist: propagate to interested parties.
- * The topic's state must have been set to NOTEXISTS and
- * rd_kafka_topic_partition_cnt_update() must have been called prior to
- * calling this function.
- *
- * Locks: rd_kafka_topic_*lock() must be held.
- */
-static void rd_kafka_topic_propagate_notexists(rd_kafka_topic_t *rkt,
- rd_kafka_resp_err_t err) {
- rd_kafka_toppar_t *rktp;
- int i;
-
- if (rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER)
- return;
-
-
- /* Notify consumers that the topic doesn't exist. */
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
- rd_kafka_toppar_enq_error(rktp, err, "topic does not exist");
-}
-
-
-/**
- * Assign messages on the UA partition to available partitions.
- * Locks: rd_kafka_topic_*lock() must be held.
- */
-static void rd_kafka_topic_assign_uas(rd_kafka_topic_t *rkt,
- rd_kafka_resp_err_t err) {
- rd_kafka_t *rk = rkt->rkt_rk;
- rd_kafka_toppar_t *rktp_ua;
- rd_kafka_msg_t *rkm, *tmp;
- rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas);
- rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed);
- rd_kafka_resp_err_t err_all = RD_KAFKA_RESP_ERR_NO_ERROR;
- int cnt;
-
- if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER)
- return;
-
- rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0);
- if (unlikely(!rktp_ua)) {
- rd_kafka_dbg(rk, TOPIC, "ASSIGNUA",
- "No UnAssigned partition available for %s",
- rkt->rkt_topic->str);
- return;
- }
-
- /* Assign all unassigned messages to the new partitions. */
- rd_kafka_toppar_lock(rktp_ua);
-
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) {
- err_all = rkt->rkt_err;
- rd_kafka_dbg(rk, TOPIC, "PARTCNT",
- "Failing all %i unassigned messages in "
- "topic %.*s due to permanent topic error: %s",
- rktp_ua->rktp_msgq.rkmq_msg_cnt,
- RD_KAFKAP_STR_PR(rkt->rkt_topic),
- rd_kafka_err2str(err_all));
- } else if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) {
- err_all = err;
- rd_kafka_dbg(rk, TOPIC, "PARTCNT",
- "Failing all %i unassigned messages in "
- "topic %.*s since topic does not exist: %s",
- rktp_ua->rktp_msgq.rkmq_msg_cnt,
- RD_KAFKAP_STR_PR(rkt->rkt_topic),
- rd_kafka_err2str(err_all));
- } else {
- rd_kafka_dbg(rk, TOPIC, "PARTCNT",
- "Partitioning %i unassigned messages in "
- "topic %.*s to %" PRId32 " partitions",
- rktp_ua->rktp_msgq.rkmq_msg_cnt,
- RD_KAFKAP_STR_PR(rkt->rkt_topic),
- rkt->rkt_partition_cnt);
- }
-
- rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq);
- cnt = uas.rkmq_msg_cnt;
- rd_kafka_toppar_unlock(rktp_ua);
-
- TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) {
- /* Fast-path for failing messages with forced partition or
- * when all messages are to fail. */
- if (err_all || (rkm->rkm_partition != RD_KAFKA_PARTITION_UA &&
- rkm->rkm_partition >= rkt->rkt_partition_cnt &&
- rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN)) {
- rd_kafka_msgq_enq(&failed, rkm);
- continue;
- }
-
- if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) {
- /* Desired partition not available */
- rd_kafka_msgq_enq(&failed, rkm);
- }
- }
-
- rd_kafka_dbg(rk, TOPIC, "UAS",
- "%i/%i messages were partitioned in topic %s",
- cnt - failed.rkmq_msg_cnt, cnt, rkt->rkt_topic->str);
-
- if (failed.rkmq_msg_cnt > 0) {
- /* Fail the messages */
- rd_kafka_dbg(rk, TOPIC, "UAS",
- "%" PRId32
- "/%i messages failed partitioning "
- "in topic %s",
- failed.rkmq_msg_cnt, cnt, rkt->rkt_topic->str);
- rd_kafka_dr_msgq(
- rkt, &failed,
- err_all ? err_all : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
- }
-
- rd_kafka_toppar_destroy(rktp_ua); /* from get() */
-}
-
-
-/**
- * @brief Mark topic as non-existent, unless metadata propagation configuration
- * disallows it.
- *
- * @param err Propagate non-existent topic using this error code.
- * If \p err is RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION it means the
- * topic is invalid and no propagation delay will be used.
- *
- * @returns true if the topic was marked as non-existent, else false.
- *
- * @locks topic_wrlock() MUST be held.
- */
-rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt,
- rd_kafka_resp_err_t err) {
- rd_ts_t remains_us;
- rd_bool_t permanent = err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION;
-
- if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) {
- /* Don't update metadata while terminating. */
- return rd_false;
- }
-
- rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR);
-
- remains_us =
- (rkt->rkt_ts_create +
- (rkt->rkt_rk->rk_conf.metadata_propagation_max_ms * 1000)) -
- rkt->rkt_ts_metadata;
-
- if (!permanent && rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN &&
- remains_us > 0) {
- /* Still allowing topic metadata to propagate. */
- rd_kafka_dbg(
- rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_METADATA, "TOPICPROP",
- "Topic %.*s does not exist, allowing %dms "
- "for metadata propagation before marking topic "
- "as non-existent",
- RD_KAFKAP_STR_PR(rkt->rkt_topic), (int)(remains_us / 1000));
- return rd_false;
- }
-
- rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_NOTEXISTS);
-
- rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-
- /* Update number of partitions */
- rd_kafka_topic_partition_cnt_update(rkt, 0);
-
- /* Purge messages with forced partition */
- rd_kafka_topic_assign_uas(rkt, err);
-
- /* Propagate nonexistent topic info */
- rd_kafka_topic_propagate_notexists(rkt, err);
-
- return rd_true;
-}
-
-/**
- * @brief Mark topic as errored, such as when topic authorization fails.
- *
- * @param err Propagate error using this error code.
- *
- * @returns true if the topic was marked as errored, else false.
- *
- * @locality any
- * @locks topic_wrlock() MUST be held.
- */
-rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt,
- rd_kafka_resp_err_t err) {
-
- if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) {
- /* Don't update metadata while terminating. */
- return rd_false;
- }
-
- rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR);
-
- /* Same error, ignore. */
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR && rkt->rkt_err == err)
- return rd_true;
-
- rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPICERROR",
- "Topic %s has permanent error: %s", rkt->rkt_topic->str,
- rd_kafka_err2str(err));
-
- rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_ERROR);
-
- rkt->rkt_err = err;
-
- /* Update number of partitions */
- rd_kafka_topic_partition_cnt_update(rkt, 0);
-
- /* Purge messages with forced partition */
- rd_kafka_topic_assign_uas(rkt, err);
-
- return rd_true;
-}
-
-
-
-/**
- * @brief Update a topic from metadata.
- *
- * @param mdt Topic metadata.
- * @param leader_epochs Array of per-partition leader epochs, or NULL.
- * The array size is identical to the partition count in
- * \p mdt.
- * @param ts_age absolute age (timestamp) of metadata.
- * @returns 1 if the number of partitions changed, 0 if not, and -1 if the
- * topic is unknown.
- *
- * @locks_required rd_kafka_*lock() MUST be held.
- */
-static int rd_kafka_topic_metadata_update(
- rd_kafka_topic_t *rkt,
- const struct rd_kafka_metadata_topic *mdt,
- const rd_kafka_partition_leader_epoch_t *leader_epochs,
- rd_ts_t ts_age) {
- rd_kafka_t *rk = rkt->rkt_rk;
- int upd = 0;
- int j;
- rd_kafka_broker_t **partbrokers;
- int leader_cnt = 0;
- int old_state;
-
- if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR)
- rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA",
- "Error in metadata reply for "
- "topic %s (PartCnt %i): %s",
- rkt->rkt_topic->str, mdt->partition_cnt,
- rd_kafka_err2str(mdt->err));
-
- if (unlikely(rd_kafka_terminating(rk))) {
- /* Don't update metadata while terminating; do this
- * after acquiring the lock for proper synchronisation */
- return -1;
- }
-
- /* Look up brokers before acquiring rkt lock to preserve lock order */
- partbrokers = rd_malloc(mdt->partition_cnt * sizeof(*partbrokers));
-
- for (j = 0; j < mdt->partition_cnt; j++) {
- if (mdt->partitions[j].leader == -1) {
- partbrokers[j] = NULL;
- continue;
- }
-
- partbrokers[j] = rd_kafka_broker_find_by_nodeid(
- rk, mdt->partitions[j].leader);
- }
-
-
- rd_kafka_topic_wrlock(rkt);
-
- old_state = rkt->rkt_state;
- rkt->rkt_ts_metadata = ts_age;
-
- /* Set topic state.
- * UNKNOWN_TOPIC_OR_PART may indicate that auto.create.topics failed */
- if (mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION /*invalid topic*/ ||
- mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
- rd_kafka_topic_set_notexists(rkt, mdt->err);
- else if (mdt->partition_cnt > 0)
- rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_EXISTS);
- else if (mdt->err)
- rd_kafka_topic_set_error(rkt, mdt->err);
-
- /* Update number of partitions, but not if there are
- * (possibly intermittent) errors (e.g., "Leader not available"). */
- if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) {
- upd += rd_kafka_topic_partition_cnt_update(rkt,
- mdt->partition_cnt);
-
- /* If the metadata times out for a topic (because all brokers
- * are down) the state will transition to S_UNKNOWN.
- * When updated metadata is eventually received there might
- * not be any change to partition count or leader,
- * but there may still be messages in the UA partition that
- * need to be assigned, so trigger an update for this case too.
- * Issue #1985. */
- if (old_state == RD_KAFKA_TOPIC_S_UNKNOWN)
- upd++;
- }
-
- /* Update leader for each partition */
- for (j = 0; j < mdt->partition_cnt; j++) {
- int r;
- rd_kafka_broker_t *leader;
- int32_t leader_epoch =
- leader_epochs ? leader_epochs[j].leader_epoch : -1;
-
- rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA",
- " Topic %s partition %i Leader %" PRId32
- " Epoch %" PRId32,
- rkt->rkt_topic->str, mdt->partitions[j].id,
- mdt->partitions[j].leader, leader_epoch);
-
- leader = partbrokers[j];
- partbrokers[j] = NULL;
-
- /* Update leader for partition */
- r = rd_kafka_toppar_leader_update(rkt, mdt->partitions[j].id,
- mdt->partitions[j].leader,
- leader, leader_epoch);
-
- upd += (r != 0 ? 1 : 0);
-
- if (leader) {
- if (r != -1)
- leader_cnt++;
- /* Drop reference to broker (from find()) */
- rd_kafka_broker_destroy(leader);
- }
- }
-
- /* If all partitions have leaders we can turn off fast leader query. */
- if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt)
- rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
-
- if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) {
- /* (Possibly intermittent) topic-wide error:
- * remove leaders for partitions */
-
- for (j = 0; j < rkt->rkt_partition_cnt; j++) {
- rd_kafka_toppar_t *rktp;
- if (!rkt->rkt_p[j])
- continue;
-
- rktp = rkt->rkt_p[j];
- rd_kafka_toppar_lock(rktp);
- rd_kafka_toppar_broker_delegate(rktp, NULL);
- rd_kafka_toppar_unlock(rktp);
- }
- }
-
- /* If there was an update to the partitions, try to assign
- * unassigned messages to new partitions, or fail them */
- if (upd > 0)
- rd_kafka_topic_assign_uas(
- rkt,
- mdt->err ? mdt->err : RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
-
- rd_kafka_topic_wrunlock(rkt);
-
- /* Lose broker references */
- for (j = 0; j < mdt->partition_cnt; j++)
- if (partbrokers[j])
- rd_kafka_broker_destroy(partbrokers[j]);
-
- rd_free(partbrokers);
-
- return upd;
-}
-
-/**
- * @brief Update topic by metadata, if topic is locally known.
- * @sa rd_kafka_topic_metadata_update()
- * @locks none
- */
-int rd_kafka_topic_metadata_update2(
- rd_kafka_broker_t *rkb,
- const struct rd_kafka_metadata_topic *mdt,
- const rd_kafka_partition_leader_epoch_t *leader_epochs) {
- rd_kafka_topic_t *rkt;
- int r;
-
- rd_kafka_wrlock(rkb->rkb_rk);
- if (!(rkt =
- rd_kafka_topic_find(rkb->rkb_rk, mdt->topic, 0 /*!lock*/))) {
- rd_kafka_wrunlock(rkb->rkb_rk);
- return -1; /* Ignore topics that we don't have locally. */
- }
-
- r = rd_kafka_topic_metadata_update(rkt, mdt, leader_epochs, rd_clock());
-
- rd_kafka_wrunlock(rkb->rkb_rk);
-
- rd_kafka_topic_destroy0(rkt); /* from find() */
-
- return r;
-}
-
-
-
-/**
- * @returns a list of all partitions (rktp's) for a topic.
- * @remark rd_kafka_topic_*lock() MUST be held.
- */
-static rd_list_t *rd_kafka_topic_get_all_partitions(rd_kafka_topic_t *rkt) {
- rd_list_t *list;
- rd_kafka_toppar_t *rktp;
- int i;
-
- list = rd_list_new(rkt->rkt_partition_cnt +
- rd_list_cnt(&rkt->rkt_desp) + 1 /*ua*/,
- NULL);
-
- for (i = 0; i < rkt->rkt_partition_cnt; i++)
- rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_p[i]));
-
- RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
- rd_list_add(list, rd_kafka_toppar_keep(rktp));
-
- if (rkt->rkt_ua)
- rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_ua));
-
- return list;
-}
-
-
-
-/**
- * Remove all partitions from a topic, including the ua.
- * Must only be called during rd_kafka_t termination.
- *
- * Locality: main thread
- */
-void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt) {
- rd_kafka_toppar_t *rktp;
- rd_list_t *partitions;
- int i;
-
- /* Purge messages for all partitions outside the topic_wrlock since
- * a message can hold a reference to the topic_t and thus
- * would trigger a deadlock through recursive locking. */
- rd_kafka_topic_rdlock(rkt);
- partitions = rd_kafka_topic_get_all_partitions(rkt);
- rd_kafka_topic_rdunlock(rkt);
-
- RD_LIST_FOREACH(rktp, partitions, i) {
- rd_kafka_toppar_lock(rktp);
- rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq);
- rd_kafka_toppar_purge_and_disable_queues(rktp);
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp);
- }
- rd_list_destroy(partitions);
-
- rd_kafka_topic_keep(rkt);
- rd_kafka_topic_wrlock(rkt);
-
- /* Setting the partition count to 0 moves all partitions to
- * the desired list (rkt_desp). */
- rd_kafka_topic_partition_cnt_update(rkt, 0);
-
- /* Now clean out the desired partitions list.
- * Use reverse traversal to avoid excessive memory shuffling
- * in rd_list_remove() */
- RD_LIST_FOREACH_REVERSE(rktp, &rkt->rkt_desp, i) {
- /* Keep a reference while deleting from desired list */
- rd_kafka_toppar_keep(rktp);
-
- rd_kafka_toppar_lock(rktp);
- rd_kafka_toppar_desired_del(rktp);
- rd_kafka_toppar_unlock(rktp);
-
- rd_kafka_toppar_destroy(rktp);
- }
-
- rd_kafka_assert(rkt->rkt_rk, rkt->rkt_partition_cnt == 0);
-
- if (rkt->rkt_p)
- rd_free(rkt->rkt_p);
-
- rkt->rkt_p = NULL;
- rkt->rkt_partition_cnt = 0;
-
- if ((rktp = rkt->rkt_ua)) {
- rkt->rkt_ua = NULL;
- rd_kafka_toppar_destroy(rktp);
- }
-
- rd_kafka_topic_wrunlock(rkt);
-
- rd_kafka_topic_destroy0(rkt);
-}
-
-
-
-/**
- * @returns a human-readable reason (the broker state) if a query
- * for the partition leader is necessary, else NULL.
- * @locality any
- * @locks rd_kafka_toppar_lock MUST be held
- */
-static const char *rd_kafka_toppar_needs_query(rd_kafka_t *rk,
- rd_kafka_toppar_t *rktp) {
- int broker_state;
-
- if (!rktp->rktp_broker)
- return "not delegated";
-
- if (rktp->rktp_broker->rkb_source == RD_KAFKA_INTERNAL)
- return "internal";
-
- broker_state = rd_kafka_broker_get_state(rktp->rktp_broker);
-
- if (broker_state >= RD_KAFKA_BROKER_STATE_UP)
- return NULL;
-
- if (!rk->rk_conf.sparse_connections)
- return "down";
-
- /* Partition assigned to broker but broker does not
- * need a persistent connection, this typically means
- * the partition is not being fetched or not being produced to,
- * so there is no need to re-query the leader. */
- if (broker_state == RD_KAFKA_BROKER_STATE_INIT)
- return NULL;
-
- /* This is most likely a persistent broker,
- * which means the partition leader should probably
- * be re-queried to see if it needs changing. */
- return "down";
-}
-
-
-
-/**
- * @brief Scan all topics and partitions for:
- * - timed out messages in UA partitions.
- * - topics that need to be created on the broker.
- * - topics whose metadata is too old.
- * - partitions with unknown leaders that require leader query.
- *
- * @locality rdkafka main thread
- */
-void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now) {
- rd_kafka_topic_t *rkt;
- rd_kafka_toppar_t *rktp;
- rd_list_t query_topics;
-
- rd_list_init(&query_topics, 0, rd_free);
-
- rd_kafka_rdlock(rk);
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
- int p;
- int query_this = 0;
- rd_kafka_msgq_t timedout = RD_KAFKA_MSGQ_INITIALIZER(timedout);
-
- rd_kafka_topic_wrlock(rkt);
-
- /* Check if metadata information has timed out. */
- if (rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN &&
- !rd_kafka_metadata_cache_topic_get(rk, rkt->rkt_topic->str,
- 1 /*only valid*/)) {
- rd_kafka_dbg(rk, TOPIC, "NOINFO",
- "Topic %s metadata information timed out "
- "(%" PRId64 "ms old)",
- rkt->rkt_topic->str,
- (rd_clock() - rkt->rkt_ts_metadata) /
- 1000);
- rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_UNKNOWN);
-
- query_this = 1;
- } else if (rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN) {
- rd_kafka_dbg(rk, TOPIC, "NOINFO",
- "Topic %s metadata information unknown",
- rkt->rkt_topic->str);
- query_this = 1;
- }
-
- /* Just need a read-lock from here on. */
- rd_kafka_topic_wrunlock(rkt);
- rd_kafka_topic_rdlock(rkt);
-
- if (rkt->rkt_partition_cnt == 0) {
- /* If this topic is unknown to the brokers, try
- * to create it by sending a topic-specific
- * metadata request.
- * This requires "auto.create.topics.enable=true"
- * on the brokers. */
- rd_kafka_dbg(rk, TOPIC, "NOINFO",
- "Topic %s partition count is zero: "
- "should refresh metadata",
- rkt->rkt_topic->str);
-
- query_this = 1;
-
- } else if (!rd_list_empty(&rkt->rkt_desp) &&
- rd_interval_immediate(&rkt->rkt_desp_refresh_intvl,
- 10 * 1000 * 1000, 0) > 0) {
- /* Query topic metadata if there are
- * desired (non-existent) partitions.
- * At most every 10 seconds. */
- rd_kafka_dbg(rk, TOPIC, "DESIRED",
- "Topic %s has %d desired partition(s): "
- "should refresh metadata",
- rkt->rkt_topic->str,
- rd_list_cnt(&rkt->rkt_desp));
-
- query_this = 1;
- }
-
- for (p = RD_KAFKA_PARTITION_UA; p < rkt->rkt_partition_cnt;
- p++) {
-
- if (!(rktp = rd_kafka_toppar_get(
- rkt, p,
- p == RD_KAFKA_PARTITION_UA ? rd_true
- : rd_false)))
- continue;
-
- rd_kafka_toppar_lock(rktp);
-
- /* Check that partition is delegated to a broker that
- * is up, else add topic to query list. */
- if (p != RD_KAFKA_PARTITION_UA) {
- const char *leader_reason =
- rd_kafka_toppar_needs_query(rk, rktp);
-
- if (leader_reason) {
- rd_kafka_dbg(rk, TOPIC, "QRYLEADER",
- "Topic %s [%" PRId32
- "]: "
- "broker is %s: re-query",
- rkt->rkt_topic->str,
- rktp->rktp_partition,
- leader_reason);
- query_this = 1;
- }
- } else {
- if (rk->rk_type == RD_KAFKA_PRODUCER) {
- /* Scan UA partition for message
- * timeouts.
- * Proper partitions are scanned by
- * their toppar broker thread. */
- rd_kafka_msgq_age_scan(
- rktp, &rktp->rktp_msgq, &timedout,
- now, NULL);
- }
- }
-
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_destroy(rktp);
- }
-
- rd_kafka_topic_rdunlock(rkt);
-
- /* Propagate delivery reports for timed out messages */
- if (rd_kafka_msgq_len(&timedout) > 0) {
- rd_kafka_dbg(
- rk, MSG, "TIMEOUT", "%s: %d message(s) timed out",
- rkt->rkt_topic->str, rd_kafka_msgq_len(&timedout));
- rd_kafka_dr_msgq(rkt, &timedout,
- RD_KAFKA_RESP_ERR__MSG_TIMED_OUT);
- }
-
- /* Need to re-query this topic's leader. */
- if (query_this &&
- !rd_list_find(&query_topics, rkt->rkt_topic->str,
- (void *)strcmp))
- rd_list_add(&query_topics,
- rd_strdup(rkt->rkt_topic->str));
- }
- rd_kafka_rdunlock(rk);
-
- if (!rd_list_empty(&query_topics))
- rd_kafka_metadata_refresh_topics(
- rk, NULL, &query_topics,
- rd_true /*force even if cached info exists*/,
- rk->rk_conf.allow_auto_create_topics,
- rd_false /*!cgrp_update*/, "refresh unavailable topics");
- rd_list_destroy(&query_topics);
-}
-
-
-/**
- * Locks: rd_kafka_topic_*lock() must be held.
- */
-int rd_kafka_topic_partition_available(const rd_kafka_topic_t *app_rkt,
- int32_t partition) {
- int avail;
- rd_kafka_toppar_t *rktp;
- rd_kafka_broker_t *rkb;
-
- /* This API must only be called from a partitioner and the
- * partitioner is always passed a proper topic */
- rd_assert(!rd_kafka_rkt_is_lw(app_rkt));
-
- rktp = rd_kafka_toppar_get(app_rkt, partition, 0 /*no ua-on-miss*/);
- if (unlikely(!rktp))
- return 0;
-
- rkb = rd_kafka_toppar_broker(rktp, 1 /*proper broker*/);
- avail = rkb ? 1 : 0;
- if (rkb)
- rd_kafka_broker_destroy(rkb);
- rd_kafka_toppar_destroy(rktp);
- return avail;
-}
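-
-/* A sketch of the intended caller of the availability check above: a
- * custom partitioner registered with
- * rd_kafka_topic_conf_set_partitioner_cb(). The modulo "hash" is
- * deliberately naive and the function name is hypothetical. */
-#if 0
-static int32_t example_partitioner(const rd_kafka_topic_t *rkt,
- const void *key,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- int32_t p = (int32_t)(keylen % (size_t)partition_cnt);
-
- if (!rd_kafka_topic_partition_available(rkt, p))
- return RD_KAFKA_PARTITION_UA; /* signal failure to the caller */
-
- return p;
-}
-#endif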
-
-
-void *rd_kafka_topic_opaque(const rd_kafka_topic_t *app_rkt) {
- const rd_kafka_lwtopic_t *lrkt;
-
- lrkt = rd_kafka_rkt_get_lw((rd_kafka_topic_t *)app_rkt);
- if (unlikely(lrkt != NULL)) {
- void *opaque;
- rd_kafka_topic_t *rkt;
-
- if (!(rkt = rd_kafka_topic_find(lrkt->lrkt_rk, lrkt->lrkt_topic,
- 1 /*lock*/)))
- return NULL;
-
- opaque = rkt->rkt_conf.opaque;
-
- rd_kafka_topic_destroy0(rkt); /* lose refcnt from find() */
-
- return opaque;
- }
-
- return app_rkt->rkt_conf.opaque;
-}
-
-
-int rd_kafka_topic_info_cmp(const void *_a, const void *_b) {
- const rd_kafka_topic_info_t *a = _a, *b = _b;
- int r;
-
- if ((r = strcmp(a->topic, b->topic)))
- return r;
-
- return RD_CMP(a->partition_cnt, b->partition_cnt);
-}
-
-
-/**
- * @brief String-compare a topic name to a topic info object's name.
- *
- * @param _a topic string (type char *)
- * @param _b rd_kafka_topic_info_t * pointer.
- */
-int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b) {
- const char *a = _a;
- const rd_kafka_topic_info_t *b = _b;
- return strcmp(a, b->topic);
-}
-
-
-/**
- * Allocate new topic_info.
- * \p topic is copied.
- */
-rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic,
- int partition_cnt) {
- rd_kafka_topic_info_t *ti;
- size_t tlen = strlen(topic) + 1;
-
- /* Allocate space for the topic along with the struct */
- ti = rd_malloc(sizeof(*ti) + tlen);
- ti->topic = (char *)(ti + 1);
- memcpy((char *)ti->topic, topic, tlen);
- ti->partition_cnt = partition_cnt;
-
- return ti;
-}
-
-/**
- * Destroy/free topic_info
- */
-void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti) {
- rd_free(ti);
-}
-
-
-/**
- * @brief Match \p topic to \p pattern.
- *
- * If pattern begins with "^" it is considered a regexp,
- * otherwise a simple string comparison is performed.
- *
- * @returns 1 on match, else 0.
- */
-int rd_kafka_topic_match(rd_kafka_t *rk,
- const char *pattern,
- const char *topic) {
- char errstr[128];
-
- if (*pattern == '^') {
- int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr));
- if (unlikely(r == -1))
- rd_kafka_dbg(rk, TOPIC, "TOPICREGEX",
- "Topic \"%s\" regex \"%s\" "
- "matching failed: %s",
- topic, pattern, errstr);
- return r == 1;
- } else
- return !strcmp(pattern, topic);
-}
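-
-/* Behaviour sketch for the matcher above, with hypothetical values:
- *
- * rd_kafka_topic_match(rk, "^telemetry\\..*", "telemetry.cpu") == 1
- * rd_kafka_topic_match(rk, "^telemetry\\..*", "orders") == 0
- * rd_kafka_topic_match(rk, "telemetry.cpu", "telemetry.cpu") == 1
- *
- * Only a leading '^' switches from plain strcmp() to regex matching. */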
-
-
-
-/**
- * @brief Trigger broker metadata query for topic leader.
- *
- * @locks none
- */
-void rd_kafka_topic_leader_query0(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- int do_rk_lock,
- rd_bool_t force) {
- rd_list_t topics;
-
- rd_list_init(&topics, 1, rd_free);
- rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str));
-
- rd_kafka_metadata_refresh_topics(
- rk, NULL, &topics, force, rk->rk_conf.allow_auto_create_topics,
- rd_false /*!cgrp_update*/, "leader query");
-
- rd_list_destroy(&topics);
-}
-
-
-
-/**
- * @brief Populate list \p topics with the topic names (strdupped char *) of
- * all locally known or cached topics.
- *
- * @param cache_cntp is an optional pointer to an int that will be set to the
- * number of entries added to \p topics from the
- * metadata cache.
- * @remark \p rk lock MUST NOT be held
- */
-void rd_kafka_local_topics_to_list(rd_kafka_t *rk,
- rd_list_t *topics,
- int *cache_cntp) {
- rd_kafka_topic_t *rkt;
- int cache_cnt;
-
- rd_kafka_rdlock(rk);
- rd_list_grow(topics, rk->rk_topic_cnt);
- TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link)
- rd_list_add(topics, rd_strdup(rkt->rkt_topic->str));
- cache_cnt = rd_kafka_metadata_cache_topics_to_list(rk, topics);
- if (cache_cntp)
- *cache_cntp = cache_cnt;
- rd_kafka_rdunlock(rk);
-}
-
-
-/**
- * @brief Unit test helper to set a topic's state to EXISTS
- * with the given number of partitions.
- */
-void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt,
- int partition_cnt,
- int32_t leader_id) {
- struct rd_kafka_metadata_topic mdt = {.topic =
- (char *)rkt->rkt_topic->str,
- .partition_cnt = partition_cnt};
- int i;
-
- mdt.partitions = rd_alloca(sizeof(*mdt.partitions) * partition_cnt);
-
- for (i = 0; i < partition_cnt; i++) {
- memset(&mdt.partitions[i], 0, sizeof(mdt.partitions[i]));
- mdt.partitions[i].id = i;
- mdt.partitions[i].leader = leader_id;
- }
-
- rd_kafka_wrlock(rkt->rkt_rk);
- rd_kafka_metadata_cache_topic_update(rkt->rkt_rk, &mdt, rd_true);
- rd_kafka_topic_metadata_update(rkt, &mdt, NULL, rd_clock());
- rd_kafka_wrunlock(rkt->rkt_rk);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h
deleted file mode 100644
index cbed9308a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012,2013 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_TOPIC_H_
-#define _RDKAFKA_TOPIC_H_
-
-#include "rdlist.h"
-
-extern const char *rd_kafka_topic_state_names[];
-
-
-/**
- * @struct Light-weight topic object which only contains the topic name.
- *
- * For use in outgoing APIs (like rd_kafka_message_t) when there is
- * no proper topic object available.
- *
- * @remark lrkt_magic[4] MUST be the first field and be set to "LRKT".
- */
-struct rd_kafka_lwtopic_s {
- char lrkt_magic[4]; /**< "LRKT" */
- rd_kafka_t *lrkt_rk; /**< Pointer to the client instance. */
- rd_refcnt_t lrkt_refcnt; /**< Refcount */
- char *lrkt_topic; /**< Points past this struct, allocated
- * along with the struct. */
-};
-
-/** Casts a topic_t to a light-weight lwtopic_t */
-#define rd_kafka_rkt_lw(rkt) ((rd_kafka_lwtopic_t *)rkt)
-
-#define rd_kafka_rkt_lw_const(rkt) ((const rd_kafka_lwtopic_t *)rkt)
-
-/**
- * @returns true if the topic object is a light-weight topic, else false.
- */
-static RD_UNUSED RD_INLINE rd_bool_t
-rd_kafka_rkt_is_lw(const rd_kafka_topic_t *app_rkt) {
- const rd_kafka_lwtopic_t *lrkt = rd_kafka_rkt_lw_const(app_rkt);
- return !memcmp(lrkt->lrkt_magic, "LRKT", 4);
-}
-
-/** @returns the lwtopic_t if \p rkt is a light-weight topic, else NULL. */
-static RD_UNUSED RD_INLINE rd_kafka_lwtopic_t *
-rd_kafka_rkt_get_lw(rd_kafka_topic_t *rkt) {
- if (rd_kafka_rkt_is_lw(rkt))
- return rd_kafka_rkt_lw(rkt);
- return NULL;
-}
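-
-/* The two topic flavours are discriminated purely by their leading
- * four-byte magic ("LRKT" for light-weight vs "IRKT" for internal),
- * which is why both structs must keep the magic as their very first
- * field: any rd_kafka_topic_t pointer can be probed with a 4-byte
- * memcmp() before being downcast. A sketch of the resulting dispatch
- * (this mirrors what rd_kafka_topic_name() does in rdkafka_topic.c):
- *
- * if (rd_kafka_rkt_is_lw(app_rkt))
- * name = rd_kafka_rkt_lw_const(app_rkt)->lrkt_topic;
- * else
- * name = app_rkt->rkt_topic->str;
- */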
-
-void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt);
-rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic);
-
-static RD_UNUSED RD_INLINE void
-rd_kafka_lwtopic_keep(rd_kafka_lwtopic_t *lrkt) {
- rd_refcnt_add(&lrkt->lrkt_refcnt);
-}
-
-
-
-/**
- * @struct Holds partition + transactional PID + base sequence msgid.
- *
- * Used in rkt_saved_partmsgids to restore transactional/idempotency state
- * for a partition that is lost from metadata for some time and then returns.
- */
-typedef struct rd_kafka_partition_msgid_s {
- TAILQ_ENTRY(rd_kafka_partition_msgid_s) link;
- int32_t partition;
- rd_kafka_pid_t pid;
- uint64_t msgid;
- uint64_t epoch_base_msgid;
- rd_ts_t ts;
-} rd_kafka_partition_msgid_t;
-
-
-/**
- * @struct Aux struct that holds a partition id and a leader epoch.
- * Used as temporary holding space for per-partition leader epochs
- * while parsing MetadataResponse.
- */
-typedef struct rd_kafka_partition_leader_epoch_s {
- int32_t partition_id;
- int32_t leader_epoch;
-} rd_kafka_partition_leader_epoch_t;
-
-
-/**
- * @struct Internal representation of a topic.
- *
- * @remark rkt_magic[4] MUST be the first field and be set to "IRKT".
- */
-struct rd_kafka_topic_s {
- char rkt_magic[4]; /**< "IRKT" */
-
- TAILQ_ENTRY(rd_kafka_topic_s) rkt_link;
-
- rd_refcnt_t rkt_refcnt;
-
- rwlock_t rkt_lock;
- rd_kafkap_str_t *rkt_topic;
-
- rd_kafka_toppar_t *rkt_ua; /**< Unassigned partition (-1) */
- rd_kafka_toppar_t **rkt_p; /**< Partition array */
- int32_t rkt_partition_cnt;
-
- int32_t rkt_sticky_partition; /**< Current sticky partition.
- * @locks rkt_lock */
- rd_interval_t rkt_sticky_intvl; /**< Interval to assign new
- * sticky partition. */
-
- rd_list_t rkt_desp; /* Desired partitions
- * that are not yet seen
- * in the cluster. */
- rd_interval_t rkt_desp_refresh_intvl; /**< Rate-limiter for
- * desired partition
- * metadata refresh. */
-
- rd_ts_t rkt_ts_create; /**< Topic object creation time. */
- rd_ts_t rkt_ts_metadata; /* Timestamp of last metadata
- * update for this topic. */
-
- rd_refcnt_t rkt_app_refcnt; /**< Number of active rkt's new()ed
- * by application. */
-
- enum { RD_KAFKA_TOPIC_S_UNKNOWN, /* No cluster information yet */
- RD_KAFKA_TOPIC_S_EXISTS, /* Topic exists in cluster */
- RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */
- RD_KAFKA_TOPIC_S_ERROR, /* Topic exists but is in an errored
- * state, such as auth failure. */
- } rkt_state;
-
- int rkt_flags;
-#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL \
- 0x1 /* Leader lost/unavailable \
- * for at least one partition. */
-
- rd_kafka_resp_err_t rkt_err; /**< Permanent error. */
-
- rd_kafka_t *rkt_rk;
-
- rd_avg_t rkt_avg_batchsize; /**< Average batch size */
- rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */
-
- rd_kafka_topic_conf_t rkt_conf;
-
- /** Idempotent/Txn producer:
- * The PID,Epoch,base Msgid state for removed partitions. */
- TAILQ_HEAD(, rd_kafka_partition_msgid_s) rkt_saved_partmsgids;
-};
-
-#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock)
-#define rd_kafka_topic_wrlock(rkt) rwlock_wrlock(&(rkt)->rkt_lock)
-#define rd_kafka_topic_rdunlock(rkt) rwlock_rdunlock(&(rkt)->rkt_lock)
-#define rd_kafka_topic_wrunlock(rkt) rwlock_wrunlock(&(rkt)->rkt_lock)
-
-
-
-/**
- * @brief Increase refcount and return topic object.
- */
-static RD_INLINE RD_UNUSED rd_kafka_topic_t *
-rd_kafka_topic_keep(rd_kafka_topic_t *rkt) {
- rd_kafka_lwtopic_t *lrkt;
- if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL))
- rd_kafka_lwtopic_keep(lrkt);
- else
- rd_refcnt_add(&rkt->rkt_refcnt);
- return rkt;
-}
-
-void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt);
-
-rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt);
-
-
-
-/**
- * @brief Lose a reference to the topic object, as taken by ..topic_keep().
- */
-static RD_INLINE RD_UNUSED void rd_kafka_topic_destroy0(rd_kafka_topic_t *rkt) {
- rd_kafka_lwtopic_t *lrkt;
- if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL))
- rd_kafka_lwtopic_destroy(lrkt);
- else if (unlikely(rd_refcnt_sub(&rkt->rkt_refcnt) == 0))
- rd_kafka_topic_destroy_final(rkt);
-}
-
-
-rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk,
- const char *topic,
- rd_kafka_topic_conf_t *conf,
- int *existing,
- int do_lock);
-
-rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func,
- int line,
- rd_kafka_t *rk,
- const char *topic,
- int do_lock);
-rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func,
- int line,
- rd_kafka_t *rk,
- const rd_kafkap_str_t *topic);
-#define rd_kafka_topic_find(rk, topic, do_lock) \
- rd_kafka_topic_find_fl(__FUNCTION__, __LINE__, rk, topic, do_lock)
-#define rd_kafka_topic_find0(rk, topic) \
- rd_kafka_topic_find0_fl(__FUNCTION__, __LINE__, rk, topic)
-int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b);
-
-void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt);
-
-rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt,
- rd_kafka_resp_err_t err);
-rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt,
- rd_kafka_resp_err_t err);
-
-/**
- * @returns the topic's permanent error, if any.
- *
- * @locality any
- * @locks_acquired rd_kafka_topic_rdlock(rkt)
- */
-static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
-rd_kafka_topic_get_error(rd_kafka_topic_t *rkt) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_topic_rdlock(rkt);
- if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR)
- err = rkt->rkt_err;
- rd_kafka_topic_rdunlock(rkt);
- return err;
-}
-
-int rd_kafka_topic_metadata_update2(
- rd_kafka_broker_t *rkb,
- const struct rd_kafka_metadata_topic *mdt,
- const rd_kafka_partition_leader_epoch_t *leader_epochs);
-
-void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now);
-
-
-typedef struct rd_kafka_topic_info_s {
- const char *topic; /**< Allocated along with struct */
- int partition_cnt;
-} rd_kafka_topic_info_t;
-
-int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b);
-int rd_kafka_topic_info_cmp(const void *_a, const void *_b);
-rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic,
- int partition_cnt);
-void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti);
-
-int rd_kafka_topic_match(rd_kafka_t *rk,
- const char *pattern,
- const char *topic);
-
-int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp,
- int32_t broker_id,
- rd_kafka_broker_t *rkb,
- const char *reason);
-
-int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp);
-
-rd_kafka_resp_err_t rd_kafka_topics_leader_query_sync(rd_kafka_t *rk,
- int all_topics,
- const rd_list_t *topics,
- int timeout_ms);
-void rd_kafka_topic_leader_query0(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- int do_rk_lock,
- rd_bool_t force);
-#define rd_kafka_topic_leader_query(rk, rkt) \
- rd_kafka_topic_leader_query0(rk, rkt, 1 /*lock*/, \
- rd_false /*dont force*/)
-
-#define rd_kafka_topic_fast_leader_query(rk) \
- rd_kafka_metadata_fast_leader_query(rk)
-
-void rd_kafka_local_topics_to_list(rd_kafka_t *rk,
- rd_list_t *topics,
- int *cache_cntp);
-
-void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt,
- int partition_cnt,
- int32_t leader_id);
-
-#endif /* _RDKAFKA_TOPIC_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c
deleted file mode 100644
index ae5895b29..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c
+++ /dev/null
@@ -1,1295 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifdef _WIN32
-#pragma comment(lib, "ws2_32.lib")
-#endif
-
-#define __need_IOV_MAX
-
-#define _DARWIN_C_SOURCE /* MSG_DONTWAIT */
-
-#include "rdkafka_int.h"
-#include "rdaddr.h"
-#include "rdkafka_transport.h"
-#include "rdkafka_transport_int.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_interceptor.h"
-
-#include <errno.h>
-
-/* AIX doesn't have MSG_DONTWAIT */
-#ifndef MSG_DONTWAIT
-#define MSG_DONTWAIT MSG_NONBLOCK
-#endif
-
-#if WITH_SSL
-#include "rdkafka_ssl.h"
-#endif
-
-/** Current thread's rd_kafka_transport_t instance.
- * This pointer is set up when calling any OpenSSL APIs that might
- * trigger SSL callbacks, and is used to retrieve the SSL object's
- * corresponding rd_kafka_transport_t instance.
- * There is a set/get_ex_data() API in OpenSSL, but it requires storing
- * a unique index somewhere, which we can't do without having a singleton
- * object, so instead we cut out the middle man and store the
- * rd_kafka_transport_t pointer directly in the thread-local memory. */
-RD_TLS rd_kafka_transport_t *rd_kafka_curr_transport;
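-
-/* A minimal sketch of the thread-local pattern above, assuming a
- * callback-based library with no convenient per-call user-data slot:
- * stash the context in TLS before calling into the library, then recover
- * it inside the callback. All example_* names are hypothetical. */
-#if 0
-typedef struct example_ctx_s {
- int id;
-} example_ctx_t;
-
-static RD_TLS example_ctx_t *example_curr_ctx;
-
-static int example_verify_cb(void) {
- example_ctx_t *ctx = example_curr_ctx; /* recover caller's context */
- return ctx != NULL;
-}
-
-static void example_handshake(example_ctx_t *ctx) {
- example_curr_ctx = ctx; /* set before any callback can fire */
- /* a library call here may invoke example_verify_cb() */
-}
-#endif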
-
-
-static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout);
-
-
-/**
- * Low-level socket close
- */
-static void rd_kafka_transport_close0(rd_kafka_t *rk, rd_socket_t s) {
- if (rk->rk_conf.closesocket_cb)
- rk->rk_conf.closesocket_cb((int)s, rk->rk_conf.opaque);
- else
- rd_socket_close(s);
-}
-
-/**
- * Close and destroy a transport handle
- */
-void rd_kafka_transport_close(rd_kafka_transport_t *rktrans) {
-#if WITH_SSL
- rd_kafka_curr_transport = rktrans;
- if (rktrans->rktrans_ssl)
- rd_kafka_transport_ssl_close(rktrans);
-#endif
-
- rd_kafka_sasl_close(rktrans);
-
- if (rktrans->rktrans_recv_buf)
- rd_kafka_buf_destroy(rktrans->rktrans_recv_buf);
-
-#ifdef _WIN32
- WSACloseEvent(rktrans->rktrans_wsaevent);
-#endif
-
- if (rktrans->rktrans_s != -1)
- rd_kafka_transport_close0(rktrans->rktrans_rkb->rkb_rk,
- rktrans->rktrans_s);
-
- rd_free(rktrans);
-}
-
-/**
- * @brief shutdown(2) a transport's underlying socket.
- *
- * This will prohibit further sends and receives.
- * rd_kafka_transport_close() must still be called to close the socket.
- */
-void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans) {
- shutdown(rktrans->rktrans_s,
-#ifdef _WIN32
- SD_BOTH
-#else
- SHUT_RDWR
-#endif
- );
-}
-
-
-#ifndef _WIN32
-/**
- * @brief sendmsg() abstraction, converting a list of segments to iovecs.
- * @remark should only be called if the number of segments is > 1.
- */
-static ssize_t rd_kafka_transport_socket_sendmsg(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size) {
- struct iovec iov[IOV_MAX];
- struct msghdr msg = {.msg_iov = iov};
- size_t iovlen;
- ssize_t r;
- size_t r2;
-
- rd_slice_get_iov(slice, msg.msg_iov, &iovlen, IOV_MAX,
- /* FIXME: Measure the effects of this */
- rktrans->rktrans_sndbuf_size);
- msg.msg_iovlen = (int)iovlen;
-
-#ifdef __sun
- /* See recvmsg() comment. Setting it here to be safe. */
- rd_socket_errno = EAGAIN;
-#endif
-
- r = sendmsg(rktrans->rktrans_s, &msg,
- MSG_DONTWAIT
-#ifdef MSG_NOSIGNAL
- | MSG_NOSIGNAL
-#endif
- );
-
- if (r == -1) {
- if (rd_socket_errno == EAGAIN)
- return 0;
- rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno));
- return -1;
- }
-
- /* Update buffer read position */
- r2 = rd_slice_read(slice, NULL, (size_t)r);
- rd_assert((size_t)r == r2 &&
- *"BUG: wrote more bytes than available in slice");
-
- return r;
-}
-#endif
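
For reference, a minimal standalone illustration of the scatter-gather send the function above performs: several non-contiguous segments written with a single sendmsg() call, here over a socketpair (POSIX only; error handling abbreviated, names illustrative):

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void) {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1)
                return 1;

        /* Two separate buffers sent with one syscall. */
        struct iovec iov[2] = {
                {.iov_base = "hello ", .iov_len = 6},
                {.iov_base = "world",  .iov_len = 5},
        };
        struct msghdr msg = {.msg_iov = iov, .msg_iovlen = 2};

        ssize_t sent = sendmsg(sv[0], &msg, 0);

        char buf[32] = {0};
        read(sv[1], buf, sizeof(buf) - 1);
        printf("sent=%zd recv=\"%s\"\n", sent, buf);
        close(sv[0]);
        close(sv[1]);
        return 0;
}
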
-
-
-/**
- * @brief Plain send() abstraction
- */
-static ssize_t rd_kafka_transport_socket_send0(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size) {
- ssize_t sum = 0;
- const void *p;
- size_t rlen;
-
- while ((rlen = rd_slice_peeker(slice, &p))) {
- ssize_t r;
- size_t r2;
-
- r = send(rktrans->rktrans_s, p,
-#ifdef _WIN32
- (int)rlen, (int)0
-#else
- rlen, 0
-#endif
- );
-
-#ifdef _WIN32
- if (unlikely(r == RD_SOCKET_ERROR)) {
- if (sum > 0 || rd_socket_errno == WSAEWOULDBLOCK) {
- rktrans->rktrans_blocked = rd_true;
- return sum;
- } else {
- rd_snprintf(
- errstr, errstr_size, "%s",
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
- }
-
- rktrans->rktrans_blocked = rd_false;
-#else
- if (unlikely(r <= 0)) {
- if (r == 0 || rd_socket_errno == EAGAIN)
- return 0;
- rd_snprintf(errstr, errstr_size, "%s",
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
-#endif
-
- /* Update buffer read position */
- r2 = rd_slice_read(slice, NULL, (size_t)r);
- rd_assert((size_t)r == r2 &&
- *"BUG: wrote more bytes than available in slice");
-
-
- sum += r;
-
- /* FIXME: remove this and try again immediately and let
- * the next write() call fail instead? */
- if ((size_t)r < rlen)
- break;
- }
-
- return sum;
-}
-
-
-static ssize_t rd_kafka_transport_socket_send(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size) {
-#ifndef _WIN32
- /* FIXME: Use sendmsg() with iovecs if there's more than one segment
- * remaining, otherwise (or if platform does not have sendmsg)
- * use plain send(). */
- return rd_kafka_transport_socket_sendmsg(rktrans, slice, errstr,
- errstr_size);
-#endif
- return rd_kafka_transport_socket_send0(rktrans, slice, errstr,
- errstr_size);
-}
-
-
-
-#ifndef _WIN32
-/**
- * @brief recvmsg() abstraction, converting a list of segments to iovecs.
- * @remark should only be called if the number of segments is > 1.
- */
-static ssize_t rd_kafka_transport_socket_recvmsg(rd_kafka_transport_t *rktrans,
- rd_buf_t *rbuf,
- char *errstr,
- size_t errstr_size) {
- ssize_t r;
- struct iovec iov[IOV_MAX];
- struct msghdr msg = {.msg_iov = iov};
- size_t iovlen;
-
- rd_buf_get_write_iov(rbuf, msg.msg_iov, &iovlen, IOV_MAX,
- /* FIXME: Measure the effects of this */
- rktrans->rktrans_rcvbuf_size);
- msg.msg_iovlen = (int)iovlen;
-
-#ifdef __sun
- /* SunOS doesn't seem to set errno when recvmsg() fails
- * due to no data and MSG_DONTWAIT is set. */
- rd_socket_errno = EAGAIN;
-#endif
- r = recvmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT);
- if (unlikely(r <= 0)) {
- if (r == -1 && rd_socket_errno == EAGAIN)
- return 0;
- else if (r == 0 || (r == -1 && rd_socket_errno == ECONNRESET)) {
- /* Receive 0 after POLLIN event means
- * connection closed. */
- rd_snprintf(errstr, errstr_size, "Disconnected");
- return -1;
- } else if (r == -1) {
- rd_snprintf(errstr, errstr_size, "%s",
- rd_strerror(errno));
- return -1;
- }
- }
-
- /* Update buffer write position */
- rd_buf_write(rbuf, NULL, (size_t)r);
-
- return r;
-}
-#endif
-
-
-/**
- * @brief Plain recv()
- */
-static ssize_t rd_kafka_transport_socket_recv0(rd_kafka_transport_t *rktrans,
- rd_buf_t *rbuf,
- char *errstr,
- size_t errstr_size) {
- ssize_t sum = 0;
- void *p;
- size_t len;
-
- while ((len = rd_buf_get_writable(rbuf, &p))) {
- ssize_t r;
-
- r = recv(rktrans->rktrans_s, p,
-#ifdef _WIN32
- (int)
-#endif
- len,
- 0);
-
- if (unlikely(r == RD_SOCKET_ERROR)) {
- if (rd_socket_errno == EAGAIN
-#ifdef _WIN32
- || rd_socket_errno == WSAEWOULDBLOCK
-#endif
- )
- return sum;
- else {
- rd_snprintf(
- errstr, errstr_size, "%s",
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
- } else if (unlikely(r == 0)) {
- /* Receive 0 after POLLIN event means
- * connection closed. */
- rd_snprintf(errstr, errstr_size, "Disconnected");
- return -1;
- }
-
- /* Update buffer write position */
- rd_buf_write(rbuf, NULL, (size_t)r);
-
- sum += r;
-
- /* FIXME: remove this and try again immediately and let
- * the next recv() call fail instead? */
- if ((size_t)r < len)
- break;
- }
- return sum;
-}
-
-
-static ssize_t rd_kafka_transport_socket_recv(rd_kafka_transport_t *rktrans,
- rd_buf_t *buf,
- char *errstr,
- size_t errstr_size) {
-#ifndef _WIN32
- return rd_kafka_transport_socket_recvmsg(rktrans, buf, errstr,
- errstr_size);
-#endif
- return rd_kafka_transport_socket_recv0(rktrans, buf, errstr,
- errstr_size);
-}
-
-
-
-/**
- * The CONNECT state has either failed (errstr != NULL) or completed
- * (TCP is up, SSL is working, etc.).
- * From this state we either hand control back to the broker code,
- * or, if authentication is configured, we enter the AUTH state.
- */
-void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans,
- char *errstr) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
- rd_kafka_curr_transport = rktrans;
-
- rd_kafka_broker_connect_done(rkb, errstr);
-}
-
-
-
-ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size) {
- ssize_t r;
-#if WITH_SSL
- if (rktrans->rktrans_ssl) {
- rd_kafka_curr_transport = rktrans;
- r = rd_kafka_transport_ssl_send(rktrans, slice, errstr,
- errstr_size);
- } else
-#endif
- r = rd_kafka_transport_socket_send(rktrans, slice, errstr,
- errstr_size);
-
- return r;
-}
-
-
-ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans,
- rd_buf_t *rbuf,
- char *errstr,
- size_t errstr_size) {
- ssize_t r;
-
-#if WITH_SSL
- if (rktrans->rktrans_ssl) {
- rd_kafka_curr_transport = rktrans;
- r = rd_kafka_transport_ssl_recv(rktrans, rbuf, errstr,
- errstr_size);
- } else
-#endif
- r = rd_kafka_transport_socket_recv(rktrans, rbuf, errstr,
- errstr_size);
-
- return r;
-}
-
-
-
-/**
- * @brief Notify transport layer of full request sent.
- */
-void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf) {
- rd_kafka_transport_t *rktrans = rkb->rkb_transport;
-
- /* Call on_request_sent interceptors */
- rd_kafka_interceptors_on_request_sent(
- rkb->rkb_rk, (int)rktrans->rktrans_s, rkb->rkb_name,
- rkb->rkb_nodeid, rkbuf->rkbuf_reqhdr.ApiKey,
- rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_corrid,
- rd_slice_size(&rkbuf->rkbuf_reader));
-}
-
-
-
-/**
- * Length framed receive handling.
- * Currently only supports the following framing:
- * [int32_t:big_endian_length_of_payload][payload]
- *
- * To be used on POLLIN event, will return:
- * -1: on fatal error (errstr will be updated, *rkbufp remains unset)
- * 0: still waiting for data (*rkbufp remains unset)
- * 1: data complete, (buffer returned in *rkbufp)
- */
-int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans,
- rd_kafka_buf_t **rkbufp,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf;
- ssize_t r;
- const int log_decode_errors = LOG_ERR;
-
- /* States:
- * !rktrans_recv_buf: initial state; set up buf to receive header.
- * rkbuf_totlen == 0: awaiting header
- * rkbuf_totlen > 0: awaiting payload
- */
-
- if (!rkbuf) {
- rkbuf = rd_kafka_buf_new(1, 4 /*length field's length*/);
- /* Set up buffer reader for the length field */
- rd_buf_write_ensure(&rkbuf->rkbuf_buf, 4, 4);
- rktrans->rktrans_recv_buf = rkbuf;
- }
-
-
- r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf, errstr,
- errstr_size);
- if (r == 0)
- return 0;
- else if (r == -1)
- return -1;
-
- if (rkbuf->rkbuf_totlen == 0) {
- /* Frame length not known yet. */
- int32_t frame_len;
-
- if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) {
- /* Wait for entire frame header. */
- return 0;
- }
-
- /* Initialize reader */
- rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, 4);
-
- /* Read header: payload length */
- rd_kafka_buf_read_i32(rkbuf, &frame_len);
-
- if (frame_len < 0 ||
- frame_len > rktrans->rktrans_rkb->rkb_rk->rk_conf
- .recv_max_msg_size) {
- rd_snprintf(errstr, errstr_size,
- "Invalid frame size %" PRId32, frame_len);
- return -1;
- }
-
- rkbuf->rkbuf_totlen = 4 + frame_len;
- if (frame_len == 0) {
- /* Payload is empty, we're done. */
- rktrans->rktrans_recv_buf = NULL;
- *rkbufp = rkbuf;
- return 1;
- }
-
- /* Allocate memory to hold the entire frame payload in contiguous
- * memory. */
- rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, frame_len);
-
- /* Try reading directly; there is probably more data available */
- return rd_kafka_transport_framed_recv(rktrans, rkbufp, errstr,
- errstr_size);
- }
-
- if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) {
- /* Payload is complete. */
- rktrans->rktrans_recv_buf = NULL;
- *rkbufp = rkbuf;
- return 1;
- }
-
- /* Wait for more data */
- return 0;
-
-err_parse:
- rd_snprintf(errstr, errstr_size, "Frame header parsing failed: %s",
- rd_kafka_err2str(rkbuf->rkbuf_err));
- return -1;
-}
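
The framing parsed above is a four-byte big-endian length prefix followed by the payload. A minimal sketch of encoding and decoding that framing (the frame_* helpers are hypothetical, not librdkafka API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write [int32 big-endian length][payload] into out; returns frame size. */
static size_t frame_encode(uint8_t *out, const void *payload, int32_t len) {
        out[0] = (uint8_t)(len >> 24);
        out[1] = (uint8_t)(len >> 16);
        out[2] = (uint8_t)(len >> 8);
        out[3] = (uint8_t)len;
        memcpy(out + 4, payload, (size_t)len);
        return 4 + (size_t)len;
}

/* Read the big-endian payload length from the frame header. */
static int32_t frame_decode_len(const uint8_t *in) {
        return (int32_t)((uint32_t)in[0] << 24 | (uint32_t)in[1] << 16 |
                         (uint32_t)in[2] << 8 | (uint32_t)in[3]);
}

int main(void) {
        uint8_t buf[64];
        size_t total = frame_encode(buf, "kafka", 5);
        int32_t len  = frame_decode_len(buf);
        printf("total=%zu payload_len=%d payload=%.*s\n", total, len,
               (int)len, (const char *)buf + 4);
        return 0;
}
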
-
-
-/**
- * @brief Final socket setup after a connection has been established
- */
-void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- unsigned int slen;
-
- /* Set socket send & receive buffer sizes if configured */
- if (rkb->rkb_rk->rk_conf.socket_sndbuf_size != 0) {
- if (setsockopt(
- rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
- (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size,
- sizeof(rkb->rkb_rk->rk_conf.socket_sndbuf_size)) ==
- RD_SOCKET_ERROR)
- rd_rkb_log(rkb, LOG_WARNING, "SNDBUF",
- "Failed to set socket send "
- "buffer size to %i: %s",
- rkb->rkb_rk->rk_conf.socket_sndbuf_size,
- rd_socket_strerror(rd_socket_errno));
- }
-
- if (rkb->rkb_rk->rk_conf.socket_rcvbuf_size != 0) {
- if (setsockopt(
- rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
- (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
- sizeof(rkb->rkb_rk->rk_conf.socket_rcvbuf_size)) ==
- RD_SOCKET_ERROR)
- rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
- "Failed to set socket receive "
- "buffer size to %i: %s",
- rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
- rd_socket_strerror(rd_socket_errno));
- }
-
- /* Get send and receive buffer sizes to allow limiting
- * the total number of bytes passed with iovecs to sendmsg()
- * and recvmsg(). */
- slen = sizeof(rktrans->rktrans_rcvbuf_size);
- if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
- (void *)&rktrans->rktrans_rcvbuf_size,
- &slen) == RD_SOCKET_ERROR) {
- rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
- "Failed to get socket receive "
- "buffer size: %s: assuming 1MB",
- rd_socket_strerror(rd_socket_errno));
- rktrans->rktrans_rcvbuf_size = 1024 * 1024;
- } else if (rktrans->rktrans_rcvbuf_size < 1024 * 64)
- rktrans->rktrans_rcvbuf_size =
- 1024 * 64; /* Use at least 64KB */
-
- slen = sizeof(rktrans->rktrans_sndbuf_size);
- if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
- (void *)&rktrans->rktrans_sndbuf_size,
- &slen) == RD_SOCKET_ERROR) {
- rd_rkb_log(rkb, LOG_WARNING, "SNDBUF",
- "Failed to get socket send "
- "buffer size: %s: assuming 1MB",
- rd_socket_strerror(rd_socket_errno));
- rktrans->rktrans_sndbuf_size = 1024 * 1024;
- } else if (rktrans->rktrans_sndbuf_size < 1024 * 64)
- rktrans->rktrans_sndbuf_size =
- 1024 * 64; /* Use at least 64KB */
-
-
-#ifdef TCP_NODELAY
- if (rkb->rkb_rk->rk_conf.socket_nagle_disable) {
- int one = 1;
- if (setsockopt(rktrans->rktrans_s, IPPROTO_TCP, TCP_NODELAY,
- (void *)&one, sizeof(one)) == RD_SOCKET_ERROR)
- rd_rkb_log(rkb, LOG_WARNING, "NAGLE",
- "Failed to disable Nagle (TCP_NODELAY) "
- "on socket: %s",
- rd_socket_strerror(rd_socket_errno));
- }
-#endif
-}
-
-
-/**
- * TCP connection established.
- * Set up socket options, SSL, etc.
- *
- * Locality: broker thread
- */
-static void rd_kafka_transport_connected(rd_kafka_transport_t *rktrans) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
- rd_rkb_dbg(
- rkb, BROKER, "CONNECT", "Connected to %s",
- rd_sockaddr2str(rkb->rkb_addr_last,
- RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY));
-
- rd_kafka_transport_post_connect_setup(rktrans);
-
-#if WITH_SSL
- if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL ||
- rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) {
- char errstr[512];
-
- rd_kafka_broker_lock(rkb);
- rd_kafka_broker_set_state(rkb,
- RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE);
- rd_kafka_broker_unlock(rkb);
-
- /* Set up SSL connection.
- * This is also an asynchronous operation so don't
- * propagate to broker_connect_done() just yet. */
- if (rd_kafka_transport_ssl_connect(rkb, rktrans, errstr,
- sizeof(errstr)) == -1) {
- rd_kafka_transport_connect_done(rktrans, errstr);
- return;
- }
- return;
- }
-#endif
-
- /* Propagate connect success */
- rd_kafka_transport_connect_done(rktrans, NULL);
-}
-
-
-
-/**
- * @brief Retrieve the kernel SO_ERROR in \p errp for the given transport.
- * @returns 0 if getsockopt() was successful (and \p errp can be trusted),
- * else -1 in which case \p errp 's value is undefined.
- */
-static int rd_kafka_transport_get_socket_error(rd_kafka_transport_t *rktrans,
- int *errp) {
- socklen_t intlen = sizeof(*errp);
-
- if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_ERROR, (void *)errp,
- &intlen) == -1) {
- rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR",
- "Failed to get socket error: %s",
- rd_socket_strerror(rd_socket_errno));
- return -1;
- }
-
- return 0;
-}
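
This helper, together with the CONNECT handling below, implements the standard non-blocking connect sequence: connect() returns EINPROGRESS, poll() signals POLLOUT when the attempt completes, and getsockopt(SO_ERROR) reveals the outcome. A minimal POSIX sketch of that sequence (illustrative; it connects to the loopback discard port and will typically print a refusal):

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
        int s = socket(AF_INET, SOCK_STREAM, 0);
        fcntl(s, F_SETFL, O_NONBLOCK);

        struct sockaddr_in sin = {0};
        sin.sin_family         = AF_INET;
        sin.sin_port           = htons(9); /* discard port: likely refused */
        sin.sin_addr.s_addr    = htonl(INADDR_LOOPBACK);

        if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) == -1 &&
            errno != EINPROGRESS) {
                perror("connect");
                return 1;
        }

        /* Wait for the asynchronous connect attempt to complete. */
        struct pollfd pfd = {.fd = s, .events = POLLOUT};
        poll(&pfd, 1, 5000);

        /* Read the result of the attempt from SO_ERROR. */
        int err       = 0;
        socklen_t len = sizeof(err);
        getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len);
        printf("connect result: %s\n", err ? strerror(err) : "success");
        close(s);
        return 0;
}
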
-
-
-/**
- * IO event handler.
- *
- * @param socket_errstr Optional (else NULL) error string from the
- * socket layer.
- *
- * Locality: broker thread
- */
-static void rd_kafka_transport_io_event(rd_kafka_transport_t *rktrans,
- int events,
- const char *socket_errstr) {
- char errstr[512];
- int r;
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
- switch (rkb->rkb_state) {
- case RD_KAFKA_BROKER_STATE_CONNECT:
- /* Asynchronous connect finished, read status. */
- if (!(events & (POLLOUT | POLLERR | POLLHUP)))
- return;
-
- if (socket_errstr)
- rd_kafka_broker_fail(
- rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
- "Connect to %s failed: %s",
- rd_sockaddr2str(rkb->rkb_addr_last,
- RD_SOCKADDR2STR_F_PORT |
- RD_SOCKADDR2STR_F_FAMILY),
- socket_errstr);
- else if (rd_kafka_transport_get_socket_error(rktrans, &r) ==
- -1) {
- rd_kafka_broker_fail(
- rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
- "Connect to %s failed: "
- "unable to get status from "
- "socket %d: %s",
- rd_sockaddr2str(rkb->rkb_addr_last,
- RD_SOCKADDR2STR_F_PORT |
- RD_SOCKADDR2STR_F_FAMILY),
- rktrans->rktrans_s, rd_strerror(rd_socket_errno));
- } else if (r != 0) {
- /* Connect failed */
- rd_snprintf(
- errstr, sizeof(errstr), "Connect to %s failed: %s",
- rd_sockaddr2str(rkb->rkb_addr_last,
- RD_SOCKADDR2STR_F_PORT |
- RD_SOCKADDR2STR_F_FAMILY),
- rd_strerror(r));
-
- rd_kafka_transport_connect_done(rktrans, errstr);
- } else {
- /* Connect succeeded */
- rd_kafka_transport_connected(rktrans);
- }
- break;
-
- case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE:
-#if WITH_SSL
- rd_assert(rktrans->rktrans_ssl);
-
- /* Currently setting up SSL connection:
- * perform handshake. */
- r = rd_kafka_transport_ssl_handshake(rktrans);
-
- if (r == 0 /* handshake still in progress */ &&
- (events & POLLHUP)) {
- rd_kafka_broker_conn_closed(
- rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected");
- return;
- }
-
-#else
- RD_NOTREACHED();
-#endif
- break;
-
- case RD_KAFKA_BROKER_STATE_AUTH_LEGACY:
- /* SASL authentication.
- * Prior to broker version v1.0.0 this is performed
- * directly on the socket without Kafka framing. */
- if (rd_kafka_sasl_io_event(rktrans, events, errstr,
- sizeof(errstr)) == -1) {
- rd_kafka_broker_fail(
- rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION,
- "SASL authentication failure: %s", errstr);
- return;
- }
-
- if (events & POLLHUP) {
- rd_kafka_broker_fail(rkb, LOG_ERR,
- RD_KAFKA_RESP_ERR__AUTHENTICATION,
- "Disconnected");
-
- return;
- }
-
- break;
-
- case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY:
- case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE:
- case RD_KAFKA_BROKER_STATE_AUTH_REQ:
- case RD_KAFKA_BROKER_STATE_UP:
- case RD_KAFKA_BROKER_STATE_UPDATE:
-
- if (events & POLLIN) {
- while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
- rd_kafka_recv(rkb) > 0)
- ;
-
- /* If connection went down: bail out early */
- if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN)
- return;
- }
-
- if (events & POLLHUP) {
- rd_kafka_broker_conn_closed(
- rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected");
- return;
- }
-
- if (events & POLLOUT) {
- while (rd_kafka_send(rkb) > 0)
- ;
- }
- break;
-
- case RD_KAFKA_BROKER_STATE_INIT:
- case RD_KAFKA_BROKER_STATE_DOWN:
- case RD_KAFKA_BROKER_STATE_TRY_CONNECT:
- rd_kafka_assert(rkb->rkb_rk, !*"bad state");
- }
-}
-
-
-
-#ifdef _WIN32
-/**
- * @brief Convert WSA FD_.. events to POLL.. events.
- */
-static RD_INLINE int rd_kafka_transport_wsa2events(long wevents) {
- int events = 0;
-
- if (unlikely(wevents == 0))
- return 0;
-
- if (wevents & FD_READ)
- events |= POLLIN;
- if (wevents & (FD_WRITE | FD_CONNECT))
- events |= POLLOUT;
- if (wevents & FD_CLOSE)
- events |= POLLHUP;
-
- rd_dassert(events != 0);
-
- return events;
-}
-
-/**
- * @brief Convert POLL.. events to WSA FD_.. events.
- */
-static RD_INLINE int rd_kafka_transport_events2wsa(int events,
- rd_bool_t is_connecting) {
- long wevents = FD_CLOSE;
-
- if (unlikely(is_connecting))
- return wevents | FD_CONNECT;
-
- if (events & POLLIN)
- wevents |= FD_READ;
- if (events & POLLOUT)
- wevents |= FD_WRITE;
-
- return wevents;
-}
-
-
-/**
- * @returns the WinSocket events (as POLL.. events) for the broker socket.
- */
-static int rd_kafka_transport_get_wsa_events(rd_kafka_transport_t *rktrans) {
- const int try_bits[4 * 2] = {FD_READ_BIT, POLLIN, FD_WRITE_BIT,
- POLLOUT, FD_CONNECT_BIT, POLLOUT,
- FD_CLOSE_BIT, POLLHUP};
- int r, i;
- WSANETWORKEVENTS netevents;
- int events = 0;
- const char *socket_errstr = NULL;
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
-
- /* Get Socket event */
- r = WSAEnumNetworkEvents(rktrans->rktrans_s, rktrans->rktrans_wsaevent,
- &netevents);
- if (unlikely(r == SOCKET_ERROR)) {
- rd_rkb_log(rkb, LOG_ERR, "WSAWAIT",
- "WSAEnumNetworkEvents() failed: %s",
- rd_socket_strerror(rd_socket_errno));
- socket_errstr = rd_socket_strerror(rd_socket_errno);
- return POLLHUP | POLLERR;
- }
-
- /* Get fired events and errors for each event type */
- for (i = 0; i < RD_ARRAYSIZE(try_bits); i += 2) {
- const int bit = try_bits[i];
- const int event = try_bits[i + 1];
-
- if (!(netevents.lNetworkEvents & (1 << bit)))
- continue;
-
- if (unlikely(netevents.iErrorCode[bit])) {
- socket_errstr =
- rd_socket_strerror(netevents.iErrorCode[bit]);
- events |= POLLHUP;
- } else {
- events |= event;
-
- if (bit == FD_WRITE_BIT) {
- /* Writing no longer blocked */
- rktrans->rktrans_blocked = rd_false;
- }
- }
- }
-
- return events;
-}
-
-
-/**
- * @brief Win32: Poll transport and \p rkq cond events.
- *
- * @returns the transport socket POLL.. event bits.
- */
-static int rd_kafka_transport_io_serve_win32(rd_kafka_transport_t *rktrans,
- rd_kafka_q_t *rkq,
- int timeout_ms) {
- const DWORD wsaevent_cnt = 3;
- WSAEVENT wsaevents[3] = {
- rkq->rkq_cond.mEvents[0], /* rkq: cnd_signal */
- rkq->rkq_cond.mEvents[1], /* rkq: cnd_broadcast */
- rktrans->rktrans_wsaevent, /* socket */
- };
- DWORD r;
- int events = 0;
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- rd_bool_t set_pollout = rd_false;
- rd_bool_t cnd_is_waiting = rd_false;
-
- /* WSA only sets FD_WRITE (i.e., POLLOUT) when the socket was
- * previously blocked, unlike BSD sockets that set POLLOUT as long as
- * the socket isn't blocked. So we need to imitate the BSD behaviour
- * here and cut the timeout short if a write is wanted and the socket
- * is not currently blocked. */
- if (rktrans->rktrans_rkb->rkb_state != RD_KAFKA_BROKER_STATE_CONNECT &&
- !rktrans->rktrans_blocked &&
- (rktrans->rktrans_pfd[0].events & POLLOUT)) {
- timeout_ms = 0;
- set_pollout = rd_true;
- } else {
- /* Check if the queue already has ops enqueued in which case we
- * cut the timeout short. Else add this thread as waiting on the
- * queue's condvar so that cnd_signal() (et.al.) will perform
- * SetEvent() and thus wake up this thread in case a new op is
- * added to the queue. */
- mtx_lock(&rkq->rkq_lock);
- if (rkq->rkq_qlen > 0) {
- timeout_ms = 0;
- } else {
- cnd_is_waiting = rd_true;
- cnd_wait_enter(&rkq->rkq_cond);
- }
- mtx_unlock(&rkq->rkq_lock);
- }
-
- /* Wait for IO and queue events */
- r = WSAWaitForMultipleEvents(wsaevent_cnt, wsaevents, FALSE, timeout_ms,
- FALSE);
-
- if (cnd_is_waiting) {
- mtx_lock(&rkq->rkq_lock);
- cnd_wait_exit(&rkq->rkq_cond);
- mtx_unlock(&rkq->rkq_lock);
- }
-
- if (unlikely(r == WSA_WAIT_FAILED)) {
- rd_rkb_log(rkb, LOG_CRIT, "WSAWAIT",
- "WSAWaitForMultipleEvents failed: %s",
- rd_socket_strerror(rd_socket_errno));
- return POLLERR;
- } else if (r != WSA_WAIT_TIMEOUT) {
- r -= WSA_WAIT_EVENT_0;
-
- /* Reset the cond events if any of them were triggered */
- if (r < 2) {
- ResetEvent(rkq->rkq_cond.mEvents[0]);
- ResetEvent(rkq->rkq_cond.mEvents[1]);
- }
-
- /* Get the socket events. */
- events = rd_kafka_transport_get_wsa_events(rktrans);
- }
-
- /* As explained above we need to set the POLLOUT flag
- * in case it is wanted but not triggered by Winsocket so that
- * io_event() knows it can attempt to send more data. */
- if (likely(set_pollout && !(events & (POLLHUP | POLLERR | POLLOUT))))
- events |= POLLOUT;
-
- return events;
-}
-#endif
-
-
-/**
- * @brief Poll and serve IOs
- *
- * @returns 0 if \p rkq may need additional blocking/timeout polling, else 1.
- *
- * @locality broker thread
- */
-int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans,
- rd_kafka_q_t *rkq,
- int timeout_ms) {
- rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
- int events;
-
- rd_kafka_curr_transport = rktrans;
-
- if (
-#ifndef _WIN32
- /* BSD sockets use POLLOUT to indicate a successful connect.
- * Windows has its own flag for this (FD_CONNECT). */
- rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT ||
-#endif
- (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE &&
- rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
- rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0))
- rd_kafka_transport_poll_set(rkb->rkb_transport, POLLOUT);
-
-#ifdef _WIN32
- /* BSD sockets use POLLIN and a following recv() returning 0
- * to indicate connection close.
- * Windows has its own flag for this (FD_CLOSE). */
- if (rd_kafka_bufq_cnt(&rkb->rkb_waitresps) > 0)
-#endif
- rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN);
-
- /* On Windows we can wait for both IO and condvars (rkq)
- * simultaneously.
- *
- * On *nix/BSD sockets we use a local pipe (pfd[1]) to wake
- * up the rkq. */
-#ifdef _WIN32
- events = rd_kafka_transport_io_serve_win32(rktrans, rkq, timeout_ms);
-
-#else
- if (rd_kafka_transport_poll(rktrans, timeout_ms) < 1)
- return 0; /* No events, caller can block on \p rkq poll */
-
- /* Broker socket events */
- events = rktrans->rktrans_pfd[0].revents;
-#endif
-
- if (events) {
- rd_kafka_transport_poll_clear(rktrans, POLLOUT | POLLIN);
-
- rd_kafka_transport_io_event(rktrans, events, NULL);
- }
-
- return 1;
-}
-
-
-/**
- * @brief Create a new transport object using existing socket \p s.
- */
-rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb,
- rd_socket_t s,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_transport_t *rktrans;
- int on = 1;
- int r;
-
-#ifdef SO_NOSIGPIPE
- /* Disable SIGPIPE signalling for this socket on OSX */
- if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)) == -1)
- rd_rkb_dbg(rkb, BROKER, "SOCKET",
- "Failed to set SO_NOSIGPIPE: %s",
- rd_socket_strerror(rd_socket_errno));
-#endif
-
-#ifdef SO_KEEPALIVE
- /* Enable TCP keep-alives, if configured. */
- if (rkb->rkb_rk->rk_conf.socket_keepalive) {
- if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, (void *)&on,
- sizeof(on)) == RD_SOCKET_ERROR)
- rd_rkb_dbg(rkb, BROKER, "SOCKET",
- "Failed to set SO_KEEPALIVE: %s",
- rd_socket_strerror(rd_socket_errno));
- }
-#endif
-
- /* Set the socket to non-blocking */
- if ((r = rd_fd_set_nonblocking(s))) {
- rd_snprintf(errstr, errstr_size,
- "Failed to set socket non-blocking: %s",
- rd_socket_strerror(r));
- return NULL;
- }
-
-
- rktrans = rd_calloc(1, sizeof(*rktrans));
- rktrans->rktrans_rkb = rkb;
- rktrans->rktrans_s = s;
-
-#ifdef _WIN32
- rktrans->rktrans_wsaevent = WSACreateEvent();
- rd_assert(rktrans->rktrans_wsaevent != NULL);
-#endif
-
- return rktrans;
-}
-
-
-/**
- * Initiate asynchronous connection attempt.
- *
- * Locality: broker thread
- */
-rd_kafka_transport_t *rd_kafka_transport_connect(rd_kafka_broker_t *rkb,
- const rd_sockaddr_inx_t *sinx,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_transport_t *rktrans;
- int s = -1;
- int r;
-
- rkb->rkb_addr_last = sinx;
-
- s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family, SOCK_STREAM,
- IPPROTO_TCP,
- rkb->rkb_rk->rk_conf.opaque);
- if (s == -1) {
- rd_snprintf(errstr, errstr_size, "Failed to create socket: %s",
- rd_socket_strerror(rd_socket_errno));
- return NULL;
- }
-
- rktrans = rd_kafka_transport_new(rkb, s, errstr, errstr_size);
- if (!rktrans) {
- rd_kafka_transport_close0(rkb->rkb_rk, s);
- return NULL;
- }
-
- rd_rkb_dbg(rkb, BROKER, "CONNECT",
- "Connecting to %s (%s) "
- "with socket %i",
- rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY |
- RD_SOCKADDR2STR_F_PORT),
- rd_kafka_secproto_names[rkb->rkb_proto], s);
-
- /* Connect to broker */
- if (rkb->rkb_rk->rk_conf.connect_cb) {
- rd_kafka_broker_lock(rkb); /* for rkb_nodename */
- r = rkb->rkb_rk->rk_conf.connect_cb(
- s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx),
- rkb->rkb_nodename, rkb->rkb_rk->rk_conf.opaque);
- rd_kafka_broker_unlock(rkb);
- } else {
- if (connect(s, (struct sockaddr *)sinx,
- RD_SOCKADDR_INX_LEN(sinx)) == RD_SOCKET_ERROR &&
- (rd_socket_errno != EINPROGRESS
-#ifdef _WIN32
- && rd_socket_errno != WSAEWOULDBLOCK
-#endif
- ))
- r = rd_socket_errno;
- else
- r = 0;
- }
-
- if (r != 0) {
- rd_rkb_dbg(rkb, BROKER, "CONNECT",
- "Couldn't connect to %s: %s (%i)",
- rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_PORT |
- RD_SOCKADDR2STR_F_FAMILY),
- rd_socket_strerror(r), r);
- rd_snprintf(errstr, errstr_size,
- "Failed to connect to broker at %s: %s",
- rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE),
- rd_socket_strerror(r));
-
- rd_kafka_transport_close(rktrans);
- return NULL;
- }
-
- /* Set up transport handle */
- rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = s;
- if (rkb->rkb_wakeup_fd[0] != -1) {
- rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt].events = POLLIN;
- rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd =
- rkb->rkb_wakeup_fd[0];
- }
-
-
- /* Poll writability to trigger on connection success/failure. */
- rd_kafka_transport_poll_set(rktrans, POLLOUT);
-
- return rktrans;
-}
-
-
-#ifdef _WIN32
-/**
- * @brief Set the WinSocket event poll bit to \p events.
- */
-static void rd_kafka_transport_poll_set_wsa(rd_kafka_transport_t *rktrans,
- int events) {
- int r;
- r = WSAEventSelect(
- rktrans->rktrans_s, rktrans->rktrans_wsaevent,
- rd_kafka_transport_events2wsa(rktrans->rktrans_pfd[0].events,
- rktrans->rktrans_rkb->rkb_state ==
- RD_KAFKA_BROKER_STATE_CONNECT));
- if (unlikely(r != 0)) {
- rd_rkb_log(rktrans->rktrans_rkb, LOG_CRIT, "WSAEVENT",
- "WSAEventSelect() failed: %s",
- rd_socket_strerror(rd_socket_errno));
- }
-}
-#endif
-
-void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) {
- if ((rktrans->rktrans_pfd[0].events & event) == event)
- return;
-
- rktrans->rktrans_pfd[0].events |= event;
-
-#ifdef _WIN32
- rd_kafka_transport_poll_set_wsa(rktrans,
- rktrans->rktrans_pfd[0].events);
-#endif
-}
-
-void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) {
- if (!(rktrans->rktrans_pfd[0].events & event))
- return;
-
- rktrans->rktrans_pfd[0].events &= ~event;
-
-#ifdef _WIN32
- rd_kafka_transport_poll_set_wsa(rktrans,
- rktrans->rktrans_pfd[0].events);
-#endif
-}
-
-#ifndef _WIN32
-/**
- * @brief Poll transport fds.
- *
- * @returns 1 if an event was raised, else 0, or -1 on error.
- */
-static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) {
- int r;
-
- r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout);
- if (r <= 0)
- return r;
-
- if (rktrans->rktrans_pfd[1].revents & POLLIN) {
- /* Read wake-up fd data and throw it away; it is only used for wake-ups */
- char buf[1024];
- while (rd_socket_read((int)rktrans->rktrans_pfd[1].fd, buf,
- sizeof(buf)) > 0)
- ; /* Read all buffered signalling bytes */
- }
-
- return 1;
-}
-#endif
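
The pfd[1] handling above is the classic wake-up-fd (self-pipe) pattern: one end of a pipe is included in the poll set, another thread writes a byte to interrupt a blocking poll(), and the poller drains and discards the bytes. A minimal single-threaded sketch of the mechanism (POSIX only, illustrative):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
        int pfd[2];
        if (pipe(pfd) == -1)
                return 1;

        /* Normally another thread would do this to wake the poller. */
        write(pfd[1], "x", 1);

        struct pollfd fds = {.fd = pfd[0], .events = POLLIN};
        int r = poll(&fds, 1, 5000); /* returns immediately: byte pending */

        /* Drain and discard the signalling byte(s). */
        char buf[64];
        ssize_t n = read(pfd[0], buf, sizeof(buf));
        printf("poll=%d drained=%zd byte(s)\n", r, n);

        close(pfd[0]);
        close(pfd[1]);
        return 0;
}
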
-
-#ifdef _WIN32
-/**
- * @brief A socket write operation would block, flag the socket
- * as blocked so that POLLOUT events are handled correctly.
- *
- * This is really only used on Windows where POLLOUT (FD_WRITE) is
- * edge-triggered rather than level-triggered.
- */
-void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans,
- rd_bool_t blocked) {
- rktrans->rktrans_blocked = blocked;
-}
-#endif
-
-
-#if 0
-/**
- * Global cleanup.
- * This is dangerous and SHOULD NOT be called since it will rip
- * the rug from under the application if it uses any of this functionality
- * in its own code. This means we might leak some memory on exit.
- */
-void rd_kafka_transport_term (void) {
-#ifdef _WIN32
- (void)WSACleanup(); /* FIXME: dangerous */
-#endif
-}
-#endif
-
-void rd_kafka_transport_init(void) {
-#ifdef _WIN32
- WSADATA d;
- (void)WSAStartup(MAKEWORD(2, 2), &d);
-#endif
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h
deleted file mode 100644
index 83af5ae90..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_TRANSPORT_H_
-#define _RDKAFKA_TRANSPORT_H_
-
-#ifndef _WIN32
-#include <poll.h>
-#endif
-
-#include "rdbuf.h"
-#include "rdaddr.h"
-
-typedef struct rd_kafka_transport_s rd_kafka_transport_t;
-
-int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans,
- rd_kafka_q_t *rkq,
- int timeout_ms);
-
-ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans,
- rd_slice_t *slice,
- char *errstr,
- size_t errstr_size);
-ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans,
- rd_buf_t *rbuf,
- char *errstr,
- size_t errstr_size);
-
-void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf);
-
-int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans,
- rd_kafka_buf_t **rkbufp,
- char *errstr,
- size_t errstr_size);
-
-rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb,
- rd_socket_t s,
- char *errstr,
- size_t errstr_size);
-struct rd_kafka_broker_s;
-rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb,
- const rd_sockaddr_inx_t *sinx,
- char *errstr,
- size_t errstr_size);
-void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans,
- char *errstr);
-
-void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans);
-
-void rd_kafka_transport_close(rd_kafka_transport_t *rktrans);
-void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans);
-void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event);
-void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event);
-
-#ifdef _WIN32
-void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans,
- rd_bool_t blocked);
-#else
-/* no-op on other platforms */
-#define rd_kafka_transport_set_blocked(rktrans, blocked) \
- do { \
- } while (0)
-#endif
-
-
-void rd_kafka_transport_init(void);
-
-#endif /* _RDKAFKA_TRANSPORT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h
deleted file mode 100644
index 4b053b98f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDKAFKA_TRANSPORT_INT_H_
-#define _RDKAFKA_TRANSPORT_INT_H_
-
-/* This header file is to be used by .c files needing access to the
- * rd_kafka_transport_t struct internals. */
-
-#include "rdkafka_sasl.h"
-
-#if WITH_SSL
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#include <openssl/pkcs12.h>
-#endif
-
-#ifndef _WIN32
-#include <sys/socket.h>
-#include <netinet/tcp.h>
-#endif
-
-struct rd_kafka_transport_s {
- rd_socket_t rktrans_s;
- rd_kafka_broker_t *rktrans_rkb; /* Not reference counted */
-
-#if WITH_SSL
- SSL *rktrans_ssl;
-#endif
-
-#ifdef _WIN32
- WSAEVENT *rktrans_wsaevent;
- rd_bool_t rktrans_blocked; /* Latest send() returned ..WOULDBLOCK.
- * We need to poll for FD_WRITE which
- * is edge-triggered rather than
- * level-triggered.
- * This behaviour differs from BSD
- * sockets. */
-#endif
-
- struct {
- void *state; /* SASL implementation
- * state handle */
-
- int complete; /* Auth was completed early
- * from the client's perspective
- * (but we might still have to
- * wait for server reply). */
-
- /* SASL framing buffers */
- struct msghdr msg;
- struct iovec iov[2];
-
- char *recv_buf;
- int recv_of; /* Received byte count */
- int recv_len; /* Expected receive length for
- * current frame. */
- } rktrans_sasl;
-
- rd_kafka_buf_t *rktrans_recv_buf; /* Used with framed_recvmsg */
-
- /* Two pollable fds:
- * - TCP socket
- * - wake-up fd (not used on Win32)
- */
- rd_pollfd_t rktrans_pfd[2];
- int rktrans_pfd_cnt;
-
- size_t rktrans_rcvbuf_size; /**< Socket receive buffer size */
- size_t rktrans_sndbuf_size; /**< Socket send buffer size */
-};
-
-
-extern RD_TLS rd_kafka_transport_t *rd_kafka_curr_transport;
-
-#endif /* _RDKAFKA_TRANSPORT_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c
deleted file mode 100644
index afbc28b71..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c
+++ /dev/null
@@ -1,3249 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @name Transaction Manager
- *
- */
-
-#include <stdarg.h>
-
-#include "rd.h"
-#include "rdkafka_int.h"
-#include "rdkafka_txnmgr.h"
-#include "rdkafka_idempotence.h"
-#include "rdkafka_request.h"
-#include "rdkafka_error.h"
-#include "rdunittest.h"
-#include "rdrand.h"
-
-
-static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms);
-
-#define rd_kafka_txn_curr_api_set_result(rk, actions, error) \
- rd_kafka_txn_curr_api_set_result0(__FUNCTION__, __LINE__, rk, actions, \
- error)
-static void rd_kafka_txn_curr_api_set_result0(const char *func,
- int line,
- rd_kafka_t *rk,
- int actions,
- rd_kafka_error_t *error);
-
-
-
-/**
- * @return a normalized error code; for instance, the different fencing
- * errors are abstracted into a single fencing error for the application.
- */
-static rd_kafka_resp_err_t rd_kafka_txn_normalize_err(rd_kafka_resp_err_t err) {
-
- switch (err) {
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
- case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
- return RD_KAFKA_RESP_ERR__FENCED;
- case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
- return RD_KAFKA_RESP_ERR__TIMED_OUT;
- default:
- return err;
- }
-}
-
-
-/**
- * @brief Ensure client is configured as a transactional producer,
- * else return error.
- *
- * @locality application thread
- * @locks none
- */
-static RD_INLINE rd_kafka_error_t *
-rd_kafka_ensure_transactional(const rd_kafka_t *rk) {
- if (unlikely(rk->rk_type != RD_KAFKA_PRODUCER))
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "The Transactional API can only be used "
- "on producer instances");
-
- if (unlikely(!rk->rk_conf.eos.transactional_id))
- return rd_kafka_error_new(RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
- "The Transactional API requires "
- "transactional.id to be configured");
-
- return NULL;
-}
-
-
-
-/**
- * @brief Ensure transaction state is one of \p states.
- *
- * @param states The required states, terminated by a -1 sentinel.
- *
- * @locks_required rd_kafka_*lock(rk) MUST be held
- * @locality any
- */
-static RD_INLINE rd_kafka_error_t *
-rd_kafka_txn_require_states0(rd_kafka_t *rk, rd_kafka_txn_state_t states[]) {
- rd_kafka_error_t *error;
- size_t i;
-
- if (unlikely((error = rd_kafka_ensure_transactional(rk)) != NULL))
- return error;
-
- for (i = 0; (int)states[i] != -1; i++)
- if (rk->rk_eos.txn_state == states[i])
- return NULL;
-
- /* For fatal and abortable states return the last transactional
- * error, for all other states just return a state error. */
- if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_FATAL_ERROR)
- error = rd_kafka_error_new_fatal(rk->rk_eos.txn_err, "%s",
- rk->rk_eos.txn_errstr);
- else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) {
- error = rd_kafka_error_new(rk->rk_eos.txn_err, "%s",
- rk->rk_eos.txn_errstr);
- rd_kafka_error_set_txn_requires_abort(error);
- } else
- error = rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__STATE, "Operation not valid in state %s",
- rd_kafka_txn_state2str(rk->rk_eos.txn_state));
-
-
- return error;
-}
-
-/** @brief \p ... is a list of states */
-#define rd_kafka_txn_require_state(rk, ...) \
- rd_kafka_txn_require_states0( \
- rk, (rd_kafka_txn_state_t[]) {__VA_ARGS__, -1})
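
The macro above relies on a C99 compound literal terminated by a -1 sentinel so that a fixed-signature function can accept any number of states. A minimal sketch of that idiom (the my_* names are illustrative):

#include <stdio.h>

typedef enum { ST_INIT, ST_READY, ST_IN_TXN } my_state_t;

/* Scan a -1-terminated state list for the current state. */
static int state_in(my_state_t curr, const my_state_t *states) {
        size_t i;
        for (i = 0; (int)states[i] != -1; i++)
                if (curr == states[i])
                        return 1;
        return 0;
}

/* Compound literal + sentinel: callers can pass any number of states. */
#define state_in_any(curr, ...) \
        state_in(curr, (my_state_t[]) {__VA_ARGS__, -1})

int main(void) {
        printf("%d\n", state_in_any(ST_READY, ST_READY, ST_IN_TXN)); /* 1 */
        printf("%d\n", state_in_any(ST_INIT, ST_READY, ST_IN_TXN));  /* 0 */
        return 0;
}
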
-
-
-
-/**
- * @param ignore Will be set to true if the state transition should be
- * completely ignored.
- * @returns true if the state transition is valid, else false.
- */
-static rd_bool_t
-rd_kafka_txn_state_transition_is_valid(rd_kafka_txn_state_t curr,
- rd_kafka_txn_state_t new_state,
- rd_bool_t *ignore) {
-
- *ignore = rd_false;
-
- switch (new_state) {
- case RD_KAFKA_TXN_STATE_INIT:
- /* This is the initialized value and this transition will
- * never happen. */
- return rd_false;
-
- case RD_KAFKA_TXN_STATE_WAIT_PID:
- return curr == RD_KAFKA_TXN_STATE_INIT;
-
- case RD_KAFKA_TXN_STATE_READY_NOT_ACKED:
- return curr == RD_KAFKA_TXN_STATE_WAIT_PID;
-
- case RD_KAFKA_TXN_STATE_READY:
- return curr == RD_KAFKA_TXN_STATE_READY_NOT_ACKED ||
- curr == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED ||
- curr == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED;
-
- case RD_KAFKA_TXN_STATE_IN_TRANSACTION:
- return curr == RD_KAFKA_TXN_STATE_READY;
-
- case RD_KAFKA_TXN_STATE_BEGIN_COMMIT:
- return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION;
-
- case RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION:
- return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT;
-
- case RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED:
- return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT ||
- curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION;
-
- case RD_KAFKA_TXN_STATE_BEGIN_ABORT:
- return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
- curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION ||
- curr == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR;
-
- case RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION:
- return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT;
-
- case RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED:
- return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT ||
- curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION;
-
- case RD_KAFKA_TXN_STATE_ABORTABLE_ERROR:
- if (curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT ||
- curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION ||
- curr == RD_KAFKA_TXN_STATE_FATAL_ERROR) {
- /* Ignore subsequent abortable errors in
- * these states. */
- *ignore = rd_true;
- return rd_true;
- }
-
- return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
- curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT ||
- curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION;
-
- case RD_KAFKA_TXN_STATE_FATAL_ERROR:
- /* Any state can transition to a fatal error */
- return rd_true;
-
- default:
- RD_BUG("Invalid txn state transition: %s -> %s",
- rd_kafka_txn_state2str(curr),
- rd_kafka_txn_state2str(new_state));
- return rd_false;
- }
-}
-
-
-/**
- * @brief Transition the transaction state to \p new_state.
- *
- * Invalid transitions are treated as bugs and trigger an assertion.
- *
- * @locality rdkafka main thread
- * @locks_required rd_kafka_wrlock MUST be held
- */
-static void rd_kafka_txn_set_state(rd_kafka_t *rk,
- rd_kafka_txn_state_t new_state) {
- rd_bool_t ignore;
-
- if (rk->rk_eos.txn_state == new_state)
- return;
-
- /* Check if state transition is valid */
- if (!rd_kafka_txn_state_transition_is_valid(rk->rk_eos.txn_state,
- new_state, &ignore)) {
- rd_kafka_log(rk, LOG_CRIT, "TXNSTATE",
- "BUG: Invalid transaction state transition "
- "attempted: %s -> %s",
- rd_kafka_txn_state2str(rk->rk_eos.txn_state),
- rd_kafka_txn_state2str(new_state));
-
- rd_assert(!*"BUG: Invalid transaction state transition");
- }
-
- if (ignore) {
- /* Ignore this state change */
- return;
- }
-
- rd_kafka_dbg(rk, EOS, "TXNSTATE", "Transaction state change %s -> %s",
- rd_kafka_txn_state2str(rk->rk_eos.txn_state),
- rd_kafka_txn_state2str(new_state));
-
- /* If transitioning from IN_TRANSACTION, the app is no longer
- * allowed to enqueue (produce) messages. */
- if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION)
- rd_atomic32_set(&rk->rk_eos.txn_may_enq, 0);
- else if (new_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION)
- rd_atomic32_set(&rk->rk_eos.txn_may_enq, 1);
-
- rk->rk_eos.txn_state = new_state;
-}
-
-
-/**
- * @returns the current transaction timeout, i.e., the time remaining in
- * the current transaction.
- *
- * @remark The remaining timeout is currently not tracked, so this function
- * will always return the remaining time based on transaction.timeout.ms
- * and we rely on the broker to enforce the actual remaining timeout.
- * This is still better than not having a timeout cap at all, which
- * used to be the case.
- * It's also tricky knowing exactly what the controller thinks the
- * remaining transaction time is.
- *
- * @locks_required rd_kafka_*lock(rk) MUST be held.
- */
-static RD_INLINE rd_ts_t rd_kafka_txn_current_timeout(const rd_kafka_t *rk) {
- return rd_timeout_init(rk->rk_conf.eos.transaction_timeout_ms);
-}
-
-
-/**
- * @brief An unrecoverable transactional error has occurred.
- *
- * @param do_lock RD_DO_LOCK: rd_kafka_wrlock(rk) will be acquired and released,
- * RD_DONT_LOCK: rd_kafka_wrlock(rk) MUST be held by the caller.
- * @locality any
- * @locks rd_kafka_wrlock MUST NOT be held
- */
-void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk,
- rd_dolock_t do_lock,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) {
- char errstr[512];
- va_list ap;
-
- va_start(ap, fmt);
- vsnprintf(errstr, sizeof(errstr), fmt, ap);
- va_end(ap);
-
- rd_kafka_log(rk, LOG_ALERT, "TXNERR",
- "Fatal transaction error: %s (%s)", errstr,
- rd_kafka_err2name(err));
-
- if (do_lock)
- rd_kafka_wrlock(rk);
- rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s", errstr);
-
- rk->rk_eos.txn_err = err;
- if (rk->rk_eos.txn_errstr)
- rd_free(rk->rk_eos.txn_errstr);
- rk->rk_eos.txn_errstr = rd_strdup(errstr);
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR);
-
- if (do_lock)
- rd_kafka_wrunlock(rk);
-
- /* If application has called a transactional API and
- * it has now failed, reply to the app.
- * If there is no currently called API then this is a no-op. */
- rd_kafka_txn_curr_api_set_result(
- rk, 0, rd_kafka_error_new_fatal(err, "%s", errstr));
-}
-
-
-/**
- * @brief An abortable/recoverable transactional error has occurred.
- *
- * @param requires_epoch_bump If true, abort_transaction() will bump the epoch
- * on the coordinator (KIP-360).
- *
- * @locality rdkafka main thread
- * @locks rd_kafka_wrlock MUST NOT be held
- */
-void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_bool_t requires_epoch_bump,
- const char *fmt,
- ...) {
- char errstr[512];
- va_list ap;
-
- if (rd_kafka_fatal_error(rk, NULL, 0)) {
- rd_kafka_dbg(rk, EOS, "FATAL",
- "Not propagating abortable transactional "
- "error (%s) "
- "since previous fatal error already raised",
- rd_kafka_err2name(err));
- return;
- }
-
- va_start(ap, fmt);
- vsnprintf(errstr, sizeof(errstr), fmt, ap);
- va_end(ap);
-
- rd_kafka_wrlock(rk);
-
- if (requires_epoch_bump)
- rk->rk_eos.txn_requires_epoch_bump = requires_epoch_bump;
-
- if (rk->rk_eos.txn_err) {
- rd_kafka_dbg(rk, EOS, "TXNERR",
- "Ignoring sub-sequent abortable transaction "
- "error: %s (%s): "
- "previous error (%s) already raised",
- errstr, rd_kafka_err2name(err),
- rd_kafka_err2name(rk->rk_eos.txn_err));
- rd_kafka_wrunlock(rk);
- return;
- }
-
- rk->rk_eos.txn_err = err;
- if (rk->rk_eos.txn_errstr)
- rd_free(rk->rk_eos.txn_errstr);
- rk->rk_eos.txn_errstr = rd_strdup(errstr);
-
- rd_kafka_log(rk, LOG_ERR, "TXNERR",
- "Current transaction failed in state %s: %s (%s%s)",
- rd_kafka_txn_state2str(rk->rk_eos.txn_state), errstr,
- rd_kafka_err2name(err),
- requires_epoch_bump ? ", requires epoch bump" : "");
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTABLE_ERROR);
- rd_kafka_wrunlock(rk);
-
- /* Purge all messages in queue/flight */
- rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_ABORT_TXN |
- RD_KAFKA_PURGE_F_NON_BLOCKING);
-}
-
-
-
-/**
- * @brief Send a request-reply op to the txnmgr callback, wait for a reply
- * or timeout, and return an error object or NULL on success.
- *
- * @remark Does not alter the current API state.
- *
- * @returns an error object on failure, else NULL.
- *
- * @locality application thread
- *
- * @locks_acquired rk->rk_eos.txn_curr_api.lock
- */
-#define rd_kafka_txn_op_req(rk, op_cb, abs_timeout) \
- rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, \
- rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, op_cb), \
- abs_timeout)
-#define rd_kafka_txn_op_req1(rk, rko, abs_timeout) \
- rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, rko, abs_timeout)
-static rd_kafka_error_t *rd_kafka_txn_op_req0(const char *func,
- int line,
- rd_kafka_t *rk,
- rd_kafka_op_t *rko,
- rd_ts_t abs_timeout) {
- rd_kafka_error_t *error = NULL;
- rd_bool_t has_result = rd_false;
-
- mtx_lock(&rk->rk_eos.txn_curr_api.lock);
-
- /* See if there's already a result, if so return that immediately. */
- if (rk->rk_eos.txn_curr_api.has_result) {
- error = rk->rk_eos.txn_curr_api.error;
- rk->rk_eos.txn_curr_api.error = NULL;
- rk->rk_eos.txn_curr_api.has_result = rd_false;
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
- rd_kafka_op_destroy(rko);
- rd_kafka_dbg(rk, EOS, "OPREQ",
- "%s:%d: %s: returning already set result: %s",
- func, line, rk->rk_eos.txn_curr_api.name,
- error ? rd_kafka_error_string(error) : "Success");
- return error;
- }
-
- /* Send one-way op to txnmgr */
- if (!rd_kafka_q_enq(rk->rk_ops, rko))
- RD_BUG("rk_ops queue disabled");
-
- /* Wait for result to be set, or timeout */
- do {
- if (cnd_timedwait_ms(&rk->rk_eos.txn_curr_api.cnd,
- &rk->rk_eos.txn_curr_api.lock,
- rd_timeout_remains(abs_timeout)) ==
- thrd_timedout)
- break;
- } while (!rk->rk_eos.txn_curr_api.has_result);
-
-
-
- if ((has_result = rk->rk_eos.txn_curr_api.has_result)) {
- rk->rk_eos.txn_curr_api.has_result = rd_false;
- error = rk->rk_eos.txn_curr_api.error;
- rk->rk_eos.txn_curr_api.error = NULL;
- }
-
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
-
- /* If there was no reply it means the background operation is still
- * in progress and its result will be set later, so the application
- * should call this API again to resume. */
- if (!has_result) {
- error = rd_kafka_error_new_retriable(
- RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Timed out waiting for operation to finish, "
- "retry call to resume");
- }
-
- return error;
-}
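
The wait loop above is a condition-variable request/reply handoff: the caller blocks (with a timeout) until a worker thread publishes a result and signals the condvar. A minimal sketch using C11 <threads.h> (illustrative; the real code above bounds the wait with cnd_timedwait_ms() so an unanswered request times out and the call can be resumed later):

#include <stdio.h>
#include <threads.h>

static mtx_t lock;
static cnd_t cnd;
static int has_result = 0;
static int result     = 0;

/* Worker thread: computes a result and signals the waiting caller. */
static int worker(void *arg) {
        (void)arg;
        mtx_lock(&lock);
        result     = 42;
        has_result = 1;
        cnd_broadcast(&cnd); /* wake any waiting caller */
        mtx_unlock(&lock);
        return 0;
}

int main(void) {
        thrd_t t;
        mtx_init(&lock, mtx_plain);
        cnd_init(&cnd);
        thrd_create(&t, worker, NULL);

        mtx_lock(&lock);
        while (!has_result)
                cnd_wait(&cnd, &lock); /* real code bounds this with a timeout */
        printf("result=%d\n", result);
        mtx_unlock(&lock);

        thrd_join(t, NULL);
        return 0;
}
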
-
-
-/**
- * @brief Begin (or resume) a public API call.
- *
- * This function will prevent conflicting calls.
- *
- * @returns an error on failure, or NULL on success.
- *
- * @locality application thread
- *
- * @locks_acquired rk->rk_eos.txn_curr_api.lock
- */
-static rd_kafka_error_t *rd_kafka_txn_curr_api_begin(rd_kafka_t *rk,
- const char *api_name,
- rd_bool_t cap_timeout,
- int timeout_ms,
- rd_ts_t *abs_timeoutp) {
- rd_kafka_error_t *error = NULL;
-
- if ((error = rd_kafka_ensure_transactional(rk)))
- return error;
-
- rd_kafka_rdlock(rk); /* Need lock for retrieving the states */
- rd_kafka_dbg(rk, EOS, "TXNAPI",
- "Transactional API called: %s "
- "(in txn state %s, idemp state %s, API timeout %d)",
- api_name, rd_kafka_txn_state2str(rk->rk_eos.txn_state),
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
- timeout_ms);
- rd_kafka_rdunlock(rk);
-
- mtx_lock(&rk->rk_eos.txn_curr_api.lock);
-
-
- /* Make sure there is no other conflicting in-progress API call,
- * and that this same call is not currently under way in another thread.
- */
- if (unlikely(*rk->rk_eos.txn_curr_api.name &&
- strcmp(rk->rk_eos.txn_curr_api.name, api_name))) {
- /* Another API is being called. */
- error = rd_kafka_error_new_retriable(
- RD_KAFKA_RESP_ERR__CONFLICT,
- "Conflicting %s API call is already in progress",
- rk->rk_eos.txn_curr_api.name);
-
- } else if (unlikely(rk->rk_eos.txn_curr_api.calling)) {
- /* There is an active call to this same API
- * from another thread. */
- error = rd_kafka_error_new_retriable(
- RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
- "Simultaneous %s API calls not allowed",
- rk->rk_eos.txn_curr_api.name);
-
- } else if (*rk->rk_eos.txn_curr_api.name) {
- /* Resumed call */
- rk->rk_eos.txn_curr_api.calling = rd_true;
-
- } else {
- /* New call */
- rd_snprintf(rk->rk_eos.txn_curr_api.name,
- sizeof(rk->rk_eos.txn_curr_api.name), "%s",
- api_name);
- rk->rk_eos.txn_curr_api.calling = rd_true;
- rd_assert(!rk->rk_eos.txn_curr_api.error);
- }
-
- if (!error && abs_timeoutp) {
- rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
-
- if (cap_timeout) {
- /* Cap API timeout to remaining transaction timeout */
- rd_ts_t abs_txn_timeout =
- rd_kafka_txn_current_timeout(rk);
- if (abs_timeout > abs_txn_timeout ||
- abs_timeout == RD_POLL_INFINITE)
- abs_timeout = abs_txn_timeout;
- }
-
- *abs_timeoutp = abs_timeout;
- }
-
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
-
- return error;
-}
-
-
-
-/**
- * @brief Return from public API.
- *
- * This function updates the current API state and must be used in
- * all return statements from the public txn API.
- *
- * @param resumable If true and the error is retriable, the current API state
- * will be maintained to allow a future call to the same API
- * to resume the background operation that is in progress.
- * @param error The error object, if not NULL, is simply inspected and returned.
- *
- * @returns the \p error object as-is.
- *
- * @locality application thread
- * @locks_acquired rk->rk_eos.txn_curr_api.lock
- */
-#define rd_kafka_txn_curr_api_return(rk, resumable, error) \
- rd_kafka_txn_curr_api_return0(__FUNCTION__, __LINE__, rk, resumable, \
- error)
-static rd_kafka_error_t *
-rd_kafka_txn_curr_api_return0(const char *func,
- int line,
- rd_kafka_t *rk,
- rd_bool_t resumable,
- rd_kafka_error_t *error) {
-
- mtx_lock(&rk->rk_eos.txn_curr_api.lock);
-
- rd_kafka_dbg(
- rk, EOS, "TXNAPI", "Transactional API %s return%s at %s:%d: %s",
- rk->rk_eos.txn_curr_api.name,
- resumable && rd_kafka_error_is_retriable(error) ? " resumable" : "",
- func, line, error ? rd_kafka_error_string(error) : "Success");
-
- rd_assert(*rk->rk_eos.txn_curr_api.name);
- rd_assert(rk->rk_eos.txn_curr_api.calling);
-
- rk->rk_eos.txn_curr_api.calling = rd_false;
-
- /* Reset the current API call so that other APIs may be called,
- * unless this is a resumable API and the error is retriable. */
- if (!resumable || (error && !rd_kafka_error_is_retriable(error))) {
- *rk->rk_eos.txn_curr_api.name = '\0';
- /* It is possible for another error to have been set,
- * typically when a fatal error is raised, so make sure
- * we're not destroying the error we're supposed to return. */
- if (rk->rk_eos.txn_curr_api.error != error)
- rd_kafka_error_destroy(rk->rk_eos.txn_curr_api.error);
- rk->rk_eos.txn_curr_api.error = NULL;
- }
-
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
-
- return error;
-}
-
-
-
-/**
- * @brief Set the (possibly intermediary) result for the current API call.
- *
- * The result is \p error, which is NULL on success or an error object
- * on failure.
- * If the application is actively blocked on the call the result will be
- * sent on its replyq, otherwise the result is stored and retrieved the
- * next time the application calls the API again.
- *
- * @locality rdkafka main thread
- * @locks_acquired rk->rk_eos.txn_curr_api.lock
- */
-static void rd_kafka_txn_curr_api_set_result0(const char *func,
- int line,
- rd_kafka_t *rk,
- int actions,
- rd_kafka_error_t *error) {
-
- mtx_lock(&rk->rk_eos.txn_curr_api.lock);
-
- if (!*rk->rk_eos.txn_curr_api.name) {
- /* No current API being called: this can happen
- * if the application thread deemed the API call done,
- * or when a fatal error attempts to set the result
- * regardless of the current API state.
- * In this case we simply throw away this result. */
- if (error)
- rd_kafka_error_destroy(error);
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
- return;
- }
-
- rd_kafka_dbg(rk, EOS, "APIRESULT",
- "Transactional API %s (intermediary%s) result set "
- "at %s:%d: %s (%sprevious result%s%s)",
- rk->rk_eos.txn_curr_api.name,
- rk->rk_eos.txn_curr_api.calling ? ", calling" : "", func,
- line, error ? rd_kafka_error_string(error) : "Success",
- rk->rk_eos.txn_curr_api.has_result ? "" : "no ",
- rk->rk_eos.txn_curr_api.error ? ": " : "",
- rd_kafka_error_string(rk->rk_eos.txn_curr_api.error));
-
- rk->rk_eos.txn_curr_api.has_result = rd_true;
-
-
- if (rk->rk_eos.txn_curr_api.error) {
- /* If there's already an error it typically means
- * a fatal error has been raised, so nothing more to do here. */
- rd_kafka_dbg(
- rk, EOS, "APIRESULT",
- "Transactional API %s error "
- "already set: %s",
- rk->rk_eos.txn_curr_api.name,
- rd_kafka_error_string(rk->rk_eos.txn_curr_api.error));
-
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
-
- if (error)
- rd_kafka_error_destroy(error);
-
- return;
- }
-
- if (error) {
- if (actions & RD_KAFKA_ERR_ACTION_FATAL)
- rd_kafka_error_set_fatal(error);
- else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
- rd_kafka_error_set_txn_requires_abort(error);
- else if (actions & RD_KAFKA_ERR_ACTION_RETRY)
- rd_kafka_error_set_retriable(error);
- }
-
- rk->rk_eos.txn_curr_api.error = error;
- error = NULL;
- cnd_broadcast(&rk->rk_eos.txn_curr_api.cnd);
-
-
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
-}
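-
-/*
- * How the result flows back to the application, as a simplified sketch:
- * a main-thread op handler reports its outcome with
- * rd_kafka_txn_curr_api_set_result(), which stores the error (or NULL
- * for success), sets has_result and broadcasts txn_curr_api.cnd, waking
- * the application thread that is blocked in the wait helper above,
- * roughly:
- *
- *   mtx_lock(&rk->rk_eos.txn_curr_api.lock);
- *   while (!rk->rk_eos.txn_curr_api.has_result)
- *           cnd_wait(&rk->rk_eos.txn_curr_api.cnd,
- *                    &rk->rk_eos.txn_curr_api.lock);
- *   error = rk->rk_eos.txn_curr_api.error;
- *   mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
- *
- * The actual wait is a timed wait so the call can return the retriable
- * RD_KAFKA_RESP_ERR__TIMED_OUT error seen at the top of this section.
- */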
-
-
-
-/**
- * @brief The underlying idempotent producer state changed,
- * see if this affects the transactional operations.
- *
- * @locality any thread
- * @locks rd_kafka_wrlock(rk) MUST be held
- */
-void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk,
- rd_kafka_idemp_state_t idemp_state) {
- rd_bool_t set_result = rd_false;
-
- if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED &&
- rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_WAIT_PID) {
- /* Application is calling (or has called) init_transactions() */
- RD_UT_COVERAGE(1);
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED);
- set_result = rd_true;
-
- } else if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED &&
- (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_ABORT ||
- rk->rk_eos.txn_state ==
- RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)) {
- /* Application is calling abort_transaction() as we're
- * recovering from a fatal idempotence error. */
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
- set_result = rd_true;
-
- } else if (idemp_state == RD_KAFKA_IDEMP_STATE_FATAL_ERROR &&
- rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_FATAL_ERROR) {
- /* A fatal error has been raised. */
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR);
- }
-
- if (set_result) {
- /* Application has called init_transactions() or
- * abort_transaction() and it is now complete,
- * reply to the app. */
- rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
- }
-}
-
-
-/**
- * @brief Moves a partition from the pending list to the proper list.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_partition_registered(rd_kafka_toppar_t *rktp) {
- rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
-
- rd_kafka_toppar_lock(rktp);
-
- if (unlikely(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_PEND_TXN))) {
- rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_PROTOCOL, "ADDPARTS",
- "\"%.*s\" [%" PRId32
- "] is not in pending "
- "list but returned in AddPartitionsToTxn "
- "response: ignoring",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
- rd_kafka_toppar_unlock(rktp);
- return;
- }
-
- rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_TOPIC, "ADDPARTS",
- "%.*s [%" PRId32 "] registered with transaction",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition);
-
- rd_assert((rktp->rktp_flags &
- (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN)) ==
- RD_KAFKA_TOPPAR_F_PEND_TXN);
-
- rktp->rktp_flags = (rktp->rktp_flags & ~RD_KAFKA_TOPPAR_F_PEND_TXN) |
- RD_KAFKA_TOPPAR_F_IN_TXN;
-
- rd_kafka_toppar_unlock(rktp);
-
- mtx_lock(&rk->rk_eos.txn_pending_lock);
- TAILQ_REMOVE(&rk->rk_eos.txn_waitresp_rktps, rktp, rktp_txnlink);
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
-
- /* Not destroy()/keep():ing rktp since it just changes tailq. */
-
- TAILQ_INSERT_TAIL(&rk->rk_eos.txn_rktps, rktp, rktp_txnlink);
-}
-
-
-
-/**
- * @brief Handle AddPartitionsToTxnResponse
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_handle_AddPartitionsToTxn(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int32_t TopicCnt;
- int actions = 0;
- int retry_backoff_ms = 500; /* retry backoff */
- rd_kafka_resp_err_t reset_coord_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_bool_t require_bump = rd_false;
-
- if (err)
- goto done;
-
- rd_kafka_rdlock(rk);
- rd_assert(rk->rk_eos.txn_state !=
- RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION);
-
- if (rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_IN_TRANSACTION &&
- rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_BEGIN_COMMIT) {
- /* Response received after aborting transaction */
- rd_rkb_dbg(rkb, EOS, "ADDPARTS",
- "Ignoring outdated AddPartitionsToTxn response in "
- "state %s",
- rd_kafka_txn_state2str(rk->rk_eos.txn_state));
- rd_kafka_rdunlock(rk);
- err = RD_KAFKA_RESP_ERR__OUTDATED;
- goto done;
- }
- rd_kafka_rdunlock(rk);
-
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i32(rkbuf, &TopicCnt);
-
- while (TopicCnt-- > 0) {
- rd_kafkap_str_t Topic;
- rd_kafka_topic_t *rkt;
- int32_t PartCnt;
- rd_bool_t request_error = rd_false;
-
- rd_kafka_buf_read_str(rkbuf, &Topic);
- rd_kafka_buf_read_i32(rkbuf, &PartCnt);
-
- rkt = rd_kafka_topic_find0(rk, &Topic);
- if (rkt)
- rd_kafka_topic_rdlock(rkt); /* for toppar_get() */
-
- while (PartCnt-- > 0) {
- rd_kafka_toppar_t *rktp = NULL;
- int32_t Partition;
- int16_t ErrorCode;
- int p_actions = 0;
-
- rd_kafka_buf_read_i32(rkbuf, &Partition);
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- if (rkt)
- rktp = rd_kafka_toppar_get(rkt, Partition,
- rd_false);
-
- if (!rktp) {
- rd_rkb_dbg(rkb, EOS | RD_KAFKA_DBG_PROTOCOL,
- "ADDPARTS",
- "Unknown partition \"%.*s\" "
- "[%" PRId32
- "] in AddPartitionsToTxn "
- "response: ignoring",
- RD_KAFKAP_STR_PR(&Topic), Partition);
- continue;
- }
-
- switch (ErrorCode) {
- case RD_KAFKA_RESP_ERR_NO_ERROR:
- /* Move rktp from pending to proper list */
- rd_kafka_txn_partition_registered(rktp);
- break;
-
- /* Request-level errors.
- * As soon as any of these errors are seen
- * the rest of the partitions are ignored
- * since they will have the same error. */
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
- case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
- reset_coord_err = ErrorCode;
- p_actions |= RD_KAFKA_ERR_ACTION_RETRY;
- err = ErrorCode;
- request_error = rd_true;
- break;
-
- case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
- retry_backoff_ms = 20;
- /* FALLTHRU */
- case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
- case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
- p_actions |= RD_KAFKA_ERR_ACTION_RETRY;
- err = ErrorCode;
- request_error = rd_true;
- break;
-
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
- case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
- case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
- case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
- p_actions |= RD_KAFKA_ERR_ACTION_FATAL;
- err = ErrorCode;
- request_error = rd_true;
- break;
-
- case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
- require_bump = rd_true;
- p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- err = ErrorCode;
- request_error = rd_true;
- break;
-
- /* Partition-level errors.
- * Continue with rest of partitions. */
- case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
- p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- err = ErrorCode;
- break;
-
- case RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED:
- /* Partition skipped due to other partition's
- * error. */
- p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- if (!err)
- err = ErrorCode;
- break;
-
- default:
- /* Other partition error */
- p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- err = ErrorCode;
- break;
- }
-
- if (ErrorCode) {
- actions |= p_actions;
-
- if (!(p_actions &
- (RD_KAFKA_ERR_ACTION_FATAL |
- RD_KAFKA_ERR_ACTION_PERMANENT)))
- rd_rkb_dbg(
- rkb, EOS, "ADDPARTS",
- "AddPartitionsToTxn response: "
- "partition \"%.*s\": "
- "[%" PRId32 "]: %s",
- RD_KAFKAP_STR_PR(&Topic), Partition,
- rd_kafka_err2str(ErrorCode));
- else
- rd_rkb_log(rkb, LOG_ERR, "ADDPARTS",
- "Failed to add partition "
- "\"%.*s\" [%" PRId32
- "] to "
- "transaction: %s",
- RD_KAFKAP_STR_PR(&Topic),
- Partition,
- rd_kafka_err2str(ErrorCode));
- }
-
- rd_kafka_toppar_destroy(rktp);
-
- if (request_error)
- break; /* Request-level error seen, bail out */
- }
-
- if (rkt) {
- rd_kafka_topic_rdunlock(rkt);
- rd_kafka_topic_destroy0(rkt);
- }
-
- if (request_error)
- break; /* Request-level error seen, bail out */
- }
-
- if (actions) /* Actions set from encountered errors */
- goto done;
-
- /* Since these partitions are now allowed to produce
- * we wake up all broker threads. */
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
- "partitions added to transaction");
-
- goto done;
-
-err_parse:
- err = rkbuf->rkbuf_err;
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
-
-done:
- if (err) {
- rd_assert(rk->rk_eos.txn_req_cnt > 0);
- rk->rk_eos.txn_req_cnt--;
- }
-
- /* Handle local request-level errors */
- switch (err) {
- case RD_KAFKA_RESP_ERR_NO_ERROR:
- break;
-
- case RD_KAFKA_RESP_ERR__DESTROY:
- case RD_KAFKA_RESP_ERR__OUTDATED:
- /* Terminating or outdated, ignore response */
- return;
-
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- case RD_KAFKA_RESP_ERR__TIMED_OUT:
- default:
- /* For these errors we can't be sure if the
- * request was received by the broker or not,
- * so increase the txn_req_cnt back up as if
- * they were received so that an EndTxnRequest
- * is sent on abort_transaction(). */
- rk->rk_eos.txn_req_cnt++;
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
- }
-
- if (reset_coord_err) {
- rd_kafka_wrlock(rk);
- rd_kafka_txn_coord_set(rk, NULL,
- "AddPartitionsToTxn failed: %s",
- rd_kafka_err2str(reset_coord_err));
- rd_kafka_wrunlock(rk);
- }
-
- /* Partitions that failed will still be on the waitresp list
- * and are moved back to the pending list for the next scheduled
- * AddPartitionsToTxn request.
- * If this request was successful there will be no remaining partitions
- * on the waitresp list.
- */
- mtx_lock(&rk->rk_eos.txn_pending_lock);
- TAILQ_CONCAT_SORTED(&rk->rk_eos.txn_pending_rktps,
- &rk->rk_eos.txn_waitresp_rktps, rd_kafka_toppar_t *,
- rktp_txnlink, rd_kafka_toppar_topic_cmp);
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
-
- err = rd_kafka_txn_normalize_err(err);
-
- if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
- rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
- "Failed to add partitions to "
- "transaction: %s",
- rd_kafka_err2str(err));
-
- } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
- /* Treat all other permanent errors as abortable errors.
- * If an epoch bump is required let idempo sort it out. */
- if (require_bump)
- rd_kafka_idemp_drain_epoch_bump(
- rk, err,
- "Failed to add partition(s) to transaction "
- "on broker %s: %s (after %d ms)",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000));
- else
- rd_kafka_txn_set_abortable_error(
- rk, err,
- "Failed to add partition(s) to transaction "
- "on broker %s: %s (after %d ms)",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000));
-
- } else {
- /* Schedule registration of any new or remaining partitions */
- rd_kafka_txn_schedule_register_partitions(
- rk, (actions & RD_KAFKA_ERR_ACTION_RETRY)
- ? retry_backoff_ms
- : 1 /*immediate*/);
- }
-}
-
-
-/**
- * @brief Send AddPartitionsToTxnRequest to the transaction coordinator.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_register_partitions(rd_kafka_t *rk) {
- char errstr[512];
- rd_kafka_resp_err_t err;
- rd_kafka_error_t *error;
- rd_kafka_pid_t pid;
-
- /* Require operational state */
- rd_kafka_rdlock(rk);
- error =
- rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
- RD_KAFKA_TXN_STATE_BEGIN_COMMIT);
-
- if (unlikely(error != NULL)) {
- rd_kafka_rdunlock(rk);
- rd_kafka_dbg(rk, EOS, "ADDPARTS",
- "Not registering partitions: %s",
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
- return;
- }
-
- /* Get pid, checked later */
- pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
-
- rd_kafka_rdunlock(rk);
-
- /* Transaction coordinator needs to be up */
- if (!rd_kafka_broker_is_up(rk->rk_eos.txn_coord)) {
- rd_kafka_dbg(rk, EOS, "ADDPARTS",
- "Not registering partitions: "
- "coordinator is not available");
- return;
- }
-
- mtx_lock(&rk->rk_eos.txn_pending_lock);
- if (TAILQ_EMPTY(&rk->rk_eos.txn_pending_rktps)) {
- /* No pending partitions to register */
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
- return;
- }
-
- if (!TAILQ_EMPTY(&rk->rk_eos.txn_waitresp_rktps)) {
- /* Only allow one outstanding AddPartitionsToTxnRequest */
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
- rd_kafka_dbg(rk, EOS, "ADDPARTS",
- "Not registering partitions: waiting for "
- "previous AddPartitionsToTxn request to complete");
- return;
- }
-
- /* Require valid pid */
- if (unlikely(!rd_kafka_pid_valid(pid))) {
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
- rd_kafka_dbg(rk, EOS, "ADDPARTS",
- "Not registering partitions: "
- "No PID available (idempotence state %s)",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
- rd_dassert(!*"BUG: No PID despite proper transaction state");
- return;
- }
-
-
- /* Send request to coordinator */
- err = rd_kafka_AddPartitionsToTxnRequest(
- rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
- &rk->rk_eos.txn_pending_rktps, errstr, sizeof(errstr),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_txn_handle_AddPartitionsToTxn, NULL);
- if (err) {
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
- rd_kafka_dbg(rk, EOS, "ADDPARTS",
- "Not registering partitions: %s", errstr);
- return;
- }
-
- /* Move all pending partitions to wait-response list.
- * No need to keep waitresp sorted. */
- TAILQ_CONCAT(&rk->rk_eos.txn_waitresp_rktps,
- &rk->rk_eos.txn_pending_rktps, rktp_txnlink);
-
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
-
- rk->rk_eos.txn_req_cnt++;
-
- rd_rkb_dbg(rk->rk_eos.txn_coord, EOS, "ADDPARTS",
- "Registering partitions with transaction");
-}
-
-
-static void rd_kafka_txn_register_partitions_tmr_cb(rd_kafka_timers_t *rkts,
- void *arg) {
- rd_kafka_t *rk = arg;
- rd_kafka_txn_register_partitions(rk);
-}
-
-
-/**
- * @brief Schedule register_partitions() as soon as possible.
- *
- * @locality any
- * @locks any
- */
-void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms) {
- rd_kafka_timer_start_oneshot(
- &rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr,
- rd_false /*dont-restart*/,
- backoff_ms ? backoff_ms * 1000 : 1 /* immediate */,
- rd_kafka_txn_register_partitions_tmr_cb, rk);
-}
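-
-/*
- * Usage note: the backoff is given in milliseconds and converted to
- * microseconds for the timer; a zero backoff becomes a 1 microsecond,
- * i.e. effectively immediate, timer. For example:
- *
- *   rd_kafka_txn_schedule_register_partitions(rk, 0);    immediately
- *   rd_kafka_txn_schedule_register_partitions(rk, 500);  500 ms backoff
- */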
-
-
-
-/**
- * @brief Clears \p flag from all rktps and destroys them, emptying
- * and reinitializing the \p tqh.
- */
-static void rd_kafka_txn_clear_partitions_flag(rd_kafka_toppar_tqhead_t *tqh,
- int flag) {
- rd_kafka_toppar_t *rktp, *tmp;
-
- TAILQ_FOREACH_SAFE(rktp, tqh, rktp_txnlink, tmp) {
- rd_kafka_toppar_lock(rktp);
- rd_dassert(rktp->rktp_flags & flag);
- rktp->rktp_flags &= ~flag;
- rd_kafka_toppar_unlock(rktp);
- rd_kafka_toppar_destroy(rktp);
- }
-
- TAILQ_INIT(tqh);
-}
-
-
-/**
- * @brief Clear all pending partitions.
- *
- * @locks txn_pending_lock MUST be held
- */
-static void rd_kafka_txn_clear_pending_partitions(rd_kafka_t *rk) {
- rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_pending_rktps,
- RD_KAFKA_TOPPAR_F_PEND_TXN);
- rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_waitresp_rktps,
- RD_KAFKA_TOPPAR_F_PEND_TXN);
-}
-
-/**
- * @brief Clear all added partitions.
- *
- * @locks rd_kafka_wrlock(rk) MUST be held
- */
-static void rd_kafka_txn_clear_partitions(rd_kafka_t *rk) {
- rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_rktps,
- RD_KAFKA_TOPPAR_F_IN_TXN);
-}
-
-
-
-/**
- * @brief Async handler for init_transactions()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t rd_kafka_txn_op_init_transactions(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- if ((error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_INIT, RD_KAFKA_TXN_STATE_WAIT_PID,
- RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) {
- rd_kafka_wrunlock(rk);
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_READY_NOT_ACKED) {
- /* A previous init_transactions() call finished successfully
- * after its timeout expired, and the application has now called
- * init_transactions() again: we do nothing here,
- * ack_init_transactions() will transition the state from
- * READY_NOT_ACKED to READY. */
- rd_kafka_wrunlock(rk);
-
- } else {
-
- /* Possibly a no-op if already in WAIT_PID state */
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_WAIT_PID);
-
- rk->rk_eos.txn_init_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rd_kafka_wrunlock(rk);
-
- /* Start idempotent producer to acquire PID */
- rd_kafka_idemp_start(rk, rd_true /*immediately*/);
-
- /* Do not call curr_api_set_result, it will be triggered from
- * idemp_state_change() when the PID has been retrieved. */
- }
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Async handler for the application to acknowledge
- * successful background completion of init_transactions().
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t
-rd_kafka_txn_op_ack_init_transactions(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- if (!(error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED)))
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY);
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms) {
- rd_kafka_error_t *error;
- rd_ts_t abs_timeout;
-
- /* Cap the actual timeout to transaction.timeout.ms * 2 when an
- * infinite timeout is provided; this makes sure the call doesn't
- * block indefinitely in case a coordinator is not available.
- * This is only needed for init_transactions() since there is no
- * coordinator to time us out yet. */
- if (timeout_ms == RD_POLL_INFINITE &&
- /* Avoid overflow */
- rk->rk_conf.eos.transaction_timeout_ms < INT_MAX / 2)
- timeout_ms = rk->rk_conf.eos.transaction_timeout_ms * 2;
-
- if ((error = rd_kafka_txn_curr_api_begin(rk, "init_transactions",
- rd_false /* no cap */,
- timeout_ms, &abs_timeout)))
- return error;
-
- /* init_transactions() will continue to operate in the background
- * if the timeout expires, and the application may call
- * init_transactions() again to resume the initialization
- * process.
- * For this reason we need two states:
- * - TXN_STATE_READY_NOT_ACKED for when initialization is done
- * but the API call timed out prior to success, meaning the
- * application does not know initialization finished and
- * is thus not allowed to call subsequent txn APIs, e.g. begin..()
- * - TXN_STATE_READY for when initialization is done and this
- * function has returned successfully to the application.
- *
- * And due to the two states we need two calls to the rdkafka main
- * thread (to keep txn_state synchronization in one place). */
-
- /* First call is to trigger initialization */
- if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_init_transactions,
- abs_timeout))) {
- if (rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR__TIMED_OUT) {
- /* See if there's a more meaningful txn_init_err set
- * by idempo that we can return. */
- rd_kafka_resp_err_t err;
- rd_kafka_rdlock(rk);
- err =
- rd_kafka_txn_normalize_err(rk->rk_eos.txn_init_err);
- rd_kafka_rdunlock(rk);
-
- if (err && err != RD_KAFKA_RESP_ERR__TIMED_OUT) {
- rd_kafka_error_destroy(error);
- error = rd_kafka_error_new_retriable(
- err, "Failed to initialize Producer ID: %s",
- rd_kafka_err2str(err));
- }
- }
-
- return rd_kafka_txn_curr_api_return(rk, rd_true, error);
- }
-
-
- /* Second call is to transition from READY_NOT_ACKED -> READY,
- * if necessary. */
- error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_ack_init_transactions,
- /* Timeout must be infinite since this is
- * a synchronization point.
- * The call is immediate though, so this
- * will not block. */
- RD_POLL_INFINITE);
-
- return rd_kafka_txn_curr_api_return(rk,
- /* not resumable at this point */
- rd_false, error);
-}
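-
-/*
- * A minimal application-side sketch (illustrative only) of the resumable
- * call pattern described above: on a retriable error, e.g. a timeout,
- * the application may call init_transactions() again to resume the
- * initialization that keeps running in the background:
- *
- *   rd_kafka_error_t *error;
- *
- *   while ((error = rd_kafka_init_transactions(rk, 30 * 1000))) {
- *           int retriable = rd_kafka_error_is_retriable(error);
- *           rd_kafka_error_destroy(error);
- *           if (!retriable)
- *                   break;
- *   }
- */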
-
-
-
-/**
- * @brief Handler for begin_transaction()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t rd_kafka_txn_op_begin_transaction(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
- rd_bool_t wakeup_brokers = rd_false;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
- if (!(error =
- rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_READY))) {
- rd_assert(TAILQ_EMPTY(&rk->rk_eos.txn_rktps));
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION);
-
- rd_assert(rk->rk_eos.txn_req_cnt == 0);
- rd_atomic64_set(&rk->rk_eos.txn_dr_fails, 0);
- rk->rk_eos.txn_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free);
- rk->rk_eos.txn_errstr = NULL;
-
- /* Wake up all broker threads that may have messages to send
- * which were waiting for this transaction state.
- * This must be done below, with no lock held. */
- wakeup_brokers = rd_true;
- }
- rd_kafka_wrunlock(rk);
-
- if (wakeup_brokers)
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
- "begin transaction");
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk) {
- rd_kafka_error_t *error;
-
- if ((error = rd_kafka_txn_curr_api_begin(rk, "begin_transaction",
- rd_false, 0, NULL)))
- return error;
-
- error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_transaction,
- RD_POLL_INFINITE);
-
- return rd_kafka_txn_curr_api_return(rk, rd_false /*not resumable*/,
- error);
-}
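-
-/*
- * Where begin_transaction() sits in the per-transaction produce loop,
- * as an illustrative sketch (topic name, payload and size are
- * placeholders, error handling omitted):
- *
- *   error = rd_kafka_begin_transaction(rk);
- *
- *   rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- *                     RD_KAFKA_V_VALUE(payload, size),
- *                     RD_KAFKA_V_END);
- *
- *   error = rd_kafka_commit_transaction(rk, 60 * 1000);
- */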
-
-
-static rd_kafka_resp_err_t
-rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *reply_opaque);
-
-/**
- * @brief Handle TxnOffsetCommitResponse
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_handle_TxnOffsetCommit(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_op_t *rko = opaque;
- int actions = 0;
- rd_kafka_topic_partition_list_t *partitions = NULL;
- char errstr[512];
-
- *errstr = '\0';
-
- if (err)
- goto done;
-
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- partitions = rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields);
- if (!partitions)
- goto err_parse;
-
- err = rd_kafka_topic_partition_list_get_err(partitions);
- if (err) {
- char errparts[256];
- rd_kafka_topic_partition_list_str(partitions, errparts,
- sizeof(errparts),
- RD_KAFKA_FMT_F_ONLY_ERR);
- rd_snprintf(errstr, sizeof(errstr),
- "Failed to commit offsets to transaction on "
- "broker %s: %s "
- "(after %dms)",
- rd_kafka_broker_name(rkb), errparts,
- (int)(request->rkbuf_ts_sent / 1000));
- }
-
- goto done;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-
-done:
- if (err) {
- if (!*errstr) {
- rd_snprintf(errstr, sizeof(errstr),
- "Failed to commit offsets to "
- "transaction on broker %s: %s "
- "(after %d ms)",
- rkb ? rd_kafka_broker_name(rkb) : "(none)",
- rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000));
- }
- }
-
-
- if (partitions)
- rd_kafka_topic_partition_list_destroy(partitions);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR_NO_ERROR:
- break;
-
- case RD_KAFKA_RESP_ERR__DESTROY:
- /* Producer is being terminated, ignore the response. */
- case RD_KAFKA_RESP_ERR__OUTDATED:
- /* Terminating or outdated: ignore the response and
- * return early without any further side-effects. */
- actions = RD_KAFKA_ERR_ACTION_SPECIAL;
- return;
-
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
- case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- case RD_KAFKA_RESP_ERR__TIMED_OUT:
- case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
- /* Note: this is the group coordinator, not the
- * transaction coordinator. */
- rd_kafka_coord_cache_evict(&rk->rk_coord_cache, rkb);
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
-
- case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
- case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
- case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
-
- case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
- case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
- case RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT:
- actions |= RD_KAFKA_ERR_ACTION_FATAL;
- break;
-
- case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED:
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- break;
-
- case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
- case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
- case RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID:
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- break;
-
- default:
- /* Unhandled error, fail transaction */
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- break;
- }
-
- err = rd_kafka_txn_normalize_err(err);
-
- if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
- rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err, "%s", errstr);
-
- } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- int remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout);
-
- if (!rd_timeout_expired(remains_ms)) {
- rd_kafka_coord_req(
- rk, RD_KAFKA_COORD_GROUP,
- rko->rko_u.txn.cgmetadata->group_id,
- rd_kafka_txn_send_TxnOffsetCommitRequest, rko,
- 500 /* 500ms delay before retrying */,
- rd_timeout_remains_limit0(
- remains_ms, rk->rk_conf.socket_timeout_ms),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_txn_handle_TxnOffsetCommit, rko);
- return;
- } else if (!err)
- err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- }
-
- if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
- rd_kafka_txn_set_abortable_error(rk, err, "%s", errstr);
-
- if (err)
- rd_kafka_txn_curr_api_set_result(
- rk, actions, rd_kafka_error_new(err, "%s", errstr));
- else
- rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
-
- rd_kafka_op_destroy(rko);
-}
-
-
-
-/**
- * @brief Construct and send TxnOffsetCommitRequest.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static rd_kafka_resp_err_t
-rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb,
- rd_kafka_op_t *rko,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *reply_opaque) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_buf_t *rkbuf;
- int16_t ApiVersion;
- rd_kafka_pid_t pid;
- const rd_kafka_consumer_group_metadata_t *cgmetadata =
- rko->rko_u.txn.cgmetadata;
- int cnt;
-
- rd_kafka_rdlock(rk);
- if (rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_IN_TRANSACTION) {
- rd_kafka_rdunlock(rk);
- /* Do not free the rko, it is passed as the reply_opaque
- * on the reply queue by coord_req_fsm() when we return
- * an error here. */
- return RD_KAFKA_RESP_ERR__STATE;
- }
-
- pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
- rd_kafka_rdunlock(rk);
- if (!rd_kafka_pid_valid(pid)) {
- /* Do not free the rko, it is passed as the reply_opaque
- * on the reply queue by coord_req_fsm() when we return
- * an error here. */
- return RD_KAFKA_RESP_ERR__STATE;
- }
-
- ApiVersion = rd_kafka_broker_ApiVersion_supported(
- rkb, RD_KAFKAP_TxnOffsetCommit, 0, 3, NULL);
- if (ApiVersion == -1) {
- /* Do not free the rko, it is passed as the reply_opaque
- * on the reply queue by coord_req_fsm() when we return
- * an error here. */
- return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- }
-
- rkbuf = rd_kafka_buf_new_flexver_request(
- rkb, RD_KAFKAP_TxnOffsetCommit, 1, rko->rko_u.txn.offsets->cnt * 50,
- ApiVersion >= 3);
-
- /* transactional_id */
- rd_kafka_buf_write_str(rkbuf, rk->rk_conf.eos.transactional_id, -1);
-
- /* group_id */
- rd_kafka_buf_write_str(rkbuf, rko->rko_u.txn.cgmetadata->group_id, -1);
-
- /* PID */
- rd_kafka_buf_write_i64(rkbuf, pid.id);
- rd_kafka_buf_write_i16(rkbuf, pid.epoch);
-
- if (ApiVersion >= 3) {
- /* GenerationId */
- rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id);
- /* MemberId */
- rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1);
- /* GroupInstanceId */
- rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id,
- -1);
- }
-
- /* Write per-partition offsets list */
- const rd_kafka_topic_partition_field_t fields[] = {
- RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
- RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
- ApiVersion >= 2 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH
- : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP,
- RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA,
- RD_KAFKA_TOPIC_PARTITION_FIELD_END};
- cnt = rd_kafka_buf_write_topic_partitions(
- rkbuf, rko->rko_u.txn.offsets, rd_true /*skip invalid offsets*/,
- rd_false /*any offset*/, fields);
- if (!cnt) {
- /* No valid partition offsets, don't commit. */
- rd_kafka_buf_destroy(rkbuf);
- /* Do not free the rko, it is passed as the reply_opaque
- * on the reply queue by coord_req_fsm() when we return
- * an error here. */
- return RD_KAFKA_RESP_ERR__NO_OFFSET;
- }
-
- rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
-
- rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
-
- rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
- reply_opaque);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Handle AddOffsetsToTxnResponse
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_handle_AddOffsetsToTxn(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- rd_kafka_op_t *rko = opaque;
- int16_t ErrorCode;
- int actions = 0;
- int remains_ms;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY) {
- rd_kafka_op_destroy(rko);
- return;
- }
-
- if (err)
- goto done;
-
- rd_kafka_buf_read_throttle_time(rkbuf);
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- err = ErrorCode;
- goto done;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-
-done:
- if (err) {
- rd_assert(rk->rk_eos.txn_req_cnt > 0);
- rk->rk_eos.txn_req_cnt--;
- }
-
- remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout);
- if (rd_timeout_expired(remains_ms) && !err)
- err = RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- switch (err) {
- case RD_KAFKA_RESP_ERR_NO_ERROR:
- break;
-
- case RD_KAFKA_RESP_ERR__DESTROY:
- /* Producer is being terminated, ignore the response. */
- case RD_KAFKA_RESP_ERR__OUTDATED:
- /* Set a non-actionable actions flag so that
- * curr_api_set_result() is called below, without
- * other side-effects. */
- actions = RD_KAFKA_ERR_ACTION_SPECIAL;
- break;
-
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- case RD_KAFKA_RESP_ERR__TIMED_OUT:
- /* For these errors we can't be sure if the
- * request was received by the broker or not,
- * so increase the txn_req_cnt back up as if
- * they were received so that an EndTxnRequest
- * is sent on abort_transaction(). */
- rk->rk_eos.txn_req_cnt++;
- /* FALLTHRU */
- case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
- case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
- case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
- actions |=
- RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH;
- break;
-
- case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
- case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
- case RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT:
- actions |= RD_KAFKA_ERR_ACTION_FATAL;
- break;
-
- case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED:
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- break;
-
- case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
- case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
- case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
-
- default:
- /* All unhandled errors are permanent */
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- break;
- }
-
- err = rd_kafka_txn_normalize_err(err);
-
- rd_kafka_dbg(rk, EOS, "ADDOFFSETS",
- "AddOffsetsToTxn response from %s: %s (%s)",
- rkb ? rd_kafka_broker_name(rkb) : "(none)",
- rd_kafka_err2name(err), rd_kafka_actions2str(actions));
-
- /* All unhandled errors are considered permanent */
- if (err && !actions)
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
-
- if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
- rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
- "Failed to add offsets to "
- "transaction: %s",
- rd_kafka_err2str(err));
- } else {
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
- rd_kafka_txn_coord_timer_start(rk, 50);
-
- if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
- rd_rkb_dbg(
- rkb, EOS, "ADDOFFSETS",
- "Failed to add offsets to transaction on "
- "broker %s: %s (after %dms, %dms remains): "
- "error is retriable",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000), remains_ms);
-
- if (!rd_timeout_expired(remains_ms) &&
- rd_kafka_buf_retry(rk->rk_eos.txn_coord, request)) {
- rk->rk_eos.txn_req_cnt++;
- return;
- }
-
- /* Propagate as retriable error through
- * curr_api_set_result() below */
- }
- }
-
- if (err)
- rd_rkb_log(rkb, LOG_ERR, "ADDOFFSETS",
- "Failed to add offsets to transaction on broker %s: "
- "%s",
- rkb ? rd_kafka_broker_name(rkb) : "(none)",
- rd_kafka_err2str(err));
-
- if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
- rd_kafka_txn_set_abortable_error(
- rk, err,
- "Failed to add offsets to "
- "transaction on broker %s: "
- "%s (after %dms)",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000));
-
- if (!err) {
- /* Step 2: Commit offsets to transaction on the
- * group coordinator. */
-
- rd_kafka_coord_req(
- rk, RD_KAFKA_COORD_GROUP,
- rko->rko_u.txn.cgmetadata->group_id,
- rd_kafka_txn_send_TxnOffsetCommitRequest, rko,
- 0 /* no delay */,
- rd_timeout_remains_limit0(remains_ms,
- rk->rk_conf.socket_timeout_ms),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0),
- rd_kafka_txn_handle_TxnOffsetCommit, rko);
-
- } else {
-
- rd_kafka_txn_curr_api_set_result(
- rk, actions,
- rd_kafka_error_new(
- err,
- "Failed to add offsets to transaction on "
- "broker %s: %s (after %dms)",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
- (int)(request->rkbuf_ts_sent / 1000)));
-
- rd_kafka_op_destroy(rko);
- }
-}
-
-
-/**
- * @brief Async handler for send_offsets_to_transaction()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t
-rd_kafka_txn_op_send_offsets_to_transaction(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- char errstr[512];
- rd_kafka_error_t *error;
- rd_kafka_pid_t pid;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- *errstr = '\0';
-
- rd_kafka_wrlock(rk);
-
- if ((error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION))) {
- rd_kafka_wrunlock(rk);
- goto err;
- }
-
- rd_kafka_wrunlock(rk);
-
- pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
- if (!rd_kafka_pid_valid(pid)) {
- rd_dassert(!*"BUG: No PID despite proper transaction state");
- error = rd_kafka_error_new_retriable(
- RD_KAFKA_RESP_ERR__STATE,
- "No PID available (idempotence state %s)",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
- goto err;
- }
-
- /* This is a multi-stage operation, consisting of:
- * 1) send AddOffsetsToTxnRequest to transaction coordinator.
- * 2) send TxnOffsetCommitRequest to group coordinator. */
-
- err = rd_kafka_AddOffsetsToTxnRequest(
- rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
- rko->rko_u.txn.cgmetadata->group_id, errstr, sizeof(errstr),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_AddOffsetsToTxn,
- rko);
-
- if (err) {
- error = rd_kafka_error_new_retriable(err, "%s", errstr);
- goto err;
- }
-
- rk->rk_eos.txn_req_cnt++;
-
- return RD_KAFKA_OP_RES_KEEP; /* the rko is passed to AddOffsetsToTxn */
-
-err:
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-/**
- * error returns:
- * ERR__TRANSPORT - retriable
- */
-rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(
- rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- const rd_kafka_consumer_group_metadata_t *cgmetadata,
- int timeout_ms) {
- rd_kafka_error_t *error;
- rd_kafka_op_t *rko;
- rd_kafka_topic_partition_list_t *valid_offsets;
- rd_ts_t abs_timeout;
-
- if (!cgmetadata || !offsets)
- return rd_kafka_error_new(
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "cgmetadata and offsets are required parameters");
-
- if ((error = rd_kafka_txn_curr_api_begin(
- rk, "send_offsets_to_transaction",
- /* Cap timeout to txn timeout */
- rd_true, timeout_ms, &abs_timeout)))
- return error;
-
-
- valid_offsets = rd_kafka_topic_partition_list_match(
- offsets, rd_kafka_topic_partition_match_valid_offset, NULL);
-
- if (valid_offsets->cnt == 0) {
- /* No valid offsets, e.g., nothing was consumed,
- * this is not an error, do nothing. */
- rd_kafka_topic_partition_list_destroy(valid_offsets);
- return rd_kafka_txn_curr_api_return(rk, rd_false, NULL);
- }
-
- rd_kafka_topic_partition_list_sort_by_topic(valid_offsets);
-
- rko = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN,
- rd_kafka_txn_op_send_offsets_to_transaction);
- rko->rko_u.txn.offsets = valid_offsets;
- rko->rko_u.txn.cgmetadata =
- rd_kafka_consumer_group_metadata_dup(cgmetadata);
- rko->rko_u.txn.abs_timeout = abs_timeout;
-
- /* Timeout is enforced by op_send_offsets_to_transaction() */
- error = rd_kafka_txn_op_req1(rk, rko, RD_POLL_INFINITE);
-
- return rd_kafka_txn_curr_api_return(rk, rd_false, error);
-}
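-
-/*
- * A minimal application-side sketch (illustrative only, error handling
- * omitted) of the intended consume-transform-produce usage: send the
- * consumer's current positions to the transaction before committing it.
- * `consumer` is a hypothetical consumer instance handle:
- *
- *   rd_kafka_topic_partition_list_t *offsets;
- *   rd_kafka_consumer_group_metadata_t *cgmd =
- *           rd_kafka_consumer_group_metadata(consumer);
- *
- *   rd_kafka_assignment(consumer, &offsets);
- *   rd_kafka_position(consumer, offsets);
- *
- *   error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmd, -1);
- *
- *   rd_kafka_consumer_group_metadata_destroy(cgmd);
- *   rd_kafka_topic_partition_list_destroy(offsets);
- */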
-
-
-
-/**
- * @brief Successfully complete the transaction.
- *
- * Current state must be either COMMIT_NOT_ACKED or ABORT_NOT_ACKED.
- *
- * @locality rdkafka main thread
- * @locks rd_kafka_wrlock(rk) MUST be held
- */
-static void rd_kafka_txn_complete(rd_kafka_t *rk, rd_bool_t is_commit) {
- rd_kafka_dbg(rk, EOS, "TXNCOMPLETE", "Transaction successfully %s",
- is_commit ? "committed" : "aborted");
-
- /* Clear all transaction partition state */
- rd_kafka_txn_clear_pending_partitions(rk);
- rd_kafka_txn_clear_partitions(rk);
-
- rk->rk_eos.txn_requires_epoch_bump = rd_false;
- rk->rk_eos.txn_req_cnt = 0;
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY);
-}
-
-
-/**
- * @brief EndTxn (commit or abort of transaction on the coordinator) is done,
- * or was skipped.
- * Continue with next steps (if any) before completing the local
- * transaction state.
- *
- * @locality rdkafka main thread
- * @locks_acquired rd_kafka_wrlock(rk), rk->rk_eos.txn_curr_api.lock
- */
-static void rd_kafka_txn_endtxn_complete(rd_kafka_t *rk) {
- rd_bool_t is_commit;
-
- mtx_lock(&rk->rk_eos.txn_curr_api.lock);
- is_commit = !strcmp(rk->rk_eos.txn_curr_api.name, "commit_transaction");
- mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
-
- rd_kafka_wrlock(rk);
-
- /* If an epoch bump is required, let idempo handle it.
- * When the bump is finished we'll be notified through
- * idemp_state_change() and we can complete the local transaction state
- * and set the final API call result.
- * If the bumping fails a fatal error will be raised. */
- if (rk->rk_eos.txn_requires_epoch_bump) {
- rd_kafka_resp_err_t bump_err = rk->rk_eos.txn_err;
- rd_dassert(!is_commit);
-
- rd_kafka_wrunlock(rk);
-
- /* After the epoch bump is done we'll be transitioned
- * to the next state. */
- rd_kafka_idemp_drain_epoch_bump0(
- rk, rd_false /* don't allow txn abort */, bump_err,
- "Transaction aborted: %s", rd_kafka_err2str(bump_err));
- return;
- }
-
- if (is_commit)
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED);
- else
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
-}
-
-
-/**
- * @brief Handle EndTxnResponse (commit or abort)
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_handle_EndTxn(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode;
- int actions = 0;
- rd_bool_t is_commit, may_retry = rd_false, require_bump = rd_false;
-
- if (err == RD_KAFKA_RESP_ERR__DESTROY)
- return;
-
- is_commit = request->rkbuf_u.EndTxn.commit;
-
- if (err)
- goto err;
-
- rd_kafka_buf_read_throttle_time(rkbuf);
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
- err = ErrorCode;
- goto err;
-
-err_parse:
- err = rkbuf->rkbuf_err;
- /* FALLTHRU */
-
-err:
- rd_kafka_wrlock(rk);
-
- if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) {
- may_retry = rd_true;
-
- } else if (rk->rk_eos.txn_state ==
- RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) {
- may_retry = rd_true;
-
- } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) {
- /* Transaction has failed locally, typically due to timeout.
- * Get the transaction error and return that instead of
- * this error.
- * This is a tricky state since the transaction will have
- * failed locally but the EndTxn(commit) may have succeeded. */
-
-
- if (err) {
- rd_kafka_txn_curr_api_set_result(
- rk, RD_KAFKA_ERR_ACTION_PERMANENT,
- rd_kafka_error_new(
- rk->rk_eos.txn_err,
- "EndTxn failed with %s but transaction "
- "had already failed due to: %s",
- rd_kafka_err2name(err), rk->rk_eos.txn_errstr));
- } else {
- /* If the transaction has failed locally but
- * this EndTxn commit succeeded we'll raise
- * a fatal error. */
- if (is_commit)
- rd_kafka_txn_curr_api_set_result(
- rk, RD_KAFKA_ERR_ACTION_FATAL,
- rd_kafka_error_new(
- rk->rk_eos.txn_err,
- "Transaction commit succeeded on the "
- "broker but the transaction "
- "had already failed locally due to: %s",
- rk->rk_eos.txn_errstr));
-
- else
- rd_kafka_txn_curr_api_set_result(
- rk, RD_KAFKA_ERR_ACTION_PERMANENT,
- rd_kafka_error_new(
- rk->rk_eos.txn_err,
- "Transaction abort succeeded on the "
- "broker but the transaction"
- "had already failed locally due to: %s",
- rk->rk_eos.txn_errstr));
- }
-
- rd_kafka_wrunlock(rk);
-
-
- return;
-
- } else if (!err) {
- /* Request is outdated */
- err = RD_KAFKA_RESP_ERR__OUTDATED;
- }
-
-
- rd_kafka_dbg(rk, EOS, "ENDTXN",
- "EndTxn returned %s in state %s (may_retry=%s)",
- rd_kafka_err2name(err),
- rd_kafka_txn_state2str(rk->rk_eos.txn_state),
- RD_STR_ToF(may_retry));
-
- rd_kafka_wrunlock(rk);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR_NO_ERROR:
- break;
-
- case RD_KAFKA_RESP_ERR__DESTROY:
- /* Producer is being terminated, ignore the response. */
- case RD_KAFKA_RESP_ERR__OUTDATED:
- /* Transactional state no longer relevant for this
- * outdated response. */
- break;
- case RD_KAFKA_RESP_ERR__TIMED_OUT:
- case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
- /* Request timeout */
- /* FALLTHRU */
- case RD_KAFKA_RESP_ERR__TRANSPORT:
- actions |=
- RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH;
- break;
-
- case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
- case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
- rd_kafka_wrlock(rk);
- rd_kafka_txn_coord_set(rk, NULL, "EndTxn failed: %s",
- rd_kafka_err2str(err));
- rd_kafka_wrunlock(rk);
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
-
- case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
- case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
- actions |= RD_KAFKA_ERR_ACTION_RETRY;
- break;
-
- case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- require_bump = rd_true;
- break;
-
- case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
- case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
- case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
- actions |= RD_KAFKA_ERR_ACTION_FATAL;
- break;
-
- default:
- /* All unhandled errors are permanent */
- actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
- }
-
- err = rd_kafka_txn_normalize_err(err);
-
- if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
- rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
- "Failed to end transaction: %s",
- rd_kafka_err2str(err));
- } else {
- if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
- rd_kafka_txn_coord_timer_start(rk, 50);
-
- if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
- if (require_bump && !is_commit) {
- /* Abort failed due to an invalid PID; starting
- * with KIP-360 we can have idempo sort out
- * epoch bumping.
- * When the epoch has been bumped we'll detect
- * the idemp_state_change and complete the
- * current API call. */
- rd_kafka_idemp_drain_epoch_bump0(
- rk,
- /* don't allow txn abort */
- rd_false, err, "EndTxn %s failed: %s",
- is_commit ? "commit" : "abort",
- rd_kafka_err2str(err));
- return;
- }
-
- /* For aborts we need to revert the state back to
- * BEGIN_ABORT so that the abort can be retried from
- * the beginning in op_abort_transaction(). */
- rd_kafka_wrlock(rk);
- if (rk->rk_eos.txn_state ==
- RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)
- rd_kafka_txn_set_state(
- rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT);
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_set_abortable_error0(
- rk, err, require_bump,
- "Failed to end transaction: "
- "%s",
- rd_kafka_err2str(err));
-
- } else if (may_retry && actions & RD_KAFKA_ERR_ACTION_RETRY &&
- rd_kafka_buf_retry(rkb, request))
- return;
- }
-
- if (err)
- rd_kafka_txn_curr_api_set_result(
- rk, actions,
- rd_kafka_error_new(err, "EndTxn %s failed: %s",
- is_commit ? "commit" : "abort",
- rd_kafka_err2str(err)));
- else
- rd_kafka_txn_endtxn_complete(rk);
-}
-
-
-
-/**
- * @brief Handler for commit_transaction()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t
-rd_kafka_txn_op_commit_transaction(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_pid_t pid;
- int64_t dr_fails;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- if ((error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT,
- RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION,
- RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED)))
- goto done;
-
- if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) {
- /* A previous call to commit_transaction() timed out but the
- * commit completed since then; we were waiting for the
- * application to call commit_transaction() again to resume
- * the call, and it just did. */
- goto done;
- } else if (rk->rk_eos.txn_state ==
- RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) {
- /* A previous call to commit_transaction() timed out and the
- * commit is still in progress; the application has now called
- * commit_transaction() again to resume the call, so keep
- * waiting for the commit to finish. */
- rd_kafka_wrunlock(rk);
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- /* If any messages failed delivery the transaction must be aborted. */
- dr_fails = rd_atomic64_get(&rk->rk_eos.txn_dr_fails);
- if (unlikely(dr_fails > 0)) {
- error = rd_kafka_error_new_txn_requires_abort(
- RD_KAFKA_RESP_ERR__INCONSISTENT,
- "%" PRId64
- " message(s) failed delivery "
- "(see individual delivery reports)",
- dr_fails);
- goto done;
- }
-
- if (!rk->rk_eos.txn_req_cnt) {
- /* If there were no messages produced, or no send_offsets,
- * in this transaction, simply complete the transaction
- * without sending anything to the transaction coordinator
- * (since it will not have any txn state). */
- rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
- "No partitions registered: not sending EndTxn");
- rd_kafka_wrunlock(rk);
- rd_kafka_txn_endtxn_complete(rk);
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
- if (!rd_kafka_pid_valid(pid)) {
- rd_dassert(!*"BUG: No PID despite proper transaction state");
- error = rd_kafka_error_new_retriable(
- RD_KAFKA_RESP_ERR__STATE,
- "No PID available (idempotence state %s)",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
- goto done;
- }
-
- err = rd_kafka_EndTxnRequest(
- rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
- rd_true /* commit */, errstr, sizeof(errstr),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL);
- if (err) {
- error = rd_kafka_error_new_retriable(err, "%s", errstr);
- goto done;
- }
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION);
-
- rd_kafka_wrunlock(rk);
-
- return RD_KAFKA_OP_RES_HANDLED;
-
-done:
- rd_kafka_wrunlock(rk);
-
- /* If the returned error is an abortable error
- * also set the current transaction state accordingly. */
- if (rd_kafka_error_txn_requires_abort(error))
- rd_kafka_txn_set_abortable_error(rk, rd_kafka_error_code(error),
- "%s",
- rd_kafka_error_string(error));
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Handler for commit_transaction()'s first phase: begin commit
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t rd_kafka_txn_op_begin_commit(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
-
- rd_kafka_wrlock(rk);
-
- error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
- RD_KAFKA_TXN_STATE_BEGIN_COMMIT,
- RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION,
- RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED);
-
- if (!error &&
- rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION) {
- /* Transition to BEGIN_COMMIT state if no error and commit not
- * already started. */
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT);
- }
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Handler for last ack of commit_transaction()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t
-rd_kafka_txn_op_commit_transaction_ack(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- if (!(error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) {
- rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
- "Committed transaction now acked by application");
- rd_kafka_txn_complete(rk, rd_true /*is commit*/);
- }
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- rd_ts_t abs_timeout;
-
- /* The commit is in three phases:
- * - begin commit: wait for outstanding messages to be produced,
- * disallow new messages from being produced
- * by application.
- * - commit: commit transaction.
- * - commit not acked: commit done, but waiting for application
- * to acknowledge by completing this API call.
- */
-
- if ((error = rd_kafka_txn_curr_api_begin(rk, "commit_transaction",
- rd_false /* no cap */,
- timeout_ms, &abs_timeout)))
- return error;
-
- /* Begin commit */
- if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_commit,
- abs_timeout)))
- return rd_kafka_txn_curr_api_return(rk,
- /* not resumable yet */
- rd_false, error);
-
- rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
- "Flushing %d outstanding message(s) prior to commit",
- rd_kafka_outq_len(rk));
-
- /* Wait for queued messages to be delivered, limited by
- * the remaining transaction lifetime. */
- if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) {
- rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
- "Flush failed (with %d messages remaining): %s",
- rd_kafka_outq_len(rk), rd_kafka_err2str(err));
-
- if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- error = rd_kafka_error_new_retriable(
- err,
- "Failed to flush all outstanding messages "
- "within the API timeout: "
- "%d message(s) remaining%s",
- rd_kafka_outq_len(rk),
- /* In case event queue delivery reports
- * are enabled and there is no dr callback
- * we instruct the developer to poll
- * the event queue separately, since we
- * can't do it for them. */
- ((rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) &&
- !rk->rk_conf.dr_msg_cb && !rk->rk_conf.dr_cb)
- ? ": the event queue must be polled "
- "for delivery report events in a separate "
- "thread or prior to calling commit"
- : "");
- else
- error = rd_kafka_error_new_retriable(
- err, "Failed to flush outstanding messages: %s",
- rd_kafka_err2str(err));
-
- /* The commit operation is in progress in the background
- * and the application will need to call this API again
- * to resume. */
- return rd_kafka_txn_curr_api_return(rk, rd_true, error);
- }
-
- rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
- "Transaction commit message flush complete");
-
- /* Commit transaction */
- error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction,
- abs_timeout);
- if (error)
- return rd_kafka_txn_curr_api_return(rk, rd_true, error);
-
- /* Last call is to transition from COMMIT_NOT_ACKED to READY */
- error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction_ack,
- /* Timeout must be infinite since this is
- * a synchronization point.
- * The call is immediate though, so this
- * will not block. */
- RD_POLL_INFINITE);
-
- return rd_kafka_txn_curr_api_return(rk,
- /* not resumable at this point */
- rd_false, error);
-}
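-
-/*
- * Illustrative application-side sketch of driving the commit flow
- * above via the public transactional API (not part of librdkafka;
- * topic name and payload are placeholders): retriable errors resume
- * the commit, abortable errors require abort_transaction().
- */
-#if 0 /* example only */
-static void example_commit_with_retry(rd_kafka_t *rk) {
- rd_kafka_error_t *error;
-
- if ((error = rd_kafka_begin_transaction(rk))) {
- rd_kafka_error_destroy(error);
- return;
- }
-
- rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("example-topic"),
- RD_KAFKA_V_VALUE("payload", 7), RD_KAFKA_V_END);
-
- while ((error = rd_kafka_commit_transaction(rk, 10 * 1000))) {
- if (rd_kafka_error_is_retriable(error)) {
- /* E.g., flush timed out: call again to resume */
- rd_kafka_error_destroy(error);
- continue;
- }
- if (rd_kafka_error_txn_requires_abort(error)) {
- rd_kafka_error_destroy(error);
- error = rd_kafka_abort_transaction(rk, 10 * 1000);
- if (error)
- rd_kafka_error_destroy(error);
- } else {
- rd_kafka_error_destroy(error); /* fatal error */
- }
- break;
- }
-}
-#endif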
-
-
-
-/**
- * @brief Handler for abort_transaction()'s first phase: begin abort
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t rd_kafka_txn_op_begin_abort(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
- rd_bool_t clear_pending = rd_false;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- error =
- rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
- RD_KAFKA_TXN_STATE_BEGIN_ABORT,
- RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
- RD_KAFKA_TXN_STATE_ABORTABLE_ERROR,
- RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
-
- if (!error &&
- (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
- rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR)) {
- /* Transition to BEGIN_ABORT state if there is no error and
- * the abort has not already been started. */
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT);
- clear_pending = rd_true;
- }
-
- rd_kafka_wrunlock(rk);
-
- if (clear_pending) {
- mtx_lock(&rk->rk_eos.txn_pending_lock);
- rd_kafka_txn_clear_pending_partitions(rk);
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
- }
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Handler for abort_transaction()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t rd_kafka_txn_op_abort_transaction(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_pid_t pid;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- if ((error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT,
- RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
- RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED)))
- goto done;
-
- if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED) {
- /* A previous call to abort_transaction() timed out but
- * the aborting completed since then, we still need to wait
- * for the application to call abort_transaction() again
- * to synchronize state, and it just did. */
- goto done;
- } else if (rk->rk_eos.txn_state ==
- RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) {
- /* A previous call to abort_transaction() timed out and
- * the abort is still in progress: the result will be
- * set once the in-flight EndTxn completes, so don't
- * set it here. */
- rd_kafka_wrunlock(rk);
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- if (!rk->rk_eos.txn_req_cnt) {
- rd_kafka_dbg(rk, EOS, "TXNABORT",
- "No partitions registered: not sending EndTxn");
- rd_kafka_wrunlock(rk);
- rd_kafka_txn_endtxn_complete(rk);
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- /* If the underlying idempotent producer's state indicates it
- * is re-acquiring its PID we need to wait for that to finish
- * before allowing a new begin_transaction(), and since that is
- * not a blocking call we need to perform that wait in this
- * state instead.
- * To recover we need to request an epoch bump from the
- * transaction coordinator. This is handled automatically
- * by the idempotent producer, so we just need to wait for
- * the new pid to be assigned.
- */
- if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_ASSIGNED &&
- rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT) {
- rd_kafka_dbg(rk, EOS, "TXNABORT",
- "Waiting for transaction coordinator "
- "PID bump to complete before aborting "
- "transaction (idempotent producer state %s)",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
-
- rd_kafka_wrunlock(rk);
-
- return RD_KAFKA_OP_RES_HANDLED;
- }
-
- pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_true);
- if (!rd_kafka_pid_valid(pid)) {
- rd_dassert(!*"BUG: No PID despite proper transaction state");
- error = rd_kafka_error_new_retriable(
- RD_KAFKA_RESP_ERR__STATE,
- "No PID available (idempotence state %s)",
- rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
- goto done;
- }
-
- err = rd_kafka_EndTxnRequest(
- rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
- rd_false /* abort */, errstr, sizeof(errstr),
- RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL);
- if (err) {
- error = rd_kafka_error_new_retriable(err, "%s", errstr);
- goto done;
- }
-
- rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION);
-
- rd_kafka_wrunlock(rk);
-
- return RD_KAFKA_OP_RES_HANDLED;
-
-done:
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-/**
- * @brief Handler for last ack of abort_transaction()
- *
- * @locks none
- * @locality rdkafka main thread
- */
-static rd_kafka_op_res_t
-rd_kafka_txn_op_abort_transaction_ack(rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko) {
- rd_kafka_error_t *error;
-
- if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
- return RD_KAFKA_OP_RES_HANDLED;
-
- rd_kafka_wrlock(rk);
-
- if (!(error = rd_kafka_txn_require_state(
- rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) {
- rd_kafka_dbg(rk, EOS, "TXNABORT",
- "Aborted transaction now acked by application");
- rd_kafka_txn_complete(rk, rd_false /*is abort*/);
- }
-
- rd_kafka_wrunlock(rk);
-
- rd_kafka_txn_curr_api_set_result(rk, 0, error);
-
- return RD_KAFKA_OP_RES_HANDLED;
-}
-
-
-
-rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- rd_ts_t abs_timeout;
-
- if ((error = rd_kafka_txn_curr_api_begin(rk, "abort_transaction",
- rd_false /* no cap */,
- timeout_ms, &abs_timeout)))
- return error;
-
- /* The abort is multi-phase:
- * - set state to BEGIN_ABORT
- * - flush() outstanding messages
- * - send EndTxn
- */
-
- /* Begin abort */
- if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_abort,
- abs_timeout)))
- return rd_kafka_txn_curr_api_return(rk,
- /* not resumable yet */
- rd_false, error);
-
- rd_kafka_dbg(rk, EOS, "TXNABORT",
- "Purging and flushing %d outstanding message(s) prior "
- "to abort",
- rd_kafka_outq_len(rk));
-
- /* Purge all queued messages.
- * Will need to wait for messages in-flight since purging these
- * messages may lead to gaps in the idempotent producer sequences. */
- err = rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE |
- RD_KAFKA_PURGE_F_ABORT_TXN);
-
- /* Serve delivery reports for the purged messages. */
- if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) {
- /* FIXME: Not sure these errors matter that much */
- if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- error = rd_kafka_error_new_retriable(
- err,
- "Failed to flush all outstanding messages "
- "within the API timeout: "
- "%d message(s) remaining%s",
- rd_kafka_outq_len(rk),
- (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR)
- ? ": the event queue must be polled "
- "for delivery report events in a separate "
- "thread or prior to calling abort"
- : "");
-
- else
- error = rd_kafka_error_new_retriable(
- err, "Failed to flush outstanding messages: %s",
- rd_kafka_err2str(err));
-
- /* The abort operation is in progress in the background
- * and the application will need to call this API again
- * to resume. */
- return rd_kafka_txn_curr_api_return(rk, rd_true, error);
- }
-
- rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
- "Transaction abort message purge and flush complete");
-
- error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction,
- abs_timeout);
- if (error)
- return rd_kafka_txn_curr_api_return(rk, rd_true, error);
-
- /* Last call is to transition from ABORT_NOT_ACKED to READY. */
- error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction_ack,
- /* Timeout must be infinite since this is
- * a synchronization point.
- * The call is immediate though, so this
- * will not block. */
- RD_POLL_INFINITE);
-
- return rd_kafka_txn_curr_api_return(rk,
- /* not resumable at this point */
- rd_false, error);
-}
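-
-/*
- * Illustrative application-side sketch (not part of librdkafka):
- * abort_transaction() is resumable, so on a retriable error (e.g.
- * the flush timed out) the application simply calls it again to
- * resume the background abort.
- */
-#if 0 /* example only */
-static void example_abort_with_retry(rd_kafka_t *rk) {
- rd_kafka_error_t *error;
-
- while ((error = rd_kafka_abort_transaction(rk, 5 * 1000))) {
- int retriable = rd_kafka_error_is_retriable(error);
- rd_kafka_error_destroy(error);
- if (!retriable)
- break; /* fatal or fenced: give up */
- /* retriable: call again to resume the abort */
- }
-}
-#endif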
-
-
-
-/**
- * @brief Coordinator query timer
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_coord_timer_cb(rd_kafka_timers_t *rkts, void *arg) {
- rd_kafka_t *rk = arg;
-
- rd_kafka_wrlock(rk);
- rd_kafka_txn_coord_query(rk, "Coordinator query timer");
- rd_kafka_wrunlock(rk);
-}
-
-/**
- * @brief Start coord query timer if not already started.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms) {
- rd_assert(rd_kafka_is_transactional(rk));
- rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr,
- /* don't restart if already started */
- rd_false, 1000 * timeout_ms,
- rd_kafka_txn_coord_timer_cb, rk);
-}
-
-
-/**
- * @brief Parses and handles a FindCoordinator response.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-static void rd_kafka_txn_handle_FindCoordinator(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_buf_t *request,
- void *opaque) {
- const int log_decode_errors = LOG_ERR;
- int16_t ErrorCode;
- rd_kafkap_str_t Host;
- int32_t NodeId, Port;
- char errstr[512];
-
- *errstr = '\0';
-
- rk->rk_eos.txn_wait_coord = rd_false;
-
- if (err)
- goto err;
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1)
- rd_kafka_buf_read_throttle_time(rkbuf);
-
- rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
-
- if (request->rkbuf_reqhdr.ApiVersion >= 1) {
- rd_kafkap_str_t ErrorMsg;
- rd_kafka_buf_read_str(rkbuf, &ErrorMsg);
- if (ErrorCode)
- rd_snprintf(errstr, sizeof(errstr), "%.*s",
- RD_KAFKAP_STR_PR(&ErrorMsg));
- }
-
- if ((err = ErrorCode))
- goto err;
-
- rd_kafka_buf_read_i32(rkbuf, &NodeId);
- rd_kafka_buf_read_str(rkbuf, &Host);
- rd_kafka_buf_read_i32(rkbuf, &Port);
-
- rd_rkb_dbg(rkb, EOS, "TXNCOORD",
- "FindCoordinator response: "
- "Transaction coordinator is broker %" PRId32 " (%.*s:%d)",
- NodeId, RD_KAFKAP_STR_PR(&Host), (int)Port);
-
- rd_kafka_rdlock(rk);
- if (NodeId == -1)
- err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
- else if (!(rkb = rd_kafka_broker_find_by_nodeid(rk, NodeId))) {
- rd_snprintf(errstr, sizeof(errstr),
- "Transaction coordinator %" PRId32 " is unknown",
- NodeId);
- err = RD_KAFKA_RESP_ERR__UNKNOWN_BROKER;
- }
- rd_kafka_rdunlock(rk);
-
- if (err)
- goto err;
-
- rd_kafka_wrlock(rk);
- rd_kafka_txn_coord_set(rk, rkb, "FindCoordinator response");
- rd_kafka_wrunlock(rk);
-
- rd_kafka_broker_destroy(rkb);
-
- return;
-
-err_parse:
- err = rkbuf->rkbuf_err;
-err:
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__DESTROY:
- return;
-
- case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
- case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
- rd_kafka_wrlock(rk);
- rd_kafka_txn_set_fatal_error(
- rkb->rkb_rk, RD_DONT_LOCK, err,
- "Failed to find transaction coordinator: %s: %s%s%s",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
- *errstr ? ": " : "", errstr);
-
- rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR);
- rd_kafka_wrunlock(rk);
- return;
-
- case RD_KAFKA_RESP_ERR__UNKNOWN_BROKER:
- rd_kafka_metadata_refresh_brokers(rk, NULL, errstr);
- break;
-
- default:
- break;
- }
-
- rd_kafka_wrlock(rk);
- rd_kafka_txn_coord_set(
- rk, NULL, "Failed to find transaction coordinator: %s: %s",
- rd_kafka_err2name(err), *errstr ? errstr : rd_kafka_err2str(err));
- rd_kafka_wrunlock(rk);
-}
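-
-/*
- * For reference, the FindCoordinator v1+ response fields read by the
- * parser above, in wire order (illustrative layout only; the actual
- * response is decoded field by field from the receive buffer):
- *
- * int32 ThrottleTimeMs; // v1+
- * int16 ErrorCode;
- * string ErrorMessage; // v1+, nullable
- * int32 NodeId; // coordinator broker id, -1 = none
- * string Host;
- * int32 Port;
- */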
-
-
-
-/**
- * @brief Query for the transaction coordinator.
- *
- * @returns true if a fatal error was raised, else false.
- *
- * @locality rdkafka main thread
- * @locks rd_kafka_wrlock(rk) MUST be held.
- */
-rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason) {
- rd_kafka_resp_err_t err;
- char errstr[512];
- rd_kafka_broker_t *rkb;
-
- rd_assert(rd_kafka_is_transactional(rk));
-
- if (rk->rk_eos.txn_wait_coord) {
- rd_kafka_dbg(rk, EOS, "TXNCOORD",
- "Not sending coordinator query (%s): "
- "waiting for previous query to finish",
- reason);
- return rd_false;
- }
-
- /* Find usable broker to query for the txn coordinator */
- rkb = rd_kafka_idemp_broker_any(rk, &err, errstr, sizeof(errstr));
- if (!rkb) {
- rd_kafka_dbg(rk, EOS, "TXNCOORD",
- "Unable to query for transaction coordinator: "
- "%s: %s",
- reason, errstr);
-
- if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false))
- return rd_true;
-
- rd_kafka_txn_coord_timer_start(rk, 500);
-
- return rd_false;
- }
-
- rd_kafka_dbg(rk, EOS, "TXNCOORD",
- "Querying for transaction coordinator: %s", reason);
-
- /* Send FindCoordinator request */
- err = rd_kafka_FindCoordinatorRequest(
- rkb, RD_KAFKA_COORD_TXN, rk->rk_conf.eos.transactional_id,
- RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_FindCoordinator,
- NULL);
-
- if (err) {
- rd_snprintf(errstr, sizeof(errstr),
- "Failed to send coordinator query to %s: "
- "%s",
- rd_kafka_broker_name(rkb), rd_kafka_err2str(err));
-
- rd_kafka_broker_destroy(rkb);
-
- if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false))
- return rd_true; /* Fatal error */
-
- rd_kafka_txn_coord_timer_start(rk, 500);
-
- return rd_false;
- }
-
- rd_kafka_broker_destroy(rkb);
-
- rk->rk_eos.txn_wait_coord = rd_true;
-
- return rd_false;
-}
-
-/**
- * @brief Sets or clears the current coordinator address.
- *
- * @returns true if the coordinator was changed, else false.
- *
- * @locality rdkafka main thread
- * @locks rd_kafka_wrlock(rk) MUST be held
- */
-rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *fmt,
- ...) {
- char buf[256];
- va_list ap;
-
- va_start(ap, fmt);
- vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
-
- if (rk->rk_eos.txn_curr_coord == rkb) {
- if (!rkb) {
- rd_kafka_dbg(rk, EOS, "TXNCOORD", "%s", buf);
- /* Keep querying for the coordinator */
- rd_kafka_txn_coord_timer_start(rk, 500);
- }
- return rd_false;
- }
-
- rd_kafka_dbg(rk, EOS, "TXNCOORD",
- "Transaction coordinator changed from %s -> %s: %s",
- rk->rk_eos.txn_curr_coord
- ? rd_kafka_broker_name(rk->rk_eos.txn_curr_coord)
- : "(none)",
- rkb ? rd_kafka_broker_name(rkb) : "(none)", buf);
-
- if (rk->rk_eos.txn_curr_coord)
- rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord);
-
- rk->rk_eos.txn_curr_coord = rkb;
- if (rkb)
- rd_kafka_broker_keep(rkb);
-
- rd_kafka_broker_set_nodename(rk->rk_eos.txn_coord,
- rk->rk_eos.txn_curr_coord);
-
- if (!rkb) {
- /* Lost the current coordinator, query for new coordinator */
- rd_kafka_txn_coord_timer_start(rk, 500);
- } else {
- /* Trigger PID state machine */
- rd_kafka_idemp_pid_fsm(rk);
- }
-
- return rd_true;
-}
-
-
-/**
- * @brief Coordinator state monitor callback.
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_txn_coord_monitor_cb(rd_kafka_broker_t *rkb) {
- rd_kafka_t *rk = rkb->rkb_rk;
- rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb);
- rd_bool_t is_up;
-
- rd_assert(rk->rk_eos.txn_coord == rkb);
-
- is_up = rd_kafka_broker_state_is_up(state);
- rd_rkb_dbg(rkb, EOS, "COORD", "Transaction coordinator is now %s",
- is_up ? "up" : "down");
-
- if (!is_up) {
- /* Coordinator is down, the connection will be re-established
- * automatically, but we also trigger a coordinator query
- * to pick up on coordinator change. */
- rd_kafka_txn_coord_timer_start(rk, 500);
-
- } else {
- /* Coordinator is up. */
-
- rd_kafka_wrlock(rk);
- if (rk->rk_eos.idemp_state < RD_KAFKA_IDEMP_STATE_ASSIGNED) {
- /* See if a idempotence state change is warranted. */
- rd_kafka_idemp_pid_fsm(rk);
-
- } else if (rk->rk_eos.idemp_state ==
- RD_KAFKA_IDEMP_STATE_ASSIGNED) {
- /* PID is already valid, continue transactional
- * operations by checking for partitions to register */
- rd_kafka_txn_schedule_register_partitions(rk,
- 1 /*ASAP*/);
- }
-
- rd_kafka_wrunlock(rk);
- }
-}
-
-
-
-/**
- * @brief Transactions manager destructor
- *
- * @locality rdkafka main thread
- * @locks none
- */
-void rd_kafka_txns_term(rd_kafka_t *rk) {
-
- RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free);
- RD_IF_FREE(rk->rk_eos.txn_curr_api.error, rd_kafka_error_destroy);
-
- mtx_destroy(&rk->rk_eos.txn_curr_api.lock);
- cnd_destroy(&rk->rk_eos.txn_curr_api.cnd);
-
- rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr, 1);
- rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr,
- 1);
-
- if (rk->rk_eos.txn_curr_coord)
- rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord);
-
- /* Logical coordinator */
- rd_kafka_broker_persistent_connection_del(
- rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord);
- rd_kafka_broker_monitor_del(&rk->rk_eos.txn_coord_mon);
- rd_kafka_broker_destroy(rk->rk_eos.txn_coord);
- rk->rk_eos.txn_coord = NULL;
-
- mtx_lock(&rk->rk_eos.txn_pending_lock);
- rd_kafka_txn_clear_pending_partitions(rk);
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
- mtx_destroy(&rk->rk_eos.txn_pending_lock);
-
- rd_kafka_txn_clear_partitions(rk);
-}
-
-
-/**
- * @brief Initialize transactions manager.
- *
- * @locality application thread
- * @locks none
- */
-void rd_kafka_txns_init(rd_kafka_t *rk) {
- rd_atomic32_init(&rk->rk_eos.txn_may_enq, 0);
- mtx_init(&rk->rk_eos.txn_pending_lock, mtx_plain);
- TAILQ_INIT(&rk->rk_eos.txn_pending_rktps);
- TAILQ_INIT(&rk->rk_eos.txn_waitresp_rktps);
- TAILQ_INIT(&rk->rk_eos.txn_rktps);
-
- mtx_init(&rk->rk_eos.txn_curr_api.lock, mtx_plain);
- cnd_init(&rk->rk_eos.txn_curr_api.cnd);
-
- /* Logical coordinator */
- rk->rk_eos.txn_coord =
- rd_kafka_broker_add_logical(rk, "TxnCoordinator");
-
- rd_kafka_broker_monitor_add(&rk->rk_eos.txn_coord_mon,
- rk->rk_eos.txn_coord, rk->rk_ops,
- rd_kafka_txn_coord_monitor_cb);
-
- rd_kafka_broker_persistent_connection_add(
- rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord);
-
- rd_atomic64_init(&rk->rk_eos.txn_dr_fails, 0);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h
deleted file mode 100644
index 3c088d09a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDKAFKA_TXNMGR_H_
-#define _RDKAFKA_TXNMGR_H_
-
-/**
- * @returns true if transaction state allows enqueuing new messages
- * (i.e., produce()), else false.
- *
- * @locality application thread
- * @locks none
- */
-static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_enq_msg(rd_kafka_t *rk) {
- return !rd_kafka_is_transactional(rk) ||
- rd_atomic32_get(&rk->rk_eos.txn_may_enq);
-}
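-
-/*
- * Illustrative call-site sketch (assumed, simplified): the produce
- * path gates message enqueuing on this check, e.g.:
- *
- * if (unlikely(!rd_kafka_txn_may_enq_msg(rk)))
- * return RD_KAFKA_RESP_ERR__STATE;
- */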
-
-
-/**
- * @returns true if transaction state allows sending messages to broker,
- * else false.
- *
- * @locality broker thread
- * @locks none
- */
-static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_send_msg(rd_kafka_t *rk) {
- rd_bool_t ret;
-
- rd_kafka_rdlock(rk);
- ret = (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
- rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_COMMIT);
- rd_kafka_rdunlock(rk);
-
- return ret;
-}
-
-
-/**
- * @returns true if transaction and partition state allows sending queued
- * messages to broker, else false.
- *
- * @locality any
- * @locks toppar_lock MUST be held
- */
-static RD_INLINE RD_UNUSED rd_bool_t
-rd_kafka_txn_toppar_may_send_msg(rd_kafka_toppar_t *rktp) {
- if (likely(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_IN_TXN))
- return rd_true;
-
- return rd_false;
-}
-
-
-
-void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms);
-
-
-/**
- * @brief Add partition to transaction (unless already added).
- *
- * The partition will first be added to the pending list (txn_pending_rktps)
- * awaiting registration on the coordinator with AddPartitionsToTxnRequest.
- * On successful registration the partition is flagged as IN_TXN and removed
- * from the pending list.
- *
- * @locality application thread
- * @locks none
- */
-static RD_INLINE RD_UNUSED void
-rd_kafka_txn_add_partition(rd_kafka_toppar_t *rktp) {
- rd_kafka_t *rk;
- rd_bool_t schedule = rd_false;
-
- rd_kafka_toppar_lock(rktp);
-
- /* Already added or registered */
- if (likely(rktp->rktp_flags &
- (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN))) {
- rd_kafka_toppar_unlock(rktp);
- return;
- }
-
- rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_PEND_TXN;
-
- rd_kafka_toppar_unlock(rktp);
-
- rk = rktp->rktp_rkt->rkt_rk;
-
- mtx_lock(&rk->rk_eos.txn_pending_lock);
- schedule = TAILQ_EMPTY(&rk->rk_eos.txn_pending_rktps);
-
- /* List is sorted by topic name since AddPartitionsToTxnRequest()
- * requires it. */
- TAILQ_INSERT_SORTED(&rk->rk_eos.txn_pending_rktps, rktp,
- rd_kafka_toppar_t *, rktp_txnlink,
- rd_kafka_toppar_topic_cmp);
- rd_kafka_toppar_keep(rktp);
- mtx_unlock(&rk->rk_eos.txn_pending_lock);
-
- rd_kafka_dbg(rk, EOS, "ADDPARTS",
- "Marked %.*s [%" PRId32
- "] as part of transaction: "
- "%sscheduling registration",
- RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
- rktp->rktp_partition, schedule ? "" : "not ");
-
-
- /* Schedule registration of partitions by the rdkafka main thread */
- if (unlikely(schedule))
- rd_kafka_txn_schedule_register_partitions(rk, 1 /*immediate*/);
-}
-
-
-
-void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk,
- rd_kafka_idemp_state_t state);
-
-void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_bool_t requires_epoch_bump,
- const char *fmt,
- ...) RD_FORMAT(printf, 4, 5);
-#define rd_kafka_txn_set_abortable_error(rk, err, ...) \
- rd_kafka_txn_set_abortable_error0(rk, err, rd_false, __VA_ARGS__)
-
-#define rd_kafka_txn_set_abortable_error_with_bump(rk, err, ...) \
- rd_kafka_txn_set_abortable_error0(rk, err, rd_true, __VA_ARGS__)
-
-void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk,
- rd_dolock_t do_lock,
- rd_kafka_resp_err_t err,
- const char *fmt,
- ...) RD_FORMAT(printf, 4, 5);
-
-rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason);
-
-rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk,
- rd_kafka_broker_t *rkb,
- const char *fmt,
- ...) RD_FORMAT(printf, 3, 4);
-
-void rd_kafka_txns_term(rd_kafka_t *rk);
-void rd_kafka_txns_init(rd_kafka_t *rk);
-
-#endif /* _RDKAFKA_TXNMGR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c
deleted file mode 100644
index 68b01a4e1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdkafka_zstd.h"
-
-#if WITH_ZSTD_STATIC
-/* Enable advanced/unstable API for initCStream_srcSize */
-#define ZSTD_STATIC_LINKING_ONLY
-#endif
-
-#include <zstd.h>
-#include <zstd_errors.h>
-
-rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb,
- char *inbuf,
- size_t inlen,
- void **outbuf,
- size_t *outlenp) {
- unsigned long long out_bufsize = ZSTD_getFrameContentSize(inbuf, inlen);
-
- switch (out_bufsize) {
- case ZSTD_CONTENTSIZE_UNKNOWN:
- /* Decompressed size cannot be determined, make a guess */
- out_bufsize = inlen * 2;
- break;
- case ZSTD_CONTENTSIZE_ERROR:
- /* Error calculating frame content size */
- rd_rkb_dbg(rkb, MSG, "ZSTD",
- "Unable to begin ZSTD decompression "
- "(out buffer is %llu bytes): %s",
- out_bufsize, "Error in determining frame size");
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- default:
- break;
- }
-
- /* Increase output buffer until it can fit the entire result,
- * capped by message.max.bytes */
- while (out_bufsize <=
- (unsigned long long)rkb->rkb_rk->rk_conf.recv_max_msg_size) {
- size_t ret;
- char *decompressed;
-
- decompressed = rd_malloc((size_t)out_bufsize);
- if (!decompressed) {
- rd_rkb_dbg(rkb, MSG, "ZSTD",
- "Unable to allocate output buffer "
- "(%llu bytes for %" PRIusz
- " compressed bytes): %s",
- out_bufsize, inlen, rd_strerror(errno));
- return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- }
-
-
- ret = ZSTD_decompress(decompressed, (size_t)out_bufsize, inbuf,
- inlen);
- if (!ZSTD_isError(ret)) {
- *outlenp = ret;
- *outbuf = decompressed;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_free(decompressed);
-
- /* Check if the destination size is too small */
- if (ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall) {
-
- /* Grow geometrically (roughly 3x per attempt) */
- out_bufsize += RD_MAX(out_bufsize * 2, 4000);
-
- rd_atomic64_add(&rkb->rkb_c.zbuf_grow, 1);
-
- } else {
- /* Fail on any other error */
- rd_rkb_dbg(rkb, MSG, "ZSTD",
- "Unable to begin ZSTD decompression "
- "(out buffer is %llu bytes): %s",
- out_bufsize, ZSTD_getErrorName(ret));
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- }
- }
-
- rd_rkb_dbg(rkb, MSG, "ZSTD",
- "Unable to decompress ZSTD "
- "(input buffer %" PRIusz
- ", output buffer %llu): "
- "output would exceed message.max.bytes (%d)",
- inlen, out_bufsize, rkb->rkb_rk->rk_conf.max_msg_size);
-
- return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
-}
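-
-/*
- * Standalone sketch of the same grow-and-retry pattern with plain
- * libzstd, without the rdkafka wrappers, size cap or statistics
- * (illustrative only; error handling trimmed for brevity):
- */
-#if 0 /* example only */
-static void *zstd_decompress_simple(const void *in, size_t inlen,
- size_t *outlenp) {
- unsigned long long hint = ZSTD_getFrameContentSize(in, inlen);
- size_t bufsize;
-
- if (hint == ZSTD_CONTENTSIZE_ERROR)
- return NULL; /* not a valid zstd frame */
-
- bufsize = hint == ZSTD_CONTENTSIZE_UNKNOWN ? inlen * 2
- : (size_t)hint;
-
- for (;;) {
- void *buf = malloc(bufsize);
- size_t r;
-
- if (!buf)
- return NULL;
-
- r = ZSTD_decompress(buf, bufsize, in, inlen);
- if (!ZSTD_isError(r)) {
- *outlenp = r;
- return buf;
- }
-
- free(buf);
- if (ZSTD_getErrorCode(r) != ZSTD_error_dstSize_tooSmall)
- return NULL; /* permanent error */
-
- bufsize *= 2; /* destination too small: grow and retry */
- }
-}
-#endif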
-
-
-rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb,
- int comp_level,
- rd_slice_t *slice,
- void **outbuf,
- size_t *outlenp) {
- ZSTD_CStream *cctx;
- size_t r;
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- size_t len = rd_slice_remains(slice);
- ZSTD_outBuffer out;
- ZSTD_inBuffer in;
-
- *outbuf = NULL;
- out.pos = 0;
- out.size = ZSTD_compressBound(len);
- out.dst = rd_malloc(out.size);
- if (!out.dst) {
- rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
- "Unable to allocate output buffer "
- "(%" PRIusz " bytes): %s",
- out.size, rd_strerror(errno));
- return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- }
-
-
- cctx = ZSTD_createCStream();
- if (!cctx) {
- rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
- "Unable to create ZSTD compression context");
- err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
- goto done;
- }
-
-#if defined(WITH_ZSTD_STATIC) && \
- ZSTD_VERSION_NUMBER >= (1 * 100 * 100 + 2 * 100 + 1) /* v1.2.1 */
- r = ZSTD_initCStream_srcSize(cctx, comp_level, len);
-#else
- /* libzstd not linked statically (or zstd version < 1.2.1):
- * decompression in consumer may be more costly due to
- * decompressed size not included in header by librdkafka producer */
- r = ZSTD_initCStream(cctx, comp_level);
-#endif
- if (ZSTD_isError(r)) {
- rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
- "Unable to begin ZSTD compression "
- "(out buffer is %" PRIusz " bytes): %s",
- out.size, ZSTD_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- while ((in.size = rd_slice_reader(slice, &in.src))) {
- in.pos = 0;
- r = ZSTD_compressStream(cctx, &out, &in);
- if (unlikely(ZSTD_isError(r))) {
- rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
- "ZSTD compression failed "
- "(at of %" PRIusz
- " bytes, with "
- "%" PRIusz
- " bytes remaining in out buffer): "
- "%s",
- in.size, out.size - out.pos,
- ZSTD_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- /* No space left in output buffer,
- * but input isn't fully consumed */
- if (in.pos < in.size) {
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
- }
-
- if (rd_slice_remains(slice) != 0) {
- rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
- "Failed to finalize ZSTD compression "
- "of %" PRIusz " bytes: %s",
- len, "Unexpected trailing data");
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- r = ZSTD_endStream(cctx, &out);
- if (unlikely(ZSTD_isError(r) || r > 0)) {
- rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
- "Failed to finalize ZSTD compression "
- "of %" PRIusz " bytes: %s",
- len, ZSTD_getErrorName(r));
- err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
- goto done;
- }
-
- *outbuf = out.dst;
- *outlenp = out.pos;
-
-done:
- if (cctx)
- ZSTD_freeCStream(cctx);
-
- if (err)
- rd_free(out.dst);
-
- return err;
-}
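-
-/*
- * For comparison, a one-shot sketch with the simple libzstd API
- * (illustrative only). The streaming API above is needed because the
- * input slice may span multiple non-contiguous segments; with a
- * single contiguous buffer, ZSTD_compress() suffices:
- */
-#if 0 /* example only */
-static void *zstd_compress_simple(const void *in, size_t inlen,
- int level, size_t *outlenp) {
- size_t bound = ZSTD_compressBound(inlen);
- void *out = malloc(bound);
- size_t r;
-
- if (!out)
- return NULL;
-
- r = ZSTD_compress(out, bound, in, inlen, level);
- if (ZSTD_isError(r)) {
- free(out);
- return NULL;
- }
-
- *outlenp = r;
- return out;
-}
-#endif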
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h
deleted file mode 100644
index f87c4c6fb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDZSTD_H_
-#define _RDZSTD_H_
-
-/**
- * @brief Decompress ZSTD framed data.
- *
- * @returns allocated buffer in \p *outbuf, length in \p *outlenp on success.
- */
-rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb,
- char *inbuf,
- size_t inlen,
- void **outbuf,
- size_t *outlenp);
-
-/**
- * @brief Allocate space for \p *outbuf and compress the entire \p slice
- * at compression level \p comp_level.
- *
- * @returns allocated buffer in \p *outbuf, length in \p *outlenp.
- */
-rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb,
- int comp_level,
- rd_slice_t *slice,
- void **outbuf,
- size_t *outlenp);
-
-#endif /* _RDZSTD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c
deleted file mode 100644
index c71e3004a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdlist.h"
-
-
-void rd_list_dump(const char *what, const rd_list_t *rl) {
- int i;
- printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n", what, rl,
- rl->rl_cnt, rl->rl_size, rl->rl_elems);
- for (i = 0; i < rl->rl_cnt; i++)
- printf(" #%d: %p at &%p\n", i, rl->rl_elems[i],
- &rl->rl_elems[i]);
-}
-
-void rd_list_grow(rd_list_t *rl, size_t size) {
- rd_assert(!(rl->rl_flags & RD_LIST_F_FIXED_SIZE));
- rl->rl_size += (int)size;
- if (unlikely(rl->rl_size == 0))
- return; /* avoid zero allocations */
- rl->rl_elems =
- rd_realloc(rl->rl_elems, sizeof(*rl->rl_elems) * rl->rl_size);
-}
-
-rd_list_t *
-rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *)) {
- memset(rl, 0, sizeof(*rl));
-
- if (initial_size > 0)
- rd_list_grow(rl, initial_size);
-
- rl->rl_free_cb = free_cb;
-
- return rl;
-}
-
-rd_list_t *rd_list_init_copy(rd_list_t *dst, const rd_list_t *src) {
-
- if (src->rl_flags & RD_LIST_F_FIXED_SIZE) {
- /* Source was preallocated, prealloc new dst list */
- rd_list_init(dst, 0, src->rl_free_cb);
-
- rd_list_prealloc_elems(dst, src->rl_elemsize, src->rl_size,
- 1 /*memzero*/);
- } else {
- /* Source is dynamic, initialize dst the same */
- rd_list_init(dst, rd_list_cnt(src), src->rl_free_cb);
- }
-
- return dst;
-}
-
-static RD_INLINE rd_list_t *rd_list_alloc(void) {
- return rd_malloc(sizeof(rd_list_t));
-}
-
-rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *)) {
- rd_list_t *rl = rd_list_alloc();
- rd_list_init(rl, initial_size, free_cb);
- rl->rl_flags |= RD_LIST_F_ALLOCATED;
- return rl;
-}
-
-
-void rd_list_prealloc_elems(rd_list_t *rl,
- size_t elemsize,
- size_t cnt,
- int memzero) {
- size_t allocsize;
- char *p;
- size_t i;
-
- rd_assert(!rl->rl_elems);
-
- /* Allocation layout:
- * void *ptrs[cnt];
- * elems[elemsize][cnt];
- */
-
- allocsize = (sizeof(void *) * cnt) + (elemsize * cnt);
- if (memzero)
- rl->rl_elems = rd_calloc(1, allocsize);
- else
- rl->rl_elems = rd_malloc(allocsize);
-
- /* p points to first element's memory, unless elemsize is 0. */
- if (elemsize > 0)
- p = rl->rl_p = (char *)&rl->rl_elems[cnt];
- else
- p = rl->rl_p = NULL;
-
- /* Pointer -> elem mapping */
- for (i = 0; i < cnt; i++, p += elemsize)
- rl->rl_elems[i] = p;
-
- rl->rl_size = (int)cnt;
- rl->rl_cnt = 0;
- rl->rl_flags |= RD_LIST_F_FIXED_SIZE;
- rl->rl_elemsize = (int)elemsize;
-}
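-
-/*
- * Usage sketch (illustrative): preallocate fixed-size elements, then
- * acquire a slot by passing NULL to rd_list_add(), as documented in
- * rdlist.h:
- */
-#if 0 /* example only */
-static void example_prealloc(void) {
- rd_list_t rl;
- int32_t *v;
-
- rd_list_init(&rl, 0, NULL);
- rd_list_prealloc_elems(&rl, sizeof(int32_t), 10, 1 /*memzero*/);
-
- v = rd_list_add(&rl, NULL); /* returns the preallocated slot */
- *v = 42;
-
- rd_list_destroy(&rl);
-}
-#endif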
-
-
-void rd_list_set_cnt(rd_list_t *rl, size_t cnt) {
- rd_assert(rl->rl_flags & RD_LIST_F_FIXED_SIZE);
- rd_assert((int)cnt <= rl->rl_size);
- rl->rl_cnt = (int)cnt;
-}
-
-
-void rd_list_free_cb(rd_list_t *rl, void *ptr) {
- if (rl->rl_free_cb && ptr)
- rl->rl_free_cb(ptr);
-}
-
-
-void *rd_list_add(rd_list_t *rl, void *elem) {
- if (rl->rl_cnt == rl->rl_size)
- rd_list_grow(rl, rl->rl_size ? rl->rl_size * 2 : 16);
- rl->rl_flags &= ~RD_LIST_F_SORTED;
- if (elem)
- rl->rl_elems[rl->rl_cnt] = elem;
- return rl->rl_elems[rl->rl_cnt++];
-}
-
-void rd_list_set(rd_list_t *rl, int idx, void *ptr) {
- if (idx >= rl->rl_size)
- rd_list_grow(rl, idx + 1);
-
- if (idx >= rl->rl_cnt) {
- memset(&rl->rl_elems[rl->rl_cnt], 0,
- sizeof(*rl->rl_elems) * (idx - rl->rl_cnt));
- rl->rl_cnt = idx + 1;
- } else {
- /* Not allowed to replace existing element. */
- rd_assert(!rl->rl_elems[idx]);
- }
-
- rl->rl_elems[idx] = ptr;
-}
-
-
-
-void rd_list_remove_elem(rd_list_t *rl, int idx) {
- rd_assert(idx < rl->rl_cnt);
-
- if (idx + 1 < rl->rl_cnt)
- memmove(&rl->rl_elems[idx], &rl->rl_elems[idx + 1],
- sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx + 1)));
- rl->rl_cnt--;
-}
-
-void *rd_list_remove(rd_list_t *rl, void *match_elem) {
- void *elem;
- int i;
-
- RD_LIST_FOREACH(elem, rl, i) {
- if (elem == match_elem) {
- rd_list_remove_elem(rl, i);
- return elem;
- }
- }
-
- return NULL;
-}
-
-
-void *rd_list_remove_cmp(rd_list_t *rl,
- void *match_elem,
- int (*cmp)(void *_a, void *_b)) {
- void *elem;
- int i;
-
- RD_LIST_FOREACH(elem, rl, i) {
- if (elem == match_elem || !cmp(elem, match_elem)) {
- rd_list_remove_elem(rl, i);
- return elem;
- }
- }
-
- return NULL;
-}
-
-
-int rd_list_remove_multi_cmp(rd_list_t *rl,
- void *match_elem,
- int (*cmp)(void *_a, void *_b)) {
-
- void *elem;
- int i;
- int cnt = 0;
-
- /* Scan backwards to minimize memmoves */
- RD_LIST_FOREACH_REVERSE(elem, rl, i) {
- if (match_elem == elem || !cmp(elem, match_elem)) {
- rd_list_remove_elem(rl, i);
- cnt++;
- }
- }
-
- return cnt;
-}
-
-
-void *rd_list_pop(rd_list_t *rl) {
- void *elem;
- int idx = rl->rl_cnt - 1;
-
- if (idx < 0)
- return NULL;
-
- elem = rl->rl_elems[idx];
- rd_list_remove_elem(rl, idx);
-
- return elem;
-}
-
-
-/**
- * Trampoline to avoid the double pointers in callbacks.
- *
- * rl_elems is a **, but to avoid having the application do the cumbersome
- * ** -> * casting we wrap this here and provide a simple * pointer to
- * the callbacks.
- *
- * This is true for all list comparator uses, i.e., both sort() and find().
- */
-static RD_TLS int (*rd_list_cmp_curr)(const void *, const void *);
-
-static RD_INLINE int rd_list_cmp_trampoline(const void *_a, const void *_b) {
- const void *a = *(const void **)_a, *b = *(const void **)_b;
-
- return rd_list_cmp_curr(a, b);
-}
-
-void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *)) {
- if (unlikely(rl->rl_elems == NULL))
- return;
-
- rd_list_cmp_curr = cmp;
- qsort(rl->rl_elems, rl->rl_cnt, sizeof(*rl->rl_elems),
- rd_list_cmp_trampoline);
- rl->rl_flags |= RD_LIST_F_SORTED;
-}
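-
-/*
- * Example (illustrative): sorting sets RD_LIST_F_SORTED, switching
- * rd_list_find() from a linear scan to bsearch():
- */
-#if 0 /* example only */
-static void example_sort_and_find(void) {
- rd_list_t rl;
-
- rd_list_init(&rl, 3, NULL);
- rd_list_add(&rl, "banana");
- rd_list_add(&rl, "apple");
- rd_list_add(&rl, "cherry");
-
- rd_list_sort(&rl, rd_list_cmp_str);
-
- if (rd_list_find(&rl, "apple", rd_list_cmp_str))
- printf("found\n");
-
- rd_list_destroy(&rl);
-}
-#endif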
-
-static void rd_list_destroy_elems(rd_list_t *rl) {
- int i;
-
- if (!rl->rl_elems)
- return;
-
- if (rl->rl_free_cb) {
- /* Free in reverse order to allow deletions */
- for (i = rl->rl_cnt - 1; i >= 0; i--)
- if (rl->rl_elems[i])
- rl->rl_free_cb(rl->rl_elems[i]);
- }
-
- rd_free(rl->rl_elems);
- rl->rl_elems = NULL;
- rl->rl_cnt = 0;
- rl->rl_size = 0;
- rl->rl_flags &= ~RD_LIST_F_SORTED;
-}
-
-
-void rd_list_clear(rd_list_t *rl) {
- rd_list_destroy_elems(rl);
-}
-
-
-void rd_list_destroy(rd_list_t *rl) {
- rd_list_destroy_elems(rl);
- if (rl->rl_flags & RD_LIST_F_ALLOCATED)
- rd_free(rl);
-}
-
-void rd_list_destroy_free(void *rl) {
- rd_list_destroy((rd_list_t *)rl);
-}
-
-void *rd_list_elem(const rd_list_t *rl, int idx) {
- if (likely(idx < rl->rl_cnt))
- return (void *)rl->rl_elems[idx];
- return NULL;
-}
-
-int rd_list_index(const rd_list_t *rl,
- const void *match,
- int (*cmp)(const void *, const void *)) {
- int i;
- const void *elem;
-
- RD_LIST_FOREACH(elem, rl, i) {
- if (!cmp(match, elem))
- return i;
- }
-
- return -1;
-}
-
-
-void *rd_list_find(const rd_list_t *rl,
- const void *match,
- int (*cmp)(const void *, const void *)) {
- int i;
- const void *elem;
-
- if (rl->rl_flags & RD_LIST_F_SORTED) {
- void **r;
- rd_list_cmp_curr = cmp;
- r = bsearch(&match /*ptrptr to match elems*/, rl->rl_elems,
- rl->rl_cnt, sizeof(*rl->rl_elems),
- rd_list_cmp_trampoline);
- return r ? *r : NULL;
- }
-
- RD_LIST_FOREACH(elem, rl, i) {
- if (!cmp(match, elem))
- return (void *)elem;
- }
-
- return NULL;
-}
-
-
-void *rd_list_first(const rd_list_t *rl) {
- if (rl->rl_cnt == 0)
- return NULL;
- return rl->rl_elems[0];
-}
-
-void *rd_list_last(const rd_list_t *rl) {
- if (rl->rl_cnt == 0)
- return NULL;
- return rl->rl_elems[rl->rl_cnt - 1];
-}
-
-
-void *rd_list_find_duplicate(const rd_list_t *rl,
- int (*cmp)(const void *, const void *)) {
- int i;
-
- rd_assert(rl->rl_flags & RD_LIST_F_SORTED);
-
- for (i = 1; i < rl->rl_cnt; i++) {
- if (!cmp(rl->rl_elems[i - 1], rl->rl_elems[i]))
- return rl->rl_elems[i];
- }
-
- return NULL;
-}
-
-int rd_list_cmp(const rd_list_t *a,
- const rd_list_t *b,
- int (*cmp)(const void *, const void *)) {
- int i;
-
- i = RD_CMP(a->rl_cnt, b->rl_cnt);
- if (i)
- return i;
-
- for (i = 0; i < a->rl_cnt; i++) {
- int r = cmp(a->rl_elems[i], b->rl_elems[i]);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-
-/**
- * @brief Simple element pointer comparator
- */
-int rd_list_cmp_ptr(const void *a, const void *b) {
- return RD_CMP(a, b);
-}
-
-int rd_list_cmp_str(const void *a, const void *b) {
- return strcmp((const char *)a, (const char *)b);
-}
-
-void rd_list_apply(rd_list_t *rl,
- int (*cb)(void *elem, void *opaque),
- void *opaque) {
- void *elem;
- int i;
-
- RD_LIST_FOREACH(elem, rl, i) {
- if (!cb(elem, opaque)) {
- rd_list_remove_elem(rl, i);
- i--;
- }
- }
-
- return;
-}
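-
-/*
- * Example (illustrative): rd_list_apply() as a filter; elements for
- * which the callback returns 0 are removed (but not freed):
- */
-#if 0 /* example only */
-static int example_keep_short(void *elem, void *opaque) {
- size_t maxlen = *(size_t *)opaque;
- return strlen((const char *)elem) <= maxlen; /* 0 = remove */
-}
-
-static void example_filter(rd_list_t *rl) {
- size_t maxlen = 5;
- rd_list_apply(rl, example_keep_short, &maxlen);
-}
-#endif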
-
-
-/**
- * @brief Default element copier that simply assigns the original pointer.
- */
-static void *rd_list_nocopy_ptr(const void *elem, void *opaque) {
- return (void *)elem;
-}
-
-rd_list_t *
-rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque) {
- rd_list_t *dst;
-
- dst = rd_list_new(src->rl_cnt, src->rl_free_cb);
-
- rd_list_copy_to(dst, src, copy_cb, opaque);
- return dst;
-}
-
-
-void rd_list_copy_to(rd_list_t *dst,
- const rd_list_t *src,
- void *(*copy_cb)(const void *elem, void *opaque),
- void *opaque) {
- void *elem;
- int i;
-
- rd_assert(dst != src);
-
- if (!copy_cb)
- copy_cb = rd_list_nocopy_ptr;
-
- RD_LIST_FOREACH(elem, src, i) {
- void *celem = copy_cb(elem, opaque);
- if (celem)
- rd_list_add(dst, celem);
- }
-}
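-
-/*
- * Example (illustrative): deep-copy a list of strings with the
- * rd_list_string_copy() helper declared in rdlist.h; the new list
- * inherits the source's free_cb:
- */
-#if 0 /* example only */
-static rd_list_t *example_copy_strings(const rd_list_t *src) {
- return rd_list_copy(src, rd_list_string_copy, NULL);
-}
-#endif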
-
-
-/**
- * @brief Copy elements of preallocated \p src to preallocated \p dst.
- *
- * @remark \p dst will be overwritten and initialized, but its
- * flags will be retained.
- *
- * @returns \p dst
- */
-static rd_list_t *rd_list_copy_preallocated0(rd_list_t *dst,
- const rd_list_t *src) {
- int dst_flags = dst->rl_flags & RD_LIST_F_ALLOCATED;
-
- rd_assert(dst != src);
-
- rd_list_init_copy(dst, src);
- dst->rl_flags |= dst_flags;
-
- rd_assert((dst->rl_flags & RD_LIST_F_FIXED_SIZE));
- rd_assert((src->rl_flags & RD_LIST_F_FIXED_SIZE));
- rd_assert(dst->rl_elemsize == src->rl_elemsize &&
- dst->rl_size == src->rl_size);
-
- memcpy(dst->rl_p, src->rl_p, src->rl_elemsize * src->rl_size);
- dst->rl_cnt = src->rl_cnt;
-
- return dst;
-}
-
-void *rd_list_copy_preallocated(const void *elem, void *opaque) {
- return rd_list_copy_preallocated0(rd_list_new(0, NULL),
- (const rd_list_t *)elem);
-}
-
-
-
-void rd_list_move(rd_list_t *dst, rd_list_t *src) {
- rd_list_init_copy(dst, src);
-
- if (src->rl_flags & RD_LIST_F_FIXED_SIZE) {
- rd_list_copy_preallocated0(dst, src);
- } else {
- memcpy(dst->rl_elems, src->rl_elems,
- src->rl_cnt * sizeof(*src->rl_elems));
- dst->rl_cnt = src->rl_cnt;
- }
-
- src->rl_cnt = 0;
-}
-
-
-/**
- * @name Misc helpers for common list types
- * @{
- *
- */
-rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size) {
- int rl_flags = rl->rl_flags & RD_LIST_F_ALLOCATED;
- rd_list_init(rl, 0, NULL);
- rl->rl_flags |= rl_flags;
- rd_list_prealloc_elems(rl, sizeof(int32_t), max_size, 1 /*memzero*/);
- return rl;
-}
-
-void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val) {
- rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) &&
- rl->rl_elemsize == sizeof(int32_t));
- rd_assert(idx < rl->rl_size);
-
- memcpy(rl->rl_elems[idx], &val, sizeof(int32_t));
-
- if (rl->rl_cnt <= idx)
- rl->rl_cnt = idx + 1;
-}
-
-int32_t rd_list_get_int32(const rd_list_t *rl, int idx) {
- rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) &&
- rl->rl_elemsize == sizeof(int32_t) && idx < rl->rl_cnt);
- return *(int32_t *)rl->rl_elems[idx];
-}
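-
-/*
- * Example (illustrative): the int32 helpers operate on a preallocated
- * fixed-size list; unset indices are zero-initialized:
- */
-#if 0 /* example only */
-static void example_int32_list(void) {
- rd_list_t rl = {0}; /* must not be uninitialized */
-
- rd_list_init_int32(&rl, 4);
- rd_list_set_int32(&rl, 0, 100);
- rd_list_set_int32(&rl, 3, 400);
-
- printf("%" PRId32 "\n", rd_list_get_int32(&rl, 3)); /* 400 */
-
- rd_list_destroy(&rl);
-}
-#endif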
-
-
-
-/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h
deleted file mode 100644
index db5295f6c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDLIST_H_
-#define _RDLIST_H_
-
-
-/**
- *
- * Simple light-weight append-only list to be used as a collection convenience.
- *
- */
-
-typedef struct rd_list_s {
- int rl_size;
- int rl_cnt;
- void **rl_elems;
- void (*rl_free_cb)(void *);
- int rl_flags;
-#define RD_LIST_F_ALLOCATED \
- 0x1 /* The rd_list_t is allocated, \
- * will be freed on destroy() */
-#define RD_LIST_F_SORTED \
- 0x2 /* Set by sort(), cleared by any mutations. \
- * When this flag is set bsearch() is used \
- * by find(), otherwise a linear search. */
-#define RD_LIST_F_FIXED_SIZE 0x4 /* Assert on grow, when prealloc()ed */
-#define RD_LIST_F_UNIQUE \
- 0x8 /* Don't allow duplicates: \
- * ONLY ENFORCED BY CALLER. */
- int rl_elemsize; /**< Element size (when prealloc()ed) */
- void *rl_p; /**< Start of prealloced elements,
- * the allocation itself starts at rl_elems
- */
-} rd_list_t;
-
-
-/**
- * @brief Initialize a list, prepare for 'initial_size' elements
- * (optional optimization).
- * List elements will optionally be freed by \p free_cb.
- *
- * @returns \p rl
- */
-rd_list_t *
-rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *));
-
-
-/**
- * @brief Same as rd_list_init() but uses initial_size and free_cb
- * from the provided \p src list.
- */
-rd_list_t *rd_list_init_copy(rd_list_t *rl, const rd_list_t *src);
-
-/**
- * @brief Allocate a new list pointer and initialize
- * it according to rd_list_init().
- *
- * This is the same as calling \c rd_list_init(rd_list_alloc(), ..).
- *
- * Use rd_list_destroy() to free.
- */
-rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *));
-
-
-/**
- * @brief Prepare list for an additional \p size elements.
- * This is an optimization to avoid incremental grows.
- */
-void rd_list_grow(rd_list_t *rl, size_t size);
-
-/**
- * @brief Preallocate elements to avoid having to pass an allocated pointer to
- * rd_list_add(), instead pass NULL to rd_list_add() and use the returned
- * pointer as the element.
- *
- * @param elemsize element size, or 0 if elements are allocated separately.
- * @param size number of elements
- * @param memzero initialize element memory to zeros.
- *
- * @remark Preallocated element lists can't grow past \p size.
- */
-void rd_list_prealloc_elems(rd_list_t *rl,
- size_t elemsize,
- size_t size,
- int memzero);
-
-/**
- * @brief Set the number of valid elements, this must only be used
- * with prealloc_elems() to make the preallocated elements directly
- * usable.
- */
-void rd_list_set_cnt(rd_list_t *rl, size_t cnt);
-
-
-/**
- * @brief Free a pointer using the list's free_cb
- *
- * @remark If no free_cb is set, or \p ptr is NULL, don't do anything.
- *
- * Typical use is rd_list_free_cb(rd_list_remove_cmp(....));
- */
-void rd_list_free_cb(rd_list_t *rl, void *ptr);
-
-
-/**
- * @brief Append element to list
- *
- * @returns \p elem. If \p elem is NULL the default element for that index
- * will be returned (for use with rd_list_prealloc_elems()).
- */
-void *rd_list_add(rd_list_t *rl, void *elem);
-
-
-/**
- * @brief Set element at \p idx to \p ptr.
- *
- * @remark MUST NOT overwrite an existing element.
- * @remark The list will be grown, if needed, any gaps between the current
- * highest element and \p idx will be set to NULL.
- */
-void rd_list_set(rd_list_t *rl, int idx, void *ptr);
-
-
-/**
- * Remove element from list.
- * This is a slow O(n) + memmove operation.
- * Returns the removed element.
- */
-void *rd_list_remove(rd_list_t *rl, void *match_elem);
-
-/**
- * Remove element from list using comparator.
- * See rd_list_remove()
- */
-void *rd_list_remove_cmp(rd_list_t *rl,
- void *match_elem,
- int (*cmp)(void *_a, void *_b));
-
-
-/**
- * @brief Remove element at index \p idx.
- *
- * This is a O(1) + memmove operation
- */
-void rd_list_remove_elem(rd_list_t *rl, int idx);
-
-
-/**
- * @brief Remove and return the last element in the list.
- *
- * @returns the last element, or NULL if list is empty. */
-void *rd_list_pop(rd_list_t *rl);
-
-
-/**
- * @brief Remove all elements matching comparator.
- *
- * @returns the number of elements removed.
- *
- * @sa rd_list_remove()
- */
-int rd_list_remove_multi_cmp(rd_list_t *rl,
- void *match_elem,
- int (*cmp)(void *_a, void *_b));
-
-
-/**
- * @brief Sort list using comparator.
- *
- * To sort a list in ascending order the comparator should implement
- * (a - b), and for descending order implement (b - a).
- */
-void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *));
-
-
-/**
- * Empties the list and frees elements (if there is a free_cb).
- */
-void rd_list_clear(rd_list_t *rl);
-
-
-/**
- * Empties the list, frees the element array, and optionally frees
- * each element using the registered \c rl->rl_free_cb.
- *
- * If the list was previously allocated with rd_list_new() it will be freed.
- */
-void rd_list_destroy(rd_list_t *rl);
-
-/**
- * @brief Wrapper for rd_list_destroy() that has same signature as free(3),
- * allowing it to be used as free_cb for nested lists.
- */
-void rd_list_destroy_free(void *rl);
-
-
-/**
- * Returns the element at index 'idx', or NULL if out of range.
- *
- * Typical iteration is:
- * int i = 0;
- * my_type_t *obj;
- * while ((obj = rd_list_elem(rl, i++)))
- * do_something(obj);
- */
-void *rd_list_elem(const rd_list_t *rl, int idx);
-
-#define RD_LIST_FOREACH(elem, listp, idx) \
- for (idx = 0; (elem = rd_list_elem(listp, idx)); idx++)
-
-#define RD_LIST_FOREACH_REVERSE(elem, listp, idx) \
- for (idx = (listp)->rl_cnt - 1; \
- idx >= 0 && (elem = rd_list_elem(listp, idx)); idx--)
-
-/**
- * Returns the number of elements in list.
- */
-static RD_INLINE RD_UNUSED int rd_list_cnt(const rd_list_t *rl) {
- return rl->rl_cnt;
-}
-
-
-/**
- * Returns true if list is empty
- */
-#define rd_list_empty(rl) (rd_list_cnt(rl) == 0)
-
-
-/**
- * @brief Find element index using comparator.
- *
- * \p match is the first argument to \p cmp, and each element (up to a match)
- * is the second argument to \p cmp.
- *
- * @remark this is a O(n) scan.
- * @returns the index of the first matching element, or -1 if not found.
- */
-int rd_list_index(const rd_list_t *rl,
- const void *match,
- int (*cmp)(const void *, const void *));
-
-/**
- * @brief Find element using comparator
- *
- * \p match is the first argument to \p cmp, and each element (up to a match)
- * is the second argument to \p cmp.
- *
- * @remark if the list is sorted bsearch() is used, otherwise an O(n) scan.
- *
- * @returns the first matching element or NULL.
- */
-void *rd_list_find(const rd_list_t *rl,
- const void *match,
- int (*cmp)(const void *, const void *));
-
-
-
-/**
- * @returns the first element of the list, or NULL if list is empty.
- */
-void *rd_list_first(const rd_list_t *rl);
-
-/**
- * @returns the last element of the list, or NULL if list is empty.
- */
-void *rd_list_last(const rd_list_t *rl);
-
-
-/**
- * @returns the first duplicate in the list or NULL if no duplicates.
- *
- * @warning The list MUST be sorted.
- */
-void *rd_list_find_duplicate(const rd_list_t *rl,
- int (*cmp)(const void *, const void *));
-
-
-/**
- * @brief Compare list \p a to \p b.
- *
- * @returns < 0 if a was "lesser" than b,
- * > 0 if a was "greater" than b,
- * 0 if a and b are equal.
- */
-int rd_list_cmp(const rd_list_t *a,
- const rd_list_t *b,
- int (*cmp)(const void *, const void *));
-
-/**
- * @brief Simple element pointer comparator
- */
-int rd_list_cmp_ptr(const void *a, const void *b);
-
-/**
- * @brief strcmp comparator where the list elements are strings.
- */
-int rd_list_cmp_str(const void *a, const void *b);
-
-
-/**
- * @brief Apply \p cb to each element in list, if \p cb returns 0
- * the element will be removed (but not freed).
- */
-void rd_list_apply(rd_list_t *rl,
- int (*cb)(void *elem, void *opaque),
- void *opaque);
-
-
-
-typedef void *(rd_list_copy_cb_t)(const void *elem, void *opaque);
-/**
- * @brief Copy list \p src, returning a new list,
- * using optional \p copy_cb (per elem)
- */
-rd_list_t *
-rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque);
-
-
-/**
- * @brief Copy list \p src to \p dst using optional \p copy_cb (per elem)
- * @remark The destination list is not initialized by this function and
- * must be initialized by the caller.
- * @remark copy_cb() may return NULL in which case no element is added,
- * but the copy callback might have done so itself.
- */
-void rd_list_copy_to(rd_list_t *dst,
- const rd_list_t *src,
- void *(*copy_cb)(const void *elem, void *opaque),
- void *opaque);
-
-
-/**
- * @brief Copy callback to copy elements that are preallocated lists.
- */
-void *rd_list_copy_preallocated(const void *elem, void *opaque);
-
-
-/**
- * @brief String copier for rd_list_copy()
- */
-static RD_UNUSED void *rd_list_string_copy(const void *elem, void *opaque) {
- return rd_strdup((const char *)elem);
-}
-
-
-
-/**
- * @brief Move elements from \p src to \p dst.
- *
- * @remark \p dst will be initialized first.
- * @remark \p src will be emptied.
- */
-void rd_list_move(rd_list_t *dst, rd_list_t *src);
-
-
-/**
- * @name Misc helpers for common list types
- * @{
- *
- */
-
-/**
- * @brief Init a new list of int32_t's of maximum size \p max_size
- * where each element is pre-allocated.
- *
- * @remark The allocation flag of the original \p rl is retained,
- * do not pass an uninitialized \p rl to this function.
- */
-rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size);
-
-
-/**
- * Debugging: Print list to stdout.
- */
-void rd_list_dump(const char *what, const rd_list_t *rl);
-
-
-
-/**
- * @brief Set element at index \p idx to value \p val.
- *
- * @remark Must only be used with preallocated int32_t lists.
- * @remark Allows values to be overwritten.
- */
-void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val);
-
-/**
- * @returns the int32_t element value at index \p idx
- *
- * @remark Must only be used with preallocated int32_t lists.
- */
-int32_t rd_list_get_int32(const rd_list_t *rl, int idx);
-
-/**@}*/
-
-#endif /* _RDLIST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c
deleted file mode 100644
index 19fbbb161..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdlog.h"
-
-#include <stdarg.h>
-#include <string.h>
-#include <ctype.h>
-
-
-
-void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
- const char *p = (const char *)ptr;
- size_t of = 0;
-
-
- if (name)
- fprintf(fp, "%s hexdump (%" PRIusz " bytes):\n", name, len);
-
- for (of = 0; of < len; of += 16) {
- char hexen[16 * 3 + 1];
- char charen[16 + 1];
- int hof = 0;
-
- int cof = 0;
- unsigned int i;
-
- for (i = (unsigned int)of; i < (unsigned int)of + 16 && i < len;
- i++) {
- hof += rd_snprintf(hexen + hof, sizeof(hexen) - hof,
- "%02x ", p[i] & 0xff);
- cof +=
- rd_snprintf(charen + cof, sizeof(charen) - cof,
-                           "%c", isprint((unsigned char)p[i]) ? p[i] : '.');
- }
- fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen);
- }
-}
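A usage sketch; the output is 16 bytes per row, hex on the left and the printable characters on the right:

    const char frame[] = "\x00\x12kafka\x00";
    rd_hexdump(stderr, "rx frame", frame, sizeof(frame) - 1);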
-
-
-void rd_iov_print(const char *what,
- int iov_idx,
- const struct iovec *iov,
- int hexdump) {
- printf("%s: iov #%i: %" PRIusz "\n", what, iov_idx,
- (size_t)iov->iov_len);
- if (hexdump)
- rd_hexdump(stdout, what, iov->iov_base, iov->iov_len);
-}
-
-
-void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump) {
- int i;
- size_t len = 0;
-
- printf("%s: iovlen %" PRIusz "\n", what, (size_t)msg->msg_iovlen);
-
- for (i = 0; i < (int)msg->msg_iovlen; i++) {
- rd_iov_print(what, i, &msg->msg_iov[i], hexdump);
- len += msg->msg_iov[i].iov_len;
- }
- printf("%s: ^ message was %" PRIusz " bytes in total\n", what, len);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h
deleted file mode 100644
index f360a0b66..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDLOG_H_
-#define _RDLOG_H_
-
-void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len);
-
-void rd_iov_print(const char *what,
- int iov_idx,
- const struct iovec *iov,
- int hexdump);
-struct msghdr;
-void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump);
-
-#endif /* _RDLOG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c
deleted file mode 100644
index 4b8547033..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdsysqueue.h"
-#include "rdstring.h"
-#include "rdmap.h"
-
-
-static RD_INLINE int rd_map_elem_cmp(const rd_map_elem_t *a,
- const rd_map_elem_t *b,
- const rd_map_t *rmap) {
- int r = a->hash - b->hash;
- if (r != 0)
- return r;
- return rmap->rmap_cmp(a->key, b->key);
-}
-
-static void rd_map_elem_destroy(rd_map_t *rmap, rd_map_elem_t *elem) {
- rd_assert(rmap->rmap_cnt > 0);
- rmap->rmap_cnt--;
- if (rmap->rmap_destroy_key)
- rmap->rmap_destroy_key((void *)elem->key);
- if (rmap->rmap_destroy_value)
- rmap->rmap_destroy_value((void *)elem->value);
- LIST_REMOVE(elem, hlink);
- LIST_REMOVE(elem, link);
- rd_free(elem);
-}
-
-static rd_map_elem_t *
-rd_map_find(const rd_map_t *rmap, int *bktp, const rd_map_elem_t *skel) {
- int bkt = skel->hash % rmap->rmap_buckets.cnt;
- rd_map_elem_t *elem;
-
- if (bktp)
- *bktp = bkt;
-
- LIST_FOREACH(elem, &rmap->rmap_buckets.p[bkt], hlink) {
- if (!rd_map_elem_cmp(skel, elem, rmap))
- return elem;
- }
-
- return NULL;
-}
-
-
-/**
- * @brief Create and return new element based on \p skel without value set.
- */
-static rd_map_elem_t *
-rd_map_insert(rd_map_t *rmap, int bkt, const rd_map_elem_t *skel) {
- rd_map_elem_t *elem;
-
- elem = rd_calloc(1, sizeof(*elem));
- elem->hash = skel->hash;
- elem->key = skel->key; /* takes ownership of key */
- LIST_INSERT_HEAD(&rmap->rmap_buckets.p[bkt], elem, hlink);
- LIST_INSERT_HEAD(&rmap->rmap_iter, elem, link);
- rmap->rmap_cnt++;
-
- return elem;
-}
-
-
-rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value) {
- rd_map_elem_t skel = {.key = key, .hash = rmap->rmap_hash(key)};
- rd_map_elem_t *elem;
- int bkt;
-
- if (!(elem = rd_map_find(rmap, &bkt, &skel))) {
- elem = rd_map_insert(rmap, bkt, &skel);
- } else {
- if (elem->value && rmap->rmap_destroy_value)
- rmap->rmap_destroy_value((void *)elem->value);
- if (rmap->rmap_destroy_key)
- rmap->rmap_destroy_key(key);
- }
-
- elem->value = value; /* takes ownership of value */
-
- return elem;
-}
-
-
-void *rd_map_get(const rd_map_t *rmap, const void *key) {
- const rd_map_elem_t skel = {.key = (void *)key,
- .hash = rmap->rmap_hash(key)};
- rd_map_elem_t *elem;
-
- if (!(elem = rd_map_find(rmap, NULL, &skel)))
- return NULL;
-
- return (void *)elem->value;
-}
-
-
-void rd_map_delete(rd_map_t *rmap, const void *key) {
- const rd_map_elem_t skel = {.key = (void *)key,
- .hash = rmap->rmap_hash(key)};
- rd_map_elem_t *elem;
- int bkt;
-
- if (!(elem = rd_map_find(rmap, &bkt, &skel)))
- return;
-
- rd_map_elem_destroy(rmap, elem);
-}
-
-
-void rd_map_copy(rd_map_t *dst,
- const rd_map_t *src,
- rd_map_copy_t *key_copy,
- rd_map_copy_t *value_copy) {
- const rd_map_elem_t *elem;
-
- RD_MAP_FOREACH_ELEM(elem, src) {
- rd_map_set(
- dst, key_copy ? key_copy(elem->key) : (void *)elem->key,
- value_copy ? value_copy(elem->value) : (void *)elem->value);
- }
-}
-
-
-void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem) {
- *elem = LIST_FIRST(&rmap->rmap_iter);
-}
-
-size_t rd_map_cnt(const rd_map_t *rmap) {
- return (size_t)rmap->rmap_cnt;
-}
-
-rd_bool_t rd_map_is_empty(const rd_map_t *rmap) {
- return rmap->rmap_cnt == 0;
-}
-
-
-/**
- * @brief Calculates the number of desired buckets and returns
- * a struct with pre-allocated buckets.
- */
-struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt) {
- static const int max_depth = 15;
- static const int bucket_sizes[] = {
- 5, 11, 23, 47, 97, 199, /* default */
- 409, 823, 1741, 3469, 6949, 14033,
- 28411, 57557, 116731, 236897, -1};
- struct rd_map_buckets buckets = RD_ZERO_INIT;
- int i;
-
- if (!expected_cnt) {
- buckets.cnt = 199;
- } else {
- /* Strive for an average (at expected element count) depth
- * of 15 elements per bucket, but limit the maximum
- * bucket count to the maximum value in bucket_sizes above.
-        * When a real need arises we'll change this to a dynamically
- * growing hash map instead, but this will do for now. */
- buckets.cnt = bucket_sizes[0];
- for (i = 1; bucket_sizes[i] != -1 &&
- (int)expected_cnt / max_depth > bucket_sizes[i];
- i++)
- buckets.cnt = bucket_sizes[i];
- }
-
- rd_assert(buckets.cnt > 0);
-
- buckets.p = rd_calloc(buckets.cnt, sizeof(*buckets.p));
-
- return buckets;
-}
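Worked example: with expected_cnt = 100000 and max_depth = 15, the threshold is 100000 / 15 = 6666, so the loop settles on the largest table size below that, 3469 buckets, i.e. roughly 29 entries per bucket if the expected count is actually reached.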
-
-
-void rd_map_init(rd_map_t *rmap,
- size_t expected_cnt,
- int (*cmp)(const void *a, const void *b),
- unsigned int (*hash)(const void *key),
- void (*destroy_key)(void *key),
- void (*destroy_value)(void *value)) {
-
- memset(rmap, 0, sizeof(*rmap));
- rmap->rmap_buckets = rd_map_alloc_buckets(expected_cnt);
- rmap->rmap_cmp = cmp;
- rmap->rmap_hash = hash;
- rmap->rmap_destroy_key = destroy_key;
- rmap->rmap_destroy_value = destroy_value;
-}
-
-void rd_map_clear(rd_map_t *rmap) {
- rd_map_elem_t *elem;
-
- while ((elem = LIST_FIRST(&rmap->rmap_iter)))
- rd_map_elem_destroy(rmap, elem);
-}
-
-void rd_map_destroy(rd_map_t *rmap) {
- rd_map_clear(rmap);
- rd_free(rmap->rmap_buckets.p);
-}
-
-
-int rd_map_str_cmp(const void *a, const void *b) {
- return strcmp((const char *)a, (const char *)b);
-}
-
-/**
- * @brief A djb2 string hasher.
- */
-unsigned int rd_map_str_hash(const void *key) {
- const char *str = key;
- return rd_string_hash(str, -1);
-}
-
-
-
-/**
- * @name Unit tests
- *
- */
-#include "rdtime.h"
-#include "rdunittest.h"
-#include "rdcrc32.h"
-
-
-/**
- * Typed hash maps
- */
-
-/* Complex key type */
-struct mykey {
- int k;
- int something_else; /* Ignored by comparator and hasher below */
-};
-
-/* Key comparator */
-static int mykey_cmp(const void *_a, const void *_b) {
- const struct mykey *a = _a, *b = _b;
- return a->k - b->k;
-}
-
-/* Key hasher */
-static unsigned int mykey_hash(const void *_key) {
- const struct mykey *key = _key;
- return (unsigned int)key->k;
-}
-
-/* Complex value type */
-struct person {
- char *name;
- char *surname;
-};
-
-/* Define typed hash map type */
-typedef RD_MAP_TYPE(const struct mykey *,
- const struct person *) ut_my_typed_map_t;
-
-
-/**
- * @brief Test typed hash map with pre-defined type.
- */
-static int unittest_typed_map(void) {
- ut_my_typed_map_t rmap =
- RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL);
- ut_my_typed_map_t dup =
- RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL);
- struct mykey k1 = {1};
- struct mykey k2 = {2};
- struct person v1 = {"Roy", "McPhearsome"};
- struct person v2 = {"Hedvig", "Lindahl"};
- const struct mykey *key;
- const struct person *value;
-
- RD_MAP_SET(&rmap, &k1, &v1);
- RD_MAP_SET(&rmap, &k2, &v2);
-
- value = RD_MAP_GET(&rmap, &k2);
- RD_UT_ASSERT(value == &v2, "mismatch");
-
- RD_MAP_FOREACH(key, value, &rmap) {
- RD_UT_SAY("enumerated key %d person %s %s", key->k, value->name,
- value->surname);
- }
-
- RD_MAP_COPY(&dup, &rmap, NULL, NULL);
-
- RD_MAP_DELETE(&rmap, &k1);
- value = RD_MAP_GET(&rmap, &k1);
- RD_UT_ASSERT(value == NULL, "expected no k1");
-
- value = RD_MAP_GET(&dup, &k1);
- RD_UT_ASSERT(value == &v1, "copied map: k1 mismatch");
- value = RD_MAP_GET(&dup, &k2);
- RD_UT_ASSERT(value == &v2, "copied map: k2 mismatch");
-
- RD_MAP_DESTROY(&rmap);
- RD_MAP_DESTROY(&dup);
-
- RD_UT_PASS();
-}
-
-
-static int person_cmp(const void *_a, const void *_b) {
- const struct person *a = _a, *b = _b;
- int r;
- if ((r = strcmp(a->name, b->name)))
- return r;
- return strcmp(a->surname, b->surname);
-}
-static unsigned int person_hash(const void *_key) {
- const struct person *key = _key;
- return 31 * rd_map_str_hash(key->name) + rd_map_str_hash(key->surname);
-}
-
-/**
- * @brief Test typed hash map with locally defined type.
- */
-static int unittest_typed_map2(void) {
- RD_MAP_LOCAL_INITIALIZER(usermap, 3, const char *,
- const struct person *, rd_map_str_cmp,
- rd_map_str_hash, NULL, NULL);
- RD_MAP_LOCAL_INITIALIZER(personmap, 3, const struct person *,
- const char *, person_cmp, person_hash, NULL,
- NULL);
- struct person p1 = {"Magnus", "Lundstrom"};
- struct person p2 = {"Peppy", "Popperpappies"};
- const char *user;
- const struct person *person;
-
- /* Populate user -> person map */
- RD_MAP_SET(&usermap, "user1234", &p1);
- RD_MAP_SET(&usermap, "user9999999999", &p2);
-
- person = RD_MAP_GET(&usermap, "user1234");
-
-
- RD_UT_ASSERT(person == &p1, "mismatch");
-
- RD_MAP_FOREACH(user, person, &usermap) {
- /* Populate reverse name -> user map */
- RD_MAP_SET(&personmap, person, user);
- }
-
- RD_MAP_FOREACH(person, user, &personmap) {
- /* Just reference the memory to catch memory errors.*/
- RD_UT_ASSERT(strlen(person->name) > 0 &&
- strlen(person->surname) > 0 &&
- strlen(user) > 0,
- "bug");
- }
-
- RD_MAP_DESTROY(&usermap);
- RD_MAP_DESTROY(&personmap);
-
- return 0;
-}
-
-
-/**
- * @brief Untyped hash map.
- *
- * This is a more thorough test of the underlying hash map implementation.
- */
-static int unittest_untyped_map(void) {
- rd_map_t rmap;
- int pass, i, r;
- int cnt = 100000;
- int exp_cnt = 0, get_cnt = 0, iter_cnt = 0;
- const rd_map_elem_t *elem;
- rd_ts_t ts = rd_clock();
- rd_ts_t ts_get = 0;
-
- rd_map_init(&rmap, cnt, rd_map_str_cmp, rd_map_str_hash, rd_free,
- rd_free);
-
- /* pass 0 is set,delete,overwrite,get
- * pass 1-5 is get */
- for (pass = 0; pass < 6; pass++) {
- if (pass == 1)
- ts_get = rd_clock();
-
- for (i = 1; i < cnt; i++) {
- char key[10];
- char val[64];
- const char *val2;
- rd_bool_t do_delete = !(i % 13);
- rd_bool_t overwrite = !do_delete && !(i % 5);
-
- rd_snprintf(key, sizeof(key), "key%d", i);
- rd_snprintf(val, sizeof(val), "VALUE=%d!", i);
-
- if (pass == 0) {
- rd_map_set(&rmap, rd_strdup(key),
- rd_strdup(val));
-
- if (do_delete)
- rd_map_delete(&rmap, key);
- }
-
- if (overwrite) {
- rd_snprintf(val, sizeof(val), "OVERWRITE=%d!",
- i);
- if (pass == 0)
- rd_map_set(&rmap, rd_strdup(key),
- rd_strdup(val));
- }
-
- val2 = rd_map_get(&rmap, key);
-
- if (do_delete)
- RD_UT_ASSERT(!val2,
- "map_get pass %d "
- "returned value %s "
- "for deleted key %s",
- pass, val2, key);
- else
- RD_UT_ASSERT(val2 && !strcmp(val, val2),
- "map_get pass %d: "
- "expected value %s, not %s, "
- "for key %s",
- pass, val, val2 ? val2 : "NULL",
- key);
-
- if (pass == 0 && !do_delete)
- exp_cnt++;
- }
-
- if (pass >= 1)
- get_cnt += cnt;
- }
-
- ts_get = rd_clock() - ts_get;
- RD_UT_SAY("%d map_get iterations took %.3fms = %" PRId64 "us/get",
- get_cnt, (float)ts_get / 1000.0, ts_get / get_cnt);
-
- RD_MAP_FOREACH_ELEM(elem, &rmap) {
- iter_cnt++;
- }
-
- r = (int)rd_map_cnt(&rmap);
- RD_UT_ASSERT(r == exp_cnt, "expected %d map entries, not %d", exp_cnt,
- r);
-
- RD_UT_ASSERT(r == iter_cnt,
- "map_cnt() = %d, iteration gave %d elements", r, iter_cnt);
-
- rd_map_destroy(&rmap);
-
- ts = rd_clock() - ts;
- RD_UT_SAY("Total time over %d entries took %.3fms", cnt,
- (float)ts / 1000.0);
-
- RD_UT_PASS();
-}
-
-
-int unittest_map(void) {
- int fails = 0;
- fails += unittest_untyped_map();
- fails += unittest_typed_map();
- fails += unittest_typed_map2();
-        return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h
deleted file mode 100644
index a79dcda06..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2020 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDMAP_H_
-#define _RDMAP_H_
-
-/**
- * @name Hash maps.
- *
- * Memory of key and value are allocated by the user but owned by the hash map
- * until elements are deleted or overwritten.
- *
- * The lower-case API provides a generic typeless (void *) hash map while
- * the upper-case API provides a strictly typed hash map implemented as macros
- * on top of the generic API.
- *
- * See rd_map_init(), et al., for the generic API and RD_MAP_INITIALIZER()
- * for the typed API.
- *
- * @remark Not thread safe.
- */
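A minimal sketch of the generic API with string keys and values; the map takes ownership of both and frees them with the destructors passed to rd_map_init():

    #include <stdio.h>
    #include "rdmap.h"

    static void example_untyped(void) {
            rd_map_t map;

            rd_map_init(&map, 0 /* default bucket count */, rd_map_str_cmp,
                        rd_map_str_hash, rd_free, rd_free);
            rd_map_set(&map, rd_strdup("client.id"), rd_strdup("rdkafka"));
            printf("%s\n", (const char *)rd_map_get(&map, "client.id"));
            rd_map_destroy(&map); /* frees the remaining keys and values */
    }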
-
-
-/**
- * @struct Map element. This is the internal representation
- * of the element and is exposed to the user when iterating over the hash.
- */
-typedef struct rd_map_elem_s {
- LIST_ENTRY(rd_map_elem_s) hlink; /**< Hash bucket link */
- LIST_ENTRY(rd_map_elem_s) link; /**< Iterator link */
- unsigned int hash; /**< Key hash value */
- const void *key; /**< Key (memory owned by map) */
- const void *value; /**< Value (memory owned by map) */
-} rd_map_elem_t;
-
-
-/**
- * @struct Hash buckets (internal use).
- */
-struct rd_map_buckets {
- LIST_HEAD(, rd_map_elem_s) * p; /**< Hash buckets array */
- int cnt; /**< Bucket count */
-};
-
-
-/**
- * @struct Hash map.
- */
-typedef struct rd_map_s {
- struct rd_map_buckets rmap_buckets; /**< Hash buckets */
- int rmap_cnt; /**< Element count */
-
- LIST_HEAD(, rd_map_elem_s)
- rmap_iter; /**< Element list for iterating
- * over all elements. */
-
- int (*rmap_cmp)(const void *a, const void *b); /**< Key comparator */
- unsigned int (*rmap_hash)(const void *key); /**< Key hash function */
- void (*rmap_destroy_key)(void *key); /**< Optional key free */
- void (*rmap_destroy_value)(void *value); /**< Optional value free */
-
- void *rmap_opaque;
-} rd_map_t;
-
-
-
-/**
- * @brief Set/overwrite value in map.
- *
- * If an existing entry with the same key already exists its key and value
- * will be freed with the destroy_key and destroy_value functions
- * passed to rd_map_init().
- *
- * The map assumes memory ownership of both the \p key and \p value and will
- * use the destroy_key and destroy_value functions (if set) to free
- * the key and value memory when the map is destroyed or element removed.
- *
- * @returns the map element.
- */
-rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value);
-
-
-/**
- * @brief Look up \p key in the map and return its value, or NULL
- * if \p key was not found.
- *
- * The returned memory is still owned by the map.
- */
-void *rd_map_get(const rd_map_t *rmap, const void *key);
-
-
-/**
- * @brief Delete \p key from the map, if it exists.
- *
- * The destroy_key and destroy_value functions (if set) will be used
- * to free the key and value memory.
- */
-void rd_map_delete(rd_map_t *rmap, const void *key);
-
-
-/** Key or Value Copy function signature. */
-typedef void *(rd_map_copy_t)(const void *key_or_value);
-
-
-/**
- * @brief Copy all elements from \p src to \p dst.
- * \p dst must be initialized and compatible with \p src.
- *
- * @param dst Destination map to copy to.
- * @param src Source map to copy from.
- * @param key_copy Key copy callback. If NULL the \p dst key will just
- * reference the \p src key.
- * @param value_copy Value copy callback. If NULL the \p dst value will just
- * reference the \p src value.
- */
-void rd_map_copy(rd_map_t *dst,
- const rd_map_t *src,
- rd_map_copy_t *key_copy,
- rd_map_copy_t *value_copy);
-
-
-/**
- * @returns the current number of elements in the map.
- */
-size_t rd_map_cnt(const rd_map_t *rmap);
-
-/**
- * @returns true if map is empty, else false.
- */
-rd_bool_t rd_map_is_empty(const rd_map_t *rmap);
-
-
-/**
- * @brief Iterate over all elements in the map.
- *
- * @warning The map MUST NOT be modified during the loop.
- *
- * @remark This is part of the untyped generic API.
- */
-#define RD_MAP_FOREACH_ELEM(ELEM, RMAP) \
- for (rd_map_iter_begin((RMAP), &(ELEM)); rd_map_iter(&(ELEM)); \
- rd_map_iter_next(&(ELEM)))
-
-
-/**
- * @brief Begin iterating \p rmap, first element is set in \p *elem.
- */
-void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem);
-
-/**
- * @returns 1 if \p *elem is a valid iteration element, else 0.
- */
-static RD_INLINE RD_UNUSED int rd_map_iter(const rd_map_elem_t **elem) {
- return *elem != NULL;
-}
-
-/**
- * @brief Advances the iteration to the next element.
- */
-static RD_INLINE RD_UNUSED void rd_map_iter_next(const rd_map_elem_t **elem) {
- *elem = LIST_NEXT(*elem, link);
-}
-
-
-/**
- * @brief Initialize a map that is expected to hold \p expected_cnt elements.
- *
- * @param expected_cnt Expected number of elements in the map,
- * this is used to select a suitable bucket count.
- * Passing a value of 0 will set the bucket count
- * to a reasonable default.
- * @param cmp Key comparator that must return 0 if the two keys match.
- * @param hash Key hashing function that is used to map a key to a bucket.
- * It must return an integer hash >= 0 of the key.
- * @param destroy_key (Optional) When an element is deleted or overwritten
- * this function will be used to free the key memory.
- * @param destroy_value (Optional) When an element is deleted or overwritten
- * this function will be used to free the value memory.
- *
- * Destroy the map with rd_map_destroy()
- *
- * @remarks The map is not thread-safe.
- */
-void rd_map_init(rd_map_t *rmap,
- size_t expected_cnt,
- int (*cmp)(const void *a, const void *b),
- unsigned int (*hash)(const void *key),
- void (*destroy_key)(void *key),
- void (*destroy_value)(void *value));
-
-
-/**
- * @brief Internal use
- */
-struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt);
-
-
-/**
- * @brief Empty the map and free all elements.
- */
-void rd_map_clear(rd_map_t *rmap);
-
-
-/**
- * @brief Free all elements in the map and free all memory associated
- * with the map, but not the rd_map_t itself.
- *
- * The map is unusable after this call but can be re-initialized using
- * rd_map_init().
- *
- * @sa rd_map_clear()
- */
-void rd_map_destroy(rd_map_t *rmap);
-
-
-/**
- * @brief String comparator for (const char *) keys.
- */
-int rd_map_str_cmp(const void *a, const void *b);
-
-
-/**
- * @brief String hash function (djb2) for (const char *) keys.
- */
-unsigned int rd_map_str_hash(const void *a);
-
-
-
-/**
- * @name Typed hash maps.
- *
- * Typed hash maps provides a type-safe layer on top of the standard hash maps.
- */
-
-/**
- * @brief Define a typed map type which can later be used with
- * RD_MAP_INITIALIZER() and typed RD_MAP_*() API.
- */
-#define RD_MAP_TYPE(KEY_TYPE, VALUE_TYPE) \
- struct { \
- rd_map_t rmap; \
- KEY_TYPE key; \
- VALUE_TYPE value; \
- const rd_map_elem_t *elem; \
- }
-
-/**
- * @brief Initialize a typed hash map. The left hand side variable must be
- * a typed hash map defined by RD_MAP_TYPE().
- *
- * The typed hash map is a macro layer on top of the rd_map_t implementation
- * that provides type safety.
- * The methods are the same as the underlying implementation but in all caps
- * (to indicate their macro use), e.g., RD_MAP_SET() is the typed version
- * of rd_map_set().
- *
- * @param EXPECTED_CNT Expected number of elements in hash.
- * @param CMP Comparator function for the key.
- * @param HASH Hash function for the key.
- * @param DESTROY_KEY Destructor for the key type.
- * @param DESTROY_VALUE Destructor for the value type.
- *
- * @sa rd_map_init()
- */
-#define RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \
- DESTROY_VALUE) \
- { \
- .rmap = { \
- .rmap_buckets = rd_map_alloc_buckets(EXPECTED_CNT), \
- .rmap_cmp = CMP, \
- .rmap_hash = HASH, \
- .rmap_destroy_key = DESTROY_KEY, \
- .rmap_destroy_value = DESTROY_VALUE \
- } \
- }
-
-
-/**
- * @brief Initialize a locally-defined typed hash map.
- * This hash map can only be used in the current scope/function
- *        as its type is private to this initialization.
- *
- * @param RMAP Hash map variable name.
- *
- * For the other parameters, see RD_MAP_INITIALIZER().
- *
- * @sa RD_MAP_INITIALIZER()
- */
-#define RD_MAP_LOCAL_INITIALIZER(RMAP, EXPECTED_CNT, KEY_TYPE, VALUE_TYPE, \
- CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \
- struct { \
- rd_map_t rmap; \
- KEY_TYPE key; \
- VALUE_TYPE value; \
- const rd_map_elem_t *elem; \
- } RMAP = RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \
- DESTROY_VALUE)
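A sketch of a locally-typed map from configuration name to value; with NULL destructors the map only references the string literals:

    static void example_typed(void) {
            RD_MAP_LOCAL_INITIALIZER(conf, 4, const char *, const char *,
                                     rd_map_str_cmp, rd_map_str_hash,
                                     NULL, NULL);

            RD_MAP_SET(&conf, "bootstrap.servers", "localhost:9092");
            printf("%s\n", RD_MAP_GET(&conf, "bootstrap.servers"));
            RD_MAP_DESTROY(&conf);
    }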
-
-
-/**
- * @brief Initialize typed map \p RMAP.
- *
- * @sa rd_map_init()
- */
-#define RD_MAP_INIT(RMAP, EXPECTED_CNT, CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \
- rd_map_init(&(RMAP)->rmap, EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \
- DESTROY_VALUE)
-
-
-/**
- * @brief Allocate and initialize a typed map.
- */
-
-
-/**
- * @brief Typed hash map: Set key/value in map.
- *
- * @sa rd_map_set()
- */
-#define RD_MAP_SET(RMAP, KEY, VALUE) \
- ((RMAP)->key = KEY, (RMAP)->value = VALUE, \
- rd_map_set(&(RMAP)->rmap, (void *)(RMAP)->key, \
- (void *)(RMAP)->value))
-
-/**
- * @brief Typed hash map: Get value for key.
- *
- * @sa rd_map_get()
- */
-#define RD_MAP_GET(RMAP, KEY) \
- ((RMAP)->key = (KEY), \
- (RMAP)->value = rd_map_get(&(RMAP)->rmap, (RMAP)->key), \
- (RMAP)->value)
-
-
-
-/**
- * @brief Get value for key. If key does not exist in map a new
- * entry is added using the DEFAULT_CODE.
- */
-#define RD_MAP_GET_OR_SET(RMAP, KEY, DEFAULT_CODE) \
- (RD_MAP_GET(RMAP, KEY) \
- ? (RMAP)->value \
- : (RD_MAP_SET(RMAP, (RMAP)->key, DEFAULT_CODE), (RMAP)->value))
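A word-count sketch: the DEFAULT_CODE expression is only evaluated when the key is missing. Here `counts` is assumed to be a typed map of const char * to int *, with rd_free as value destructor, and new_zero() is a hypothetical helper returning a freshly rd_calloc()ed int:

    int *cnt = RD_MAP_GET_OR_SET(&counts, word, new_zero());
    (*cnt)++;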
-
-
-/**
- * @brief Typed hash map: Delete element by key.
- *
- * The destroy_key and destroy_value functions (if set) will be used
- * to free the key and value memory.
- *
- * @sa rd_map_delete()
- */
-#define RD_MAP_DELETE(RMAP, KEY) \
- ((RMAP)->key = (KEY), rd_map_delete(&(RMAP)->rmap, (RMAP)->key))
-
-
-/**
- * @brief Copy all elements from \p SRC to \p DST.
- * \p DST must be initialized and compatible with \p SRC.
- *
- * @param DST Destination map to copy to.
- * @param SRC Source map to copy from.
- * @param KEY_COPY Key copy callback. If NULL the \p DST key will just
- * reference the \p SRC key.
- * @param VALUE_COPY Value copy callback. If NULL the \p DST value will just
- * reference the \p SRC value.
- */
-#define RD_MAP_COPY(DST, SRC, KEY_COPY, VALUE_COPY) \
- do { \
- if ((DST) != (SRC)) /*implicit type-check*/ \
- rd_map_copy(&(DST)->rmap, &(SRC)->rmap, KEY_COPY, \
- VALUE_COPY); \
- } while (0)
-
-
-/**
- * @brief Empty the map and free all elements.
- *
- * @sa rd_map_clear()
- */
-#define RD_MAP_CLEAR(RMAP) rd_map_clear(&(RMAP)->rmap)
-
-
-/**
- * @brief Typed hash map: Destroy hash map.
- *
- * @sa rd_map_destroy()
- */
-#define RD_MAP_DESTROY(RMAP) rd_map_destroy(&(RMAP)->rmap)
-
-
-/**
- * @brief Typed hash map: Destroy and free the hash map.
- *
- * @sa rd_map_destroy()
- */
-#define RD_MAP_DESTROY_AND_FREE(RMAP) \
- do { \
- rd_map_destroy(&(RMAP)->rmap); \
- rd_free(RMAP); \
- } while (0)
-
-
-/**
- * @brief Typed hash map: Iterate over all elements in the map.
- *
- * @warning The current or previous elements may be removed, but the next
- * element after the current one MUST NOT be modified during the loop.
- *
- * @warning RD_MAP_FOREACH() only supports one simultaneous invocation,
- * that is, special care must be taken not to call FOREACH() from
- * within a FOREACH() or FOREACH_KEY() loop on the same map.
- * This is due to how RMAP->elem is used as the iterator.
- * This restriction is unfortunately not enforced at build or run time.
- *
- * @remark The \p RMAP may not be const.
- */
-#define RD_MAP_FOREACH(K, V, RMAP) \
- for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL, \
- (V) = NULL; \
- rd_map_iter(&(RMAP)->elem) && \
- ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \
- (RMAP)->value = (void *)(RMAP)->elem->value, (V) = (RMAP)->value, \
- rd_map_iter_next(&(RMAP)->elem), rd_true);)
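Deleting the current element is safe because the iterator has already advanced to the next element when the loop body runs, as sketched here; `cache`, `key`, `value` and the expired() predicate are assumed for illustration:

    RD_MAP_FOREACH(key, value, &cache) {
            if (expired(value))
                    RD_MAP_DELETE(&cache, key);
    }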
-
-
-/**
- * @brief Typed hash map: Iterate over all keys in the map.
- *
- * @warning The current or previous elements may be removed, but the next
- * element after the current one MUST NOT be modified during the loop.
- *
- * @warning RD_MAP_FOREACH_KEY() only supports one simultaneous invocation,
- * that is, special care must be taken not to call FOREACH_KEY() from
- * within a FOREACH() or FOREACH_KEY() loop on the same map.
- * This is due to how RMAP->elem is used as the iterator.
- * This restriction is unfortunately not enforced at build or run time.
- *
- * @remark The \p RMAP may not be const.
- */
-#define RD_MAP_FOREACH_KEY(K, RMAP) \
- for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL; \
- rd_map_iter(&(RMAP)->elem) && \
- ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \
- rd_map_iter_next(&(RMAP)->elem), rd_true);)
-
-
-/**
- * @returns the number of elements in the map.
- */
-#define RD_MAP_CNT(RMAP) rd_map_cnt(&(RMAP)->rmap)
-
-/**
- * @returns true if map is empty, else false.
- */
-#define RD_MAP_IS_EMPTY(RMAP) rd_map_is_empty(&(RMAP)->rmap)
-
-#endif /* _RDMAP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c
deleted file mode 100644
index c3e4095d4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdunittest.h"
-#include "rdmurmur2.h"
-#include "rdendian.h"
-
-
-/* MurmurHash2, by Austin Appleby
- *
- * With librdkafka modifications combining aligned/unaligned variants
- * into the same function.
- */
-
-#define MM_MIX(h, k, m) \
- { \
- k *= m; \
- k ^= k >> r; \
- k *= m; \
- h *= m; \
- h ^= k; \
- }
-
-/*-----------------------------------------------------------------------------
-// Based on MurmurHashNeutral2, by Austin Appleby
-//
-// Same as MurmurHash2, but endian- and alignment-neutral.
-// Half the speed though, alas.
-//
-*/
-uint32_t rd_murmur2(const void *key, size_t len) {
- const uint32_t seed = 0x9747b28c;
- const uint32_t m = 0x5bd1e995;
- const int r = 24;
- uint32_t h = seed ^ (uint32_t)len;
- const unsigned char *tail;
-
- if (likely(((intptr_t)key & 0x3) == 0)) {
- /* Input is 32-bit word aligned. */
- const uint32_t *data = (const uint32_t *)key;
-
- while (len >= 4) {
- uint32_t k = htole32(*(uint32_t *)data);
-
- MM_MIX(h, k, m);
-
- data++;
- len -= 4;
- }
-
- tail = (const unsigned char *)data;
-
- } else {
- /* Unaligned slower variant */
- const unsigned char *data = (const unsigned char *)key;
-
- while (len >= 4) {
- uint32_t k;
-
- k = data[0];
- k |= data[1] << 8;
- k |= data[2] << 16;
- k |= data[3] << 24;
-
- MM_MIX(h, k, m);
-
- data += 4;
- len -= 4;
- }
-
- tail = data;
- }
-
- /* Read remaining sub-word */
-       switch (len) {
-       case 3:
-               h ^= tail[2] << 16;
-               /* FALLTHRU */
-       case 2:
-               h ^= tail[1] << 8;
-               /* FALLTHRU */
-       case 1:
-               h ^= tail[0];
-               h *= m;
-       }
-
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
-
-       /* The last bit is set to 0 because the Java implementation uses a
-        * signed int32 and then makes the number positive by flipping the
-        * last bit to 1. */
- return h;
-}
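A usage sketch; the unit test below confirms the result matches the Java client's murmur2 for the same bytes, which is what keeps key-based partitioning consistent across clients:

    const char *key = "orderid-4711";
    uint32_t h = rd_murmur2(key, strlen(key));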
-
-
-/**
- * @brief Unittest for rd_murmur2()
- */
-int unittest_murmur2(void) {
- const char *short_unaligned = "1234";
- const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs";
- const char *keysToTest[] = {
- "kafka",
- "giberish123456789",
- short_unaligned,
- short_unaligned + 1,
- short_unaligned + 2,
- short_unaligned + 3,
- unaligned,
- unaligned + 1,
- unaligned + 2,
- unaligned + 3,
- "",
- NULL,
- };
-
- const int32_t java_murmur2_results[] = {
- 0xd067cf64, // kafka
- 0x8f552b0c, // giberish123456789
- 0x9fc97b14, // short_unaligned
- 0xe7c009ca, // short_unaligned+1
- 0x873930da, // short_unaligned+2
- 0x5a4b5ca1, // short_unaligned+3
- 0x78424f1c, // unaligned
- 0x4a62b377, // unaligned+1
- 0xe0e4e09e, // unaligned+2
- 0x62b8b43f, // unaligned+3
- 0x106e08d9, // ""
- 0x106e08d9, // NULL
- };
-
- size_t i;
- for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) {
- uint32_t h = rd_murmur2(
- keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0);
- RD_UT_ASSERT((int32_t)h == java_murmur2_results[i],
- "Calculated murmur2 hash 0x%x for \"%s\", "
- "expected 0x%x",
- h, keysToTest[i], java_murmur2_results[i]);
- }
- RD_UT_PASS();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h
deleted file mode 100644
index 5991caa50..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __RDMURMUR2___H__
-#define __RDMURMUR2___H__
-
-uint32_t rd_murmur2(const void *key, size_t len);
-int unittest_murmur2(void);
-
-#endif // __RDMURMUR2___H__
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c
deleted file mode 100644
index 15c57e928..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * System portability
- */
-
-#include "rd.h"
-
-
-#include <stdlib.h>
-
-/**
- * qsort_r substitute
- * This nicely explains why we won't bother with the native implementations
- * on Win32 (qsort_s) or OSX/FreeBSD (qsort_r with different arguments):
- * http://forum.theorex.tech/t/different-declarations-of-qsort-r-on-mac-and-linux/93/2
- */
-static RD_TLS int (*rd_qsort_r_cmp)(const void *, const void *, void *);
-static RD_TLS void *rd_qsort_r_arg;
-
-static RD_UNUSED int rd_qsort_r_trampoline(const void *a, const void *b) {
- return rd_qsort_r_cmp(a, b, rd_qsort_r_arg);
-}
-
-void rd_qsort_r(void *base,
- size_t nmemb,
- size_t size,
- int (*compar)(const void *, const void *, void *),
- void *arg) {
- rd_qsort_r_cmp = compar;
- rd_qsort_r_arg = arg;
- qsort(base, nmemb, size, rd_qsort_r_trampoline);
- rd_qsort_r_cmp = NULL;
- rd_qsort_r_arg = NULL;
-}
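A sketch of sorting with a context argument; note that the trampoline state is thread-local, so concurrent calls from different threads are fine, but the comparator must not call rd_qsort_r() recursively:

    static int cmp_dir(const void *a, const void *b, void *arg) {
            int dir = *(const int *)arg; /* +1 ascending, -1 descending */
            return dir * (*(const int *)a - *(const int *)b);
    }

    /* Usage:
     *   int dir = -1, v[] = {3, 1, 2};
     *   rd_qsort_r(v, 3, sizeof(*v), cmp_dir, &dir);   =>  {3, 2, 1}
     */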
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h
deleted file mode 100644
index 0cdbcd85f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDPORTS_H_
-#define _RDPORTS_H_
-
-
-void rd_qsort_r(void *base,
- size_t nmemb,
- size_t size,
- int (*compar)(const void *, const void *, void *),
- void *arg);
-
-#endif /* _RDPORTS_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h
deleted file mode 100644
index 7b2376823..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * POSIX system support
- */
-#ifndef _RDPOSIX_H_
-#define _RDPOSIX_H_
-
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/time.h>
-#include <inttypes.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <string.h>
-
-/**
- * Types
- */
-
-
-/**
- * Annotations, attributes, optimizers
- */
-#ifndef likely
-#define likely(x) __builtin_expect((x), 1)
-#endif
-#ifndef unlikely
-#define unlikely(x) __builtin_expect((x), 0)
-#endif
-
-#define RD_UNUSED __attribute__((unused))
-#define RD_INLINE inline
-#define RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-#define RD_NORETURN __attribute__((noreturn))
-#define RD_IS_CONSTANT(p) __builtin_constant_p((p))
-#define RD_TLS __thread
-
-/**
- * Allocation
- */
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__)
-/* alloca(3) is in stdlib on FreeBSD */
-#include <alloca.h>
-#endif
-
-#define rd_alloca(N) alloca(N)
-
-
-/**
- * Strings, formatting, printf, ..
- */
-
-/* size_t and ssize_t format strings */
-#define PRIusz "zu"
-#define PRIdsz "zd"
-
-#ifndef RD_FORMAT
-#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__)))
-#endif
-#define rd_snprintf(...) snprintf(__VA_ARGS__)
-#define rd_vsnprintf(...) vsnprintf(__VA_ARGS__)
-
-#define rd_strcasecmp(A, B) strcasecmp(A, B)
-#define rd_strncasecmp(A, B, N) strncasecmp(A, B, N)
-
-
-#ifdef HAVE_STRCASESTR
-#define rd_strcasestr(HAYSTACK, NEEDLE) strcasestr(HAYSTACK, NEEDLE)
-#else
-#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE)
-#endif
-
-
-/**
- * Errors
- */
-
-
-#define rd_set_errno(err) (errno = (err))
-
-#if HAVE_STRERROR_R
-static RD_INLINE RD_UNUSED const char *rd_strerror(int err) {
- static RD_TLS char ret[128];
-
-#if defined(__GLIBC__) && defined(_GNU_SOURCE)
- return strerror_r(err, ret, sizeof(ret));
-#else /* XSI version */
- int r;
- /* The r assignment is to catch the case where
- * _GNU_SOURCE is not defined but the GNU version is
- * picked up anyway. */
- r = strerror_r(err, ret, sizeof(ret));
- if (unlikely(r))
- rd_snprintf(ret, sizeof(ret), "strerror_r(%d) failed (ret %d)",
- err, r);
- return ret;
-#endif
-}
-#else
-#define rd_strerror(err) strerror(err)
-#endif
-
-
-/**
- * Atomics
- */
-#include "rdatomic.h"
-
-/**
- * Misc
- */
-
-/**
- * Microsecond sleep.
- * Will retry on signal interrupt unless *terminate is true.
- */
-static RD_INLINE RD_UNUSED void rd_usleep(int usec, rd_atomic32_t *terminate) {
- struct timespec req = {usec / 1000000, (long)(usec % 1000000) * 1000};
-
- /* Retry until complete (issue #272), unless terminating. */
- while (nanosleep(&req, &req) == -1 &&
- (errno == EINTR && (!terminate || !rd_atomic32_get(terminate))))
- ;
-}
-
-
-
-#define rd_gettimeofday(tv, tz) gettimeofday(tv, tz)
-
-
-#ifndef __COVERITY__
-#define rd_assert(EXPR) assert(EXPR)
-#else
-extern void __coverity_panic__(void);
-#define rd_assert(EXPR) \
- do { \
- if (!(EXPR)) \
- __coverity_panic__(); \
- } while (0)
-#endif
-
-
-static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env,
- const char *def) {
- const char *tmp;
- tmp = getenv(env);
- if (tmp && *tmp)
- return tmp;
- return def;
-}
-
-
-/**
- * Empty struct initializer
- */
-#define RD_ZERO_INIT \
- {}
-
-/**
- * Sockets, IO
- */
-
-/** @brief Socket type */
-typedef int rd_socket_t;
-
-/** @brief Socket API error return value */
-#define RD_SOCKET_ERROR (-1)
-
-/** @brief Last socket error */
-#define rd_socket_errno errno
-
-
-/** @brief String representation of socket error */
-#define rd_socket_strerror(ERR) rd_strerror(ERR)
-
-/** @brief poll() struct type */
-typedef struct pollfd rd_pollfd_t;
-
-/** @brief poll(2) */
-#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \
- poll(POLLFD, FDCNT, TIMEOUT_MS)
-
-/**
- * @brief Set socket to non-blocking
- * @returns 0 on success or errno on failure.
- */
-static RD_UNUSED int rd_fd_set_nonblocking(int fd) {
- int fl = fcntl(fd, F_GETFL, 0);
- if (fl == -1 || fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1)
- return errno;
- return 0;
-}
-
-/**
- * @brief Create non-blocking pipe
- * @returns 0 on success or errno on failure
- */
-static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) {
-       if (pipe(fds) == -1 || rd_fd_set_nonblocking(fds[0]) ||
-           rd_fd_set_nonblocking(fds[1]))
- return errno;
-
- /* Minimize buffer sizes to avoid a large number
- * of signaling bytes to accumulate when
- * io-signalled queue is not being served for a while. */
-#ifdef F_SETPIPE_SZ
- /* Linux automatically rounds the pipe size up
- * to the minimum size. */
- fcntl(fds[0], F_SETPIPE_SZ, 100);
- fcntl(fds[1], F_SETPIPE_SZ, 100);
-#endif
- return 0;
-}
-#define rd_socket_read(fd, buf, sz) read(fd, buf, sz)
-#define rd_socket_write(fd, buf, sz) write(fd, buf, sz)
-#define rd_socket_close(fd) close(fd)
-
-/* File IO */
-#define rd_write(fd, buf, sz) write(fd, buf, sz)
-#define rd_open(path, flags, mode) open(path, flags, mode)
-#define rd_close(fd) close(fd)
-
-#endif /* _RDPOSIX_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c
deleted file mode 100644
index e36d79380..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rd.h"
-#include "rdrand.h"
-#include "rdtime.h"
-#include "tinycthread.h"
-
-int rd_jitter(int low, int high) {
- int rand_num;
-#if HAVE_RAND_R
- static RD_TLS unsigned int seed = 0;
-
- /* Initial seed with time+thread id */
- if (unlikely(seed == 0)) {
- struct timeval tv;
- rd_gettimeofday(&tv, NULL);
- seed = (unsigned int)(tv.tv_usec / 1000);
- seed ^= (unsigned int)(intptr_t)thrd_current();
- }
-
- rand_num = rand_r(&seed);
-#else
- rand_num = rand();
-#endif
- return (low + (rand_num % ((high - low) + 1)));
-}
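A sketch of the typical use, spreading out retries so that many clients do not reconnect at the same instant; the +-20% figure here is illustrative:

    int backoff_ms = 1000 + rd_jitter(-200, 200); /* 800..1200 ms */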
-
-void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size) {
- int i;
- void *tmp = rd_alloca(entry_size);
-
- /* FIXME: Optimized version for word-sized entries. */
-
- for (i = (int)nmemb - 1; i > 0; i--) {
- int j = rd_jitter(0, i);
- if (unlikely(i == j))
- continue;
-
- memcpy(tmp, (char *)base + (i * entry_size), entry_size);
- memcpy((char *)base + (i * entry_size),
- (char *)base + (j * entry_size), entry_size);
- memcpy((char *)base + (j * entry_size), tmp, entry_size);
- }
-}
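A sketch of shuffling a fixed-size array; only the element count and entry size matter, the element type is opaque:

    int ids[] = {101, 102, 103, 104};
    rd_array_shuffle(ids, RD_ARRAYSIZE(ids), sizeof(*ids));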
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h
deleted file mode 100644
index 0e3a927c2..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDRAND_H_
-#define _RDRAND_H_
-
-
-/**
- * Returns a random number (using rand(3)) in the range 'low'..'high'
- * (inclusive).
- */
-int rd_jitter(int low, int high);
-
-/**
- * Shuffles (randomizes) an array using the modern Fisher-Yates algorithm.
- */
-void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size);
-
-#endif /* _RDRAND_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c
deleted file mode 100644
index 0c70cb334..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdstring.h"
-#include "rdregex.h"
-
-#if HAVE_REGEX
-#include <regex.h>
-struct rd_regex_s {
- regex_t re;
-};
-
-#else
-
-#include "regexp.h"
-struct rd_regex_s {
- Reprog *re;
-};
-#endif
-
-
-/**
- * @brief Destroy compiled regex
- */
-void rd_regex_destroy(rd_regex_t *re) {
-#if HAVE_REGEX
- regfree(&re->re);
-#else
- re_regfree(re->re);
-#endif
- rd_free(re);
-}
-
-
-/**
- * @brief Compile regex \p pattern
- * @returns Compiled regex object on success, or NULL on error.
- */
-rd_regex_t *
-rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size) {
- rd_regex_t *re = rd_calloc(1, sizeof(*re));
-#if HAVE_REGEX
- int r;
-
- r = regcomp(&re->re, pattern, REG_EXTENDED | REG_NOSUB);
- if (r) {
- if (errstr)
- regerror(r, &re->re, errstr, errstr_size);
- rd_free(re);
- return NULL;
- }
-#else
- const char *errstr2;
-
- re->re = re_regcomp(pattern, 0, &errstr2);
- if (!re->re) {
- if (errstr)
- rd_strlcpy(errstr, errstr2, errstr_size);
- rd_free(re);
- return NULL;
- }
-#endif
-
- return re;
-}
-
-
-/**
- * @brief Match \p str to pre-compiled regex \p re
- * @returns 1 on match, else 0
- */
-int rd_regex_exec(rd_regex_t *re, const char *str) {
-#if HAVE_REGEX
- return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH;
-#else
- return !re_regexec(re->re, str, NULL, 0);
-#endif
-}
-
-
-/**
- * @brief Perform regex match of \p str using regex \p pattern.
- *
- * @returns 1 on match, 0 on non-match or -1 on regex compilation error
- * in which case a human readable error string is written to
- * \p errstr (if not NULL).
- */
-int rd_regex_match(const char *pattern,
- const char *str,
- char *errstr,
- size_t errstr_size) {
-#if HAVE_REGEX /* use libc regex */
- regex_t re;
- int r;
-
- /* FIXME: cache compiled regex */
- r = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB);
- if (r) {
- if (errstr)
- regerror(r, &re, errstr, errstr_size);
-                return -1; /* Compilation error, per the documented contract */
- }
-
- r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH;
-
- regfree(&re);
-
- return r;
-
-#else /* Using regexp.h from minilibs (included) */
- Reprog *re;
- int r;
- const char *errstr2;
-
- /* FIXME: cache compiled regex */
- re = re_regcomp(pattern, 0, &errstr2);
- if (!re) {
- if (errstr)
- rd_strlcpy(errstr, errstr2, errstr_size);
- return -1;
- }
-
- r = !re_regexec(re, str, NULL, 0);
-
- re_regfree(re);
-
- return r;
-#endif
-}
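Note the FIXMEs above: rd_regex_match() recompiles the pattern on every call, so callers with a fixed pattern are expected to use the compile-once path instead. A sketch of that pattern, assuming only the rd_regex_* API in this file:

    char errstr[256];
    rd_regex_t *re;

    /* Compile once... */
    re = rd_regex_comp("^mytopic\\..*", errstr, sizeof(errstr));
    if (!re) {
            fprintf(stderr, "Invalid pattern: %s\n", errstr);
            return;
    }

    /* ...match any number of strings... */
    if (rd_regex_exec(re, "mytopic.orders"))
            printf("matched\n");

    /* ...and destroy the compiled object once. */
    rd_regex_destroy(re);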
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h
deleted file mode 100644
index 135229d62..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _RDREGEX_H_
-#define _RDREGEX_H_
-
-typedef struct rd_regex_s rd_regex_t;
-
-void rd_regex_destroy(rd_regex_t *re);
-rd_regex_t *
-rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size);
-int rd_regex_exec(rd_regex_t *re, const char *str);
-
-int rd_regex_match(const char *pattern,
- const char *str,
- char *errstr,
- size_t errstr_size);
-
-#endif /* _RDREGEX_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h
deleted file mode 100644
index a2c0de1b0..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDSIGNAL_H_
-#define _RDSIGNAL_H_
-
-#include <signal.h>
-
-#define RD_SIG_ALL -1
-#define RD_SIG_END -2
-
-extern sigset_t rd_intr_sigset;
-extern int rd_intr_blocked;
-
-static __inline void rd_intr_block(void) RD_UNUSED;
-static __inline void rd_intr_block(void) {
- if (rd_intr_blocked++)
- return;
-
- sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL);
-}
-
-static __inline void rd_intr_unblock(void) RD_UNUSED;
-static __inline void rd_intr_unblock(void) {
- assert(rd_intr_blocked > 0);
- if (--rd_intr_blocked)
- return;
-
- sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL);
-}
-
-#endif /* _RDSIGNAL_H_ */
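The rd_intr_blocked counter makes the pair above safe to nest: only the 0->1 transition issues sigprocmask(SIG_BLOCK), and only the matching 1->0 transition issues SIG_UNBLOCK, so nested critical sections cost no syscalls. Illustrated:

    rd_intr_block();    /* rd_intr_blocked 0 -> 1: sigprocmask(SIG_BLOCK) */
    rd_intr_block();    /* 1 -> 2: no syscall */
    /* ... nested signal-sensitive section ... */
    rd_intr_unblock();  /* 2 -> 1: no syscall */
    rd_intr_unblock();  /* 1 -> 0: sigprocmask(SIG_UNBLOCK) */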
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c
deleted file mode 100644
index 6a18210c9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rd.h"
-#include "rdstring.h"
-#include "rdunittest.h"
-
-#include <ctype.h>
-
-
-/**
- * @brief Render string \p template using \p callback for key lookups.
- *
- * Keys in template follow the %{keyname} syntax.
- *
- * The \p callback must not write more than \p size bytes to \p buf, and
- * should return the number of bytes it wanted to write (a value larger
- * than \p size indicates a truncated write).
- * If the key is not found, -1 should be returned (which fails the rendering).
- *
- * @returns the rendered string (to be freed with rd_free()),
- * or NULL on failure (in which case \p errstr is written)
- */
-char *rd_string_render(
- const char *template,
- char *errstr,
- size_t errstr_size,
- ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque),
- void *opaque) {
- const char *s = template;
- const char *tend = template + strlen(template);
- size_t size = 256;
- char *buf;
- size_t of = 0;
-
- buf = rd_malloc(size);
-
-#define _remain() (size - of - 1)
-#define _assure_space(SZ) \
- do { \
- if (of + (SZ) + 1 >= size) { \
- size = (size + (SZ) + 1) * 2; \
- buf = rd_realloc(buf, size); \
- } \
- } while (0)
-
-#define _do_write(PTR, SZ) \
- do { \
- _assure_space(SZ); \
- memcpy(buf + of, (PTR), (SZ)); \
- of += (SZ); \
- } while (0)
-
-
-
- while (*s) {
- const char *t;
- size_t tof = (size_t)(s - template);
-
- t = strstr(s, "%{");
- if (t != s) {
- /* Write "abc%{"
- * ^^^ */
- size_t len = (size_t)((t ? t : tend) - s);
- if (len)
- _do_write(s, len);
- }
-
- if (t) {
- const char *te;
- ssize_t r;
- char *tmpkey;
-
- /* Find "abc%{key}"
- * ^ */
- te = strchr(t + 2, '}');
- if (!te) {
- rd_snprintf(errstr, errstr_size,
- "Missing close-brace } for "
- "%.*s at %" PRIusz,
- 15, t, tof);
- rd_free(buf);
- return NULL;
- }
-
- rd_strndupa(&tmpkey, t + 2, (int)(te - t - 2));
-
- /* Query callback for length of key's value. */
- r = callback(tmpkey, NULL, 0, opaque);
- if (r == -1) {
- rd_snprintf(errstr, errstr_size,
- "Property not available: \"%s\"",
- tmpkey);
- rd_free(buf);
- return NULL;
- }
-
- _assure_space(r);
-
- /* Call again now providing a large enough buffer. */
- r = callback(tmpkey, buf + of, _remain(), opaque);
- if (r == -1) {
- rd_snprintf(errstr, errstr_size,
- "Property not available: "
- "\"%s\"",
- tmpkey);
- rd_free(buf);
- return NULL;
- }
-
- assert(r < (ssize_t)_remain());
- of += r;
- s = te + 1;
-
- } else {
- s = tend;
- }
- }
-
- buf[of] = '\0';
- return buf;
-}
-
-
-
-void rd_strtup_destroy(rd_strtup_t *strtup) {
- rd_free(strtup);
-}
-
-void rd_strtup_free(void *strtup) {
- rd_strtup_destroy((rd_strtup_t *)strtup);
-}
-
-rd_strtup_t *rd_strtup_new0(const char *name,
- ssize_t name_len,
- const char *value,
- ssize_t value_len) {
- rd_strtup_t *strtup;
-
- /* Calculate lengths, if needed, and add space for \0 nul */
-
- if (name_len == -1)
- name_len = strlen(name);
-
- if (!value)
- value_len = 0;
- else if (value_len == -1)
- value_len = strlen(value);
-
-
- strtup = rd_malloc(sizeof(*strtup) + name_len + 1 + value_len + 1 -
- 1 /*name[1]*/);
- memcpy(strtup->name, name, name_len);
- strtup->name[name_len] = '\0';
- if (value) {
- strtup->value = &strtup->name[name_len + 1];
- memcpy(strtup->value, value, value_len);
- strtup->value[value_len] = '\0';
- } else {
- strtup->value = NULL;
- }
-
- return strtup;
-}
-
-rd_strtup_t *rd_strtup_new(const char *name, const char *value) {
- return rd_strtup_new0(name, -1, value, -1);
-}
-
-
-/**
- * @returns a new copy of \p src
- */
-rd_strtup_t *rd_strtup_dup(const rd_strtup_t *src) {
- return rd_strtup_new(src->name, src->value);
-}
-
-/**
- * @brief Wrapper for rd_strtup_dup() suitable for rd_list_copy*() use
- */
-void *rd_strtup_list_copy(const void *elem, void *opaque) {
- const rd_strtup_t *src = elem;
- return (void *)rd_strtup_dup(src);
-}
-
-
-
-/**
- * @brief Convert bit-flags in \p flags to a human-readable CSV string
- * using the bit-description strings in \p desc.
- *
- * \p desc array element N corresponds to bit (1<<N).
- * \p desc MUST be terminated by a NULL array element.
- * Empty descriptions are ignored even if the bit is set.
- *
- * @returns a null-terminated \p dst
- */
-char *rd_flags2str(char *dst, size_t size, const char **desc, int flags) {
- int bit = 0;
- size_t of = 0;
-
- for (; *desc; desc++, bit++) {
- int r;
-
- if (!(flags & (1 << bit)) || !*desc)
- continue;
-
- if (of >= size) {
- /* Dest buffer too small, indicate truncation */
- if (size > 3)
- rd_snprintf(dst + (size - 3), 3, "..");
- break;
- }
-
- r = rd_snprintf(dst + of, size - of, "%s%s", !of ? "" : ",",
- *desc);
-
- of += r;
- }
-
- if (of == 0 && size > 0)
- *dst = '\0';
-
- return dst;
-}
-
-
-
-/**
- * @returns a djb2 hash of \p str.
- *
- * @param len If -1 the \p str will be hashed until nul is encountered,
- * else up to \p len bytes.
- */
-unsigned int rd_string_hash(const char *str, ssize_t len) {
- unsigned int hash = 5381;
- ssize_t i;
-
- if (len == -1) {
- for (i = 0; str[i] != '\0'; i++)
- hash = ((hash << 5) + hash) + str[i];
- } else {
- for (i = 0; i < len; i++)
- hash = ((hash << 5) + hash) + str[i];
- }
-
- return hash;
-}
-
-
-/**
- * @brief Same as strcmp() but handles NULL values.
- */
-int rd_strcmp(const char *a, const char *b) {
- if (a == b)
- return 0;
- else if (!a && b)
- return -1;
- else if (!b)
- return 1;
- else
- return strcmp(a, b);
-}
-
-
-
-/**
- * @brief Case-insensitive strstr() for platforms where strcasestr()
- * is not available.
- */
-char *_rd_strcasestr(const char *haystack, const char *needle) {
- const char *h_rem, *n_last;
- size_t h_len = strlen(haystack);
- size_t n_len = strlen(needle);
-
-
- if (n_len == 0 || n_len > h_len)
- return NULL;
- else if (n_len == h_len)
- return !rd_strcasecmp(haystack, needle) ? (char *)haystack
- : NULL;
-
- /*
- * Scan inspired by Boyer-Moore:
- *
- * haystack = "this is a haystack"
- * needle = "hays"
- *
- * "this is a haystack"
- * ^ ^- h_last
- * `-h (haystack + strlen(needle) - 1)
- * `-h_rem
- *
- * "hays"
- * ^-n
- * ^-n_last
- */
- n_last = needle + n_len - 1;
- h_rem = haystack + n_len - 1;
-
- while (*h_rem) {
- const char *h, *n = n_last;
-
- /* Find first occurrence of last character in the needle
- in the remaining haystack. */
- for (h = h_rem; *h && tolower((int)*h) != tolower((int)*n); h++)
- ;
-
- if (!*h)
- return NULL; /* No match */
-
- /* Backtrack both needle and haystack as long as each character
- * matches, if the start of the needle is found we have
- * a full match, else start over from the remaining part of the
- * haystack. */
- do {
- if (n == needle)
- return (char *)h; /* Full match */
-
- /* Rewind both n and h */
- n--;
- h--;
-
- } while (tolower((int)*n) == tolower((int)*h));
-
- /* Mismatch, start over at the next haystack position */
- h_rem++;
- }
-
- return NULL;
-}
-
-
-
-/**
- * @brief Unittests for rd_strcasestr()
- */
-static int ut_strcasestr(void) {
- static const struct {
- const char *haystack;
- const char *needle;
- ssize_t exp;
- } strs[] = {
- {"this is a haystack", "hays", 10},
- {"abc", "a", 0},
- {"abc", "b", 1},
- {"abc", "c", 2},
- {"AbcaBcabC", "ABC", 0},
- {"abcabcaBC", "BcA", 1},
- {"abcabcABc", "cAB", 2},
- {"need to estart stART the tart ReStArT!", "REsTaRt", 30},
- {"need to estart stART the tart ReStArT!", "?sTaRt", -1},
- {"aaaabaaAb", "ab", 3},
- {"0A!", "a", 1},
- {"a", "A", 0},
- {".z", "Z", 1},
- {"", "", -1},
- {"", "a", -1},
- {"a", "", -1},
- {"peRfeCt", "peRfeCt", 0},
- {"perfect", "perfect", 0},
- {"PERFECT", "perfect", 0},
- {NULL},
- };
- int i;
-
- RD_UT_BEGIN();
-
- for (i = 0; strs[i].haystack; i++) {
- const char *ret;
- ssize_t of = -1;
-
- ret = _rd_strcasestr(strs[i].haystack, strs[i].needle);
- if (ret)
- of = ret - strs[i].haystack;
- RD_UT_ASSERT(of == strs[i].exp,
- "#%d: '%s' in '%s': expected offset %" PRIdsz
- ", not %" PRIdsz " (%s)",
- i, strs[i].needle, strs[i].haystack, strs[i].exp,
- of, ret ? ret : "(NULL)");
- }
-
- RD_UT_PASS();
-}
-
-
-
-/**
- * @brief Split a character-separated string into an array.
- *
- * @remark This is not CSV compliant: CSV uses " for escaping, while this
- * implementation uses \.
- *
- * @param input Input string to parse.
- * @param sep The separator character (typically ',')
- * @param skip_empty Do not include empty fields in output array.
- * @param cntp Will be set to number of elements in array.
- *
- * Supports "\" escapes.
- * The array and the array elements will be allocated together and must be freed
- * with a single rd_free(array) call.
- * The array elements are copied and any "\" escapes are removed.
- *
- * @returns the parsed fields in an array. The number of elements in the
- * array is returned in \p cntp
- */
-char **rd_string_split(const char *input,
- char sep,
- rd_bool_t skip_empty,
- size_t *cntp) {
- size_t fieldcnt = 1;
- rd_bool_t next_esc = rd_false;
- const char *s;
- char *p;
- char **arr;
- size_t inputlen;
- size_t i = 0;
- size_t elen = 0;
-
- *cntp = 0;
-
-        /* First count the maximum number of fields so we know how large an
-         * array we need to allocate. Escapes are ignored. */
- for (s = input; *s; s++) {
- if (*s == sep)
- fieldcnt++;
- }
-
- inputlen = (size_t)(s - input);
-
- /* Allocate array and memory for the copied elements in one go. */
- arr = rd_malloc((sizeof(*arr) * fieldcnt) + inputlen + 1);
- p = (char *)(&arr[fieldcnt]);
-
- for (s = input;; s++) {
- rd_bool_t at_end = *s == '\0';
- rd_bool_t is_esc = next_esc;
-
- /* If we've reached the end, jump to done to finish
- * the current field. */
- if (at_end)
- goto done;
-
- if (unlikely(!is_esc && *s == '\\')) {
- next_esc = rd_true;
- continue;
- }
-
- next_esc = rd_false;
-
- /* Strip leading whitespaces for each element */
- if (!is_esc && elen == 0 && isspace((int)*s))
- continue;
-
- if (likely(is_esc || *s != sep)) {
- char c = *s;
- if (is_esc) {
-                                /* Perform some common escape substitutions.
-                                 * If not known we'll just keep the escaped
-                                 * character as is (probably the separator). */
- switch (c) {
- case 't':
- c = '\t';
- break;
- case 'n':
- c = '\n';
- break;
- case 'r':
- c = '\r';
- break;
- case '0':
- c = '\0';
- break;
- }
- }
- p[elen++] = c;
- continue;
- }
-
- done:
- /* Strip trailing whitespaces */
- while (elen > 0 && isspace((int)p[elen - 1]))
- elen--;
-
- /* End of field */
- if (elen == 0 && skip_empty) {
- if (at_end)
- break;
- continue;
- }
-
- rd_assert(i < fieldcnt);
-
- /* Nul-terminate the element */
- p[elen++] = '\0';
- /* Assign element to array */
- arr[i] = p;
- /* Update next element pointer past the written bytes */
- p += elen;
- /* Reset element length */
- elen = 0;
- /* Advance array element index */
- i++;
-
- if (at_end)
- break;
- }
-
- *cntp = i;
-
- return arr;
-}
-
-/**
- * @brief Unittest for rd_string_split()
- */
-static int ut_string_split(void) {
- static const struct {
- const char *input;
- const char sep;
- rd_bool_t skip_empty;
- size_t exp_cnt;
- const char *exp[16];
- } strs[] = {
- {"just one field", ',', rd_true, 1, {"just one field"}},
- /* Empty with skip_empty */
- {"", ',', rd_true, 0},
- /* Empty without skip_empty */
- {"", ',', rd_false, 1, {""}},
- {
- ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v",
- ',',
- rd_true,
- 11,
- {"a", "b", "c", "d", "e", "f", "ghijk", "lmn", "opq",
- "r s t u", "v"},
- },
- {
- ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v",
- ',',
- rd_false,
- 13,
- {"", "a", "b", "", "c", "d", "e", "f", "ghijk", "lmn", "opq",
- "r s t u", "v"},
- },
- {" this is an \\,escaped comma,\\,,\\\\, "
- "and this is an unbalanced escape: \\\\\\\\\\\\\\",
- ',',
- rd_true,
- 4,
- {"this is an ,escaped comma", ",", "\\",
- "and this is an unbalanced escape: \\\\\\"}},
- {
- "using|another ||\\|d|elimiter",
- '|',
- rd_false,
- 5,
- {"using", "another", "", "|d", "elimiter"},
- },
- {NULL},
- };
- size_t i;
-
- RD_UT_BEGIN();
-
- for (i = 0; strs[i].input; i++) {
- char **ret;
- size_t cnt = 12345;
- size_t j;
-
- ret = rd_string_split(strs[i].input, strs[i].sep,
- strs[i].skip_empty, &cnt);
- RD_UT_ASSERT(ret != NULL, "#%" PRIusz ": Did not expect NULL",
- i);
- RD_UT_ASSERT(cnt == strs[i].exp_cnt,
- "#%" PRIusz
- ": "
- "Expected %" PRIusz " elements, got %" PRIusz,
- i, strs[i].exp_cnt, cnt);
-
- for (j = 0; j < cnt; j++)
- RD_UT_ASSERT(!strcmp(strs[i].exp[j], ret[j]),
- "#%" PRIusz ": Expected string %" PRIusz
- " to be \"%s\", not \"%s\"",
- i, j, strs[i].exp[j], ret[j]);
-
- rd_free(ret);
- }
-
- RD_UT_PASS();
-}
-
-/**
- * @brief Unittests for strings
- */
-int unittest_string(void) {
- int fails = 0;
-
- fails += ut_strcasestr();
- fails += ut_string_split();
-
- return fails;
-}
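rd_string_render() above invokes the lookup callback twice per %{key}: first with a NULL buffer to learn the value's length, then with a buffer guaranteed large enough. A sketch of a conforming callback (the key name and value are hypothetical):

    static ssize_t my_lookup(const char *key, char *buf, size_t size,
                             void *opaque) {
            const char *val;

            if (!strcmp(key, "hostname"))
                    val = "broker1.example.com";
            else
                    return -1;           /* Unknown key: fails the render */

            if (buf)                     /* Second pass: write the value */
                    snprintf(buf, size, "%s", val);

            return (ssize_t)strlen(val); /* Bytes wanted, even if truncated */
    }

    /* char *s = rd_string_render("host=%{hostname}:9092", errstr,
     *                            sizeof(errstr), my_lookup, NULL); */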
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h
deleted file mode 100644
index 67ea19401..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDSTRING_H_
-#define _RDSTRING_H_
-
-static RD_INLINE RD_UNUSED void
-rd_strlcpy(char *dst, const char *src, size_t dstsize) {
-#if HAVE_STRLCPY
- (void)strlcpy(dst, src, dstsize);
-#else
- if (likely(dstsize > 0)) {
- size_t srclen = strlen(src);
- size_t copylen = RD_MIN(srclen, dstsize - 1);
- memcpy(dst, src, copylen);
- dst[copylen] = '\0';
- }
-#endif
-}
-
-
-
-char *rd_string_render(
- const char *templ,
- char *errstr,
- size_t errstr_size,
- ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque),
- void *opaque);
-
-
-
-/**
- * @brief An immutable string tuple (name, value) in a single allocation.
- * \p value may be NULL.
- */
-typedef struct rd_strtup_s {
- char *value;
- char name[1]; /* Actual allocation of name + val here */
-} rd_strtup_t;
-
-void rd_strtup_destroy(rd_strtup_t *strtup);
-void rd_strtup_free(void *strtup);
-rd_strtup_t *rd_strtup_new0(const char *name,
- ssize_t name_len,
- const char *value,
- ssize_t value_len);
-rd_strtup_t *rd_strtup_new(const char *name, const char *value);
-rd_strtup_t *rd_strtup_dup(const rd_strtup_t *strtup);
-void *rd_strtup_list_copy(const void *elem, void *opaque);
-
-char *rd_flags2str(char *dst, size_t size, const char **desc, int flags);
-
-unsigned int rd_string_hash(const char *str, ssize_t len);
-
-int rd_strcmp(const char *a, const char *b);
-
-char *_rd_strcasestr(const char *haystack, const char *needle);
-
-char **rd_string_split(const char *input,
- char sep,
- rd_bool_t skip_empty,
- size_t *cntp);
-
-/** @returns "true" if EXPR is true, else "false" */
-#define RD_STR_ToF(EXPR) ((EXPR) ? "true" : "false")
-
-#endif /* _RDSTRING_H_ */
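One property of rd_string_split() worth restating: the array and all element strings live in a single allocation, so one rd_free() releases everything. A usage sketch (input chosen to exercise trimming, escaping and empty-field skipping):

    size_t cnt, i;
    char **fields = rd_string_split(" a , b\\,c ,, d", ',',
                                    rd_true /*skip_empty*/, &cnt);

    /* cnt == 3: "a", "b,c", "d" */
    for (i = 0; i < cnt; i++)
            printf("field %u: '%s'\n", (unsigned)i, fields[i]);

    rd_free(fields);  /* Single free for array + elements */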
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h
deleted file mode 100644
index ecba4154e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * Copyright (c) 2012-2013, Andreas Ă–man
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/*
-
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDSYSQUEUE_H_
-#define _RDSYSQUEUE_H_
-
-#include "queue.h"
-
-/*
- * Complete missing LIST-ops
- */
-
-#ifndef LIST_FOREACH
-#define LIST_FOREACH(var, head, field) \
- for ((var) = ((head)->lh_first); (var); (var) = ((var)->field.le_next))
-#endif
-
-#ifndef LIST_EMPTY
-#define LIST_EMPTY(head) ((head)->lh_first == NULL)
-#endif
-
-#ifndef LIST_FIRST
-#define LIST_FIRST(head) ((head)->lh_first)
-#endif
-
-#ifndef LIST_NEXT
-#define LIST_NEXT(elm, field) ((elm)->field.le_next)
-#endif
-
-#ifndef LIST_INSERT_BEFORE
-#define LIST_INSERT_BEFORE(listelm, elm, field) \
- do { \
- (elm)->field.le_prev = (listelm)->field.le_prev; \
- (elm)->field.le_next = (listelm); \
- *(listelm)->field.le_prev = (elm); \
- (listelm)->field.le_prev = &(elm)->field.le_next; \
- } while (/*CONSTCOND*/ 0)
-#endif
-
-/*
- * Complete missing TAILQ-ops
- */
-
-#ifndef TAILQ_HEAD_INITIALIZER
-#define TAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).tqh_first }
-#endif
-
-#ifndef TAILQ_INSERT_BEFORE
-#define TAILQ_INSERT_BEFORE(listelm, elm, field) \
- do { \
- (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
- (elm)->field.tqe_next = (listelm); \
- *(listelm)->field.tqe_prev = (elm); \
- (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
- } while (0)
-#endif
-
-#ifndef TAILQ_FOREACH
-#define TAILQ_FOREACH(var, head, field) \
- for ((var) = ((head)->tqh_first); (var); \
- (var) = ((var)->field.tqe_next))
-#endif
-
-#ifndef TAILQ_EMPTY
-#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
-#endif
-
-#ifndef TAILQ_FIRST
-#define TAILQ_FIRST(head) ((head)->tqh_first)
-#endif
-
-#ifndef TAILQ_NEXT
-#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
-#endif
-
-#ifndef TAILQ_LAST
-#define TAILQ_LAST(head, headname) \
- (*(((struct headname *)((head)->tqh_last))->tqh_last))
-#endif
-
-#ifndef TAILQ_PREV
-#define TAILQ_PREV(elm, headname, field) \
- (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
-#endif
-
-#ifndef TAILQ_FOREACH_SAFE
-/*
- * TAILQ_FOREACH_SAFE() provides a traversal where the current iterated element
- * may be freed or unlinked.
- * It does not allow freeing or unlinking any other element in the list;
- * in particular, the next element must remain valid.
- */
-#define TAILQ_FOREACH_SAFE(elm, head, field, tmpelm) \
- for ((elm) = TAILQ_FIRST(head); \
- (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1); \
- (elm) = (tmpelm))
-#endif
-
-/*
- * In Mac OS 10.4 and earlier TAILQ_FOREACH_REVERSE was defined
- * differently; redefine it here.
- */
-#ifdef __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
-#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1050
-#undef TAILQ_FOREACH_REVERSE
-#endif
-#endif
-
-#ifndef TAILQ_FOREACH_REVERSE
-#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
- for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
- (var); \
- (var) = \
- (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
-#endif
-
-
-/**
- * Treat the TAILQ as a circular list and return the previous/next entry,
- * possibly wrapping to the end/beginning.
- */
-#define TAILQ_CIRC_PREV(var, head, headname, field) \
- ((var) != TAILQ_FIRST(head) ? TAILQ_PREV(var, headname, field) \
- : TAILQ_LAST(head, headname))
-
-#define TAILQ_CIRC_NEXT(var, head, headname, field) \
- ((var) != TAILQ_LAST(head, headname) ? TAILQ_NEXT(var, field) \
- : TAILQ_FIRST(head))
-
-/*
- * Some extra functions for LIST manipulation
- */
-
-#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \
- do { \
- if (LIST_EMPTY(head)) { \
- LIST_INSERT_HEAD(head, elm, field); \
- } else { \
- elmtype _tmp; \
- LIST_FOREACH(_tmp, head, field) { \
- if (cmpfunc(elm, _tmp) < 0) { \
- LIST_INSERT_BEFORE(_tmp, elm, field); \
- break; \
- } \
- if (!LIST_NEXT(_tmp, field)) { \
- LIST_INSERT_AFTER(_tmp, elm, field); \
- break; \
- } \
- } \
- } \
- } while (0)
-
-#ifndef TAILQ_INSERT_SORTED
-#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \
- do { \
- if (TAILQ_FIRST(head) == NULL) { \
- TAILQ_INSERT_HEAD(head, elm, field); \
- } else { \
- elmtype _tmp; \
- TAILQ_FOREACH(_tmp, head, field) { \
- if (cmpfunc(elm, _tmp) < 0) { \
- TAILQ_INSERT_BEFORE(_tmp, elm, field); \
- break; \
- } \
- if (!TAILQ_NEXT(_tmp, field)) { \
- TAILQ_INSERT_AFTER(head, _tmp, elm, \
- field); \
- break; \
- } \
- } \
- } \
- } while (0)
-#endif
-
-/**
- * @brief Add all elements from \p srchead to \p dsthead using sort
- * comparator \p cmpfunc.
- * \p srchead will be re-initialized on completion.
- */
-#define TAILQ_CONCAT_SORTED(dsthead, srchead, elmtype, field, cmpfunc) \
- do { \
- elmtype _cstmp; \
- elmtype _cstmp2; \
- if (TAILQ_EMPTY(dsthead)) { \
- TAILQ_CONCAT(dsthead, srchead, field); \
- break; \
- } \
- TAILQ_FOREACH_SAFE(_cstmp, srchead, field, _cstmp2) { \
- TAILQ_INSERT_SORTED(dsthead, _cstmp, elmtype, field, \
- cmpfunc); \
- } \
- TAILQ_INIT(srchead); \
- } while (0)
-
-#define TAILQ_MOVE(newhead, oldhead, field) \
- do { \
- if (TAILQ_FIRST(oldhead)) { \
- TAILQ_FIRST(oldhead)->field.tqe_prev = \
- &(newhead)->tqh_first; \
- (newhead)->tqh_first = (oldhead)->tqh_first; \
- (newhead)->tqh_last = (oldhead)->tqh_last; \
- TAILQ_INIT(oldhead); \
- } else \
- TAILQ_INIT(newhead); \
- } while (/*CONSTCOND*/ 0)
-
-
-/* @brief Prepend \p shead to \p dhead */
-#define TAILQ_PREPEND(dhead, shead, headname, field) \
- do { \
- if (unlikely(TAILQ_EMPTY(dhead))) { \
- TAILQ_MOVE(dhead, shead, field); \
- } else if (likely(!TAILQ_EMPTY(shead))) { \
- TAILQ_LAST(shead, headname)->field.tqe_next = \
- TAILQ_FIRST(dhead); \
- TAILQ_FIRST(dhead)->field.tqe_prev = \
- &TAILQ_LAST(shead, headname)->field.tqe_next; \
- TAILQ_FIRST(shead)->field.tqe_prev = \
- &(dhead)->tqh_first; \
- TAILQ_FIRST(dhead) = TAILQ_FIRST(shead); \
- TAILQ_INIT(shead); \
- } \
- } while (0)
-
-/* @brief Insert \p shead after element \p listelm in \p dhead */
-#define TAILQ_INSERT_LIST(dhead, listelm, shead, headname, elmtype, field) \
- do { \
- if (TAILQ_LAST(dhead, headname) == listelm) { \
- TAILQ_CONCAT(dhead, shead, field); \
- } else { \
- elmtype _elm = TAILQ_FIRST(shead); \
- elmtype _last = TAILQ_LAST(shead, headname); \
- elmtype _aft = TAILQ_NEXT(listelm, field); \
- (listelm)->field.tqe_next = _elm; \
- _elm->field.tqe_prev = &(listelm)->field.tqe_next; \
- _last->field.tqe_next = _aft; \
- _aft->field.tqe_prev = &_last->field.tqe_next; \
- TAILQ_INIT((shead)); \
- } \
- } while (0)
-
-/* @brief Insert \p shead before element \p listelm in \p dhead */
-#define TAILQ_INSERT_LIST_BEFORE(dhead, insert_before, shead, headname, \
- elmtype, field) \
- do { \
- if (TAILQ_FIRST(dhead) == insert_before) { \
- TAILQ_PREPEND(dhead, shead, headname, field); \
- } else { \
- elmtype _first = TAILQ_FIRST(shead); \
- elmtype _last = TAILQ_LAST(shead, headname); \
- elmtype _dprev = \
- TAILQ_PREV(insert_before, headname, field); \
- _last->field.tqe_next = insert_before; \
- _dprev->field.tqe_next = _first; \
- (insert_before)->field.tqe_prev = \
- &_last->field.tqe_next; \
- _first->field.tqe_prev = &(_dprev)->field.tqe_next; \
- TAILQ_INIT((shead)); \
- } \
- } while (0)
-
-#ifndef SIMPLEQ_HEAD
-#define SIMPLEQ_HEAD(name, type) \
- struct name { \
- struct type *sqh_first; \
- struct type **sqh_last; \
- }
-#endif
-
-#ifndef SIMPLEQ_ENTRY
-#define SIMPLEQ_ENTRY(type) \
- struct { \
- struct type *sqe_next; \
- }
-#endif
-
-#ifndef SIMPLEQ_FIRST
-#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
-#endif
-
-#ifndef SIMPLEQ_REMOVE_HEAD
-#define SIMPLEQ_REMOVE_HEAD(head, field) \
- do { \
- if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == \
- NULL) \
- (head)->sqh_last = &(head)->sqh_first; \
- } while (0)
-#endif
-
-#ifndef SIMPLEQ_INSERT_TAIL
-#define SIMPLEQ_INSERT_TAIL(head, elm, field) \
- do { \
- (elm)->field.sqe_next = NULL; \
- *(head)->sqh_last = (elm); \
- (head)->sqh_last = &(elm)->field.sqe_next; \
- } while (0)
-#endif
-
-#ifndef SIMPLEQ_INIT
-#define SIMPLEQ_INIT(head) \
- do { \
- (head)->sqh_first = NULL; \
- (head)->sqh_last = &(head)->sqh_first; \
- } while (0)
-#endif
-
-#ifndef SIMPLEQ_INSERT_HEAD
-#define SIMPLEQ_INSERT_HEAD(head, elm, field) \
- do { \
- if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
- (head)->sqh_last = &(elm)->field.sqe_next; \
- (head)->sqh_first = (elm); \
- } while (0)
-#endif
-
-#ifndef SIMPLEQ_FOREACH
-#define SIMPLEQ_FOREACH(var, head, field) \
- for ((var) = SIMPLEQ_FIRST(head); (var) != SIMPLEQ_END(head); \
- (var) = SIMPLEQ_NEXT(var, field))
-#endif
-
-#ifndef SIMPLEQ_INSERT_AFTER
-#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) \
- do { \
- if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == \
- NULL) \
- (head)->sqh_last = &(elm)->field.sqe_next; \
- (listelm)->field.sqe_next = (elm); \
- } while (0)
-#endif
-
-#ifndef SIMPLEQ_END
-#define SIMPLEQ_END(head) NULL
-#endif
-
-#ifndef SIMPLEQ_NEXT
-#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
-#endif
-
-#ifndef SIMPLEQ_HEAD_INITIALIZER
-#define SIMPLEQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).sqh_first }
-#endif
-
-#ifndef SIMPLEQ_EMPTY
-#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
-#endif
-
-
-
-#endif /* _RDSYSQUEUE_H_ */
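The sorted-insert and safe-iteration macros above combine into the usual build-then-teardown pattern; a sketch under hypothetical element types, assuming <stdlib.h> (TAILQ_REMOVE comes from the included queue.h):

    struct item {
            int v;
            TAILQ_ENTRY(item) link;
    };
    TAILQ_HEAD(item_head, item);

    static int item_cmp(const struct item *a, const struct item *b) {
            return (a->v < b->v) ? -1 : (a->v > b->v);
    }

    static void demo(void) {
            struct item_head head = TAILQ_HEAD_INITIALIZER(head);
            struct item *it, *tmp;
            int i;

            for (i = 0; i < 4; i++) {
                    it    = calloc(1, sizeof(*it));
                    it->v = (i * 7) % 5;  /* Unordered input: 0, 2, 4, 1 */
                    TAILQ_INSERT_SORTED(&head, it, struct item *, link,
                                        item_cmp);
            }

            /* Safe traversal: the current element may be freed. */
            TAILQ_FOREACH_SAFE(it, &head, link, tmp) {
                    TAILQ_REMOVE(&head, it, link);
                    free(it);
            }
    }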
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h
deleted file mode 100644
index 4a3e5d855..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDTIME_H_
-#define _RDTIME_H_
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) \
- do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
- } while (0)
-
-#define TIMESPEC_TO_TIMEVAL(tv, ts) \
- do { \
- (tv)->tv_sec = (ts)->tv_sec; \
- (tv)->tv_usec = (ts)->tv_nsec / 1000; \
- } while (0)
-#endif
-
-#define TIMESPEC_TO_TS(ts) \
- (((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000))
-
-#define TS_TO_TIMESPEC(ts, tsx) \
- do { \
- (ts)->tv_sec = (tsx) / 1000000; \
- (ts)->tv_nsec = ((tsx) % 1000000) * 1000; \
- if ((ts)->tv_nsec >= 1000000000LLU) { \
- (ts)->tv_sec++; \
- (ts)->tv_nsec -= 1000000000LLU; \
- } \
- } while (0)
-
-#define TIMESPEC_CLEAR(ts) ((ts)->tv_sec = (ts)->tv_nsec = 0LLU)
-
-
-#define RD_POLL_INFINITE -1
-#define RD_POLL_NOWAIT 0
-
-
-#if RD_UNITTEST_QPC_OVERRIDES
-/* Overrides for rd_clock() unittest using QPC on Windows */
-BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency);
-BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount);
-#define rd_QueryPerformanceFrequency(IFREQ) \
- rd_ut_QueryPerformanceFrequency(IFREQ)
-#define rd_QueryPerformanceCounter(PC) rd_ut_QueryPerformanceCounter(PC)
-#else
-#define rd_QueryPerformanceFrequency(IFREQ) QueryPerformanceFrequency(IFREQ)
-#define rd_QueryPerformanceCounter(PC) QueryPerformanceCounter(PC)
-#endif
-
-/**
- * @returns a monotonically increasing clock in microseconds.
- * @remark There is no monotonic clock on OSX; the system time
- * is returned instead.
- */
-static RD_INLINE rd_ts_t rd_clock(void) RD_UNUSED;
-static RD_INLINE rd_ts_t rd_clock(void) {
-#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29)
- /* No monotonic clock on Darwin */
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec;
-#elif defined(_WIN32)
- LARGE_INTEGER now;
- static RD_TLS double freq = 0.0;
- if (!freq) {
- LARGE_INTEGER ifreq;
- rd_QueryPerformanceFrequency(&ifreq);
- /* Convert frequency to double to avoid overflow in
- * return statement */
- freq = (double)ifreq.QuadPart / 1000000.0;
- }
- rd_QueryPerformanceCounter(&now);
- return (rd_ts_t)((double)now.QuadPart / freq);
-#else
- struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return ((rd_ts_t)ts.tv_sec * 1000000LLU) +
- ((rd_ts_t)ts.tv_nsec / 1000LLU);
-#endif
-}
-
-
-/**
- * @returns UTC wallclock time as the number of microseconds since
- * the beginning of the epoch.
- */
-static RD_INLINE RD_UNUSED rd_ts_t rd_uclock(void) {
- struct timeval tv;
- rd_gettimeofday(&tv, NULL);
- return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec;
-}
-
-
-
-/**
- * Thread-safe version of ctime() that strips the trailing newline.
- */
-static RD_INLINE const char *rd_ctime(const time_t *t) RD_UNUSED;
-static RD_INLINE const char *rd_ctime(const time_t *t) {
- static RD_TLS char ret[27];
-
-#ifndef _WIN32
- ctime_r(t, ret);
-#else
- ctime_s(ret, sizeof(ret), t);
-#endif
- ret[25] = '\0';
-
- return ret;
-}
-
-
-/**
- * @brief Convert a relative millisecond timeout to microseconds,
- * properly handling RD_POLL_NOWAIT, etc.
- */
-static RD_INLINE rd_ts_t rd_timeout_us(int timeout_ms) {
- if (timeout_ms <= 0)
- return (rd_ts_t)timeout_ms;
- else
- return (rd_ts_t)timeout_ms * 1000;
-}
-
-/**
- * @brief Convert a relative microsecond timeout to milliseconds,
- * properly handling RD_POLL_NOWAIT, etc.
- */
-static RD_INLINE int rd_timeout_ms(rd_ts_t timeout_us) {
- if (timeout_us <= 0)
- return (int)timeout_us;
- else
- /* + 999: Round up to millisecond to
- * avoid busy-looping during the last
- * millisecond. */
- return (int)((timeout_us + 999) / 1000);
-}
-
-
-/**
- * @brief Initialize an absolute timeout based on the provided \p timeout_ms
- *
- * To be used with rd_timeout_adjust().
- *
- * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT.
- *
- * @returns the absolute timeout which should later be passed
- * to rd_timeout_adjust().
- */
-static RD_INLINE rd_ts_t rd_timeout_init(int timeout_ms) {
- if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT)
- return timeout_ms;
-
- return rd_clock() + ((rd_ts_t)timeout_ms * 1000);
-}
-
-
-/**
- * @brief Initialize an absolute timespec timeout based on the provided
- * relative \p timeout_us.
- *
- * To be used with cnd_timedwait_abs().
- *
- * Honours RD_POLL_INFINITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec).
- */
-static RD_INLINE void rd_timeout_init_timespec_us(struct timespec *tspec,
- rd_ts_t timeout_us) {
- if (timeout_us == RD_POLL_INFINITE || timeout_us == RD_POLL_NOWAIT) {
- tspec->tv_sec = timeout_us;
- tspec->tv_nsec = 0;
- } else {
-#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29)
- struct timeval tv;
- gettimeofday(&tv, NULL);
- TIMEVAL_TO_TIMESPEC(&tv, tspec);
-#else
- timespec_get(tspec, TIME_UTC);
-#endif
- tspec->tv_sec += timeout_us / 1000000;
- tspec->tv_nsec += (timeout_us % 1000000) * 1000;
- if (tspec->tv_nsec >= 1000000000) {
- tspec->tv_nsec -= 1000000000;
- tspec->tv_sec++;
- }
- }
-}
-
-/**
- * @brief Initialize an absolute timespec timeout based on the provided
- * relative \p timeout_ms.
- *
- * To be used with cnd_timedwait_abs().
- *
- * Honours RD_POLL_INFINITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec).
- */
-static RD_INLINE void rd_timeout_init_timespec(struct timespec *tspec,
- int timeout_ms) {
- if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT) {
- tspec->tv_sec = timeout_ms;
- tspec->tv_nsec = 0;
- } else {
-#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29)
- struct timeval tv;
- gettimeofday(&tv, NULL);
- TIMEVAL_TO_TIMESPEC(&tv, tspec);
-#else
- timespec_get(tspec, TIME_UTC);
-#endif
- tspec->tv_sec += timeout_ms / 1000;
- tspec->tv_nsec += (timeout_ms % 1000) * 1000000;
- if (tspec->tv_nsec >= 1000000000) {
- tspec->tv_nsec -= 1000000000;
- tspec->tv_sec++;
- }
- }
-}
-
-
-/**
- * @brief Same as rd_timeout_remains() but with microsecond precision
- */
-static RD_INLINE rd_ts_t rd_timeout_remains_us(rd_ts_t abs_timeout) {
- rd_ts_t timeout_us;
-
- if (abs_timeout == RD_POLL_INFINITE || abs_timeout == RD_POLL_NOWAIT)
- return (rd_ts_t)abs_timeout;
-
- timeout_us = abs_timeout - rd_clock();
- if (timeout_us <= 0)
- return RD_POLL_NOWAIT;
- else
- return timeout_us;
-}
-
-/**
- * @returns the remaining timeout for timeout \p abs_timeout previously set
- * up by rd_timeout_init()
- *
- * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT.
- *
- * @remark Check explicitly for 0 (NOWAIT) to determine whether there is
- * no remaining time to wait. Any other value, even negative (INFINITE),
- * means there is remaining time.
- * rd_timeout_expired() can be used to check the return value
- * in a bool fashion.
- */
-static RD_INLINE int rd_timeout_remains(rd_ts_t abs_timeout) {
- return rd_timeout_ms(rd_timeout_remains_us(abs_timeout));
-}
-
-
-
-/**
- * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms,
- * and operates on the return value of rd_timeout_remains().
- */
-static RD_INLINE int rd_timeout_remains_limit0(int remains_ms, int limit_ms) {
- if (remains_ms == RD_POLL_INFINITE || remains_ms > limit_ms)
- return limit_ms;
- else
- return remains_ms;
-}
-
-/**
- * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms
- */
-static RD_INLINE int rd_timeout_remains_limit(rd_ts_t abs_timeout,
- int limit_ms) {
- return rd_timeout_remains_limit0(rd_timeout_remains(abs_timeout),
- limit_ms);
-}
-
-/**
- * @returns 1 if the **relative** timeout as returned by rd_timeout_remains()
- * has timed out / expired, else 0.
- */
-static RD_INLINE int rd_timeout_expired(int timeout_ms) {
- return timeout_ms == RD_POLL_NOWAIT;
-}
-
-#endif /* _RDTIME_H_ */
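rd_timeout_init(), rd_timeout_remains() and rd_timeout_expired() together implement the absolute-deadline wait loop used throughout the library: convert the relative timeout once, then recompute the remaining budget per iteration. A sketch (wait_for_event() and done are hypothetical):

    rd_ts_t abs_timeout = rd_timeout_init(5000);  /* Expires 5 s from now */
    rd_bool_t done = rd_false;

    while (!done) {
            int remains_ms = rd_timeout_remains(abs_timeout);

            if (rd_timeout_expired(remains_ms))
                    break;  /* Deadline passed (remains == RD_POLL_NOWAIT) */

            /* Spurious wakeups simply re-enter the loop with a
             * smaller remaining budget. */
            done = wait_for_event(remains_ms);
    }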
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h
deleted file mode 100644
index 8f3625512..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * librd - Rapid Development C library
- *
- * Copyright (c) 2012, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RDTYPES_H_
-#define _RDTYPES_H_
-
-#include <inttypes.h>
-
-
-/*
- * Fundamental types
- */
-
-
-/* Timestamp (microseconds).
- * Struct members with this type usually have the "ts_" prefix for
- * the internal monotonic clock timestamp, or "wts_" for wall clock timestamp.
- */
-typedef int64_t rd_ts_t;
-
-#define RD_TS_MAX INT64_MAX
-
-
-typedef uint8_t rd_bool_t;
-#define rd_true 1
-#define rd_false 0
-
-
-/**
- * @enum Denotes an async or sync operation
- */
-typedef enum {
- RD_SYNC = 0, /**< Synchronous/blocking */
- RD_ASYNC, /**< Asynchronous/non-blocking */
-} rd_async_t;
-
-
-/**
- * @enum Instruct function to acquire or not to acquire a lock
- */
-typedef enum {
- RD_DONT_LOCK = 0, /**< Do not acquire lock */
- RD_DO_LOCK = 1, /**< Do acquire lock */
-} rd_dolock_t;
-
-
-/*
- * Helpers
- */
-
-/**
- * @brief Overflow-safe type-agnostic compare for use in cmp functions.
- *
- * @warning A and B may be evaluated multiple times.
- *
- * @returns -1, 0 or 1.
- */
-#define RD_CMP(A, B) (int)((A) < (B) ? -1 : ((A) > (B)))
-
-
-#endif /* _RDTYPES_H_ */
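RD_CMP() exists because the common (int)(a - b) comparator idiom can overflow for 64-bit or unsigned operands; the macro compares the values directly. A sketch of it as a qsort() comparator for the rd_ts_t timestamps defined above:

    static int ts_cmp(const void *a, const void *b) {
            /* Safe across the full int64_t range, unlike subtraction. */
            return RD_CMP(*(const rd_ts_t *)a, *(const rd_ts_t *)b);
    }

    /* qsort(ts_array, ts_cnt, sizeof(rd_ts_t), ts_cmp); */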
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c
deleted file mode 100644
index aa14b6aa8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef _WIN32
-#define RD_UNITTEST_QPC_OVERRIDES 1
-#endif
-
-#include "rd.h"
-#include "rdunittest.h"
-
-#include "rdvarint.h"
-#include "rdbuf.h"
-#include "crc32c.h"
-#include "rdmurmur2.h"
-#include "rdfnv1a.h"
-#if WITH_HDRHISTOGRAM
-#include "rdhdrhistogram.h"
-#endif
-#include "rdkafka_int.h"
-#include "rdkafka_broker.h"
-#include "rdkafka_request.h"
-
-#include "rdsysqueue.h"
-#include "rdkafka_sasl_oauthbearer.h"
-#if WITH_OAUTHBEARER_OIDC
-#include "rdkafka_sasl_oauthbearer_oidc.h"
-#endif
-#include "rdkafka_msgset.h"
-#include "rdkafka_txnmgr.h"
-
-rd_bool_t rd_unittest_assert_on_failure = rd_false;
-rd_bool_t rd_unittest_on_ci = rd_false;
-rd_bool_t rd_unittest_slow = rd_false;
-
-#if ENABLE_CODECOV
-/**
- * @name Code coverage
- * @{
- */
-
-static rd_atomic64_t rd_ut_covnrs[RD_UT_COVNR_MAX + 1];
-
-void rd_ut_coverage(const char *file, const char *func, int line, int covnr) {
- rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX);
- rd_atomic64_add(&rd_ut_covnrs[covnr], 1);
-}
-
-
-int64_t
-rd_ut_coverage_check(const char *file, const char *func, int line, int covnr) {
- int64_t r;
-
- rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX);
-
- r = rd_atomic64_get(&rd_ut_covnrs[covnr]);
-
- if (!r) {
- fprintf(stderr,
- "\033[31m"
- "RDUT: FAIL: %s:%d: %s: "
- "Code coverage nr %d: FAIL: "
- "code path not executed: "
- "perform `grep -RnF 'COVERAGE(%d)' src/` to find "
- "source location"
- "\033[0m\n",
- file, line, func, covnr, covnr);
- if (rd_unittest_assert_on_failure)
- rd_assert(!*"unittest failure");
- return 0;
- }
-
- fprintf(stderr,
- "\033[34mRDUT: CCOV: %s:%d: %s: Code coverage nr %d: "
- "PASS (%" PRId64 " code path execution(s))\033[0m\n",
- file, line, func, covnr, r);
-
- return r;
-}
-/**@}*/
-
-#endif /* ENABLE_CODECOV */
-
-
-/**
- * @name Test rdsysqueue.h / queue.h
- * @{
- */
-
-struct ut_tq {
- TAILQ_ENTRY(ut_tq) link;
- int v;
-};
-
-TAILQ_HEAD(ut_tq_head, ut_tq);
-
-struct ut_tq_args {
- const char *name; /**< Descriptive test name */
- struct {
- int base; /**< Base value */
- int cnt; /**< Number of elements to add */
- int step; /**< Value step */
- } q[3]; /**< Queue element definition */
- int qcnt; /**< Number of defs in .q */
- int exp[16]; /**< Expected value order after join */
-};
-
-/**
- * @brief Find the previous element (insert position) for
- * value \p val in list \p head or NULL if \p val is less than
- * the first element in \p head.
- * @remarks \p head must be sorted in ascending order.
- */
-static struct ut_tq *ut_tq_find_prev_pos(const struct ut_tq_head *head,
- int val) {
- struct ut_tq *e, *prev = NULL;
-
- TAILQ_FOREACH(e, head, link) {
- if (e->v > val)
- return prev;
- prev = e;
- }
-
- return prev;
-}
-
-static int ut_tq_test(const struct ut_tq_args *args) {
- int totcnt = 0;
- int fails = 0;
- struct ut_tq_head *tqh[3] = {NULL, NULL, NULL};
- struct ut_tq *e, *insert_after;
- int i, qi;
-
- RD_UT_SAY("Testing TAILQ: %s", args->name);
-
- /*
- * Verify TAILQ_INSERT_LIST:
- * For each insert position test:
- * - create two lists: tqh 0 and 1
- * - add entries to both lists
- * - insert list 1 into 0
- * - verify expected order and correctness
- */
-
- /* Use heap allocated heads to let valgrind/asan assist
- * in detecting corruption. */
-
- for (qi = 0; qi < args->qcnt; qi++) {
- tqh[qi] = rd_calloc(1, sizeof(*tqh[qi]));
- TAILQ_INIT(tqh[qi]);
-
- for (i = 0; i < args->q[qi].cnt; i++) {
- e = rd_malloc(sizeof(*e));
- e->v = args->q[qi].base + (i * args->q[qi].step);
- TAILQ_INSERT_TAIL(tqh[qi], e, link);
- }
-
- totcnt += args->q[qi].cnt;
- }
-
- for (qi = 1; qi < args->qcnt; qi++) {
- insert_after = ut_tq_find_prev_pos(tqh[0], args->q[qi].base);
- if (!insert_after) {
- /* Insert position is head of list,
- * do two-step concat+move */
- TAILQ_PREPEND(tqh[0], tqh[qi], ut_tq_head, link);
- } else {
- TAILQ_INSERT_LIST(tqh[0], insert_after, tqh[qi],
- ut_tq_head, struct ut_tq *, link);
- }
-
- RD_UT_ASSERT(TAILQ_EMPTY(tqh[qi]), "expected empty tqh[%d]",
- qi);
- RD_UT_ASSERT(!TAILQ_EMPTY(tqh[0]), "expected non-empty tqh[0]");
-
- memset(tqh[qi], (int)'A', sizeof(*tqh[qi]));
- rd_free(tqh[qi]);
- }
-
- RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1],
- "TAILQ_LAST val %d, expected %d",
- TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]);
-
- /* Add sentinel value to verify that INSERT_TAIL works
- * after INSERT_LIST */
- e = rd_malloc(sizeof(*e));
- e->v = 99;
- TAILQ_INSERT_TAIL(tqh[0], e, link);
- totcnt++;
-
- i = 0;
- TAILQ_FOREACH(e, tqh[0], link) {
- if (i >= totcnt) {
- RD_UT_WARN(
- "Too many elements in list tqh[0]: "
- "idx %d > totcnt %d: element %p (value %d)",
- i, totcnt, e, e->v);
- fails++;
- } else if (e->v != args->exp[i]) {
- RD_UT_WARN(
- "Element idx %d/%d in tqh[0] has value %d, "
- "expected %d",
- i, totcnt, e->v, args->exp[i]);
- fails++;
- } else if (i == totcnt - 1 &&
- e != TAILQ_LAST(tqh[0], ut_tq_head)) {
- RD_UT_WARN("TAILQ_LAST == %p, expected %p",
- TAILQ_LAST(tqh[0], ut_tq_head), e);
- fails++;
- }
- i++;
- }
-
- /* Then scan it in reverse */
- i = totcnt - 1;
- TAILQ_FOREACH_REVERSE(e, tqh[0], ut_tq_head, link) {
- if (i < 0) {
- RD_UT_WARN(
- "REVERSE: Too many elements in list tqh[0]: "
- "idx %d < 0: element %p (value %d)",
- i, e, e->v);
- fails++;
- } else if (e->v != args->exp[i]) {
- RD_UT_WARN(
- "REVERSE: Element idx %d/%d in tqh[0] has "
- "value %d, expected %d",
- i, totcnt, e->v, args->exp[i]);
- fails++;
- } else if (i == totcnt - 1 &&
- e != TAILQ_LAST(tqh[0], ut_tq_head)) {
- RD_UT_WARN("REVERSE: TAILQ_LAST == %p, expected %p",
- TAILQ_LAST(tqh[0], ut_tq_head), e);
- fails++;
- }
- i--;
- }
-
- RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1],
- "TAILQ_LAST val %d, expected %d",
- TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]);
-
- while ((e = TAILQ_FIRST(tqh[0]))) {
- TAILQ_REMOVE(tqh[0], e, link);
- rd_free(e);
- }
-
- rd_free(tqh[0]);
-
- return fails;
-}
-
-
-static int unittest_sysqueue(void) {
- const struct ut_tq_args args[] = {
- {"empty tqh[0]",
- {{0, 0, 0}, {0, 3, 1}},
- 2,
- {0, 1, 2, 99 /*sentinel*/}},
- {"prepend 1,0",
- {{10, 3, 1}, {0, 3, 1}},
- 2,
- {0, 1, 2, 10, 11, 12, 99}},
- {"prepend 2,1,0",
- {
- {10, 3, 1}, /* 10, 11, 12 */
- {5, 3, 1}, /* 5, 6, 7 */
- {0, 2, 1} /* 0, 1 */
- },
- 3,
- {0, 1, 5, 6, 7, 10, 11, 12, 99}},
- {"insert 1", {{0, 3, 2}, {1, 2, 2}}, 2, {0, 1, 3, 2, 4, 99}},
- {"insert 1,2",
- {
- {0, 3, 3}, /* 0, 3, 6 */
- {1, 2, 3}, /* 1, 4 */
- {2, 1, 3} /* 2 */
- },
- 3,
- {0, 1, 2, 4, 3, 6, 99}},
- {"append 1",
- {{0, 5, 1}, {5, 5, 1}},
- 2,
- {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99}},
- {"append 1,2",
- {
- {0, 5, 1}, /* 0, 1, 2, 3, 4 */
- {5, 5, 1}, /* 5, 6, 7, 8, 9 */
- {11, 2, 1} /* 11, 12 */
- },
- 3,
- {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 99}},
- {
- "insert 1,0,2",
- {
- {5, 3, 1}, /* 5, 6, 7 */
- {0, 1, 1}, /* 0 */
- {10, 2, 1} /* 10, 11 */
- },
- 3,
- {0, 5, 6, 7, 10, 11, 99},
- },
- {
- "insert 2,0,1",
- {
- {5, 3, 1}, /* 5, 6, 7 */
- {10, 2, 1}, /* 10, 11 */
- {0, 1, 1} /* 0 */
- },
- 3,
- {0, 5, 6, 7, 10, 11, 99},
- },
- {NULL}};
- int i;
- int fails = 0;
-
- for (i = 0; args[i].name != NULL; i++)
- fails += ut_tq_test(&args[i]);
-
- RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails);
-
- RD_UT_PASS();
-}
-
-/**@}*/
-
-
-/**
- * @name rd_clock() unittests
- * @{
- */
-
-#if RD_UNITTEST_QPC_OVERRIDES
-
-/**
- * These values are based on a machine with a QueryPerformanceCounter
- * frequency of 14318180, which would cause the original rd_clock()
- * calculation to overflow after about 8 days.
- * Details:
- * https://github.com/confluentinc/confluent-kafka-dotnet/issues/603#issuecomment-417274540
- */
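-
-/* Why ~8 days: the naive calculation `counter * 1000000 / freq` overflows
- * int64 once counter * 1000000 exceeds 2^63, i.e. counter > ~9.2e12 ticks;
- * at 14318180 ticks/s that is ~644000 seconds, or roughly 7.5 days. */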
-
-static const int64_t rd_ut_qpc_freq = 14318180;
-static int64_t rd_ut_qpc_now;
-
-BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency) {
- lpFrequency->QuadPart = rd_ut_qpc_freq;
- return TRUE;
-}
-
-BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount) {
- lpPerformanceCount->QuadPart = rd_ut_qpc_now * rd_ut_qpc_freq;
- return TRUE;
-}
-
-static int unittest_rdclock(void) {
- rd_ts_t t1, t2;
-
- /* First let "uptime" be fresh boot (0). */
- rd_ut_qpc_now = 0;
- t1 = rd_clock();
- rd_ut_qpc_now++;
- t2 = rd_clock();
- RD_UT_ASSERT(t2 == t1 + (1 * 1000000),
- "Expected t2 %" PRId64 " to be 1s more than t1 %" PRId64,
- t2, t1);
-
- /* Then skip forward to 8 days, which should trigger the
- * overflow in a faulty implementation. */
- rd_ut_qpc_now = 8 * 86400;
- t2 = rd_clock();
- RD_UT_ASSERT(t2 == t1 + (8LL * 86400 * 1000000),
- "Expected t2 %" PRId64
- " to be 8 days larger than t1 %" PRId64,
- t2, t1);
-
-        /* And make sure we can run on a system with 38 years of uptime... */
- rd_ut_qpc_now = 38 * 365 * 86400;
- t2 = rd_clock();
- RD_UT_ASSERT(t2 == t1 + (38LL * 365 * 86400 * 1000000),
- "Expected t2 %" PRId64
- " to be 38 years larger than t1 %" PRId64,
- t2, t1);
-
- RD_UT_PASS();
-}
-#endif
-
-
-
-/**@}*/
-
-extern int unittest_string(void);
-extern int unittest_cgrp(void);
-#if WITH_SASL_SCRAM
-extern int unittest_scram(void);
-#endif
-extern int unittest_assignors(void);
-extern int unittest_map(void);
-#if WITH_CURL
-extern int unittest_http(void);
-#endif
-#if WITH_OAUTHBEARER_OIDC
-extern int unittest_sasl_oauthbearer_oidc(void);
-#endif
-
-int rd_unittest(void) {
- int fails = 0;
- const struct {
- const char *name;
- int (*call)(void);
- } unittests[] = {
- {"sysqueue", unittest_sysqueue},
- {"string", unittest_string},
- {"map", unittest_map},
- {"rdbuf", unittest_rdbuf},
- {"rdvarint", unittest_rdvarint},
- {"crc32c", unittest_rd_crc32c},
- {"msg", unittest_msg},
- {"murmurhash", unittest_murmur2},
- {"fnv1a", unittest_fnv1a},
-#if WITH_HDRHISTOGRAM
- {"rdhdrhistogram", unittest_rdhdrhistogram},
-#endif
-#ifdef _WIN32
- {"rdclock", unittest_rdclock},
-#endif
- {"conf", unittest_conf},
- {"broker", unittest_broker},
- {"request", unittest_request},
-#if WITH_SASL_OAUTHBEARER
- {"sasl_oauthbearer", unittest_sasl_oauthbearer},
-#endif
- {"aborted_txns", unittest_aborted_txns},
- {"cgrp", unittest_cgrp},
-#if WITH_SASL_SCRAM
- {"scram", unittest_scram},
-#endif
- {"assignors", unittest_assignors},
-#if WITH_CURL
- {"http", unittest_http},
-#endif
-#if WITH_OAUTHBEARER_OIDC
- {"sasl_oauthbearer_oidc", unittest_sasl_oauthbearer_oidc},
-#endif
- {NULL}
- };
- int i;
- const char *match = rd_getenv("RD_UT_TEST", NULL);
- int cnt = 0;
-
- if (rd_getenv("RD_UT_ASSERT", NULL))
- rd_unittest_assert_on_failure = rd_true;
- if (rd_getenv("CI", NULL)) {
- RD_UT_SAY("Unittests running on CI");
- rd_unittest_on_ci = rd_true;
- }
-
- if (rd_unittest_on_ci || (ENABLE_DEVEL + 0)) {
- RD_UT_SAY("Unittests will not error out on slow CPUs");
- rd_unittest_slow = rd_true;
- }
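-
-        /* Illustration (the test binary name is hypothetical): run only
-         * the sysqueue unittest, asserting on the first failure:
-         *   RD_UT_TEST=sysqueue RD_UT_ASSERT=1 ./test_binary */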
-
- rd_kafka_global_init();
-
-#if ENABLE_CODECOV
- for (i = 0; i < RD_UT_COVNR_MAX + 1; i++)
- rd_atomic64_init(&rd_ut_covnrs[i], 0);
-#endif
-
- for (i = 0; unittests[i].name; i++) {
- int f;
-
- if (match && !strstr(unittests[i].name, match))
- continue;
-
- f = unittests[i].call();
- RD_UT_SAY("unittest: %s: %4s\033[0m", unittests[i].name,
- f ? "\033[31mFAIL" : "\033[32mPASS");
- fails += f;
- cnt++;
- }
-
-#if ENABLE_CODECOV
-#if FIXME /* This check only works if all tests that use coverage checks \
- * are run, which we can't really know, so disable until we \
- * know what to do with this. */
- if (!match) {
- /* Verify all code paths were covered */
- int cov_fails = 0;
- for (i = 0; i < RD_UT_COVNR_MAX + 1; i++) {
- if (!RD_UT_COVERAGE_CHECK(i))
- cov_fails++;
- }
- if (cov_fails > 0)
- RD_UT_SAY("%d code coverage failure(s) (ignored)\n",
- cov_fails);
- }
-#endif
-#endif
-
- if (!cnt && match)
- RD_UT_WARN("No unittests matching \"%s\"", match);
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h
deleted file mode 100644
index a15488568..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RD_UNITTEST_H
-#define _RD_UNITTEST_H
-
-#include <stdio.h>
-
-
-extern rd_bool_t rd_unittest_assert_on_failure;
-extern rd_bool_t rd_unittest_on_ci;
-extern rd_bool_t rd_unittest_slow;
-
-#define ENABLE_CODECOV ENABLE_DEVEL
-
-
-/**
- * @brief Begin single unit-test function (optional).
- * Currently only used for logging.
- */
-#define RD_UT_BEGIN() \
- fprintf(stderr, "\033[34mRDUT: INFO: %s:%d: %s: BEGIN: \033[0m\n", \
- __FILE__, __LINE__, __FUNCTION__)
-
-
-/**
- * @brief Fail the current unit-test function.
- */
-#define RD_UT_FAIL(...) \
- do { \
- fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ", __FILE__, \
- __LINE__, __FUNCTION__); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\033[0m\n"); \
- if (rd_unittest_assert_on_failure) \
- rd_assert(!*"unittest failure"); \
- return 1; \
- } while (0)
-
-/**
- * @brief Pass the current unit-test function
- */
-#define RD_UT_PASS() \
- do { \
- fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \
- __FILE__, __LINE__, __FUNCTION__); \
- return 0; \
- } while (0)
-
-/**
- * @brief Skip the current unit-test function
- */
-#define RD_UT_SKIP(...) \
- do { \
- fprintf(stderr, "\033[33mRDUT: SKIP: %s:%d: %s: ", __FILE__, \
- __LINE__, __FUNCTION__); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\033[0m\n"); \
- return 0; \
- } while (0)
-
-
-/**
- * @brief Fail unit-test if \p expr is false
- */
-#define RD_UT_ASSERT(expr, ...) \
- do { \
- if (!(expr)) { \
- fprintf(stderr, \
- "\033[31mRDUT: FAIL: %s:%d: %s: " \
- "assert failed: " #expr ": ", \
- __FILE__, __LINE__, __FUNCTION__); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\033[0m\n"); \
- if (rd_unittest_assert_on_failure) \
- rd_assert(expr); \
- return 1; \
- } \
- } while (0)
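-
-/* Illustrative only (not part of librdkafka): a minimal unit-test
- * function built from the macros above. */
-#if 0
-static int unittest_example(void) {
-        int sum = 2 + 2;
-        RD_UT_ASSERT(sum == 4, "expected 4, got %d", sum);
-        RD_UT_PASS();
-}
-#endif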
-
-
-/**
- * @brief Check that value \p V is within inclusive range \p VMIN .. \p VMAX,
- * else asserts.
- *
- * @param VFMT is the printf formatter for \p V's type
- */
-#define RD_UT_ASSERT_RANGE(V, VMIN, VMAX, VFMT) \
- RD_UT_ASSERT((VMIN) <= (V) && (VMAX) >= (V), \
- VFMT " out of range " VFMT " .. " VFMT, (V), (VMIN), \
- (VMAX))
-
-
-/**
- * @brief Log something from a unit-test
- */
-#define RD_UT_SAY(...) \
- do { \
- fprintf(stderr, "RDUT: INFO: %s:%d: %s: ", __FILE__, __LINE__, \
- __FUNCTION__); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\n"); \
- } while (0)
-
-
-/**
- * @brief Warn about something from a unit-test
- */
-#define RD_UT_WARN(...) \
- do { \
- fprintf(stderr, "\033[33mRDUT: WARN: %s:%d: %s: ", __FILE__, \
- __LINE__, __FUNCTION__); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\033[0m\n"); \
- } while (0)
-
-
-
-int rd_unittest(void);
-
-
-
-/**
- * @name Manual code coverage
- *
- * The RD_UT_COVERAGE*() set of macros are used to perform manual
- * code coverage testing.
- * This provides an alternative to object and state inspection by
- * instead verifying that certain code paths (typically error paths)
- * are executed, allowing functional black-box testing on the one hand,
- * combined with precise knowledge of code flow on the other.
- *
- * How to use:
- *
- * 1. First identify a code path that you want to make sure is executed, such
- * as a corner error case, increase RD_UT_COVNR_MAX (below) and use the
- * new max number as the coverage number (COVNR).
- *
- * 2. In the code path add RD_UT_COVERAGE(your_covnr).
- *
- * 3. Write a unittest case that is supposed to trigger the code path.
- *
- * 4. In the unittest, add a call to RD_UT_COVERAGE_CHECK(your_covnr) at the
- * point where you expect the code path to have executed.
- *
- * 5. RD_UT_COVERAGE_CHECK(your_covnr) will fail the current test, but not
- * return from your test function, so you need to `return 1;` if
- *    RD_UT_COVERAGE_CHECK(your_covnr) returns 0, e.g.:
- *
- * if (!RD_UT_COVERAGE_CHECK(your_covnr))
- * return 1; -- failure
- *
- * 6. Run the unit tests with `make unit` in tests/.
- *
- * 7. If the code path was not executed, your test will fail; otherwise it
- *    will pass. A minimal sketch of this workflow follows this comment
- *    block.
- *
- *
- * Code coverage checks require --enable-devel.
- *
- * There is a script in packaging/tools/rdutcoverage.sh that checks that
- * code coverage numbers are not reused.
- *
- * @{
- */
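-
-/* A minimal sketch of the workflow described above; the coverage number
- * and function names are illustrative only and not part of librdkafka. */
-#if 0
-static void some_error_path(void) {
-        /* ... */
-        RD_UT_COVERAGE(1); /* step 2: mark this path as covered */
-}
-
-static int unittest_some_error_path(void) {
-        some_error_path(); /* step 3: trigger the path */
-        if (!RD_UT_COVERAGE_CHECK(1)) /* steps 4-5: verify, else fail */
-                return 1;
-        RD_UT_PASS();
-}
-#endif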
-
-#if ENABLE_CODECOV
-
-/* @define When adding a new code coverage point, use the next value and
- * increment this maximum accordingly. */
-#define RD_UT_COVNR_MAX 1
-
-/**
- * @brief Register code as covered/executed.
- */
-#define RD_UT_COVERAGE(COVNR) \
- rd_ut_coverage(__FILE__, __FUNCTION__, __LINE__, COVNR)
-
-/**
- * @returns how many times the code was executed.
- *          Will fail the unit test (but not return) if the code has not
- *          been executed.
- */
-#define RD_UT_COVERAGE_CHECK(COVNR) \
- rd_ut_coverage_check(__FILE__, __FUNCTION__, __LINE__, COVNR)
-
-
-void rd_ut_coverage(const char *file, const char *func, int line, int covnr);
-int64_t
-rd_ut_coverage_check(const char *file, const char *func, int line, int covnr);
-
-#else
-
-/* Does nothing if ENABLE_CODECOV is not set */
-#define RD_UT_COVERAGE(COVNR) \
- do { \
- } while (0)
-#define RD_UT_COVERAGE_CHECK(COVNR) 1
-
-#endif /* ENABLE_CODECOV */
-
-
-/**@}*/
-
-
-#endif /* _RD_UNITTEST_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c
deleted file mode 100644
index fb0cbd046..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdvarint.h"
-#include "rdunittest.h"
-
-
-static int do_test_rd_uvarint_enc_i64(const char *file,
- int line,
- int64_t num,
- const char *exp,
- size_t exp_size) {
- char buf[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num);
- size_t r;
- int ir;
- rd_buf_t b;
- rd_slice_t slice, bad_slice;
- int64_t ret_num;
-
- if (sz != exp_size || memcmp(buf, exp, exp_size))
- RD_UT_FAIL("i64 encode of %" PRId64
- ": "
- "expected size %" PRIusz " (got %" PRIusz ")\n",
- num, exp_size, sz);
-
- /* Verify with standard decoder */
- r = rd_varint_dec_i64(buf, sz, &ret_num);
- RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r),
- "varint decode failed: %" PRIusz, r);
- RD_UT_ASSERT(ret_num == num,
- "varint decode returned wrong number: "
- "%" PRId64 " != %" PRId64,
- ret_num, num);
-
- /* Verify with slice decoder */
- rd_buf_init(&b, 1, 0);
- rd_buf_push(&b, buf, sizeof(buf), NULL); /* including trailing 0xff
- * garbage which should be
- * ignored by decoder */
- rd_slice_init_full(&slice, &b);
-
- /* Should fail for incomplete reads */
- ir = rd_slice_narrow_copy(&slice, &bad_slice, sz - 1);
- RD_UT_ASSERT(ir, "narrow_copy failed");
- ret_num = -1;
- r = rd_slice_read_varint(&bad_slice, &ret_num);
- RD_UT_ASSERT(RD_UVARINT_DEC_FAILED(r),
- "varint decode failed should have failed, "
- "returned %" PRIusz,
- r);
- r = rd_slice_offset(&bad_slice);
- RD_UT_ASSERT(r == 0,
- "expected slice position to not change, but got %" PRIusz,
- r);
-
- /* Verify proper slice */
- ret_num = -1;
- r = rd_slice_read_varint(&slice, &ret_num);
- RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r),
- "varint decode failed: %" PRIusz, r);
- RD_UT_ASSERT(ret_num == num,
- "varint decode returned wrong number: "
- "%" PRId64 " != %" PRId64,
- ret_num, num);
- RD_UT_ASSERT(r == sz,
- "expected varint decoder to read %" PRIusz
- " bytes, "
- "not %" PRIusz,
- sz, r);
- r = rd_slice_offset(&slice);
- RD_UT_ASSERT(r == sz,
- "expected slice position to change to %" PRIusz
- ", but got %" PRIusz,
- sz, r);
-
-
- rd_buf_destroy(&b);
-
- RD_UT_PASS();
-}
-
-
-int unittest_rdvarint(void) {
- int fails = 0;
-
- fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 0,
- (const char[]) {0}, 1);
- fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 1,
- (const char[]) {0x2}, 1);
- fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -1,
- (const char[]) {0x1}, 1);
- fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 23,
- (const char[]) {0x2e}, 1);
- fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -23,
- (const char[]) {0x2d}, 1);
- fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 253,
- (const char[]) {0xfa, 3}, 2);
- fails += do_test_rd_uvarint_enc_i64(
- __FILE__, __LINE__, 1234567890101112,
- (const char[]) {0xf0, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8);
- fails += do_test_rd_uvarint_enc_i64(
- __FILE__, __LINE__, -1234567890101112,
- (const char[]) {0xef, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8);
-
- return fails;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h
deleted file mode 100644
index 6fe112ba9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2016 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _RDVARINT_H
-#define _RDVARINT_H
-
-#include "rd.h"
-#include "rdbuf.h"
-
-/**
- * @name signed varint zig-zag encoder/decoder
- * @{
- *
- */
-
-/**
- * @brief unsigned-varint encodes unsigned integer \p num into buffer
- * at \p dst of size \p dstsize.
- * @returns the number of bytes written to \p dst, or 0 if not enough space.
- */
-
-static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_u64(char *dst,
- size_t dstsize,
- uint64_t num) {
- size_t of = 0;
-
- do {
- if (unlikely(of >= dstsize))
- return 0; /* Not enough space */
-
- dst[of++] = (num & 0x7f) | (num > 0x7f ? 0x80 : 0);
- num >>= 7;
- } while (num);
-
- return of;
-}
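-
-/* Worked example (illustrative): 300 (binary 10 0101100) encodes as
- * 0xac 0x02 -- each byte carries seven payload bits, least significant
- * group first, and the high bit marks a continuation byte. */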
-
-/**
- * @brief Encodes a signed integer using zig-zag encoding.
- * @sa rd_uvarint_enc_u64
- */
-static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i64(char *dst,
- size_t dstsize,
- int64_t num) {
- return rd_uvarint_enc_u64(dst, dstsize, (num << 1) ^ (num >> 63));
-}
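-
-/* Zig-zag maps signed values of small magnitude to small unsigned values:
- * 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...; e.g. 23 -> 46 (0x2e) and
- * -23 -> 45 (0x2d), matching the cases in unittest_rdvarint(). */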
-
-
-static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i32(char *dst,
- size_t dstsize,
- int32_t num) {
- return rd_uvarint_enc_i64(dst, dstsize, num);
-}
-
-
-
-/**
- * @brief Use on return value from rd_uvarint_dec() to check if
- *        the decoded varint fits in a size_t.
- *
- * @returns 1 on overflow, else 0.
- */
-#define RD_UVARINT_OVERFLOW(DEC_RETVAL) (DEC_RETVAL > SIZE_MAX)
-
-/**
- * @returns 1 if there were not enough bytes to decode the varint, else 0.
- */
-#define RD_UVARINT_UNDERFLOW(DEC_RETVAL) (DEC_RETVAL == 0)
-
-
-/**
- * @param DEC_RETVAL the return value from \c rd_uvarint_dec()
- * @returns 1 if varint decoding failed, else 0.
- * @warning \p DEC_RETVAL will be evaluated twice.
- */
-#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \
- (RD_UVARINT_UNDERFLOW(DEC_RETVAL) || RD_UVARINT_OVERFLOW(DEC_RETVAL))
-
-
-/**
- * @brief Decodes the unsigned-varint in buffer \p src of size \p srcsize
- * and stores the decoded unsigned integer in \p nump.
- *
- * @remark Use RD_UVARINT_OVERFLOW(returnvalue) to check if the varint
- * could not fit \p nump, and RD_UVARINT_UNDERFLOW(returnvalue) to
- * check if there were not enough bytes available in \p src to
- * decode the full varint.
- *
- * @returns the number of bytes read from \p src.
- */
-static RD_INLINE RD_UNUSED size_t rd_uvarint_dec(const char *src,
- size_t srcsize,
- uint64_t *nump) {
- size_t of = 0;
- uint64_t num = 0;
- int shift = 0;
-
- do {
- if (unlikely(srcsize-- == 0))
- return 0; /* Underflow */
- num |= (uint64_t)(src[(int)of] & 0x7f) << shift;
- shift += 7;
- } while (src[(int)of++] & 0x80);
-
- *nump = num;
- return of;
-}
-
-static RD_INLINE RD_UNUSED size_t rd_varint_dec_i64(const char *src,
- size_t srcsize,
- int64_t *nump) {
- uint64_t n;
- size_t r;
-
- r = rd_uvarint_dec(src, srcsize, &n);
- if (likely(!RD_UVARINT_DEC_FAILED(r)))
- *nump = (int64_t)(n >> 1) ^ -(int64_t)(n & 1);
-
- return r;
-}
-
-
-/**
- * @returns the maximum encoded size for a type
- */
-#define RD_UVARINT_ENC_SIZEOF(TYPE) (sizeof(TYPE) + 1 + (sizeof(TYPE) / 7))
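-/* E.g. RD_UVARINT_ENC_SIZEOF(int64_t) == 10: a 64-bit value needs at most
- * ceil(64/7) = 10 varint bytes. */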
-
-/**
- * @returns the encoding size of the value 0
- */
-#define RD_UVARINT_ENC_SIZE_0() ((size_t)1)
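-
-/* Illustrative round-trip (not part of librdkafka): encode a signed value
- * and decode it back using the functions above. */
-#if 0
-static int example_varint_roundtrip(void) {
-        char buf[RD_UVARINT_ENC_SIZEOF(int64_t)];
-        int64_t out = 0;
-        size_t enc = rd_uvarint_enc_i64(buf, sizeof(buf), -23);
-        size_t dec = rd_varint_dec_i64(buf, enc, &out);
-
-        return !RD_UVARINT_DEC_FAILED(dec) && dec == enc && out == -23;
-}
-#endif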
-
-
-int unittest_rdvarint(void);
-
-/**@}*/
-
-
-#endif /* _RDVARINT_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h
deleted file mode 100644
index 73edd41d6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Win32 (Visual Studio) support
- */
-#ifndef _RDWIN32_H_
-#define _RDWIN32_H_
-
-#include <stdlib.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <time.h>
-#include <assert.h>
-
-#define WIN32_MEAN_AND_LEAN
-#include <winsock2.h> /* for sockets + struct timeval */
-#include <io.h>
-#include <fcntl.h>
-
-
-/**
- * Types
- */
-#ifndef _SSIZE_T_DEFINED
-#define _SSIZE_T_DEFINED
-typedef SSIZE_T ssize_t;
-#endif
-typedef int socklen_t;
-
-struct iovec {
- void *iov_base;
- size_t iov_len;
-};
-
-struct msghdr {
- struct iovec *msg_iov;
- int msg_iovlen;
-};
-
-
-/**
- * Annotations, attributes, optimizers
- */
-#ifndef likely
-#define likely(x) x
-#endif
-#ifndef unlikely
-#define unlikely(x) x
-#endif
-
-#define RD_UNUSED
-#define RD_INLINE __inline
-#define RD_WARN_UNUSED_RESULT
-#define RD_NORETURN __declspec(noreturn)
-#define RD_IS_CONSTANT(p) (0)
-#ifdef _MSC_VER
-#define RD_TLS __declspec(thread)
-#elif defined(__MINGW32__)
-#define RD_TLS __thread
-#else
-#error Unknown Windows compiler, cannot set RD_TLS (thread-local-storage attribute)
-#endif
-
-
-/**
- * Allocation
- */
-#define rd_alloca(N) _alloca(N)
-
-
-/**
- * Strings, formatting, printf, ..
- */
-
-/* size_t and ssize_t format strings */
-#define PRIusz "Iu"
-#define PRIdsz "Id"
-
-#ifndef RD_FORMAT
-#define RD_FORMAT(...)
-#endif
-
-static RD_UNUSED RD_INLINE int
-rd_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
- int cnt = -1;
-
- if (size != 0)
- cnt = _vsnprintf_s(str, size, _TRUNCATE, format, ap);
- if (cnt == -1)
- cnt = _vscprintf(format, ap);
-
- return cnt;
-}
-
-static RD_UNUSED RD_INLINE int
-rd_snprintf(char *str, size_t size, const char *format, ...) {
- int cnt;
- va_list ap;
-
- va_start(ap, format);
- cnt = rd_vsnprintf(str, size, format, ap);
- va_end(ap);
-
- return cnt;
-}
-
-
-#define rd_strcasecmp(A, B) _stricmp(A, B)
-#define rd_strncasecmp(A, B, N) _strnicmp(A, B, N)
-/* There is a StrStrIA() but it requires extra linking, so use our own
- * implementation instead. */
-#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE)
-
-
-
-/**
- * Errors
- */
-
-/* MSVC:
- * This is the correct way to set errno on Windows,
- * but it is still pointless due to different errnos in
- * different runtimes:
- * https://social.msdn.microsoft.com/Forums/vstudio/en-US/b4500c0d-1b69-40c7-9ef5-08da1025b5bf/setting-errno-from-within-a-dll?forum=vclanguage/
- * errno is thus highly deprecated, and buggy, on Windows
- * when using librdkafka as a dynamically loaded DLL. */
-#define rd_set_errno(err) _set_errno((err))
-
-static RD_INLINE RD_UNUSED const char *rd_strerror(int err) {
- static RD_TLS char ret[128];
-
- strerror_s(ret, sizeof(ret) - 1, err);
- return ret;
-}
-
-/**
- * @brief strerror() for Win32 API errors as returned by GetLastError() et.al.
- */
-static RD_UNUSED char *
-rd_strerror_w32(DWORD errcode, char *dst, size_t dstsize) {
- char *t;
- FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- (LPSTR)dst, (DWORD)dstsize - 1, NULL);
- /* Remove newlines */
- while ((t = strchr(dst, (int)'\r')) || (t = strchr(dst, (int)'\n')))
- *t = (char)'.';
- return dst;
-}
-
-
-/**
- * Atomics
- */
-#ifndef __cplusplus
-#include "rdatomic.h"
-#endif
-
-
-/**
- * Misc
- */
-
-/**
- * Microsecond sleep.
- * The 'terminate' argument is accepted for API compatibility but is
- * ignored: Sleep() is not interrupted by signals on Windows.
- */
-#define rd_usleep(usec, terminate) Sleep((usec) / 1000)
-
-
-/**
- * @brief gettimeofday() for win32
- */
-static RD_UNUSED int rd_gettimeofday(struct timeval *tv, struct timezone *tz) {
- SYSTEMTIME st;
- FILETIME ft;
- ULARGE_INTEGER d;
-
- GetSystemTime(&st);
- SystemTimeToFileTime(&st, &ft);
- d.HighPart = ft.dwHighDateTime;
- d.LowPart = ft.dwLowDateTime;
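-        /* FILETIME counts 100ns ticks since 1601-01-01; subtracting
-         * 116444736000000000 (the 1601..1970 offset in 100ns ticks)
-         * rebases the value onto the Unix epoch. */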
- tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L);
- tv->tv_usec = (long)(st.wMilliseconds * 1000);
-
- return 0;
-}
-
-
-#define rd_assert(EXPR) assert(EXPR)
-
-
-static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env,
- const char *def) {
- static RD_TLS char tmp[512];
- DWORD r;
- r = GetEnvironmentVariableA(env, tmp, sizeof(tmp));
- if (r == 0 || r > sizeof(tmp))
- return def;
- return tmp;
-}
-
-
-/**
- * Empty struct initializer
- */
-#define RD_ZERO_INIT \
- { 0 }
-
-#ifndef __cplusplus
-/**
- * Sockets, IO
- */
-
-/** @brief Socket type */
-typedef SOCKET rd_socket_t;
-
-/** @brief Socket API error return value */
-#define RD_SOCKET_ERROR SOCKET_ERROR
-
-/** @brief Last socket error */
-#define rd_socket_errno WSAGetLastError()
-
-/** @brief String representation of socket error */
-static RD_UNUSED const char *rd_socket_strerror(int err) {
- static RD_TLS char buf[256];
- rd_strerror_w32(err, buf, sizeof(buf));
- return buf;
-}
-
-/** @brief WSAPoll() struct type */
-typedef WSAPOLLFD rd_pollfd_t;
-
-/** @brief poll(2) */
-#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \
- WSAPoll(POLLFD, FDCNT, TIMEOUT_MS)
-
-
-/**
- * @brief Set socket to non-blocking
- * @returns 0 on success or the Windows socket error code
- *          (WSAGetLastError()) on failure
- */
-static RD_UNUSED int rd_fd_set_nonblocking(rd_socket_t fd) {
- u_long on = 1;
- if (ioctlsocket(fd, FIONBIO, &on) == SOCKET_ERROR)
- return (int)WSAGetLastError();
- return 0;
-}
-
-/**
- * @brief Create non-blocking pipe
- * @returns 0 on success or errno on failure
- */
-static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) {
-        /* On Windows, the "pipe" will be a TCP connection.
-         * This is to allow WSAPoll() to be used to poll pipe events. */
-
- SOCKET listen_s = INVALID_SOCKET;
- SOCKET accept_s = INVALID_SOCKET;
- SOCKET connect_s = INVALID_SOCKET;
-
- struct sockaddr_in listen_addr;
- struct sockaddr_in connect_addr;
- socklen_t sock_len = 0;
- int bufsz;
-
- /* Create listen socket */
- listen_s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (listen_s == INVALID_SOCKET)
- goto err;
-
- listen_addr.sin_family = AF_INET;
- listen_addr.sin_addr.s_addr = ntohl(INADDR_LOOPBACK);
- listen_addr.sin_port = 0;
- if (bind(listen_s, (struct sockaddr *)&listen_addr,
- sizeof(listen_addr)) != 0)
- goto err;
-
- sock_len = sizeof(connect_addr);
- if (getsockname(listen_s, (struct sockaddr *)&connect_addr,
- &sock_len) != 0)
- goto err;
-
- if (listen(listen_s, 1) != 0)
- goto err;
-
- /* Create connection socket */
- connect_s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (connect_s == INVALID_SOCKET)
- goto err;
-
- if (connect(connect_s, (struct sockaddr *)&connect_addr,
- sizeof(connect_addr)) == SOCKET_ERROR)
- goto err;
-
- /* Wait for incoming connection */
- accept_s = accept(listen_s, NULL, NULL);
-        if (accept_s == INVALID_SOCKET)
- goto err;
-
- /* Done with listening */
- closesocket(listen_s);
-
- if (rd_fd_set_nonblocking(accept_s) != 0)
- goto err;
-
- if (rd_fd_set_nonblocking(connect_s) != 0)
- goto err;
-
- /* Minimize buffer sizes to avoid a large number
- * of signaling bytes to accumulate when
- * io-signalled queue is not being served for a while. */
- bufsz = 100;
- setsockopt(accept_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz,
- sizeof(bufsz));
- bufsz = 100;
- setsockopt(accept_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz,
- sizeof(bufsz));
- bufsz = 100;
- setsockopt(connect_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz,
- sizeof(bufsz));
- bufsz = 100;
- setsockopt(connect_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz,
- sizeof(bufsz));
-
- /* Store resulting sockets.
- * They are bidirectional, so it does not matter which is read or
- * write side of pipe. */
- fds[0] = accept_s;
- fds[1] = connect_s;
- return 0;
-
-err:
- if (listen_s != INVALID_SOCKET)
- closesocket(listen_s);
- if (accept_s != INVALID_SOCKET)
- closesocket(accept_s);
- if (connect_s != INVALID_SOCKET)
- closesocket(connect_s);
- return -1;
-}
-
-/* Socket IO */
-#define rd_socket_read(fd, buf, sz) recv(fd, buf, sz, 0)
-#define rd_socket_write(fd, buf, sz) send(fd, buf, sz, 0)
-#define rd_socket_close(fd) closesocket(fd)
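-
-/* Illustrative only (not part of librdkafka): signal and drain one byte
- * through the loopback "pipe" created by rd_pipe_nonblocking(). */
-#if 0
-static int example_pipe_signal(void) {
-        rd_socket_t fds[2];
-        char b = 1;
-
-        if (rd_pipe_nonblocking(fds) == -1)
-                return -1;
-        rd_socket_write(fds[1], &b, 1); /* signal */
-        rd_socket_read(fds[0], &b, 1);  /* drain */
-        rd_socket_close(fds[0]);
-        rd_socket_close(fds[1]);
-        return 0;
-}
-#endif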
-
-/* File IO */
-#define rd_write(fd, buf, sz) _write(fd, buf, sz)
-#define rd_open(path, flags, mode) _open(path, flags, mode)
-#define rd_close(fd) _close(fd)
-
-#endif /* !__cplusplus*/
-
-#endif /* _RDWIN32_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c
deleted file mode 100644
index 89f7c8cf4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c
+++ /dev/null
@@ -1,1187 +0,0 @@
-/*
- * xxHash - Fast Hash algorithm
- * Copyright (C) 2012-2016, Yann Collet
- *
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You can contact the author at :
- * - xxHash homepage: http://www.xxhash.com
- * - xxHash source repository : https://github.com/Cyan4973/xxHash
- */
-
-
-/* *************************************
- * Tuning parameters
- ***************************************/
-/*!XXH_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is done through `memcpy()`, which is
- * safe and portable. Unfortunately, on some target/compiler combinations, the
- * generated assembly is sub-optimal. The switch below allows selecting a
- * different access method for improved performance.
- * Method 0 (default): use `memcpy()`. Safe and portable.
- * Method 1: use the `__packed` attribute. This relies on a compiler extension
- *           (i.e. not portable). It is safe if your compiler supports it, and
- *           *generally* as fast or faster than `memcpy`.
- * Method 2: direct access. This does not depend on the compiler, but violates
- *           the C standard and can generate buggy code on targets that do not
- *           support unaligned memory accesses. In some circumstances, however,
- *           it is the only known way to get the best performance
- *           (e.g. GCC + ARMv6).
- * See http://stackoverflow.com/a/32095106/646947 for details.
- * Prefer these methods in priority order (0 > 1 > 2).
- */
-#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line \
- for example */
-#if defined(__GNUC__) && \
- (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
-#define XXH_FORCE_MEMORY_ACCESS 2
-#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
- (defined(__GNUC__) && \
- (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
- defined(__ARM_ARCH_7S__)))
-#define XXH_FORCE_MEMORY_ACCESS 1
-#endif
-#endif
-
-/*!XXH_ACCEPT_NULL_INPUT_POINTER :
- * If input pointer is NULL, xxHash default behavior is to dereference it,
- * triggering a segfault. When this macro is enabled, xxHash actively checks
- * the input for a null pointer. If it is null, the result is the same as for
- * a zero-length input.
- */
-#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
-#define XXH_ACCEPT_NULL_INPUT_POINTER 0
-#endif
-
-/*!XXH_FORCE_NATIVE_FORMAT :
- * By default, the xxHash library provides endian-independent hash values,
- * based on the little-endian convention. Results are therefore identical for
- * little-endian and big-endian CPUs. This comes at a performance cost on
- * big-endian CPUs, since some byte swapping is required to emulate the
- * little-endian format. If endian-independence is of no importance to your
- * application, you may set the #define below to 1 to improve speed on
- * big-endian CPUs. This option has no impact on little-endian CPUs.
- */
-#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
-#define XXH_FORCE_NATIVE_FORMAT 0
-#endif
-
-/*!XXH_FORCE_ALIGN_CHECK :
- * This is a minor performance trick, only useful with lots of very small keys.
- * It means : check for aligned/unaligned input.
- * The check costs one initial branch per hash;
- * set it to 0 when the input is guaranteed to be aligned,
- * or when alignment doesn't matter for performance.
- */
-#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
-#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || \
- defined(_M_X64)
-#define XXH_FORCE_ALIGN_CHECK 0
-#else
-#define XXH_FORCE_ALIGN_CHECK 1
-#endif
-#endif
-
-
-/* *************************************
- * Includes & Memory related functions
- ***************************************/
-/*! Modify the local functions below should you wish to use some other memory
- * routines for malloc(), free() */
-#include "rd.h"
-static void *XXH_malloc(size_t s) {
- return rd_malloc(s);
-}
-static void XXH_free(void *p) {
- rd_free(p);
-}
-/*! and for memcpy() */
-#include <string.h>
-static void *XXH_memcpy(void *dest, const void *src, size_t size) {
- return memcpy(dest, src, size);
-}
-
-#include <assert.h> /* assert */
-
-#define XXH_STATIC_LINKING_ONLY
-#include "rdxxhash.h"
-
-
-/* *************************************
- * Compiler Specific Options
- ***************************************/
-#ifdef _MSC_VER /* Visual Studio */
-#pragma warning( \
- disable : 4127) /* disable: C4127: conditional expression is constant */
-#define FORCE_INLINE static __forceinline
-#else
-#if defined(__cplusplus) || \
- defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
-#ifdef __GNUC__
-#define FORCE_INLINE static inline __attribute__((always_inline))
-#else
-#define FORCE_INLINE static inline
-#endif
-#else
-#define FORCE_INLINE static
-#endif /* __STDC_VERSION__ */
-#endif
-
-
-/* *************************************
- * Basic Types
- ***************************************/
-#ifndef MEM_MODULE
-#if !defined(__VMS) && \
- (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
-#include <stdint.h>
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef uint32_t U32;
-#else
-typedef unsigned char BYTE;
-typedef unsigned short U16;
-typedef unsigned int U32;
-#endif
-#endif
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
-
-/* Force direct memory access. Only works on CPU which support unaligned memory
- * access in hardware */
-static U32 XXH_read32(const void *memPtr) {
- return *(const U32 *)memPtr;
-}
-
-#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
-
-/* __pack instructions are safer, but compiler specific, hence potentially
- * problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union {
- U32 u32;
-} __attribute__((packed)) unalign;
-static U32 XXH_read32(const void *ptr) {
- return ((const unalign *)ptr)->u32;
-}
-
-#else
-
-/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947
- */
-static U32 XXH_read32(const void *memPtr) {
- U32 val;
- memcpy(&val, memPtr, sizeof(val));
- return val;
-}
-
-#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
-
-
-/* ****************************************
- * Compiler-specific Functions and Macros
- ******************************************/
-#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
-/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems
- * poor */
-#if defined(_MSC_VER)
-#define XXH_rotl32(x, r) _rotl(x, r)
-#define XXH_rotl64(x, r) _rotl64(x, r)
-#else
-#define XXH_rotl32(x, r) ((x << r) | (x >> (32 - r)))
-#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r)))
-#endif
-
-#if defined(_MSC_VER) /* Visual Studio */
-#define XXH_swap32 _byteswap_ulong
-#elif XXH_GCC_VERSION >= 403
-#define XXH_swap32 __builtin_bswap32
-#else
-static U32 XXH_swap32(U32 x) {
- return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) |
- ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
-}
-#endif
-
-
-/* *************************************
- * Architecture Macros
- ***************************************/
-typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess;
-
-/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler
- * command line */
-#ifndef XXH_CPU_LITTLE_ENDIAN
-static int XXH_isLittleEndian(void) {
- const union {
- U32 u;
- BYTE c[4];
- } one = {1}; /* don't use static : performance detrimental */
- return one.c[0];
-}
-#define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
-#endif
-
-
-/* ***************************
- * Memory reads
- *****************************/
-typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
-
-FORCE_INLINE U32 XXH_readLE32_align(const void *ptr,
- XXH_endianess endian,
- XXH_alignment align) {
- if (align == XXH_unaligned)
- return endian == XXH_littleEndian ? XXH_read32(ptr)
- : XXH_swap32(XXH_read32(ptr));
- else
- return endian == XXH_littleEndian
- ? *(const U32 *)ptr
- : XXH_swap32(*(const U32 *)ptr);
-}
-
-FORCE_INLINE U32 XXH_readLE32(const void *ptr, XXH_endianess endian) {
- return XXH_readLE32_align(ptr, endian, XXH_unaligned);
-}
-
-static U32 XXH_readBE32(const void *ptr) {
- return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr))
- : XXH_read32(ptr);
-}
-
-
-/* *************************************
- * Macros
- ***************************************/
-#define XXH_STATIC_ASSERT(c) \
- { \
- enum { XXH_sa = 1 / (int)(!!(c)) }; \
- } /* use after variable declarations */
-XXH_PUBLIC_API unsigned XXH_versionNumber(void) {
- return XXH_VERSION_NUMBER;
-}
-
-
-/* *******************************************************************
- * 32-bit hash functions
- *********************************************************************/
-static const U32 PRIME32_1 = 2654435761U;
-static const U32 PRIME32_2 = 2246822519U;
-static const U32 PRIME32_3 = 3266489917U;
-static const U32 PRIME32_4 = 668265263U;
-static const U32 PRIME32_5 = 374761393U;
-
-static U32 XXH32_round(U32 seed, U32 input) {
- seed += input * PRIME32_2;
- seed = XXH_rotl32(seed, 13);
- seed *= PRIME32_1;
- return seed;
-}
-
-/* mix all bits */
-static U32 XXH32_avalanche(U32 h32) {
- h32 ^= h32 >> 15;
- h32 *= PRIME32_2;
- h32 ^= h32 >> 13;
- h32 *= PRIME32_3;
- h32 ^= h32 >> 16;
- return (h32);
-}
-
-#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
-
-static U32 XXH32_finalize(U32 h32,
- const void *ptr,
- size_t len,
- XXH_endianess endian,
- XXH_alignment align)
-
-{
- const BYTE *p = (const BYTE *)ptr;
-
-#define PROCESS1 \
- h32 += (*p++) * PRIME32_5; \
- h32 = XXH_rotl32(h32, 11) * PRIME32_1;
-
-#define PROCESS4 \
- h32 += XXH_get32bits(p) * PRIME32_3; \
- p += 4; \
- h32 = XXH_rotl32(h32, 17) * PRIME32_4;
-
- switch (len & 15) /* or switch(bEnd - p) */
- {
- case 12:
- PROCESS4;
- /* fallthrough */
- case 8:
- PROCESS4;
- /* fallthrough */
- case 4:
- PROCESS4;
- return XXH32_avalanche(h32);
-
- case 13:
- PROCESS4;
- /* fallthrough */
- case 9:
- PROCESS4;
- /* fallthrough */
- case 5:
- PROCESS4;
- PROCESS1;
- return XXH32_avalanche(h32);
-
- case 14:
- PROCESS4;
- /* fallthrough */
- case 10:
- PROCESS4;
- /* fallthrough */
- case 6:
- PROCESS4;
- PROCESS1;
- PROCESS1;
- return XXH32_avalanche(h32);
-
- case 15:
- PROCESS4;
- /* fallthrough */
- case 11:
- PROCESS4;
- /* fallthrough */
- case 7:
- PROCESS4;
- /* fallthrough */
- case 3:
- PROCESS1;
- /* fallthrough */
- case 2:
- PROCESS1;
- /* fallthrough */
- case 1:
- PROCESS1;
- /* fallthrough */
- case 0:
- return XXH32_avalanche(h32);
- }
- assert(0);
- return h32; /* reaching this point is deemed impossible */
-}
-
-
-FORCE_INLINE U32 XXH32_endian_align(const void *input,
- size_t len,
- U32 seed,
- XXH_endianess endian,
- XXH_alignment align) {
- const BYTE *p = (const BYTE *)input;
- const BYTE *bEnd = p + len;
- U32 h32;
-
-#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- if (p == NULL) {
- len = 0;
- bEnd = p = (const BYTE *)(size_t)16;
- }
-#endif
-
- if (len >= 16) {
- const BYTE *const limit = bEnd - 15;
- U32 v1 = seed + PRIME32_1 + PRIME32_2;
- U32 v2 = seed + PRIME32_2;
- U32 v3 = seed + 0;
- U32 v4 = seed - PRIME32_1;
-
- do {
- v1 = XXH32_round(v1, XXH_get32bits(p));
- p += 4;
- v2 = XXH32_round(v2, XXH_get32bits(p));
- p += 4;
- v3 = XXH32_round(v3, XXH_get32bits(p));
- p += 4;
- v4 = XXH32_round(v4, XXH_get32bits(p));
- p += 4;
- } while (p < limit);
-
- h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) +
- XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
- } else {
- h32 = seed + PRIME32_5;
- }
-
- h32 += (U32)len;
-
- return XXH32_finalize(h32, p, len & 15, endian, align);
-}
-
-
-XXH_PUBLIC_API unsigned int
-XXH32(const void *input, size_t len, unsigned int seed) {
-#if 0
- /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
- XXH32_state_t state;
- XXH32_reset(&state, seed);
- XXH32_update(&state, input, len);
- return XXH32_digest(&state);
-#else
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- if (XXH_FORCE_ALIGN_CHECK) {
-                if ((((size_t)input) & 3) == 0) {
-                        /* Input is 4-byte aligned, leverage the speed
-                         * benefit */
- if ((endian_detected == XXH_littleEndian) ||
- XXH_FORCE_NATIVE_FORMAT)
- return XXH32_endian_align(input, len, seed,
- XXH_littleEndian,
- XXH_aligned);
- else
- return XXH32_endian_align(input, len, seed,
- XXH_bigEndian,
- XXH_aligned);
- }
- }
-
- if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH32_endian_align(input, len, seed, XXH_littleEndian,
- XXH_unaligned);
- else
- return XXH32_endian_align(input, len, seed, XXH_bigEndian,
- XXH_unaligned);
-#endif
-}
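-
-/* Illustrative usage (not part of xxHash): one-shot hashing of a buffer
- * with seed 0. */
-#if 0
-static unsigned int example_xxh32(const void *buf, size_t len) {
-        return XXH32(buf, len, 0);
-}
-#endif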
-
-
-
-/*====== Hash streaming ======*/
-
-XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void) {
- return (XXH32_state_t *)XXH_malloc(sizeof(XXH32_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) {
- XXH_free(statePtr);
- return XXH_OK;
-}
-
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dstState,
- const XXH32_state_t *srcState) {
- memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
- unsigned int seed) {
- XXH32_state_t state; /* using a local state to memcpy() in order to
- avoid strict-aliasing warnings */
- memset(&state, 0, sizeof(state));
- state.v1 = seed + PRIME32_1 + PRIME32_2;
- state.v2 = seed + PRIME32_2;
- state.v3 = seed + 0;
- state.v4 = seed - PRIME32_1;
- /* do not write into reserved, planned to be removed in a future version
- */
- memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
- return XXH_OK;
-}
-
-
-FORCE_INLINE XXH_errorcode XXH32_update_endian(XXH32_state_t *state,
- const void *input,
- size_t len,
- XXH_endianess endian) {
- if (input == NULL)
-#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- return XXH_OK;
-#else
- return XXH_ERROR;
-#endif
-
- {
- const BYTE *p = (const BYTE *)input;
- const BYTE *const bEnd = p + len;
-
- state->total_len_32 += (unsigned)len;
- state->large_len |= (len >= 16) | (state->total_len_32 >= 16);
-
- if (state->memsize + len < 16) { /* fill in tmp buffer */
- XXH_memcpy((BYTE *)(state->mem32) + state->memsize,
- input, len);
- state->memsize += (unsigned)len;
- return XXH_OK;
- }
-
- if (state->memsize) { /* some data left from previous update */
- XXH_memcpy((BYTE *)(state->mem32) + state->memsize,
- input, 16 - state->memsize);
- {
- const U32 *p32 = state->mem32;
- state->v1 = XXH32_round(
- state->v1, XXH_readLE32(p32, endian));
- p32++;
- state->v2 = XXH32_round(
- state->v2, XXH_readLE32(p32, endian));
- p32++;
- state->v3 = XXH32_round(
- state->v3, XXH_readLE32(p32, endian));
- p32++;
- state->v4 = XXH32_round(
- state->v4, XXH_readLE32(p32, endian));
- }
- p += 16 - state->memsize;
- state->memsize = 0;
- }
-
- if (p <= bEnd - 16) {
- const BYTE *const limit = bEnd - 16;
- U32 v1 = state->v1;
- U32 v2 = state->v2;
- U32 v3 = state->v3;
- U32 v4 = state->v4;
-
- do {
- v1 = XXH32_round(v1, XXH_readLE32(p, endian));
- p += 4;
- v2 = XXH32_round(v2, XXH_readLE32(p, endian));
- p += 4;
- v3 = XXH32_round(v3, XXH_readLE32(p, endian));
- p += 4;
- v4 = XXH32_round(v4, XXH_readLE32(p, endian));
- p += 4;
- } while (p <= limit);
-
- state->v1 = v1;
- state->v2 = v2;
- state->v3 = v3;
- state->v4 = v4;
- }
-
- if (p < bEnd) {
- XXH_memcpy(state->mem32, p, (size_t)(bEnd - p));
- state->memsize = (unsigned)(bEnd - p);
- }
- }
-
- return XXH_OK;
-}
-
-
-XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state_in,
- const void *input,
- size_t len) {
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH32_update_endian(state_in, input, len,
- XXH_littleEndian);
- else
- return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-
-FORCE_INLINE U32 XXH32_digest_endian(const XXH32_state_t *state,
- XXH_endianess endian) {
- U32 h32;
-
- if (state->large_len) {
- h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
- XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
- } else {
- h32 = state->v3 /* == seed */ + PRIME32_5;
- }
-
- h32 += state->total_len_32;
-
- return XXH32_finalize(h32, state->mem32, state->memsize, endian,
- XXH_aligned);
-}
-
-
-XXH_PUBLIC_API unsigned int XXH32_digest(const XXH32_state_t *state_in) {
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH32_digest_endian(state_in, XXH_littleEndian);
- else
- return XXH32_digest_endian(state_in, XXH_bigEndian);
-}
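-
-/* Illustrative (not part of xxHash): hashing the same data in two chunks
- * via the streaming API yields the same result as one-shot XXH32(). */
-#if 0
-static unsigned int example_xxh32_stream(const char *buf, size_t len) {
-        XXH32_state_t *st = XXH32_createState();
-        unsigned int h;
-
-        XXH32_reset(st, 0);
-        XXH32_update(st, buf, len / 2);
-        XXH32_update(st, buf + len / 2, len - len / 2);
-        h = XXH32_digest(st);
-        XXH32_freeState(st);
-        return h;
-}
-#endif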
-
-
-/*====== Canonical representation ======*/
-
-/*! Default XXH result types are basic unsigned 32 and 64 bits.
- * The canonical representation follows the human-readable write convention,
- * i.e. big-endian (most significant digits first). These functions allow
- * transformation of a hash result into and from its canonical format. This
- * way, hash values can be written into a file or buffer and remain comparable
- * across different systems.
- */
-
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
- XXH32_hash_t hash) {
- XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
- if (XXH_CPU_LITTLE_ENDIAN)
- hash = XXH_swap32(hash);
- memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API XXH32_hash_t
-XXH32_hashFromCanonical(const XXH32_canonical_t *src) {
- return XXH_readBE32(src);
-}
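-
-/* Illustrative (not part of xxHash): canonical (big-endian) bytes are
- * stable across platforms, so a hash can be serialized on one machine
- * and compared on another. Assumes the standard XXH32_canonical_t
- * layout (a 4-byte digest array). */
-#if 0
-static void example_xxh32_canonical(XXH32_hash_t h, unsigned char out[4]) {
-        XXH32_canonical_t c;
-
-        XXH32_canonicalFromHash(&c, h);
-        memcpy(out, c.digest, sizeof(c.digest));
-}
-#endif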
-
-
-#ifndef XXH_NO_LONG_LONG
-
-/* *******************************************************************
- * 64-bit hash functions
- *********************************************************************/
-
-/*====== Memory access ======*/
-
-#ifndef MEM_MODULE
-#define MEM_MODULE
-#if !defined(__VMS) && \
- (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
-#include <stdint.h>
-typedef uint64_t U64;
-#else
-/* if compiler doesn't support unsigned long long, replace by another 64-bit
- * type */
-typedef unsigned long long U64;
-#endif
-#endif
-
-
-#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
-
-/* Force direct memory access. Only works on CPU which support unaligned memory
- * access in hardware */
-static U64 XXH_read64(const void *memPtr) {
- return *(const U64 *)memPtr;
-}
-
-#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
-
-/* __pack instructions are safer, but compiler specific, hence potentially
- * problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union {
- U32 u32;
- U64 u64;
-} __attribute__((packed)) unalign64;
-static U64 XXH_read64(const void *ptr) {
- return ((const unalign64 *)ptr)->u64;
-}
-
-#else
-
-/* portable and safe solution. Generally efficient.
- * see : http://stackoverflow.com/a/32095106/646947
- */
-
-static U64 XXH_read64(const void *memPtr) {
- U64 val;
- memcpy(&val, memPtr, sizeof(val));
- return val;
-}
-
-#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
-
-#if defined(_MSC_VER) /* Visual Studio */
-#define XXH_swap64 _byteswap_uint64
-#elif XXH_GCC_VERSION >= 403
-#define XXH_swap64 __builtin_bswap64
-#else
-static U64 XXH_swap64(U64 x) {
- return ((x << 56) & 0xff00000000000000ULL) |
- ((x << 40) & 0x00ff000000000000ULL) |
- ((x << 24) & 0x0000ff0000000000ULL) |
- ((x << 8) & 0x000000ff00000000ULL) |
- ((x >> 8) & 0x00000000ff000000ULL) |
- ((x >> 24) & 0x0000000000ff0000ULL) |
- ((x >> 40) & 0x000000000000ff00ULL) |
- ((x >> 56) & 0x00000000000000ffULL);
-}
-#endif
-
-FORCE_INLINE U64 XXH_readLE64_align(const void *ptr,
- XXH_endianess endian,
- XXH_alignment align) {
- if (align == XXH_unaligned)
- return endian == XXH_littleEndian ? XXH_read64(ptr)
- : XXH_swap64(XXH_read64(ptr));
- else
- return endian == XXH_littleEndian
- ? *(const U64 *)ptr
- : XXH_swap64(*(const U64 *)ptr);
-}
-
-FORCE_INLINE U64 XXH_readLE64(const void *ptr, XXH_endianess endian) {
- return XXH_readLE64_align(ptr, endian, XXH_unaligned);
-}
-
-static U64 XXH_readBE64(const void *ptr) {
- return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr))
- : XXH_read64(ptr);
-}
-
-
-/*====== xxh64 ======*/
-
-static const U64 PRIME64_1 = 11400714785074694791ULL;
-static const U64 PRIME64_2 = 14029467366897019727ULL;
-static const U64 PRIME64_3 = 1609587929392839161ULL;
-static const U64 PRIME64_4 = 9650029242287828579ULL;
-static const U64 PRIME64_5 = 2870177450012600261ULL;
-
-static U64 XXH64_round(U64 acc, U64 input) {
- acc += input * PRIME64_2;
- acc = XXH_rotl64(acc, 31);
- acc *= PRIME64_1;
- return acc;
-}
-
-static U64 XXH64_mergeRound(U64 acc, U64 val) {
- val = XXH64_round(0, val);
- acc ^= val;
- acc = acc * PRIME64_1 + PRIME64_4;
- return acc;
-}
-
-static U64 XXH64_avalanche(U64 h64) {
- h64 ^= h64 >> 33;
- h64 *= PRIME64_2;
- h64 ^= h64 >> 29;
- h64 *= PRIME64_3;
- h64 ^= h64 >> 32;
- return h64;
-}
-
-
-#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
-
-static U64 XXH64_finalize(U64 h64,
- const void *ptr,
- size_t len,
- XXH_endianess endian,
- XXH_alignment align) {
- const BYTE *p = (const BYTE *)ptr;
-
-#define PROCESS1_64 \
- h64 ^= (*p++) * PRIME64_5; \
- h64 = XXH_rotl64(h64, 11) * PRIME64_1;
-
-#define PROCESS4_64 \
- h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
- p += 4; \
- h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
-
-#define PROCESS8_64 \
- { \
- U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
- p += 8; \
- h64 ^= k1; \
- h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; \
- }
-
- switch (len & 31) {
- case 24:
- PROCESS8_64;
- /* fallthrough */
- case 16:
- PROCESS8_64;
- /* fallthrough */
- case 8:
- PROCESS8_64;
- return XXH64_avalanche(h64);
-
- case 28:
- PROCESS8_64;
- /* fallthrough */
- case 20:
- PROCESS8_64;
- /* fallthrough */
- case 12:
- PROCESS8_64;
- /* fallthrough */
- case 4:
- PROCESS4_64;
- return XXH64_avalanche(h64);
-
- case 25:
- PROCESS8_64;
- /* fallthrough */
- case 17:
- PROCESS8_64;
- /* fallthrough */
- case 9:
- PROCESS8_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 29:
- PROCESS8_64;
- /* fallthrough */
- case 21:
- PROCESS8_64;
- /* fallthrough */
- case 13:
- PROCESS8_64;
- /* fallthrough */
- case 5:
- PROCESS4_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 26:
- PROCESS8_64;
- /* fallthrough */
- case 18:
- PROCESS8_64;
- /* fallthrough */
- case 10:
- PROCESS8_64;
- PROCESS1_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 30:
- PROCESS8_64;
- /* fallthrough */
- case 22:
- PROCESS8_64;
- /* fallthrough */
- case 14:
- PROCESS8_64;
- /* fallthrough */
- case 6:
- PROCESS4_64;
- PROCESS1_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 27:
- PROCESS8_64;
- /* fallthrough */
- case 19:
- PROCESS8_64;
- /* fallthrough */
- case 11:
- PROCESS8_64;
- PROCESS1_64;
- PROCESS1_64;
- PROCESS1_64;
- return XXH64_avalanche(h64);
-
- case 31:
- PROCESS8_64;
- /* fallthrough */
- case 23:
- PROCESS8_64;
- /* fallthrough */
- case 15:
- PROCESS8_64;
- /* fallthrough */
- case 7:
- PROCESS4_64;
- /* fallthrough */
- case 3:
- PROCESS1_64;
- /* fallthrough */
- case 2:
- PROCESS1_64;
- /* fallthrough */
- case 1:
- PROCESS1_64;
- /* fallthrough */
- case 0:
- return XXH64_avalanche(h64);
- }
-
- /* impossible to reach */
- assert(0);
- return 0; /* unreachable, but some compilers complain without it */
-}
-
-FORCE_INLINE U64 XXH64_endian_align(const void *input,
- size_t len,
- U64 seed,
- XXH_endianess endian,
- XXH_alignment align) {
- const BYTE *p = (const BYTE *)input;
- const BYTE *bEnd = p + len;
- U64 h64;
-
-#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- if (p == NULL) {
- len = 0;
- bEnd = p = (const BYTE *)(size_t)32;
- }
-#endif
-
- if (len >= 32) {
- const BYTE *const limit = bEnd - 32;
- U64 v1 = seed + PRIME64_1 + PRIME64_2;
- U64 v2 = seed + PRIME64_2;
- U64 v3 = seed + 0;
- U64 v4 = seed - PRIME64_1;
-
- do {
- v1 = XXH64_round(v1, XXH_get64bits(p));
- p += 8;
- v2 = XXH64_round(v2, XXH_get64bits(p));
- p += 8;
- v3 = XXH64_round(v3, XXH_get64bits(p));
- p += 8;
- v4 = XXH64_round(v4, XXH_get64bits(p));
- p += 8;
- } while (p <= limit);
-
- h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) +
- XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
- h64 = XXH64_mergeRound(h64, v1);
- h64 = XXH64_mergeRound(h64, v2);
- h64 = XXH64_mergeRound(h64, v3);
- h64 = XXH64_mergeRound(h64, v4);
-
- } else {
- h64 = seed + PRIME64_5;
- }
-
- h64 += (U64)len;
-
- return XXH64_finalize(h64, p, len, endian, align);
-}
-
-
-XXH_PUBLIC_API unsigned long long
-XXH64(const void *input, size_t len, unsigned long long seed) {
-#if 0
- /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
- XXH64_state_t state;
- XXH64_reset(&state, seed);
- XXH64_update(&state, input, len);
- return XXH64_digest(&state);
-#else
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- if (XXH_FORCE_ALIGN_CHECK) {
- if ((((size_t)input) & 7) ==
- 0) { /* Input is aligned, let's leverage the speed advantage */
- if ((endian_detected == XXH_littleEndian) ||
- XXH_FORCE_NATIVE_FORMAT)
- return XXH64_endian_align(input, len, seed,
- XXH_littleEndian,
- XXH_aligned);
- else
- return XXH64_endian_align(input, len, seed,
- XXH_bigEndian,
- XXH_aligned);
- }
- }
-
- if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH64_endian_align(input, len, seed, XXH_littleEndian,
- XXH_unaligned);
- else
- return XXH64_endian_align(input, len, seed, XXH_bigEndian,
- XXH_unaligned);
-#endif
-}
-
-/*====== Hash Streaming ======*/
-
-XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void) {
- return (XXH64_state_t *)XXH_malloc(sizeof(XXH64_state_t));
-}
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) {
- XXH_free(statePtr);
- return XXH_OK;
-}
-
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dstState,
- const XXH64_state_t *srcState) {
- memcpy(dstState, srcState, sizeof(*dstState));
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr,
- unsigned long long seed) {
- XXH64_state_t state; /* using a local state to memcpy() in order to
- avoid strict-aliasing warnings */
- memset(&state, 0, sizeof(state));
- state.v1 = seed + PRIME64_1 + PRIME64_2;
- state.v2 = seed + PRIME64_2;
- state.v3 = seed + 0;
- state.v4 = seed - PRIME64_1;
- /* do not write into reserved, planned to be removed in a future
- * version */
- memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
- return XXH_OK;
-}
-
-FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t *state,
- const void *input,
- size_t len,
- XXH_endianess endian) {
- if (input == NULL)
-#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- return XXH_OK;
-#else
- return XXH_ERROR;
-#endif
-
- {
- const BYTE *p = (const BYTE *)input;
- const BYTE *const bEnd = p + len;
-
- state->total_len += len;
-
- if (state->memsize + len < 32) { /* fill in tmp buffer */
- XXH_memcpy(((BYTE *)state->mem64) + state->memsize,
- input, len);
- state->memsize += (U32)len;
- return XXH_OK;
- }
-
- if (state->memsize) { /* tmp buffer is full */
- XXH_memcpy(((BYTE *)state->mem64) + state->memsize,
- input, 32 - state->memsize);
- state->v1 = XXH64_round(
- state->v1, XXH_readLE64(state->mem64 + 0, endian));
- state->v2 = XXH64_round(
- state->v2, XXH_readLE64(state->mem64 + 1, endian));
- state->v3 = XXH64_round(
- state->v3, XXH_readLE64(state->mem64 + 2, endian));
- state->v4 = XXH64_round(
- state->v4, XXH_readLE64(state->mem64 + 3, endian));
- p += 32 - state->memsize;
- state->memsize = 0;
- }
-
- if (p + 32 <= bEnd) {
- const BYTE *const limit = bEnd - 32;
- U64 v1 = state->v1;
- U64 v2 = state->v2;
- U64 v3 = state->v3;
- U64 v4 = state->v4;
-
- do {
- v1 = XXH64_round(v1, XXH_readLE64(p, endian));
- p += 8;
- v2 = XXH64_round(v2, XXH_readLE64(p, endian));
- p += 8;
- v3 = XXH64_round(v3, XXH_readLE64(p, endian));
- p += 8;
- v4 = XXH64_round(v4, XXH_readLE64(p, endian));
- p += 8;
- } while (p <= limit);
-
- state->v1 = v1;
- state->v2 = v2;
- state->v3 = v3;
- state->v4 = v4;
- }
-
- if (p < bEnd) {
- XXH_memcpy(state->mem64, p, (size_t)(bEnd - p));
- state->memsize = (unsigned)(bEnd - p);
- }
- }
-
- return XXH_OK;
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state_in,
- const void *input,
- size_t len) {
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH64_update_endian(state_in, input, len,
- XXH_littleEndian);
- else
- return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
-}
-
-FORCE_INLINE U64 XXH64_digest_endian(const XXH64_state_t *state,
- XXH_endianess endian) {
- U64 h64;
-
- if (state->total_len >= 32) {
- U64 const v1 = state->v1;
- U64 const v2 = state->v2;
- U64 const v3 = state->v3;
- U64 const v4 = state->v4;
-
- h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) +
- XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
- h64 = XXH64_mergeRound(h64, v1);
- h64 = XXH64_mergeRound(h64, v2);
- h64 = XXH64_mergeRound(h64, v3);
- h64 = XXH64_mergeRound(h64, v4);
- } else {
- h64 = state->v3 /*seed*/ + PRIME64_5;
- }
-
- h64 += (U64)state->total_len;
-
- return XXH64_finalize(h64, state->mem64, (size_t)state->total_len,
- endian, XXH_aligned);
-}
-
-XXH_PUBLIC_API unsigned long long XXH64_digest(const XXH64_state_t *state_in) {
- XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
-
- if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
- return XXH64_digest_endian(state_in, XXH_littleEndian);
- else
- return XXH64_digest_endian(state_in, XXH_bigEndian);
-}
-
-
-/*====== Canonical representation ======*/
-
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
- XXH64_hash_t hash) {
- XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
- if (XXH_CPU_LITTLE_ENDIAN)
- hash = XXH_swap64(hash);
- memcpy(dst, &hash, sizeof(*dst));
-}
-
-XXH_PUBLIC_API XXH64_hash_t
-XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
- return XXH_readBE64(src);
-}
-
-#endif /* XXH_NO_LONG_LONG */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h
deleted file mode 100644
index 1dad7a111..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/*
- xxHash - Extremely Fast Hash algorithm
- Header File
- Copyright (C) 2012-2016, Yann Collet.
-
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - xxHash source repository : https://github.com/Cyan4973/xxHash
-*/
-
-/* Notice extracted from xxHash homepage :
-
-xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
-It also successfully passes all tests from the SMHasher suite.
-
-Comparison (single thread, 32-bit Windows 7, using SMHasher on a Core 2 Duo
-@3GHz)
-
-Name            Speed       Q.Score   Author
-xxHash          5.4 GB/s     10
-CrapWow         3.2 GB/s      2       Andrew
-MurmurHash 3a   2.7 GB/s     10       Austin Appleby
-SpookyHash      2.0 GB/s     10       Bob Jenkins
-SBox            1.4 GB/s      9       Bret Mulvey
-Lookup3         1.2 GB/s      9       Bob Jenkins
-SuperFastHash   1.2 GB/s      1       Paul Hsieh
-CityHash64      1.05 GB/s    10       Pike & Alakuijala
-FNV             0.55 GB/s     5       Fowler, Noll, Vo
-CRC32           0.43 GB/s     9
-MD5-32          0.33 GB/s    10       Ronald L. Rivest
-SHA1-32         0.28 GB/s    10
-
-Q.Score is a measure of quality of the hash function.
-It depends on successfully passing SMHasher test set.
-10 is a perfect score.
-
-A 64-bit version, named XXH64, is available since r35.
-It offers much better speed, but for 64-bit applications only.
-Name     Speed on 64 bits    Speed on 32 bits
-XXH64    13.8 GB/s           1.9 GB/s
-XXH32    6.8 GB/s            6.0 GB/s
-*/
-
-#ifndef XXHASH_H_5627135585666179
-#define XXHASH_H_5627135585666179 1
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-
-/* ****************************
- * Definitions
- ******************************/
-#include <stddef.h> /* size_t */
-typedef enum { XXH_OK = 0, XXH_ERROR } XXH_errorcode;
-
-
-/* ****************************
- * API modifier
- ******************************/
-/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
- * This is useful to include xxhash functions in `static` mode
- * in order to inline them, and remove their symbol from the public list.
- * Inlining can offer dramatic performance improvement on small keys.
- * Methodology:
- * #define XXH_INLINE_ALL
- * #include "xxhash.h"
- * `xxhash.c` is automatically included.
- * It's not useful to compile and link it as a separate module.
- */
-#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
-#ifndef XXH_STATIC_LINKING_ONLY
-#define XXH_STATIC_LINKING_ONLY
-#endif
-#if defined(__GNUC__)
-#define XXH_PUBLIC_API static __inline __attribute__((unused))
-#elif defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#define XXH_PUBLIC_API static inline
-#elif defined(_MSC_VER)
-#define XXH_PUBLIC_API static __inline
-#else
-/* this version may generate warnings for unused static functions */
-#define XXH_PUBLIC_API static
-#endif
-#else
-#define XXH_PUBLIC_API /* do nothing */
-#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
-
-/*! XXH_NAMESPACE, aka Namespace Emulation :
- *
- * If you want to include _and expose_ xxHash functions from within your own
- * library, but also want to avoid symbol collisions with other libraries which
- * may also include xxHash,
- *
- * you can use XXH_NAMESPACE to automatically prefix any public symbol from
- * the xxhash library with the value of XXH_NAMESPACE (therefore, avoid empty
- * and purely numeric values).
- *
- * Note that no change is required within the calling program as long as it
- * includes `xxhash.h`: regular symbol names will be automatically translated
- * by this header.
- */
-#ifdef XXH_NAMESPACE
-#define XXH_CAT(A, B) A##B
-#define XXH_NAME2(A, B) XXH_CAT(A, B)
-#define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
-#define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
-#define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
-#define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
-#define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
-#define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
-#define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
-#define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
-#define XXH32_canonicalFromHash \
- XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
-#define XXH32_hashFromCanonical \
- XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
-#define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
-#define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
-#define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
-#define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
-#define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
-#define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
-#define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
-#define XXH64_canonicalFromHash \
- XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
-#define XXH64_hashFromCanonical \
- XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
-#endif
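
For illustration only (not part of the original header): if the whole project
is built with a namespace prefix, e.g.

    cc -DXXH_NAMESPACE=rdk_ -c xxhash.c consumer.c   # rdk_ is an arbitrary example value

then the library exports rdk_XXH32(), rdk_XXH64(), and so on, while consumer.c
can keep calling plain XXH64(): the macros above rewrite the names during
preprocessing.
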
-
-
-/* *************************************
- * Version
- ***************************************/
-#define XXH_VERSION_MAJOR 0
-#define XXH_VERSION_MINOR 6
-#define XXH_VERSION_RELEASE 5
-#define XXH_VERSION_NUMBER \
- (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \
- XXH_VERSION_RELEASE)
-XXH_PUBLIC_API unsigned XXH_versionNumber(void);
-
-
-/*-**********************************************************************
- * 32-bit hash
- ************************************************************************/
-typedef unsigned int XXH32_hash_t;
-
-/*! XXH32() :
- Calculate the 32-bit hash of the sequence of "length" bytes stored at
- memory address "input". The memory between input and input+length must be
- valid (allocated and read-accessible). "seed" can be used to alter the
- result predictably.
- Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
- */
-XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input,
- size_t length,
- unsigned int seed);
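
A minimal one-shot usage sketch (illustration only; the buffer and seed here
are arbitrary):

    const char msg[] = "sample input";
    XXH32_hash_t h = XXH32(msg, sizeof(msg) - 1, 0 /* seed */);
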
-
-/*====== Streaming ======*/
-typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
-XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void);
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr);
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state,
- const XXH32_state_t *src_state);
-
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
- unsigned int seed);
-XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr,
- const void *input,
- size_t length);
-XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr);
-
-/*
- * Streaming functions generate the xxHash of an input provided in multiple
- * segments. Note that, due to state management, they are slower than the
- * single-call functions for small inputs; in that case, prefer `XXH32()` and
- * `XXH64()`, which are better optimized.
- *
- * XXH state must first be allocated, using XXH*_createState() .
- *
- * Start a new hash by initializing state with a seed, using XXH*_reset().
- *
- * Then, feed the hash state by calling XXH*_update() as many times as
- * necessary. Each call returns an error code: 0 means OK, and any other
- * value means an error occurred.
- *
- * Finally, a hash value can be produced anytime, by using XXH*_digest().
- * This function returns the 32-bit or 64-bit hash, as an unsigned int or an
- * unsigned long long respectively.
- *
- * It's still possible to continue inserting input into the hash state after
- * a digest, and to generate new hashes later on by calling XXH*_digest()
- * again.
- *
- * When done, free XXH state space if it was allocated dynamically.
- */
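
A minimal sketch of that life cycle (illustration only, assuming an input that
arrives in two segments):

    XXH32_state_t *st = XXH32_createState();
    if (st != NULL) {
            XXH32_reset(st, 0 /* seed */);
            XXH32_update(st, "hello ", 6);
            XXH32_update(st, "world", 5);
            /* equals the one-shot XXH32("hello world", 11, 0) */
            XXH32_hash_t h = XXH32_digest(st);
            XXH32_freeState(st);
    }
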
-
-/*====== Canonical representation ======*/
-
-typedef struct {
- unsigned char digest[4];
-} XXH32_canonical_t;
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
- XXH32_hash_t hash);
-XXH_PUBLIC_API XXH32_hash_t
-XXH32_hashFromCanonical(const XXH32_canonical_t *src);
-
-/* The default result types of the XXH functions are primitive unsigned 32-bit
- * and 64-bit integers. The canonical representation uses the human-readable
- * write convention, i.e. big-endian (most significant digits first). These
- * functions allow transformation of the hash
- * result into and from its canonical format. This way, hash values can be
- * written into a file / memory, and remain comparable on different systems and
- * programs.
- */
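
A round-trip sketch (illustration only; data, len and the FILE *fp are assumed
to exist):

    XXH32_canonical_t c;
    XXH32_canonicalFromHash(&c, XXH32(data, len, 0));
    fwrite(c.digest, 1, sizeof(c.digest), fp);      /* big-endian on disk */
    /* ... later, possibly on a machine with a different endianness: */
    XXH32_hash_t h = XXH32_hashFromCanonical(&c);   /* same value recovered */
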
-
-
-#ifndef XXH_NO_LONG_LONG
-/*-**********************************************************************
- * 64-bit hash
- ************************************************************************/
-typedef unsigned long long XXH64_hash_t;
-
-/*! XXH64() :
- Calculate the 64-bit hash of the sequence of "length" bytes stored at memory
- address "input". "seed" can be used to alter the result predictably. This
- function runs faster on 64-bit systems, but slower on 32-bit systems (see
- benchmark).
-*/
-XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input,
- size_t length,
- unsigned long long seed);
-
-/*====== Streaming ======*/
-typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
-XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void);
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr);
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state,
- const XXH64_state_t *src_state);
-
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr,
- unsigned long long seed);
-XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr,
- const void *input,
- size_t length);
-XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr);
-
-/*====== Canonical representation ======*/
-typedef struct {
- unsigned char digest[8];
-} XXH64_canonical_t;
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
- XXH64_hash_t hash);
-XXH_PUBLIC_API XXH64_hash_t
-XXH64_hashFromCanonical(const XXH64_canonical_t *src);
-#endif /* XXH_NO_LONG_LONG */
-
-
-
-#ifdef XXH_STATIC_LINKING_ONLY
-
-/* ================================================================================================
- This section contains declarations which are not guaranteed to remain stable.
- They may change in future versions, becoming incompatible with a different
-version of the library. These declarations should only be used with static
-linking. Never use them in association with dynamic linking!
-===================================================================================================
-*/
-
-/* These definitions are only present to allow
- * static allocation of XXH state, on stack or in a struct for example.
- * Never **ever** use members directly. */
-
-#if !defined(__VMS) && \
- (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
-#include <stdint.h>
-
-struct XXH32_state_s {
- uint32_t total_len_32;
- uint32_t large_len;
- uint32_t v1;
- uint32_t v2;
- uint32_t v3;
- uint32_t v4;
- uint32_t mem32[4];
- uint32_t memsize;
- uint32_t reserved; /* never read nor write, might be removed in a future
- version */
-}; /* typedef'd to XXH32_state_t */
-
-struct XXH64_state_s {
- uint64_t total_len;
- uint64_t v1;
- uint64_t v2;
- uint64_t v3;
- uint64_t v4;
- uint64_t mem64[4];
- uint32_t memsize;
- uint32_t reserved[2]; /* never read nor write, might be removed in a
- future version */
-}; /* typedef'd to XXH64_state_t */
-
-#else
-
-struct XXH32_state_s {
- unsigned total_len_32;
- unsigned large_len;
- unsigned v1;
- unsigned v2;
- unsigned v3;
- unsigned v4;
- unsigned mem32[4];
- unsigned memsize;
- unsigned reserved; /* never read nor write, might be removed in a future
- version */
-}; /* typedef'd to XXH32_state_t */
-
-#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
-struct XXH64_state_s {
- unsigned long long total_len;
- unsigned long long v1;
- unsigned long long v2;
- unsigned long long v3;
- unsigned long long v4;
- unsigned long long mem64[4];
- unsigned memsize;
- unsigned reserved[2]; /* never read nor write, might be removed in a
- future version */
-}; /* typedef'd to XXH64_state_t */
-#endif
-
-#endif
-
-
-#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
-#include "rdxxhash.c" /* include xxhash function bodies as `static`, for inlining */
-#endif
-
-#endif /* XXH_STATIC_LINKING_ONLY */
-
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* XXHASH_H_5627135585666179 */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c b/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c
deleted file mode 100644
index 603546c47..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c
+++ /dev/null
@@ -1,1347 +0,0 @@
-/**
- * Copyright: public domain
- *
- * From https://github.com/ccxvii/minilibs sha
- * 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684:
- *
- * These libraries are in the public domain (or the equivalent where that is not
- * possible). You can do anything you want with them. You have no legal
- * obligation to do anything else, although I appreciate attribution.
- */
-
-#include "rd.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <setjmp.h>
-#include <stdio.h>
-
-#include "regexp.h"
-
-#define nelem(a) (sizeof(a) / sizeof(a)[0])
-
-typedef unsigned int Rune;
-
-static int isalpharune(Rune c) {
- /* TODO: Add unicode support */
- return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
-}
-
-static Rune toupperrune(Rune c) {
- /* TODO: Add unicode support */
- if (c >= 'a' && c <= 'z')
- return c - 'a' + 'A';
- return c;
-}
-
-static int chartorune(Rune *r, const char *s) {
- /* TODO: Add UTF-8 decoding */
- *r = *s;
- return 1;
-}
-
-#define REPINF 255
-#define MAXTHREAD 1000
-#define MAXSUB REG_MAXSUB
-
-typedef struct Reclass Reclass;
-typedef struct Renode Renode;
-typedef struct Reinst Reinst;
-typedef struct Rethread Rethread;
-typedef struct Restate Restate;
-
-struct Reclass {
- Rune *end;
- Rune spans[64];
-};
-
-struct Restate {
- Reprog *prog;
- Renode *pstart, *pend;
-
- const char *source;
- unsigned int ncclass;
- unsigned int nsub;
- Renode *sub[MAXSUB];
-
- int lookahead;
- Rune yychar;
- Reclass *yycc;
- int yymin, yymax;
-
- const char *error;
- jmp_buf kaboom;
-};
-
-struct Reprog {
- Reinst *start, *end;
- int flags;
- unsigned int nsub;
- Reclass cclass[16];
- Restate g; /**< Upstream has this as a global variable */
-};
-
-static void die(Restate *g, const char *message) {
- g->error = message;
- longjmp(g->kaboom, 1);
-}
-
-static Rune canon(Rune c) {
- Rune u = toupperrune(c);
- if (c >= 128 && u < 128)
- return c;
- return u;
-}
-
-/* Scan */
-
-enum { L_CHAR = 256,
- L_CCLASS, /* character class */
- L_NCCLASS, /* negative character class */
- L_NC, /* "(?:" no capture */
- L_PLA, /* "(?=" positive lookahead */
- L_NLA, /* "(?!" negative lookahead */
- L_WORD, /* "\b" word boundary */
- L_NWORD, /* "\B" non-word boundary */
- L_REF, /* "\1" back-reference */
- L_COUNT /* {M,N} */
-};
-
-static int hex(Restate *g, int c) {
- if (c >= '0' && c <= '9')
- return c - '0';
- if (c >= 'a' && c <= 'f')
- return c - 'a' + 0xA;
- if (c >= 'A' && c <= 'F')
- return c - 'A' + 0xA;
- die(g, "invalid escape sequence");
- return 0;
-}
-
-static int dec(Restate *g, int c) {
- if (c >= '0' && c <= '9')
- return c - '0';
- die(g, "invalid quantifier");
- return 0;
-}
-
-#define ESCAPES "BbDdSsWw^$\\.*+?()[]{}|0123456789"
-
-static int nextrune(Restate *g) {
- g->source += chartorune(&g->yychar, g->source);
- if (g->yychar == '\\') {
- g->source += chartorune(&g->yychar, g->source);
- switch (g->yychar) {
- case 0:
- die(g, "unterminated escape sequence");
- case 'f':
- g->yychar = '\f';
- return 0;
- case 'n':
- g->yychar = '\n';
- return 0;
- case 'r':
- g->yychar = '\r';
- return 0;
- case 't':
- g->yychar = '\t';
- return 0;
- case 'v':
- g->yychar = '\v';
- return 0;
- case 'c':
- g->yychar = (*g->source++) & 31;
- return 0;
- case 'x':
- g->yychar = hex(g, *g->source++) << 4;
- g->yychar += hex(g, *g->source++);
- if (g->yychar == 0) {
- g->yychar = '0';
- return 1;
- }
- return 0;
- case 'u':
- g->yychar = hex(g, *g->source++) << 12;
- g->yychar += hex(g, *g->source++) << 8;
- g->yychar += hex(g, *g->source++) << 4;
- g->yychar += hex(g, *g->source++);
- if (g->yychar == 0) {
- g->yychar = '0';
- return 1;
- }
- return 0;
- }
- if (strchr(ESCAPES, g->yychar))
- return 1;
- if (isalpharune(g->yychar) ||
- g->yychar == '_') /* check identity escape */
- die(g, "invalid escape character");
- return 0;
- }
- return 0;
-}
-
-static int lexcount(Restate *g) {
- g->yychar = *g->source++;
-
- g->yymin = dec(g, g->yychar);
- g->yychar = *g->source++;
- while (g->yychar != ',' && g->yychar != '}') {
- g->yymin = g->yymin * 10 + dec(g, g->yychar);
- g->yychar = *g->source++;
- }
- if (g->yymin >= REPINF)
- die(g, "numeric overflow");
-
- if (g->yychar == ',') {
- g->yychar = *g->source++;
- if (g->yychar == '}') {
- g->yymax = REPINF;
- } else {
- g->yymax = dec(g, g->yychar);
- g->yychar = *g->source++;
- while (g->yychar != '}') {
- g->yymax = g->yymax * 10 + dec(g, g->yychar);
- g->yychar = *g->source++;
- }
- if (g->yymax >= REPINF)
- die(g, "numeric overflow");
- }
- } else {
- g->yymax = g->yymin;
- }
-
- return L_COUNT;
-}
-
-static void newcclass(Restate *g) {
- if (g->ncclass >= nelem(g->prog->cclass))
- die(g, "too many character classes");
- g->yycc = g->prog->cclass + g->ncclass++;
- g->yycc->end = g->yycc->spans;
-}
-
-static void addrange(Restate *g, Rune a, Rune b) {
- if (a > b)
- die(g, "invalid character class range");
- if (g->yycc->end + 2 == g->yycc->spans + nelem(g->yycc->spans))
- die(g, "too many character class ranges");
- *g->yycc->end++ = a;
- *g->yycc->end++ = b;
-}
-
-static void addranges_d(Restate *g) {
- addrange(g, '0', '9');
-}
-
-static void addranges_D(Restate *g) {
- addrange(g, 0, '0' - 1);
- addrange(g, '9' + 1, 0xFFFF);
-}
-
-static void addranges_s(Restate *g) {
- addrange(g, 0x9, 0x9);
- addrange(g, 0xA, 0xD);
- addrange(g, 0x20, 0x20);
- addrange(g, 0xA0, 0xA0);
- addrange(g, 0x2028, 0x2029);
- addrange(g, 0xFEFF, 0xFEFF);
-}
-
-static void addranges_S(Restate *g) {
- addrange(g, 0, 0x9 - 1);
- addrange(g, 0x9 + 1, 0xA - 1);
- addrange(g, 0xD + 1, 0x20 - 1);
- addrange(g, 0x20 + 1, 0xA0 - 1);
- addrange(g, 0xA0 + 1, 0x2028 - 1);
- addrange(g, 0x2029 + 1, 0xFEFF - 1);
- addrange(g, 0xFEFF + 1, 0xFFFF);
-}
-
-static void addranges_w(Restate *g) {
- addrange(g, '0', '9');
- addrange(g, 'A', 'Z');
- addrange(g, '_', '_');
- addrange(g, 'a', 'z');
-}
-
-static void addranges_W(Restate *g) {
- addrange(g, 0, '0' - 1);
- addrange(g, '9' + 1, 'A' - 1);
- addrange(g, 'Z' + 1, '_' - 1);
- addrange(g, '_' + 1, 'a' - 1);
- addrange(g, 'z' + 1, 0xFFFF);
-}
-
-static int lexclass(Restate *g) {
- int type = L_CCLASS;
- int quoted, havesave, havedash;
- Rune save = 0;
-
- newcclass(g);
-
- quoted = nextrune(g);
- if (!quoted && g->yychar == '^') {
- type = L_NCCLASS;
- quoted = nextrune(g);
- }
-
- havesave = havedash = 0;
- for (;;) {
- if (g->yychar == 0)
- die(g, "unterminated character class");
- if (!quoted && g->yychar == ']')
- break;
-
- if (!quoted && g->yychar == '-') {
- if (havesave) {
- if (havedash) {
- addrange(g, save, '-');
- havesave = havedash = 0;
- } else {
- havedash = 1;
- }
- } else {
- save = '-';
- havesave = 1;
- }
- } else if (quoted && strchr("DSWdsw", g->yychar)) {
- if (havesave) {
- addrange(g, save, save);
- if (havedash)
- addrange(g, '-', '-');
- }
- switch (g->yychar) {
- case 'd':
- addranges_d(g);
- break;
- case 's':
- addranges_s(g);
- break;
- case 'w':
- addranges_w(g);
- break;
- case 'D':
- addranges_D(g);
- break;
- case 'S':
- addranges_S(g);
- break;
- case 'W':
- addranges_W(g);
- break;
- }
- havesave = havedash = 0;
- } else {
- if (quoted) {
- if (g->yychar == 'b')
- g->yychar = '\b';
- else if (g->yychar == '0')
- g->yychar = 0;
- /* else identity escape */
- }
- if (havesave) {
- if (havedash) {
- addrange(g, save, g->yychar);
- havesave = havedash = 0;
- } else {
- addrange(g, save, save);
- save = g->yychar;
- }
- } else {
- save = g->yychar;
- havesave = 1;
- }
- }
-
- quoted = nextrune(g);
- }
-
- if (havesave) {
- addrange(g, save, save);
- if (havedash)
- addrange(g, '-', '-');
- }
-
- return type;
-}
-
-static int lex(Restate *g) {
- int quoted = nextrune(g);
- if (quoted) {
- switch (g->yychar) {
- case 'b':
- return L_WORD;
- case 'B':
- return L_NWORD;
- case 'd':
- newcclass(g);
- addranges_d(g);
- return L_CCLASS;
- case 's':
- newcclass(g);
- addranges_s(g);
- return L_CCLASS;
- case 'w':
- newcclass(g);
- addranges_w(g);
- return L_CCLASS;
- case 'D':
- newcclass(g);
- addranges_d(g);
- return L_NCCLASS;
- case 'S':
- newcclass(g);
- addranges_s(g);
- return L_NCCLASS;
- case 'W':
- newcclass(g);
- addranges_w(g);
- return L_NCCLASS;
- case '0':
- g->yychar = 0;
- return L_CHAR;
- }
- if (g->yychar >= '0' && g->yychar <= '9') {
- g->yychar -= '0';
- if (*g->source >= '0' && *g->source <= '9')
- g->yychar = g->yychar * 10 + *g->source++ - '0';
- return L_REF;
- }
- return L_CHAR;
- }
-
- switch (g->yychar) {
- case 0:
- case '$':
- case ')':
- case '*':
- case '+':
- case '.':
- case '?':
- case '^':
- case '|':
- return g->yychar;
- }
-
- if (g->yychar == '{')
- return lexcount(g);
- if (g->yychar == '[')
- return lexclass(g);
- if (g->yychar == '(') {
- if (g->source[0] == '?') {
- if (g->source[1] == ':') {
- g->source += 2;
- return L_NC;
- }
- if (g->source[1] == '=') {
- g->source += 2;
- return L_PLA;
- }
- if (g->source[1] == '!') {
- g->source += 2;
- return L_NLA;
- }
- }
- return '(';
- }
-
- return L_CHAR;
-}
-
-/* Parse */
-
-enum { P_CAT,
- P_ALT,
- P_REP,
- P_BOL,
- P_EOL,
- P_WORD,
- P_NWORD,
- P_PAR,
- P_PLA,
- P_NLA,
- P_ANY,
- P_CHAR,
- P_CCLASS,
- P_NCCLASS,
- P_REF };
-
-struct Renode {
- unsigned char type;
- unsigned char ng, m, n;
- Rune c;
- Reclass *cc;
- Renode *x;
- Renode *y;
-};
-
-static Renode *newnode(Restate *g, int type) {
- Renode *node = g->pend++;
- node->type = type;
- node->cc = NULL;
- node->c = 0;
- node->ng = 0;
- node->m = 0;
- node->n = 0;
- node->x = node->y = NULL;
- return node;
-}
-
-static int empty(Renode *node) {
- if (!node)
- return 1;
- switch (node->type) {
- default:
- return 1;
- case P_CAT:
- return empty(node->x) && empty(node->y);
- case P_ALT:
- return empty(node->x) || empty(node->y);
- case P_REP:
- return empty(node->x) || node->m == 0;
- case P_PAR:
- return empty(node->x);
- case P_REF:
- return empty(node->x);
- case P_ANY:
- case P_CHAR:
- case P_CCLASS:
- case P_NCCLASS:
- return 0;
- }
-}
-
-static Renode *newrep(Restate *g, Renode *atom, int ng, int min, int max) {
- Renode *rep = newnode(g, P_REP);
- if (max == REPINF && empty(atom))
- die(g, "infinite loop matching the empty string");
- rep->ng = ng;
- rep->m = min;
- rep->n = max;
- rep->x = atom;
- return rep;
-}
-
-static void next(Restate *g) {
- g->lookahead = lex(g);
-}
-
-static int re_accept(Restate *g, int t) {
- if (g->lookahead == t) {
- next(g);
- return 1;
- }
- return 0;
-}
-
-static Renode *parsealt(Restate *g);
-
-static Renode *parseatom(Restate *g) {
- Renode *atom;
- if (g->lookahead == L_CHAR) {
- atom = newnode(g, P_CHAR);
- atom->c = g->yychar;
- next(g);
- return atom;
- }
- if (g->lookahead == L_CCLASS) {
- atom = newnode(g, P_CCLASS);
- atom->cc = g->yycc;
- next(g);
- return atom;
- }
- if (g->lookahead == L_NCCLASS) {
- atom = newnode(g, P_NCCLASS);
- atom->cc = g->yycc;
- next(g);
- return atom;
- }
- if (g->lookahead == L_REF) {
- atom = newnode(g, P_REF);
- if (g->yychar == 0 || g->yychar > g->nsub || !g->sub[g->yychar])
- die(g, "invalid back-reference");
- atom->n = g->yychar;
- atom->x = g->sub[g->yychar];
- next(g);
- return atom;
- }
- if (re_accept(g, '.'))
- return newnode(g, P_ANY);
- if (re_accept(g, '(')) {
- atom = newnode(g, P_PAR);
- if (g->nsub == MAXSUB)
- die(g, "too many captures");
- atom->n = g->nsub++;
- atom->x = parsealt(g);
- g->sub[atom->n] = atom;
- if (!re_accept(g, ')'))
- die(g, "unmatched '('");
- return atom;
- }
- if (re_accept(g, L_NC)) {
- atom = parsealt(g);
- if (!re_accept(g, ')'))
- die(g, "unmatched '('");
- return atom;
- }
- if (re_accept(g, L_PLA)) {
- atom = newnode(g, P_PLA);
- atom->x = parsealt(g);
- if (!re_accept(g, ')'))
- die(g, "unmatched '('");
- return atom;
- }
- if (re_accept(g, L_NLA)) {
- atom = newnode(g, P_NLA);
- atom->x = parsealt(g);
- if (!re_accept(g, ')'))
- die(g, "unmatched '('");
- return atom;
- }
- die(g, "syntax error");
- return NULL;
-}
-
-static Renode *parserep(Restate *g) {
- Renode *atom;
-
- if (re_accept(g, '^'))
- return newnode(g, P_BOL);
- if (re_accept(g, '$'))
- return newnode(g, P_EOL);
- if (re_accept(g, L_WORD))
- return newnode(g, P_WORD);
- if (re_accept(g, L_NWORD))
- return newnode(g, P_NWORD);
-
- atom = parseatom(g);
- if (g->lookahead == L_COUNT) {
- int min = g->yymin, max = g->yymax;
- next(g);
- if (max < min)
- die(g, "invalid quantifier");
- return newrep(g, atom, re_accept(g, '?'), min, max);
- }
- if (re_accept(g, '*'))
- return newrep(g, atom, re_accept(g, '?'), 0, REPINF);
- if (re_accept(g, '+'))
- return newrep(g, atom, re_accept(g, '?'), 1, REPINF);
- if (re_accept(g, '?'))
- return newrep(g, atom, re_accept(g, '?'), 0, 1);
- return atom;
-}
-
-static Renode *parsecat(Restate *g) {
- Renode *cat, *x;
- if (g->lookahead && g->lookahead != '|' && g->lookahead != ')') {
- cat = parserep(g);
- while (g->lookahead && g->lookahead != '|' &&
- g->lookahead != ')') {
- x = cat;
- cat = newnode(g, P_CAT);
- cat->x = x;
- cat->y = parserep(g);
- }
- return cat;
- }
- return NULL;
-}
-
-static Renode *parsealt(Restate *g) {
- Renode *alt, *x;
- alt = parsecat(g);
- while (re_accept(g, '|')) {
- x = alt;
- alt = newnode(g, P_ALT);
- alt->x = x;
- alt->y = parsecat(g);
- }
- return alt;
-}
-
-/* Compile */
-
-enum { I_END,
- I_JUMP,
- I_SPLIT,
- I_PLA,
- I_NLA,
- I_ANYNL,
- I_ANY,
- I_CHAR,
- I_CCLASS,
- I_NCCLASS,
- I_REF,
- I_BOL,
- I_EOL,
- I_WORD,
- I_NWORD,
- I_LPAR,
- I_RPAR };
-
-struct Reinst {
- unsigned char opcode;
- unsigned char n;
- Rune c;
- Reclass *cc;
- Reinst *x;
- Reinst *y;
-};
-
-static unsigned int count(Renode *node) {
- unsigned int min, max;
- if (!node)
- return 0;
- switch (node->type) {
- default:
- return 1;
- case P_CAT:
- return count(node->x) + count(node->y);
- case P_ALT:
- return count(node->x) + count(node->y) + 2;
- case P_REP:
- min = node->m;
- max = node->n;
- if (min == max)
- return count(node->x) * min;
- if (max < REPINF)
- return count(node->x) * max + (max - min);
- return count(node->x) * (min + 1) + 2;
- case P_PAR:
- return count(node->x) + 2;
- case P_PLA:
- return count(node->x) + 2;
- case P_NLA:
- return count(node->x) + 2;
- }
-}
-
-static Reinst *emit(Reprog *prog, int opcode) {
- Reinst *inst = prog->end++;
- inst->opcode = opcode;
- inst->n = 0;
- inst->c = 0;
- inst->cc = NULL;
- inst->x = inst->y = NULL;
- return inst;
-}
-
-static void compile(Reprog *prog, Renode *node) {
- Reinst *inst, *split, *jump;
- unsigned int i;
-
- if (!node)
- return;
-
- switch (node->type) {
- case P_CAT:
- compile(prog, node->x);
- compile(prog, node->y);
- break;
-
- case P_ALT:
- split = emit(prog, I_SPLIT);
- compile(prog, node->x);
- jump = emit(prog, I_JUMP);
- compile(prog, node->y);
- split->x = split + 1;
- split->y = jump + 1;
- jump->x = prog->end;
- break;
-
- case P_REP:
- for (i = 0; i < node->m; ++i) {
- inst = prog->end;
- compile(prog, node->x);
- }
- if (node->m == node->n)
- break;
- if (node->n < REPINF) {
- for (i = node->m; i < node->n; ++i) {
- split = emit(prog, I_SPLIT);
- compile(prog, node->x);
- if (node->ng) {
- split->y = split + 1;
- split->x = prog->end;
- } else {
- split->x = split + 1;
- split->y = prog->end;
- }
- }
- } else if (node->m == 0) {
- split = emit(prog, I_SPLIT);
- compile(prog, node->x);
- jump = emit(prog, I_JUMP);
- if (node->ng) {
- split->y = split + 1;
- split->x = prog->end;
- } else {
- split->x = split + 1;
- split->y = prog->end;
- }
- jump->x = split;
- } else {
- split = emit(prog, I_SPLIT);
- if (node->ng) {
- split->y = inst;
- split->x = prog->end;
- } else {
- split->x = inst;
- split->y = prog->end;
- }
- }
- break;
-
- case P_BOL:
- emit(prog, I_BOL);
- break;
- case P_EOL:
- emit(prog, I_EOL);
- break;
- case P_WORD:
- emit(prog, I_WORD);
- break;
- case P_NWORD:
- emit(prog, I_NWORD);
- break;
-
- case P_PAR:
- inst = emit(prog, I_LPAR);
- inst->n = node->n;
- compile(prog, node->x);
- inst = emit(prog, I_RPAR);
- inst->n = node->n;
- break;
- case P_PLA:
- split = emit(prog, I_PLA);
- compile(prog, node->x);
- emit(prog, I_END);
- split->x = split + 1;
- split->y = prog->end;
- break;
- case P_NLA:
- split = emit(prog, I_NLA);
- compile(prog, node->x);
- emit(prog, I_END);
- split->x = split + 1;
- split->y = prog->end;
- break;
-
- case P_ANY:
- emit(prog, I_ANY);
- break;
- case P_CHAR:
- inst = emit(prog, I_CHAR);
- inst->c = (prog->flags & REG_ICASE) ? canon(node->c) : node->c;
- break;
- case P_CCLASS:
- inst = emit(prog, I_CCLASS);
- inst->cc = node->cc;
- break;
- case P_NCCLASS:
- inst = emit(prog, I_NCCLASS);
- inst->cc = node->cc;
- break;
- case P_REF:
- inst = emit(prog, I_REF);
- inst->n = node->n;
- break;
- }
-}
-
-#ifdef TEST
-static void dumpnode(Renode *node) {
- Rune *p;
- if (!node) {
- printf("Empty");
- return;
- }
- switch (node->type) {
- case P_CAT:
- printf("Cat(");
- dumpnode(node->x);
- printf(", ");
- dumpnode(node->y);
- printf(")");
- break;
- case P_ALT:
- printf("Alt(");
- dumpnode(node->x);
- printf(", ");
- dumpnode(node->y);
- printf(")");
- break;
- case P_REP:
- printf(node->ng ? "NgRep(%d,%d," : "Rep(%d,%d,", node->m,
- node->n);
- dumpnode(node->x);
- printf(")");
- break;
- case P_BOL:
- printf("Bol");
- break;
- case P_EOL:
- printf("Eol");
- break;
- case P_WORD:
- printf("Word");
- break;
- case P_NWORD:
- printf("NotWord");
- break;
- case P_PAR:
- printf("Par(%d,", node->n);
- dumpnode(node->x);
- printf(")");
- break;
- case P_PLA:
- printf("PLA(");
- dumpnode(node->x);
- printf(")");
- break;
- case P_NLA:
- printf("NLA(");
- dumpnode(node->x);
- printf(")");
- break;
- case P_ANY:
- printf("Any");
- break;
- case P_CHAR:
- printf("Char(%c)", node->c);
- break;
- case P_CCLASS:
- printf("Class(");
- for (p = node->cc->spans; p < node->cc->end; p += 2)
- printf("%02X-%02X,", p[0], p[1]);
- printf(")");
- break;
- case P_NCCLASS:
- printf("NotClass(");
- for (p = node->cc->spans; p < node->cc->end; p += 2)
- printf("%02X-%02X,", p[0], p[1]);
- printf(")");
- break;
- case P_REF:
- printf("Ref(%d)", node->n);
- break;
- }
-}
-
-static void dumpprog(Reprog *prog) {
- Reinst *inst;
- int i;
- for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) {
- printf("% 5d: ", i);
- switch (inst->opcode) {
- case I_END:
- puts("end");
- break;
- case I_JUMP:
- printf("jump %d\n", (int)(inst->x - prog->start));
- break;
- case I_SPLIT:
- printf("split %d %d\n", (int)(inst->x - prog->start),
- (int)(inst->y - prog->start));
- break;
- case I_PLA:
- printf("pla %d %d\n", (int)(inst->x - prog->start),
- (int)(inst->y - prog->start));
- break;
- case I_NLA:
- printf("nla %d %d\n", (int)(inst->x - prog->start),
- (int)(inst->y - prog->start));
- break;
- case I_ANY:
- puts("any");
- break;
- case I_ANYNL:
- puts("anynl");
- break;
- case I_CHAR:
- printf(inst->c >= 32 && inst->c < 127 ? "char '%c'\n"
- : "char U+%04X\n",
- inst->c);
- break;
- case I_CCLASS:
- puts("cclass");
- break;
- case I_NCCLASS:
- puts("ncclass");
- break;
- case I_REF:
- printf("ref %d\n", inst->n);
- break;
- case I_BOL:
- puts("bol");
- break;
- case I_EOL:
- puts("eol");
- break;
- case I_WORD:
- puts("word");
- break;
- case I_NWORD:
- puts("nword");
- break;
- case I_LPAR:
- printf("lpar %d\n", inst->n);
- break;
- case I_RPAR:
- printf("rpar %d\n", inst->n);
- break;
- }
- }
-}
-#endif
-
-Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) {
- Reprog *prog;
- Restate *g;
- Renode *node;
- Reinst *split, *jump;
- int i;
- unsigned int ncount;
- size_t pattern_len = strlen(pattern);
-
- if (pattern_len > 10000) {
- /* Avoid stack exhaustion in recursive parseatom() et al. */
- if (errorp)
- *errorp = "regexp pattern too long (max 10000)";
- return NULL;
- }
-
- prog = rd_calloc(1, sizeof(Reprog));
- g = &prog->g;
- g->prog = prog;
- g->pstart = g->pend = rd_malloc(sizeof(Renode) * pattern_len * 2);
-
- if (setjmp(g->kaboom)) {
- if (errorp)
- *errorp = g->error;
- rd_free(g->pstart);
- rd_free(prog);
- return NULL;
- }
-
- g->source = pattern;
- g->ncclass = 0;
- g->nsub = 1;
- for (i = 0; i < MAXSUB; ++i)
- g->sub[i] = 0;
-
- g->prog->flags = cflags;
-
- next(g);
- node = parsealt(g);
- if (g->lookahead == ')')
- die(g, "unmatched ')'");
- if (g->lookahead != 0)
- die(g, "syntax error");
-
- g->prog->nsub = g->nsub;
- ncount = count(node);
- if (ncount > 10000)
- die(g, "regexp graph too large");
- g->prog->start = g->prog->end =
- rd_malloc((ncount + 6) * sizeof(Reinst));
-
- split = emit(g->prog, I_SPLIT);
- split->x = split + 3;
- split->y = split + 1;
- emit(g->prog, I_ANYNL);
- jump = emit(g->prog, I_JUMP);
- jump->x = split;
- emit(g->prog, I_LPAR);
- compile(g->prog, node);
- emit(g->prog, I_RPAR);
- emit(g->prog, I_END);
-
-#ifdef TEST
- dumpnode(node);
- putchar('\n');
- dumpprog(g->prog);
-#endif
-
- rd_free(g->pstart);
-
- if (errorp)
- *errorp = NULL;
- return g->prog;
-}
-
-void re_regfree(Reprog *prog) {
- if (prog) {
- rd_free(prog->start);
- rd_free(prog);
- }
-}
-
-/* Match */
-
-static int isnewline(int c) {
- return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029;
-}
-
-static int iswordchar(int c) {
- return c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
- (c >= '0' && c <= '9');
-}
-
-static int incclass(Reclass *cc, Rune c) {
- Rune *p;
- for (p = cc->spans; p < cc->end; p += 2)
- if (p[0] <= c && c <= p[1])
- return 1;
- return 0;
-}
-
-static int incclasscanon(Reclass *cc, Rune c) {
- Rune *p, r;
- for (p = cc->spans; p < cc->end; p += 2)
- for (r = p[0]; r <= p[1]; ++r)
- if (c == canon(r))
- return 1;
- return 0;
-}
-
-static int strncmpcanon(const char *a, const char *b, unsigned int n) {
- Rune ra, rb;
- int c;
- while (n--) {
- if (!*a)
- return -1;
- if (!*b)
- return 1;
- a += chartorune(&ra, a);
- b += chartorune(&rb, b);
- c = canon(ra) - canon(rb);
- if (c)
- return c;
- }
- return 0;
-}
-
-struct Rethread {
- Reinst *pc;
- const char *sp;
- Resub sub;
-};
-
-static void spawn(Rethread *t, Reinst *pc, const char *sp, Resub *sub) {
- t->pc = pc;
- t->sp = sp;
- memcpy(&t->sub, sub, sizeof t->sub);
-}
-
-static int
-match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out) {
- Rethread ready[MAXTHREAD];
- Resub scratch;
- Resub sub;
- Rune c;
- unsigned int nready;
- int i;
-
- /* queue initial thread */
- spawn(ready + 0, pc, sp, out);
- nready = 1;
-
- /* run threads in stack order */
- while (nready > 0) {
- --nready;
- pc = ready[nready].pc;
- sp = ready[nready].sp;
- memcpy(&sub, &ready[nready].sub, sizeof sub);
- for (;;) {
- switch (pc->opcode) {
- case I_END:
- for (i = 0; i < MAXSUB; ++i) {
- out->sub[i].sp = sub.sub[i].sp;
- out->sub[i].ep = sub.sub[i].ep;
- }
- return 1;
- case I_JUMP:
- pc = pc->x;
- continue;
- case I_SPLIT:
- if (nready >= MAXTHREAD) {
- fprintf(
- stderr,
- "regexec: backtrack overflow!\n");
- return 0;
- }
- spawn(&ready[nready++], pc->y, sp, &sub);
- pc = pc->x;
- continue;
-
- case I_PLA:
- if (!match(pc->x, sp, bol, flags, &sub))
- goto dead;
- pc = pc->y;
- continue;
- case I_NLA:
- memcpy(&scratch, &sub, sizeof scratch);
- if (match(pc->x, sp, bol, flags, &scratch))
- goto dead;
- pc = pc->y;
- continue;
-
- case I_ANYNL:
- sp += chartorune(&c, sp);
- if (c == 0)
- goto dead;
- break;
- case I_ANY:
- sp += chartorune(&c, sp);
- if (c == 0)
- goto dead;
- if (isnewline(c))
- goto dead;
- break;
- case I_CHAR:
- sp += chartorune(&c, sp);
- if (c == 0)
- goto dead;
- if (flags & REG_ICASE)
- c = canon(c);
- if (c != pc->c)
- goto dead;
- break;
- case I_CCLASS:
- sp += chartorune(&c, sp);
- if (c == 0)
- goto dead;
- if (flags & REG_ICASE) {
- if (!incclasscanon(pc->cc, canon(c)))
- goto dead;
- } else {
- if (!incclass(pc->cc, c))
- goto dead;
- }
- break;
- case I_NCCLASS:
- sp += chartorune(&c, sp);
- if (c == 0)
- goto dead;
- if (flags & REG_ICASE) {
- if (incclasscanon(pc->cc, canon(c)))
- goto dead;
- } else {
- if (incclass(pc->cc, c))
- goto dead;
- }
- break;
- case I_REF:
- i = (int)(sub.sub[pc->n].ep -
- sub.sub[pc->n].sp);
- if (flags & REG_ICASE) {
- if (strncmpcanon(sp, sub.sub[pc->n].sp,
- i))
- goto dead;
- } else {
- if (strncmp(sp, sub.sub[pc->n].sp, i))
- goto dead;
- }
- if (i > 0)
- sp += i;
- break;
-
- case I_BOL:
- if (sp == bol && !(flags & REG_NOTBOL))
- break;
- if (flags & REG_NEWLINE)
- if (sp > bol && isnewline(sp[-1]))
- break;
- goto dead;
- case I_EOL:
- if (*sp == 0)
- break;
- if (flags & REG_NEWLINE)
- if (isnewline(*sp))
- break;
- goto dead;
- case I_WORD:
- i = sp > bol && iswordchar(sp[-1]);
- i ^= iswordchar(sp[0]);
- if (i)
- break;
- goto dead;
- case I_NWORD:
- i = sp > bol && iswordchar(sp[-1]);
- i ^= iswordchar(sp[0]);
- if (!i)
- break;
- goto dead;
-
- case I_LPAR:
- sub.sub[pc->n].sp = sp;
- break;
- case I_RPAR:
- sub.sub[pc->n].ep = sp;
- break;
- default:
- goto dead;
- }
- pc = pc + 1;
- }
- dead:;
- }
- return 0;
-}
-
-int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) {
- Resub scratch;
- int i;
-
- if (!sub)
- sub = &scratch;
-
- sub->nsub = prog->nsub;
- for (i = 0; i < MAXSUB; ++i)
- sub->sub[i].sp = sub->sub[i].ep = NULL;
-
- return !match(prog->start, sp, sp, prog->flags | eflags, sub);
-}
-
-#ifdef TEST
-int main(int argc, char **argv) {
- const char *error;
- const char *s;
- Reprog *p;
- Resub m;
- unsigned int i;
-
- if (argc > 1) {
- p = re_regcomp(argv[1], 0, &error);
- if (!p) {
- fprintf(stderr, "regcomp: %s\n", error);
- return 1;
- }
-
- if (argc > 2) {
- s = argv[2];
- printf("nsub = %d\n", p->nsub);
- if (!regexec(p, s, &m, 0)) {
- for (i = 0; i < m.nsub; ++i) {
- int n = m.sub[i].ep - m.sub[i].sp;
- if (n > 0)
- printf(
- "match %d: s=%d e=%d n=%d "
- "'%.*s'\n",
- i, (int)(m.sub[i].sp - s),
- (int)(m.sub[i].ep - s), n,
- n, m.sub[i].sp);
- else
- printf("match %d: n=0 ''\n", i);
- }
- } else {
- printf("no match\n");
- }
- }
- }
-
- return 0;
-}
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h b/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h
deleted file mode 100644
index 3fd225071..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright: public domain
- *
- * From https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684:
- *
- * These libraries are in the public domain (or the equivalent where that is not possible).
- * You can do anything you want with them. You have no legal obligation to do anything else,
- * although I appreciate attribution.
- */
-
-#ifndef regexp_h
-#define regexp_h
-
-typedef struct Reprog Reprog;
-typedef struct Resub Resub;
-
-Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp);
-int re_regexec(Reprog *prog, const char *string, Resub *sub, int eflags);
-void re_regfree(Reprog *prog);
-
-enum {
- /* regcomp flags */
- REG_ICASE = 1,
- REG_NEWLINE = 2,
-
- /* regexec flags */
- REG_NOTBOL = 4,
-
- /* limits */
- REG_MAXSUB = 16
-};
-
-struct Resub {
- unsigned int nsub;
- struct {
- const char *sp;
- const char *ep;
- } sub[REG_MAXSUB];
-};
-
-#endif
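
A usage sketch for this API (illustration only, not part of the original
header; assumes <stdio.h> is included):

    const char *err = NULL;
    Reprog *p = re_regcomp("h(el+)o", REG_ICASE, &err);
    if (p != NULL) {
            Resub m;
            /* re_regexec() returns 0 on a match */
            if (re_regexec(p, "Hello, world", &m, 0) == 0) {
                    /* m.sub[0] spans the whole match, m.sub[1] the capture */
                    int n = (int)(m.sub[1].ep - m.sub[1].sp);
                    printf("capture 1: %.*s\n", n, m.sub[1].sp);
            }
            re_regfree(p);
    }
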
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c b/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c
deleted file mode 100644
index e3988b186..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c
+++ /dev/null
@@ -1,1866 +0,0 @@
-/*
- * C port of the snappy compressor from Google.
- * This is a very fast compressor with a compression ratio comparable to LZO.
- * Works best on 64-bit little-endian platforms, but should be good on others
- * too.
- * Ported by Andi Kleen.
- * Up to date with snappy 1.1.0.
- */
-
-/*
- * Copyright 2005 Google Inc. All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-align"
-#endif
-
-#ifndef SG
-#define SG /* Scatter-Gather / iovec support in Snappy */
-#endif
-
-#ifdef __KERNEL__
-#include <linux/kernel.h>
-#ifdef SG
-#include <linux/uio.h>
-#endif
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/snappy.h>
-#include <linux/vmalloc.h>
-#include <asm/unaligned.h>
-#else
-#include "snappy.h"
-#include "snappy_compat.h"
-#endif
-
-#include "rd.h"
-
-#ifdef _MSC_VER
-#define inline __inline
-#endif
-
-static inline u64 get_unaligned64(const void *b)
-{
- u64 ret;
- memcpy(&ret, b, sizeof(u64));
- return ret;
-}
-static inline u32 get_unaligned32(const void *b)
-{
- u32 ret;
- memcpy(&ret, b, sizeof(u32));
- return ret;
-}
-#define get_unaligned_le32(x) (le32toh(get_unaligned32((u32 *)(x))))
-
-static inline void put_unaligned64(u64 v, void *b)
-{
- memcpy(b, &v, sizeof(v));
-}
-static inline void put_unaligned32(u32 v, void *b)
-{
- memcpy(b, &v, sizeof(v));
-}
-static inline void put_unaligned16(u16 v, void *b)
-{
- memcpy(b, &v, sizeof(v));
-}
-#define put_unaligned_le16(v,x) (put_unaligned16(htole16(v), (u16 *)(x)))
-
-
-#define CRASH_UNLESS(x) BUG_ON(!(x))
-#define CHECK(cond) CRASH_UNLESS(cond)
-#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
-#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
-#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
-#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
-#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
-#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
-
-#define UNALIGNED_LOAD32(_p) get_unaligned32((u32 *)(_p))
-#define UNALIGNED_LOAD64(_p) get_unaligned64((u64 *)(_p))
-
-#define UNALIGNED_STORE16(_p, _val) put_unaligned16(_val, (u16 *)(_p))
-#define UNALIGNED_STORE32(_p, _val) put_unaligned32(_val, (u32 *)(_p))
-#define UNALIGNED_STORE64(_p, _val) put_unaligned64(_val, (u64 *)(_p))
-
-/*
- * This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
- * on some platforms, in particular ARM.
- */
-static inline void unaligned_copy64(const void *src, void *dst)
-{
- if (sizeof(void *) == 8) {
- UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
- } else {
- const char *src_char = (const char *)(src);
- char *dst_char = (char *)(dst);
-
- UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
- UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
- }
-}
-
-#ifdef NDEBUG
-
-#define DCHECK(cond) do {} while(0)
-#define DCHECK_LE(a, b) do {} while(0)
-#define DCHECK_GE(a, b) do {} while(0)
-#define DCHECK_EQ(a, b) do {} while(0)
-#define DCHECK_NE(a, b) do {} while(0)
-#define DCHECK_LT(a, b) do {} while(0)
-#define DCHECK_GT(a, b) do {} while(0)
-
-#else
-
-#define DCHECK(cond) CHECK(cond)
-#define DCHECK_LE(a, b) CHECK_LE(a, b)
-#define DCHECK_GE(a, b) CHECK_GE(a, b)
-#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
-#define DCHECK_NE(a, b) CHECK_NE(a, b)
-#define DCHECK_LT(a, b) CHECK_LT(a, b)
-#define DCHECK_GT(a, b) CHECK_GT(a, b)
-
-#endif
-
-static inline bool is_little_endian(void)
-{
-#ifdef __LITTLE_ENDIAN__
- return true;
-#endif
- return false;
-}
-
-#if defined(__xlc__) // xlc compiler on AIX
-#define rd_clz(n) __cntlz4(n)
-#define rd_ctz(n) __cnttz4(n)
-#define rd_ctz64(n) __cnttz8(n)
-
-#elif defined(__SUNPRO_C) // Solaris Studio compiler on sun
-/*
- * The source for the following definitions is Hacker’s Delight, Second Edition, by Henry S. Warren:
- * http://www.hackersdelight.org/permissions.htm
- */
-u32 rd_clz(u32 x) {
- u32 n;
-
- if (x == 0) return(32);
- n = 1;
- if ((x >> 16) == 0) {n = n +16; x = x <<16;}
- if ((x >> 24) == 0) {n = n + 8; x = x << 8;}
- if ((x >> 28) == 0) {n = n + 4; x = x << 4;}
- if ((x >> 30) == 0) {n = n + 2; x = x << 2;}
- n = n - (x >> 31);
- return n;
-}
-
-u32 rd_ctz(u32 x) {
- u32 y;
- u32 n;
-
- if (x == 0) return 32;
- n = 31;
- y = x <<16; if (y != 0) {n = n -16; x = y;}
- y = x << 8; if (y != 0) {n = n - 8; x = y;}
- y = x << 4; if (y != 0) {n = n - 4; x = y;}
- y = x << 2; if (y != 0) {n = n - 2; x = y;}
- y = x << 1; if (y != 0) {n = n - 1;}
- return n;
-}
-
-u64 rd_ctz64(u64 x) {
- u64 y;
- u64 n;
-
- if (x == 0) return 64;
- n = 63;
- y = x <<32; if (y != 0) {n = n -32; x = y;}
- y = x <<16; if (y != 0) {n = n -16; x = y;}
- y = x << 8; if (y != 0) {n = n - 8; x = y;}
- y = x << 4; if (y != 0) {n = n - 4; x = y;}
- y = x << 2; if (y != 0) {n = n - 2; x = y;}
- y = x << 1; if (y != 0) {n = n - 1;}
- return n;
-}
-#elif !defined(_MSC_VER)
-#define rd_clz(n) __builtin_clz(n)
-#define rd_ctz(n) __builtin_ctz(n)
-#define rd_ctz64(n) __builtin_ctzll(n)
-#else
-#include <intrin.h>
-static int inline rd_clz(u32 x) {
- int r = 0;
- /* count leading zeros: _BitScanReverse() finds the highest set bit */
- if (_BitScanReverse(&r, x))
- return 31 - r;
- else
- return 32;
-}
-
-static int inline rd_ctz(u32 x) {
- int r = 0;
- if (_BitScanForward(&r, x))
- return r;
- else
- return 32;
-}
-
-static int inline rd_ctz64(u64 x) {
-#ifdef _M_X64
- int r = 0;
- /* count trailing zeros: _BitScanForward64() finds the lowest set bit */
- if (_BitScanForward64(&r, x))
- return r;
- else
- return 64;
-#else
- int r;
- if ((r = rd_ctz(x & 0xffffffff)) < 32)
- return r;
- return 32 + rd_ctz(x >> 32);
-#endif
-}
-#endif
-
-
-static inline int log2_floor(u32 n)
-{
- return n == 0 ? -1 : 31 ^ rd_clz(n);
-}
-
-static inline RD_UNUSED int find_lsb_set_non_zero(u32 n)
-{
- return rd_ctz(n);
-}
-
-static inline RD_UNUSED int find_lsb_set_non_zero64(u64 n)
-{
- return rd_ctz64(n);
-}
-
-#define kmax32 5
-
-/*
- * Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
- * Never reads a character at or beyond limit. If a valid/terminated varint32
- * was found in the range, stores it in *OUTPUT and returns a pointer just
- * past the last byte of the varint32. Else returns NULL. On success,
- * "result <= limit".
- */
-static inline const char *varint_parse32_with_limit(const char *p,
- const char *l,
- u32 * OUTPUT)
-{
- const unsigned char *ptr = (const unsigned char *)(p);
- const unsigned char *limit = (const unsigned char *)(l);
- u32 b, result;
-
- if (ptr >= limit)
- return NULL;
- b = *(ptr++);
- result = b & 127;
- if (b < 128)
- goto done;
- if (ptr >= limit)
- return NULL;
- b = *(ptr++);
- result |= (b & 127) << 7;
- if (b < 128)
- goto done;
- if (ptr >= limit)
- return NULL;
- b = *(ptr++);
- result |= (b & 127) << 14;
- if (b < 128)
- goto done;
- if (ptr >= limit)
- return NULL;
- b = *(ptr++);
- result |= (b & 127) << 21;
- if (b < 128)
- goto done;
- if (ptr >= limit)
- return NULL;
- b = *(ptr++);
- result |= (b & 127) << 28;
- if (b < 16)
- goto done;
- return NULL; /* Value is too long to be a varint32 */
-done:
- *OUTPUT = result;
- return (const char *)(ptr);
-}
-
-/*
- * REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
- * EFFECTS Encodes "v" into "ptr" and returns a pointer to the
- * byte just past the last encoded byte.
- */
-static inline char *varint_encode32(char *sptr, u32 v)
-{
- /* Operate on characters as unsigneds */
- unsigned char *ptr = (unsigned char *)(sptr);
- static const int B = 128;
-
- if (v < (1 << 7)) {
- *(ptr++) = v;
- } else if (v < (1 << 14)) {
- *(ptr++) = v | B;
- *(ptr++) = v >> 7;
- } else if (v < (1 << 21)) {
- *(ptr++) = v | B;
- *(ptr++) = (v >> 7) | B;
- *(ptr++) = v >> 14;
- } else if (v < (1 << 28)) {
- *(ptr++) = v | B;
- *(ptr++) = (v >> 7) | B;
- *(ptr++) = (v >> 14) | B;
- *(ptr++) = v >> 21;
- } else {
- *(ptr++) = v | B;
- *(ptr++) = (v >> 7) | B;
- *(ptr++) = (v >> 14) | B;
- *(ptr++) = (v >> 21) | B;
- *(ptr++) = v >> 28;
- }
- return (char *)(ptr);
-}
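
A round-trip sketch of the two varint helpers above (illustration only):

    char buf[kmax32];
    u32 v = 300, out = 0;
    char *end = varint_encode32(buf, v);    /* 300 encodes as 0xAC 0x02 */
    const char *next = varint_parse32_with_limit(buf, end, &out);
    /* on success next == end and out == 300; NULL means malformed input */
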
-
-#ifdef SG
-
-static inline void *n_bytes_after_addr(void *addr, size_t n_bytes)
-{
- return (void *) ((char *)addr + n_bytes);
-}
-
-struct source {
- struct iovec *iov;
- int iovlen;
- int curvec;
- int curoff;
- size_t total;
-};
-
-/* Only valid at the beginning, when nothing has been consumed */
-static inline int available(struct source *s)
-{
- return (int) s->total;
-}
-
-static inline const char *peek(struct source *s, size_t *len)
-{
- if (likely(s->curvec < s->iovlen)) {
- struct iovec *iv = &s->iov[s->curvec];
- if ((unsigned)s->curoff < (size_t)iv->iov_len) {
- *len = iv->iov_len - s->curoff;
- return n_bytes_after_addr(iv->iov_base, s->curoff);
- }
- }
- *len = 0;
- return NULL;
-}
-
-static inline void skip(struct source *s, size_t n)
-{
- struct iovec *iv = &s->iov[s->curvec];
- s->curoff += (int) n;
- DCHECK_LE((unsigned)s->curoff, (size_t)iv->iov_len);
- if ((unsigned)s->curoff >= (size_t)iv->iov_len &&
- s->curvec + 1 < s->iovlen) {
- s->curoff = 0;
- s->curvec++;
- }
-}
-
-struct sink {
- struct iovec *iov;
- int iovlen;
- unsigned curvec;
- unsigned curoff;
- unsigned written;
-};
-
-static inline void append(struct sink *s, const char *data, size_t n)
-{
- struct iovec *iov = &s->iov[s->curvec];
- char *dst = n_bytes_after_addr(iov->iov_base, s->curoff);
- size_t nlen = min_t(size_t, iov->iov_len - s->curoff, n);
- if (data != dst)
- memcpy(dst, data, nlen);
- s->written += (int) n;
- s->curoff += (int) nlen;
- while ((n -= nlen) > 0) {
- data += nlen;
- s->curvec++;
- DCHECK_LT((signed)s->curvec, s->iovlen);
- iov++;
- nlen = min_t(size_t, (size_t)iov->iov_len, n);
- memcpy(iov->iov_base, data, nlen);
- s->curoff = (int) nlen;
- }
-}
-
-static inline void *sink_peek(struct sink *s, size_t n)
-{
- struct iovec *iov = &s->iov[s->curvec];
- if (s->curvec < (unsigned)s->iovlen && iov->iov_len - s->curoff >= n)
- return n_bytes_after_addr(iov->iov_base, s->curoff);
- return NULL;
-}
-
-#else
-
-struct source {
- const char *ptr;
- size_t left;
-};
-
-static inline int available(struct source *s)
-{
- return s->left;
-}
-
-static inline const char *peek(struct source *s, size_t * len)
-{
- *len = s->left;
- return s->ptr;
-}
-
-static inline void skip(struct source *s, size_t n)
-{
- s->left -= n;
- s->ptr += n;
-}
-
-struct sink {
- char *dest;
-};
-
-static inline void append(struct sink *s, const char *data, size_t n)
-{
- if (data != s->dest)
- memcpy(s->dest, data, n);
- s->dest += n;
-}
-
-#define sink_peek(s, n) sink_peek_no_sg(s)
-
-static inline void *sink_peek_no_sg(const struct sink *s)
-{
- return s->dest;
-}
-
-#endif
-
-struct writer {
- char *base;
- char *op;
- char *op_limit;
-};
-
-/* Called before decompression */
-static inline void writer_set_expected_length(struct writer *w, size_t len)
-{
- w->op_limit = w->op + len;
-}
-
-/* Called after decompression */
-static inline bool writer_check_length(struct writer *w)
-{
- return w->op == w->op_limit;
-}
-
-/*
- * Copy "len" bytes from "src" to "op", one byte at a time. Used for
- * handling COPY operations where the input and output regions may
- * overlap. For example, suppose:
- * src == "ab"
- * op == src + 2
- * len == 20
- * After IncrementalCopy(src, op, len), the result will have
- * eleven copies of "ab"
- * ababababababababababab
- * Note that this does not match the semantics of either memcpy()
- * or memmove().
- */
-static inline void incremental_copy(const char *src, char *op, ssize_t len)
-{
- DCHECK_GT(len, 0);
- do {
- *op++ = *src++;
- } while (--len > 0);
-}
-
-/*
- * Equivalent to IncrementalCopy except that it can write up to ten extra
- * bytes after the end of the copy, and that it is faster.
- *
- * The main part of this loop is a simple copy of eight bytes at a time until
- * we've copied (at least) the requested amount of bytes. However, if op and
- * src are less than eight bytes apart (indicating a repeating pattern of
- * length < 8), we first need to expand the pattern in order to get the correct
- * results. For instance, if the buffer looks like this, with the eight-byte
- * <src> and <op> patterns marked as intervals:
- *
- * abxxxxxxxxxxxx
- * [------] src
- * [------] op
- *
- * a single eight-byte copy from <src> to <op> will repeat the pattern once,
- * after which we can move <op> two bytes without moving <src>:
- *
- * ababxxxxxxxxxx
- * [------] src
- * [------] op
- *
- * and repeat the exercise until the two no longer overlap.
- *
- * This allows us to do very well in the special case of one single byte
- * repeated many times, without taking a big hit for more general cases.
- *
- * The worst case of extra writing past the end of the match occurs when
- * op - src == 1 and len == 1; the last copy will read from byte positions
- * [0..7] and write to [4..11], whereas it was only supposed to write to
- * position 1. Thus, ten excess bytes.
- */
-
-#define kmax_increment_copy_overflow 10
-
-static inline void incremental_copy_fast_path(const char *src, char *op,
- ssize_t len)
-{
- while (op - src < 8) {
- unaligned_copy64(src, op);
- len -= op - src;
- op += op - src;
- }
- while (len > 0) {
- unaligned_copy64(src, op);
- src += 8;
- op += 8;
- len -= 8;
- }
-}
-
-static inline bool writer_append_from_self(struct writer *w, u32 offset,
- u32 len)
-{
- char *const op = w->op;
- CHECK_LE(op, w->op_limit);
- const u32 space_left = (u32) (w->op_limit - op);
-
- if ((unsigned)(op - w->base) <= offset - 1u) /* -1u catches offset==0 */
- return false;
- if (len <= 16 && offset >= 8 && space_left >= 16) {
- /* Fast path, used for the majority (70-80%) of dynamic
- * invocations. */
- unaligned_copy64(op - offset, op);
- unaligned_copy64(op - offset + 8, op + 8);
- } else {
- if (space_left >= len + kmax_increment_copy_overflow) {
- incremental_copy_fast_path(op - offset, op, len);
- } else {
- if (space_left < len) {
- return false;
- }
- incremental_copy(op - offset, op, len);
- }
- }
-
- w->op = op + len;
- return true;
-}
-
-static inline bool writer_append(struct writer *w, const char *ip, u32 len)
-{
- char *const op = w->op;
- CHECK_LE(op, w->op_limit);
- const u32 space_left = (u32) (w->op_limit - op);
- if (space_left < len)
- return false;
- memcpy(op, ip, len);
- w->op = op + len;
- return true;
-}
-
-static inline bool writer_try_fast_append(struct writer *w, const char *ip,
- u32 available_bytes, u32 len)
-{
- char *const op = w->op;
- const int space_left = (int) (w->op_limit - op);
- if (len <= 16 && available_bytes >= 16 && space_left >= 16) {
- /* Fast path, used for the majority (~95%) of invocations */
- unaligned_copy64(ip, op);
- unaligned_copy64(ip + 8, op + 8);
- w->op = op + len;
- return true;
- }
- return false;
-}
-
-/*
- * Any hash function will produce a valid compressed bitstream, but a good
- * hash function reduces the number of collisions and thus yields better
- * compression for compressible input, and more speed for incompressible
- * input. Of course, it doesn't hurt if the hash function is reasonably fast
- * either, as it gets called a lot.
- */
-static inline u32 hash_bytes(u32 bytes, int shift)
-{
- u32 kmul = 0x1e35a7bd;
- return (bytes * kmul) >> shift;
-}
-
-static inline u32 hash(const char *p, int shift)
-{
- return hash_bytes(UNALIGNED_LOAD32(p), shift);
-}
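-
-/*
- * For example, with a 4096-entry table, compress_fragment() below computes
- * shift == 32 - 12 == 20, so the top 12 bits of (bytes * kmul) select the
- * hash bucket.
- */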
-
-/*
- * Compressed data can be defined as:
- * compressed := item* literal*
- * item := literal* copy
- *
- * The trailing literal sequence has a space blowup of at most 62/60
- * since a literal of length 60 needs one tag byte + one extra byte
- * for length information.
- *
- * Item blowup is trickier to measure. Suppose the "copy" op copies
- * 4 bytes of data. Because of a special check in the encoding code,
- * we produce a 4-byte copy only if the offset is < 65536. Therefore
- * the copy op takes 3 bytes to encode, and this type of item leads
- * to at most the 62/60 blowup for representing literals.
- *
- * Suppose the "copy" op copies 5 bytes of data. If the offset is big
- * enough, it will take 5 bytes to encode the copy op. Therefore the
- * worst case here is a one-byte literal followed by a five-byte copy.
- * I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
- *
- * This last factor dominates the blowup, so the final estimate is:
- */
-size_t rd_kafka_snappy_max_compressed_length(size_t source_len)
-{
- return 32 + source_len + source_len / 6;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_max_compressed_length);
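-
-/*
- * E.g. for a full 64 KiB block the bound is 32 + 65536 + 65536/6 = 76490
- * bytes; sn_compress() below reserves this much output space per block.
- */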
-
-enum {
- LITERAL = 0,
- COPY_1_BYTE_OFFSET = 1, /* 3 bit length + 3 bits of offset in opcode */
- COPY_2_BYTE_OFFSET = 2,
- COPY_4_BYTE_OFFSET = 3
-};
-
-static inline char *emit_literal(char *op,
- const char *literal,
- int len, bool allow_fast_path)
-{
- int n = len - 1; /* Zero-length literals are disallowed */
-
- if (n < 60) {
- /* Fits in tag byte */
- *op++ = LITERAL | (n << 2);
-
-/*
- * The vast majority of copies are below 16 bytes, for which a
- * call to memcpy is overkill. This fast path can sometimes
- * copy up to 15 bytes too much, but that is okay in the
- * main loop, since we have a bit to go on for both sides:
- *
- * - The input will always have kInputMarginBytes = 15 extra
- * available bytes, as long as we're in the main loop, and
- * if not, allow_fast_path = false.
- * - The output will always have 32 spare bytes (see
- * MaxCompressedLength).
- */
- if (allow_fast_path && len <= 16) {
- unaligned_copy64(literal, op);
- unaligned_copy64(literal + 8, op + 8);
- return op + len;
- }
- } else {
- /* Encode in upcoming bytes */
- char *base = op;
- int count = 0;
- op++;
- while (n > 0) {
- *op++ = n & 0xff;
- n >>= 8;
- count++;
- }
- DCHECK(count >= 1);
- DCHECK(count <= 4);
- *base = LITERAL | ((59 + count) << 2);
- }
- memcpy(op, literal, len);
- return op + len;
-}
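-
-/*
- * Encoding sketch: a 3-byte literal "abc" becomes the tag byte
- * LITERAL | (2 << 2) == 0x08 followed by "abc"; a 100-byte literal takes
- * the long form: tag LITERAL | (60 << 2) == 0xf0, one length byte 99
- * (len - 1), then the 100 payload bytes.
- */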
-
-static inline char *emit_copy_less_than64(char *op, int offset, int len)
-{
- DCHECK_LE(len, 64);
- DCHECK_GE(len, 4);
- DCHECK_LT(offset, 65536);
-
- if ((len < 12) && (offset < 2048)) {
- int len_minus_4 = len - 4;
- DCHECK(len_minus_4 < 8); /* Must fit in 3 bits */
- *op++ =
- COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8)
- << 5);
- *op++ = offset & 0xff;
- } else {
- *op++ = COPY_2_BYTE_OFFSET + ((len - 1) << 2);
- put_unaligned_le16(offset, op);
- op += 2;
- }
- return op;
-}
-
-static inline char *emit_copy(char *op, int offset, int len)
-{
- /*
- * Emit 64 byte copies but make sure to keep at least four bytes
- * reserved
- */
- while (len >= 68) {
- op = emit_copy_less_than64(op, offset, 64);
- len -= 64;
- }
-
- /*
- * Emit an extra 60 byte copy if have too much data to fit in
- * one copy
- */
- if (len > 64) {
- op = emit_copy_less_than64(op, offset, 60);
- len -= 60;
- }
-
- /* Emit remainder */
- op = emit_copy_less_than64(op, offset, len);
- return op;
-}
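-
-/*
- * The thresholds above keep every emitted fragment legal: looping only
- * while len >= 68 means the remainder after each 64-byte copy stays >= 4,
- * and e.g. len == 67 instead emits 60 + 7, so no copy is ever shorter
- * than the 4-byte minimum emit_copy_less_than64() requires.
- */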
-
-/**
- * rd_kafka_snappy_uncompressed_length - return length of uncompressed output.
- * @start: compressed buffer
- * @n: length of compressed buffer.
- * @result: Write the length of the uncompressed output here.
- *
- * Returns true when successful, otherwise false.
- */
-bool rd_kafka_snappy_uncompressed_length(const char *start, size_t n, size_t * result)
-{
- u32 v = 0;
- const char *limit = start + n;
- if (varint_parse32_with_limit(start, limit, &v) != NULL) {
- *result = v;
- return true;
- } else {
- return false;
- }
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompressed_length);
-
-/*
- * The size of a compression block. Note that many parts of the compression
- * code assumes that kBlockSize <= 65536; in particular, the hash table
- * can only store 16-bit offsets, and EmitCopy() also assumes the offset
- * is 65535 bytes or less. Note also that if you change this, it will
- * affect the framing format
- * Note that there might be older data around that is compressed with larger
- * block sizes, so the decompression code should not rely on the
- * non-existence of long backreferences.
- */
-#define kblock_log 16
-#define kblock_size (1 << kblock_log)
-
-/*
- * This value could be halved or quartered to save memory
- * at the cost of slightly worse compression.
- */
-#define kmax_hash_table_bits 14
-#define kmax_hash_table_size (1U << kmax_hash_table_bits)
-
-/*
- * Use smaller hash table when input.size() is smaller, since we
- * fill the table, incurring O(hash table size) overhead for
- * compression, and if the input is short, we won't need that
- * many hash table entries anyway.
- */
-static u16 *get_hash_table(struct snappy_env *env, size_t input_size,
- int *table_size)
-{
- unsigned htsize = 256;
-
- DCHECK(kmax_hash_table_size >= 256);
- while (htsize < kmax_hash_table_size && htsize < input_size)
- htsize <<= 1;
- CHECK_EQ(0, htsize & (htsize - 1));
- CHECK_LE(htsize, kmax_hash_table_size);
-
- u16 *table;
- table = env->hash_table;
-
- *table_size = htsize;
- memset(table, 0, htsize * sizeof(*table));
- return table;
-}
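-
-/*
- * E.g. a 1000-byte input gets a 1024-entry table (the smallest power of
- * two >= the input size), while inputs of 16 KiB or more use the full
- * kmax_hash_table_size entries.
- */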
-
-/*
- * Return the largest n such that
- *
- * s1[0,n-1] == s2[0,n-1]
- * and n <= (s2_limit - s2).
- *
- * Does not read *s2_limit or beyond.
- * Does not read *(s1 + (s2_limit - s2)) or beyond.
- * Requires that s2_limit >= s2.
- *
- * Separate implementation for x86_64, for speed. Uses the fact that
- * x86_64 is little endian.
- */
-#if defined(__LITTLE_ENDIAN__) && BITS_PER_LONG == 64
-static inline int find_match_length(const char *s1,
- const char *s2, const char *s2_limit)
-{
- int matched = 0;
-
- DCHECK_GE(s2_limit, s2);
- /*
- * Find out how long the match is. We loop over the data 64 bits at a
- * time until we find a 64-bit block that doesn't match; then we find
- * the first non-matching bit and use that to calculate the total
- * length of the match.
- */
- while (likely(s2 <= s2_limit - 8)) {
- if (unlikely
- (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
- s2 += 8;
- matched += 8;
- } else {
- /*
- * On current (mid-2008) Opteron models there
- * is a 3% more efficient code sequence to
- * find the first non-matching byte. However,
- * what follows is ~10% better on Intel Core 2
- * and newer, and we expect AMD's bsf
- * instruction to improve.
- */
- u64 x =
- UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 +
- matched);
- int matching_bits = find_lsb_set_non_zero64(x);
- matched += matching_bits >> 3;
- return matched;
- }
- }
- while (likely(s2 < s2_limit)) {
- if (likely(s1[matched] == *s2)) {
- ++s2;
- ++matched;
- } else {
- return matched;
- }
- }
- return matched;
-}
-#else
-static inline int find_match_length(const char *s1,
- const char *s2, const char *s2_limit)
-{
- /* Implementation based on the x86-64 version, above. */
- DCHECK_GE(s2_limit, s2);
- int matched = 0;
-
- while (s2 <= s2_limit - 4 &&
- UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
- s2 += 4;
- matched += 4;
- }
- if (is_little_endian() && s2 <= s2_limit - 4) {
- u32 x =
- UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
- int matching_bits = find_lsb_set_non_zero(x);
- matched += matching_bits >> 3;
- } else {
- while ((s2 < s2_limit) && (s1[matched] == *s2)) {
- ++s2;
- ++matched;
- }
- }
- return matched;
-}
-#endif
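-
-/*
- * E.g. for the buffer "abcdabcdX" with s1 = buf and s2 = buf + 4,
- * find_match_length(s1, s2, buf + 9) returns 4: "abcd" matches, then
- * 'a' != 'X' ends the scan.
- */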
-
-/*
- * For 0 <= offset <= 4, GetU32AtOffset(GetEightBytesAt(p), offset) will
- * equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
- * empirically found that overlapping loads such as
- * UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
- * are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to u32.
- *
- * We have different versions for 64- and 32-bit; ideally we would avoid the
- * two functions and just inline the UNALIGNED_LOAD64 call into
- * GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
- * enough to avoid loading the value multiple times then. For 64-bit, the load
- * is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
- * done at GetUint32AtOffset() time.
- */
-
-#if BITS_PER_LONG == 64
-
-typedef u64 eight_bytes_reference;
-
-static inline eight_bytes_reference get_eight_bytes_at(const char* ptr)
-{
- return UNALIGNED_LOAD64(ptr);
-}
-
-static inline u32 get_u32_at_offset(u64 v, int offset)
-{
- DCHECK_GE(offset, 0);
- DCHECK_LE(offset, 4);
- return v >> (is_little_endian()? 8 * offset : 32 - 8 * offset);
-}
-
-#else
-
-typedef const char *eight_bytes_reference;
-
-static inline eight_bytes_reference get_eight_bytes_at(const char* ptr)
-{
- return ptr;
-}
-
-static inline u32 get_u32_at_offset(const char *v, int offset)
-{
- DCHECK_GE(offset, 0);
- DCHECK_LE(offset, 4);
- return UNALIGNED_LOAD32(v + offset);
-}
-#endif
-
-/*
- * Flat array compression that does not emit the "uncompressed length"
- * prefix. Compresses "input" string to the "*op" buffer.
- *
- * REQUIRES: "input" is at most "kBlockSize" bytes long.
- * REQUIRES: "op" points to an array of memory that is at least
- * "MaxCompressedLength(input.size())" in size.
- * REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
- * REQUIRES: "table_size" is a power of two
- *
- * Returns an "end" pointer into "op" buffer.
- * "end - op" is the compressed size of "input".
- */
-
-static char *compress_fragment(const char *const input,
- const size_t input_size,
- char *op, u16 * table, const unsigned table_size)
-{
- /* "ip" is the input pointer, and "op" is the output pointer. */
- const char *ip = input;
- CHECK_LE(input_size, kblock_size);
- CHECK_EQ(table_size & (table_size - 1), 0);
- const int shift = 32 - log2_floor(table_size);
- DCHECK_EQ(UINT_MAX >> shift, table_size - 1);
- const char *ip_end = input + input_size;
- const char *baseip = ip;
- /*
- * Bytes in [next_emit, ip) will be emitted as literal bytes. Or
- * [next_emit, ip_end) after the main loop.
- */
- const char *next_emit = ip;
-
- const unsigned kinput_margin_bytes = 15;
-
- if (likely(input_size >= kinput_margin_bytes)) {
- const char *const ip_limit = input + input_size -
- kinput_margin_bytes;
-
- u32 next_hash;
- for (next_hash = hash(++ip, shift);;) {
- DCHECK_LT(next_emit, ip);
-/*
- * The body of this loop calls EmitLiteral once and then EmitCopy one or
- * more times. (The exception is that when we're close to exhausting
- * the input we goto emit_remainder.)
- *
- * In the first iteration of this loop we're just starting, so
- * there's nothing to copy, so calling EmitLiteral once is
- * necessary. And we only start a new iteration when the
- * current iteration has determined that a call to EmitLiteral will
- * precede the next call to EmitCopy (if any).
- *
- * Step 1: Scan forward in the input looking for a 4-byte-long match.
- * If we get close to exhausting the input then goto emit_remainder.
- *
- * Heuristic match skipping: If 32 bytes are scanned with no matches
- * found, start looking only at every other byte. If 32 more bytes are
- * scanned, look at every third byte, etc.. When a match is found,
- * immediately go back to looking at every byte. This is a small loss
- * (~5% performance, ~0.1% density) for compressible data due to more
- * bookkeeping, but for non-compressible data (such as JPEG) it's a huge
- * win since the compressor quickly "realizes" the data is incompressible
- * and doesn't bother looking for matches everywhere.
- *
- * The "skip" variable keeps track of how many bytes there are since the
- * last match; dividing it by 32 (i.e. right-shifting by five) gives the
- * number of bytes to move ahead for each iteration.
- */
- u32 skip_bytes = 32;
-
- const char *next_ip = ip;
- const char *candidate;
- do {
- ip = next_ip;
- u32 hval = next_hash;
- DCHECK_EQ(hval, hash(ip, shift));
- u32 bytes_between_hash_lookups = skip_bytes++ >> 5;
- next_ip = ip + bytes_between_hash_lookups;
- if (unlikely(next_ip > ip_limit)) {
- goto emit_remainder;
- }
- next_hash = hash(next_ip, shift);
- candidate = baseip + table[hval];
- DCHECK_GE(candidate, baseip);
- DCHECK_LT(candidate, ip);
-
- table[hval] = (u16) (ip - baseip);
- } while (likely(UNALIGNED_LOAD32(ip) !=
- UNALIGNED_LOAD32(candidate)));
-
-/*
- * Step 2: A 4-byte match has been found. We'll later see if more
- * than 4 bytes match. But, prior to the match, input
- * bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
- */
- DCHECK_LE(next_emit + 16, ip_end);
- op = emit_literal(op, next_emit, (int) (ip - next_emit), true);
-
-/*
- * Step 3: Call EmitCopy, and then see if another EmitCopy could
- * be our next move. Repeat until we find no match for the
- * input immediately after what was consumed by the last EmitCopy call.
- *
- * If we exit this loop normally then we need to call EmitLiteral next,
- * though we don't yet know how big the literal will be. We handle that
- * by proceeding to the next iteration of the main loop. We also can exit
- * this loop via goto if we get close to exhausting the input.
- */
- eight_bytes_reference input_bytes;
- u32 candidate_bytes = 0;
-
- do {
-/*
- * We have a 4-byte match at ip, and no need to emit any
- * "literal bytes" prior to ip.
- */
- const char *base = ip;
- int matched = 4 +
- find_match_length(candidate + 4, ip + 4,
- ip_end);
- ip += matched;
- int offset = (int) (base - candidate);
- DCHECK_EQ(0, memcmp(base, candidate, matched));
- op = emit_copy(op, offset, matched);
-/*
- * We could immediately start working at ip now, but to improve
- * compression we first update table[Hash(ip - 1, ...)].
- */
- const char *insert_tail = ip - 1;
- next_emit = ip;
- if (unlikely(ip >= ip_limit)) {
- goto emit_remainder;
- }
- input_bytes = get_eight_bytes_at(insert_tail);
- u32 prev_hash =
- hash_bytes(get_u32_at_offset
- (input_bytes, 0), shift);
- table[prev_hash] = (u16) (ip - baseip - 1);
- u32 cur_hash =
- hash_bytes(get_u32_at_offset
- (input_bytes, 1), shift);
- candidate = baseip + table[cur_hash];
- candidate_bytes = UNALIGNED_LOAD32(candidate);
- table[cur_hash] = (u16) (ip - baseip);
- } while (get_u32_at_offset(input_bytes, 1) ==
- candidate_bytes);
-
- next_hash =
- hash_bytes(get_u32_at_offset(input_bytes, 2),
- shift);
- ++ip;
- }
- }
-
-emit_remainder:
- /* Emit the remaining bytes as a literal */
- if (next_emit < ip_end)
- op = emit_literal(op, next_emit, (int) (ip_end - next_emit), false);
-
- return op;
-}
-
-/*
- * -----------------------------------------------------------------------
- * Lookup table for decompression code. Generated by ComputeTable() below.
- * -----------------------------------------------------------------------
- */
-
-/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
-static const u32 wordmask[] = {
- 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
-};
-
-/*
- * Data stored per entry in lookup table:
- * Range Bits-used Description
- * ------------------------------------
- * 1..64 0..7 Literal/copy length encoded in opcode byte
- * 0..7 8..10 Copy offset encoded in opcode byte / 256
- * 0..4 11..13 Extra bytes after opcode
- *
- * We use eight bits for the length even though 7 would have sufficed
- * for efficiency reasons:
- * (1) Extracting a byte is faster than a bit-field
- * (2) It properly aligns copy offset so we do not need a <<8
- */
-static const u16 char_table[256] = {
- 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
- 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
- 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
- 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
- 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
- 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
- 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
- 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
- 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
- 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
- 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
- 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
- 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
- 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
- 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
- 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
- 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
- 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
- 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
- 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
- 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
- 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
- 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
- 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
- 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
- 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
- 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
- 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
- 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
- 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
- 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
- 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
-};
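-
-/*
- * Worked example: char_table[0x01] == 0x0804. Tag byte 0x01 is
- * COPY_1_BYTE_OFFSET; the entry decodes as length 4 (bits 0..7),
- * copy-offset/256 == 0 (bits 8..10), and one extra offset byte to fetch
- * (bits 11..13) -- matching what emit_copy_less_than64() produces.
- */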
-
-struct snappy_decompressor {
- struct source *reader; /* Underlying source of bytes to decompress */
- const char *ip; /* Points to next buffered byte */
- const char *ip_limit; /* Points just past buffered bytes */
- u32 peeked; /* Bytes peeked from reader (need to skip) */
- bool eof; /* Hit end of input without an error? */
- char scratch[5]; /* Temporary buffer for tags that straddle fragment boundaries */
-};
-
-static void
-init_snappy_decompressor(struct snappy_decompressor *d, struct source *reader)
-{
- d->reader = reader;
- d->ip = NULL;
- d->ip_limit = NULL;
- d->peeked = 0;
- d->eof = false;
-}
-
-static void exit_snappy_decompressor(struct snappy_decompressor *d)
-{
- skip(d->reader, d->peeked);
-}
-
-/*
- * Read the uncompressed length stored at the start of the compressed data.
- * On success, stores the length in *result and returns true.
- * On failure, returns false.
- */
-static bool read_uncompressed_length(struct snappy_decompressor *d,
- u32 * result)
-{
- DCHECK(d->ip == NULL); /*
- * Must not have read anything yet
- * Length is encoded in 1..5 bytes
- */
- *result = 0;
- u32 shift = 0;
- while (true) {
- if (shift >= 32)
- return false;
- size_t n;
- const char *ip = peek(d->reader, &n);
- if (n == 0)
- return false;
- const unsigned char c = *(const unsigned char *)(ip);
- skip(d->reader, 1);
- *result |= (u32) (c & 0x7f) << shift;
- if (c < 128) {
- break;
- }
- shift += 7;
- }
- return true;
-}
-
-static bool refill_tag(struct snappy_decompressor *d);
-
-/*
- * Process all items found in the input, writing output through "writer".
- * Stops on error or end of input.
- */
-static void decompress_all_tags(struct snappy_decompressor *d,
- struct writer *writer)
-{
- const char *ip = d->ip;
-
- /*
- * We could have put this refill fragment only at the beginning of the loop.
- * However, duplicating it at the end of each branch gives the compiler more
- * scope to optimize the <d->ip_limit - ip> expression based on the local
- * context, which overall increases speed.
- */
-#define MAYBE_REFILL() \
- if (d->ip_limit - ip < 5) { \
- d->ip = ip; \
- if (!refill_tag(d)) return; \
- ip = d->ip; \
- }
-
-
- MAYBE_REFILL();
- for (;;) {
- if (d->ip_limit - ip < 5) {
- d->ip = ip;
- if (!refill_tag(d))
- return;
- ip = d->ip;
- }
-
- const unsigned char c = *(const unsigned char *)(ip++);
-
- if ((c & 0x3) == LITERAL) {
- u32 literal_length = (c >> 2) + 1;
- if (writer_try_fast_append(writer, ip, (u32) (d->ip_limit - ip),
- literal_length)) {
- DCHECK_LT(literal_length, 61);
- ip += literal_length;
- MAYBE_REFILL();
- continue;
- }
- if (unlikely(literal_length >= 61)) {
- /* Long literal */
- const u32 literal_ll = literal_length - 60;
- literal_length = (get_unaligned_le32(ip) &
- wordmask[literal_ll]) + 1;
- ip += literal_ll;
- }
-
- u32 avail = (u32) (d->ip_limit - ip);
- while (avail < literal_length) {
- if (!writer_append(writer, ip, avail))
- return;
- literal_length -= avail;
- skip(d->reader, d->peeked);
- size_t n;
- ip = peek(d->reader, &n);
- avail = (u32) n;
- d->peeked = avail;
- if (avail == 0)
- return; /* Premature end of input */
- d->ip_limit = ip + avail;
- }
- if (!writer_append(writer, ip, literal_length))
- return;
- ip += literal_length;
- MAYBE_REFILL();
- } else {
- const u32 entry = char_table[c];
- const u32 trailer = get_unaligned_le32(ip) &
- wordmask[entry >> 11];
- const u32 length = entry & 0xff;
- ip += entry >> 11;
-
- /*
- * copy_offset/256 is encoded in bits 8..10.
- * By just fetching those bits, we get
- * copy_offset (since the bit-field starts at
- * bit 8).
- */
- const u32 copy_offset = entry & 0x700;
- if (!writer_append_from_self(writer,
- copy_offset + trailer,
- length))
- return;
- MAYBE_REFILL();
- }
- }
-}
-
-#undef MAYBE_REFILL
-
-static bool refill_tag(struct snappy_decompressor *d)
-{
- const char *ip = d->ip;
-
- if (ip == d->ip_limit) {
- size_t n;
- /* Fetch a new fragment from the reader */
- skip(d->reader, d->peeked); /* All peeked bytes are used up */
- ip = peek(d->reader, &n);
- d->peeked = (u32) n;
- if (n == 0) {
- d->eof = true;
- return false;
- }
- d->ip_limit = ip + n;
- }
-
- /* Read the tag character */
- DCHECK_LT(ip, d->ip_limit);
- const unsigned char c = *(const unsigned char *)(ip);
- const u32 entry = char_table[c];
- const u32 needed = (entry >> 11) + 1; /* +1 byte for 'c' */
- DCHECK_LE(needed, sizeof(d->scratch));
-
- /* Read more bytes from reader if needed */
- u32 nbuf = (u32) (d->ip_limit - ip);
-
- if (nbuf < needed) {
- /*
- * Stitch together bytes from ip and reader to form the word
- * contents. We store the needed bytes in "scratch". They
- * will be consumed immediately by the caller since we do not
- * read more than we need.
- */
- memmove(d->scratch, ip, nbuf);
- skip(d->reader, d->peeked); /* All peeked bytes are used up */
- d->peeked = 0;
- while (nbuf < needed) {
- size_t length;
- const char *src = peek(d->reader, &length);
- if (length == 0)
- return false;
- u32 to_add = min_t(u32, needed - nbuf, (u32) length);
- memcpy(d->scratch + nbuf, src, to_add);
- nbuf += to_add;
- skip(d->reader, to_add);
- }
- DCHECK_EQ(nbuf, needed);
- d->ip = d->scratch;
- d->ip_limit = d->scratch + needed;
- } else if (nbuf < 5) {
- /*
- * Have enough bytes, but move into scratch so that we do not
- * read past end of input
- */
- memmove(d->scratch, ip, nbuf);
- skip(d->reader, d->peeked); /* All peeked bytes are used up */
- d->peeked = 0;
- d->ip = d->scratch;
- d->ip_limit = d->scratch + nbuf;
- } else {
- /* Pass pointer to buffer returned by reader. */
- d->ip = ip;
- }
- return true;
-}
-
-static int internal_uncompress(struct source *r,
- struct writer *writer, u32 max_len)
-{
- struct snappy_decompressor decompressor;
- u32 uncompressed_len = 0;
-
- init_snappy_decompressor(&decompressor, r);
-
- if (!read_uncompressed_length(&decompressor, &uncompressed_len))
- return -EIO;
- /* Protect against possible DoS attack */
- if ((u64) (uncompressed_len) > max_len)
- return -EIO;
-
- writer_set_expected_length(writer, uncompressed_len);
-
- /* Process the entire input */
- decompress_all_tags(&decompressor, writer);
-
- exit_snappy_decompressor(&decompressor);
- if (decompressor.eof && writer_check_length(writer))
- return 0;
- return -EIO;
-}
-
-static inline int sn_compress(struct snappy_env *env, struct source *reader,
- struct sink *writer)
-{
- int err;
- size_t written = 0;
- int N = available(reader);
- char ulength[kmax32];
- char *p = varint_encode32(ulength, N);
-
- append(writer, ulength, p - ulength);
- written += (p - ulength);
-
- while (N > 0) {
- /* Get next block to compress (without copying if possible) */
- size_t fragment_size;
- const char *fragment = peek(reader, &fragment_size);
- if (fragment_size == 0) {
- err = -EIO;
- goto out;
- }
- const unsigned num_to_read = min_t(int, N, kblock_size);
- size_t bytes_read = fragment_size;
-
- int pending_advance = 0;
- if (bytes_read >= num_to_read) {
- /* Buffer returned by reader is large enough */
- pending_advance = num_to_read;
- fragment_size = num_to_read;
- }
- else {
- memcpy(env->scratch, fragment, bytes_read);
- skip(reader, bytes_read);
-
- while (bytes_read < num_to_read) {
- fragment = peek(reader, &fragment_size);
- size_t n =
- min_t(size_t, fragment_size,
- num_to_read - bytes_read);
- memcpy((char *)(env->scratch) + bytes_read, fragment, n);
- bytes_read += n;
- skip(reader, n);
- }
- DCHECK_EQ(bytes_read, num_to_read);
- fragment = env->scratch;
- fragment_size = num_to_read;
- }
- if (fragment_size < num_to_read)
- return -EIO;
-
- /* Get encoding table for compression */
- int table_size;
- u16 *table = get_hash_table(env, num_to_read, &table_size);
-
- /* Compress input_fragment and append to dest */
- char *dest;
- dest = sink_peek(writer, rd_kafka_snappy_max_compressed_length(num_to_read));
- if (!dest) {
- /*
- * Need a scratch buffer for the output,
- * because the byte sink doesn't have enough
- * in one piece.
- */
- dest = env->scratch_output;
- }
- char *end = compress_fragment(fragment, fragment_size,
- dest, table, table_size);
- append(writer, dest, end - dest);
- written += (end - dest);
-
- N -= num_to_read;
- skip(reader, pending_advance);
- }
-
- err = 0;
-out:
- return err;
-}
-
-#ifdef SG
-
-int rd_kafka_snappy_compress_iov(struct snappy_env *env,
- const struct iovec *iov_in, size_t iov_in_cnt,
- size_t input_length,
- struct iovec *iov_out) {
- struct source reader = {
- .iov = (struct iovec *)iov_in,
- .iovlen = (int)iov_in_cnt,
- .total = input_length
- };
- struct sink writer = {
- .iov = iov_out,
- .iovlen = 1
- };
- int err = sn_compress(env, &reader, &writer);
-
- iov_out->iov_len = writer.written;
-
- return err;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_compress_iov);
-
-/**
- * rd_kafka_snappy_compress - Compress a buffer using the snappy compressor.
- * @env: Preallocated environment
- * @input: Input buffer
- * @input_length: Length of input_buffer
- * @compressed: Output buffer for compressed data
- * @compressed_length: The real length of the output written here.
- *
- * Return 0 on success, otherwise a negative error code.
- *
- * The output buffer must be at least
- * rd_kafka_snappy_max_compressed_length(input_length) bytes long.
- *
- * Requires a preallocated environment from rd_kafka_snappy_init_env.
- * The environment does not keep state over individual calls
- * of this function, just preallocates the memory.
- */
-int rd_kafka_snappy_compress(struct snappy_env *env,
- const char *input,
- size_t input_length,
- char *compressed, size_t *compressed_length)
-{
- struct iovec iov_in = {
- .iov_base = (char *)input,
- .iov_len = input_length,
- };
- struct iovec iov_out = {
- .iov_base = compressed,
- .iov_len = 0xffffffff,
- };
- return rd_kafka_snappy_compress_iov(env,
- &iov_in, 1, input_length,
- &iov_out);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_compress);
-
-int rd_kafka_snappy_uncompress_iov(struct iovec *iov_in, int iov_in_len,
- size_t input_len, char *uncompressed)
-{
- struct source reader = {
- .iov = iov_in,
- .iovlen = iov_in_len,
- .total = input_len
- };
- struct writer output = {
- .base = uncompressed,
- .op = uncompressed
- };
- return internal_uncompress(&reader, &output, 0xffffffff);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompress_iov);
-
-/**
- * rd_kafka_snappy_uncompress - Uncompress a snappy compressed buffer
- * @compressed: Input buffer with compressed data
- * @n: length of compressed buffer
- * @uncompressed: buffer for uncompressed data
- *
- * The uncompressed data buffer must be at least
- * rd_kafka_snappy_uncompressed_length(compressed) bytes long.
- *
- * Return 0 on success, otherwise a negative error code.
- */
-int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed)
-{
- struct iovec iov = {
- .iov_base = (char *)compressed,
- .iov_len = n
- };
- return rd_kafka_snappy_uncompress_iov(&iov, 1, n, uncompressed);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompress);
-
-
-/**
- * @brief Decompress Snappy message with Snappy-java framing.
- *
- * @returns a malloced buffer with the uncompressed data, or NULL on failure.
- */
-char *rd_kafka_snappy_java_uncompress (const char *inbuf, size_t inlen,
- size_t *outlenp,
- char *errstr, size_t errstr_size) {
- int pass;
- char *outbuf = NULL;
-
- /**
- * Traverse all chunks in two passes:
- * pass 1: calculate total uncompressed length
- * pass 2: uncompress
- *
- * Each chunk is prefixed with a 4-byte big-endian length. */
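-
- /*
- * Chunk layout consumed by the loop below, repeated to end of input:
- * [ clen: 4 bytes, big-endian ][ snappy-compressed block: clen bytes ]
- */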
-
- for (pass = 1 ; pass <= 2 ; pass++) {
- ssize_t of = 0; /* inbuf offset */
- ssize_t uof = 0; /* outbuf offset */
-
- while (of + 4 <= (ssize_t)inlen) {
- uint32_t clen; /* compressed length */
- size_t ulen; /* uncompressed length */
- int r;
-
- memcpy(&clen, inbuf+of, 4);
- clen = be32toh(clen);
- of += 4;
-
- if (unlikely(clen > inlen - of)) {
- rd_snprintf(errstr, errstr_size,
- "Invalid snappy-java chunk length "
- "%"PRId32" > %"PRIdsz
- " available bytes",
- clen, (ssize_t)inlen - of);
- return NULL;
- }
-
- /* Acquire uncompressed length */
- if (unlikely(!rd_kafka_snappy_uncompressed_length(
- inbuf+of, clen, &ulen))) {
- rd_snprintf(errstr, errstr_size,
- "Failed to get length of "
- "(snappy-java framed) Snappy "
- "compressed payload "
- "(clen %"PRId32")",
- clen);
- return NULL;
- }
-
- if (pass == 1) {
- /* pass 1: calculate total length */
- of += clen;
- uof += ulen;
- continue;
- }
-
- /* pass 2: Uncompress to outbuf */
- if (unlikely((r = rd_kafka_snappy_uncompress(
- inbuf+of, clen, outbuf+uof)))) {
- rd_snprintf(errstr, errstr_size,
- "Failed to decompress Snappy-java "
- "framed payload of size %"PRId32
- ": %s",
- clen,
- rd_strerror(-r/*negative errno*/));
- rd_free(outbuf);
- return NULL;
- }
-
- of += clen;
- uof += ulen;
- }
-
- if (unlikely(of != (ssize_t)inlen)) {
- rd_snprintf(errstr, errstr_size,
- "%"PRIusz" trailing bytes in Snappy-java "
- "framed compressed data",
- inlen - of);
- if (outbuf)
- rd_free(outbuf);
- return NULL;
- }
-
- if (pass == 1) {
- if (uof <= 0) {
- rd_snprintf(errstr, errstr_size,
- "Empty Snappy-java framed data");
- return NULL;
- }
-
- /* Allocate memory for uncompressed data */
- outbuf = rd_malloc(uof);
- if (unlikely(!outbuf)) {
- rd_snprintf(errstr, errstr_size,
- "Failed to allocate memory "
- "(%"PRIdsz") for "
- "uncompressed Snappy data: %s",
- uof, rd_strerror(errno));
- return NULL;
- }
-
- } else {
- /* pass 2 */
- *outlenp = uof;
- }
- }
-
- return outbuf;
-}
-
-
-
-#else
-/**
- * rd_kafka_snappy_compress - Compress a buffer using the snappy compressor.
- * @env: Preallocated environment
- * @input: Input buffer
- * @input_length: Length of input_buffer
- * @compressed: Output buffer for compressed data
- * @compressed_length: The real length of the output written here.
- *
- * Return 0 on success, otherwise a negative error code.
- *
- * The output buffer must be at least
- * rd_kafka_snappy_max_compressed_length(input_length) bytes long.
- *
- * Requires a preallocated environment from rd_kafka_snappy_init_env.
- * The environment does not keep state over individual calls
- * of this function, just preallocates the memory.
- */
-int rd_kafka_snappy_compress(struct snappy_env *env,
- const char *input,
- size_t input_length,
- char *compressed, size_t *compressed_length)
-{
- struct source reader = {
- .ptr = input,
- .left = input_length
- };
- struct sink writer = {
- .dest = compressed,
- };
- int err = sn_compress(env, &reader, &writer);
-
- /* Compute how many bytes were added */
- *compressed_length = (writer.dest - compressed);
- return err;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_compress);
-
-/**
- * rd_kafka_snappy_uncompress - Uncompress a snappy compressed buffer
- * @compressed: Input buffer with compressed data
- * @n: length of compressed buffer
- * @uncompressed: buffer for uncompressed data
- *
- * The uncompressed data buffer must be at least
- * rd_kafka_snappy_uncompressed_length(compressed) bytes long.
- *
- * Return 0 on success, otherwise a negative error code.
- */
-int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed)
-{
- struct source reader = {
- .ptr = compressed,
- .left = n
- };
- struct writer output = {
- .base = uncompressed,
- .op = uncompressed
- };
- return internal_uncompress(&reader, &output, 0xffffffff);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_uncompress);
-#endif
-
-static inline void clear_env(struct snappy_env *env)
-{
- memset(env, 0, sizeof(*env));
-}
-
-#ifdef SG
-/**
- * rd_kafka_snappy_init_env_sg - Allocate snappy compression environment
- * @env: Environment to preallocate
- * @sg: Input environment ever does scather gather
- *
- * If false is passed to sg then multiple entries in an iovec
- * are not legal.
- * Returns 0 on success, otherwise negative errno.
- * Must run in process context.
- */
-int rd_kafka_snappy_init_env_sg(struct snappy_env *env, bool sg)
-{
- if (rd_kafka_snappy_init_env(env) < 0)
- goto error;
-
- if (sg) {
- env->scratch = vmalloc(kblock_size);
- if (!env->scratch)
- goto error;
- env->scratch_output =
- vmalloc(rd_kafka_snappy_max_compressed_length(kblock_size));
- if (!env->scratch_output)
- goto error;
- }
- return 0;
-error:
- rd_kafka_snappy_free_env(env);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_init_env_sg);
-#endif
-
-/**
- * rd_kafka_snappy_init_env - Allocate snappy compression environment
- * @env: Environment to preallocate
- *
- * Passing multiple entries in an iovec is not allowed
- * on the environment allocated here.
- * Returns 0 on success, otherwise negative errno.
- * Must run in process context.
- */
-int rd_kafka_snappy_init_env(struct snappy_env *env)
-{
- clear_env(env);
- env->hash_table = vmalloc(sizeof(u16) * kmax_hash_table_size);
- if (!env->hash_table)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL(rd_kafka_snappy_init_env);
-
-/**
- * rd_kafka_snappy_free_env - Free a snappy compression environment
- * @env: Environment to free.
- *
- * Must run in process context.
- */
-void rd_kafka_snappy_free_env(struct snappy_env *env)
-{
- vfree(env->hash_table);
-#ifdef SG
- vfree(env->scratch);
- vfree(env->scratch_output);
-#endif
- clear_env(env);
-}
-EXPORT_SYMBOL(rd_kafka_snappy_free_env);
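-
-/*
- * Usage sketch (our illustration, not part of the public API): a full
- * compress/uncompress round trip with a preallocated environment.
- */
-static RD_UNUSED int snappy_roundtrip_example(const char *input,
-                                              size_t input_len)
-{
-        struct snappy_env env;
-        size_t clen = 0, ulen = 0;
-        char *cbuf, *ubuf = NULL;
-        int err;
-
-        if (rd_kafka_snappy_init_env(&env) < 0)
-                return -ENOMEM;
-
-        /* The worst-case output size is known up front */
-        cbuf = rd_malloc(rd_kafka_snappy_max_compressed_length(input_len));
-        err = rd_kafka_snappy_compress(&env, input, input_len, cbuf, &clen);
-
-        if (!err) {
-                if (rd_kafka_snappy_uncompressed_length(cbuf, clen, &ulen) &&
-                    ulen == input_len) {
-                        ubuf = rd_malloc(ulen);
-                        err = rd_kafka_snappy_uncompress(cbuf, clen, ubuf);
-                } else
-                        err = -EIO;
-        }
-
-        if (ubuf)
-                rd_free(ubuf);
-        rd_free(cbuf);
-        rd_kafka_snappy_free_env(&env);
-        return err;
-}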
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop /* -Wcast-align ignore */
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h b/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h
deleted file mode 100644
index b3742f1ac..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _LINUX_SNAPPY_H
-#define _LINUX_SNAPPY_H 1
-
-#include <stdbool.h>
-#include <stddef.h>
-
-/* Only needed for compression. This preallocates the worst case */
-struct snappy_env {
- unsigned short *hash_table;
- void *scratch;
- void *scratch_output;
-};
-
-struct iovec;
-int rd_kafka_snappy_init_env(struct snappy_env *env);
-int rd_kafka_snappy_init_env_sg(struct snappy_env *env, bool sg);
-void rd_kafka_snappy_free_env(struct snappy_env *env);
-int rd_kafka_snappy_uncompress_iov(struct iovec *iov_in, int iov_in_len,
- size_t input_len, char *uncompressed);
-int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed);
-char *rd_kafka_snappy_java_uncompress (const char *inbuf, size_t inlen,
- size_t *outlenp,
- char *errstr, size_t errstr_size);
-int rd_kafka_snappy_compress_iov(struct snappy_env *env,
- const struct iovec *iov_in, size_t iov_in_cnt,
- size_t input_length,
- struct iovec *iov_out);
-bool rd_kafka_snappy_uncompressed_length(const char *buf, size_t len, size_t *result);
-size_t rd_kafka_snappy_max_compressed_length(size_t source_len);
-
-
-
-
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h b/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h
deleted file mode 100644
index 3286f63de..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright 2005 Google Inc. All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "rdkafka_int.h"
-#include "rdendian.h"
-
-
-
-#ifdef __FreeBSD__
-# include <sys/endian.h>
-#elif defined(__APPLE_CC__) || (defined(__MACH__) && defined(__APPLE__)) /* MacOS/X support */
-# include <machine/endian.h>
-
-#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN
-# define htole16(x) (x)
-# define le32toh(x) (x)
-#elif __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
-# define htole16(x) __DARWIN_OSSwapInt16(x)
-# define le32toh(x) __DARWIN_OSSwapInt32(x)
-#else
-# error "Endianness is undefined"
-#endif
-
-
-#elif !defined(__WIN32__) && !defined(_MSC_VER) && !defined(__sun) && !defined(_AIX)
-# include <endian.h>
-#endif
-
-#include <stdlib.h>
-#include <assert.h>
-#include <string.h>
-#include <errno.h>
-#include <stdbool.h>
-#include <limits.h>
-#if !defined(__WIN32__) && !defined(_MSC_VER)
-#include <sys/uio.h>
-#endif
-
-#ifdef __ANDROID__
-#define le32toh letoh32
-#endif
-
-#if !defined(__MINGW32__) && defined(__WIN32__) && defined(SG)
-struct iovec {
- void *iov_base; /* Pointer to data. */
- size_t iov_len; /* Length of data. */
-};
-#endif
-
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned u32;
-typedef unsigned long long u64;
-
-#ifdef _MSC_VER
-#define BUG_ON(x) do { if (unlikely((x))) abort(); } while (0)
-#else
-#define BUG_ON(x) assert(!(x))
-#endif
-
-
-#define vmalloc(x) rd_malloc(x)
-#define vfree(x) rd_free(x)
-
-#define EXPORT_SYMBOL(x)
-
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
-
-#ifndef likely
-#define likely(x) __builtin_expect((x), 1)
-#define unlikely(x) __builtin_expect((x), 0)
-#endif
-
-#define min_t(t,x,y) ((x) < (y) ? (x) : (y))
-#define max_t(t,x,y) ((x) > (y) ? (x) : (y))
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define __LITTLE_ENDIAN__ 1
-#endif
-
-#if __LITTLE_ENDIAN__ == 1 || defined(__WIN32__)
-#ifndef htole16
-#define htole16(x) (x)
-#endif
-#ifndef le32toh
-#define le32toh(x) (x)
-#endif
-#endif
-
-
-#if defined(_MSC_VER)
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define htole16(x) (x)
-#define le32toh(x) (x)
-
-#elif BYTE_ORDER == BIG_ENDIAN
-#define htole16(x) __builtin_bswap16(x)
-#define le32toh(x) __builtin_bswap32(x)
-#endif
-#endif
-
-#if defined(__sun)
-#ifndef htole16
-#define htole16(x) LE_16(x)
-#endif
-#ifndef le32toh
-#define le32toh(x) LE_32(x)
-#endif
-#endif
-
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json b/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json
deleted file mode 100644
index d0dbedda7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json
+++ /dev/null
@@ -1,444 +0,0 @@
-{ "$schema": "http://json-schema.org/schema#",
- "id": "https://github.com/edenhill/librdkafka/src/statistics_schema.json",
- "title": "librdkafka statistics schema - INCOMPLETE - WORK IN PROGRESS",
- "definitions": {
- "window": {
- "type": "object",
- "title": "Rolling window statistics",
- "description": "The values are in microseconds unless otherwise stated.",
- "properties": {
- "type": "object",
- "properties": {
- "min": {
- "type": "integer"
- },
- "max": {
- "type": "integer"
- },
- "avg": {
- "type": "integer"
- },
- "sum": {
- "type": "integer"
- },
- "stddev": {
- "type": "integer"
- },
- "p50": {
- "type": "integer"
- },
- "p75": {
- "type": "integer"
- },
- "p90": {
- "type": "integer"
- },
- "p95": {
- "type": "integer"
- },
- "p99": {
- "type": "integer"
- },
- "p99_99": {
- "type": "integer"
- },
- "outofrange": {
- "type": "integer"
- },
- "hdrsize": {
- "type": "integer"
- },
- "cnt": {
- "type": "integer"
- }
- }
- }
- }
- },
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "client_id": {
- "type": "string"
- },
- "type": {
- "type": "string"
- },
- "ts": {
- "type": "integer"
- },
- "time": {
- "type": "integer"
- },
- "replyq": {
- "type": "integer"
- },
- "msg_cnt": {
- "type": "integer"
- },
- "msg_size": {
- "type": "integer"
- },
- "msg_max": {
- "type": "integer"
- },
- "msg_size_max": {
- "type": "integer"
- },
- "simple_cnt": {
- "type": "integer"
- },
- "metadata_cache_cnt": {
- "type": "integer"
- },
- "brokers": {
- "type": "object",
- "additionalProperties": {
- "type": "object",
- "title": "Broker object keyed by the broker \"name:port/id\"",
- "properties": {
- "name": {
- "type": "string"
- },
- "nodeid": {
- "type": "integer"
- },
- "state": {
- "type": "string"
- },
- "stateage": {
- "type": "integer"
- },
- "outbuf_cnt": {
- "type": "integer"
- },
- "outbuf_msg_cnt": {
- "type": "integer"
- },
- "waitresp_cnt": {
- "type": "integer"
- },
- "waitresp_msg_cnt": {
- "type": "integer"
- },
- "tx": {
- "type": "integer"
- },
- "txbytes": {
- "type": "integer"
- },
- "txerrs": {
- "type": "integer"
- },
- "txretries": {
- "type": "integer"
- },
- "txidle": {
- "type": "integer"
- },
- "req_timeouts": {
- "type": "integer"
- },
- "rx": {
- "type": "integer"
- },
- "rxbytes": {
- "type": "integer"
- },
- "rxerrs": {
- "type": "integer"
- },
- "rxcorriderrs": {
- "type": "integer"
- },
- "rxpartial": {
- "type": "integer"
- },
- "rxidle": {
- "type": "integer"
- },
- "zbuf_grow": {
- "type": "integer"
- },
- "buf_grow": {
- "type": "integer"
- },
- "wakeups": {
- "type": "integer"
- },
- "int_latency": {
- "$ref": "#/definitions/window"
- },
- "outbuf_latency": {
- "$ref": "#/definitions/window"
- },
- "rtt": {
- "$ref": "#/definitions/window"
- },
- "throttle": {
- "$ref": "#/definitions/window"
- },
- "toppars": {
- "type": "object",
- "additionalProperties": {
- "type": "object",
- "properties": {
- "topic": {
- "type": "string"
- },
- "partition": {
- "type": "integer"
- }
- },
- "required": [
- "topic",
- "partition"
- ]
- }
- }
- },
- "required": [
- "name",
- "nodeid",
- "state",
- "stateage",
- "outbuf_cnt",
- "outbuf_msg_cnt",
- "waitresp_cnt",
- "waitresp_msg_cnt",
- "tx",
- "txbytes",
- "txerrs",
- "txretries",
- "req_timeouts",
- "rx",
- "rxbytes",
- "rxerrs",
- "rxcorriderrs",
- "rxpartial",
- "zbuf_grow",
- "buf_grow",
- "wakeups",
- "int_latency",
- "rtt",
- "throttle",
- "toppars"
- ]
- }
- },
- "topics": {
- "type": "object",
- "properties": {
- "additionalProperties": {
- "type": "object",
- "properties": {
- "topic": {
- "type": "string"
- },
- "metadata_age": {
- "type": "integer"
- },
- "batchsize": {
- "$ref": "#/definitions/window"
- },
- "batchcnt": {
- "$ref": "#/definitions/window"
- },
- "partitions": {
- "type": "object",
- "properties": {
- "^-?[0-9]+$": {
- "type": "object",
- "properties": {
- "partition": {
- "type": "integer"
- },
- "leader": {
- "type": "integer"
- },
- "desired": {
- "type": "boolean"
- },
- "unknown": {
- "type": "boolean"
- },
- "msgq_cnt": {
- "type": "integer"
- },
- "msgq_bytes": {
- "type": "integer"
- },
- "xmit_msgq_cnt": {
- "type": "integer"
- },
- "xmit_msgq_bytes": {
- "type": "integer"
- },
- "fetchq_cnt": {
- "type": "integer"
- },
- "fetchq_size": {
- "type": "integer"
- },
- "fetch_state": {
- "type": "string"
- },
- "query_offset": {
- "type": "integer"
- },
- "next_offset": {
- "type": "integer"
- },
- "app_offset": {
- "type": "integer"
- },
- "stored_offset": {
- "type": "integer"
- },
- "stored_leader_epoch": {
- "type": "integer"
- },
- "commited_offset": {
- "type": "integer"
- },
- "committed_offset": {
- "type": "integer"
- },
- "committed_leader_epoch": {
- "type": "integer"
- },
-
- "eof_offset": {
- "type": "integer"
- },
- "lo_offset": {
- "type": "integer"
- },
- "hi_offset": {
- "type": "integer"
- },
- "consumer_lag": {
- "type": "integer"
- },
- "consumer_lag_stored": {
- "type": "integer"
- },
- "leader_epoch": {
- "type": "integer"
- },
- "txmsgs": {
- "type": "integer"
- },
- "txbytes": {
- "type": "integer"
- },
- "rxmsgs": {
- "type": "integer"
- },
- "rxbytes": {
- "type": "integer"
- },
- "msgs": {
- "type": "integer"
- },
- "rx_ver_drops": {
- "type": "integer"
- },
- "msgs_inflight": {
- "type": "integer"
- }
- },
- "required": [
- "partition",
- "leader",
- "desired",
- "unknown",
- "msgq_cnt",
- "msgq_bytes",
- "xmit_msgq_cnt",
- "xmit_msgq_bytes",
- "fetchq_cnt",
- "fetchq_size",
- "fetch_state",
- "query_offset",
- "next_offset",
- "app_offset",
- "stored_offset",
- "commited_offset",
- "committed_offset",
- "eof_offset",
- "lo_offset",
- "hi_offset",
- "consumer_lag",
- "txmsgs",
- "txbytes",
- "rxmsgs",
- "rxbytes",
- "msgs",
- "rx_ver_drops"
- ]
- }
- }
- }
- },
- "required": [
- "topic",
- "metadata_age",
- "batchsize",
- "partitions"
- ]
- }
- }
- },
- "tx": {
- "type": "integer"
- },
- "tx_bytes": {
- "type": "integer"
- },
- "rx": {
- "type": "integer"
- },
- "rx_bytes": {
- "type": "integer"
- },
- "txmsgs": {
- "type": "integer"
- },
- "txmsg_bytes": {
- "type": "integer"
- },
- "rxmsgs": {
- "type": "integer"
- },
- "rxmsg_bytes": {
- "type": "integer"
- }
- },
- "required": [
- "name",
- "client_id",
- "type",
- "ts",
- "time",
- "replyq",
- "msg_cnt",
- "msg_size",
- "msg_max",
- "msg_size_max",
- "simple_cnt",
- "metadata_cache_cnt",
- "brokers",
- "topics",
- "tx",
- "tx_bytes",
- "rx",
- "rx_bytes",
- "txmsgs",
- "txmsg_bytes",
- "rxmsgs",
- "rxmsg_bytes"
- ]
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c
deleted file mode 100644
index b0ec8e956..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c
+++ /dev/null
@@ -1,932 +0,0 @@
-/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
-Copyright (c) 2012 Marcus Geelnard
-Copyright (c) 2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
-
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
-
- 3. This notice may not be removed or altered from any source
- distribution.
-*/
-
-#include "rd.h"
-#include <stdlib.h>
-
-#if !WITH_C11THREADS
-
-/* Platform specific includes */
-#if defined(_TTHREAD_POSIX_)
- #include <signal.h>
- #include <sched.h>
- #include <unistd.h>
- #include <sys/time.h>
- #include <errno.h>
-#elif defined(_TTHREAD_WIN32_)
- #include <process.h>
- #include <sys/timeb.h>
-#endif
-
-
-/* Standard, good-to-have defines */
-#ifndef NULL
- #define NULL (void*)0
-#endif
-#ifndef TRUE
- #define TRUE 1
-#endif
-#ifndef FALSE
- #define FALSE 0
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-static RD_TLS int thrd_is_detached;
-
-
-int mtx_init(mtx_t *mtx, int type)
-{
-#if defined(_TTHREAD_WIN32_)
- mtx->mAlreadyLocked = FALSE;
- mtx->mRecursive = type & mtx_recursive;
- mtx->mTimed = type & mtx_timed;
- if (!mtx->mTimed)
- {
- InitializeCriticalSection(&(mtx->mHandle.cs));
- }
- else
- {
- mtx->mHandle.mut = CreateMutex(NULL, FALSE, NULL);
- if (mtx->mHandle.mut == NULL)
- {
- return thrd_error;
- }
- }
- return thrd_success;
-#else
- int ret;
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- if (type & mtx_recursive)
- {
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- }
- ret = pthread_mutex_init(mtx, &attr);
- pthread_mutexattr_destroy(&attr);
- return ret == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-void mtx_destroy(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
- if (!mtx->mTimed)
- {
- DeleteCriticalSection(&(mtx->mHandle.cs));
- }
- else
- {
- CloseHandle(mtx->mHandle.mut);
- }
-#else
- pthread_mutex_destroy(mtx);
-#endif
-}
-
-int mtx_lock(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
- if (!mtx->mTimed)
- {
- EnterCriticalSection(&(mtx->mHandle.cs));
- }
- else
- {
- switch (WaitForSingleObject(mtx->mHandle.mut, INFINITE))
- {
- case WAIT_OBJECT_0:
- break;
- case WAIT_ABANDONED:
- default:
- return thrd_error;
- }
- }
-
- if (!mtx->mRecursive)
- {
- rd_assert(!mtx->mAlreadyLocked); /* Would deadlock */
- mtx->mAlreadyLocked = TRUE;
- }
- return thrd_success;
-#else
- return pthread_mutex_lock(mtx) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
-{
-#if defined(_TTHREAD_WIN32_)
- struct timespec current_ts;
- DWORD timeoutMs;
-
- if (!mtx->mTimed)
- {
- return thrd_error;
- }
-
- timespec_get(&current_ts, TIME_UTC);
-
- if ((current_ts.tv_sec > ts->tv_sec) || ((current_ts.tv_sec == ts->tv_sec) && (current_ts.tv_nsec >= ts->tv_nsec)))
- {
- timeoutMs = 0;
- }
- else
- {
- timeoutMs = (DWORD)(ts->tv_sec - current_ts.tv_sec) * 1000;
- timeoutMs += (ts->tv_nsec - current_ts.tv_nsec) / 1000000;
- timeoutMs += 1;
- }
-
- /* TODO: the timeout for WaitForSingleObject doesn't include time
- while the computer is asleep. */
- switch (WaitForSingleObject(mtx->mHandle.mut, timeoutMs))
- {
- case WAIT_OBJECT_0:
- break;
- case WAIT_TIMEOUT:
- return thrd_timedout;
- case WAIT_ABANDONED:
- default:
- return thrd_error;
- }
-
- if (!mtx->mRecursive)
- {
- rd_assert(!mtx->mAlreadyLocked); /* Would deadlock */
- mtx->mAlreadyLocked = TRUE;
- }
-
- return thrd_success;
-#elif defined(_POSIX_TIMEOUTS) && (_POSIX_TIMEOUTS >= 200112L) && defined(_POSIX_THREADS) && (_POSIX_THREADS >= 200112L)
- switch (pthread_mutex_timedlock(mtx, ts)) {
- case 0:
- return thrd_success;
- case ETIMEDOUT:
- return thrd_timedout;
- default:
- return thrd_error;
- }
-#else
- int rc;
- struct timespec cur, dur;
-
- /* Try to acquire the lock and, if we fail, sleep for 5ms. */
- while ((rc = pthread_mutex_trylock (mtx)) == EBUSY) {
- timespec_get(&cur, TIME_UTC);
-
- if ((cur.tv_sec > ts->tv_sec) || ((cur.tv_sec == ts->tv_sec) && (cur.tv_nsec >= ts->tv_nsec)))
- {
- break;
- }
-
- dur.tv_sec = ts->tv_sec - cur.tv_sec;
- dur.tv_nsec = ts->tv_nsec - cur.tv_nsec;
- if (dur.tv_nsec < 0)
- {
- dur.tv_sec--;
- dur.tv_nsec += 1000000000;
- }
-
- if ((dur.tv_sec != 0) || (dur.tv_nsec > 5000000))
- {
- dur.tv_sec = 0;
- dur.tv_nsec = 5000000;
- }
-
- nanosleep(&dur, NULL);
- }
-
- switch (rc) {
- case 0:
- return thrd_success;
- case ETIMEDOUT:
- case EBUSY:
- return thrd_timedout;
- default:
- return thrd_error;
- }
-#endif
-}
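-
-/* Editorial usage sketch (illustrative only, not part of the original
- * sources): the deadline passed to mtx_timedlock() is an absolute TIME_UTC
- * time point, not a relative timeout. Waiting at most ~100 ms on a mutex
- * created with mtx_timed looks roughly like this:
- *
- *   struct timespec deadline;
- *   timespec_get(&deadline, TIME_UTC);
- *   deadline.tv_nsec += 100 * 1000000;
- *   if (deadline.tv_nsec >= 1000000000) {
- *           deadline.tv_sec++;
- *           deadline.tv_nsec -= 1000000000;
- *   }
- *   if (mtx_timedlock(&mtx, &deadline) == thrd_timedout)
- *           ;  // lock not acquired within ~100 ms
- */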
-
-int mtx_trylock(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
- int ret;
-
- if (!mtx->mTimed)
- {
- ret = TryEnterCriticalSection(&(mtx->mHandle.cs)) ? thrd_success : thrd_busy;
- }
- else
- {
- ret = (WaitForSingleObject(mtx->mHandle.mut, 0) == WAIT_OBJECT_0) ? thrd_success : thrd_busy;
- }
-
- if ((!mtx->mRecursive) && (ret == thrd_success))
- {
- if (mtx->mAlreadyLocked)
- {
- LeaveCriticalSection(&(mtx->mHandle.cs));
- ret = thrd_busy;
- }
- else
- {
- mtx->mAlreadyLocked = TRUE;
- }
- }
- return ret;
-#else
- return (pthread_mutex_trylock(mtx) == 0) ? thrd_success : thrd_busy;
-#endif
-}
-
-int mtx_unlock(mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
- mtx->mAlreadyLocked = FALSE;
- if (!mtx->mTimed)
- {
- LeaveCriticalSection(&(mtx->mHandle.cs));
- }
- else
- {
- if (!ReleaseMutex(mtx->mHandle.mut))
- {
- return thrd_error;
- }
- }
- return thrd_success;
-#else
- return pthread_mutex_unlock(mtx) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-#if defined(_TTHREAD_WIN32_)
-#define _CONDITION_EVENT_ONE 0
-#define _CONDITION_EVENT_ALL 1
-#endif
-
-int cnd_init(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
- cond->mWaitersCount = 0;
-
- /* Init critical section */
- InitializeCriticalSection(&cond->mWaitersCountLock);
-
- /* Init events */
- cond->mEvents[_CONDITION_EVENT_ONE] = CreateEvent(NULL, FALSE, FALSE, NULL);
- if (cond->mEvents[_CONDITION_EVENT_ONE] == NULL)
- {
- cond->mEvents[_CONDITION_EVENT_ALL] = NULL;
- return thrd_error;
- }
- cond->mEvents[_CONDITION_EVENT_ALL] = CreateEvent(NULL, TRUE, FALSE, NULL);
- if (cond->mEvents[_CONDITION_EVENT_ALL] == NULL)
- {
- CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
- cond->mEvents[_CONDITION_EVENT_ONE] = NULL;
- return thrd_error;
- }
-
- return thrd_success;
-#else
- return pthread_cond_init(cond, NULL) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-void cnd_destroy(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
- if (cond->mEvents[_CONDITION_EVENT_ONE] != NULL)
- {
- CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
- }
- if (cond->mEvents[_CONDITION_EVENT_ALL] != NULL)
- {
- CloseHandle(cond->mEvents[_CONDITION_EVENT_ALL]);
- }
- DeleteCriticalSection(&cond->mWaitersCountLock);
-#else
- pthread_cond_destroy(cond);
-#endif
-}
-
-int cnd_signal(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
- int haveWaiters;
-
- /* Are there any waiters? */
- EnterCriticalSection(&cond->mWaitersCountLock);
- haveWaiters = (cond->mWaitersCount > 0);
- LeaveCriticalSection(&cond->mWaitersCountLock);
-
- /* If we have any waiting threads, send them a signal */
- if(haveWaiters)
- {
- if (SetEvent(cond->mEvents[_CONDITION_EVENT_ONE]) == 0)
- {
- return thrd_error;
- }
- }
-
- return thrd_success;
-#else
- return pthread_cond_signal(cond) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int cnd_broadcast(cnd_t *cond)
-{
-#if defined(_TTHREAD_WIN32_)
- int haveWaiters;
-
- /* Are there any waiters? */
- EnterCriticalSection(&cond->mWaitersCountLock);
- haveWaiters = (cond->mWaitersCount > 0);
- LeaveCriticalSection(&cond->mWaitersCountLock);
-
- /* If we have any waiting threads, send them a signal */
- if(haveWaiters)
- {
- if (SetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
- {
- return thrd_error;
- }
- }
-
- return thrd_success;
-#else
- return pthread_cond_broadcast(cond) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-#if defined(_TTHREAD_WIN32_)
-int _cnd_timedwait_win32(cnd_t *cond, mtx_t *mtx, DWORD timeout)
-{
- int result, lastWaiter;
-
- /* Increment number of waiters */
- EnterCriticalSection(&cond->mWaitersCountLock);
- ++ cond->mWaitersCount;
- LeaveCriticalSection(&cond->mWaitersCountLock);
-
- /* Release the mutex while waiting for the condition (will decrease
- the number of waiters when done)... */
- mtx_unlock(mtx);
-
- /* Wait for either event to become signaled due to cnd_signal() or
- cnd_broadcast() being called */
- result = WaitForMultipleObjects(2, cond->mEvents, FALSE, timeout);
-
- /* Check if we are the last waiter */
- EnterCriticalSection(&cond->mWaitersCountLock);
- -- cond->mWaitersCount;
- lastWaiter = (result == (WAIT_OBJECT_0 + _CONDITION_EVENT_ALL)) &&
- (cond->mWaitersCount == 0);
- LeaveCriticalSection(&cond->mWaitersCountLock);
-
- /* If we are the last waiter to be notified to stop waiting, reset the event */
- if (lastWaiter)
- {
- if (ResetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
- {
- /* The mutex is locked again before the function returns, even if an error occurred */
- mtx_lock(mtx);
- return thrd_error;
- }
- }
-
- /* The mutex is locked again before the function returns, even if an error occurred */
- mtx_lock(mtx);
-
- if (result == WAIT_TIMEOUT)
- return thrd_timedout;
- else if (result == (int)WAIT_FAILED)
- return thrd_error;
-
- return thrd_success;
-}
-#endif
-
-int cnd_wait(cnd_t *cond, mtx_t *mtx)
-{
-#if defined(_TTHREAD_WIN32_)
- return _cnd_timedwait_win32(cond, mtx, INFINITE);
-#else
- return pthread_cond_wait(cond, mtx) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
-{
-#if defined(_TTHREAD_WIN32_)
- struct timespec now;
- if (timespec_get(&now, TIME_UTC) == TIME_UTC)
- {
- unsigned long long nowInMilliseconds = now.tv_sec * 1000 + now.tv_nsec / 1000000;
- unsigned long long tsInMilliseconds = ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
- DWORD delta = (tsInMilliseconds > nowInMilliseconds) ?
- (DWORD)(tsInMilliseconds - nowInMilliseconds) : 0;
- return _cnd_timedwait_win32(cond, mtx, delta);
- }
- else
- return thrd_error;
-#else
- int ret;
- ret = pthread_cond_timedwait(cond, mtx, ts);
- if (ret == ETIMEDOUT)
- {
- return thrd_timedout;
- }
- return ret == 0 ? thrd_success : thrd_error;
-#endif
-}
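-
-/* Editorial usage sketch (illustrative only, not part of the original
- * sources): like its C11 counterpart, cnd_timedwait() may wake spuriously,
- * so it is normally called in a loop around a caller-owned predicate
- * (`ready' below is such an assumed flag) with the mutex held:
- *
- *   mtx_lock(&mtx);
- *   while (!ready) {
- *           if (cnd_timedwait(&cnd, &mtx, &deadline) == thrd_timedout)
- *                   break;  // deadline reached, predicate still false
- *   }
- *   mtx_unlock(&mtx);
- */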
-
-
-
-#if defined(_TTHREAD_WIN32_)
-struct TinyCThreadTSSData {
- void* value;
- tss_t key;
- struct TinyCThreadTSSData* next;
-};
-
-static tss_dtor_t _tinycthread_tss_dtors[1088] = { NULL, };
-
-static _Thread_local struct TinyCThreadTSSData* _tinycthread_tss_head = NULL;
-static _Thread_local struct TinyCThreadTSSData* _tinycthread_tss_tail = NULL;
-
-static void _tinycthread_tss_cleanup (void);
-
-static void _tinycthread_tss_cleanup (void) {
- struct TinyCThreadTSSData* data;
- int iteration;
- unsigned int again = 1;
- void* value;
-
- for (iteration = 0 ; iteration < TSS_DTOR_ITERATIONS && again > 0 ; iteration++)
- {
- again = 0;
- for (data = _tinycthread_tss_head ; data != NULL ; data = data->next)
- {
- if (data->value != NULL)
- {
- value = data->value;
- data->value = NULL;
-
- if (_tinycthread_tss_dtors[data->key] != NULL)
- {
- again = 1;
- _tinycthread_tss_dtors[data->key](value);
- }
- }
- }
- }
-
- while (_tinycthread_tss_head != NULL) {
- data = _tinycthread_tss_head->next;
- rd_free (_tinycthread_tss_head);
- _tinycthread_tss_head = data;
- }
- _tinycthread_tss_head = NULL;
- _tinycthread_tss_tail = NULL;
-}
-
-static void NTAPI _tinycthread_tss_callback(PVOID h, DWORD dwReason, PVOID pv)
-{
- (void)h;
- (void)pv;
-
- if (_tinycthread_tss_head != NULL && (dwReason == DLL_THREAD_DETACH || dwReason == DLL_PROCESS_DETACH))
- {
- _tinycthread_tss_cleanup();
- }
-}
-
-#ifdef _WIN32
- #ifdef _M_X64
- #pragma const_seg(".CRT$XLB")
- #else
- #pragma data_seg(".CRT$XLB")
- #endif
- PIMAGE_TLS_CALLBACK p_thread_callback = _tinycthread_tss_callback;
- #ifdef _M_X64
- #pragma const_seg()
- #else
- #pragma data_seg()
- #endif
-#else
- PIMAGE_TLS_CALLBACK p_thread_callback __attribute__((section(".CRT$XLB"))) = _tinycthread_tss_callback;
-#endif
-
-#endif /* defined(_TTHREAD_WIN32_) */
-
-/** Information to pass to the new thread (what to run). */
-typedef struct {
- thrd_start_t mFunction; /**< Pointer to the function to be executed. */
- void * mArg; /**< Function argument for the thread function. */
-} _thread_start_info;
-
-/* Thread wrapper function. */
-#if defined(_TTHREAD_WIN32_)
-static DWORD WINAPI _thrd_wrapper_function(LPVOID aArg)
-#elif defined(_TTHREAD_POSIX_)
-static void * _thrd_wrapper_function(void * aArg)
-#endif
-{
- thrd_start_t fun;
- void *arg;
- int res;
-
- /* Get thread startup information */
- _thread_start_info *ti = (_thread_start_info *) aArg;
- fun = ti->mFunction;
- arg = ti->mArg;
-
- /* The thread is responsible for freeing the startup information */
- rd_free((void *)ti);
-
- /* Call the actual client thread function */
- res = fun(arg);
-
-#if defined(_TTHREAD_WIN32_)
- if (_tinycthread_tss_head != NULL)
- {
- _tinycthread_tss_cleanup();
- }
-
- return (DWORD)res;
-#else
- return (void*)(intptr_t)res;
-#endif
-}
-
-int thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
-{
- /* Fill out the thread startup information (passed to the thread wrapper,
- which will eventually free it) */
- _thread_start_info* ti = (_thread_start_info*)rd_malloc(sizeof(_thread_start_info));
- if (ti == NULL)
- {
- return thrd_nomem;
- }
- ti->mFunction = func;
- ti->mArg = arg;
-
- /* Create the thread */
-#if defined(_TTHREAD_WIN32_)
- *thr = CreateThread(NULL, 0, _thrd_wrapper_function, (LPVOID) ti, 0, NULL);
-#elif defined(_TTHREAD_POSIX_)
- {
- int err;
- if((err = pthread_create(thr, NULL, _thrd_wrapper_function,
- (void *)ti)) != 0) {
- errno = err;
- *thr = 0;
- }
- }
-#endif
-
- /* Did we fail to create the thread? */
- if(!*thr)
- {
- rd_free(ti);
- return thrd_error;
- }
-
- return thrd_success;
-}
-
-thrd_t thrd_current(void)
-{
-#if defined(_TTHREAD_WIN32_)
- return GetCurrentThread();
-#else
- return pthread_self();
-#endif
-}
-
-int thrd_detach(thrd_t thr)
-{
- thrd_is_detached = 1;
-#if defined(_TTHREAD_WIN32_)
- /* https://stackoverflow.com/questions/12744324/how-to-detach-a-thread-on-windows-c#answer-12746081 */
- return CloseHandle(thr) != 0 ? thrd_success : thrd_error;
-#else
- return pthread_detach(thr) == 0 ? thrd_success : thrd_error;
-#endif
-}
-
-int thrd_equal(thrd_t thr0, thrd_t thr1)
-{
-#if defined(_TTHREAD_WIN32_)
- return thr0 == thr1;
-#else
- return pthread_equal(thr0, thr1);
-#endif
-}
-
-void thrd_exit(int res)
-{
-#if defined(_TTHREAD_WIN32_)
- if (_tinycthread_tss_head != NULL)
- {
- _tinycthread_tss_cleanup();
- }
-
- ExitThread(res);
-#else
- pthread_exit((void*)(intptr_t)res);
-#endif
-}
-
-int thrd_join(thrd_t thr, int *res)
-{
-#if defined(_TTHREAD_WIN32_)
- DWORD dwRes;
-
- if (WaitForSingleObject(thr, INFINITE) == WAIT_FAILED)
- {
- return thrd_error;
- }
- if (res != NULL)
- {
- if (GetExitCodeThread(thr, &dwRes) != 0)
- {
- *res = dwRes;
- }
- else
- {
- return thrd_error;
- }
- }
- CloseHandle(thr);
-#elif defined(_TTHREAD_POSIX_)
- void *pres;
- if (pthread_join(thr, &pres) != 0)
- {
- return thrd_error;
- }
- if (res != NULL)
- {
- *res = (int)(intptr_t)pres;
- }
-#endif
- return thrd_success;
-}
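-
-/* Editorial usage sketch (illustrative only, not part of the original
- * sources): a thread entry point has the signature int (*)(void *); its
- * return value is handed back to the joiner by thrd_join():
- *
- *   static int worker(void *arg) {
- *           (void)arg;
- *           return 42;
- *   }
- *
- *   thrd_t thr;
- *   int res;
- *   if (thrd_create(&thr, worker, NULL) == thrd_success &&
- *       thrd_join(thr, &res) == thrd_success)
- *           ;  // res == 42 here
- */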
-
-int thrd_sleep(const struct timespec *duration, struct timespec *remaining)
-{
-#if !defined(_TTHREAD_WIN32_)
- return nanosleep(duration, remaining);
-#else
- struct timespec start;
- DWORD t;
-
- timespec_get(&start, TIME_UTC);
-
- t = SleepEx((DWORD)(duration->tv_sec * 1000 +
- duration->tv_nsec / 1000000 +
- (((duration->tv_nsec % 1000000) == 0) ? 0 : 1)),
- TRUE);
-
- if (t == 0) {
- return 0;
- } else if (remaining != NULL) {
- timespec_get(remaining, TIME_UTC);
- remaining->tv_sec -= start.tv_sec;
- remaining->tv_nsec -= start.tv_nsec;
- if (remaining->tv_nsec < 0)
- {
- remaining->tv_nsec += 1000000000;
- remaining->tv_sec -= 1;
- }
- } else {
- return -1;
- }
-
- return 0;
-#endif
-}
-
-void thrd_yield(void)
-{
-#if defined(_TTHREAD_WIN32_)
- Sleep(0);
-#else
- sched_yield();
-#endif
-}
-
-int tss_create(tss_t *key, tss_dtor_t dtor)
-{
-#if defined(_TTHREAD_WIN32_)
- *key = TlsAlloc();
- if (*key == TLS_OUT_OF_INDEXES)
- {
- return thrd_error;
- }
- _tinycthread_tss_dtors[*key] = dtor;
-#else
- if (pthread_key_create(key, dtor) != 0)
- {
- return thrd_error;
- }
-#endif
- return thrd_success;
-}
-
-void tss_delete(tss_t key)
-{
-#if defined(_TTHREAD_WIN32_)
- struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*) TlsGetValue (key);
- struct TinyCThreadTSSData* prev = NULL;
- if (data != NULL)
- {
- if (data == _tinycthread_tss_head)
- {
- _tinycthread_tss_head = data->next;
- }
- else
- {
- prev = _tinycthread_tss_head;
- if (prev != NULL)
- {
- while (prev->next != data)
- {
- prev = prev->next;
- }
- }
- }
-
- if (data == _tinycthread_tss_tail)
- {
- _tinycthread_tss_tail = prev;
- }
-
- rd_free (data);
- }
- _tinycthread_tss_dtors[key] = NULL;
- TlsFree(key);
-#else
- pthread_key_delete(key);
-#endif
-}
-
-void *tss_get(tss_t key)
-{
-#if defined(_TTHREAD_WIN32_)
- struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key);
- if (data == NULL)
- {
- return NULL;
- }
- return data->value;
-#else
- return pthread_getspecific(key);
-#endif
-}
-
-int tss_set(tss_t key, void *val)
-{
-#if defined(_TTHREAD_WIN32_)
- struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key);
- if (data == NULL)
- {
- data = (struct TinyCThreadTSSData*)rd_malloc(sizeof(struct TinyCThreadTSSData));
- if (data == NULL)
- {
- return thrd_error;
- }
-
- data->value = NULL;
- data->key = key;
- data->next = NULL;
-
- if (_tinycthread_tss_tail != NULL)
- {
- _tinycthread_tss_tail->next = data;
- }
- else
- {
- _tinycthread_tss_tail = data;
- }
-
- if (_tinycthread_tss_head == NULL)
- {
- _tinycthread_tss_head = data;
- }
-
- if (!TlsSetValue(key, data))
- {
- rd_free (data);
- return thrd_error;
- }
- }
- data->value = val;
-#else
- if (pthread_setspecific(key, val) != 0)
- {
- return thrd_error;
- }
-#endif
- return thrd_success;
-}
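-
-/* Editorial usage sketch (illustrative only, not part of the original
- * sources): a key is created once, each thread stores its own value under
- * it, and the destructor runs per value at thread exit:
- *
- *   static tss_t key;
- *   tss_create(&key, free);              // create once, e.g. via call_once()
- *   tss_set(key, strdup("per-thread"));  // value owned by the calling thread
- *   puts((const char *)tss_get(key));    // prints this thread's value
- */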
-
-#if defined(_TTHREAD_EMULATE_TIMESPEC_GET_)
-int _tthread_timespec_get(struct timespec *ts, int base)
-{
-#if defined(_TTHREAD_WIN32_)
- struct _timeb tb;
-#elif !defined(CLOCK_REALTIME)
- struct timeval tv;
-#endif
-
- if (base != TIME_UTC)
- {
- return 0;
- }
-
-#if defined(_TTHREAD_WIN32_)
- _ftime_s(&tb);
- ts->tv_sec = (time_t)tb.time;
- ts->tv_nsec = 1000000L * (long)tb.millitm;
-#elif defined(CLOCK_REALTIME)
- base = (clock_gettime(CLOCK_REALTIME, ts) == 0) ? base : 0;
-#else
- gettimeofday(&tv, NULL);
- ts->tv_sec = (time_t)tv.tv_sec;
- ts->tv_nsec = 1000L * (long)tv.tv_usec;
-#endif
-
- return base;
-}
-#endif /* _TTHREAD_EMULATE_TIMESPEC_GET_ */
-
-#if defined(_TTHREAD_WIN32_)
-void call_once(once_flag *flag, void (*func)(void))
-{
- /* The idea here is that we use a spin lock (via the
- InterlockedCompareExchange function) to restrict access to the
- critical section until we have initialized it, then we use the
- critical section to block until the callback has completed
- execution. */
- while (flag->status < 3)
- {
- switch (flag->status)
- {
- case 0:
- if (InterlockedCompareExchange (&(flag->status), 1, 0) == 0) {
- InitializeCriticalSection(&(flag->lock));
- EnterCriticalSection(&(flag->lock));
- flag->status = 2;
- func();
- flag->status = 3;
- LeaveCriticalSection(&(flag->lock));
- return;
- }
- break;
- case 1:
- break;
- case 2:
- EnterCriticalSection(&(flag->lock));
- LeaveCriticalSection(&(flag->lock));
- break;
- }
- }
-}
-#endif /* defined(_TTHREAD_WIN32_) */
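-
-/* Editorial usage sketch (illustrative only, not part of the original
- * sources): call_once() runs the callback exactly once no matter how many
- * threads race through it:
- *
- *   static once_flag init_once = ONCE_FLAG_INIT;
- *
- *   static void do_init(void) {
- *           // one-time initialization goes here
- *   }
- *
- *   call_once(&init_once, do_init);  // safe to call from any thread
- */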
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !WITH_C11THREADS */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h
deleted file mode 100644
index 6bc39fe09..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h
+++ /dev/null
@@ -1,503 +0,0 @@
-/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
-Copyright (c) 2012 Marcus Geelnard
-Copyright (c) 2013-2014 Evan Nemerson
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
-
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
-
- 3. This notice may not be removed or altered from any source
- distribution.
-*/
-
-#ifndef _TINYCTHREAD_H_
-#define _TINYCTHREAD_H_
-
-/* Include config to know if C11 threads are available */
-#ifdef _WIN32
-#include "win32_config.h"
-#else
-#include "../config.h"
-#endif
-
-#if WITH_C11THREADS
-#include <threads.h>
-#else
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
-* @file
-* @mainpage TinyCThread API Reference
-*
-* @section intro_sec Introduction
-* TinyCThread is a minimal, portable implementation of basic threading
-* classes for C.
-*
-* They closely mimic the functionality and naming of the C11 standard, and
-* should be easily replaceable with the corresponding standard variants.
-*
-* @section port_sec Portability
-* The Win32 variant uses the native Win32 API for implementing the thread
-* classes, while for other systems, the POSIX threads API (pthread) is used.
-*
-* @section misc_sec Miscellaneous
-* The following special keywords are available: #_Thread_local.
-*
-* For more detailed information, browse the different sections of this
-* documentation. A good place to start is:
-* tinycthread.h.
-*/
-
-/* Which platform are we on? */
-#if !defined(_TTHREAD_PLATFORM_DEFINED_)
- #if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__)
- #define _TTHREAD_WIN32_
- #else
- #define _TTHREAD_POSIX_
- #endif
- #define _TTHREAD_PLATFORM_DEFINED_
-#endif
-
-/* Activate some POSIX functionality (e.g. clock_gettime and recursive mutexes) */
-#if defined(_TTHREAD_POSIX_)
- #undef _FEATURES_H
- #if !defined(_GNU_SOURCE)
- #define _GNU_SOURCE
- #endif
- #if !defined(_POSIX_C_SOURCE) || ((_POSIX_C_SOURCE - 0) < 199309L)
- #undef _POSIX_C_SOURCE
- #define _POSIX_C_SOURCE 199309L
- #endif
- #if !defined(_XOPEN_SOURCE) || ((_XOPEN_SOURCE - 0) < 500)
- #undef _XOPEN_SOURCE
- #define _XOPEN_SOURCE 500
- #endif
-#endif
-
-/* Generic includes */
-#include <time.h>
-
-/* Platform specific includes */
-#if defined(_TTHREAD_POSIX_)
- #ifndef _GNU_SOURCE
- #define _GNU_SOURCE /* for pthread_setname_np() */
- #endif
- #include <pthread.h>
-#elif defined(_TTHREAD_WIN32_)
- #ifndef WIN32_LEAN_AND_MEAN
- #define WIN32_LEAN_AND_MEAN
- #define __UNDEF_LEAN_AND_MEAN
- #endif
- #include <windows.h>
- #ifdef __UNDEF_LEAN_AND_MEAN
- #undef WIN32_LEAN_AND_MEAN
- #undef __UNDEF_LEAN_AND_MEAN
- #endif
-#endif
-
-/* Compiler-specific information */
-#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
- #define TTHREAD_NORETURN _Noreturn
-#elif defined(__GNUC__)
- #define TTHREAD_NORETURN __attribute__((__noreturn__))
-#else
- #define TTHREAD_NORETURN
-#endif
-
-/* If TIME_UTC is missing, provide it and provide a wrapper for
- timespec_get. */
-#ifndef TIME_UTC
-#define TIME_UTC 1
-#define _TTHREAD_EMULATE_TIMESPEC_GET_
-
-#if defined(_TTHREAD_WIN32_)
-struct _tthread_timespec {
- time_t tv_sec;
- long tv_nsec;
-};
-#define timespec _tthread_timespec
-#endif
-
-int _tthread_timespec_get(struct timespec *ts, int base);
-#define timespec_get _tthread_timespec_get
-#endif
-
-/** TinyCThread version (major number). */
-#define TINYCTHREAD_VERSION_MAJOR 1
-/** TinyCThread version (minor number). */
-#define TINYCTHREAD_VERSION_MINOR 2
-/** TinyCThread version (full version). */
-#define TINYCTHREAD_VERSION (TINYCTHREAD_VERSION_MAJOR * 100 + TINYCTHREAD_VERSION_MINOR)
-
-/**
-* @def _Thread_local
-* Thread local storage keyword.
-* A variable that is declared with the @c _Thread_local keyword makes the
-* value of the variable local to each thread (known as thread-local storage,
-* or TLS). Example usage:
-* @code
-* // This variable is local to each thread.
-* _Thread_local int variable;
-* @endcode
-* @note The @c _Thread_local keyword is a macro that maps to the corresponding
-* compiler directive (e.g. @c __declspec(thread)).
-* @note This directive is currently not supported on Mac OS X (it will give
-* a compiler error), since compile-time TLS is not supported in the Mac OS X
-* executable format. Also, some older versions of MinGW (before GCC 4.x) do
-* not support this directive, nor does the Tiny C Compiler.
-* @hideinitializer
-*/
-
-#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) && !defined(_Thread_local)
- #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
- #define _Thread_local __thread
- #else
- #define _Thread_local __declspec(thread)
- #endif
-#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && (((__GNUC__ << 8) | __GNUC_MINOR__) < ((4 << 8) | 9))
- #define _Thread_local __thread
-#endif
-
-/* Macros */
-#if defined(_TTHREAD_WIN32_)
-#define TSS_DTOR_ITERATIONS (4)
-#else
-#define TSS_DTOR_ITERATIONS PTHREAD_DESTRUCTOR_ITERATIONS
-#endif
-
-/* Function return values */
-/* Note: The values are unspecified by C11 but match glibc and musl to make
- * sure they're compatible for the case where librdkafka was built with
- * tinycthreads but the runtime libc also provides C11 threads.
- * The *BSD values are notably different. */
-#define thrd_success 0 /**< The requested operation succeeded */
-#define thrd_busy 1 /**< The requested operation failed because a resource requested by a test and return function is already in use */
-#define thrd_error 2 /**< The requested operation failed */
-#define thrd_nomem 3 /**< The requested operation failed because it was unable to allocate memory */
-#define thrd_timedout 4 /**< The time specified in the call was reached without acquiring the requested resource */
-
-/* Mutex types */
-#define mtx_plain 0
-#define mtx_recursive 1
-#define mtx_timed 2
-
-/* Mutex */
-#if defined(_TTHREAD_WIN32_)
-typedef struct {
- union {
- CRITICAL_SECTION cs; /* Critical section handle (used for non-timed mutexes) */
- HANDLE mut; /* Mutex handle (used for timed mutex) */
- } mHandle; /* Mutex handle */
- int mAlreadyLocked; /* TRUE if the mutex is already locked */
- int mRecursive; /* TRUE if the mutex is recursive */
- int mTimed; /* TRUE if the mutex is timed */
-} mtx_t;
-#else
-typedef pthread_mutex_t mtx_t;
-#endif
-
-/** Create a mutex object.
-* @param mtx A mutex object.
-* @param type Bit-mask that must have one of the following six values:
-* @li @c mtx_plain for a simple non-recursive mutex
-* @li @c mtx_timed for a non-recursive mutex that supports timeout
-* @li @c mtx_plain | @c mtx_recursive (same as @c mtx_plain, but recursive)
-* @li @c mtx_timed | @c mtx_recursive (same as @c mtx_timed, but recursive)
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int mtx_init(mtx_t *mtx, int type);
-
-/** Release any resources used by the given mutex.
-* @param mtx A mutex object.
-*/
-void mtx_destroy(mtx_t *mtx);
-
-/** Lock the given mutex.
-* Blocks until the given mutex can be locked. If the mutex is non-recursive, and
-* the calling thread already has a lock on the mutex, this call will block
-* forever.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int mtx_lock(mtx_t *mtx);
-
-/** Block the calling thread until the given mutex is locked or the given
-* TIME_UTC based time point has been reached. The mutex must support
-* timeouts (@ref mtx_timed).
-* @param mtx A mutex object.
-* @param ts A point in time at which the request will time out (absolute time).
-* @return @ref thrd_success on success, or @ref thrd_timedout if the time
-* specified was reached without locking the mutex, or @ref thrd_error if the
-* request could not be honored.
-*/
-int mtx_timedlock(mtx_t *mtx, const struct timespec *ts);
-
-/** Try to lock the given mutex.
-* The specified mutex shall support either test and return or timeout. If the
-* mutex is already locked, the function returns without blocking.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_busy if the resource
-* requested is already in use, or @ref thrd_error if the request could not be
-* honored.
-*/
-int mtx_trylock(mtx_t *mtx);
-
-/** Unlock the given mutex.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int mtx_unlock(mtx_t *mtx);
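-
-/** Editorial usage sketch (illustrative only, not part of the original
-* sources): the plain mutex API mirrors C11:
-* @code
-* mtx_t lock;
-* mtx_init(&lock, mtx_plain);
-* mtx_lock(&lock);
-* // ...critical section...
-* mtx_unlock(&lock);
-* mtx_destroy(&lock);
-* @endcode
-*/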
-
-/* Condition variable */
-#if defined(_TTHREAD_WIN32_)
-typedef struct {
- HANDLE mEvents[2]; /* Signal and broadcast event HANDLEs. */
- unsigned int mWaitersCount; /* Count of the number of waiters. */
- CRITICAL_SECTION mWaitersCountLock; /* Serialize access to mWaitersCount. */
-} cnd_t;
-#else
-typedef pthread_cond_t cnd_t;
-#endif
-
-/** Create a condition variable object.
-* @param cond A condition variable object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_init(cnd_t *cond);
-
-/** Release any resources used by the given condition variable.
-* @param cond A condition variable object.
-*/
-void cnd_destroy(cnd_t *cond);
-
-/** Signal a condition variable.
-* Unblocks one of the threads that are blocked on the given condition variable
-* at the time of the call. If no threads are blocked on the condition variable
-at the time of the call, the function does nothing and returns success.
-* @param cond A condition variable object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_signal(cnd_t *cond);
-
-/** Broadcast a condition variable.
-* Unblocks all of the threads that are blocked on the given condition variable
-* at the time of the call. If no threads are blocked on the condition variable
-at the time of the call, the function does nothing and returns success.
-* @param cond A condition variable object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_broadcast(cnd_t *cond);
-
-/** Wait for a condition variable to become signaled.
-* The function atomically unlocks the given mutex and endeavors to block until
-* the given condition variable is signaled by a call to cnd_signal or to
-* cnd_broadcast. When the calling thread becomes unblocked it locks the mutex
-* before it returns.
-* @param cond A condition variable object.
-* @param mtx A mutex object.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int cnd_wait(cnd_t *cond, mtx_t *mtx);
-
-/** Wait for a condition variable to become signaled.
-* The function atomically unlocks the given mutex and endeavors to block until
-* the given condition variable is signaled by a call to cnd_signal or to
-* cnd_broadcast, or until after the specified time. When the calling thread
-* becomes unblocked it locks the mutex before it returns.
-* @param cond A condition variable object.
-* @param mtx A mutex object.
-* @param ts A point in time at which the request will time out (absolute time).
-* @return @ref thrd_success upon success, or @ref thrd_timedout if the time
-* specified in the call was reached without acquiring the requested resource, or
-* @ref thrd_error if the request could not be honored.
-*/
-int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts);
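-
-/** Editorial usage sketch (illustrative only, not part of the original
-* sources): the canonical pattern pairs a condition variable with a
-* caller-owned predicate checked under the mutex; queue_empty(),
-* queue_pop() and queue_push() are assumed caller-provided helpers:
-* @code
-* // consumer
-* mtx_lock(&m);
-* while (queue_empty(&q))
-*         cnd_wait(&c, &m);
-* item = queue_pop(&q);
-* mtx_unlock(&m);
-*
-* // producer
-* mtx_lock(&m);
-* queue_push(&q, item);
-* cnd_signal(&c);
-* mtx_unlock(&m);
-* @endcode
-*/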
-
-#if defined(_TTHREAD_WIN32_)
-int _cnd_timedwait_win32(cnd_t *cond, mtx_t *mtx, DWORD timeout);
-#endif
-
-/* Thread */
-#if defined(_TTHREAD_WIN32_)
-typedef HANDLE thrd_t;
-#else
-typedef pthread_t thrd_t;
-#endif
-
-/** Thread start function.
-* Any thread that is started with the @ref thrd_create() function must be
-* started through a function of this type.
-* @param arg The thread argument (the @c arg argument of the corresponding
-* @ref thrd_create() call).
-* @return The thread return value, which can be obtained by another thread
-* by using the @ref thrd_join() function.
-*/
-typedef int (*thrd_start_t)(void *arg);
-
-/** Create a new thread.
-* @param thr Identifier of the newly created thread.
-* @param func A function pointer to the function that will be executed in
-* the new thread.
-* @param arg An argument to the thread function.
-* @return @ref thrd_success on success, or @ref thrd_nomem if no memory could
-* be allocated for the thread requested, or @ref thrd_error if the request
-* could not be honored.
-* @note A thread’s identifier may be reused for a different thread once the
-* original thread has exited and either been detached or joined to another
-* thread.
-*/
-int thrd_create(thrd_t *thr, thrd_start_t func, void *arg);
-
-/** Identify the calling thread.
-* @return The identifier of the calling thread.
-*/
-thrd_t thrd_current(void);
-
-
-/** Dispose of any resources allocated to the thread when that thread exits.
-* @return @ref thrd_success on success, or @ref thrd_error if the request
-* could not be honored.
-*/
-int thrd_detach(thrd_t thr);
-
-/** Compare two thread identifiers.
-* The function determines if two thread identifiers refer to the same thread.
-* @return Zero if the two thread identifiers refer to different threads.
-* Otherwise a nonzero value is returned.
-*/
-int thrd_equal(thrd_t thr0, thrd_t thr1);
-
-/** Terminate execution of the calling thread.
-* @param res Result code of the calling thread.
-*/
-TTHREAD_NORETURN void thrd_exit(int res);
-
-/** Wait for a thread to terminate.
-* The function joins the given thread with the current thread by blocking
-* until the other thread has terminated.
-* @param thr The thread to join with.
-* @param res If this pointer is not NULL, the function will store the result
-* code of the given thread in the integer pointed to by @c res.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int thrd_join(thrd_t thr, int *res);
-
-/** Put the calling thread to sleep.
-* Suspend execution of the calling thread.
-* @param duration Interval to sleep for
-* @param remaining If non-NULL, this parameter will hold the remaining
-* time of @c duration upon return. This will
-* typically be zero, but if the thread was woken up
-* by a signal that is not ignored before @c duration was
-* reached, @c remaining will hold a positive time.
-* @return 0 (zero) on successful sleep, -1 if an interrupt occurred,
-* or a negative value if the operation fails.
-*/
-int thrd_sleep(const struct timespec *duration, struct timespec *remaining);
-
-/** Yield execution to another thread.
-* Permit other threads to run, even if the current thread would ordinarily
-* continue to run.
-*/
-void thrd_yield(void);
-
-/* Thread local storage */
-#if defined(_TTHREAD_WIN32_)
-typedef DWORD tss_t;
-#else
-typedef pthread_key_t tss_t;
-#endif
-
-/** Destructor function for a thread-specific storage.
-* @param val The value of the destructed thread-specific storage.
-*/
-typedef void (*tss_dtor_t)(void *val);
-
-/** Create a thread-specific storage.
-* @param key The unique key identifier that will be set if the function is
-* successful.
-* @param dtor Destructor function. This can be NULL.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-* @note On Windows, the @c dtor will definitely be called when
-* appropriate for threads created with @ref thrd_create. It will be
-* called for other threads in most cases, the possible exception being
-* for DLLs loaded with LoadLibraryEx. In order to be certain, you
-* should use @ref thrd_create whenever possible.
-*/
-int tss_create(tss_t *key, tss_dtor_t dtor);
-
-/** Delete a thread-specific storage.
-* The function releases any resources used by the given thread-specific
-* storage.
-* @param key The key that shall be deleted.
-*/
-void tss_delete(tss_t key);
-
-/** Get the value for a thread-specific storage.
-* @param key The thread-specific storage identifier.
-* @return The value for the current thread held in the given thread-specific
-* storage.
-*/
-void *tss_get(tss_t key);
-
-/** Set the value for a thread-specific storage.
-* @param key The thread-specific storage identifier.
-* @param val The value of the thread-specific storage to set for the current
-* thread.
-* @return @ref thrd_success on success, or @ref thrd_error if the request could
-* not be honored.
-*/
-int tss_set(tss_t key, void *val);
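-
-/** Editorial usage sketch (illustrative only, not part of the original
-* sources); my_value is an assumed caller-owned pointer:
-* @code
-* static tss_t key;           // initialized once with tss_create()
-* tss_set(key, my_value);     // stored per thread; dtor runs at thread exit
-* void *v = tss_get(key);     // returns the calling thread's value
-* @endcode
-*/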
-
-#if defined(_TTHREAD_WIN32_)
- typedef struct {
- LONG volatile status;
- CRITICAL_SECTION lock;
- } once_flag;
- #define ONCE_FLAG_INIT {0,}
-#else
- #define once_flag pthread_once_t
- #define ONCE_FLAG_INIT PTHREAD_ONCE_INIT
-#endif
-
-/** Invoke a callback exactly once
- * @param flag Flag used to ensure the callback is invoked exactly
- * once.
- * @param func Callback to invoke.
- */
-#if defined(_TTHREAD_WIN32_)
- void call_once(once_flag *flag, void (*func)(void));
-#else
- #define call_once(flag,func) pthread_once(flag,func)
-#endif
-
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !WITH_C11THREADS */
-
-/**
- * @brief librdkafka extensions to c11threads
- */
-#include "tinycthread_extra.h"
-
-#endif /* _TINYCTHREAD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c
deleted file mode 100644
index 58049448c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @brief Extra methods added to tinycthread/c11threads
- */
-
-#include "rd.h"
-#include "rdtime.h"
-#include "tinycthread.h"
-
-
-int thrd_setname(const char *name) {
-#if HAVE_PTHREAD_SETNAME_GNU
- if (!pthread_setname_np(pthread_self(), name))
- return thrd_success;
-#elif HAVE_PTHREAD_SETNAME_DARWIN
- pthread_setname_np(name);
- return thrd_success;
-#elif HAVE_PTHREAD_SETNAME_FREEBSD
- pthread_set_name_np(pthread_self(), name);
- return thrd_success;
-#endif
- return thrd_error;
-}
-
-int thrd_is_current(thrd_t thr) {
-#if defined(_TTHREAD_WIN32_)
- return GetThreadId(thr) == GetCurrentThreadId();
-#else
- return (pthread_self() == thr);
-#endif
-}
-
-
-#ifdef _WIN32
-void cnd_wait_enter(cnd_t *cond) {
- /* Increment number of waiters */
- EnterCriticalSection(&cond->mWaitersCountLock);
- ++cond->mWaitersCount;
- LeaveCriticalSection(&cond->mWaitersCountLock);
-}
-
-void cnd_wait_exit(cnd_t *cond) {
- /* Decrement number of waiters */
- EnterCriticalSection(&cond->mWaitersCountLock);
- --cond->mWaitersCount;
- LeaveCriticalSection(&cond->mWaitersCountLock);
-}
-#endif
-
-
-
-int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) {
- if (timeout_ms == -1 /* INFINITE*/)
- return cnd_wait(cnd, mtx);
-#if defined(_TTHREAD_WIN32_)
- return _cnd_timedwait_win32(cnd, mtx, (DWORD)timeout_ms);
-#else
- struct timeval tv;
- struct timespec ts;
-
- gettimeofday(&tv, NULL);
- ts.tv_sec = tv.tv_sec;
- ts.tv_nsec = tv.tv_usec * 1000;
-
- ts.tv_sec += timeout_ms / 1000;
- ts.tv_nsec += (timeout_ms % 1000) * 1000000;
-
- if (ts.tv_nsec >= 1000000000) {
- ts.tv_sec++;
- ts.tv_nsec -= 1000000000;
- }
-
- return cnd_timedwait(cnd, mtx, &ts);
-#endif
-}
-
-int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp) {
- rd_ts_t pre = rd_clock();
- int r;
- r = cnd_timedwait_ms(cnd, mtx, *timeout_msp);
- if (r != thrd_timedout) {
- /* Subtract spent time */
- (*timeout_msp) -= (int)(rd_clock() - pre) / 1000;
- }
- return r;
-}
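-
-/* Editorial usage sketch (illustrative only, not part of the original
- * sources): cnd_timedwait_msp() shrinks the caller's timeout by the time
- * already spent, which makes re-waiting after spurious or unrelated
- * wakeups straightforward (`done' is an assumed caller-owned predicate):
- *
- *   int remains_ms = 1000;  // total budget: one second
- *   mtx_lock(&mtx);
- *   while (!done) {
- *           if (cnd_timedwait_msp(&cnd, &mtx, &remains_ms) == thrd_timedout)
- *                   break;  // budget exhausted
- *   }
- *   mtx_unlock(&mtx);
- */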
-
-int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) {
- if (tspec->tv_sec == RD_POLL_INFINITE)
- return cnd_wait(cnd, mtx);
- else if (tspec->tv_sec == RD_POLL_NOWAIT)
- return thrd_timedout;
-
- return cnd_timedwait(cnd, mtx, tspec);
-}
-
-
-/**
- * @name Read-write locks
- * @{
- */
-#ifndef _WIN32
-int rwlock_init(rwlock_t *rwl) {
- int r = pthread_rwlock_init(rwl, NULL);
- if (r) {
- errno = r;
- return thrd_error;
- }
- return thrd_success;
-}
-
-int rwlock_destroy(rwlock_t *rwl) {
- int r = pthread_rwlock_destroy(rwl);
- if (r) {
- errno = r;
- return thrd_error;
- }
- return thrd_success;
-}
-
-int rwlock_rdlock(rwlock_t *rwl) {
- int r = pthread_rwlock_rdlock(rwl);
- assert(r == 0);
- return thrd_success;
-}
-
-int rwlock_wrlock(rwlock_t *rwl) {
- int r = pthread_rwlock_wrlock(rwl);
- assert(r == 0);
- return thrd_success;
-}
-
-int rwlock_rdunlock(rwlock_t *rwl) {
- int r = pthread_rwlock_unlock(rwl);
- assert(r == 0);
- return thrd_success;
-}
-
-int rwlock_wrunlock(rwlock_t *rwl) {
- int r = pthread_rwlock_unlock(rwl);
- assert(r == 0);
- return thrd_success;
-}
-/**@}*/
-
-
-#endif /* !_WIN32 */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h
deleted file mode 100644
index e5f673173..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @brief Extra methods added to tinycthread/c11threads
- */
-
-
-#ifndef _TINYCTHREAD_EXTRA_H_
-#define _TINYCTHREAD_EXTRA_H_
-
-
-#ifndef _WIN32
-#include <pthread.h> /* needed for rwlock_t */
-#endif
-
-
-/**
- * @brief Set thread system name if platform supports it (pthreads)
- * @return thrd_success or thrd_error
- */
-int thrd_setname(const char *name);
-
-/**
- * @brief Checks if passed thread is the current thread.
- * @return non-zero if same thread, else 0.
- */
-int thrd_is_current(thrd_t thr);
-
-
-#ifdef _WIN32
-/**
- * @brief Mark the current thread as waiting on cnd.
- *
- * @remark This is to be used when the thread uses its own
- * WaitForMultipleEvents() call rather than cnd_timedwait().
- *
- * @sa cnd_wait_exit()
- */
-void cnd_wait_enter(cnd_t *cond);
-
-/**
- * @brief Mark the current thread as no longer waiting on cnd.
- */
-void cnd_wait_exit(cnd_t *cond);
-#endif
-
-
-/**
- * @brief Same as cnd_timedwait() but takes a relative timeout in milliseconds.
- */
-int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms);
-
-/**
- * @brief Same as cnd_timedwait_ms() but updates the remaining time.
- */
-int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp);
-
-/**
- * @brief Same as cnd_timedwait() but honours
- * RD_POLL_INFINITE (uses cnd_wait()),
- * and RD_POLL_NOWAIT (return thrd_timedout immediately).
- *
- * @remark Set up \p tspec with rd_timeout_init_timespec().
- */
-int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec);
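-
-/** Editorial usage sketch (illustrative only, not part of the original
-* sources): the timespec is set up once (per the remark above, with
-* rd_timeout_init_timespec()) and reused across re-waits so the overall
-* deadline stays fixed; `done' is an assumed caller-owned predicate:
-* @code
-* struct timespec tspec;
-* rd_timeout_init_timespec(&tspec, timeout_ms);
-* mtx_lock(&mtx);
-* while (!done)
-*         if (cnd_timedwait_abs(&cnd, &mtx, &tspec) == thrd_timedout)
-*                 break;
-* mtx_unlock(&mtx);
-* @endcode
-*/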
-
-
-
-/**
- * @brief Read-write locks
- */
-
-#if defined(_TTHREAD_WIN32_)
-typedef struct rwlock_t {
- SRWLOCK lock;
- LONG rcnt;
- LONG wcnt;
-} rwlock_t;
-#define rwlock_init(rwl) \
- do { \
- (rwl)->rcnt = (rwl)->wcnt = 0; \
- InitializeSRWLock(&(rwl)->lock); \
- } while (0)
-#define rwlock_destroy(rwl)
-#define rwlock_rdlock(rwl) \
- do { \
- if (0) \
- printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
- AcquireSRWLockShared(&(rwl)->lock); \
- InterlockedIncrement(&(rwl)->rcnt); \
- } while (0)
-#define rwlock_wrlock(rwl) \
- do { \
- if (0) \
- printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
- AcquireSRWLockExclusive(&(rwl)->lock); \
- InterlockedIncrement(&(rwl)->wcnt); \
- } while (0)
-#define rwlock_rdunlock(rwl) \
- do { \
- if (0) \
- printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \
- ReleaseSRWLockShared(&(rwl)->lock); \
- InterlockedDecrement(&(rwl)->rcnt); \
- } while (0)
-#define rwlock_wrunlock(rwl) \
- do { \
- if (0) \
- printf("Thr %i: at %i: WRUNLOCK %p %s (%i, %i)\n",   \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \
- ReleaseSRWLockExclusive(&(rwl)->lock); \
- InterlockedDecrement(&(rwl)->wcnt); \
- } while (0)
-
-#define rwlock_rdlock_d(rwl) \
- do { \
- if (1) \
- printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
- AcquireSRWLockShared(&(rwl)->lock); \
- InterlockedIncrement(&(rwl)->rcnt); \
- } while (0)
-#define rwlock_wrlock_d(rwl) \
- do { \
- if (1) \
- printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
- AcquireSRWLockExclusive(&(rwl)->lock); \
- InterlockedIncrement(&(rwl)->wcnt); \
- } while (0)
-#define rwlock_rdunlock_d(rwl) \
- do { \
- if (1) \
- printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \
- ReleaseSRWLockShared(&(rwl)->lock); \
- InterlockedDecrement(&(rwl)->rcnt); \
- } while (0)
-#define rwlock_wrunlock_d(rwl) \
- do { \
- if (1) \
- printf("Thr %i: at %i: WRUNLOCK %p %s (%i, %i)\n",   \
- GetCurrentThreadId(), __LINE__, rwl, \
- __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
- assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \
- ReleaseSRWLockExclusive(&(rwl)->lock); \
- InterlockedDecrement(&(rwl)->wcnt); \
- } while (0)
-
-
-#else
-typedef pthread_rwlock_t rwlock_t;
-
-int rwlock_init(rwlock_t *rwl);
-int rwlock_destroy(rwlock_t *rwl);
-int rwlock_rdlock(rwlock_t *rwl);
-int rwlock_wrlock(rwlock_t *rwl);
-int rwlock_rdunlock(rwlock_t *rwl);
-int rwlock_wrunlock(rwlock_t *rwl);
-
-#endif
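-
-/** Editorial usage sketch (illustrative only, not part of the original
-* sources): multiple readers may hold the lock concurrently while writers
-* are exclusive; each acquisition must be paired with the matching unlock:
-* @code
-* rwlock_t rwl;
-* rwlock_init(&rwl);
-*
-* rwlock_rdlock(&rwl);      // shared: may run concurrently with readers
-* // ...read shared state...
-* rwlock_rdunlock(&rwl);
-*
-* rwlock_wrlock(&rwl);      // exclusive: no other readers or writers
-* // ...mutate shared state...
-* rwlock_wrunlock(&rwl);
-*
-* rwlock_destroy(&rwl);
-* @endcode
-*/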
-
-
-#endif /* _TINYCTHREAD_EXTRA_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h b/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h
deleted file mode 100644
index dd61b2c92..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Hand-crafted config header file for Win32 builds.
- */
-#ifndef _RD_WIN32_CONFIG_H_
-#define _RD_WIN32_CONFIG_H_
-
-#ifndef WITHOUT_WIN32_CONFIG
-#define WITH_SSL 1
-#define WITH_ZLIB 1
-#define WITH_SNAPPY 1
-#define WITH_ZSTD 1
-#define WITH_CURL 1
-#define WITH_OAUTHBEARER_OIDC 1
-/* zstd is linked dynamically on Windows, but the dynamic library provides
- * the experimental/advanced API, just as the static builds on *nix */
-#define WITH_ZSTD_STATIC 1
-#define WITH_SASL_SCRAM 1
-#define WITH_SASL_OAUTHBEARER 1
-#define ENABLE_DEVEL 0
-#define WITH_PLUGINS 1
-#define WITH_HDRHISTOGRAM 1
-#endif
-#define SOLIB_EXT ".dll"
-
-/* Notice: Keep up to date */
-#define BUILT_WITH \
- "SSL ZLIB SNAPPY ZSTD CURL SASL_SCRAM SASL_OAUTHBEARER PLUGINS " \
- "HDRHISTOGRAM"
-
-#endif /* _RD_WIN32_CONFIG_H_ */