Diffstat (limited to 'fluent-bit/lib/librdkafka-2.1.0/tests')
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore | 15
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c | 72
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c | 98
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c | 244
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c | 173
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c | 865
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c | 133
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c | 163
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c | 136
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c | 179
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c | 99
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c | 576
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c | 537
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c | 473
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c | 512
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c | 172
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c | 166
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c | 142
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c | 332
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c | 289
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c | 162
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c | 71
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c | 212
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c | 147
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c | 541
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c | 79
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c | 198
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c | 589
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c | 119
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c | 509
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c | 377
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c | 73
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c | 86
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c | 85
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c | 120
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c | 284
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c | 251
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c | 96
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c | 252
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c | 77
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c | 93
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c | 459
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c | 65
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c | 97
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c | 283
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c | 162
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c | 124
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c | 125
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c | 220
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp | 535
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp | 236
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c | 366
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c | 311
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp | 112
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp | 123
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp | 237
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp | 163
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp | 275
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c | 126
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp | 180
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c | 481
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp | 140
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp | 129
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp | 148
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c | 138
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c | 123
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp | 197
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c | 448
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c | 381
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c | 87
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c | 252
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c | 350
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c | 357
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp | 96
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c | 93
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c | 2535
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c | 3797
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp | 133
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c | 228
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c | 211
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp | 388
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c | 334
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c | 162
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c | 358
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c | 172
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c | 297
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c | 97
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c | 197
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c | 230
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp | 122
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp | 466
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp | 1218
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c | 189
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp | 195
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp | 446
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c | 535
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c | 1297
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c | 617
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c | 3926
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c | 300
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c | 259
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp | 218
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp | 183
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp | 127
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c | 98
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp | 3170
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp | 176
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp | 179
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp | 214
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c | 324
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c | 121
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp | 148
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c | 183
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c | 118
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c | 226
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c | 98
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c | 69
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c | 78
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c | 213
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp | 125
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c | 78
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c | 127
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c | 81
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c | 171
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c | 113
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c | 92
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp | 143
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c | 181
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c | 608
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c | 189
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c | 164
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp | 60
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt | 154
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py | 256
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/Makefile | 182
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/README.md | 505
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh | 33
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb | 30
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py | 297
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh | 17
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh | 20
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py | 183
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh | 56
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore | 11
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile | 8
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md | 13
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12 | bin 4345 -> 0 bytes
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem | 109
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key | 34
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh | 93
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore | 1
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile | 12
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md | 31
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c | 74
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h | 90
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh | 165
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py | 363
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore | 1
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt | 16
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile | 22
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c | 314
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h | 54
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore | 1
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java | 97
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile | 12
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java | 46
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md | 14
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java | 162
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh | 11
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions | 483
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh | 59
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh | 50
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh | 43
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py | 115
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile | 19
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c | 58
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt | 2
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh | 16
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh | 16
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh | 140
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c | 249
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py | 328
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md | 6
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json | 6
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json | 5
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json | 5
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c | 801
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h | 85
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c | 145
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h | 61
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/test.c | 6960
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example | 27
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/test.h | 936
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp | 126
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h | 360
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h | 402
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md | 4
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md | 21
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq | 42
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py | 150
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt | 3
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py | 124
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh | 87
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c | 122
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp | 159
205 files changed, 0 insertions, 59902 deletions
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore
deleted file mode 100644
index 6d6f9ff96..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-*.test
-test.conf
-test-runner
-core
-vgcore.*
-core.*
-stats_*.json
-test_report_*.json
-test_suite_*.json
-.\#*
-*.pyc
-# sqlite3 db:
-rdktests
-*.log
-*.png
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c
deleted file mode 100644
index e0a02fb62..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-/**
- * @brief Initialize a client with debugging to have it print its
- * build options, OpenSSL version, etc.
- * Useful for manually verifying build options in CI logs.
- */
-static void show_build_opts(void) {
- rd_kafka_conf_t *conf = rd_kafka_conf_new();
- rd_kafka_t *rk;
- char errstr[512];
-
- TEST_SAY("builtin.features = %s\n",
- test_conf_get(conf, "builtin.features"));
-
- test_conf_set(conf, "debug", "generic,security");
-
- /* Try with SSL first, which may or may not be a build option. */
- if (rd_kafka_conf_set(conf, "security.protocol", "SSL", errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_SAY("Failed to security.protocol=SSL: %s\n", errstr);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "Failed to create producer: %s", errstr);
-
- rd_kafka_destroy(rk);
-}
-
-
-/**
- * @brief Call librdkafka built-in unit-tests
- */
-int main_0000_unittests(int argc, char **argv) {
- int fails = 0;
-
- show_build_opts();
-
- fails += rd_kafka_unittest();
- if (fails)
- TEST_FAIL("%d unit-test(s) failed", fails);
- return 0;
-}
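
The build-option query above uses only public API. A rough standalone sketch of the same builtin.features lookup (assuming nothing beyond a linked librdkafka; the test harness is not needed):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char features[512];
        size_t sz = sizeof(features);

        /* "builtin.features" is a read-only property listing the
         * capabilities this build was compiled with, e.g.
         * "gzip,snappy,ssl,sasl,...". */
        if (rd_kafka_conf_get(conf, "builtin.features", features, &sz) ==
            RD_KAFKA_CONF_OK)
                printf("builtin.features = %s\n", features);

        rd_kafka_conf_destroy(conf);
        return 0;
}
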
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c
deleted file mode 100644
index c2a4eb57a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests multiple rd_kafka_t object creations and destructions.
- * Issue #20
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-int main_0001_multiobj(int argc, char **argv) {
- int partition = RD_KAFKA_PARTITION_UA; /* random */
- int i;
- int NUM_ITER = test_quick ? 2 : 5;
- const char *topic = NULL;
-
- TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);
-
- /* Create, use and destroy NUM_ITER kafka instances. */
- for (i = 0; i < NUM_ITER; i++) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- test_timing_t t_full, t_destroy;
-
- test_conf_init(&conf, &topic_conf, 30);
-
- if (!topic)
- topic = test_mk_topic_name("0001", 0);
-
- TIMING_START(&t_full, "full create-produce-destroy cycle");
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL(
- "Failed to create topic for "
- "rdkafka instance #%i: %s\n",
- i, rd_kafka_err2str(rd_kafka_last_error()));
-
- rd_snprintf(msg, sizeof(msg),
- "%s test message for iteration #%i", argv[0], i);
-
- /* Produce a message */
- rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, NULL);
-
- /* Wait for it to be sent (and possibly acked) */
- rd_kafka_flush(rk, -1);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TIMING_START(&t_destroy, "rd_kafka_destroy()");
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_destroy);
-
- TIMING_STOP(&t_full);
-
- /* Topic is created on the first iteration. */
- if (i > 0)
- TIMING_ASSERT(&t_full, 0, 999);
- }
-
- return 0;
-}
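
Each iteration above is one complete producer lifecycle. A minimal public-API sketch of that cycle (the bootstrap address and topic name are illustrative placeholders, not values from the test):

#include <string.h>
#include <librdkafka/rdkafka.h>

static int produce_once(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        const char *msg = "one test message";

        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                /* on failure the conf remains owned by the caller */
                rd_kafka_conf_destroy(conf);
                return -1;
        }

        /* Enqueue one copied message to any partition of the topic. */
        rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                          RD_KAFKA_V_VALUE((void *)msg, strlen(msg)),
                          RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                          RD_KAFKA_V_END);

        rd_kafka_flush(rk, -1); /* wait for delivery, as the test does */
        rd_kafka_destroy(rk);   /* tears down all broker threads */
        return 0;
}
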
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c
deleted file mode 100644
index 087e37ae6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests that producing to unknown partitions fails.
- * Issue #39
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgs_wait = 0; /* bitmask */
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (!(msgs_wait & (1 << msgid)))
- TEST_FAIL(
- "Unwanted delivery report for message #%i "
- "(waiting for 0x%x)\n",
- msgid, msgs_wait);
-
- TEST_SAY("Delivery report for message #%i: %s\n", msgid,
- rd_kafka_err2str(err));
-
- msgs_wait &= ~(1 << msgid);
-
- if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- TEST_FAIL("Message #%i failed with unexpected error %s\n",
- msgid, rd_kafka_err2str(err));
-}
-
-
-static void do_test_unkpart(void) {
- int partition = 99; /* non-existent */
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- int msgcnt = 10;
- int i;
- int fails = 0;
- const struct rd_kafka_metadata *metadata;
-
- TEST_SAY(_C_BLU "%s\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(&conf, &topic_conf, 10);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- /* Request metadata so that we know the cluster is up before producing
- * messages, otherwise erroneous partitions will not fail immediately.*/
- if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata,
- tmout_multip(15000))) !=
- RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Failed to acquire metadata: %s\n",
- rd_kafka_err2str(r));
-
- rd_kafka_metadata_destroy(metadata);
-
- /* Produce messages */
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s test message #%i",
- __FUNCTION__, i);
- r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, msgidp);
- if (r == -1) {
- if (rd_kafka_last_error() ==
- RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- TEST_SAY(
- "Failed to produce message #%i: "
- "unknown partition: good!\n",
- i);
- else
- TEST_FAIL(
- "Failed to produce message #%i: %s\n", i,
- rd_kafka_err2str(rd_kafka_last_error()));
- free(msgidp);
- } else {
- if (i > 5) {
- fails++;
- TEST_SAY(
- "Message #%i produced: "
- "should've failed\n",
- i);
- }
- msgs_wait |= (1 << i);
- }
-
- /* After half the messages: forcibly refresh metadata
- * to update the actual partition count:
- * this will make subsequent produce() calls fail immediately.
- */
- if (i == 5) {
- r = test_get_partition_count(
- rk, rd_kafka_topic_name(rkt), 15000);
- TEST_ASSERT(r != -1, "failed to get partition count");
- }
- }
-
- /* Wait for messages to time out */
- rd_kafka_flush(rk, -1);
-
- if (msgs_wait != 0)
- TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
-
-
- if (fails > 0)
- TEST_FAIL("See previous error(s)\n");
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN "%s PASSED\n" _C_CLR, __FUNCTION__);
-}
-
-
-/**
- * @brief Test message timeouts for messages produced to unknown partitions
- * when there is no broker connection, which makes the messages end
- * up in the UA partition.
- * This verifies the UA partitions are properly scanned for timeouts.
- *
- * This test is a copy of confluent-kafka-python's
- * test_Producer.test_basic_api() test that surfaced this issue.
- */
-static void do_test_unkpart_timeout_nobroker(void) {
- const char *topic = test_mk_topic_name("0002_unkpart_tmout", 0);
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_resp_err_t err;
- int remains = 0;
-
- TEST_SAY(_C_BLU "%s\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(NULL, NULL, 10);
-
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "debug", "topic");
- test_conf_set(conf, "message.timeout.ms", "10");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = rd_kafka_topic_new(rk, topic, NULL);
-
- err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
- NULL, 0, NULL, 0, &remains);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
- remains++;
-
- err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
- "hi", 2, "hello", 5, &remains);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
- remains++;
-
- err = rd_kafka_produce(rkt, 9 /* explicit, but unknown, partition */,
- RD_KAFKA_MSG_F_COPY, "three", 5, NULL, 0,
- &remains);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
- remains++;
-
- rd_kafka_poll(rk, 1);
- rd_kafka_poll(rk, 2);
- TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));
- rd_kafka_flush(rk, -1);
-
- TEST_ASSERT(rd_kafka_outq_len(rk) == 0,
- "expected no more messages in queue, got %d",
- rd_kafka_outq_len(rk));
-
- TEST_ASSERT(remains == 0, "expected no messages remaining, got %d",
- remains);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN "%s PASSED\n" _C_CLR, __FUNCTION__);
-}
-
-
-int main_0002_unkpart(int argc, char **argv) {
- do_test_unkpart();
- do_test_unkpart_timeout_nobroker();
- return 0;
-}
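
The test registers the legacy per-payload dr_cb; with the current message-based callback, set via rd_kafka_conf_set_dr_msg_cb(), the same unknown-partition check would look roughly like this (a sketch, not code from the suite):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Invoked once per message from rd_kafka_poll()/rd_kafka_flush().
 * Register with: rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); */
static void dr_msg_cb(rd_kafka_t *rk,
                      const rd_kafka_message_t *rkmessage,
                      void *opaque) {
        if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
                printf("expected failure: %s\n",
                       rd_kafka_err2str(rkmessage->err));
        else if (rkmessage->err)
                fprintf(stderr, "unexpected error: %s\n",
                        rd_kafka_err2str(rkmessage->err));
}
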
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c
deleted file mode 100644
index 97b511125..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests "message.bytes.max"
- * Issue #24
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgs_wait = 0; /* bitmask */
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (err)
- TEST_FAIL("Unexpected delivery error for message #%i: %s\n",
- msgid, rd_kafka_err2str(err));
-
- if (!(msgs_wait & (1 << msgid)))
- TEST_FAIL(
- "Unwanted delivery report for message #%i "
- "(waiting for 0x%x)\n",
- msgid, msgs_wait);
-
- TEST_SAY("Delivery report for message #%i: %s\n", msgid,
- rd_kafka_err2str(err));
-
- msgs_wait &= ~(1 << msgid);
-}
-
-
-int main_0003_msgmaxsize(int argc, char **argv) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
-
- static const struct {
- ssize_t keylen;
- ssize_t len;
- rd_kafka_resp_err_t exp_err;
- } sizes[] = {/* message.max.bytes includes framing */
- {-1, 5000, RD_KAFKA_RESP_ERR_NO_ERROR},
- {0, 99900, RD_KAFKA_RESP_ERR_NO_ERROR},
- {0, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
- {100000, 0, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
- {1000, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
- {0, 101000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
- {99000, -1, RD_KAFKA_RESP_ERR_NO_ERROR},
- {-1, -1, RD_KAFKA_RESP_ERR__END}};
- int i;
-
- test_conf_init(&conf, &topic_conf, 10);
-
- /* Set a small maximum message size. */
- if (rd_kafka_conf_set(conf, "message.max.bytes", "100000", errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s\n", errstr);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0003", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- for (i = 0; sizes[i].exp_err != RD_KAFKA_RESP_ERR__END; i++) {
- void *value =
- sizes[i].len != -1 ? calloc(1, sizes[i].len) : NULL;
- size_t len = sizes[i].len != -1 ? sizes[i].len : 0;
- void *key =
- sizes[i].keylen != -1 ? calloc(1, sizes[i].keylen) : NULL;
- size_t keylen = sizes[i].keylen != -1 ? sizes[i].keylen : 0;
- int *msgidp = malloc(sizeof(*msgidp));
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- *msgidp = i;
-
- r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, value,
- len, key, keylen, msgidp);
- if (r == -1)
- err = rd_kafka_last_error();
-
- if (err != sizes[i].exp_err) {
- TEST_FAIL("Msg #%d produce(len=%" PRIdsz
- ", keylen=%" PRIdsz "): got %s, expected %s",
- i, sizes[i].len, sizes[i].keylen,
- rd_kafka_err2name(err),
- rd_kafka_err2name(sizes[i].exp_err));
- } else {
- TEST_SAY(
- "Msg #%d produce() returned expected %s "
- "for value size %" PRIdsz " and key size %" PRIdsz
- "\n",
- i, rd_kafka_err2name(err), sizes[i].len,
- sizes[i].keylen);
-
- if (!sizes[i].exp_err)
- msgs_wait |= (1 << i);
- else
- free(msgidp);
- }
-
- if (value)
- free(value);
- if (key)
- free(key);
- }
-
- /* Wait for messages to be delivered. */
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 50);
-
- if (msgs_wait != 0)
- TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
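
The message.max.bytes limit is enforced client-side, so an oversized produce() fails synchronously with RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE rather than after a broker round-trip. A minimal sketch of that rejection path (the topic name is a placeholder; no broker connection is needed to hit it):

#include <stdio.h>
#include <stdlib.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        void *payload = calloc(1, 200000); /* > message.max.bytes */

        rd_kafka_conf_set(conf, "message.max.bytes", "100000", errstr,
                          sizeof(errstr));
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk)
                return 1;

        /* producev() returns the error directly; with rd_kafka_produce()
         * the same condition is read back via rd_kafka_last_error(). */
        if (rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                              RD_KAFKA_V_VALUE(payload, 200000),
                              RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                              RD_KAFKA_V_END) ==
            RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
                printf("rejected locally, as expected\n");

        free(payload);
        rd_kafka_destroy(rk);
        return 0;
}
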
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c
deleted file mode 100644
index 51401e17d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c
+++ /dev/null
@@ -1,865 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests various config related things
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
-}
-
-static void
-error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
-}
-
-
-static int32_t partitioner(const rd_kafka_topic_t *rkt,
- const void *keydata,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- return 0;
-}
-
-
-static void
-conf_verify(int line, const char **arr, size_t cnt, const char **confs) {
- int i, j;
-
-
- for (i = 0; confs[i]; i += 2) {
- for (j = 0; j < (int)cnt; j += 2) {
- if (!strcmp(confs[i], arr[j])) {
- if (strcmp(confs[i + 1], arr[j + 1]))
- TEST_FAIL(
- "%i: Property %s mismatch: "
- "expected %s != retrieved %s",
- line, confs[i], confs[i + 1],
- arr[j + 1]);
- break;
- }
- }
- if (j == (int)cnt)
- TEST_FAIL(
- "%i: "
- "Property %s not found in config\n",
- line, confs[i]);
- }
-}
-
-
-static void conf_cmp(const char *desc,
- const char **a,
- size_t acnt,
- const char **b,
- size_t bcnt) {
- int i;
-
- if (acnt != bcnt)
- TEST_FAIL("%s config compare: count %" PRIusz " != %" PRIusz
- " mismatch",
- desc, acnt, bcnt);
-
- for (i = 0; i < (int)acnt; i += 2) {
- if (strcmp(a[i], b[i]))
- TEST_FAIL("%s conf mismatch: %s != %s", desc, a[i],
- b[i]);
- else if (strcmp(a[i + 1], b[i + 1])) {
- /* The default_topic_conf will be auto-created
- * when global->topic fallthru is used, so its
- * value will not match here. */
- if (!strcmp(a[i], "default_topic_conf"))
- continue;
- TEST_FAIL("%s conf value mismatch for %s: %s != %s",
- desc, a[i], a[i + 1], b[i + 1]);
- }
- }
-}
-
-
-/**
- * @brief Not called, just used for config
- */
-static int on_new_call_cnt;
-static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- TEST_SAY("%s: on_new() called\n", rd_kafka_name(rk));
- on_new_call_cnt++;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief When rd_kafka_new() succeeds it takes ownership of the config object,
- * but when it fails the config object remains in application custody.
- * These tests makes sure that's the case (preferably run with valgrind)
- */
-static void do_test_kafka_new_failures(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- char errstr[512];
-
- conf = rd_kafka_conf_new();
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "kafka_new() failed: %s", errstr);
- rd_kafka_destroy(rk);
-
- /* Set an erroneous configuration value that is not checked
- * by conf_set() but by rd_kafka_new() */
- conf = rd_kafka_conf_new();
- if (rd_kafka_conf_set(conf, "partition.assignment.strategy",
- "range,thiswillfail", errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s", errstr);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- TEST_ASSERT(!rk, "kafka_new() should have failed");
-
- /* config object should still belong to us,
- * correct the erroneous config and try again. */
- if (rd_kafka_conf_set(conf, "partition.assignment.strategy", NULL,
- errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s", errstr);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "kafka_new() failed: %s", errstr);
- rd_kafka_destroy(rk);
-
- /* set conflicting properties */
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "acks", "1");
- test_conf_set(conf, "enable.idempotence", "true");
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- TEST_ASSERT(!rk, "kafka_new() should have failed");
- rd_kafka_conf_destroy(conf);
- TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
-}
-
-
-/**
- * @brief Verify that INVALID properties (such as for Java SSL properties)
- * work, as well as INTERNAL properties.
- */
-static void do_test_special_invalid_conf(void) {
- rd_kafka_conf_t *conf;
- char errstr[512];
- rd_kafka_conf_res_t res;
-
- conf = rd_kafka_conf_new();
-
- res = rd_kafka_conf_set(conf, "ssl.truststore.location", "abc", errstr,
- sizeof(errstr));
- /* Existing apps might not print the error string when conf_set
- * returns UNKNOWN, only on INVALID, so make sure that is
- * what is being returned. */
- TEST_ASSERT(res == RD_KAFKA_CONF_INVALID,
- "expected ssl.truststore.location to fail with INVALID, "
- "not %d",
- res);
- /* Make sure there is a link to documentation */
- TEST_ASSERT(strstr(errstr, "http"),
- "expected ssl.truststore.location to provide link to "
- "documentation, not \"%s\"",
- errstr);
- TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
-
-
- res = rd_kafka_conf_set(conf, "sasl.jaas.config", "abc", errstr,
- sizeof(errstr));
- /* Existing apps might not print the error string when conf_set
- * returns UNKNOWN, only on INVALID, so make sure that is
- * what is being returned. */
- TEST_ASSERT(res == RD_KAFKA_CONF_INVALID,
- "expected sasl.jaas.config to fail with INVALID, "
- "not %d",
- res);
- /* Make sure there is a link to documentation */
- TEST_ASSERT(strstr(errstr, "http"),
- "expected sasl.jaas.config to provide link to "
- "documentation, not \"%s\"",
- errstr);
- TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
-
-
- res = rd_kafka_conf_set(conf, "interceptors", "1", errstr,
- sizeof(errstr));
- TEST_ASSERT(res == RD_KAFKA_CONF_INVALID,
- "expected interceptors to fail with INVALID, "
- "not %d",
- res);
- TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
-
- rd_kafka_conf_destroy(conf);
-}
-
-
-/**
- * @brief Verify idempotence configuration constraints
- */
-static void do_test_idempotence_conf(void) {
- static const struct {
- const char *prop;
- const char *val;
- rd_bool_t topic_conf;
- rd_bool_t exp_rk_fail;
- rd_bool_t exp_rkt_fail;
- } check[] = {{"acks", "1", rd_true, rd_false, rd_true},
- {"acks", "all", rd_true, rd_false, rd_false},
- {"queuing.strategy", "lifo", rd_true, rd_false, rd_true},
- {NULL}};
- int i;
-
- for (i = 0; check[i].prop; i++) {
- int j;
-
- for (j = 0; j < 1 + (check[i].topic_conf ? 1 : 0); j++) {
- /* j = 0: set on global config
- * j = 1: set on topic config */
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf = NULL;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- char errstr[512];
-
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "enable.idempotence", "true");
-
- if (j == 0)
- test_conf_set(conf, check[i].prop,
- check[i].val);
-
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
- sizeof(errstr));
-
- if (!rk) {
- /* default topic config (j=0) will fail. */
- TEST_ASSERT(check[i].exp_rk_fail ||
- (j == 0 &&
- check[i].exp_rkt_fail &&
- check[i].topic_conf),
- "Did not expect config #%d.%d "
- "to fail: %s",
- i, j, errstr);
-
- rd_kafka_conf_destroy(conf);
- continue;
-
- } else {
- TEST_ASSERT(!check[i].exp_rk_fail,
- "Expect config #%d.%d to fail", i,
- j);
- }
-
- if (j == 1) {
- tconf = rd_kafka_topic_conf_new();
- test_topic_conf_set(tconf, check[i].prop,
- check[i].val);
- }
-
- rkt = rd_kafka_topic_new(rk, "mytopic", tconf);
- if (!rkt) {
- TEST_ASSERT(
- check[i].exp_rkt_fail,
- "Did not expect topic config "
- "#%d.%d to fail: %s",
- i, j,
- rd_kafka_err2str(rd_kafka_last_error()));
-
-
- } else {
- TEST_ASSERT(!check[i].exp_rkt_fail,
- "Expect topic config "
- "#%d.%d to fail",
- i, j);
- rd_kafka_topic_destroy(rkt);
- }
-
- rd_kafka_destroy(rk);
- }
- }
-}
-
-
-/**
- * @brief Verify that configuration properties can be extracted
- * from the instance config object.
- */
-static void do_test_instance_conf(void) {
- rd_kafka_conf_t *conf;
- const rd_kafka_conf_t *iconf;
- rd_kafka_t *rk;
- rd_kafka_conf_res_t res;
- static const char *props[] = {
- "linger.ms", "123", "group.id", "test1",
- "enable.auto.commit", "false", NULL,
- };
- const char **p;
-
- conf = rd_kafka_conf_new();
-
- for (p = props; *p; p += 2) {
- res = rd_kafka_conf_set(conf, *p, *(p + 1), NULL, 0);
- TEST_ASSERT(res == RD_KAFKA_CONF_OK, "failed to set %s", *p);
- }
-
- rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0);
- TEST_ASSERT(rk, "failed to create consumer");
-
- iconf = rd_kafka_conf(rk);
- TEST_ASSERT(iconf, "failed to get instance config");
-
- for (p = props; *p; p += 2) {
- char dest[512];
- size_t destsz = sizeof(dest);
-
- res = rd_kafka_conf_get(iconf, *p, dest, &destsz);
- TEST_ASSERT(res == RD_KAFKA_CONF_OK,
- "failed to get %s: result %d", *p, res);
-
- TEST_SAY("Instance config %s=%s\n", *p, dest);
- TEST_ASSERT(!strcmp(*(p + 1), dest), "Expected %s=%s, not %s",
- *p, *(p + 1), dest);
- }
-
- rd_kafka_destroy(rk);
-}
-
-
-/**
- * @brief Verify that setting and retrieving the default topic config works.
- */
-static void do_test_default_topic_conf(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- const char *val, *exp_val;
-
- SUB_TEST_QUICK();
-
- conf = rd_kafka_conf_new();
-
- /* Set topic-level property, this will create the default topic config*/
- exp_val = "1234";
- test_conf_set(conf, "message.timeout.ms", exp_val);
-
- /* Get the default topic config */
- tconf = rd_kafka_conf_get_default_topic_conf(conf);
- TEST_ASSERT(tconf != NULL, "");
-
- /* Get value from global config by fall-thru */
- val = test_conf_get(conf, "message.timeout.ms");
- TEST_ASSERT(val && !strcmp(val, exp_val),
- "Expected (conf) message.timeout.ms=%s, not %s", exp_val,
- val ? val : "(NULL)");
-
- /* Get value from default topic config */
- val = test_topic_conf_get(tconf, "message.timeout.ms");
- TEST_ASSERT(val && !strcmp(val, exp_val),
- "Expected (topic conf) message.timeout.ms=%s, not %s",
- exp_val, val ? val : "(NULL)");
-
- /* Now change the value, should be reflected in both. */
- exp_val = "4444";
- test_topic_conf_set(tconf, "message.timeout.ms", exp_val);
-
- /* Get value from global config by fall-thru */
- val = test_conf_get(conf, "message.timeout.ms");
- TEST_ASSERT(val && !strcmp(val, exp_val),
- "Expected (conf) message.timeout.ms=%s, not %s", exp_val,
- val ? val : "(NULL)");
-
- /* Get value from default topic config */
- val = test_topic_conf_get(tconf, "message.timeout.ms");
- TEST_ASSERT(val && !strcmp(val, exp_val),
- "Expected (topic conf) message.timeout.ms=%s, not %s",
- exp_val, val ? val : "(NULL)");
-
-
- rd_kafka_conf_destroy(conf);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Verify behaviour of checking that message.timeout.ms fits within
- * configured linger.ms. By larry-cdn77.
- */
-static void do_message_timeout_linger_checks(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_t *rk;
- char errstr[512];
- int i;
- const char values[7][3][40] = {
- {"-", "-", "default and L and M"},
- {"100", "-", "set L such that L<M"},
- {"-", "300000", "set M such that L<M"},
- {"100", "300000", "set L and M such that L<M"},
- {"500000", "-", "!set L such that L>=M"},
- {"-", "10", "set M such that L>=M"},
- {"500000", "10", "!set L and M such that L>=M"}};
-
- SUB_TEST_QUICK();
-
- for (i = 0; i < 7; i++) {
- const char *linger = values[i][0];
- const char *msgtimeout = values[i][1];
- const char *desc = values[i][2];
- rd_bool_t expect_fail = *desc == '!';
-
- if (expect_fail)
- desc++; /* Push past the '!' */
-
- conf = rd_kafka_conf_new();
- tconf = rd_kafka_topic_conf_new();
-
- if (*linger != '-')
- test_conf_set(conf, "linger.ms", linger);
-
- if (*msgtimeout != '-')
- test_topic_conf_set(tconf, "message.timeout.ms",
- msgtimeout);
-
- rd_kafka_conf_set_default_topic_conf(conf, tconf);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
- sizeof(errstr));
-
- if (!rk)
- TEST_SAY("#%d \"%s\": rd_kafka_new() failed: %s\n", i,
- desc, errstr);
- else
- TEST_SAY("#%d \"%s\": rd_kafka_new() succeeded\n", i,
- desc);
-
- if (!expect_fail) {
- TEST_ASSERT(rk != NULL,
- "Expected success: "
- "message timeout linger: %s: %s",
- desc, errstr);
-
- rd_kafka_destroy(rk);
-
- } else {
- TEST_ASSERT(rk == NULL,
- "Expected failure: "
- "message timeout linger: %s",
- desc);
-
- rd_kafka_conf_destroy(conf);
- }
- }
-
- SUB_TEST_PASS();
-}
-
-
-int main_0004_conf(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *ignore_conf, *conf, *conf2;
- rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
- char errstr[512];
- rd_kafka_resp_err_t err;
- const char **arr_orig, **arr_dup;
- size_t cnt_orig, cnt_dup;
- int i;
- const char *topic;
- static const char *gconfs[] = {
- "message.max.bytes",
- "12345", /* int property */
- "client.id",
- "my id", /* string property */
- "debug",
- "topic,metadata,interceptor", /* S2F property */
- "topic.blacklist",
- "__.*", /* #778 */
- "auto.offset.reset",
- "earliest", /* Global->Topic fallthru */
-#if WITH_ZLIB
- "compression.codec",
- "gzip", /* S2I property */
-#endif
-#if defined(_WIN32)
- "ssl.ca.certificate.stores",
- "Intermediate ,, Root ,",
-#endif
- NULL
- };
- static const char *tconfs[] = {"request.required.acks",
- "-1", /* int */
- "auto.commit.enable",
- "false", /* bool */
- "auto.offset.reset",
- "error", /* S2I */
- "offset.store.path",
- "my/path", /* string */
- NULL};
-
- test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
- rd_kafka_conf_destroy(ignore_conf);
- rd_kafka_topic_conf_destroy(ignore_topic_conf);
-
- topic = test_mk_topic_name("0004", 0);
-
- /* Set up a global config object */
- conf = rd_kafka_conf_new();
-
- for (i = 0; gconfs[i]; i += 2) {
- if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i + 1], errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s\n", errstr);
- }
-
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
- rd_kafka_conf_set_error_cb(conf, error_cb);
- /* interceptor configs are not exposed as strings or in dumps
- * so the dump verification step will not cover them, but valgrind
- * will help track down memory leaks/use-after-free etc. */
- err = rd_kafka_conf_interceptor_add_on_new(conf, "testic", my_on_new,
- NULL);
- TEST_ASSERT(!err, "add_on_new() failed: %s", rd_kafka_err2str(err));
-
- /* Set up a topic config object */
- tconf = rd_kafka_topic_conf_new();
-
- rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
- rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);
-
- for (i = 0; tconfs[i]; i += 2) {
- if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i + 1],
- errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s\n", errstr);
- }
-
-
- /* Verify global config */
- arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
- conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);
-
- /* Verify copied global config */
- conf2 = rd_kafka_conf_dup(conf);
- arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
- conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
- conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
- rd_kafka_conf_dump_free(arr_orig, cnt_orig);
- rd_kafka_conf_dump_free(arr_dup, cnt_dup);
-
- /* Verify topic config */
- arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
- conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);
-
- /* Verify copied topic config */
- tconf2 = rd_kafka_topic_conf_dup(tconf);
- arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
- conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
- conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
- rd_kafka_conf_dump_free(arr_orig, cnt_orig);
- rd_kafka_conf_dump_free(arr_dup, cnt_dup);
-
-
- /*
- * Create kafka instances using original and copied confs
- */
-
- /* original */
- TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d",
- on_new_call_cnt);
- on_new_call_cnt = 0;
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- TEST_ASSERT(on_new_call_cnt == 1, "expected 1 on_new call, not %d",
- on_new_call_cnt);
-
- rkt = rd_kafka_topic_new(rk, topic, tconf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- /* copied */
- on_new_call_cnt = 0; /* interceptors are not copied. */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf2);
- TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d",
- on_new_call_cnt);
-
- rkt = rd_kafka_topic_new(rk, topic, tconf2);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
-
- /* Incremental S2F property.
- * NOTE: The order of fields returned in get() is hardcoded here. */
- {
- static const char *s2fs[] = {"generic,broker,queue,cgrp",
- "generic,broker,queue,cgrp",
-
- "-broker,+queue,topic",
- "generic,topic,queue,cgrp",
-
- "-all,security,-fetch,+metadata",
- "metadata,security",
-
- NULL};
-
- TEST_SAY("Incremental S2F tests\n");
- conf = rd_kafka_conf_new();
-
- for (i = 0; s2fs[i]; i += 2) {
- const char *val;
-
- TEST_SAY(" Set: %s\n", s2fs[i]);
- test_conf_set(conf, "debug", s2fs[i]);
- val = test_conf_get(conf, "debug");
- TEST_SAY(" Now: %s\n", val);
-
- if (strcmp(val, s2fs[i + 1]))
- TEST_FAIL_LATER(
- "\n"
- "Expected: %s\n"
- " Got: %s",
- s2fs[i + 1], val);
- }
- rd_kafka_conf_destroy(conf);
- }
-
- {
- rd_kafka_conf_res_t res;
-
- TEST_SAY("Error reporting for S2F properties\n");
- conf = rd_kafka_conf_new();
-
- res =
- rd_kafka_conf_set(conf, "debug", "cgrp,invalid-value,topic",
- errstr, sizeof(errstr));
-
- TEST_ASSERT(
- res == RD_KAFKA_CONF_INVALID,
- "expected 'debug=invalid-value' to fail with INVALID, "
- "not %d",
- res);
- TEST_ASSERT(strstr(errstr, "invalid-value"),
- "expected invalid value to be mentioned in error, "
- "not \"%s\"",
- errstr);
- TEST_ASSERT(!strstr(errstr, "cgrp") && !strstr(errstr, "topic"),
- "expected only invalid value to be mentioned, "
- "not \"%s\"",
- errstr);
- TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
-
- rd_kafka_conf_destroy(conf);
- }
-
-#if WITH_SSL
- {
- TEST_SAY(
- "Verifying that ssl.ca.location is not "
- "overwritten (#3566)\n");
-
- conf = rd_kafka_conf_new();
-
- test_conf_set(conf, "security.protocol", "SSL");
- test_conf_set(conf, "ssl.ca.location", "/?/does/!/not/exist!");
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
- sizeof(errstr));
- TEST_ASSERT(!rk,
- "Expected rd_kafka_new() to fail with "
- "invalid ssl.ca.location");
- TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr);
- rd_kafka_conf_destroy(conf);
- }
-
-#ifdef _WIN32
- {
- FILE *fp;
- TEST_SAY(
- "Verifying that OpenSSL_AppLink "
- "is not needed (#3554)\n");
-
- /* Create dummy file so the file open works,
- * but parsing fails. */
- fp = fopen("_tmp_0004", "w");
- TEST_ASSERT(fp != NULL, "Failed to create dummy file: %s",
- rd_strerror(errno));
- if (fwrite("?", 1, 1, fp) != 1)
- TEST_FAIL("Failed to write to dummy file _tmp_0004: %s",
- rd_strerror(errno));
- fclose(fp);
-
- conf = rd_kafka_conf_new();
-
- test_conf_set(conf, "security.protocol", "SSL");
- test_conf_set(conf, "ssl.keystore.location", "_tmp_0004");
- test_conf_set(conf, "ssl.keystore.password", "x");
-
- /* Prior to the fix OpenSSL will assert with a message like
- * this: "OPENSSL_Uplink(00007FF9C0229D30,08): no
- * OPENSSL_Applink"
- * and the program will exit with error code 1. */
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
- sizeof(errstr));
- _unlink("tmp_0004");
-
- TEST_ASSERT(!rk,
- "Expected rd_kafka_new() to fail due to "
- "dummy ssl.keystore.location");
- TEST_ASSERT(strstr(errstr, "ssl.keystore.location") != NULL,
- "Expected rd_kafka_new() to fail with "
- "dummy ssl.keystore.location, not: %s",
- errstr);
-
- TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr);
- }
-#endif /* _WIN32 */
-
-#endif /* WITH_SSL */
-
- /* Canonical int values, aliases, s2i-verified strings, doubles */
- {
- static const struct {
- const char *prop;
- const char *val;
- const char *exp;
- int is_global;
- } props[] = {
- {"request.required.acks", "0", "0"},
- {"request.required.acks", "-1", "-1"},
- {"request.required.acks", "1", "1"},
- {"acks", "3", "3"}, /* alias test */
- {"request.required.acks", "393", "393"},
- {"request.required.acks", "bad", NULL},
- {"request.required.acks", "all", "-1"},
- {"request.required.acks", "all", "-1", 1 /*fallthru*/},
- {"acks", "0", "0"}, /* alias test */
-#if WITH_SASL
- {"sasl.mechanisms", "GSSAPI", "GSSAPI", 1},
- {"sasl.mechanisms", "PLAIN", "PLAIN", 1},
- {"sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1},
- {"sasl.mechanisms", "", NULL, 1},
-#endif
- {"linger.ms", "12555.3", "12555.3", 1},
- {"linger.ms", "1500.000", "1500", 1},
- {"linger.ms", "0.0001", "0.0001", 1},
- {NULL}
- };
-
- TEST_SAY("Canonical tests\n");
- tconf = rd_kafka_topic_conf_new();
- conf = rd_kafka_conf_new();
-
- for (i = 0; props[i].prop; i++) {
- char dest[64];
- size_t destsz;
- rd_kafka_conf_res_t res;
-
- TEST_SAY(" Set: %s=%s expect %s (%s)\n", props[i].prop,
- props[i].val, props[i].exp,
- props[i].is_global ? "global" : "topic");
-
-
- /* Set value */
- if (props[i].is_global)
- res = rd_kafka_conf_set(conf, props[i].prop,
- props[i].val, errstr,
- sizeof(errstr));
- else
- res = rd_kafka_topic_conf_set(
- tconf, props[i].prop, props[i].val, errstr,
- sizeof(errstr));
- if ((res == RD_KAFKA_CONF_OK ? 1 : 0) !=
- (props[i].exp ? 1 : 0))
- TEST_FAIL("Expected %s, got %s",
- props[i].exp ? "success" : "failure",
- (res == RD_KAFKA_CONF_OK
- ? "OK"
- : (res == RD_KAFKA_CONF_INVALID
- ? "INVALID"
- : "UNKNOWN")));
-
- if (!props[i].exp)
- continue;
-
- /* Get value and compare to expected result */
- destsz = sizeof(dest);
- if (props[i].is_global)
- res = rd_kafka_conf_get(conf, props[i].prop,
- dest, &destsz);
- else
- res = rd_kafka_topic_conf_get(
- tconf, props[i].prop, dest, &destsz);
- TEST_ASSERT(res == RD_KAFKA_CONF_OK,
- ".._conf_get(%s) returned %d",
- props[i].prop, res);
-
- TEST_ASSERT(!strcmp(props[i].exp, dest),
- "Expected \"%s\", got \"%s\"", props[i].exp,
- dest);
- }
- rd_kafka_topic_conf_destroy(tconf);
- rd_kafka_conf_destroy(conf);
- }
-
- do_test_kafka_new_failures();
-
- do_test_special_invalid_conf();
-
- do_test_idempotence_conf();
-
- do_test_instance_conf();
-
- do_test_default_topic_conf();
-
- do_message_timeout_linger_checks();
-
- return 0;
-}
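
The canonical-value loop above relies on librdkafka normalizing property
values at set time: a subsequent get returns the canonical form ("-1"),
not the alias that was set ("all"). A minimal standalone sketch of that
round trip outside the test harness (build setup assumed, not part of
the suite):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    int main(void) {
            rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
            char errstr[512], val[64];
            size_t valsz = sizeof(val);

            /* "all" is an accepted alias for -1; it is canonicalized
             * on set. */
            if (rd_kafka_topic_conf_set(tconf, "request.required.acks",
                                        "all", errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "set failed: %s\n", errstr);
                    return 1;
            }

            if (rd_kafka_topic_conf_get(tconf, "request.required.acks",
                                        val, &valsz) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "get failed\n");
                    return 1;
            }

            printf("request.required.acks = %s\n", val); /* prints -1 */

            rd_kafka_topic_conf_destroy(tconf);
            return 0;
    }
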
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c
deleted file mode 100644
index 722cef3b0..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests messages are produced in order.
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgid_next = 0;
-static int fails = 0;
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(err));
-
- if (msgid != msgid_next) {
- fails++;
- TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next);
- return;
- }
-
- msgid_next = msgid + 1;
-}
-
-
-int main_0005_order(int argc, char **argv) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- int msgcnt = test_quick ? 500 : 50000;
- int i;
- test_timing_t t_produce, t_delivery;
-
- test_conf_init(&conf, &topic_conf, 10);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Produce messages */
- TIMING_START(&t_produce, "PRODUCE");
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
- i);
- r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, msgidp);
- if (r == -1)
- TEST_FAIL("Failed to produce message #%i: %s\n", i,
- rd_strerror(errno));
- }
- TIMING_STOP(&t_produce);
- TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt);
-
- /* Wait for messages to be delivered */
- TIMING_START(&t_delivery, "DELIVERY");
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 50);
- TIMING_STOP(&t_delivery);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (msgid_next != msgcnt)
- TEST_FAIL("Still waiting for messages: next %i != end %i\n",
- msgid_next, msgcnt);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
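
The shutdown sequence above (produce, then poll until the out-queue is
empty) is how the test guarantees that every delivery report, and thus
every free() of a per-message opaque, has run before the handle is
destroyed. The same idiom as a reusable helper (a sketch; the 50 ms poll
interval is arbitrary):

    #include <librdkafka/rdkafka.h>

    /* Poll until all outstanding delivery reports have been served. */
    static void wait_all_delivered(rd_kafka_t *rk) {
            while (rd_kafka_outq_len(rk) > 0)
                    rd_kafka_poll(rk, 50); /* fires dr_cb per message */
    }
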
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c
deleted file mode 100644
index 8a25f6a1d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Makes sure all symbols in the public API actually resolve during linking.
- * This test needs to be updated manually when new symbols are added.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-int main_0006_symbols(int argc, char **argv) {
-
- if (argc < 0 /* always false */) {
- rd_kafka_version();
- rd_kafka_version_str();
- rd_kafka_get_debug_contexts();
- rd_kafka_get_err_descs(NULL, NULL);
- rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
- rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
- rd_kafka_last_error();
- rd_kafka_conf_new();
- rd_kafka_conf_destroy(NULL);
- rd_kafka_conf_dup(NULL);
- rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
- rd_kafka_conf_set_dr_cb(NULL, NULL);
- rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
- rd_kafka_conf_set_error_cb(NULL, NULL);
- rd_kafka_conf_set_stats_cb(NULL, NULL);
- rd_kafka_conf_set_log_cb(NULL, NULL);
- rd_kafka_conf_set_socket_cb(NULL, NULL);
- rd_kafka_conf_set_rebalance_cb(NULL, NULL);
- rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
- rd_kafka_conf_set_throttle_cb(NULL, NULL);
- rd_kafka_conf_set_default_topic_conf(NULL, NULL);
- rd_kafka_conf_get(NULL, NULL, NULL, NULL);
-#ifndef _WIN32
- rd_kafka_conf_set_open_cb(NULL, NULL);
-#endif
- rd_kafka_conf_set_opaque(NULL, NULL);
- rd_kafka_opaque(NULL);
- rd_kafka_conf_dump(NULL, NULL);
- rd_kafka_topic_conf_dump(NULL, NULL);
- rd_kafka_conf_dump_free(NULL, 0);
- rd_kafka_conf_properties_show(NULL);
- rd_kafka_topic_conf_new();
- rd_kafka_topic_conf_dup(NULL);
- rd_kafka_topic_conf_destroy(NULL);
- rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
- rd_kafka_topic_conf_set_opaque(NULL, NULL);
- rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
- rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
- rd_kafka_topic_partition_available(NULL, 0);
- rd_kafka_topic_opaque(NULL);
- rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
- rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL,
- NULL);
- rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0,
- NULL, NULL);
- rd_kafka_new(0, NULL, NULL, 0);
- rd_kafka_destroy(NULL);
- rd_kafka_flush(NULL, 0);
- rd_kafka_name(NULL);
- rd_kafka_memberid(NULL);
- rd_kafka_topic_new(NULL, NULL, NULL);
- rd_kafka_topic_destroy(NULL);
- rd_kafka_topic_name(NULL);
- rd_kafka_message_destroy(NULL);
- rd_kafka_message_errstr(NULL);
- rd_kafka_message_timestamp(NULL, NULL);
- rd_kafka_consume_start(NULL, 0, 0);
- rd_kafka_consume_stop(NULL, 0);
- rd_kafka_consume(NULL, 0, 0);
- rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
- rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
- rd_kafka_offset_store(NULL, 0, 0);
- rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
- rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
- rd_kafka_poll(NULL, 0);
- rd_kafka_brokers_add(NULL, NULL);
- /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
- rd_kafka_set_log_level(NULL, 0);
- rd_kafka_log_print(NULL, 0, NULL, NULL);
-#ifndef _WIN32
- rd_kafka_log_syslog(NULL, 0, NULL, NULL);
-#endif
- rd_kafka_outq_len(NULL);
- rd_kafka_dump(NULL, NULL);
- rd_kafka_thread_cnt();
- rd_kafka_wait_destroyed(0);
- rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
- rd_kafka_metadata_destroy(NULL);
- rd_kafka_queue_get_partition(NULL, NULL, 0);
- rd_kafka_queue_destroy(NULL);
- rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
- rd_kafka_consume_queue(NULL, 0);
- rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
- rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
- rd_kafka_seek(NULL, 0, 0, 0);
- rd_kafka_yield(NULL);
- rd_kafka_mem_free(NULL, NULL);
- rd_kafka_list_groups(NULL, NULL, NULL, 0);
- rd_kafka_group_list_destroy(NULL);
-
- /* KafkaConsumer API */
- rd_kafka_subscribe(NULL, NULL);
- rd_kafka_unsubscribe(NULL);
- rd_kafka_subscription(NULL, NULL);
- rd_kafka_consumer_poll(NULL, 0);
- rd_kafka_consumer_close(NULL);
- rd_kafka_assign(NULL, NULL);
- rd_kafka_assignment(NULL, NULL);
- rd_kafka_commit(NULL, NULL, 0);
- rd_kafka_commit_message(NULL, NULL, 0);
- rd_kafka_committed(NULL, NULL, 0);
- rd_kafka_position(NULL, NULL);
-
- /* TopicPartition */
- rd_kafka_topic_partition_list_new(0);
- rd_kafka_topic_partition_list_destroy(NULL);
- rd_kafka_topic_partition_list_add(NULL, NULL, 0);
- rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
- rd_kafka_topic_partition_list_del(NULL, NULL, 0);
- rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
- rd_kafka_topic_partition_list_copy(NULL);
- rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
- rd_kafka_topic_partition_list_find(NULL, NULL, 0);
- rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
- rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
- }
-
-
- return 0;
-}
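
The argc < 0 guard above is a link-check trick: the branch is never taken
at runtime, but since argc is runtime data the compiler keeps the calls,
so every referenced symbol must resolve at link time. The pattern in
isolation (a sketch; extend the body with whichever symbols need
checking):

    #include <librdkafka/rdkafka.h>

    int main(int argc, char **argv) {
            (void)argv;
            if (argc < 0) { /* never true, but not discarded */
                    rd_kafka_version();
                    rd_kafka_version_str();
            }
            return 0;
    }
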
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c
deleted file mode 100644
index cf196d60c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Auto create topics
- *
- * NOTE! This test requires auto.create.topics.enable=true to be
- * configured on the broker!
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgs_wait = 0; /* bitmask */
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (!(msgs_wait & (1 << msgid)))
- TEST_FAIL(
- "Unwanted delivery report for message #%i "
- "(waiting for 0x%x)\n",
- msgid, msgs_wait);
-
- TEST_SAY("Delivery report for message #%i: %s\n", msgid,
- rd_kafka_err2str(err));
-
- msgs_wait &= ~(1 << msgid);
-
- if (err)
- TEST_FAIL("Message #%i failed with unexpected error %s\n",
- msgid, rd_kafka_err2str(err));
-}
-
-
-int main_0007_autotopic(int argc, char **argv) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- int msgcnt = 10;
- int i;
-
- /* Generate unique topic name */
- test_conf_init(&conf, &topic_conf, 10);
-
- TEST_SAY(
- "\033[33mNOTE! This test requires "
- "auto.create.topics.enable=true to be configured on "
- "the broker!\033[0m\n");
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0007_autotopic", 1),
- topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
-        /* Produce messages */
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
- i);
- r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, msgidp);
- if (r == -1)
- TEST_FAIL("Failed to produce message #%i: %s\n", i,
- rd_strerror(errno));
- msgs_wait |= (1 << i);
- }
-
-        /* Wait for all delivery reports */
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 50);
-
- if (msgs_wait != 0)
- TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
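
Delivery tracking above uses an int as a bitmask (1 << msgid), which caps
the message count at the bit width of int; msgcnt is 10 here, so that is
safe. A 64-bit variant for larger counts (a sketch, assuming msgid < 64):

    #include <stdint.h>

    static uint64_t msgs_wait64; /* bit i set => msg #i awaiting report */

    static void mark_sent(int msgid) {
            msgs_wait64 |= (uint64_t)1 << msgid;
    }

    static int is_pending(int msgid) {
            return !!(msgs_wait64 & ((uint64_t)1 << msgid));
    }

    static void mark_delivered(int msgid) {
            msgs_wait64 &= ~((uint64_t)1 << msgid);
    }
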
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c
deleted file mode 100644
index d52081b75..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests request.required.acks (issue #75)
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgid_next = 0;
-static int fails = 0;
-static rd_kafka_msg_status_t exp_status;
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- int msgid = *(int *)rkmessage->_private;
- rd_kafka_msg_status_t status = rd_kafka_message_status(rkmessage);
-
- free(rkmessage->_private);
-
- if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s (status %d)\n",
- rd_kafka_err2str(rkmessage->err), status);
-
- if (msgid != msgid_next) {
- fails++;
- TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next);
- return;
- }
-
- TEST_ASSERT(status == exp_status,
- "For msgid #%d: expected status %d, got %d", msgid,
- exp_status, status);
-
- msgid_next = msgid + 1;
-}
-
-
-int main_0008_reqacks(int argc, char **argv) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
- char msg[128];
- int msgcnt = test_quick ? 20 : 100;
- int i;
- int reqacks;
- int idbase = 0;
- const char *topic = NULL;
-
- TEST_SAY(
- "\033[33mNOTE! This test requires at "
- "least 3 brokers!\033[0m\n");
-
- TEST_SAY(
- "\033[33mNOTE! This test requires "
- "default.replication.factor=3 to be configured on "
- "all brokers!\033[0m\n");
-
- /* Try different request.required.acks settings (issue #75) */
- for (reqacks = -1; reqacks <= 1; reqacks++) {
- char tmp[10];
-
- test_conf_init(&conf, &topic_conf, 10);
-
- if (reqacks != -1)
- test_conf_set(conf, "enable.idempotence", "false");
-
- if (!topic)
- topic = test_mk_topic_name("0008", 0);
-
- rd_snprintf(tmp, sizeof(tmp), "%i", reqacks);
-
- if (rd_kafka_topic_conf_set(topic_conf, "request.required.acks",
- tmp, errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s", errstr);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
-
- if (reqacks == 0)
- exp_status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- else
- exp_status = RD_KAFKA_MSG_STATUS_PERSISTED;
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY(
- "Created kafka instance %s with required acks %d, "
- "expecting status %d\n",
- rd_kafka_name(rk), reqacks, exp_status);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_strerror(errno));
-
- /* Produce messages */
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = idbase + i;
- rd_snprintf(msg, sizeof(msg),
- "%s test message #%i (acks=%i)", argv[0],
- *msgidp, reqacks);
- r = rd_kafka_produce(rkt, partition,
- RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, msgidp);
- if (r == -1)
- TEST_FAIL("Failed to produce message #%i: %s\n",
- *msgidp, rd_strerror(errno));
- }
-
- TEST_SAY("Produced %i messages, waiting for deliveries\n",
- msgcnt);
-
-                /* Wait for all delivery reports */
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 50);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (msgid_next != idbase + msgcnt)
- TEST_FAIL(
- "Still waiting for messages: "
- "next %i != end %i\n",
-                            msgid_next, idbase + msgcnt);
- idbase += i;
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
- }
-
- return 0;
-}
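
The exp_status logic above encodes the delivery-guarantee semantics of
request.required.acks: with acks=0 the broker never confirms the write,
so the strongest status librdkafka can report is POSSIBLY_PERSISTED; any
acknowledged setting (1 or all/-1) yields PERSISTED. The mapping in
isolation (a sketch of the test's expectation, not a library API):

    #include <librdkafka/rdkafka.h>

    static rd_kafka_msg_status_t expected_status(int reqacks) {
            return reqacks == 0 ? RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED
                                : RD_KAFKA_MSG_STATUS_PERSISTED;
    }
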
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c
deleted file mode 100644
index 32590820e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * @name Verify that the builtin mock cluster works by producing to a topic
- * and then consuming from it.
- */
-
-
-
-int main_0009_mock_cluster(int argc, char **argv) {
- const char *topic = test_mk_topic_name("0009_mock_cluster", 1);
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_t *p, *c;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- const int msgcnt = 100;
- const char *bootstraps;
- rd_kafka_topic_partition_list_t *parts;
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
-
- test_conf_init(&conf, NULL, 30);
-
- test_conf_set(conf, "bootstrap.servers", bootstraps);
-
- /* Producer */
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- p = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
-
- /* Consumer */
- test_conf_set(conf, "auto.offset.reset", "earliest");
- c = test_create_consumer(topic, NULL, conf, NULL);
-
- rkt = test_create_producer_topic(p, topic, NULL);
-
- /* Produce */
- test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, NULL, 0);
-
- /* Produce tiny messages */
- test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, "hello",
- 5);
-
- rd_kafka_topic_destroy(rkt);
-
- /* Assign */
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, 0);
- rd_kafka_topic_partition_list_add(parts, topic, 1);
- rd_kafka_topic_partition_list_add(parts, topic, 2);
- rd_kafka_topic_partition_list_add(parts, topic, 3);
- test_consumer_assign("CONSUME", c, parts);
- rd_kafka_topic_partition_list_destroy(parts);
-
-
- /* Consume */
- test_consumer_poll("CONSUME", c, 0, -1, 0, msgcnt, NULL);
-
- rd_kafka_destroy(c);
- rd_kafka_destroy(p);
-
- test_mock_cluster_destroy(mcluster);
-
- return 0;
-}
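
test_mock_cluster_new() is a test-harness wrapper; the underlying public
API lives in rdkafka_mock.h. A sketch of direct usage (the installed
header path and the destroy order are assumptions here: the mock cluster
should be destroyed before the rd_kafka_t handle that owns it):

    #include <librdkafka/rdkafka.h>
    #include <librdkafka/rdkafka_mock.h>

    static rd_kafka_mock_cluster_t *
    start_mock_cluster(rd_kafka_t **rkp, const char **bootstraps) {
            char errstr[256];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_mock_cluster_t *mcluster;

            *rkp = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr));
            if (!*rkp)
                    return NULL;

            /* Three mock brokers, matching the test above. */
            mcluster = rd_kafka_mock_cluster_new(*rkp, 3);
            *bootstraps = rd_kafka_mock_cluster_bootstraps(mcluster);
            return mcluster;
    }
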
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c
deleted file mode 100644
index 584d37bc6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests messages are produced in order.
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgid_next = 0;
-static int fails = 0;
-static int msgcounter = 0;
-static int *dr_partition_count = NULL;
-static const int topic_num_partitions = 4;
-static int msg_partition_wo_flag = 2;
-static int msg_partition_wo_flag_success = 0;
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_single_partition_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(err));
-
- if (msgid != msgid_next) {
- fails++;
- TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next);
- return;
- }
-
- msgid_next = msgid + 1;
- msgcounter--;
-}
-
-/* Produce a batch of messages to a single partition. */
-static void test_single_partition(void) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- int msgcnt = test_quick ? 100 : 100000;
- int failcnt = 0;
- int i;
- rd_kafka_message_t *rkmessages;
-
- msgid_next = 0;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_single_partition_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("test_single_partition: Created kafka instance %s\n",
- rd_kafka_name(rk));
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Create messages */
-        rkmessages = calloc(msgcnt, sizeof(*rkmessages));
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
- __FILE__, __FUNCTION__, i);
-
- rkmessages[i].payload = rd_strdup(msg);
- rkmessages[i].len = strlen(msg);
- rkmessages[i]._private = msgidp;
- rkmessages[i].partition = 2; /* Will be ignored since
- * RD_KAFKA_MSG_F_PARTITION
- * is not supplied. */
- }
-
- r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
- rkmessages, msgcnt);
-
- /* Scan through messages to check for errors. */
- for (i = 0; i < msgcnt; i++) {
- if (rkmessages[i].err) {
- failcnt++;
- if (failcnt < 100)
- TEST_SAY("Message #%i failed: %s\n", i,
- rd_kafka_err2str(rkmessages[i].err));
- }
- }
-
- /* All messages should've been produced. */
- if (r < msgcnt) {
- TEST_SAY(
- "Not all messages were accepted "
- "by produce_batch(): %i < %i\n",
- r, msgcnt);
- if (msgcnt - r != failcnt)
- TEST_SAY(
-                            "Discrepancy between failed messages (%i) "
- "and return value %i (%i - %i)\n",
- failcnt, msgcnt - r, msgcnt, r);
- TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
- }
-
- free(rkmessages);
- TEST_SAY(
- "Single partition: "
- "Produced %i messages, waiting for deliveries\n",
- r);
-
- msgcounter = msgcnt;
-
- /* Wait for messages to be delivered */
- test_wait_delivery(rk, &msgcounter);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (msgid_next != msgcnt)
- TEST_FAIL("Still waiting for messages: next %i != end %i\n",
- msgid_next, msgcnt);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return;
-}
-
-
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_partitioner_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(err));
-
- if (msgcounter <= 0)
- TEST_FAIL(
- "Too many message dr_cb callback calls "
- "(at msgid #%i)\n",
- msgid);
- msgcounter--;
-}
-
-/* Produce a batch of messages using random (default) partitioner */
-static void test_partitioner(void) {
- int partition = RD_KAFKA_PARTITION_UA;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- int msgcnt = test_quick ? 100 : 100000;
- int failcnt = 0;
- int i;
- rd_kafka_message_t *rkmessages;
-
- test_conf_init(&conf, &topic_conf, 30);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_partitioner_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("test_partitioner: Created kafka instance %s\n",
- rd_kafka_name(rk));
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Create messages */
-        rkmessages = calloc(msgcnt, sizeof(*rkmessages));
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
- __FILE__, __FUNCTION__, i);
-
- rkmessages[i].payload = rd_strdup(msg);
- rkmessages[i].len = strlen(msg);
- rkmessages[i]._private = msgidp;
- }
-
- r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
- rkmessages, msgcnt);
-
- /* Scan through messages to check for errors. */
- for (i = 0; i < msgcnt; i++) {
- if (rkmessages[i].err) {
- failcnt++;
- if (failcnt < 100)
- TEST_SAY("Message #%i failed: %s\n", i,
- rd_kafka_err2str(rkmessages[i].err));
- }
- }
-
- /* All messages should've been produced. */
- if (r < msgcnt) {
- TEST_SAY(
- "Not all messages were accepted "
- "by produce_batch(): %i < %i\n",
- r, msgcnt);
- if (msgcnt - r != failcnt)
- TEST_SAY(
-                            "Discrepancy between failed messages (%i) "
- "and return value %i (%i - %i)\n",
- failcnt, msgcnt - r, msgcnt, r);
- TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
- }
-
- free(rkmessages);
- TEST_SAY(
- "Partitioner: "
- "Produced %i messages, waiting for deliveries\n",
- r);
-
- msgcounter = msgcnt;
- /* Wait for messages to be delivered */
- test_wait_delivery(rk, &msgcounter);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (msgcounter != 0)
- TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter,
- msgcnt);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return;
-}
-
-static void dr_per_message_partition_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque) {
-
- free(rkmessage->_private);
-
- if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(rkmessage->err));
-
- if (msgcounter <= 0)
- TEST_FAIL(
- "Too many message dr_cb callback calls "
- "(at msg offset #%" PRId64 ")\n",
- rkmessage->offset);
-
- TEST_ASSERT(rkmessage->partition < topic_num_partitions);
- msgcounter--;
-
- dr_partition_count[rkmessage->partition]++;
-}
-
-/* Produce a batch of messages using the per-message partition flag */
-static void test_per_message_partition_flag(void) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)];
- int msgcnt = test_quick ? 100 : 1000;
- int failcnt = 0;
- int i;
- int *rkpartition_counts;
- rd_kafka_message_t *rkmessages;
- const char *topic_name;
-
- test_conf_init(&conf, &topic_conf, 30);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_msg_cb(conf, dr_per_message_partition_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("test_per_message_partition_flag: Created kafka instance %s\n",
- rd_kafka_name(rk));
- topic_name = test_mk_topic_name("0011_per_message_flag", 1);
- test_create_topic(rk, topic_name, topic_num_partitions, 1);
-
- rkt = rd_kafka_topic_new(rk, topic_name, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Create messages */
-        rkpartition_counts = calloc(topic_num_partitions, sizeof(int));
-        dr_partition_count = calloc(topic_num_partitions, sizeof(int));
-        rkmessages = calloc(msgcnt, sizeof(*rkmessages));
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
- __FILE__, __FUNCTION__, i);
-
- rkmessages[i].payload = rd_strdup(msg);
- rkmessages[i].len = strlen(msg);
- rkmessages[i]._private = msgidp;
- rkmessages[i].partition = jitter(0, topic_num_partitions - 1);
- rkpartition_counts[rkmessages[i].partition]++;
- }
-
- r = rd_kafka_produce_batch(
- rkt, partition, RD_KAFKA_MSG_F_PARTITION | RD_KAFKA_MSG_F_FREE,
- rkmessages, msgcnt);
-
- /* Scan through messages to check for errors. */
- for (i = 0; i < msgcnt; i++) {
- if (rkmessages[i].err) {
- failcnt++;
- if (failcnt < 100)
- TEST_SAY("Message #%i failed: %s\n", i,
- rd_kafka_err2str(rkmessages[i].err));
- }
- }
-
- /* All messages should've been produced. */
- if (r < msgcnt) {
- TEST_SAY(
- "Not all messages were accepted "
- "by produce_batch(): %i < %i\n",
- r, msgcnt);
- if (msgcnt - r != failcnt)
- TEST_SAY(
-                            "Discrepancy between failed messages (%i) "
- "and return value %i (%i - %i)\n",
- failcnt, msgcnt - r, msgcnt, r);
- TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
- }
-
- free(rkmessages);
- TEST_SAY(
- "Per-message partition: "
- "Produced %i messages, waiting for deliveries\n",
- r);
-
- msgcounter = msgcnt;
- /* Wait for messages to be delivered */
- test_wait_delivery(rk, &msgcounter);
-
- if (msgcounter != 0)
- TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter,
- msgcnt);
-
- for (i = 0; i < topic_num_partitions; i++) {
- if (dr_partition_count[i] != rkpartition_counts[i]) {
- TEST_FAIL(
- "messages were not sent to designated "
-                            "partitions: expected %i messages in "
- "partition %i, but only "
- "%i messages were sent",
- rkpartition_counts[i], i, dr_partition_count[i]);
- }
- }
-
- free(rkpartition_counts);
- free(dr_partition_count);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return;
-}
-
-static void
-dr_partitioner_wo_per_message_flag_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque) {
- free(rkmessage->_private);
-
- if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(rkmessage->err));
- if (msgcounter <= 0)
- TEST_FAIL(
- "Too many message dr_cb callback calls "
- "(at msg offset #%" PRId64 ")\n",
- rkmessage->offset);
- if (rkmessage->partition != msg_partition_wo_flag)
- msg_partition_wo_flag_success = 1;
- msgcounter--;
-}
-
-/**
- * @brief Produce a batch of messages using the partitioner,
- *        without the per-message partition flag
- */
-static void test_message_partitioner_wo_per_message_flag(void) {
- int partition = RD_KAFKA_PARTITION_UA;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)];
- int msgcnt = test_quick ? 100 : 1000;
- int failcnt = 0;
- int i;
- rd_kafka_message_t *rkmessages;
-
- test_conf_init(&conf, &topic_conf, 30);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_msg_cb(conf,
- dr_partitioner_wo_per_message_flag_cb);
- test_conf_set(conf, "sticky.partitioning.linger.ms", "0");
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
-        TEST_SAY("test_message_partitioner_wo_per_message_flag: "
-                 "Created kafka instance %s\n",
- rd_kafka_name(rk));
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Create messages */
-        rkmessages = calloc(msgcnt, sizeof(*rkmessages));
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
- __FILE__, __FUNCTION__, i);
-
- rkmessages[i].payload = rd_strdup(msg);
- rkmessages[i].len = strlen(msg);
- rkmessages[i]._private = msgidp;
- rkmessages[i].partition = msg_partition_wo_flag;
- }
-
- r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
- rkmessages, msgcnt);
-
- /* Scan through messages to check for errors. */
- for (i = 0; i < msgcnt; i++) {
- if (rkmessages[i].err) {
- failcnt++;
- if (failcnt < 100)
- TEST_SAY("Message #%i failed: %s\n", i,
- rd_kafka_err2str(rkmessages[i].err));
- }
- }
-
- /* All messages should've been produced. */
- if (r < msgcnt) {
- TEST_SAY(
- "Not all messages were accepted "
- "by produce_batch(): %i < %i\n",
- r, msgcnt);
- if (msgcnt - r != failcnt)
- TEST_SAY(
-                            "Discrepancy between failed messages (%i) "
- "and return value %i (%i - %i)\n",
- failcnt, msgcnt - r, msgcnt, r);
- TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
- }
-
- free(rkmessages);
- TEST_SAY(
- "Partitioner: "
- "Produced %i messages, waiting for deliveries\n",
- r);
-
- msgcounter = msgcnt;
- /* Wait for messages to be delivered */
- test_wait_delivery(rk, &msgcounter);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (msgcounter != 0)
- TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter,
- msgcnt);
- if (msg_partition_wo_flag_success == 0) {
- TEST_FAIL(
- "partitioner was not used, all messages were sent to "
-                    "the message-specified partition %i",
-                    msg_partition_wo_flag);
- }
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return;
-}
-
-
-int main_0011_produce_batch(int argc, char **argv) {
- test_message_partitioner_wo_per_message_flag();
- test_single_partition();
- test_partitioner();
- if (test_can_create_topics(1))
- test_per_message_partition_flag();
- return 0;
-}
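
A recurring idiom across the batch tests above: rd_kafka_produce_batch()
returns the number of messages actually enqueued, and the reason for any
rejected message is left in that message's .err field rather than in
errno. The scan in isolation (a sketch; the message array is assumed to
be prepared by the caller):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    /* Enqueue a prepared message array; report per-message failures.
     * Returns the number of messages accepted. */
    static int enqueue_batch(rd_kafka_topic_t *rkt,
                             rd_kafka_message_t *msgs, int cnt) {
            int accepted = rd_kafka_produce_batch(
                rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, msgs, cnt);
            int i;

            for (i = 0; accepted < cnt && i < cnt; i++)
                    if (msgs[i].err)
                            fprintf(stderr, "msg #%d: %s\n", i,
                                    rd_kafka_err2str(msgs[i].err));
            return accepted;
    }
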
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c
deleted file mode 100644
index 30ff392c4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Produce messages, then consume them.
- * Consume both through the standard interface and through the queue interface.
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int prod_msg_remains = 0;
-static int fails = 0;
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
-
- if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(err));
-
- if (prod_msg_remains == 0)
- TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
- prod_msg_remains);
-
- prod_msg_remains--;
-}
-
-
-/**
- * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
- */
-static void produce_messages(uint64_t testid,
- const char *topic,
- int partition_cnt,
- int msgcnt) {
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
- char msg[128];
- int failcnt = 0;
- int i;
- rd_kafka_message_t *rkmessages;
- int32_t partition;
- int msgid = 0;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Make sure all replicas are in-sync after producing
-         * so that the consume test won't fail. */
- rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
- errstr, sizeof(errstr));
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Create messages. */
- prod_msg_remains = msgcnt;
-        rkmessages = calloc(msgcnt / partition_cnt, sizeof(*rkmessages));
- for (partition = 0; partition < partition_cnt; partition++) {
- int batch_cnt = msgcnt / partition_cnt;
-
- for (i = 0; i < batch_cnt; i++) {
- rd_snprintf(msg, sizeof(msg),
- "testid=%" PRIu64 ", partition=%i, msg=%i",
- testid, (int)partition, msgid);
- rkmessages[i].payload = rd_strdup(msg);
- rkmessages[i].len = strlen(msg);
- msgid++;
- }
-
- TEST_SAY("Start produce to partition %i: msgs #%d..%d\n",
- (int)partition, msgid - batch_cnt, msgid);
- /* Produce batch for this partition */
- r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
- rkmessages, batch_cnt);
- if (r == -1)
- TEST_FAIL(
- "Failed to produce "
- "batch for partition %i: %s",
- (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
-
- /* Scan through messages to check for errors. */
- for (i = 0; i < batch_cnt; i++) {
- if (rkmessages[i].err) {
- failcnt++;
- if (failcnt < 100)
- TEST_SAY("Message #%i failed: %s\n", i,
- rd_kafka_err2str(
- rkmessages[i].err));
- }
- }
-
- /* All messages should've been produced. */
- if (r < batch_cnt) {
- TEST_SAY(
- "Not all messages were accepted "
- "by produce_batch(): %i < %i\n",
- r, batch_cnt);
-
- if (batch_cnt - r != failcnt)
- TEST_SAY(
-                                    "Discrepancy between failed "
- "messages (%i) "
- "and return value %i (%i - %i)\n",
- failcnt, batch_cnt - r, batch_cnt, r);
- TEST_FAIL("%i/%i messages failed\n", batch_cnt - r,
- batch_cnt);
- }
-
- TEST_SAY(
- "Produced %i messages to partition %i, "
- "waiting for deliveries\n",
-                    r, (int)partition);
- }
-
-
- free(rkmessages);
-
- /* Wait for messages to be delivered */
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 100);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (prod_msg_remains != 0)
- TEST_FAIL("Still waiting for %i messages to be produced",
- prod_msg_remains);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-
-static int *cons_msgs;
-static int cons_msgs_size;
-static int cons_msgs_cnt;
-
-static void verify_consumed_msg_reset(int msgcnt) {
- TEST_SAY("Resetting consumed_msgs (msgcnt %d)\n", msgcnt);
- if (cons_msgs) {
- free(cons_msgs);
- cons_msgs = NULL;
- }
-
- if (msgcnt) {
- int i;
-
- cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt);
- for (i = 0; i < msgcnt; i++)
- cons_msgs[i] = -1;
- }
-
- cons_msgs_size = msgcnt;
- cons_msgs_cnt = 0;
-}
-
-
-static int int_cmp(const void *_a, const void *_b) {
- int a = *(int *)_a;
- int b = *(int *)_b;
- return RD_CMP(a, b);
-}
-
-static void verify_consumed_msg_check0(const char *func, int line) {
- int i;
- int fails = 0;
-
- if (cons_msgs_cnt < cons_msgs_size) {
- TEST_SAY("Missing %i messages in consumer\n",
- cons_msgs_size - cons_msgs_cnt);
- fails++;
- }
-
- qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);
-
- for (i = 0; i < cons_msgs_size; i++) {
- if (cons_msgs[i] != i) {
- TEST_SAY(
- "Consumed message #%i is wrong, "
- "expected #%i\n",
- cons_msgs[i], i);
- fails++;
- }
- }
-
- if (fails)
- TEST_FAIL("See above error(s)");
-
- verify_consumed_msg_reset(0);
-}
-
-
-#define verify_consumed_msg_check() \
- verify_consumed_msg_check0(__FUNCTION__, __LINE__)
-
-
-
-static void verify_consumed_msg0(const char *func,
- int line,
- uint64_t testid,
- int32_t partition,
- int msgnum,
- rd_kafka_message_t *rkmessage) {
- uint64_t in_testid;
- int in_part;
- int in_msgnum;
- char buf[1024];
-
- if (rkmessage->len + 1 >= sizeof(buf))
- TEST_FAIL(
- "Incoming message too large (%i): "
- "not sourced by this test",
- (int)rkmessage->len);
-
- rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len,
- (char *)rkmessage->payload);
-
- if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid,
- &in_part, &in_msgnum) != 3)
- TEST_FAIL("Incorrect message format: %s", buf);
-
- if (test_level > 2) {
- TEST_SAY("%s:%i: Our testid %" PRIu64
- ", part %i =? %i, "
- "msg %i =? %i "
- ", message's: \"%s\"\n",
- func, line, testid, (int)partition,
- (int)rkmessage->partition, msgnum, in_msgnum, buf);
- }
-
- if (testid != in_testid || (partition != -1 && partition != in_part) ||
- (msgnum != -1 && msgnum != in_msgnum) ||
-            (in_msgnum < 0 || in_msgnum >= cons_msgs_size))
- goto fail_match;
-
- if (cons_msgs_cnt == cons_msgs_size) {
- TEST_SAY(
- "Too many messages in cons_msgs (%i) while reading "
- "message \"%s\"\n",
- cons_msgs_cnt, buf);
- verify_consumed_msg_check();
- TEST_FAIL("See above error(s)");
- }
-
- cons_msgs[cons_msgs_cnt++] = in_msgnum;
-
- return;
-
-fail_match:
- TEST_FAIL("%s:%i: Our testid %" PRIu64
- ", part %i, msg %i/%i did "
- "not match message's: \"%s\"\n",
- func, line, testid, (int)partition, msgnum, cons_msgs_size,
- buf);
-}
-
-#define verify_consumed_msg(testid, part, msgnum, rkmessage) \
- verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \
- rkmessage)
-
-
-static void consume_messages(uint64_t testid,
- const char *topic,
- int32_t partition,
- int msg_base,
- int batch_cnt,
- int msgcnt) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- int i;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt,
- partition);
-
- /* Consume messages */
- if (rd_kafka_consume_start(rkt, partition,
- RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
- TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition,
- batch_cnt, rd_kafka_err2str(rd_kafka_last_error()));
-
- for (i = 0; i < batch_cnt;) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage =
- rd_kafka_consume(rkt, partition, tmout_multip(5000));
- if (!rkmessage)
- TEST_FAIL(
- "Failed to consume message %i/%i from "
- "partition %i: %s",
- i, batch_cnt, (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
- if (rkmessage->err) {
- if (rkmessage->err ==
- RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- rd_kafka_message_destroy(rkmessage);
- continue;
- }
- TEST_FAIL(
- "Consume message %i/%i from partition %i "
- "has error: %s: %s",
- i, batch_cnt, (int)partition,
- rd_kafka_err2str(rkmessage->err),
- rd_kafka_message_errstr(rkmessage));
- }
-
- verify_consumed_msg(testid, partition, msg_base + i, rkmessage);
-
- rd_kafka_message_destroy(rkmessage);
- i++;
- }
-
- rd_kafka_consume_stop(rkt, partition);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-static void consume_messages_with_queues(uint64_t testid,
- const char *topic,
- int partition_cnt,
- int msgcnt) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- rd_kafka_queue_t *rkqu;
- int i;
- int32_t partition;
- int batch_cnt = msgcnt / partition_cnt;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- test_conf_set(conf, "enable.partition.eof", "true");
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- /* Create queue */
- rkqu = rd_kafka_queue_new(rk);
-
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- TEST_SAY("Consuming %i messages from one queue serving %i partitions\n",
- msgcnt, partition_cnt);
-
- /* Start consuming each partition */
- for (partition = 0; partition < partition_cnt; partition++) {
- /* Consume messages */
- TEST_SAY("Start consuming partition %i at offset -%i\n",
- partition, batch_cnt);
- if (rd_kafka_consume_start_queue(
- rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt),
- rkqu) == -1)
- TEST_FAIL("consume_start_queue(%i) failed: %s",
- (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
- }
-
-
- /* Consume messages from queue */
- for (i = 0; i < msgcnt;) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000));
- if (!rkmessage)
- TEST_FAIL(
- "Failed to consume message %i/%i from "
- "queue: %s",
- i, msgcnt, rd_kafka_err2str(rd_kafka_last_error()));
- if (rkmessage->err) {
- if (rkmessage->err ==
- RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("Topic %s [%" PRId32
- "] reached "
- "EOF at offset %" PRId64 "\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition,
- rkmessage->offset);
- rd_kafka_message_destroy(rkmessage);
- continue;
- }
- TEST_FAIL(
- "Consume message %i/%i from queue "
- "has error (offset %" PRId64 ", partition %" PRId32
- "): %s",
- i, msgcnt, rkmessage->offset, rkmessage->partition,
- rd_kafka_err2str(rkmessage->err));
- }
-
- verify_consumed_msg(testid, -1, -1, rkmessage);
-
- rd_kafka_message_destroy(rkmessage);
- i++;
- }
-
- /* Stop consuming each partition */
- for (partition = 0; partition < partition_cnt; partition++)
- rd_kafka_consume_stop(rkt, partition);
-
- /* Destroy queue */
- rd_kafka_queue_destroy(rkqu);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-/**
- * Produce to two partitions.
- * Consume with standard interface from both, one after the other.
- * Consume with queue interface from both, simultaneously.
- */
-static void test_produce_consume(void) {
- int msgcnt = test_quick ? 100 : 1000;
- int partition_cnt = 2;
- int i;
- uint64_t testid;
- int msg_base = 0;
- const char *topic;
-
- /* Generate a testid so we can differentiate messages
- * from other tests */
- testid = test_id_generate();
-
- /* Read test.conf to configure topic name */
- test_conf_init(NULL, NULL, 20);
- topic = test_mk_topic_name("0012", 1);
-
- TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid);
-
- /* Produce messages */
- produce_messages(testid, topic, partition_cnt, msgcnt);
-
-
- /* Consume messages with standard interface */
- verify_consumed_msg_reset(msgcnt);
- for (i = 0; i < partition_cnt; i++) {
- consume_messages(testid, topic, i, msg_base,
- msgcnt / partition_cnt, msgcnt);
- msg_base += msgcnt / partition_cnt;
- }
- verify_consumed_msg_check();
-
- /* Consume messages with queue interface */
- verify_consumed_msg_reset(msgcnt);
- consume_messages_with_queues(testid, topic, partition_cnt, msgcnt);
- verify_consumed_msg_check();
-
- return;
-}
-
-
-
-int main_0012_produce_consume(int argc, char **argv) {
- test_produce_consume();
- return 0;
-}
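
The queue interface exercised above fans several partitions into one
rd_kafka_queue_t so a single loop serves them all, and
RD_KAFKA_OFFSET_TAIL(n) starts each partition n messages before its end.
The skeleton of that setup (a sketch; the consume loop body is elided):

    #include <librdkafka/rdkafka.h>

    static void consume_via_queue(rd_kafka_t *rk, rd_kafka_topic_t *rkt,
                                  int32_t partition_cnt, int tail_cnt) {
            rd_kafka_queue_t *rkqu = rd_kafka_queue_new(rk);
            int32_t p;

            for (p = 0; p < partition_cnt; p++)
                    rd_kafka_consume_start_queue(
                        rkt, p, RD_KAFKA_OFFSET_TAIL(tail_cnt), rkqu);

            /* ... rd_kafka_consume_queue(rkqu, timeout_ms) loop here,
             * destroying each returned message ... */

            for (p = 0; p < partition_cnt; p++)
                    rd_kafka_consume_stop(rkt, p);

            rd_kafka_queue_destroy(rkqu);
    }
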
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c
deleted file mode 100644
index 26a7ac070..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Produce NULL payload messages, then consume them.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int prod_msg_remains = 0;
-static int fails = 0;
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
-
- if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(err));
-
- if (prod_msg_remains == 0)
- TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
- prod_msg_remains);
-
- prod_msg_remains--;
-}
-
-
-/**
- * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
- */
-static void produce_null_messages(uint64_t testid,
- const char *topic,
- int partition_cnt,
- int msgcnt) {
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
- int i;
- int32_t partition;
- int msgid = 0;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Make sure all replicas are in-sync after producing
- * so that the consume test won't fail. */
- rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
- errstr, sizeof(errstr));
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- /* Produce messages */
- prod_msg_remains = msgcnt;
- for (partition = 0; partition < partition_cnt; partition++) {
- int batch_cnt = msgcnt / partition_cnt;
-
- for (i = 0; i < batch_cnt; i++) {
- char key[128];
- rd_snprintf(key, sizeof(key),
- "testid=%" PRIu64 ", partition=%i, msg=%i",
- testid, (int)partition, msgid);
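- /* A NULL payload with len 0 produces a null-payload message;
- * msgflags 0 is fine here since there is no payload to copy or
- * free, and the key is always copied by rd_kafka_produce(). */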
- r = rd_kafka_produce(rkt, partition, 0, NULL, 0, key,
- strlen(key), NULL);
- if (r == -1)
- TEST_FAIL(
- "Failed to produce message %i "
- "to partition %i: %s",
- msgid, (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
- msgid++;
- }
- }
-
-
- TEST_SAY(
- "Produced %d messages to %d partition(s), "
- "waiting for deliveries\n",
- msgcnt, partition_cnt);
- /* Wait for messages to be delivered */
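- /* rd_kafka_poll() serves the delivery report callback (dr_cb),
- * which decrements prod_msg_remains for each delivered message. */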
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 100);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (prod_msg_remains != 0)
- TEST_FAIL("Still waiting for %i messages to be produced",
- prod_msg_remains);
- else
- TEST_SAY("All messages delivered\n");
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-
-static int *cons_msgs;
-static int cons_msgs_size;
-static int cons_msgs_cnt;
-
-static void verify_consumed_msg_reset(int msgcnt) {
- if (cons_msgs) {
- free(cons_msgs);
- cons_msgs = NULL;
- }
-
- if (msgcnt) {
- int i;
-
- cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt);
- for (i = 0; i < msgcnt; i++)
- cons_msgs[i] = -1;
- }
-
- cons_msgs_size = msgcnt;
- cons_msgs_cnt = 0;
-}
-
-
-static int int_cmp(const void *_a, const void *_b) {
- int a = *(int *)_a;
- int b = *(int *)_b;
- return RD_CMP(a, b);
-}
-
-static void verify_consumed_msg_check0(const char *func, int line) {
- int i;
- int fails = 0;
-
- if (cons_msgs_cnt < cons_msgs_size) {
- TEST_SAY("Missing %i messages in consumer\n",
- cons_msgs_size - cons_msgs_cnt);
- fails++;
- }
-
- qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);
-
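- /* After sorting, a complete consumption run must be exactly
- * 0..cons_msgs_size-1. */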
- for (i = 0; i < cons_msgs_size; i++) {
- if (cons_msgs[i] != i) {
- TEST_SAY(
- "Consumed message #%i is wrong, "
- "expected #%i\n",
- cons_msgs[i], i);
- fails++;
- }
- }
-
- if (fails)
- TEST_FAIL("See above error(s)");
-
- verify_consumed_msg_reset(0);
-}
-
-
-#define verify_consumed_msg_check() \
- verify_consumed_msg_check0(__FUNCTION__, __LINE__)
-
-
-
-static void verify_consumed_msg0(const char *func,
- int line,
- uint64_t testid,
- int32_t partition,
- int msgnum,
- rd_kafka_message_t *rkmessage) {
- uint64_t in_testid;
- int in_part;
- int in_msgnum;
- char buf[128];
-
- if (rkmessage->len != 0)
- TEST_FAIL("Incoming message not NULL: %i bytes",
- (int)rkmessage->len);
-
- if (rkmessage->key_len + 1 >= sizeof(buf))
- TEST_FAIL(
- "Incoming message key too large (%i): "
- "not sourced by this test",
- (int)rkmessage->key_len);
-
- rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len,
- (char *)rkmessage->key);
-
- if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid,
- &in_part, &in_msgnum) != 3)
- TEST_FAIL("Incorrect key format: %s", buf);
-
- if (testid != in_testid || (partition != -1 && partition != in_part) ||
- (msgnum != -1 && msgnum != in_msgnum) ||
- (in_msgnum < 0 || in_msgnum > cons_msgs_size))
- goto fail_match;
-
- if (test_level > 2) {
- TEST_SAY("%s:%i: Our testid %" PRIu64
- ", part %i (%i), "
- "msg %i/%i did "
- ", key's: \"%s\"\n",
- func, line, testid, (int)partition,
- (int)rkmessage->partition, msgnum, cons_msgs_size,
- buf);
- }
-
- if (cons_msgs_cnt == cons_msgs_size) {
- TEST_SAY(
- "Too many messages in cons_msgs (%i) while reading "
- "message key \"%s\"\n",
- cons_msgs_cnt, buf);
- verify_consumed_msg_check();
- TEST_FAIL("See above error(s)");
- }
-
- cons_msgs[cons_msgs_cnt++] = in_msgnum;
-
- return;
-
-fail_match:
- TEST_FAIL("%s:%i: Our testid %" PRIu64
- ", part %i, msg %i/%i did "
- "not match message's key: \"%s\"\n",
- func, line, testid, (int)partition, msgnum, cons_msgs_size,
- buf);
-}
-
-#define verify_consumed_msg(testid, part, msgnum, rkmessage) \
- verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \
- rkmessage)
-
-
-static void consume_messages(uint64_t testid,
- const char *topic,
- int32_t partition,
- int msg_base,
- int batch_cnt,
- int msgcnt) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- int i;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt,
- partition);
-
- /* Consume messages */
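- /* RD_KAFKA_OFFSET_TAIL(cnt) is a logical offset: start 'cnt'
- * messages before the end of the partition, i.e. at the batch
- * just produced. */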
- if (rd_kafka_consume_start(rkt, partition,
- RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
- TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition,
- batch_cnt, rd_kafka_err2str(rd_kafka_last_error()));
-
- for (i = 0; i < batch_cnt; i++) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage =
- rd_kafka_consume(rkt, partition, tmout_multip(5000));
- if (!rkmessage)
- TEST_FAIL(
- "Failed to consume message %i/%i from "
- "partition %i: %s",
- i, batch_cnt, (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
- if (rkmessage->err)
- TEST_FAIL(
- "Consume message %i/%i from partition %i "
- "has error: %s",
- i, batch_cnt, (int)partition,
- rd_kafka_err2str(rkmessage->err));
-
- verify_consumed_msg(testid, partition, msg_base + i, rkmessage);
-
- rd_kafka_message_destroy(rkmessage);
- }
-
- rd_kafka_consume_stop(rkt, partition);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-static void consume_messages_with_queues(uint64_t testid,
- const char *topic,
- int partition_cnt,
- int msgcnt) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- rd_kafka_queue_t *rkqu;
- int i;
- int32_t partition;
- int batch_cnt = msgcnt / partition_cnt;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- /* Create queue */
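- /* Messages from all partitions are routed to this single queue,
- * so one consume loop can serve them simultaneously. */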
- rkqu = rd_kafka_queue_new(rk);
-
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- TEST_SAY("Consuming %i messages from one queue serving %i partitions\n",
- msgcnt, partition_cnt);
-
- /* Start consuming each partition */
- for (partition = 0; partition < partition_cnt; partition++) {
- /* Consume messages */
- TEST_SAY("Start consuming partition %i at tail offset -%i\n",
- partition, batch_cnt);
- if (rd_kafka_consume_start_queue(
- rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt),
- rkqu) == -1)
- TEST_FAIL("consume_start_queue(%i) failed: %s",
- (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
- }
-
-
- /* Consume messages from queue */
- for (i = 0; i < msgcnt; i++) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000));
- if (!rkmessage)
- TEST_FAIL(
- "Failed to consume message %i/%i from "
- "queue: %s",
- i, msgcnt, rd_kafka_err2str(rd_kafka_last_error()));
- if (rkmessage->err)
- TEST_FAIL(
- "Consume message %i/%i from queue "
- "has error (partition %" PRId32 "): %s",
- i, msgcnt, rkmessage->partition,
- rd_kafka_err2str(rkmessage->err));
-
- verify_consumed_msg(testid, -1, -1, rkmessage);
-
- rd_kafka_message_destroy(rkmessage);
- }
-
- /* Stop consuming each partition */
- for (partition = 0; partition < partition_cnt; partition++)
- rd_kafka_consume_stop(rkt, partition);
-
- /* Destroy queue */
- rd_kafka_queue_destroy(rkqu);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-static void test_produce_consume(void) {
- int msgcnt = test_quick ? 100 : 1000;
- int partition_cnt = 1;
- int i;
- uint64_t testid;
- int msg_base = 0;
- const char *topic;
-
- /* Generate a testid so we can differentiate messages
- * from other tests */
- testid = test_id_generate();
-
- /* Read test.conf to configure topic name */
- test_conf_init(NULL, NULL, 20);
- topic = test_mk_topic_name("0013", 0);
-
- TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid);
-
- /* Produce messages */
- produce_null_messages(testid, topic, partition_cnt, msgcnt);
-
-
- /* Consume messages with standard interface */
- verify_consumed_msg_reset(msgcnt);
- for (i = 0; i < partition_cnt; i++) {
- consume_messages(testid, topic, i, msg_base,
- msgcnt / partition_cnt, msgcnt);
- msg_base += msgcnt / partition_cnt;
- }
- verify_consumed_msg_check();
-
- /* Consume messages with queue interface */
- verify_consumed_msg_reset(msgcnt);
- consume_messages_with_queues(testid, topic, partition_cnt, msgcnt);
- verify_consumed_msg_check();
-
- return;
-}
-
-
-
-int main_0013_null_msgs(int argc, char **argv) {
- test_produce_consume();
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c
deleted file mode 100644
index edae85f5c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-static int prod_msg_remains = 0;
-static int fails = 0;
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
-
- if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(err));
-
- if (prod_msg_remains == 0)
- TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
- prod_msg_remains);
-
- prod_msg_remains--;
-}
-
-
-/**
- * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
- */
-static void produce_messages(uint64_t testid,
- const char *topic,
- int partition_cnt,
- int msg_base,
- int msgcnt) {
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
- int i;
- int32_t partition;
- int msgid = msg_base;
-
- test_conf_init(&conf, &topic_conf, 20);
-
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Make sure all replicas are in-sync after producing
- * so that the consume test won't fail. */
- rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
- errstr, sizeof(errstr));
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- /* Produce messages */
- prod_msg_remains = msgcnt;
- for (partition = 0; partition < partition_cnt; partition++) {
- int batch_cnt = msgcnt / partition_cnt;
-
- for (i = 0; i < batch_cnt; i++) {
- char key[128];
- char buf[128];
- rd_snprintf(key, sizeof(key),
- "testid=%" PRIu64 ", partition=%i, msg=%i",
- testid, (int)partition, msgid);
- rd_snprintf(buf, sizeof(buf),
- "data: testid=%" PRIu64
- ", partition=%i, msg=%i",
- testid, (int)partition, msgid);
-
- r = rd_kafka_produce(
- rkt, partition, RD_KAFKA_MSG_F_COPY, buf,
- strlen(buf), key, strlen(key), NULL);
- if (r == -1)
- TEST_FAIL(
- "Failed to produce message %i "
- "to partition %i: %s",
- msgid, (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
- msgid++;
- }
- }
-
-
- /* Wait for messages to be delivered */
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 100);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (prod_msg_remains != 0)
- TEST_FAIL("Still waiting for %i messages to be produced",
- prod_msg_remains);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-
-static int *cons_msgs;
-static int cons_msgs_size;
-static int cons_msgs_cnt;
-static int cons_msg_next;
-static int cons_msg_stop = -1;
-static int64_t cons_last_offset = -1; /* last offset received */
-
-static void verify_consumed_msg_reset(int msgcnt) {
- if (cons_msgs) {
- free(cons_msgs);
- cons_msgs = NULL;
- }
-
- if (msgcnt) {
- int i;
-
- cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt);
- for (i = 0; i < msgcnt; i++)
- cons_msgs[i] = -1;
- }
-
- cons_msgs_size = msgcnt;
- cons_msgs_cnt = 0;
- cons_msg_next = 0;
- cons_msg_stop = -1;
- cons_last_offset = -1;
-
- TEST_SAY("Reset consumed_msg stats, making room for %d new messages\n",
- msgcnt);
-}
-
-
-static int int_cmp(const void *_a, const void *_b) {
- int a = *(int *)_a;
- int b = *(int *)_b;
- /* Sort -1 (non-received msgs) at the end */
- return (a == -1 ? 100000000 : a) - (b == -1 ? 100000000 : b);
-}
-
-static void verify_consumed_msg_check0(const char *func,
- int line,
- const char *desc,
- int expected_cnt) {
- int i;
- int fails = 0;
- int not_recvd = 0;
-
- TEST_SAY("%s: received %d/%d/%d messages\n", desc, cons_msgs_cnt,
- expected_cnt, cons_msgs_size);
- if (expected_cnt > cons_msgs_size)
- TEST_FAIL("expected_cnt %d > cons_msgs_size %d\n", expected_cnt,
- cons_msgs_size);
-
- if (cons_msgs_cnt < expected_cnt) {
- TEST_SAY("%s: Missing %i messages in consumer\n", desc,
- expected_cnt - cons_msgs_cnt);
- fails++;
- }
-
- qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);
-
- for (i = 0; i < expected_cnt; i++) {
- if (cons_msgs[i] != i) {
- if (cons_msgs[i] == -1) {
- not_recvd++;
- TEST_SAY("%s: msg %d/%d not received\n", desc,
- i, expected_cnt);
- } else
- TEST_SAY(
- "%s: Consumed message #%i is wrong, "
- "expected #%i\n",
- desc, cons_msgs[i], i);
- fails++;
- }
- }
-
- if (not_recvd)
- TEST_SAY("%s: %d messages not received at all\n", desc,
- not_recvd);
-
- if (fails)
- TEST_FAIL("%s: See above error(s)", desc);
- else
- TEST_SAY(
- "%s: message range check: %d/%d messages consumed: "
- "succeeded\n",
- desc, cons_msgs_cnt, expected_cnt);
-}
-
-
-#define verify_consumed_msg_check(desc, expected_cnt) \
- verify_consumed_msg_check0(__FUNCTION__, __LINE__, desc, expected_cnt)
-
-
-
-static void verify_consumed_msg0(const char *func,
- int line,
- uint64_t testid,
- int32_t partition,
- int msgnum,
- rd_kafka_message_t *rkmessage) {
- uint64_t in_testid;
- int in_part;
- int in_msgnum;
- char buf[128];
-
- if (rkmessage->key_len + 1 >= sizeof(buf))
- TEST_FAIL(
- "Incoming message key too large (%i): "
- "not sourced by this test",
- (int)rkmessage->key_len);
-
- rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len,
- (char *)rkmessage->key);
-
- if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid,
- &in_part, &in_msgnum) != 3)
- TEST_FAIL("Incorrect key format: %s", buf);
-
- if (test_level > 2) {
- TEST_SAY("%s:%i: Our testid %" PRIu64
- ", part %i (%i), "
- "msg %i/%i, key's: \"%s\"\n",
- func, line, testid, (int)partition,
- (int)rkmessage->partition, msgnum, cons_msgs_size,
- buf);
- }
-
- if (testid != in_testid || (partition != -1 && partition != in_part) ||
- (msgnum != -1 && msgnum != in_msgnum) ||
- (in_msgnum < 0 || in_msgnum > cons_msgs_size))
- goto fail_match;
-
- if (cons_msgs_cnt == cons_msgs_size) {
- TEST_SAY(
- "Too many messages in cons_msgs (%i) while reading "
- "message key \"%s\"\n",
- cons_msgs_cnt, buf);
- verify_consumed_msg_check("?", cons_msgs_size);
- TEST_FAIL("See above error(s)");
- }
-
- cons_msgs[cons_msgs_cnt++] = in_msgnum;
- cons_last_offset = rkmessage->offset;
-
- return;
-
-fail_match:
- TEST_FAIL("%s:%i: Our testid %" PRIu64
- ", part %i, msg %i/%i did "
- "not match message's key: \"%s\"\n",
- func, line, testid, (int)partition, msgnum, cons_msgs_size,
- buf);
-}
-
-#define verify_consumed_msg(testid, part, msgnum, rkmessage) \
- verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \
- rkmessage)
-
-
-static void consume_cb(rd_kafka_message_t *rkmessage, void *opaque) {
- int64_t testid = *(int64_t *)opaque;
-
- if (test_level > 2)
- TEST_SAY("Consumed message #%d? at offset %" PRId64 ": %s\n",
- cons_msg_next, rkmessage->offset,
- rd_kafka_err2str(rkmessage->err));
-
- if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("EOF at offset %" PRId64 "\n", rkmessage->offset);
- return;
- }
-
- if (rkmessage->err)
- TEST_FAIL(
- "Consume message from partition %i "
- "has error: %s",
- (int)rkmessage->partition,
- rd_kafka_err2str(rkmessage->err));
-
- verify_consumed_msg(testid, rkmessage->partition, cons_msg_next,
- rkmessage);
-
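- /* rd_kafka_yield() makes the ongoing rd_kafka_consume_callback()
- * dispatch return early so consumption stops at cons_msg_stop. */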
- if (cons_msg_next == cons_msg_stop) {
- rd_kafka_yield(NULL /*FIXME*/);
- }
-
- cons_msg_next++;
-}
-
-static void consume_messages_callback_multi(const char *desc,
- uint64_t testid,
- const char *topic,
- int32_t partition,
- const char *offset_store_method,
- int msg_base,
- int msg_cnt,
- int64_t initial_offset,
- int iterations) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- int i;
-
- TEST_SAY("%s: Consume messages %d+%d from %s [%" PRId32
- "] "
- "from offset %" PRId64 " in %d iterations\n",
- desc, msg_base, msg_cnt, topic, partition, initial_offset,
- iterations);
-
- test_conf_init(&conf, &topic_conf, 20);
-
- test_topic_conf_set(topic_conf, "offset.store.method",
- offset_store_method);
-
- if (!strcmp(offset_store_method, "broker")) {
- /* Broker based offset storage requires a group.id */
- test_conf_set(conf, "group.id", topic);
- }
-
- test_conf_set(conf, "enable.partition.eof", "true");
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "smallest",
- NULL, 0);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("%s: Failed to create topic: %s\n", desc,
- rd_kafka_err2str(rd_kafka_last_error()));
-
- cons_msg_stop = cons_msg_next + msg_cnt - 1;
-
- /* Consume the same batch of messages multiple times to
- * make sure back-to-back start&stops work. */
- for (i = 0; i < iterations; i++) {
- int cnta;
- test_timing_t t_stop;
-
- TEST_SAY(
- "%s: Iteration #%i: Consuming from "
- "partition %i at offset %" PRId64
- ", "
- "msgs range %d..%d\n",
- desc, i, partition, initial_offset, cons_msg_next,
- cons_msg_stop);
-
- /* Consume messages */
- if (rd_kafka_consume_start(rkt, partition, initial_offset) ==
- -1)
- TEST_FAIL("%s: consume_start(%i) failed: %s", desc,
- (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
-
-
- /* Stop consuming messages when this number of messages
- * is reached. */
- cnta = cons_msg_next;
- do {
- rd_kafka_consume_callback(rkt, partition, 1000,
- consume_cb, &testid);
- } while (cons_msg_next < cons_msg_stop);
-
- TEST_SAY("%s: Iteration #%i: consumed %i messages\n", desc, i,
- cons_msg_next - cnta);
-
- TIMING_START(&t_stop, "rd_kafka_consume_stop()");
- rd_kafka_consume_stop(rkt, partition);
- TIMING_STOP(&t_stop);
-
- /* Advance next offset so we don't reconsume
- * messages on the next run. */
- if (initial_offset != RD_KAFKA_OFFSET_STORED) {
- initial_offset = cons_last_offset + 1;
- cons_msg_stop = cons_msg_next + msg_cnt - 1;
- }
- }
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("%s: Destroying kafka instance %s\n", desc, rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-}
-
-
-
-static void test_produce_consume(const char *offset_store_method) {
- int msgcnt = 100;
- int partition_cnt = 1;
- int i;
- uint64_t testid;
- int msg_base = 0;
- const char *topic;
-
- /* Generate a testid so we can differentiate messages
- * from other tests */
- testid = test_id_generate();
-
- /* Read test.conf to configure topic name */
- test_conf_init(NULL, NULL, 20);
- topic = test_mk_topic_name("0014", 1 /*random*/);
-
- TEST_SAY("Topic %s, testid %" PRIu64 ", offset.store.method=%s\n",
- topic, testid, offset_store_method);
-
- /* Produce messages */
- produce_messages(testid, topic, partition_cnt, msg_base, msgcnt);
-
- /* 100% of messages */
- verify_consumed_msg_reset(msgcnt);
-
- /* Consume 50% of messages with callbacks: stored offsets with no prior
- * offset stored. */
- for (i = 0; i < partition_cnt; i++)
- consume_messages_callback_multi("STORED.1/2", testid, topic, i,
- offset_store_method, msg_base,
- (msgcnt / partition_cnt) / 2,
- RD_KAFKA_OFFSET_STORED, 1);
- verify_consumed_msg_check("STORED.1/2", msgcnt / 2);
-
- /* Consume the rest using the now stored offset */
- for (i = 0; i < partition_cnt; i++)
- consume_messages_callback_multi("STORED.2/2", testid, topic, i,
- offset_store_method, msg_base,
- (msgcnt / partition_cnt) / 2,
- RD_KAFKA_OFFSET_STORED, 1);
- verify_consumed_msg_check("STORED.2/2", msgcnt);
-
-
- /* Consume messages with callbacks: logical offsets */
- verify_consumed_msg_reset(msgcnt);
- for (i = 0; i < partition_cnt; i++) {
- int p_msg_cnt = msgcnt / partition_cnt;
- int64_t initial_offset = RD_KAFKA_OFFSET_TAIL(p_msg_cnt);
- const int iterations = 4;
- consume_messages_callback_multi("TAIL+", testid, topic, i,
- offset_store_method,
- /* start here (msgid) */
- msg_base,
- /* consume this many messages
- * per iteration. */
- p_msg_cnt / iterations,
- /* start here (offset) */
- initial_offset, iterations);
- }
-
- verify_consumed_msg_check("TAIL+", msgcnt);
-
- verify_consumed_msg_reset(0);
-
- return;
-}
-
-
-
-int main_0014_reconsume_191(int argc, char **argv) {
- if (test_broker_version >= TEST_BRKVER(0, 8, 2, 0))
- test_produce_consume("broker");
- test_produce_consume("file");
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c
deleted file mode 100644
index a551a0b53..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-
-static void do_legacy_seek(const char *topic, uint64_t testid, int msg_cnt) {
- rd_kafka_t *rk_c;
- rd_kafka_topic_t *rkt_c;
- int32_t partition = 0;
- int i;
- int64_t offset_last, offset_base;
- int dance_iterations = 10;
- int msgs_per_dance = 10;
- const int msg_base = 0;
-
- SUB_TEST_QUICK();
-
- rk_c = test_create_consumer(NULL, NULL, NULL, NULL);
- rkt_c = test_create_consumer_topic(rk_c, topic);
-
- /* Start consumer tests */
- test_consumer_start("verify.all", rkt_c, partition,
- RD_KAFKA_OFFSET_BEGINNING);
- /* Make sure all messages are available */
- offset_last = test_consume_msgs("verify.all", rkt_c, testid, partition,
- TEST_NO_SEEK, msg_base, msg_cnt,
- 1 /* parse format*/);
-
- /* Rewind offset back to its base. */
- offset_base = offset_last - msg_cnt + 1;
-
- TEST_SAY("%s [%" PRId32
- "]: Do random seek&consume for msgs #%d+%d with "
- "offsets %" PRId64 "..%" PRId64 "\n",
- rd_kafka_topic_name(rkt_c), partition, msg_base, msg_cnt,
- offset_base, offset_last);
-
- /* Now go dancing over the entire range with offset seeks. */
- for (i = 0; i < dance_iterations; i++) {
- int64_t offset =
- jitter((int)offset_base, (int)offset_base + msg_cnt);
-
- test_consume_msgs(
- "dance", rkt_c, testid, partition, offset,
- msg_base + (int)(offset - offset_base),
- RD_MIN(msgs_per_dance, (int)(offset_last - offset)),
- 1 /* parse format */);
- }
-
- test_consumer_stop("1", rkt_c, partition);
-
- rd_kafka_topic_destroy(rkt_c);
- rd_kafka_destroy(rk_c);
-
- SUB_TEST_PASS();
-}
-
-
-static void do_seek(const char *topic,
- uint64_t testid,
- int msg_cnt,
- rd_bool_t with_timeout) {
- rd_kafka_t *c;
- rd_kafka_topic_partition_list_t *partitions;
- char errstr[512];
- int i;
-
- SUB_TEST_QUICK("%s timeout", with_timeout ? "with" : "without");
-
- c = test_create_consumer(topic, NULL, NULL, NULL);
-
- partitions = rd_kafka_topic_partition_list_new(3);
- for (i = 0; i < 3; i++)
- rd_kafka_topic_partition_list_add(partitions, topic, i)
- ->offset = RD_KAFKA_OFFSET_END;
-
- TEST_CALL__(rd_kafka_assign(c, partitions));
-
- /* Should see no messages */
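- /* (all partitions were assigned at RD_KAFKA_OFFSET_END above). */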
- test_consumer_poll_no_msgs("NO.MSGS", c, testid, 3000);
-
- /* Seek to beginning */
- for (i = 0; i < 3; i++) {
- /* Sentinel to verify that this field is reset by
- * seek_partitions() */
- partitions->elems[i].err = RD_KAFKA_RESP_ERR__BAD_MSG;
- partitions->elems[i].offset =
- i == 0 ?
- /* Logical and absolute offsets for the same thing */
- RD_KAFKA_OFFSET_BEGINNING
- : 0;
- }
-
- TEST_SAY("Seeking\n");
- TEST_CALL_ERROR__(
- rd_kafka_seek_partitions(c, partitions, with_timeout ? 7000 : -1));
-
- /* Verify that there are no per-partition errors */
- for (i = 0; i < 3; i++)
- TEST_ASSERT_LATER(!partitions->elems[i].err,
- "Partition #%d has unexpected error: %s", i,
- rd_kafka_err2name(partitions->elems[i].err));
- TEST_LATER_CHECK();
-
- rd_kafka_topic_partition_list_destroy(partitions);
-
- /* Should now see all messages */
- test_consumer_poll("MSGS", c, testid, -1, 0, msg_cnt, NULL);
-
- /* Some close/destroy variation */
- if (with_timeout)
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0015_offsets_seek(int argc, char **argv) {
- const char *topic = test_mk_topic_name("0015", 1);
- int msg_cnt_per_part = test_quick ? 100 : 1000;
- int msg_cnt = 3 * msg_cnt_per_part;
- uint64_t testid;
-
- testid = test_id_generate();
-
- test_produce_msgs_easy_multi(
- testid, topic, 0, 0 * msg_cnt_per_part, msg_cnt_per_part, topic, 1,
- 1 * msg_cnt_per_part, msg_cnt_per_part, topic, 2,
- 2 * msg_cnt_per_part, msg_cnt_per_part, NULL);
-
- /* legacy seek: only reads partition 0 */
- do_legacy_seek(topic, testid, msg_cnt_per_part);
-
- do_seek(topic, testid, msg_cnt, rd_true /*with timeout*/);
-
- do_seek(topic, testid, msg_cnt, rd_false /*without timeout*/);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c
deleted file mode 100644
index 2d0605b88..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-/**
- * @name Verify KIP-511, client.software.name and client.software.version
- *
- */
-static char jmx_cmd[512];
-
-/**
- * @brief Verify that the expected software name and version is reported
- * in JMX metrics.
- */
-static void jmx_verify(const char *exp_swname, const char *exp_swversion) {
-#if _WIN32
- return;
-#else
- int r;
- char cmd[512 + 256];
-
- if (!*jmx_cmd)
- return;
-
- rd_snprintf(cmd, sizeof(cmd),
- "%s | "
- "grep -F 'clientSoftwareName=%s,clientSoftwareVersion=%s'",
- jmx_cmd, exp_swname, exp_swversion ? exp_swversion : "");
- r = system(cmd);
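- /* grep exits with 1 when no lines matched, i.e. the expected
- * software name/version was not reported. */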
- if (WEXITSTATUS(r) == 1)
- TEST_FAIL(
- "Expected software name and version not found in "
- "JMX metrics with command \"%s\"",
- cmd);
- else if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r))
- TEST_FAIL(
- "Failed to execute JmxTool command \"%s\": "
- "exit code %d",
- cmd, r);
-
- TEST_SAY(
- "Expected software name \"%s\" and version \"%s\" "
- "found in JMX metrics\n",
- exp_swname, exp_swversion);
-#endif /* !_WIN32 */
-}
-
-
-static void do_test_swname(const char *broker,
- const char *swname,
- const char *swversion,
- const char *exp_swname,
- const char *exp_swversion) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- const rd_kafka_metadata_t *md;
- rd_kafka_resp_err_t err;
-
- TEST_SAY(_C_MAG
- "[ Test client.software.name=%s, "
- "client.software.version=%s ]\n",
- swname ? swname : "NULL", swversion ? swversion : "NULL");
-
- test_conf_init(&conf, NULL, 30 /* jmxtool is severely slow */);
- if (broker)
- test_conf_set(conf, "bootstrap.servers", broker);
- if (swname)
- test_conf_set(conf, "client.software.name", swname);
- if (swversion)
- test_conf_set(conf, "client.software.version", swversion);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* Trigger a metadata request so we know we're connected. */
- err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000));
- TEST_ASSERT(!err, "metadata() failed: %s", rd_kafka_err2str(err));
- rd_kafka_metadata_destroy(md);
-
- /* Verify JMX metrics, if possible */
- jmx_verify(exp_swname, exp_swversion);
-
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN
- "[ Test client.software.name=%s, "
- "client.software.version=%s: PASS ]\n",
- swname ? swname : "NULL", swversion ? swversion : "NULL");
-}
-
-int main_0016_client_swname(int argc, char **argv) {
- const char *broker;
- const char *kafka_path;
- const char *jmx_port;
- const char *reason = NULL;
-
- /* If available, use the Kafka JmxTool to query software name
- * in broker JMX metrics */
- if (!(broker = test_getenv("BROKER_ADDRESS_2", NULL)))
- reason =
- "Env var BROKER_ADDRESS_2 missing "
- "(not running in trivup or trivup too old?)";
- else if (test_broker_version < TEST_BRKVER(2, 5, 0, 0))
- reason =
- "Client software JMX metrics not exposed prior to "
- "Apache Kafka 2.5.0.0";
- else if (!(kafka_path = test_getenv("KAFKA_PATH", NULL)))
- reason = "Env var KAFKA_PATH missing (not running in trivup?)";
- else if (!(jmx_port = test_getenv("BROKER_JMX_PORT_2", NULL)))
- reason =
- "Env var BROKER_JMX_PORT_2 missing "
- "(not running in trivup or trivup too old?)";
- else
- rd_snprintf(jmx_cmd, sizeof(jmx_cmd),
- "%s/bin/kafka-run-class.sh kafka.tools.JmxTool "
- "--jmx-url "
- "service:jmx:rmi:///jndi/rmi://:%s/jmxrmi "
- "--attributes connections --one-time true | "
- "grep clientSoftware",
- kafka_path, jmx_port);
-
- if (reason)
- TEST_WARN("Will not be able to verify JMX metrics: %s\n",
- reason);
-
- /* Default values. The version is not checked since the built
- * librdkafka may not use the same version string, and we don't
- * want to replicate the protocol-safe string mangling here. */
- do_test_swname(broker, NULL, NULL, "librdkafka", NULL);
- /* Properly formatted */
- do_test_swname(broker, "my-little-version", "1.2.3.4",
- "my-little-version", "1.2.3.4");
- /* Containing invalid characters; verify that sanitizing the
- * strings works. */
- do_test_swname(broker, "?1?this needs! ESCAPING?", "--v99.11 ~b~",
- "1-this-needs--ESCAPING", "v99.11--b");
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c
deleted file mode 100644
index f28f63f24..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Basic compression tests, with rather lacking verification.
- */
-
-
-int main_0017_compression(int argc, char **argv) {
- rd_kafka_t *rk_p, *rk_c;
- const int msg_cnt = 1000;
- int msg_base = 0;
- uint64_t testid;
-#define CODEC_CNT 5
- const char *codecs[CODEC_CNT + 1] = {
- "none",
-#if WITH_ZLIB
- "gzip",
-#endif
-#if WITH_SNAPPY
- "snappy",
-#endif
-#if WITH_ZSTD
- "zstd",
-#endif
- "lz4",
- NULL
- };
- char *topics[CODEC_CNT];
- const int32_t partition = 0;
- int i;
- int crc;
-
- testid = test_id_generate();
-
- /* Produce messages */
- rk_p = test_create_producer();
- for (i = 0; codecs[i] != NULL; i++) {
- rd_kafka_topic_t *rkt_p;
-
- topics[i] = rd_strdup(test_mk_topic_name(codecs[i], 1));
- TEST_SAY(
- "Produce %d messages with %s compression to "
- "topic %s\n",
- msg_cnt, codecs[i], topics[i]);
- rkt_p = test_create_producer_topic(
- rk_p, topics[i], "compression.codec", codecs[i], NULL);
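- /* compression.codec is a topic-level producer setting; the
- * consumer decompresses transparently. */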
-
- /* Produce small message that will not decrease with
- * compression (issue #781) */
- test_produce_msgs(rk_p, rkt_p, testid, partition,
- msg_base + (partition * msg_cnt), 1, NULL, 5);
-
- /* Produce standard sized messages */
- test_produce_msgs(rk_p, rkt_p, testid, partition,
- msg_base + (partition * msg_cnt) + 1,
- msg_cnt - 1, NULL, 512);
- rd_kafka_topic_destroy(rkt_p);
- }
-
- rd_kafka_destroy(rk_p);
-
-
- /* Restart the test timeout (mainly for helgrind, which is very slow) */
- test_timeout_set(30);
-
- /* Consume messages: Without and with CRC checking */
- for (crc = 0; crc < 2; crc++) {
- const char *crc_tof = crc ? "true" : "false";
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "check.crcs", crc_tof);
-
- rk_c = test_create_consumer(NULL, NULL, conf, NULL);
-
- for (i = 0; codecs[i] != NULL; i++) {
- rd_kafka_topic_t *rkt_c =
- rd_kafka_topic_new(rk_c, topics[i], NULL);
-
- TEST_SAY("Consume %d messages from topic %s (crc=%s)\n",
- msg_cnt, topics[i], crc_tof);
- /* Start consuming */
- test_consumer_start(codecs[i], rkt_c, partition,
- RD_KAFKA_OFFSET_BEGINNING);
-
- /* Consume messages */
- test_consume_msgs(
- codecs[i], rkt_c, testid, partition,
- /* Use offset 0 here, which is wrong, should
- * be TEST_NO_SEEK, but it exposed a bug
- * where the Offset query was postponed
- * till after the seek, causing messages
- * to be replayed. */
- 0, msg_base, msg_cnt, 1 /* parse format */);
-
- test_consumer_stop(codecs[i], rkt_c, partition);
-
- rd_kafka_topic_destroy(rkt_c);
- }
-
- rd_kafka_destroy(rk_c);
- }
-
- for (i = 0; codecs[i] != NULL; i++)
- rd_free(topics[i]);
-
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c
deleted file mode 100644
index 6b22339d7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdstring.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * KafkaConsumer balanced group testing: termination
- *
- * Runs two consumers subscribing to the same topics, waits for both to
- * get an assignment and then closes one of them.
- */
-
-
-static int assign_cnt = 0;
-static int consumed_msg_cnt = 0;
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque) {
- char *memberid = rd_kafka_memberid(rk);
-
- TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
- rd_kafka_name(rk), memberid, rd_kafka_err2str(err));
-
- if (memberid)
- free(memberid);
-
- test_print_partition_list(partitions);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- assign_cnt++;
- rd_kafka_assign(rk, partitions);
- break;
-
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- if (assign_cnt == 0)
- TEST_FAIL("asymetric rebalance_cb\n");
- assign_cnt--;
- rd_kafka_assign(rk, NULL);
- break;
-
- default:
- TEST_FAIL("rebalance failed: %s\n", rd_kafka_err2str(err));
- break;
- }
-}
-
-
-static void consume_all(rd_kafka_t **rk_c,
- int rk_cnt,
- int exp_msg_cnt,
- int max_time /*ms*/) {
- int64_t ts_start = test_clock();
- int i;
-
- max_time *= 1000;
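- /* test_clock() is in microseconds, hence max_time is scaled
- * up from milliseconds. */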
- while (ts_start + max_time > test_clock()) {
- for (i = 0; i < rk_cnt; i++) {
- rd_kafka_message_t *rkmsg;
-
- if (!rk_c[i])
- continue;
-
- rkmsg = rd_kafka_consumer_poll(rk_c[i], 500);
-
- if (!rkmsg)
- continue;
- else if (rkmsg->err)
- TEST_SAY(
- "Message error "
- "(at offset %" PRId64
- " after "
- "%d/%d messages and %dms): %s\n",
- rkmsg->offset, consumed_msg_cnt,
- exp_msg_cnt,
- (int)(test_clock() - ts_start) / 1000,
- rd_kafka_message_errstr(rkmsg));
- else
- consumed_msg_cnt++;
-
- rd_kafka_message_destroy(rkmsg);
-
- if (consumed_msg_cnt >= exp_msg_cnt) {
- static int once = 0;
- if (!once++)
- TEST_SAY("All messages consumed\n");
- return;
- }
- }
- }
-}
-
-struct args {
- rd_kafka_t *c;
- rd_kafka_queue_t *queue;
-};
-
-static int poller_thread_main(void *p) {
- struct args *args = (struct args *)p;
-
- while (!rd_kafka_consumer_closed(args->c)) {
- rd_kafka_message_t *rkm;
-
- /* Using a long timeout (1 minute) to verify that the
- * queue is woken when close is done. */
- rkm = rd_kafka_consume_queue(args->queue, 60 * 1000);
- if (rkm)
- rd_kafka_message_destroy(rkm);
- }
-
- return 0;
-}
-
-/**
- * @brief Close consumer using async queue.
- */
-static void consumer_close_queue(rd_kafka_t *c) {
- /* Use the standard consumer queue rather than a temporary queue,
- * the latter is covered by test 0116. */
- rd_kafka_queue_t *queue = rd_kafka_queue_get_consumer(c);
- struct args args = {c, queue};
- thrd_t thrd;
- int ret;
-
- /* Spin up poller thread */
- if (thrd_create(&thrd, poller_thread_main, (void *)&args) !=
- thrd_success)
- TEST_FAIL("Failed to create thread");
-
- TEST_SAY("Closing consumer %s using queue\n", rd_kafka_name(c));
- TEST_CALL_ERROR__(rd_kafka_consumer_close_queue(c, queue));
-
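- /* consumer_close_queue() is asynchronous; the poller thread keeps
- * serving the queue until rd_kafka_consumer_closed() returns true,
- * after which it exits and can be joined. */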
- if (thrd_join(thrd, &ret) != thrd_success)
- TEST_FAIL("thrd_join failed");
-
- rd_kafka_queue_destroy(queue);
-}
-
-
-static void do_test(rd_bool_t with_queue) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
-#define _CONS_CNT 2
- rd_kafka_t *rk_p, *rk_c[_CONS_CNT];
- rd_kafka_topic_t *rkt_p;
- int msg_cnt = test_quick ? 100 : 1000;
- int msg_base = 0;
- int partition_cnt = 2;
- int partition;
- uint64_t testid;
- rd_kafka_topic_conf_t *default_topic_conf;
- rd_kafka_topic_partition_list_t *topics;
- rd_kafka_resp_err_t err;
- test_timing_t t_assign, t_consume;
- char errstr[512];
- int i;
-
- SUB_TEST("with_queue=%s", RD_STR_ToF(with_queue));
-
- testid = test_id_generate();
-
- /* Produce messages */
- rk_p = test_create_producer();
- rkt_p = test_create_producer_topic(rk_p, topic, NULL);
-
- for (partition = 0; partition < partition_cnt; partition++) {
- test_produce_msgs(rk_p, rkt_p, testid, partition,
- msg_base + (partition * msg_cnt), msg_cnt,
- NULL, 0);
- }
-
- rd_kafka_topic_destroy(rkt_p);
- rd_kafka_destroy(rk_p);
-
-
- test_conf_init(NULL, &default_topic_conf,
- 5 + ((test_session_timeout_ms * 3 * 2) / 1000));
- if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
- "smallest", errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s\n", errstr);
-
- /* Fill in topic subscription set */
- topics = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(topics, topic, -1);
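- /* The partition is ignored for subscriptions;
- * RD_KAFKA_PARTITION_UA (-1) is given by convention. */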
-
- /* Create consumers and start subscription */
- for (i = 0; i < _CONS_CNT; i++) {
- rk_c[i] = test_create_consumer(
- topic /*group_id*/, rebalance_cb, NULL,
- rd_kafka_topic_conf_dup(default_topic_conf));
-
- err = rd_kafka_poll_set_consumer(rk_c[i]);
- if (err)
- TEST_FAIL("poll_set_consumer: %s\n",
- rd_kafka_err2str(err));
-
- err = rd_kafka_subscribe(rk_c[i], topics);
- if (err)
- TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
- }
-
- rd_kafka_topic_conf_destroy(default_topic_conf);
-
- rd_kafka_topic_partition_list_destroy(topics);
-
-
- /* Wait for both consumers to get an assignment */
- TEST_SAY("Awaiting assignments for %d consumer(s)\n", _CONS_CNT);
- TIMING_START(&t_assign, "WAIT.ASSIGN");
- while (assign_cnt < _CONS_CNT)
- consume_all(rk_c, _CONS_CNT, msg_cnt,
- test_session_timeout_ms + 3000);
- TIMING_STOP(&t_assign);
-
- /* Now close one of the consumers, this will cause a rebalance. */
- TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT,
- rd_kafka_name(rk_c[0]));
- if (with_queue)
- consumer_close_queue(rk_c[0]);
- else
- TEST_CALL_ERR__(rd_kafka_consumer_close(rk_c[0]));
-
- rd_kafka_destroy(rk_c[0]);
- rk_c[0] = NULL;
-
- /* Let remaining consumers run for a while to take over the now
- * lost partitions. */
-
- if (assign_cnt != _CONS_CNT - 1)
- TEST_FAIL("assign_cnt %d, should be %d\n", assign_cnt,
- _CONS_CNT - 1);
-
- TIMING_START(&t_consume, "CONSUME.WAIT");
- consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000);
- TIMING_STOP(&t_consume);
-
- TEST_SAY("Closing remaining consumers\n");
- for (i = 0; i < _CONS_CNT; i++) {
- test_timing_t t_close;
- rd_kafka_topic_partition_list_t *sub;
- int j;
-
- if (!rk_c[i])
- continue;
-
- /* Query subscription */
- err = rd_kafka_subscription(rk_c[i], &sub);
- if (err)
- TEST_FAIL("%s: subscription() failed: %s\n",
- rd_kafka_name(rk_c[i]),
- rd_kafka_err2str(err));
- TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c[i]),
- sub->cnt);
- for (j = 0; j < sub->cnt; j++)
- TEST_SAY(" %s\n", sub->elems[j].topic);
- rd_kafka_topic_partition_list_destroy(sub);
-
- /* Run an explicit unsubscribe() (async) prior to close()
- * to trigger race condition issues on termination. */
- TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c[i]));
- err = rd_kafka_unsubscribe(rk_c[i]);
- if (err)
- TEST_FAIL("%s: unsubscribe failed: %s\n",
- rd_kafka_name(rk_c[i]),
- rd_kafka_err2str(err));
-
- TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
- TIMING_START(&t_close, "CONSUMER.CLOSE");
- if (with_queue)
- consumer_close_queue(rk_c[i]);
- else
- TEST_CALL_ERR__(rd_kafka_consumer_close(rk_c[i]));
- TIMING_STOP(&t_close);
-
- rd_kafka_destroy(rk_c[i]);
- rk_c[i] = NULL;
- }
-
- TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt);
- if (consumed_msg_cnt < msg_cnt)
- TEST_FAIL("Only %d/%d messages were consumed\n",
- consumed_msg_cnt, msg_cnt);
- else if (consumed_msg_cnt > msg_cnt)
- TEST_SAY(
- "At least %d/%d messages were consumed "
- "multiple times\n",
- consumed_msg_cnt - msg_cnt, msg_cnt);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0018_cgrp_term(int argc, char **argv) {
- do_test(rd_false /* rd_kafka_consumer_close() */);
- do_test(rd_true /* rd_kafka_consumer_close_queue() */);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c
deleted file mode 100644
index 02729c339..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * List consumer groups
- *
- * Runs two consumers in two different groups and lists them.
- */
-
-
-
-/**
- * Verify that all groups in 'groups' are seen, if so returns group_cnt,
- * else returns -1.
- */
-static int verify_groups(const struct rd_kafka_group_list *grplist,
- char **groups,
- int group_cnt) {
- int i;
- int seen = 0;
-
- for (i = 0; i < grplist->group_cnt; i++) {
- const struct rd_kafka_group_info *gi = &grplist->groups[i];
- int j;
-
- for (j = 0; j < group_cnt; j++) {
- if (strcmp(gi->group, groups[j]))
- continue;
-
- if (gi->err)
- TEST_SAY(
- "Group %s has broker-reported "
- "error: %s\n",
- gi->group, rd_kafka_err2str(gi->err));
-
- seen++;
- }
- }
-
- TEST_SAY("Found %d/%d desired groups in list of %d groups\n", seen,
- group_cnt, grplist->group_cnt);
-
- if (seen != group_cnt)
- return -1;
- else
- return seen;
-}
-
-
-/**
- * List groups by:
- * - List all groups, check that the groups in 'groups' are seen.
- * - List each group in 'groups', one by one.
- *
- * Returns 'group_cnt' if all groups in 'groups' were seen by both
- * methods, else 0, or -1 on error.
- */
-static int
-list_groups(rd_kafka_t *rk, char **groups, int group_cnt, const char *desc) {
- rd_kafka_resp_err_t err = 0;
- const struct rd_kafka_group_list *grplist;
- int i, r;
- int fails = 0;
- int seen = 0;
- int seen_all = 0;
- int retries = 5;
-
- TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc);
-
- /* FIXME: Wait for broker to come up. This should really be abstracted
- * by librdkafka. */
- do {
- if (err) {
- TEST_SAY("Retrying group list in 1s because of: %s\n",
- rd_kafka_err2str(err));
- rd_sleep(1);
- }
- err = rd_kafka_list_groups(rk, NULL, &grplist,
- tmout_multip(5000));
- } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) &&
- retries-- > 0);
-
- if (err) {
- TEST_SAY("Failed to list all groups: %s\n",
- rd_kafka_err2str(err));
- return -1;
- }
-
- seen_all = verify_groups(grplist, groups, group_cnt);
- rd_kafka_group_list_destroy(grplist);
-
- for (i = 0; i < group_cnt; i++) {
- err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000);
- if (err) {
- TEST_SAY("Failed to list group %s: %s\n", groups[i],
- rd_kafka_err2str(err));
- fails++;
- continue;
- }
-
- r = verify_groups(grplist, &groups[i], 1);
- if (r == 1)
- seen++;
- rd_kafka_group_list_destroy(grplist);
- }
-
-
- if (seen_all != seen)
- return 0;
-
- return seen;
-}
-
-
-
-static void do_test_list_groups(void) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
-#define _CONS_CNT 2
- char *groups[_CONS_CNT];
- rd_kafka_t *rk, *rk_c[_CONS_CNT];
- rd_kafka_topic_partition_list_t *topics;
- rd_kafka_resp_err_t err;
- test_timing_t t_grps;
- int i;
- int groups_seen;
- rd_kafka_topic_t *rkt;
- const struct rd_kafka_group_list *grplist;
-
- SUB_TEST();
-
- /* Handle for group listings */
- rk = test_create_producer();
-
- /* Produce messages so that topic is auto created */
- rkt = test_create_topic_object(rk, topic, NULL);
- test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64);
- rd_kafka_topic_destroy(rkt);
-
- /* Query groups before creation, should not list our groups. */
- groups_seen = list_groups(rk, NULL, 0, "should be none");
- if (groups_seen != 0)
- TEST_FAIL(
-                    "Saw %d groups when there weren't "
-                    "supposed to be any\n",
- groups_seen);
-
- /* Fill in topic subscription set */
- topics = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(topics, topic, -1);
-
- /* Create consumers and start subscription */
- for (i = 0; i < _CONS_CNT; i++) {
- groups[i] = malloc(32);
- test_str_id_generate(groups[i], 32);
- rk_c[i] = test_create_consumer(groups[i], NULL, NULL, NULL);
-
- err = rd_kafka_poll_set_consumer(rk_c[i]);
- if (err)
- TEST_FAIL("poll_set_consumer: %s\n",
- rd_kafka_err2str(err));
-
- err = rd_kafka_subscribe(rk_c[i], topics);
- if (err)
- TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
- }
-
- rd_kafka_topic_partition_list_destroy(topics);
-
-
- TIMING_START(&t_grps, "WAIT.GROUPS");
- /* Query groups again until both groups are seen. */
- while (1) {
- groups_seen = list_groups(rk, (char **)groups, _CONS_CNT,
- "should see my groups");
- if (groups_seen == _CONS_CNT)
- break;
- rd_sleep(1);
- }
- TIMING_STOP(&t_grps);
-
- /* Try a list_groups with a low enough timeout to fail. */
- grplist = NULL;
- TIMING_START(&t_grps, "WAIT.GROUPS.TIMEOUT0");
- err = rd_kafka_list_groups(rk, NULL, &grplist, 0);
- TIMING_STOP(&t_grps);
- TEST_SAY("list_groups(timeout=0) returned %d groups and status: %s\n",
- grplist ? grplist->group_cnt : -1, rd_kafka_err2str(err));
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected list_groups(timeout=0) to fail "
- "with timeout, got %s",
- rd_kafka_err2str(err));
-
-
- TEST_SAY("Closing remaining consumers\n");
- for (i = 0; i < _CONS_CNT; i++) {
- test_timing_t t_close;
- if (!rk_c[i])
- continue;
-
- TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
- TIMING_START(&t_close, "CONSUMER.CLOSE");
- err = rd_kafka_consumer_close(rk_c[i]);
- TIMING_STOP(&t_close);
- if (err)
- TEST_FAIL("consumer_close failed: %s\n",
- rd_kafka_err2str(err));
-
- rd_kafka_destroy(rk_c[i]);
- rk_c[i] = NULL;
-
- free(groups[i]);
- }
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief #3705: Verify that list_groups() doesn't hang if unable to
- * connect to the cluster.
- */
-static void do_test_list_groups_hang(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- const struct rd_kafka_group_list *grplist;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
-
- SUB_TEST();
- test_conf_init(&conf, NULL, 20);
-
- /* An unavailable broker */
- test_conf_set(conf, "bootstrap.servers", "127.0.0.1:65531");
-
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- TIMING_START(&timing, "list_groups");
- err = rd_kafka_list_groups(rk, NULL, &grplist, 5 * 1000);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected ERR__TIMED_OUT, not %s", rd_kafka_err2name(err));
- TIMING_ASSERT(&timing, 5 * 1000, 7 * 1000);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0019_list_groups(int argc, char **argv) {
- do_test_list_groups();
- do_test_list_groups_hang();
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c
deleted file mode 100644
index a8a6552fa..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Various regression tests for hangs on destroy.
- */
-
-
-
-/**
- * Request offset for a nonexistent partition.
- * Will cause rd_kafka_destroy() to hang.
- */
-
-static int nonexist_part(void) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- rd_kafka_t *rk;
- rd_kafka_topic_partition_list_t *parts;
- rd_kafka_resp_err_t err;
- test_timing_t t_pos;
- const int msgcnt = 100;
- uint64_t testid;
- int i;
- int it, iterations = 5;
-
- /* Produce messages */
- testid =
- test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);
-
- for (it = 0; it < iterations; it++) {
- char group_id[32];
-
- test_conf_init(NULL, NULL, 15);
-
- test_str_id_generate(group_id, sizeof(group_id));
-
- TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations,
- group_id);
-
- /* Consume messages */
- test_consume_msgs_easy(group_id, topic, testid, -1, msgcnt,
- NULL);
-
- /*
- * Now start a new consumer and query stored offsets (positions)
- */
-
- rk = test_create_consumer(group_id, NULL, NULL, NULL);
-
- /* Fill in partition set */
- parts = rd_kafka_topic_partition_list_new(2);
- /* existing */
- rd_kafka_topic_partition_list_add(parts, topic, 0);
- /* non-existing */
- rd_kafka_topic_partition_list_add(parts, topic, 123);
-
-
- TIMING_START(&t_pos, "COMMITTED");
- err = rd_kafka_committed(rk, parts, tmout_multip(5000));
- TIMING_STOP(&t_pos);
- if (err)
- TEST_FAIL("Failed to acquire committed offsets: %s\n",
- rd_kafka_err2str(err));
-
- for (i = 0; i < parts->cnt; i++) {
- TEST_SAY("%s [%" PRId32 "] returned offset %" PRId64
- ": %s\n",
- parts->elems[i].topic,
- parts->elems[i].partition,
- parts->elems[i].offset,
- rd_kafka_err2str(parts->elems[i].err));
- if (parts->elems[i].partition == 0 &&
- parts->elems[i].offset <= 0)
- TEST_FAIL("Partition %" PRId32
- " should have a "
- "proper offset, not %" PRId64 "\n",
- parts->elems[i].partition,
- parts->elems[i].offset);
- else if (parts->elems[i].partition == 123 &&
- parts->elems[i].offset !=
- RD_KAFKA_OFFSET_INVALID)
- TEST_FAIL("Partition %" PRId32
- " should have failed\n",
- parts->elems[i].partition);
- }
-
- rd_kafka_topic_partition_list_destroy(parts);
-
- test_consumer_close(rk);
-
- /* Hangs if bug isn't fixed */
- rd_kafka_destroy(rk);
- }
-
- return 0;
-}
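-
-/* A minimal sketch (illustration only) of the behaviour nonexist_part()
- * probes above: rd_kafka_committed() reports per-partition problems in
- * each list element, so a nonexistent partition surfaces as elems[i].err
- * or RD_KAFKA_OFFSET_INVALID rather than as a global error. */
-static void example_query_committed(rd_kafka_t *rk, const char *topic) {
-        rd_kafka_topic_partition_list_t *parts =
-            rd_kafka_topic_partition_list_new(1);
-
-        rd_kafka_topic_partition_list_add(parts, topic, 0);
-        if (!rd_kafka_committed(rk, parts, 5000 /*ms*/) &&
-            !parts->elems[0].err)
-                printf("committed offset: %" PRId64 "\n",
-                       parts->elems[0].offset);
-        rd_kafka_topic_partition_list_destroy(parts);
-}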
-
-
-/**
- * Issue #691: Producer hangs on destroy if group.id is configured.
- */
-static int producer_groupid(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
-
- TEST_SAY("producer_groupid hang test\n");
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "group.id", "dummy");
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("Destroying producer\n");
- rd_kafka_destroy(rk);
-
- return 0;
-}
-
-int main_0020_destroy_hang(int argc, char **argv) {
- int fails = 0;
-
- test_conf_init(NULL, NULL, 30);
-
- fails += nonexist_part();
- fails += producer_groupid();
- if (fails > 0)
- TEST_FAIL("See %d previous error(s)\n", fails);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c
deleted file mode 100644
index 76b4dd16b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Issue #502
- * Crash if rd_kafka_topic_destroy() is called before all messages
- * have been produced.
- * This only happens when using a partitioner (producing to PARTITION_UA)
- */
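-
-/* A minimal sketch (illustration only) of the shutdown ordering this
- * test exercises; even with the fix, explicitly draining the producer
- * (e.g. with rd_kafka_flush()) before destroying handles is the
- * recommended pattern. */
-static void example_safe_shutdown(rd_kafka_t *rk, rd_kafka_topic_t *rkt) {
-        rd_kafka_flush(rk, 10 * 1000); /* wait for delivery reports */
-        rd_kafka_topic_destroy(rkt);   /* then release the topic object */
-        rd_kafka_destroy(rk);          /* and finally the handle */
-}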
-
-
-
-int main_0021_rkt_destroy(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 0);
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- const int msgcnt = 1000;
- uint64_t testid;
- int remains = 0;
-
- test_conf_init(NULL, NULL, 10);
-
-
- testid = test_id_generate();
- rk = test_create_producer();
- rkt = test_create_producer_topic(rk, topic, NULL);
-
-
- test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0,
- msgcnt, NULL, 0, 0, &remains);
-
- rd_kafka_topic_destroy(rkt);
-
- test_wait_delivery(rk, &remains);
-
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c
deleted file mode 100644
index 64e826d03..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Consume with batch + queue interface
- *
- */
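-
-/* A minimal sketch (illustration only) of the batch+queue pattern under
- * test: partitions are redirected to one queue (see
- * rd_kafka_consume_start_queue() below), which is drained in batches. */
-static void example_drain_queue(rd_kafka_queue_t *rkq) {
-        rd_kafka_message_t *msgs[100];
-        ssize_t i, n;
-
-        n = rd_kafka_consume_batch_queue(rkq, 1000 /*ms*/, msgs, 100);
-        for (i = 0; i < n; i++) {
-                /* ... process msgs[i]->payload here ... */
-                rd_kafka_message_destroy(msgs[i]);
-        }
-}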
-
-
-static void do_test_consume_batch(void) {
-#define topic_cnt 2
- char *topics[topic_cnt];
- const int partition_cnt = 2;
- rd_kafka_t *rk;
- rd_kafka_queue_t *rkq;
- rd_kafka_topic_t *rkts[topic_cnt];
- rd_kafka_resp_err_t err;
- const int msgcnt = test_quick ? 1000 : 10000;
- uint64_t testid;
- int i, p;
- int batch_cnt = 0;
- int remains;
-
- SUB_TEST();
-
- testid = test_id_generate();
-
- /* Produce messages */
- for (i = 0; i < topic_cnt; i++) {
- topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- for (p = 0; p < partition_cnt; p++)
- test_produce_msgs_easy(topics[i], testid, p,
- msgcnt / topic_cnt /
- partition_cnt);
- }
-
-
- /* Create simple consumer */
- rk = test_create_consumer(NULL, NULL, NULL, NULL);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_new(rk);
-
- for (i = 0; i < topic_cnt; i++) {
- /* Create topic object */
- rkts[i] = test_create_topic_object(
- rk, topics[i], "auto.offset.reset", "smallest", NULL);
-
- /* Start consuming each partition and redirect
- * messages to queue */
-
- TEST_SAY("Start consuming topic %s partitions 0..%d\n",
-			 rd_kafka_topic_name(rkts[i]), partition_cnt - 1);
-
- for (p = 0; p < partition_cnt; p++) {
- err = rd_kafka_consume_start_queue(
- rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq);
- if (err)
- TEST_FAIL("Failed to start consuming: %s\n",
- rd_kafka_err2str(err));
- }
- }
-
- remains = msgcnt;
-
- /* Consume messages from common queue using batch interface. */
- TEST_SAY("Consume %d messages from queue\n", remains);
- while (remains > 0) {
- rd_kafka_message_t *rkmessage[1000];
- ssize_t r;
- test_timing_t t_batch;
-
- TIMING_START(&t_batch, "CONSUME.BATCH");
- r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000);
- TIMING_STOP(&t_batch);
-
- TEST_SAY("Batch consume iteration #%d: Consumed %" PRIdsz
- "/1000 messages\n",
- batch_cnt, r);
-
- if (r == -1)
- TEST_FAIL("Failed to consume messages: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- remains -= (int)r;
-
- for (i = 0; i < r; i++)
- rd_kafka_message_destroy(rkmessage[i]);
-
- batch_cnt++;
- }
-
-
- TEST_SAY("Stopping consumer\n");
- for (i = 0; i < topic_cnt; i++) {
- for (p = 0; p < partition_cnt; p++) {
- err = rd_kafka_consume_stop(rkts[i], p);
- if (err)
- TEST_FAIL("Failed to stop consuming: %s\n",
- rd_kafka_err2str(err));
- }
-
- rd_kafka_topic_destroy(rkts[i]);
- rd_free(topics[i]);
- }
-
- rd_kafka_queue_destroy(rkq);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-#if WITH_SASL_OAUTHBEARER
-/**
- * @brief Verify that the oauthbearer_refresh_cb() is triggered
- * when using consume_batch_queue() (as opposed to consumer_poll()).
- */
-
-static rd_bool_t refresh_called = rd_false;
-
-static void
-refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque) {
- TEST_SAY("Refresh callback called\n");
- TEST_ASSERT(!refresh_called);
- refresh_called = rd_true;
- rd_kafka_oauthbearer_set_token_failure(rk, "Refresh called");
-}
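-
-/* A sketch (illustration only) of a refresh callback's success path:
- * a real implementation would fetch a token and hand it to
- * rd_kafka_oauthbearer_set_token(); the callback above instead fails
- * the token on purpose, merely to prove that it was invoked. The
- * token value, expiry and principal below are placeholders. */
-static void example_refresh_cb(rd_kafka_t *rk,
-                               const char *oauthbearer_config,
-                               void *opaque) {
-        char errstr[512];
-
-        if (rd_kafka_oauthbearer_set_token(
-                rk, "the-token", 1700000000000LL /*abs expiry, ms*/,
-                "principal", NULL, 0, errstr, sizeof(errstr)))
-                rd_kafka_oauthbearer_set_token_failure(rk, errstr);
-}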
-
-static void do_test_consume_batch_oauthbearer_cb(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *rkq;
- rd_kafka_message_t *rkms[1];
- ssize_t r;
-
- SUB_TEST_QUICK();
-
- refresh_called = rd_false;
-
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "security.protocol", "sasl_plaintext");
- test_conf_set(conf, "sasl.mechanism", "OAUTHBEARER");
- rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb);
-
- /* Create simple consumer */
- rk = test_create_consumer(NULL, NULL, conf, NULL);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_get_main(rk);
-
- r = rd_kafka_consume_batch_queue(rkq, 1000, rkms, 1);
- TEST_ASSERT(r == 0, "Expected return value 0, not %d", (int)r);
-
- TEST_SAY("refresh_called = %d\n", refresh_called);
- TEST_ASSERT(refresh_called,
- "Expected refresh callback to have been called");
-
- rd_kafka_queue_destroy(rkq);
-
- rd_kafka_destroy(rk);
-}
-#endif
-
-
-int main_0022_consume_batch(int argc, char **argv) {
- do_test_consume_batch();
- return 0;
-}
-
-
-int main_0022_consume_batch_local(int argc, char **argv) {
-#if WITH_SASL_OAUTHBEARER
- do_test_consume_batch_oauthbearer_cb();
-#else
- TEST_SKIP("No OAUTHBEARER support\n");
-#endif
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c
deleted file mode 100644
index 318fc0a1b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-/**
- * Tests that rdkafka's internal timers behave.
- */
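-
-/* A minimal sketch (illustration only) of wiring up the periodic
- * statistics callback whose timer is measured below. */
-static int example_stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
-                            void *opaque) {
-        /* `json` is only valid for the duration of the callback;
-         * returning 0 lets librdkafka free it. */
-        return 0;
-}
-/* ...then, before creating the handle:
- *   char errstr[512];
- *   rd_kafka_conf_set(conf, "statistics.interval.ms", "600",
- *                     errstr, sizeof(errstr));
- *   rd_kafka_conf_set_stats_cb(conf, example_stats_cb);
- */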
-
-
-
-struct state {
- int calls;
- int64_t ts_last;
- int interval;
- int fails;
-};
-
-struct state state;
-
-
-static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
- const int64_t now = test_clock();
-        /* Fake the first elapsed time since we don't really know how
- * long rd_kafka_new() takes and at what time the timer is started. */
- const int64_t elapsed =
- state.ts_last ? now - state.ts_last : state.interval;
- const int64_t overshoot = elapsed - state.interval;
- const int wiggleroom_up =
- (int)((double)state.interval *
- (!strcmp(test_mode, "bare") ? 0.2 : 1.0));
- const int wiggleroom_down = (int)((double)state.interval * 0.1);
-
- TEST_SAY("Call #%d: after %" PRId64
- "ms, %.0f%% outside "
- "interval %" PRId64 " >-%d <+%d\n",
- state.calls, elapsed / 1000,
- ((double)overshoot / state.interval) * 100.0,
- (int64_t)state.interval / 1000, wiggleroom_down / 1000,
- wiggleroom_up / 1000);
-
- if (overshoot < -wiggleroom_down || overshoot > wiggleroom_up) {
- TEST_WARN("^ outside range\n");
- state.fails++;
- }
-
- state.ts_last = now;
- state.calls++;
-
- return 0;
-}
-
-
-/**
- * Enable statistics with a set interval, make sure the stats callbacks are
- * called within reasonable intervals.
- */
-static void do_test_stats_timer(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- const int exp_calls = 10;
- test_timing_t t_new;
-
- memset(&state, 0, sizeof(state));
-
- state.interval = 600 * 1000;
-
- test_conf_init(&conf, NULL, 200);
-
- test_conf_set(conf, "statistics.interval.ms", "600");
- test_conf_set(conf, "bootstrap.servers", NULL); /*no need for brokers*/
- rd_kafka_conf_set_stats_cb(conf, stats_cb);
-
- TIMING_START(&t_new, "rd_kafka_new()");
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
- TIMING_STOP(&t_new);
-
- TEST_SAY(
- "Starting wait loop for %d expected stats_cb calls "
- "with an interval of %dms\n",
- exp_calls, state.interval / 1000);
-
-
- while (state.calls < exp_calls) {
- test_timing_t t_poll;
- TIMING_START(&t_poll, "rd_kafka_poll()");
- rd_kafka_poll(rk, 100);
- TIMING_STOP(&t_poll);
-
- if (TIMING_DURATION(&t_poll) > 150 * 1000)
- TEST_WARN(
- "rd_kafka_poll(rk,100) "
- "took more than 50%% extra\n");
- }
-
- rd_kafka_destroy(rk);
-
- if (state.calls > exp_calls)
- TEST_SAY("Got more calls than expected: %d > %d\n", state.calls,
- exp_calls);
-
- if (state.fails) {
- /* We can't rely on CIs giving our test job enough CPU to finish
- * in time, so don't error out even if the time is outside
- * the window */
- if (test_on_ci)
- TEST_WARN("%d/%d intervals failed\n", state.fails,
- state.calls);
- else
- TEST_FAIL("%d/%d intervals failed\n", state.fails,
- state.calls);
- } else
- TEST_SAY("All %d intervals okay\n", state.calls);
-}
-
-
-int main_0025_timers(int argc, char **argv) {
- do_test_stats_timer();
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c
deleted file mode 100644
index c8adc3885..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Consumer: pause and resume.
- * Make sure no messages are lost or duplicated.
- */
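-
-/* A minimal sketch (illustration only) of the pause/resume calls these
- * tests stress; both report per-partition errors in the list elements. */
-static void example_pause_resume(rd_kafka_t *rk) {
-        rd_kafka_topic_partition_list_t *parts;
-
-        if (rd_kafka_assignment(rk, &parts))
-                return;
-        rd_kafka_pause_partitions(rk, parts);  /* fetching stops */
-        /* ... poll() yields no messages while paused ... */
-        rd_kafka_resume_partitions(rk, parts); /* fetching restarts */
-        rd_kafka_topic_partition_list_destroy(parts);
-}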
-
-
-
-static void consume_pause(void) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int partition_cnt = 3;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_topic_partition_list_t *topics;
- rd_kafka_resp_err_t err;
- const int msgcnt = 1000;
- uint64_t testid;
- int it, iterations = 3;
- int msg_base = 0;
- int fails = 0;
- char group_id[32];
-
- SUB_TEST();
-
- test_conf_init(&conf, &tconf,
- 60 + (test_session_timeout_ms * 3 / 1000));
- test_conf_set(conf, "enable.partition.eof", "true");
- test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
-
- test_create_topic(NULL, topic, partition_cnt, 1);
-
- /* Produce messages */
- testid =
- test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);
-
- topics = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(topics, topic, -1);
-
- for (it = 0; it < iterations; it++) {
- const int pause_cnt = 5;
- int per_pause_msg_cnt = msgcnt / pause_cnt;
- const int pause_time = 1200 /* 1.2s */;
- int eof_cnt = -1;
- int pause;
- rd_kafka_topic_partition_list_t *parts;
- test_msgver_t mv_all;
- int j;
-
- test_msgver_init(&mv_all, testid); /* All messages */
-
- /* On the last iteration reuse the previous group.id
- * to make consumer start at committed offsets which should
-		 * also be EOF. This is to trigger #1307. */
- if (it < iterations - 1)
- test_str_id_generate(group_id, sizeof(group_id));
- else {
- TEST_SAY("Reusing previous group.id %s\n", group_id);
- per_pause_msg_cnt = 0;
- eof_cnt = partition_cnt;
- }
-
- TEST_SAY(
- "Iteration %d/%d, using group.id %s, "
- "expecting %d messages/pause and %d EOFs\n",
- it, iterations - 1, group_id, per_pause_msg_cnt, eof_cnt);
-
- rk = test_create_consumer(group_id, NULL,
- rd_kafka_conf_dup(conf),
- rd_kafka_topic_conf_dup(tconf));
-
-
- TEST_SAY("Subscribing to %d topic(s): %s\n", topics->cnt,
- topics->elems[0].topic);
- if ((err = rd_kafka_subscribe(rk, topics)))
- TEST_FAIL("Failed to subscribe: %s\n",
- rd_kafka_err2str(err));
-
-
- for (pause = 0; pause < pause_cnt; pause++) {
- int rcnt;
- test_timing_t t_assignment;
- test_msgver_t mv;
-
- test_msgver_init(&mv, testid);
- mv.fwd = &mv_all;
-
- /* Consume sub-part of the messages. */
- TEST_SAY(
- "Pause-Iteration #%d: Consume %d messages at "
- "msg_base %d\n",
- pause, per_pause_msg_cnt, msg_base);
- rcnt = test_consumer_poll(
- "consume.part", rk, testid, eof_cnt, msg_base,
- per_pause_msg_cnt == 0 ? -1 : per_pause_msg_cnt,
- &mv);
-
- TEST_ASSERT(rcnt == per_pause_msg_cnt,
- "expected %d messages, got %d",
- per_pause_msg_cnt, rcnt);
-
- test_msgver_verify("pause.iteration", &mv,
- TEST_MSGVER_PER_PART, msg_base,
- per_pause_msg_cnt);
- test_msgver_clear(&mv);
-
- msg_base += per_pause_msg_cnt;
-
- TIMING_START(&t_assignment, "rd_kafka_assignment()");
- if ((err = rd_kafka_assignment(rk, &parts)))
- TEST_FAIL("failed to get assignment: %s\n",
- rd_kafka_err2str(err));
- TIMING_STOP(&t_assignment);
-
- TEST_ASSERT(parts->cnt > 0,
- "parts->cnt %d, expected > 0", parts->cnt);
-
- TEST_SAY("Now pausing %d partition(s) for %dms\n",
- parts->cnt, pause_time);
- if ((err = rd_kafka_pause_partitions(rk, parts)))
- TEST_FAIL("Failed to pause: %s\n",
- rd_kafka_err2str(err));
-
- /* Check per-partition errors */
- for (j = 0; j < parts->cnt; j++) {
- if (parts->elems[j].err) {
- TEST_WARN(
- "pause failure for "
-				    "%s [%" PRId32 "]: %s\n",
- parts->elems[j].topic,
- parts->elems[j].partition,
- rd_kafka_err2str(
- parts->elems[j].err));
- fails++;
- }
- }
- TEST_ASSERT(fails == 0, "See previous warnings\n");
-
- TEST_SAY(
- "Waiting for %dms, should not receive any "
- "messages during this time\n",
- pause_time);
-
- test_consumer_poll_no_msgs("silence.while.paused", rk,
- testid, pause_time);
-
- TEST_SAY("Resuming %d partitions\n", parts->cnt);
- if ((err = rd_kafka_resume_partitions(rk, parts)))
- TEST_FAIL("Failed to resume: %s\n",
- rd_kafka_err2str(err));
-
- /* Check per-partition errors */
- for (j = 0; j < parts->cnt; j++) {
- if (parts->elems[j].err) {
- TEST_WARN(
- "resume failure for "
-				    "%s [%" PRId32 "]: %s\n",
- parts->elems[j].topic,
- parts->elems[j].partition,
- rd_kafka_err2str(
- parts->elems[j].err));
- fails++;
- }
- }
- TEST_ASSERT(fails == 0, "See previous warnings\n");
-
- rd_kafka_topic_partition_list_destroy(parts);
- }
-
- if (per_pause_msg_cnt > 0)
- test_msgver_verify("all.msgs", &mv_all,
- TEST_MSGVER_ALL_PART, 0, msgcnt);
- else
- test_msgver_verify("all.msgs", &mv_all,
- TEST_MSGVER_ALL_PART, 0, 0);
- test_msgver_clear(&mv_all);
-
- /* Should now not see any more messages. */
- test_consumer_poll_no_msgs("end.exp.no.msgs", rk, testid, 3000);
-
- test_consumer_close(rk);
-
- /* Hangs if bug isn't fixed */
- rd_kafka_destroy(rk);
- }
-
- rd_kafka_topic_partition_list_destroy(topics);
- rd_kafka_conf_destroy(conf);
- rd_kafka_topic_conf_destroy(tconf);
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Verify that the paused partition state is not used after
- * the partition has been re-assigned.
- *
- * 1. Produce N messages
- * 2. Consume N/4 messages
- * 3. Pause partitions
- * 4. Manually commit offset N/2
- * 5. Unassign partitions
- * 6. Assign partitions again
- * 7. Verify that consumption starts at N/2 and not N/4
- */
-static void consume_pause_resume_after_reassign(void) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int32_t partition = 0;
- const int msgcnt = 4000;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_partition_list_t *partitions, *pos;
- rd_kafka_resp_err_t err;
- int exp_msg_cnt;
- uint64_t testid;
- int r;
- int msg_base = 0;
- test_msgver_t mv;
- rd_kafka_topic_partition_t *toppar;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
-
- test_create_topic(NULL, topic, (int)partition + 1, 1);
-
- /* Produce messages */
- testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
-
- /* Set start offset to beginning */
- partitions = rd_kafka_topic_partition_list_new(1);
- toppar =
- rd_kafka_topic_partition_list_add(partitions, topic, partition);
- toppar->offset = RD_KAFKA_OFFSET_BEGINNING;
-
-
- /**
- * Create consumer.
- */
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "enable.partition.eof", "true");
- rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_assign("assign", rk, partitions);
-
-
- exp_msg_cnt = msgcnt / 4;
- TEST_SAY("Consuming first quarter (%d) of messages\n", exp_msg_cnt);
- test_msgver_init(&mv, testid);
- r = test_consumer_poll("consume.first.quarter", rk, testid, 0, msg_base,
- exp_msg_cnt, &mv);
- TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d",
- exp_msg_cnt, r);
-
-
- TEST_SAY("Pausing partitions\n");
- if ((err = rd_kafka_pause_partitions(rk, partitions)))
- TEST_FAIL("Failed to pause: %s", rd_kafka_err2str(err));
-
- TEST_SAY("Verifying pause, should see no new messages...\n");
- test_consumer_poll_no_msgs("silence.while.paused", rk, testid, 3000);
-
- test_msgver_verify("first.quarter", &mv, TEST_MSGVER_ALL_PART, msg_base,
- exp_msg_cnt);
- test_msgver_clear(&mv);
-
-
- /* Check position */
- pos = rd_kafka_topic_partition_list_copy(partitions);
- if ((err = rd_kafka_position(rk, pos)))
- TEST_FAIL("position() failed: %s", rd_kafka_err2str(err));
-
- TEST_ASSERT(!pos->elems[0].err,
- "position() returned error for our partition: %s",
- rd_kafka_err2str(pos->elems[0].err));
- TEST_SAY("Current application consume position is %" PRId64 "\n",
- pos->elems[0].offset);
- TEST_ASSERT(pos->elems[0].offset == (int64_t)exp_msg_cnt,
- "expected position %" PRId64 ", not %" PRId64,
- (int64_t)exp_msg_cnt, pos->elems[0].offset);
- rd_kafka_topic_partition_list_destroy(pos);
-
-
- toppar->offset = (int64_t)(msgcnt / 2);
- TEST_SAY("Committing (yet unread) offset %" PRId64 "\n",
- toppar->offset);
- if ((err = rd_kafka_commit(rk, partitions, 0 /*sync*/)))
- TEST_FAIL("Commit failed: %s", rd_kafka_err2str(err));
-
-
- TEST_SAY("Unassigning\n");
- test_consumer_unassign("Unassign", rk);
-
- /* Set start offset to INVALID so that the standard start offset
- * logic kicks in. */
- toppar->offset = RD_KAFKA_OFFSET_INVALID;
-
- TEST_SAY("Reassigning\n");
- test_consumer_assign("Reassign", rk, partitions);
-
-
- TEST_SAY("Resuming partitions\n");
- if ((err = rd_kafka_resume_partitions(rk, partitions)))
- TEST_FAIL("Failed to resume: %s", rd_kafka_err2str(err));
-
- msg_base = msgcnt / 2;
- exp_msg_cnt = msgcnt / 2;
- TEST_SAY("Consuming second half (%d) of messages at msg_base %d\n",
- exp_msg_cnt, msg_base);
- test_msgver_init(&mv, testid);
- r = test_consumer_poll("consume.second.half", rk, testid, 1 /*exp eof*/,
- msg_base, exp_msg_cnt, &mv);
- TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d",
- exp_msg_cnt, r);
-
- test_msgver_verify("second.half", &mv, TEST_MSGVER_ALL_PART, msg_base,
- exp_msg_cnt);
- test_msgver_clear(&mv);
-
-
- rd_kafka_topic_partition_list_destroy(partitions);
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- rd_kafka_resp_err_t err2;
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- /* Set start offset to beginning,
- * while auto.offset.reset is default at `latest`. */
-
- parts->elems[0].offset = RD_KAFKA_OFFSET_BEGINNING;
- test_consumer_assign("rebalance", rk, parts);
- TEST_SAY("Pausing partitions\n");
- if ((err2 = rd_kafka_pause_partitions(rk, parts)))
- TEST_FAIL("Failed to pause: %s",
- rd_kafka_err2str(err2));
- TEST_SAY("Resuming partitions\n");
- if ((err2 = rd_kafka_resume_partitions(rk, parts)))
-			TEST_FAIL("Failed to resume: %s",
- rd_kafka_err2str(err2));
- break;
- default:
- test_consumer_unassign("rebalance", rk);
- break;
- }
-}
-
-
-/**
- * @brief Verify that the assigned offset is used after pause+resume
- * if no messages were consumed prior to pause. #2105
- *
- * We do this by setting the start offset to BEGINNING in the rebalance_cb
- * and relying on auto.offset.reset=latest (default) to catch the failure case
- * where the assigned offset was not honoured.
- */
-static void consume_subscribe_assign_pause_resume(void) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int32_t partition = 0;
- const int msgcnt = 1;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- uint64_t testid;
- int r;
- test_msgver_t mv;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 20);
-
- test_create_topic(NULL, topic, (int)partition + 1, 1);
-
- /* Produce messages */
- testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
-
- /**
- * Create consumer.
- */
- rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "enable.partition.eof", "true");
- rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(rk, topic);
-
- test_msgver_init(&mv, testid);
- r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/, 0, msgcnt,
- &mv);
- TEST_ASSERT(r == msgcnt, "expected %d messages, got %d", msgcnt, r);
-
- test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 0, msgcnt);
- test_msgver_clear(&mv);
-
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief seek() prior to pause() may overwrite the seek()ed offset
- * when later resume()ing. #3471
- */
-static void consume_seek_pause_resume(void) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int32_t partition = 0;
- const int msgcnt = 1000;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- uint64_t testid;
- int r;
- test_msgver_t mv;
- rd_kafka_topic_partition_list_t *parts;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 20);
-
- test_create_topic(NULL, topic, (int)partition + 1, 1);
-
- /* Produce messages */
- testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
-
- /**
- * Create consumer.
- */
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "enable.partition.eof", "true");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- rk = test_create_consumer(topic, NULL, conf, NULL);
-
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, partition);
-
- TEST_SAY("Assigning partition\n");
- TEST_CALL_ERR__(rd_kafka_assign(rk, parts));
-
- rd_kafka_topic_partition_list_destroy(parts);
-
-
- TEST_SAY("Consuming messages 0..100\n");
- test_msgver_init(&mv, testid);
- r = test_consumer_poll("consume", rk, testid, 0, 0, 100, &mv);
- TEST_ASSERT(r == 100, "expected %d messages, got %d", 100, r);
-
- test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 0, 100);
- test_msgver_clear(&mv);
-
- parts = rd_kafka_topic_partition_list_new(1);
- TEST_SAY("Seeking to offset 500\n");
- rd_kafka_topic_partition_list_add(parts, topic, partition)->offset =
- 500;
- TEST_CALL_ERROR__(rd_kafka_seek_partitions(rk, parts, -1));
-
- TEST_SAY("Pausing\n");
- TEST_CALL_ERR__(rd_kafka_pause_partitions(rk, parts));
-
- TEST_SAY("Waiting a short while for things to settle\n");
- rd_sleep(2);
-
- TEST_SAY("Resuming\n");
- TEST_CALL_ERR__(rd_kafka_resume_partitions(rk, parts));
-
- TEST_SAY("Consuming remaining messages from offset 500.. hopefully\n");
- r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/,
- 500 /* base msgid */,
- -1 /* remaining messages */, &mv);
- TEST_ASSERT_LATER(r == 500, "expected %d messages, got %d", 500, r);
-
- test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 500, 500);
- test_msgver_clear(&mv);
-
- rd_kafka_topic_partition_list_destroy(parts);
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0026_consume_pause(int argc, char **argv) {
-
- consume_pause();
- consume_pause_resume_after_reassign();
- consume_subscribe_assign_pause_resume();
- consume_seek_pause_resume();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c
deleted file mode 100644
index 999d8f135..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Test long topic names (>=255 characters), issue #529.
- * This broker-side issue only seems to occur when explicitly creating
- * topics with kafka-topics.sh --create, not with auto-created topics.
- */
-
-
-int main_0028_long_topicnames(int argc, char **argv) {
- const int msgcnt = 1000;
- uint64_t testid;
- char topic[256];
- rd_kafka_t *rk_c;
-
- if (!test_can_create_topics(1))
- return 0;
-
- memset(topic, 'a', sizeof(topic) - 1);
- topic[sizeof(topic) - 1] = '\0';
-
- strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic) - 1);
-
- TEST_SAY("Using topic name of %d bytes: %s\n", (int)strlen(topic),
- topic);
-
- /* First try a non-verifying consumer. The consumer has been known
- * to crash when the broker bug kicks in. */
- rk_c = test_create_consumer(topic, NULL, NULL, NULL);
-
- /* Create topic */
- test_create_topic(rk_c, topic, 1, 1);
-
- test_consumer_subscribe(rk_c, topic);
- test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000);
- test_consumer_close(rk_c);
-
- /* Produce messages */
- testid =
- test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);
-
- /* Consume messages */
- test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c
deleted file mode 100644
index 5b3595baf..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Consumer: make sure specifying offsets in assign() works.
- */
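-
-/* A minimal sketch (illustration only) of the feature under test:
- * pre-setting an element's offset before rd_kafka_assign() starts
- * consumption there instead of at the committed/reset offset. */
-static void example_assign_at_offset(rd_kafka_t *rk, const char *topic,
-                                     int64_t offset) {
-        rd_kafka_topic_partition_list_t *parts =
-            rd_kafka_topic_partition_list_new(1);
-
-        rd_kafka_topic_partition_list_add(parts, topic, 0)->offset = offset;
-        rd_kafka_assign(rk, parts);
-        rd_kafka_topic_partition_list_destroy(parts);
-}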
-
-
-static const int msgcnt = 100; /* per-partition msgcnt */
-static const int partitions = 4;
-
-/* method 1: all assigned partitions use a fixed offset
- * (the END branch is left for partitions beyond the expected count,
- * which the rebalance callback removes before it can apply) */
-#define REB_METHOD_1 1
-/* method 2: first two partitions: fixed offset,
- * rest: INVALID (== stored == END)
- * issue #583 */
-#define REB_METHOD_2 2
-static int reb_method;
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- int i;
-
- TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err));
- test_print_partition_list(parts);
-
- if (parts->cnt < partitions)
- TEST_FAIL("rebalance_cb: Expected %d partitions, not %d",
- partitions, parts->cnt);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- for (i = 0; i < parts->cnt; i++) {
- if (i >= partitions) {
-				/* Don't assign() partitions we don't want. */
- rd_kafka_topic_partition_list_del_by_idx(parts,
- i);
- continue;
- }
-
- if (reb_method == REB_METHOD_1) {
- if (i < partitions)
- parts->elems[i].offset = msgcnt / 2;
- else
- parts->elems[i].offset =
- RD_KAFKA_OFFSET_END;
- } else if (reb_method == REB_METHOD_2) {
- if (i < 2)
- parts->elems[i].offset = msgcnt / 2;
- else
- parts->elems[i].offset =
- RD_KAFKA_OFFSET_INVALID;
- }
- }
- TEST_SAY("Use these offsets:\n");
- test_print_partition_list(parts);
- test_consumer_assign("HL.REBALANCE", rk, parts);
- break;
-
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- test_consumer_unassign("HL.REBALANCE", rk);
- break;
-
- default:
- TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
- }
-}
-
-int main_0029_assign_offset(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_topic_partition_list_t *parts;
- uint64_t testid;
- int i;
- test_timing_t t_simple, t_hl;
- test_msgver_t mv;
-
- test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000));
-
- /* Produce X messages to Y partitions so we get a
-	 * nice seekable 0..X offset on each partition. */
- /* Produce messages */
- testid = test_id_generate();
- rk = test_create_producer();
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- parts = rd_kafka_topic_partition_list_new(partitions);
-
- for (i = 0; i < partitions; i++) {
- test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
- /* Set start offset */
- rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
- msgcnt / 2;
- }
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
-
- /* Simple consumer */
- TIMING_START(&t_simple, "SIMPLE.CONSUMER");
- rk = test_create_consumer(topic, NULL, NULL, NULL);
- test_msgver_init(&mv, testid);
- test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
- test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
- partitions * (msgcnt / 2), &mv);
- for (i = 0; i < partitions; i++)
- test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
- topic, i, msgcnt / 2, msgcnt / 2);
- test_msgver_clear(&mv);
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_simple);
-
- rd_kafka_topic_partition_list_destroy(parts);
-
-
- /* High-level consumer: method 1
- * Offsets are set in rebalance callback. */
- if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) {
- reb_method = REB_METHOD_1;
- TIMING_START(&t_hl, "HL.CONSUMER");
- test_msgver_init(&mv, testid);
- rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
- test_consumer_subscribe(rk, topic);
- test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
- partitions * (msgcnt / 2), &mv);
- for (i = 0; i < partitions; i++)
- test_msgver_verify_part("HL.MSGS", &mv,
- TEST_MSGVER_ALL_PART, topic, i,
- msgcnt / 2, msgcnt / 2);
- test_msgver_clear(&mv);
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_hl);
-
-
- /* High-level consumer: method 2:
- * first two partitions are with fixed absolute offset, rest are
- * auto offset (stored, which is now at end).
- * Offsets are set in rebalance callback. */
- reb_method = REB_METHOD_2;
- TIMING_START(&t_hl, "HL.CONSUMER2");
- test_msgver_init(&mv, testid);
- rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
- test_consumer_subscribe(rk, topic);
- test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0,
- 2 * (msgcnt / 2), &mv);
- for (i = 0; i < partitions; i++) {
- if (i < 2)
- test_msgver_verify_part(
- "HL.MSGS2.A", &mv, TEST_MSGVER_ALL_PART,
- topic, i, msgcnt / 2, msgcnt / 2);
- }
- test_msgver_clear(&mv);
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_hl);
- }
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c
deleted file mode 100644
index 9b05cb420..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Consumer: various offset commit constellations, matrix:
- * enable.auto.commit, enable.auto.offset.store, async
- */
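-
-/* A minimal sketch (illustration only) of the fully manual leg of the
- * matrix (enable.auto.commit=false, enable.auto.offset.store=false):
- * storing marks a message as processed, committing persists it. */
-static void example_store_and_commit(rd_kafka_t *rk,
-                                     rd_kafka_message_t *rkm) {
-        rd_kafka_offset_store(rkm->rkt, rkm->partition,
-                              rkm->offset); /* stores offset+1 */
-        rd_kafka_commit_message(rk, rkm, 0 /*sync*/);
-}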
-
-static char *topic;
-static const int msgcnt = 100;
-static const int partition = 0;
-static uint64_t testid;
-
-static int64_t expected_offset = 0;
-static int64_t committed_offset = -1;
-
-
-static void offset_commit_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- rd_kafka_topic_partition_t *rktpar;
-
- TEST_SAYL(3, "Offset committed: %s:\n", rd_kafka_err2str(err));
- if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
- return;
-
- test_print_partition_list(offsets);
- if (err)
- TEST_FAIL("Offset commit failed: %s", rd_kafka_err2str(err));
- if (offsets->cnt == 0)
- TEST_FAIL(
- "Expected at least one partition in offset_commit_cb");
-
- /* Find correct partition */
- if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, topic,
- partition)))
- return;
-
- if (rktpar->err)
-		TEST_FAIL("Offset commit failed for partition: %s",
- rd_kafka_err2str(rktpar->err));
-
- if (rktpar->offset > expected_offset)
- TEST_FAIL("Offset committed %" PRId64
- " > expected offset %" PRId64,
- rktpar->offset, expected_offset);
-
- if (rktpar->offset < committed_offset)
- TEST_FAIL("Old offset %" PRId64
- " (re)committed: "
- "should be above committed_offset %" PRId64,
- rktpar->offset, committed_offset);
- else if (rktpar->offset == committed_offset)
- TEST_SAYL(1, "Current offset re-committed: %" PRId64 "\n",
- rktpar->offset);
- else
- committed_offset = rktpar->offset;
-
- if (rktpar->offset < expected_offset) {
- TEST_SAYL(3,
- "Offset committed %" PRId64
- " < expected offset %" PRId64 "\n",
- rktpar->offset, expected_offset);
- return;
- }
-
- TEST_SAYL(3, "Expected offset committed: %" PRId64 "\n",
- rktpar->offset);
-}
-
-
-static void do_offset_test(const char *what,
- int auto_commit,
- int auto_store,
- int async,
- int subscribe) {
- test_timing_t t_all;
- char groupid[64];
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- int cnt = 0;
- const int extra_cnt = 5;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *parts;
- rd_kafka_topic_partition_t *rktpar;
- int64_t next_offset = -1;
-
- SUB_TEST_QUICK("%s", what);
-
- test_conf_init(&conf, &tconf, subscribe ? 30 : 10);
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "enable.auto.commit",
- auto_commit ? "true" : "false");
- test_conf_set(conf, "enable.auto.offset.store",
- auto_store ? "true" : "false");
- test_conf_set(conf, "auto.commit.interval.ms", "500");
- rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
- test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
- test_str_id_generate(groupid, sizeof(groupid));
- test_conf_set(conf, "group.id", groupid);
- rd_kafka_conf_set_default_topic_conf(conf, tconf);
-
- TIMING_START(&t_all, "%s", what);
-
- expected_offset = 0;
- committed_offset = -1;
-
- /* MO:
- * - Create consumer.
- * - Start consuming from beginning
- * - Perform store & commits according to settings
-	 * - Stop storing & committing when half of the messages are
-	 *   consumed, but consume 5 more to check against.
- * - Query position.
- * - Destroy consumer.
- * - Create new consumer with same group.id using stored offsets
- * - Should consume the expected message.
- */
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));
-
- rd_kafka_poll_set_consumer(rk);
-
- if (subscribe) {
- test_consumer_subscribe(rk, topic);
- } else {
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, partition);
- test_consumer_assign("ASSIGN", rk, parts);
- rd_kafka_topic_partition_list_destroy(parts);
- }
-
- while (cnt - extra_cnt < msgcnt / 2) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(rk, 10 * 1000);
- if (!rkm)
- continue;
-
- if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- TEST_FAIL("%s: Timed out waiting for message %d", what,
- cnt);
- else if (rkm->err)
- TEST_FAIL("%s: Consumer error: %s", what,
- rd_kafka_message_errstr(rkm));
-
- /* Offset of next message. */
- next_offset = rkm->offset + 1;
-
- if (cnt < msgcnt / 2) {
- if (!auto_store) {
- err = rd_kafka_offset_store(
- rkm->rkt, rkm->partition, rkm->offset);
- if (err)
- TEST_FAIL(
- "%s: offset_store failed: %s\n",
- what, rd_kafka_err2str(err));
- }
- expected_offset = rkm->offset + 1;
- if (!auto_commit) {
- test_timing_t t_commit;
- TIMING_START(&t_commit, "%s @ %" PRId64,
- async ? "commit.async"
- : "commit.sync",
- rkm->offset + 1);
- err = rd_kafka_commit_message(rk, rkm, async);
- TIMING_STOP(&t_commit);
- if (err)
- TEST_FAIL("%s: commit failed: %s\n",
- what, rd_kafka_err2str(err));
- }
-
- } else if (auto_store && auto_commit)
- expected_offset = rkm->offset + 1;
-
- rd_kafka_message_destroy(rkm);
- cnt++;
- }
-
- TEST_SAY("%s: done consuming after %d messages, at offset %" PRId64
- ", next_offset %" PRId64 "\n",
- what, cnt, expected_offset, next_offset);
-
- if ((err = rd_kafka_assignment(rk, &parts)))
- TEST_FAIL("%s: failed to get assignment(): %s\n", what,
- rd_kafka_err2str(err));
-
- /* Verify position */
- if ((err = rd_kafka_position(rk, parts)))
- TEST_FAIL("%s: failed to get position(): %s\n", what,
- rd_kafka_err2str(err));
- if (!(rktpar =
- rd_kafka_topic_partition_list_find(parts, topic, partition)))
- TEST_FAIL("%s: position(): topic lost\n", what);
- if (rktpar->offset != next_offset)
- TEST_FAIL("%s: Expected position() offset %" PRId64
- ", got %" PRId64,
- what, next_offset, rktpar->offset);
- TEST_SAY("%s: Position is at %" PRId64 ", good!\n", what,
- rktpar->offset);
-
- /* Pause messages while waiting so we can serve callbacks
- * without having more messages received. */
- if ((err = rd_kafka_pause_partitions(rk, parts)))
- TEST_FAIL("%s: failed to pause partitions: %s\n", what,
- rd_kafka_err2str(err));
- rd_kafka_topic_partition_list_destroy(parts);
-
- /* Fire off any enqueued offset_commit_cb */
- test_consumer_poll_no_msgs(what, rk, testid, 0);
-
- TEST_SAY("%s: committed_offset %" PRId64 ", expected_offset %" PRId64
- "\n",
- what, committed_offset, expected_offset);
-
- if (!auto_commit && !async) {
- /* Sync commits should be up to date at this point. */
- if (committed_offset != expected_offset)
- TEST_FAIL("%s: Sync commit: committed offset %" PRId64
- " should be same as expected offset "
- "%" PRId64,
- what, committed_offset, expected_offset);
- } else {
-
- /* Wait for offset commits to catch up */
- while (committed_offset < expected_offset) {
- TEST_SAYL(2,
- "%s: Wait for committed offset %" PRId64
- " to reach expected offset %" PRId64 "\n",
- what, committed_offset, expected_offset);
- test_consumer_poll_no_msgs(what, rk, testid, 1000);
- }
- }
-
- TEST_SAY(
- "%s: phase 1 complete, %d messages consumed, "
- "next expected offset is %" PRId64 "\n",
- what, cnt, expected_offset);
-
- /* Issue #827: cause committed() to return prematurely by specifying
- * low timeout. The bug (use after free) will only
-	 * be caught by valgrind.
- *
- * rusage: this triggers a bunch of protocol requests which
- * increase .ucpu, .scpu, .ctxsw.
- */
- do {
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, partition);
- err = rd_kafka_committed(rk, parts, 1);
- rd_kafka_topic_partition_list_destroy(parts);
- if (err)
- TEST_SAY("Issue #827: committed() returned %s\n",
- rd_kafka_err2str(err));
- } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);
-
- /* Query position */
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, partition);
-
- err = rd_kafka_committed(rk, parts, tmout_multip(5 * 1000));
- if (err)
- TEST_FAIL("%s: committed() failed: %s", what,
- rd_kafka_err2str(err));
- if (!(rktpar =
- rd_kafka_topic_partition_list_find(parts, topic, partition)))
- TEST_FAIL("%s: committed(): topic lost\n", what);
- if (rktpar->offset != expected_offset)
- TEST_FAIL("%s: Expected committed() offset %" PRId64
- ", got %" PRId64,
- what, expected_offset, rktpar->offset);
- TEST_SAY("%s: Committed offset is at %" PRId64 ", good!\n", what,
- rktpar->offset);
-
- rd_kafka_topic_partition_list_destroy(parts);
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
-
-
- /* Fire up a new consumer and continue from where we left off. */
- TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",
- what);
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
- rd_kafka_poll_set_consumer(rk);
-
- if (subscribe) {
- test_consumer_subscribe(rk, topic);
- } else {
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, partition);
- test_consumer_assign("ASSIGN", rk, parts);
- rd_kafka_topic_partition_list_destroy(parts);
- }
-
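- /* Consume the remainder; every offset must continue exactly
- * from the point committed in phase 1. */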
- while (cnt < msgcnt) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(rk, 10 * 1000);
- if (!rkm)
- continue;
-
- if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- TEST_FAIL("%s: Timed out waiting for message %d", what,
- cnt);
- else if (rkm->err)
- TEST_FAIL("%s: Consumer error: %s", what,
- rd_kafka_message_errstr(rkm));
-
- if (rkm->offset != expected_offset)
- TEST_FAIL("%s: Received message offset %" PRId64
- ", expected %" PRId64 " at msgcnt %d/%d\n",
- what, rkm->offset, expected_offset, cnt,
- msgcnt);
-
- rd_kafka_message_destroy(rkm);
- expected_offset++;
- cnt++;
- }
-
-
- TEST_SAY("%s: phase 2: complete\n", what);
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- TIMING_STOP(&t_all);
-
- SUB_TEST_PASS();
-}
-
-
-static void empty_offset_commit_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque;
- int valid_offsets = 0;
- int i;
-
- TEST_SAY(
- "Offset commit callback for %d partitions: %s (expecting %s)\n",
- offsets ? offsets->cnt : 0, rd_kafka_err2str(err),
- rd_kafka_err2str(expected));
-
- if (expected != err)
- TEST_FAIL("Offset commit cb: expected %s, got %s",
- rd_kafka_err2str(expected), rd_kafka_err2str(err));
-
- for (i = 0; i < offsets->cnt; i++) {
- TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n",
- offsets->elems[i].topic, offsets->elems[i].partition,
- offsets->elems[i].offset,
- rd_kafka_err2str(offsets->elems[i].err));
-
- if (expected == RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_ASSERT(offsets->elems[i].err == expected);
- if (offsets->elems[i].offset > 0)
- valid_offsets++;
- }
-
- if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) {
- /* If no error is expected we instead expect one proper offset
- * to have been committed. */
- TEST_ASSERT(valid_offsets > 0);
- }
-}
-
-
-/**
- * Trigger an empty cgrp commit (issue #803)
- */
-static void do_empty_commit(void) {
- rd_kafka_t *rk;
- char group_id[64];
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_resp_err_t err, expect;
-
- SUB_TEST_QUICK();
-
- test_conf_init(&conf, &tconf, 20);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
- test_str_id_generate(group_id, sizeof(group_id));
-
- TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id);
-
- rk = test_create_consumer(group_id, NULL, conf, tconf);
-
- test_consumer_subscribe(rk, topic);
-
- test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL);
-
- TEST_SAY("First commit\n");
- expect = RD_KAFKA_RESP_ERR_NO_ERROR;
- err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb,
- &expect);
- if (err != expect)
- TEST_FAIL("commit failed: %s", rd_kafka_err2str(err));
- else
- TEST_SAY("First commit returned %s\n", rd_kafka_err2str(err));
-
- TEST_SAY("Second commit, should be empty\n");
- expect = RD_KAFKA_RESP_ERR__NO_OFFSET;
- err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb,
- &expect);
- if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
- TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s",
- rd_kafka_err2str(err));
- else
- TEST_SAY("Second commit returned %s\n", rd_kafka_err2str(err));
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * Commit non-existent topic (issue #704)
- */
-static void nonexist_offset_commit_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- int i;
- int failed_offsets = 0;
-
- TEST_SAY("Offset commit callback for %d partitions: %s\n",
- offsets ? offsets->cnt : 0, rd_kafka_err2str(err));
-
- TEST_ASSERT(offsets != NULL);
-
- for (i = 0; i < offsets->cnt; i++) {
- TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n",
- offsets->elems[i].topic, offsets->elems[i].partition,
- offsets->elems[i].offset,
- rd_kafka_err2str(offsets->elems[i].err));
- failed_offsets += offsets->elems[i].err ? 1 : 0;
- }
-
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
- "expected unknown Topic or partition, not %s",
- rd_kafka_err2str(err));
- TEST_ASSERT(offsets->cnt == 2, "expected 2 offsets, got %d",
- offsets->cnt);
- TEST_ASSERT(failed_offsets == offsets->cnt,
- "expected %d offsets to have failed, got %d", offsets->cnt,
- failed_offsets);
-}
-
-static void do_nonexist_commit(void) {
- rd_kafka_t *rk;
- char group_id[64];
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_topic_partition_list_t *offsets;
- const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
- rd_kafka_resp_err_t err;
-
- SUB_TEST_QUICK();
-
- test_conf_init(&conf, &tconf, 20);
- /* Offset commit deferrals when the broker is down are limited to
- * session.timeout.ms. With 0.9 brokers and api.version.request=true
- * the initial connect to all brokers will take 10*2 seconds
- * and the commit_queue() below will time out too quickly.
- * Set the session timeout high here to avoid it. */
- test_conf_set(conf, "session.timeout.ms", "60000");
-
- test_str_id_generate(group_id, sizeof(group_id));
- test_conf_set(conf, "group.id", group_id);
-
- rd_kafka_conf_set_default_topic_conf(conf, tconf);
-
- TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);
-
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
- rd_kafka_poll_set_consumer(rk);
-
- TEST_SAY("Try nonexist commit\n");
- offsets = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
- rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;
-
- err = rd_kafka_commit_queue(rk, offsets, NULL,
- nonexist_offset_commit_cb, NULL);
- TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
- if (err != RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
- TEST_FAIL("commit() should give UnknownTopicOrPart, not: %s",
- rd_kafka_err2str(err));
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0030_offset_commit(int argc, char **argv) {
-
- topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
-
- do_empty_commit();
-
- do_nonexist_commit();
-
- do_offset_test("AUTO.COMMIT & AUTO.STORE", 1 /* enable.auto.commit */,
- 1 /* enable.auto.offset.store */, 0 /* not used. */,
- 1 /* use subscribe */);
-
- do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE",
- 0 /* enable.auto.commit */,
- 1 /* enable.auto.offset.store */, 1 /* async */,
- 1 /* use subscribe */);
-
- do_offset_test("AUTO.COMMIT.ASYNC & AUTO.STORE & ASSIGN",
- 1 /* enable.auto.commit */,
- 1 /* enable.auto.offset.store */, 0 /* not used. */,
- 0 /* use assign */);
-
- if (test_quick) {
- rd_free(topic);
- return 0;
- }
-
- do_offset_test("AUTO.COMMIT & MANUAL.STORE", 1 /* enable.auto.commit */,
- 0 /* enable.auto.offset.store */, 0 /* not used */,
- 1 /* use subscribe */);
-
- do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE",
- 0 /* enable.auto.commit */,
- 1 /* enable.auto.offset.store */, 0 /* async */,
- 1 /* use subscribe */);
-
- do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE",
- 0 /* enable.auto.commit */,
- 0 /* enable.auto.offset.store */, 1 /* sync */,
- 1 /* use subscribe */);
-
- do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE",
- 0 /* enable.auto.commit */,
- 0 /* enable.auto.offset.store */, 0 /* sync */,
- 1 /* use subscribe */);
-
- rd_free(topic);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c
deleted file mode 100644
index 327be43df..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c
+++ /dev/null
@@ -1,119 +0,0 @@
-
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Verify that rd_kafka_(query|get)_watermark_offsets() works.
- */
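-
-/* Usage sketch (illustrative, not part of the original test):
- * query_ performs a broker round-trip while get_ only reads the client's
- * locally cached watermarks, so get_ is cheap but may lag behind until the
- * consumer has fetched at least one message.
- *
- *   int64_t lo, hi;
- *   rd_kafka_query_watermark_offsets(rk, topic, 0, &lo, &hi, 5000);
- *   rd_kafka_get_watermark_offsets(rk, topic, 0, &lo, &hi);
- */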
-
-
-int main_0031_get_offsets(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int msgcnt = test_quick ? 10 : 100;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- int64_t qry_low = -1234, qry_high = -1235;
- int64_t get_low = -1234, get_high = -1235;
- rd_kafka_resp_err_t err;
- test_timing_t t_qry, t_get;
- uint64_t testid;
-
- /* Produce messages */
- testid = test_produce_msgs_easy(topic, 0, 0, msgcnt);
-
- /* Get offsets */
- rk = test_create_consumer(NULL, NULL, NULL, NULL);
-
- TIMING_START(&t_qry, "query_watermark_offsets");
- err = rd_kafka_query_watermark_offsets(
- rk, topic, 0, &qry_low, &qry_high, tmout_multip(10 * 1000));
- TIMING_STOP(&t_qry);
- if (err)
- TEST_FAIL("query_watermark_offsets failed: %s\n",
- rd_kafka_err2str(err));
-
- if (qry_low != 0 || qry_high != msgcnt)
- TEST_FAIL(
- "Expected low,high %d,%d, but got "
- "%" PRId64 ",%" PRId64,
- 0, msgcnt, qry_low, qry_high);
-
- TEST_SAY(
- "query_watermark_offsets: "
- "offsets %" PRId64 ", %" PRId64 "\n",
- qry_low, qry_high);
-
- /* Now start consuming to update the offset cache, then query it
- * with the get_ API. */
- rkt = test_create_topic_object(rk, topic, NULL);
-
- test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING);
- test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK, 0, msgcnt, 0);
- /* After at least one message has been consumed the
- * watermarks are cached. */
-
- TIMING_START(&t_get, "get_watermark_offsets");
- err = rd_kafka_get_watermark_offsets(rk, topic, 0, &get_low, &get_high);
- TIMING_STOP(&t_get);
- if (err)
- TEST_FAIL("get_watermark_offsets failed: %s\n",
- rd_kafka_err2str(err));
-
- TEST_SAY(
- "get_watermark_offsets: "
- "offsets %" PRId64 ", %" PRId64 "\n",
- get_low, get_high);
-
- if (get_high != qry_high)
- TEST_FAIL(
- "query/get discrepancies: "
- "low: %" PRId64 "/%" PRId64 ", high: %" PRId64 "/%" PRId64,
- qry_low, get_low, qry_high, get_high);
- if (get_low >= get_high)
- TEST_FAIL(
- "get_watermark_offsets: "
- "low %" PRId64 " >= high %" PRId64,
- get_low, get_high);
-
- /* FIXME: We currently dont bother checking the get_low offset
- * since it requires stats to be enabled. */
-
- test_consumer_stop("get", rkt, 0);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c
deleted file mode 100644
index f31d33ebc..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * KafkaConsumer: regex topic subscriptions
- */
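-
-/* Usage sketch (illustrative, not part of the original test): topic names
- * beginning with '^' are treated as regular expressions by subscribe(),
- * plain names are matched literally, and both kinds may be mixed in one
- * subscription list.
- *
- *   rd_kafka_topic_partition_list_t *tl =
- *       rd_kafka_topic_partition_list_new(1);
- *   rd_kafka_topic_partition_list_add(tl, "^myprefix_.*",
- *                                     RD_KAFKA_PARTITION_UA);
- *   rd_kafka_subscribe(rk, tl);
- *   rd_kafka_topic_partition_list_destroy(tl);
- */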
-
-
-
-struct expect {
- char *name; /* sub-test name */
- const char *sub[4]; /* subscriptions */
- const char *exp[4]; /* expected topics */
- int exp_err; /* expected error from subscribe() */
- int stat[4]; /* per exp status */
- int fails;
- enum { _EXP_NONE,
- _EXP_FAIL,
- _EXP_OK,
- _EXP_ASSIGN,
- _EXP_REVOKE,
- _EXP_ASSIGNED,
- _EXP_REVOKED,
- } result;
-};
-
-static struct expect *exp_curr;
-
-static uint64_t testid;
-
-static void expect_match(struct expect *exp,
- const rd_kafka_topic_partition_list_t *parts) {
- int i;
- int e = 0;
- int fails = 0;
-
- memset(exp->stat, 0, sizeof(exp->stat));
-
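- /* Count, for each expected topic, how many of the assigned
- * partitions matched it. */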
- for (i = 0; i < parts->cnt; i++) {
- int found = 0;
- e = 0;
- while (exp->exp[e]) {
- if (!strcmp(parts->elems[i].topic, exp->exp[e])) {
- exp->stat[e]++;
- found++;
- }
- e++;
- }
-
- if (!found) {
- TEST_WARN("%s: got unexpected topic match: %s\n",
- exp->name, parts->elems[i].topic);
- fails++;
- }
- }
-
-
- e = 0;
- while (exp->exp[e]) {
- if (!exp->stat[e]) {
- TEST_WARN(
- "%s: expected topic not "
- "found in assignment: %s\n",
- exp->name, exp->exp[e]);
- fails++;
- } else {
- TEST_SAY("%s: expected topic %s seen in assignment\n",
- exp->name, exp->exp[e]);
- }
- e++;
- }
-
- exp->fails += fails;
- if (fails) {
- TEST_WARN("%s: see %d previous failures\n", exp->name, fails);
- exp->result = _EXP_FAIL;
- } else {
- TEST_SAY(_C_MAG "[ %s: assignment matched ]\n", exp->name);
- exp->result = _EXP_OK;
- }
-}
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- struct expect *exp = exp_curr;
-
- TEST_ASSERT(exp_curr, "exp_curr not set");
-
- TEST_SAY("rebalance_cb: %s with %d partition(s)\n",
- rd_kafka_err2str(err), parts->cnt);
- test_print_partition_list(parts);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- /* Check that provided partitions match our expectations */
- if (exp->result != _EXP_ASSIGN) {
- TEST_WARN(
- "%s: rebalance called while expecting %d: "
- "too many or undesired assignment(s?\n",
- exp->name, exp->result);
- }
- expect_match(exp, parts);
- test_consumer_assign("rebalance", rk, parts);
- exp->result = _EXP_ASSIGNED;
- break;
-
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- if (exp->result != _EXP_REVOKE) {
- TEST_WARN(
- "%s: rebalance called while expecting %d: "
- "too many or undesired assignment(s?\n",
- exp->name, exp->result);
- }
-
- test_consumer_unassign("rebalance", rk);
- exp->result = _EXP_REVOKED;
- break;
-
- default:
- TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
- }
-}
-
-
-/**
- * @brief Poll the consumer once.
- */
-static void consumer_poll_once(rd_kafka_t *rk) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage = rd_kafka_consumer_poll(rk, 1000);
- if (!rkmessage)
- return;
-
- if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("%s [%" PRId32
- "] reached EOF at "
- "offset %" PRId64 "\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset);
-
- } else if (rkmessage->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) {
- if (strstr(rd_kafka_topic_name(rkmessage->rkt), "NONEXIST"))
- TEST_SAY("%s: %s: error is expected for this topic\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rd_kafka_message_errstr(rkmessage));
- else
- TEST_FAIL(
- "%s [%" PRId32 "] error (offset %" PRId64 "): %s",
- rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
- : "(no-topic)",
- rkmessage->partition, rkmessage->offset,
- rd_kafka_message_errstr(rkmessage));
- }
-
- rd_kafka_message_destroy(rkmessage);
-}
-
-
-
-static int test_subscribe(rd_kafka_t *rk, struct expect *exp) {
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *tlist;
- int i;
- test_timing_t t_sub, t_assign, t_unsub;
-
- exp_curr = exp;
-
- test_timeout_set((test_session_timeout_ms / 1000) * 3);
-
- tlist = rd_kafka_topic_partition_list_new(4);
- TEST_SAY(_C_MAG "[ %s: begin ]\n", exp->name);
- i = 0;
- TEST_SAY("Topic subscription:\n");
- while (exp->sub[i]) {
- TEST_SAY("%s: %s\n", exp->name, exp->sub[i]);
- rd_kafka_topic_partition_list_add(tlist, exp->sub[i],
- RD_KAFKA_PARTITION_UA);
- i++;
- }
-
- /* Subscribe */
- TIMING_START(&t_sub, "subscribe");
- err = rd_kafka_subscribe(rk, tlist);
- TIMING_STOP(&t_sub);
- TEST_ASSERT(err == exp->exp_err, "subscribe() failed: %s (expected %s)",
- rd_kafka_err2str(err), rd_kafka_err2str(exp->exp_err));
-
- if (exp->exp[0]) {
- /* Wait for assignment, actual messages are ignored. */
- exp->result = _EXP_ASSIGN;
- TEST_SAY("%s: waiting for assignment\n", exp->name);
- TIMING_START(&t_assign, "assignment");
- while (exp->result == _EXP_ASSIGN)
- consumer_poll_once(rk);
- TIMING_STOP(&t_assign);
- TEST_ASSERT(exp->result == _EXP_ASSIGNED,
- "got %d instead of assignment", exp->result);
-
- } else {
- /* Not expecting any assignment */
- int64_t ts_end = test_clock() + 5000;
- exp->result = _EXP_NONE; /* Not expecting a rebalance */
- while (exp->result == _EXP_NONE && test_clock() < ts_end)
- consumer_poll_once(rk);
- TEST_ASSERT(exp->result == _EXP_NONE);
- }
-
- /* Unsubscribe */
- TIMING_START(&t_unsub, "unsubscribe");
- err = rd_kafka_unsubscribe(rk);
- TIMING_STOP(&t_unsub);
- TEST_ASSERT(!err, "unsubscribe() failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_topic_partition_list_destroy(tlist);
-
- if (exp->exp[0]) {
- /* Wait for revoke, actual messages are ignored. */
- TEST_SAY("%s: waiting for revoke\n", exp->name);
- exp->result = _EXP_REVOKE;
- TIMING_START(&t_assign, "revoke");
- while (exp->result != _EXP_REVOKED)
- consumer_poll_once(rk);
- TIMING_STOP(&t_assign);
- TEST_ASSERT(exp->result == _EXP_REVOKED,
- "got %d instead of revoke", exp->result);
- } else {
- /* Not expecting any revoke */
- int64_t ts_end = test_clock() + 5000;
- exp->result = _EXP_NONE; /* Not expecting a rebalance */
- while (exp->result == _EXP_NONE && test_clock() < ts_end)
- consumer_poll_once(rk);
- TEST_ASSERT(exp->result == _EXP_NONE);
- }
-
- TEST_SAY(_C_MAG "[ %s: done with %d failures ]\n", exp->name,
- exp->fails);
-
- return exp->fails;
-}
-
-
-static int do_test(const char *assignor) {
- static char topics[3][128];
- static char nonexist_topic[128];
- const int topic_cnt = 3;
- rd_kafka_t *rk;
- const int msgcnt = 10;
- int i;
- char groupid[64];
- int fails = 0;
- rd_kafka_conf_t *conf;
-
- if (!test_check_builtin("regex")) {
- TEST_SKIP("regex support not built in\n");
- return 0;
- }
-
- testid = test_id_generate();
- test_str_id_generate(groupid, sizeof(groupid));
-
- rd_snprintf(topics[0], sizeof(topics[0]), "%s_%s",
- test_mk_topic_name("regex_subscribe_TOPIC_0001_UNO", 0),
- groupid);
- rd_snprintf(topics[1], sizeof(topics[1]), "%s_%s",
- test_mk_topic_name("regex_subscribe_topic_0002_dup", 0),
- groupid);
- rd_snprintf(topics[2], sizeof(topics[2]), "%s_%s",
- test_mk_topic_name("regex_subscribe_TOOTHPIC_0003_3", 0),
- groupid);
-
- /* To keep auto topic creation from kicking in we use
- * an invalid topic name. */
- rd_snprintf(
- nonexist_topic, sizeof(nonexist_topic), "%s_%s",
- test_mk_topic_name("regex_subscribe_NONEXISTENT_0004_IV#!", 0),
- groupid);
-
- /* Produce messages to topics to ensure creation. */
- for (i = 0; i < topic_cnt; i++)
- test_produce_msgs_easy(topics[i], testid, RD_KAFKA_PARTITION_UA,
- msgcnt);
-
- test_conf_init(&conf, NULL, 20);
- test_conf_set(conf, "partition.assignment.strategy", assignor);
- /* Speed up propagation of new topics */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
- test_conf_set(conf, "allow.auto.create.topics", "true");
-
- /* Create a single consumer to handle all subscriptions.
- * Has the nice side effect of testing multiple subscriptions. */
- rk = test_create_consumer(groupid, rebalance_cb, conf, NULL);
-
- /*
- * Test cases
- */
- {
- struct expect expect = {.name = rd_strdup(tsprintf(
- "%s: no regexps (0&1)", assignor)),
- .sub = {topics[0], topics[1], NULL},
- .exp = {topics[0], topics[1], NULL}};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- }
-
- {
- struct expect expect = {.name =
- rd_strdup(tsprintf("%s: no regexps "
- "(no matches)",
- assignor)),
- .sub = {nonexist_topic, NULL},
- .exp = {NULL}};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- }
-
- {
- struct expect expect = {
- .name = rd_strdup(tsprintf("%s: regex all", assignor)),
- .sub = {rd_strdup(tsprintf("^.*_%s", groupid)), NULL},
- .exp = {topics[0], topics[1], topics[2], NULL}};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- rd_free((void *)expect.sub[0]);
- }
-
- {
- struct expect expect = {
- .name = rd_strdup(tsprintf("%s: regex 0&1", assignor)),
- .sub = {rd_strdup(tsprintf(
- "^.*[tToOpPiIcC]_0+[12]_[^_]+_%s", groupid)),
- NULL},
- .exp = {topics[0], topics[1], NULL}};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- rd_free((void *)expect.sub[0]);
- }
-
- {
- struct expect expect = {
- .name = rd_strdup(tsprintf("%s: regex 2", assignor)),
- .sub = {rd_strdup(
- tsprintf("^.*TOOTHPIC_000._._%s", groupid)),
- NULL},
- .exp = {topics[2], NULL}};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- rd_free((void *)expect.sub[0]);
- }
-
- {
- struct expect expect = {
- .name = rd_strdup(tsprintf("%s: regex 2 and "
- "nonexistent(not seen)",
- assignor)),
- .sub = {rd_strdup(tsprintf("^.*_000[34]_..?_%s", groupid)),
- NULL},
- .exp = {topics[2], NULL}};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- rd_free((void *)expect.sub[0]);
- }
-
- {
- struct expect expect = {
- .name = rd_strdup(
- tsprintf("%s: broken regex (no matches)", assignor)),
- .sub = {"^.*[0", NULL},
- .exp = {NULL},
- .exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG};
-
- fails += test_subscribe(rk, &expect);
- rd_free(expect.name);
- }
-
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-
- if (fails)
- TEST_FAIL("See %d previous failures", fails);
-
- return 0;
-}
-
-
-int main_0033_regex_subscribe(int argc, char **argv) {
- do_test("range");
- do_test("roundrobin");
- return 0;
-}
-
-
-/**
- * @brief Subscription API tests that don't require a broker
- */
-int main_0033_regex_subscribe_local(int argc, char **argv) {
- rd_kafka_topic_partition_list_t *valids, *invalids, *none, *empty,
- *alot;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_resp_err_t err;
- char errstr[256];
- int i;
-
- valids = rd_kafka_topic_partition_list_new(0);
- invalids = rd_kafka_topic_partition_list_new(100);
- none = rd_kafka_topic_partition_list_new(1000);
- empty = rd_kafka_topic_partition_list_new(5);
- alot = rd_kafka_topic_partition_list_new(1);
-
- rd_kafka_topic_partition_list_add(valids, "not_a_regex", 0);
- rd_kafka_topic_partition_list_add(valids, "^My[vV]alid..regex+", 0);
- rd_kafka_topic_partition_list_add(valids, "^another_one$", 55);
-
- rd_kafka_topic_partition_list_add(invalids, "not_a_regex", 0);
- rd_kafka_topic_partition_list_add(invalids, "^My[vV]alid..regex+", 0);
- rd_kafka_topic_partition_list_add(invalids, "^a[b", 99);
-
- rd_kafka_topic_partition_list_add(empty, "not_a_regex", 0);
- rd_kafka_topic_partition_list_add(empty, "", 0);
- rd_kafka_topic_partition_list_add(empty, "^ok", 0);
-
- for (i = 0; i < 10000; i++) {
- char topic[32];
- rd_snprintf(topic, sizeof(topic), "^Va[lLid]_regex_%d$", i);
- rd_kafka_topic_partition_list_add(alot, topic, i);
- }
-
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "group.id", "group");
- test_conf_set(conf, "client.id", test_curr->name);
-
- rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
- if (!rk)
- TEST_FAIL("Failed to create consumer: %s", errstr);
-
- err = rd_kafka_subscribe(rk, valids);
- TEST_ASSERT(!err, "valids failed: %s", rd_kafka_err2str(err));
-
- err = rd_kafka_subscribe(rk, invalids);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "invalids failed with wrong return: %s",
- rd_kafka_err2str(err));
-
- err = rd_kafka_subscribe(rk, none);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "none failed with wrong return: %s", rd_kafka_err2str(err));
-
- err = rd_kafka_subscribe(rk, empty);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "empty failed with wrong return: %s",
- rd_kafka_err2str(err));
-
- err = rd_kafka_subscribe(rk, alot);
- TEST_ASSERT(!err, "alot failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- rd_kafka_topic_partition_list_destroy(valids);
- rd_kafka_topic_partition_list_destroy(invalids);
- rd_kafka_topic_partition_list_destroy(none);
- rd_kafka_topic_partition_list_destroy(empty);
- rd_kafka_topic_partition_list_destroy(alot);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c
deleted file mode 100644
index 9276764c8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-#include "../src/rdkafka_protocol.h"
-
-
-/**
- * Issue #559: make sure auto.offset.reset works with invalid offsets.
- */
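-
-/* Configuration sketch (illustrative): auto.offset.reset is a topic-level
- * property; "latest" and "earliest" silently reset, while "error" surfaces
- * ERR__AUTO_OFFSET_RESET to the application instead of resetting.
- *
- *   char errstr[512];
- *   rd_kafka_topic_conf_t *tconf = rd_kafka_topic_conf_new();
- *   rd_kafka_topic_conf_set(tconf, "auto.offset.reset", "earliest",
- *                           errstr, sizeof(errstr));
- */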
-
-
-static void do_test_reset(const char *topic,
- int partition,
- const char *reset,
- int64_t initial_offset,
- int exp_eofcnt,
- int exp_msgcnt,
- int exp_errcnt,
- int exp_resetcnt) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- int eofcnt = 0, msgcnt = 0, errcnt = 0, resetcnt = 0;
- rd_kafka_conf_t *conf;
-
- TEST_SAY(
- "Test auto.offset.reset=%s, "
- "expect %d msgs, %d EOFs, %d errors, %d resets\n",
- reset, exp_msgcnt, exp_eofcnt, exp_errcnt, exp_resetcnt);
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.partition.eof", "true");
-
- rk = test_create_consumer(NULL, NULL, conf, NULL);
- rkt = test_create_topic_object(rk, topic, "auto.offset.reset", reset,
- NULL);
-
- test_consumer_start(reset, rkt, partition, initial_offset);
- while (1) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consume(rkt, partition, tmout_multip(1000 * 10));
- if (!rkm)
- TEST_FAIL(
- "%s: no message for 10s: "
- "%d/%d messages, %d/%d EOFs, %d/%d errors\n",
- reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt,
- errcnt, exp_errcnt);
-
- if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("%s: received EOF at offset %" PRId64 "\n",
- reset, rkm->offset);
- eofcnt++;
- } else if (rkm->err == RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) {
- TEST_SAY(
- "%s: auto.offset.reset error at offset %" PRId64
- ": %s: %s\n",
- reset, rkm->offset, rd_kafka_err2name(rkm->err),
- rd_kafka_message_errstr(rkm));
- resetcnt++;
- } else if (rkm->err) {
- TEST_SAY(
- "%s: consume error at offset %" PRId64 ": %s\n",
- reset, rkm->offset, rd_kafka_message_errstr(rkm));
- errcnt++;
- } else {
- msgcnt++;
- }
-
- rd_kafka_message_destroy(rkm);
-
- if (eofcnt == exp_eofcnt && errcnt == exp_errcnt &&
- msgcnt == exp_msgcnt && resetcnt == exp_resetcnt)
- break;
- else if (eofcnt > exp_eofcnt || errcnt > exp_errcnt ||
- msgcnt > exp_msgcnt || resetcnt > exp_resetcnt)
- TEST_FAIL(
- "%s: unexpected: "
- "%d/%d messages, %d/%d EOFs, %d/%d errors, "
- "%d/%d resets\n",
- reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt,
- errcnt, exp_errcnt, resetcnt, exp_resetcnt);
- }
-
- TEST_SAY(
- "%s: Done: "
- "%d/%d messages, %d/%d EOFs, %d/%d errors, %d/%d resets\n",
- reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, errcnt, exp_errcnt,
- resetcnt, exp_resetcnt);
-
- test_consumer_stop(reset, rkt, partition);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-}
-
-int main_0034_offset_reset(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int partition = 0;
- const int msgcnt = test_quick ? 20 : 100;
-
- /* Produce messages */
- test_produce_msgs_easy(topic, 0, partition, msgcnt);
-
- /* auto.offset.reset=latest: Consume messages from invalid offset:
- * Should return EOF. */
- do_test_reset(topic, partition, "latest", msgcnt + 5, 1, 0, 0, 0);
-
- /* auto.offset.reset=earliest: Consume messages from invalid offset:
- * Should return messages from beginning. */
- do_test_reset(topic, partition, "earliest", msgcnt + 5, 1, msgcnt, 0,
- 0);
-
- /* auto.offset.reset=error: Consume messages from invalid offset:
- * Should return error. */
- do_test_reset(topic, partition, "error", msgcnt + 5, 0, 0, 0, 1);
-
- return 0;
-}
-
-
-/**
- * @brief Verify auto.offset.reset=error behaviour for a range of different
- * error cases.
- */
-static void offset_reset_errors(void) {
- rd_kafka_t *c;
- rd_kafka_conf_t *conf;
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
- const char *topic = "topic";
- const int32_t partition = 0;
- const int msgcnt = 10;
- const int broker_id = 1;
- rd_kafka_queue_t *queue;
- int i;
- struct {
- rd_kafka_resp_err_t inject;
- rd_kafka_resp_err_t expect;
- /** Note: don't use OFFSET_BEGINNING since it might
- * use the cached low wmark, and thus not be subject to
- * the injected mock error. Use TAIL(msgcnt) instead.*/
- int64_t start_offset;
- int64_t expect_offset;
- rd_bool_t broker_down; /**< Bring the broker down */
- } test[] = {
- {
- RD_KAFKA_RESP_ERR__TRANSPORT,
- RD_KAFKA_RESP_ERR_NO_ERROR,
- RD_KAFKA_OFFSET_TAIL(msgcnt),
- 0,
- .broker_down = rd_true,
- },
- {
- RD_KAFKA_RESP_ERR__TRANSPORT,
- RD_KAFKA_RESP_ERR_NO_ERROR,
- RD_KAFKA_OFFSET_TAIL(msgcnt),
- 0,
- /* only disconnect on the ListOffsets request */
- .broker_down = rd_false,
- },
- {RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
- RD_KAFKA_OFFSET_TAIL(msgcnt), -1},
- {RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR__NO_OFFSET,
- RD_KAFKA_OFFSET_STORED, /* There's no committed offset */
- -1},
-
- };
-
- SUB_TEST_QUICK();
-
- mcluster = test_mock_cluster_new(1, &bootstraps);
-
- /* Seed partition 0 with some messages so we can differ
- * between beginning and end. */
- test_produce_msgs_easy_v(topic, 0, partition, 0, msgcnt, 10,
- "security.protocol", "plaintext",
- "bootstrap.servers", bootstraps, NULL);
-
- test_conf_init(&conf, NULL, 60 * 5);
-
- test_conf_set(conf, "security.protocol", "plaintext");
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "enable.partition.eof", "true");
- test_conf_set(conf, "enable.auto.commit", "false");
- /* Speed up reconnects */
- test_conf_set(conf, "reconnect.backoff.max.ms", "1000");
-
- /* Raise an error (ERR__AUTO_OFFSET_RESET) so we can verify
- * if auto.offset.reset is triggered or not. */
- test_conf_set(conf, "auto.offset.reset", "error");
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
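- /* All consumer events (fetches, errors, etc.) are polled from
- * this queue below. */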
- queue = rd_kafka_queue_get_consumer(c);
-
- for (i = 0; i < (int)RD_ARRAYSIZE(test); i++) {
- rd_kafka_event_t *ev;
- rd_bool_t broker_down = rd_false;
-
- /* Make sure consumer is connected */
- test_wait_topic_exists(c, topic, 5000);
-
- TEST_SAY(_C_YEL "#%d: injecting %s, expecting %s\n", i,
- rd_kafka_err2name(test[i].inject),
- rd_kafka_err2name(test[i].expect));
-
- if (test[i].broker_down) {
- TEST_SAY("Bringing down the broker\n");
- rd_kafka_mock_broker_set_down(mcluster, broker_id);
- broker_down = rd_true;
-
- } else if (test[i].inject) {
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_ListOffsets, 5, test[i].inject,
- test[i].inject, test[i].inject, test[i].inject,
- test[i].inject);
-
- /* mock handler will close the connection on this
- * request */
- if (test[i].inject == RD_KAFKA_RESP_ERR__TRANSPORT)
- broker_down = rd_true;
- }
-
- test_consumer_assign_partition("ASSIGN", c, topic, partition,
- test[i].start_offset);
-
- while (1) {
- /* Poll until we see an AUTO_OFFSET_RESET error,
- * timeout, or a message, depending on what we're
- * looking for. */
- ev = rd_kafka_queue_poll(queue, 5000);
-
- if (!ev) {
- TEST_ASSERT(broker_down,
- "#%d: poll timeout, but broker "
- "was not down",
- i);
-
- /* Bring the broker back up and continue */
- TEST_SAY("Bringing up the broker\n");
- if (test[i].broker_down)
- rd_kafka_mock_broker_set_up(mcluster,
- broker_id);
-
- broker_down = rd_false;
-
- } else if (rd_kafka_event_type(ev) ==
- RD_KAFKA_EVENT_ERROR) {
-
- if (rd_kafka_event_error(ev) !=
- RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) {
- TEST_SAY(
- "#%d: Ignoring %s event: %s\n", i,
- rd_kafka_event_name(ev),
- rd_kafka_event_error_string(ev));
- rd_kafka_event_destroy(ev);
- continue;
- }
-
- TEST_SAY(
- "#%d: injected %s, got error %s: %s\n", i,
- rd_kafka_err2name(test[i].inject),
- rd_kafka_err2name(rd_kafka_event_error(ev)),
- rd_kafka_event_error_string(ev));
-
- /* The auto reset error code is always
- * ERR__AUTO_OFFSET_RESET, and the original
- * error is provided in the error string.
- * So use err2str() to compare the error
- * string to the expected error. */
- TEST_ASSERT(
- strstr(rd_kafka_event_error_string(ev),
- rd_kafka_err2str(test[i].expect)),
- "#%d: expected %s, got %s", i,
- rd_kafka_err2name(test[i].expect),
- rd_kafka_err2name(
- rd_kafka_event_error(ev)));
-
- rd_kafka_event_destroy(ev);
- break;
-
- } else if (rd_kafka_event_type(ev) ==
- RD_KAFKA_EVENT_FETCH) {
- const rd_kafka_message_t *rkm =
- rd_kafka_event_message_next(ev);
-
- TEST_ASSERT(rkm, "#%d: got null message", i);
-
- TEST_SAY("#%d: message at offset %" PRId64
- " (%s)\n",
- i, rkm->offset,
- rd_kafka_err2name(rkm->err));
-
- TEST_ASSERT(!test[i].expect,
- "#%d: got message when expecting "
- "error",
- i);
-
- TEST_ASSERT(
- test[i].expect_offset == rkm->offset,
- "#%d: expected message offset "
- "%" PRId64 ", got %" PRId64 " (%s)",
- i, test[i].expect_offset, rkm->offset,
- rd_kafka_err2name(rkm->err));
-
- TEST_SAY(
- "#%d: got expected message at "
- "offset %" PRId64 " (%s)\n",
- i, rkm->offset,
- rd_kafka_err2name(rkm->err));
-
- rd_kafka_event_destroy(ev);
- break;
-
- } else {
- TEST_SAY("#%d: Ignoring %s event: %s\n", i,
- rd_kafka_event_name(ev),
- rd_kafka_event_error_string(ev));
- rd_kafka_event_destroy(ev);
- }
- }
-
-
-
- rd_kafka_mock_clear_request_errors(mcluster,
- RD_KAFKAP_ListOffsets);
- }
-
- rd_kafka_queue_destroy(queue);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-int main_0034_offset_reset_mock(int argc, char **argv) {
- offset_reset_errors();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c
deleted file mode 100644
index d005b1e9e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Issue #606: test that api.version.request=true works or reverts to
- * fallback within a reasonable amount of time.
- * Brokers 0.9.0 and 0.9.0.1 had a regression (wouldn't close the connection)
- * which caused these requests to time out (slowly) in librdkafka.
- */
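-
-/* Configuration sketch (illustrative): the fallback path for such brokers
- * can be pinned explicitly instead of relying on probing.
- *
- *   rd_kafka_conf_set(conf, "api.version.request", "true", NULL, 0);
- *   rd_kafka_conf_set(conf, "broker.version.fallback", "0.9.0", NULL, 0);
- */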
-
-
-int main_0035_api_version(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- const struct rd_kafka_metadata *metadata;
- rd_kafka_resp_err_t err;
- test_timing_t t_meta;
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "socket.timeout.ms", "12000");
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("Querying for metadata\n");
- TIMING_START(&t_meta, "metadata()");
- err = rd_kafka_metadata(rk, 0, NULL, &metadata, tmout_multip(5 * 1000));
- TIMING_STOP(&t_meta);
- if (err)
- TEST_FAIL("metadata() failed: %s", rd_kafka_err2str(err));
-
- if (TIMING_DURATION(&t_meta) / 1000 > 15 * 1000)
- TEST_FAIL("metadata() took too long: %.3fms",
- (float)TIMING_DURATION(&t_meta) / 1000.0f);
-
- rd_kafka_metadata_destroy(metadata);
-
- TEST_SAY("Metadata succeeded\n");
-
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c
deleted file mode 100644
index 69ee9864c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Issue #641: correct handling of partial messages in FetchResponse
- *
- * General idea:
- * - Produce messages of 1000 bytes each
- * - Set fetch.message.max.bytes to 1500 so that only one full message
- * can be fetched per request.
- * - Make sure all messages are received correctly and in order.
- */
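-
-/* Configuration sketch (illustrative): capping the per-request fetch size
- * below two message sizes forces the broker to return a truncated trailing
- * message, which the client must complete in a subsequent FetchRequest.
- *
- *   char errstr[512];
- *   rd_kafka_conf_set(conf, "fetch.message.max.bytes", "1500",
- *                     errstr, sizeof(errstr));
- */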
-
-
-int main_0036_partial_fetch(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int partition = 0;
- const int msgcnt = 100;
- const int msgsize = 1000;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
-
- TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt,
- (int)msgsize, topic, partition);
- testid = test_id_generate();
- rk = test_create_producer();
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY("Creating consumer\n");
- test_conf_init(&conf, NULL, 0);
- /* This should fetch about 1.5 messages per request, thus
- * (hopefully) resulting in partial fetches. */
- test_conf_set(conf, "fetch.message.max.bytes", "1500");
- rk = test_create_consumer(NULL, NULL, conf, NULL);
- rkt = rd_kafka_topic_new(rk, topic, NULL);
-
- test_consumer_start("CONSUME", rkt, partition,
- RD_KAFKA_OFFSET_BEGINNING);
- test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0,
- msgcnt, 1);
- test_consumer_stop("CONSUME", rkt, partition);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c
deleted file mode 100644
index 3b543fb6f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Various regression tests for hangs on destroy.
- */
-
-
-
-/**
- * Issue #530:
- * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create.
- * But If I put a start and stop in between, there is no issue."
- */
-static int legacy_consumer_early_destroy(void) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- int pass;
- const char *topic = test_mk_topic_name(__FUNCTION__, 0);
-
- for (pass = 0; pass < 2; pass++) {
- TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass);
-
- rk = test_create_handle(RD_KAFKA_CONSUMER, NULL);
-
- if (pass == 1) {
- /* Second pass, create a topic too. */
- rkt = rd_kafka_topic_new(rk, topic, NULL);
- TEST_ASSERT(rkt, "failed to create topic: %s",
- rd_kafka_err2str(rd_kafka_last_error()));
- rd_sleep(1);
- rd_kafka_topic_destroy(rkt);
- }
-
- rd_kafka_destroy(rk);
- }
-
- return 0;
-}
-
-
-int main_0037_destroy_hang_local(int argc, char **argv) {
- int fails = 0;
-
- test_conf_init(NULL, NULL, 30);
-
- fails += legacy_consumer_early_destroy();
-
- if (fails > 0)
- TEST_FAIL("See %d previous error(s)\n", fails);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c
deleted file mode 100644
index 674964dc9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Basic performance tests.
- * These tests don't fail but provide a throughput rate indication.
- *
- * + Produce N messages to one partition, acks=1, size=100
- */
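-
-/* Sizing note (illustrative): with totsize = 128 MiB and msgsize = 100
- * this produces 1024*1024*128 / 100 = 1342177 messages; the 8 MiB
- * valgrind/quick variant yields 83886. */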
-
-
-int main_0038_performance(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int partition = 0;
- const int msgsize = 100;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- test_timing_t t_create, t_produce, t_consume;
- int totsize = 1024 * 1024 * (test_quick ? 8 : 128);
- int msgcnt;
-
- if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") ||
- !strcmp(test_mode, "drd"))
- totsize = 1024 * 1024 * 8; /* 8 meg, valgrind is slow. */
-
- msgcnt = totsize / msgsize;
-
- TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt,
- (int)msgsize, topic, partition);
- testid = test_id_generate();
- test_conf_init(&conf, NULL, 120);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_conf_set(conf, "queue.buffering.max.messages", "10000000");
- test_conf_set(conf, "linger.ms", "100");
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL);
-
- /* First produce one message to create the topic, etc.; this might take
- * a while and we don't want it to affect the throughput timing. */
- TIMING_START(&t_create, "CREATE TOPIC");
- test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize);
- TIMING_STOP(&t_create);
-
- TIMING_START(&t_produce, "PRODUCE");
- test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt - 1, NULL,
- msgsize);
- TIMING_STOP(&t_produce);
-
- TEST_SAY("Destroying producer\n");
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY("Creating consumer\n");
- test_conf_init(&conf, NULL, 120);
- rk = test_create_consumer(NULL, NULL, conf, NULL);
- rkt = rd_kafka_topic_new(rk, topic, NULL);
-
- test_consumer_start("CONSUME", rkt, partition,
- RD_KAFKA_OFFSET_BEGINNING);
- TIMING_START(&t_consume, "CONSUME");
- test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0,
- msgcnt, 1);
- TIMING_STOP(&t_consume);
- test_consumer_stop("CONSUME", rkt, partition);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_REPORT(
- "{ \"producer\": "
- " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f },"
- " \"consumer\": "
- "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } "
- "}",
- (double)(totsize /
- ((double)TIMING_DURATION(&t_produce) / 1000000.0f)) /
- 1000000.0f,
- (float)(msgcnt /
- ((double)TIMING_DURATION(&t_produce) / 1000000.0f)),
- (double)(totsize /
- ((double)TIMING_DURATION(&t_consume) / 1000000.0f)) /
- 1000000.0f,
- (float)(msgcnt /
- ((double)TIMING_DURATION(&t_consume) / 1000000.0f)));
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c
deleted file mode 100644
index 8d6b9f0ee..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests event API.
- */
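-
-/* API sketch (illustrative, not part of the original test): enable the
- * desired event types on the conf, then poll them from a queue instead of
- * registering callbacks.
- *
- *   rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR | RD_KAFKA_EVENT_ERROR);
- *   rd_kafka_queue_t *q = rd_kafka_queue_get_main(rk);
- *   rd_kafka_event_t *ev = rd_kafka_queue_poll(q, 1000);
- *   if (ev)
- *           rd_kafka_event_destroy(ev);
- */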
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgid_next = 0;
-static int fails = 0;
-
-/**
- * Handle delivery reports
- */
-static void handle_drs(rd_kafka_event_t *rkev) {
- const rd_kafka_message_t *rkmessage;
-
- while ((rkmessage = rd_kafka_event_message_next(rkev))) {
- int32_t broker_id = rd_kafka_message_broker_id(rkmessage);
- int msgid = *(int *)rkmessage->_private;
- free(rkmessage->_private);
-
- TEST_SAYL(3,
- "Got rkmessage %s [%" PRId32 "] @ %" PRId64
- ": "
- "from broker %" PRId32 ": %s\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset, broker_id,
- rd_kafka_err2str(rkmessage->err));
-
-
- if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
- TEST_FAIL("Message delivery failed: %s\n",
- rd_kafka_err2str(rkmessage->err));
-
- if (msgid != msgid_next) {
- fails++;
- TEST_FAIL("Delivered msg %i, expected %i\n", msgid,
- msgid_next);
- return;
- }
-
- TEST_ASSERT(broker_id >= 0, "Message %d has no broker id set",
- msgid);
-
- msgid_next = msgid + 1;
- }
-}
-
-
-/**
- * @brief Test delivery report events
- */
-int main_0039_event_dr(int argc, char **argv) {
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char msg[128];
- int msgcnt = test_quick ? 500 : 50000;
- int i;
- test_timing_t t_produce, t_delivery;
- rd_kafka_queue_t *eventq;
-
- test_conf_init(&conf, &topic_conf, 10);
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- eventq = rd_kafka_queue_get_main(rk);
-
- rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
-
- /* Produce messages */
- TIMING_START(&t_produce, "PRODUCE");
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
- i);
- r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, msgidp);
- if (r == -1)
- TEST_FAIL("Failed to produce message #%i: %s\n", i,
- rd_strerror(errno));
- }
- TIMING_STOP(&t_produce);
- TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt);
-
- /* Wait for messages to be delivered */
- TIMING_START(&t_delivery, "DELIVERY");
- while (rd_kafka_outq_len(rk) > 0) {
- rd_kafka_event_t *rkev;
- rkev = rd_kafka_queue_poll(eventq, 1000);
- switch (rd_kafka_event_type(rkev)) {
- case RD_KAFKA_EVENT_DR:
- TEST_SAYL(3, "%s event with %" PRIusz " messages\n",
- rd_kafka_event_name(rkev),
- rd_kafka_event_message_count(rkev));
- handle_drs(rkev);
- break;
- default:
- TEST_SAY("Unhandled event: %s\n",
- rd_kafka_event_name(rkev));
- break;
- }
- rd_kafka_event_destroy(rkev);
- }
- TIMING_STOP(&t_delivery);
-
- if (fails)
- TEST_FAIL("%i failures, see previous errors", fails);
-
- if (msgid_next != msgcnt)
- TEST_FAIL("Still waiting for messages: next %i != end %i\n",
- msgid_next, msgcnt);
-
- rd_kafka_queue_destroy(eventq);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
-
-/**
- * @brief Local test: test log events
- */
-int main_0039_event_log(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *eventq;
- int waitevent = 1;
-
- const char *fac;
- const char *msg;
- char ctx[60];
- int level;
-
- conf = rd_kafka_conf_new();
- rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);
- rd_kafka_conf_set(conf, "log.queue", "true", NULL, 0);
- rd_kafka_conf_set(conf, "debug", "all", NULL, 0);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- eventq = rd_kafka_queue_get_main(rk);
- TEST_CALL_ERR__(rd_kafka_set_log_queue(rk, eventq));
-
- while (waitevent) {
- /* reset ctx */
-                memset(ctx, '$', sizeof(ctx) - 1); /* fill all but the NUL */
- ctx[sizeof(ctx) - 1] = '\0';
-
- rd_kafka_event_t *rkev;
- rkev = rd_kafka_queue_poll(eventq, 1000);
- switch (rd_kafka_event_type(rkev)) {
- case RD_KAFKA_EVENT_LOG:
- rd_kafka_event_log(rkev, &fac, &msg, &level);
- rd_kafka_event_debug_contexts(rkev, ctx, sizeof(ctx));
- TEST_SAY(
- "Got log event: "
- "level: %d ctx: %s fac: %s: msg: %s\n",
- level, ctx, fac, msg);
- if (strchr(ctx, '$')) {
- TEST_FAIL(
- "ctx was not set by "
- "rd_kafka_event_debug_contexts()");
- }
- waitevent = 0;
- break;
- default:
- TEST_SAY("Unhandled event: %s\n",
- rd_kafka_event_name(rkev));
- break;
- }
- rd_kafka_event_destroy(rkev);
- }
-
- /* Destroy rdkafka instance */
- rd_kafka_queue_destroy(eventq);
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
-
-/**
- * @brief Local test: test event generation
- */
-int main_0039_event(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *eventq;
- int waitevent = 1;
-
- /* Set up a config with ERROR events enabled and
- * configure an invalid broker so that _TRANSPORT or ALL_BROKERS_DOWN
- * is promptly generated. */
-
- conf = rd_kafka_conf_new();
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
- rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- eventq = rd_kafka_queue_get_main(rk);
-
- while (waitevent) {
- rd_kafka_event_t *rkev;
- rkev = rd_kafka_queue_poll(eventq, 1000);
- switch (rd_kafka_event_type(rkev)) {
- case RD_KAFKA_EVENT_ERROR:
- TEST_SAY("Got %s%s event: %s: %s\n",
- rd_kafka_event_error_is_fatal(rkev) ? "FATAL "
- : "",
- rd_kafka_event_name(rkev),
- rd_kafka_err2name(rd_kafka_event_error(rkev)),
- rd_kafka_event_error_string(rkev));
- waitevent = 0;
- break;
- default:
- TEST_SAY("Unhandled event: %s\n",
- rd_kafka_event_name(rkev));
- break;
- }
- rd_kafka_event_destroy(rkev);
- }
-
- rd_kafka_queue_destroy(eventq);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
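
All three 0039 tests above follow the same event-API pattern: opt into an event type on the configuration, obtain the instance's main queue, and service it with rd_kafka_queue_poll(). A condensed, illustrative sketch of that loop (the 1000 ms poll interval is an arbitrary choice; real code would loop on an application condition):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    /* Service the main event queue until a poll interval passes
     * with no event. Sketch only; error handling abbreviated. */
    static void drain_events(rd_kafka_t *rk) {
            rd_kafka_queue_t *eventq = rd_kafka_queue_get_main(rk);
            rd_kafka_event_t *rkev;

            while ((rkev = rd_kafka_queue_poll(eventq, 1000))) {
                    switch (rd_kafka_event_type(rkev)) {
                    case RD_KAFKA_EVENT_ERROR:
                            fprintf(stderr, "Error: %s\n",
                                    rd_kafka_event_error_string(rkev));
                            break;
                    default:
                            break; /* ignore events we did not opt into */
                    }
                    rd_kafka_event_destroy(rkev);
            }
            rd_kafka_queue_destroy(eventq);
    }
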
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c
deleted file mode 100644
index d47da5206..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests the queue IO event signalling.
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-#include <fcntl.h>
-#ifdef _WIN32
-#include <io.h>
-#pragma comment(lib, "ws2_32.lib")
-#else
-#include <unistd.h>
-#include <poll.h>
-#endif
-
-
-
-int main_0040_io_event(int argc, char **argv) {
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_t *rk_p, *rk_c;
- const char *topic;
- rd_kafka_topic_t *rkt_p;
- rd_kafka_queue_t *queue;
- uint64_t testid;
- int msgcnt = test_quick ? 10 : 100;
- int recvd = 0;
- int fds[2];
- int wait_multiplier = 1;
- struct pollfd pfd;
- int r;
- rd_kafka_resp_err_t err;
- enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE;
-
-#ifdef _WIN32
- TEST_SKIP("WSAPoll and pipes are not reliable on Win32 (FIXME)\n");
- return 0;
-#endif
- testid = test_id_generate();
- topic = test_mk_topic_name(__FUNCTION__, 1);
-
- rk_p = test_create_producer();
- rkt_p = test_create_producer_topic(rk_p, topic, NULL);
- err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000));
- TEST_ASSERT(!err, "Topic auto creation failed: %s",
- rd_kafka_err2str(err));
-
- test_conf_init(&conf, &tconf, 0);
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "enable.partition.eof", "false");
- /* Speed up propagation of new topics */
- test_conf_set(conf, "metadata.max.age.ms", "1000");
- test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
- rk_c = test_create_consumer(topic, NULL, conf, tconf);
-
- queue = rd_kafka_queue_get_consumer(rk_c);
-
- test_consumer_subscribe(rk_c, topic);
-
-#ifndef _WIN32
- r = pipe(fds);
-#else
- r = _pipe(fds, 2, _O_BINARY);
-#endif
- if (r == -1)
- TEST_FAIL("pipe() failed: %s\n", strerror(errno));
-
- rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);
-
- pfd.fd = fds[0];
- pfd.events = POLLIN;
- pfd.revents = 0;
-
- /**
- * 1) Wait for rebalance event
- * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
- * 3) Produce half the messages
- * 4) Expect IO
- * 5) Consume the available messages
- * 6) Wait 1 interval expecting no IO.
- * 7) Produce remaining half
- * 8) Expect IO
- * 9) Done.
- */
- while (recvd < msgcnt) {
-#ifndef _WIN32
- r = poll(&pfd, 1, 1000 * wait_multiplier);
-#else
- r = WSAPoll(&pfd, 1, 1000 * wait_multiplier);
-#endif
- if (r == -1) {
- TEST_FAIL("poll() failed: %s", strerror(errno));
-
- } else if (r == 1) {
- rd_kafka_event_t *rkev;
- char b;
- int eventcnt = 0;
-
-                /* Check revents (the events poll() returned), not
-                 * events (the mask we requested). */
-                if (pfd.revents & POLLERR)
-                        TEST_FAIL("Poll error\n");
-                if (!(pfd.revents & POLLIN)) {
-                        TEST_SAY("Stray event 0x%x\n", (int)pfd.revents);
-                        continue;
-                }
-
- TEST_SAY("POLLIN\n");
- /* Read signaling token to purge socket queue and
- * eventually silence POLLIN */
-#ifndef _WIN32
- r = read(pfd.fd, &b, 1);
-#else
- r = _read((int)pfd.fd, &b, 1);
-#endif
- if (r == -1)
- TEST_FAIL("read failed: %s\n", strerror(errno));
-
- if (!expecting_io)
- TEST_WARN(
- "Got unexpected IO after %d/%d msgs\n",
- recvd, msgcnt);
-
- while ((rkev = rd_kafka_queue_poll(queue, 0))) {
- eventcnt++;
- switch (rd_kafka_event_type(rkev)) {
- case RD_KAFKA_EVENT_REBALANCE:
- TEST_SAY(
- "Got %s: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_err2str(
- rd_kafka_event_error(rkev)));
- if (expecting_io != _REBALANCE)
- TEST_FAIL(
- "Got Rebalance when "
- "expecting message\n");
- if (rd_kafka_event_error(rkev) ==
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- rd_kafka_assign(
- rk_c,
- rd_kafka_event_topic_partition_list(
- rkev));
- expecting_io = _NOPE;
- } else
- rd_kafka_assign(rk_c, NULL);
- break;
-
- case RD_KAFKA_EVENT_FETCH:
- if (expecting_io != _YEP)
- TEST_FAIL(
- "Did not expect more "
- "messages at %d/%d\n",
- recvd, msgcnt);
- recvd++;
- if (recvd == (msgcnt / 2) ||
- recvd == msgcnt)
- expecting_io = _NOPE;
- break;
-
- case RD_KAFKA_EVENT_ERROR:
- TEST_FAIL(
- "Error: %s\n",
- rd_kafka_event_error_string(rkev));
- break;
-
- default:
- TEST_SAY("Ignoring event %s\n",
- rd_kafka_event_name(rkev));
- }
-
- rd_kafka_event_destroy(rkev);
- }
- TEST_SAY("%d events, Consumed %d/%d messages\n",
- eventcnt, recvd, msgcnt);
-
- wait_multiplier = 1;
-
- } else {
- if (expecting_io == _REBALANCE) {
- continue;
- } else if (expecting_io == _YEP) {
- TEST_FAIL(
- "Did not see expected IO after %d/%d "
- "msgs\n",
- recvd, msgcnt);
- }
-
- TEST_SAY("IO poll timeout (good)\n");
-
- TEST_SAY("Got idle period, producing\n");
- test_produce_msgs(rk_p, rkt_p, testid, 0, recvd,
- msgcnt / 2, NULL, 10);
-
- expecting_io = _YEP;
- /* When running slowly (e.g., valgrind) it might take
- * some time before the first message is received
- * after producing. */
- wait_multiplier = 3;
- }
- }
- TEST_SAY("Done\n");
-
- rd_kafka_topic_destroy(rkt_p);
- rd_kafka_destroy(rk_p);
-
- rd_kafka_queue_destroy(queue);
- rd_kafka_consumer_close(rk_c);
- rd_kafka_destroy(rk_c);
-
-#ifndef _WIN32
- close(fds[0]);
- close(fds[1]);
-#else
- _close(fds[0]);
- _close(fds[1]);
-#endif
-
- return 0;
-}
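
Stripped of the test scaffolding, the mechanism exercised above is: rd_kafka_queue_io_event_enable() makes librdkafka write the given payload to a file descriptor whenever the queue transitions from empty to non-empty, so the queue can be multiplexed with ordinary fds in poll(). A minimal POSIX-only sketch, assuming `queue` is an existing rd_kafka_queue_t * and with error handling abbreviated:

    int fds[2];
    struct pollfd pfd;
    char token;

    if (pipe(fds) == -1)
            perror("pipe");

    /* Write the byte "1" to fds[1] when the queue becomes non-empty. */
    rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);

    pfd.fd     = fds[0];
    pfd.events = POLLIN;

    if (poll(&pfd, 1, 1000) == 1 && (pfd.revents & POLLIN)) {
            rd_kafka_event_t *rkev;
            (void)read(fds[0], &token, 1); /* consume the signalling token */
            while ((rkev = rd_kafka_queue_poll(queue, 0))) /* non-blocking drain */
                    rd_kafka_event_destroy(rkev);
    }
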
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c
deleted file mode 100644
index e243dc8ac..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Issue #597: increase fetch.message.max.bytes until large messages can
- * be fetched.
- *
- * General idea:
- * - Produce 1000 small messages < MAX_BYTES
- * - Produce 1000 large messages > MAX_BYTES
- * - Create consumer with fetch.message.max.bytes=MAX_BYTES
- * - Consume from beginning
- * - All messages should be received.
- */
-
-
-int main_0041_fetch_max_bytes(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int partition = 0;
- const int msgcnt = 2 * 1000;
- const int MAX_BYTES = 100000;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
-
- test_conf_init(NULL, NULL, 60);
-
- testid = test_id_generate();
- rk = test_create_producer();
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt / 2, NULL,
- MAX_BYTES / 10);
- test_produce_msgs(rk, rkt, testid, partition, msgcnt / 2, msgcnt / 2,
- NULL, MAX_BYTES * 5);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY("Creating consumer\n");
- test_conf_init(&conf, NULL, 0);
-
- test_conf_set(conf, "fetch.message.max.bytes",
- tsprintf("%d", MAX_BYTES));
-
- /* This test may be slower when running with SSL or Helgrind,
- * restart the timeout. */
- test_timeout_set(60);
-
- rk = test_create_consumer(NULL, NULL, conf, NULL);
- rkt = rd_kafka_topic_new(rk, topic, NULL);
-
- test_consumer_start("CONSUME", rkt, partition,
- RD_KAFKA_OFFSET_BEGINNING);
- test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0,
- msgcnt, 1);
- test_consumer_stop("CONSUME", rkt, partition);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
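
Outside the test helpers, the property under test is set like any other consumer configuration value; a sketch follows (100000 mirrors MAX_BYTES above). Note that fetch.message.max.bytes is a per-partition limit; the total fetch response is additionally bounded by fetch.max.bytes and receive.message.max.bytes, which should be kept consistent with it:

    char errstr[512];
    rd_kafka_conf_t *conf = rd_kafka_conf_new();

    /* Per-partition fetch limit (alias: max.partition.fetch.bytes). */
    if (rd_kafka_conf_set(conf, "fetch.message.max.bytes", "100000",
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "config error: %s\n", errstr);
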
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c
deleted file mode 100644
index 6ea5aa669..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Issue #781: verify that the client correctly handles many topics.
- */
-
-
-const int msgs_per_topic = 100;
-
-
-/**
- * Request offset for a non-existing partition.
- * Will cause rd_kafka_destroy() to hang.
- */
-
-
-
-static void produce_many(char **topics, int topic_cnt, uint64_t testid) {
- rd_kafka_t *rk;
- test_timing_t t_rkt_create;
- int i;
- rd_kafka_topic_t **rkts;
-
- TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
-
- rk = test_create_producer();
-
- TEST_SAY("Creating %d topic objects\n", topic_cnt);
-
- rkts = malloc(sizeof(*rkts) * topic_cnt);
- TIMING_START(&t_rkt_create, "Topic object create");
- for (i = 0; i < topic_cnt; i++) {
- rkts[i] = test_create_topic_object(rk, topics[i], "acks", "all",
- NULL);
- }
- TIMING_STOP(&t_rkt_create);
-
-        TEST_SAY("Producing %d messages to each of %d topics\n",
-                 msgs_per_topic, topic_cnt);
- /* Produce messages to each topic (so they are created) */
- for (i = 0; i < topic_cnt; i++) {
- test_produce_msgs(rk, rkts[i], testid, 0, i * msgs_per_topic,
- msgs_per_topic, NULL, 100);
- }
-
- TEST_SAY("Destroying %d topic objects\n", topic_cnt);
- for (i = 0; i < topic_cnt; i++) {
- rd_kafka_topic_destroy(rkts[i]);
- }
- free(rkts);
-
- test_flush(rk, 30000);
-
- rd_kafka_destroy(rk);
-}
-
-
-static void legacy_consume_many(char **topics, int topic_cnt, uint64_t testid) {
- rd_kafka_t *rk;
- test_timing_t t_rkt_create;
- int i;
- rd_kafka_topic_t **rkts;
- int msg_base = 0;
-
- TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(NULL, NULL, 60);
-
- rk = test_create_consumer(NULL, NULL, NULL, NULL);
-
- TEST_SAY("Creating %d topic objects\n", topic_cnt);
-
- rkts = malloc(sizeof(*rkts) * topic_cnt);
- TIMING_START(&t_rkt_create, "Topic object create");
- for (i = 0; i < topic_cnt; i++)
- rkts[i] = test_create_topic_object(rk, topics[i], NULL);
- TIMING_STOP(&t_rkt_create);
-
- TEST_SAY("Start consumer for %d topics\n", topic_cnt);
- for (i = 0; i < topic_cnt; i++)
- test_consumer_start("legacy", rkts[i], 0,
- RD_KAFKA_OFFSET_BEGINNING);
-
-        TEST_SAY("Consuming %d messages from each of %d topics\n",
-                 msgs_per_topic, topic_cnt);
- for (i = 0; i < topic_cnt; i++) {
- test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK,
- msg_base, msgs_per_topic, 1);
- msg_base += msgs_per_topic;
- }
-
- TEST_SAY("Stopping consumers\n");
- for (i = 0; i < topic_cnt; i++)
- test_consumer_stop("legacy", rkts[i], 0);
-
-
- TEST_SAY("Destroying %d topic objects\n", topic_cnt);
- for (i = 0; i < topic_cnt; i++)
- rd_kafka_topic_destroy(rkts[i]);
-
- free(rkts);
-
- rd_kafka_destroy(rk);
-}
-
-
-
-static void
-subscribe_consume_many(char **topics, int topic_cnt, uint64_t testid) {
- rd_kafka_t *rk;
- int i;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_topic_partition_list_t *parts;
- rd_kafka_resp_err_t err;
- test_msgver_t mv;
-
- TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(NULL, &tconf, 60);
- test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
- rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf);
-
- parts = rd_kafka_topic_partition_list_new(topic_cnt);
- for (i = 0; i < topic_cnt; i++)
- rd_kafka_topic_partition_list_add(parts, topics[i],
- RD_KAFKA_PARTITION_UA);
-
- TEST_SAY("Subscribing to %d topics\n", topic_cnt);
- err = rd_kafka_subscribe(rk, parts);
- if (err)
- TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err));
-
- rd_kafka_topic_partition_list_destroy(parts);
-
- test_msgver_init(&mv, testid);
- test_consumer_poll("consume.subscribe", rk, testid, -1, 0,
- msgs_per_topic * topic_cnt, &mv);
-
- for (i = 0; i < topic_cnt; i++)
- test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART,
- topics[i], 0, i * msgs_per_topic,
- msgs_per_topic);
- test_msgver_clear(&mv);
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-}
-
-
-
-static void assign_consume_many(char **topics, int topic_cnt, uint64_t testid) {
- rd_kafka_t *rk;
- rd_kafka_topic_partition_list_t *parts;
- int i;
- test_msgver_t mv;
-
- TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(NULL, NULL, 60);
- rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL);
-
- parts = rd_kafka_topic_partition_list_new(topic_cnt);
- for (i = 0; i < topic_cnt; i++)
- rd_kafka_topic_partition_list_add(parts, topics[i], 0)->offset =
- RD_KAFKA_OFFSET_TAIL(msgs_per_topic);
-
- test_consumer_assign("consume.assign", rk, parts);
- rd_kafka_topic_partition_list_destroy(parts);
-
- test_msgver_init(&mv, testid);
- test_consumer_poll("consume.assign", rk, testid, -1, 0,
- msgs_per_topic * topic_cnt, &mv);
-
- for (i = 0; i < topic_cnt; i++)
- test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART,
- topics[i], 0, i * msgs_per_topic,
- msgs_per_topic);
- test_msgver_clear(&mv);
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-}
-
-
-
-int main_0042_many_topics(int argc, char **argv) {
- char **topics;
-        int topic_cnt = test_quick ? 4 : 20; /* Increase as needed: topic
-                                              * creation takes time, so unless
-                                              * hunting a bug we keep this low
-                                              * to keep the test suite run
-                                              * time down. */
- uint64_t testid;
- int i;
-
- test_conf_init(NULL, NULL, 60);
-
- testid = test_id_generate();
-
- /* Generate unique topic names */
- topics = malloc(sizeof(*topics) * topic_cnt);
- for (i = 0; i < topic_cnt; i++)
- topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
-
- produce_many(topics, topic_cnt, testid);
- legacy_consume_many(topics, topic_cnt, testid);
- if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) {
- subscribe_consume_many(topics, topic_cnt, testid);
- assign_consume_many(topics, topic_cnt, testid);
- }
-
- for (i = 0; i < topic_cnt; i++)
- free(topics[i]);
- free(topics);
-
- return 0;
-}
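
assign_consume_many() above relies on a less common offset form: RD_KAFKA_OFFSET_TAIL(n) starts consumption n messages before the current end of the partition. A standalone sketch, with a hypothetical topic name and an existing consumer handle `rk`:

    rd_kafka_topic_partition_list_t *parts =
            rd_kafka_topic_partition_list_new(1);

    /* Begin 100 messages before the current end of partition 0. */
    rd_kafka_topic_partition_list_add(parts, "mytopic", 0)->offset =
            RD_KAFKA_OFFSET_TAIL(100);

    rd_kafka_assign(rk, parts);
    rd_kafka_topic_partition_list_destroy(parts);
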
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c
deleted file mode 100644
index 3470c4ae1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-/**
- * Make sure the library behaves sanely even when there is no
- * broker connection.
- */
-
-
-
-static void test_producer_no_connection(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- int i;
- const int partition_cnt = 2;
- int msgcnt = 0;
- test_timing_t t_destroy;
-
- test_conf_init(&conf, NULL, 20);
-
- test_conf_set(conf, "bootstrap.servers", NULL);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms",
- "5000", NULL);
-
- test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100,
- NULL, 100, 0, &msgcnt);
- for (i = 0; i < partition_cnt; i++)
- test_produce_msgs_nowait(rk, rkt, 0, i, 0, 100, NULL, 100, 0,
- &msgcnt);
-
- rd_kafka_poll(rk, 1000);
-
- TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));
-
- rd_kafka_topic_destroy(rkt);
-
- TIMING_START(&t_destroy, "rd_kafka_destroy()");
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_destroy);
-}
-
-int main_0043_no_connection(int argc, char **argv) {
- test_producer_no_connection();
-
- return 0;
-}
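
With no broker reachable, every message produced above fails locally once message.timeout.ms (5000 here) expires. An application would observe this in its delivery report callback; an illustrative callback sketch (name and registration are assumptions, not part of the test):

    static void dr_msg_cb(rd_kafka_t *rk,
                          const rd_kafka_message_t *rkmessage,
                          void *opaque) {
            /* Local timeout: the message never reached any broker. */
            if (rkmessage->err == RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
                    fprintf(stderr, "delivery failed: %s\n",
                            rd_kafka_err2str(rkmessage->err));
    }
    /* Registered before rd_kafka_new():
     *   rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb); */
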
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c
deleted file mode 100644
index 51ef318c3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-/**
- * Make sure the library behaves correctly when the partition count
- * for a topic changes.
- * This test must be run under trivup so that kafka-topics.sh is available.
- */
-
-
-
-/**
- * - Create topic with 2 partitions
- * - Start producing messages to UA partition
- * - Change to 4 partitions
- * - Produce more messages to UA partition
- * - Wait for DRs
- * - Close
- */
-
-static void test_producer_partition_cnt_change(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const int partition_cnt = 4;
- int msgcnt = test_quick ? 500 : 100000;
- test_timing_t t_destroy;
- int produced = 0;
-
- test_conf_init(&conf, NULL, 20);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_create_topic(rk, topic, partition_cnt / 2, 1);
-
- rkt =
- test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms",
- tsprintf("%d", tmout_multip(10000)), NULL);
-
- test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0,
- msgcnt / 2, NULL, 100, 0, &produced);
-
- test_create_partitions(rk, topic, partition_cnt);
-
- test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, msgcnt / 2,
- msgcnt / 2, NULL, 100, 0, &produced);
-
- test_wait_delivery(rk, &produced);
-
- rd_kafka_topic_destroy(rkt);
-
- TIMING_START(&t_destroy, "rd_kafka_destroy()");
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_destroy);
-}
-
-int main_0044_partition_cnt(int argc, char **argv) {
- if (!test_can_create_topics(1))
- return 0;
-
- test_producer_partition_cnt_change();
-
- return 0;
-}
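
test_create_partitions() is a test-suite helper; in application code the same partition-count increase is done with the librdkafka Admin API. A sketch with a hypothetical topic name and error handling trimmed (note the count is the new total, not an increment):

    char errstr[512];
    rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
    rd_kafka_NewPartitions_t *np =
            rd_kafka_NewPartitions_new("mytopic", 4, errstr, sizeof(errstr));

    rd_kafka_CreatePartitions(rk, &np, 1, NULL /*options*/, q);
    /* ... await RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT on q ... */
    rd_kafka_NewPartitions_destroy(np);
    rd_kafka_queue_destroy(q);
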
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c
deleted file mode 100644
index f804613d7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Verify that subscription is updated on metadata changes:
- * - topic additions
- * - topic deletions
- * - partition count changes
- */
-
-
-
-/**
- * Wait for REBALANCE ASSIGN event and perform assignment
- *
- * Va-args are \p topic_cnt tuples of the expected assignment:
- * { const char *topic, int partition_cnt }
- */
-static void await_assignment(const char *pfx,
- rd_kafka_t *rk,
- rd_kafka_queue_t *queue,
- int topic_cnt,
- ...) {
- rd_kafka_event_t *rkev;
- rd_kafka_topic_partition_list_t *tps;
- int i;
- va_list ap;
- int fails = 0;
- int exp_part_cnt = 0;
-
- TEST_SAY("%s: waiting for assignment\n", pfx);
- rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
- if (!rkev)
- TEST_FAIL("timed out waiting for assignment");
- TEST_ASSERT(rd_kafka_event_error(rkev) ==
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- "expected ASSIGN, got %s",
- rd_kafka_err2str(rd_kafka_event_error(rkev)));
- tps = rd_kafka_event_topic_partition_list(rkev);
-
- TEST_SAY("%s: assignment:\n", pfx);
- test_print_partition_list(tps);
-
- va_start(ap, topic_cnt);
- for (i = 0; i < topic_cnt; i++) {
- const char *topic = va_arg(ap, const char *);
- int partition_cnt = va_arg(ap, int);
- int p;
- TEST_SAY("%s: expecting %s with %d partitions\n", pfx, topic,
- partition_cnt);
- for (p = 0; p < partition_cnt; p++) {
- if (!rd_kafka_topic_partition_list_find(tps, topic,
- p)) {
- TEST_FAIL_LATER(
- "%s: expected partition %s [%d] "
-                                    "not found in assignment",
- pfx, topic, p);
- fails++;
- }
- }
- exp_part_cnt += partition_cnt;
- }
- va_end(ap);
-
- TEST_ASSERT(exp_part_cnt == tps->cnt,
- "expected assignment of %d partitions, got %d",
- exp_part_cnt, tps->cnt);
-
- if (fails > 0)
- TEST_FAIL("%s: assignment mismatch: see above", pfx);
-
- rd_kafka_assign(rk, tps);
- rd_kafka_event_destroy(rkev);
-}
-
-
-/**
- * Wait for REBALANCE REVOKE event and perform unassignment.
- */
-static void
-await_revoke(const char *pfx, rd_kafka_t *rk, rd_kafka_queue_t *queue) {
- rd_kafka_event_t *rkev;
-
- TEST_SAY("%s: waiting for revoke\n", pfx);
- rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
- if (!rkev)
- TEST_FAIL("timed out waiting for revoke");
- TEST_ASSERT(rd_kafka_event_error(rkev) ==
- RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- "expected REVOKE, got %s",
- rd_kafka_err2str(rd_kafka_event_error(rkev)));
- rd_kafka_assign(rk, NULL);
- rd_kafka_event_destroy(rkev);
-}
-
-/**
- * Wait \p timeout_ms to make sure no rebalance was triggered.
- */
-static void await_no_rebalance(const char *pfx,
- rd_kafka_t *rk,
- rd_kafka_queue_t *queue,
- int timeout_ms) {
- rd_kafka_event_t *rkev;
-
- TEST_SAY("%s: waiting for %d ms to not see rebalance\n", pfx,
- timeout_ms);
- rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, timeout_ms);
- if (!rkev)
- return;
-        /* Receiving any rebalance event here is a failure in itself. */
-        TEST_FAIL("did not expect %s: %s", rd_kafka_event_name(rkev),
-                  rd_kafka_err2str(rd_kafka_event_error(rkev)));
- rd_kafka_event_destroy(rkev);
-}
-
-static void do_test_non_exist_and_partchange(void) {
- char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *queue;
-
- /**
- * Test #1:
- * - Subscribe to non-existing topic.
- * - Verify empty assignment
- * - Create topic
- * - Verify new assignment containing topic
- */
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
-
- /* Decrease metadata interval to speed up topic change discovery. */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
- rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL);
- queue = rd_kafka_queue_get_consumer(rk);
-
- TEST_SAY("#1: Subscribing to %s\n", topic_a);
- test_consumer_subscribe(rk, topic_a);
-
- /* Should not see a rebalance since no topics are matched. */
- await_no_rebalance("#1: empty", rk, queue, 10000);
-
- TEST_SAY("#1: creating topic %s\n", topic_a);
- test_create_topic(NULL, topic_a, 2, 1);
-
- await_assignment("#1: proper", rk, queue, 1, topic_a, 2);
-
-
- /**
- * Test #2 (continue with #1 consumer)
- * - Increase the partition count
- * - Verify updated assignment
- */
- test_kafka_topics("--alter --topic %s --partitions 4", topic_a);
- await_revoke("#2", rk, queue);
-
- await_assignment("#2: more partitions", rk, queue, 1, topic_a, 4);
-
- test_consumer_close(rk);
- rd_kafka_queue_destroy(queue);
- rd_kafka_destroy(rk);
-
- rd_free(topic_a);
-
- SUB_TEST_PASS();
-}
-
-
-
-static void do_test_regex(void) {
- char *base_topic = rd_strdup(test_mk_topic_name("topic", 1));
- char *topic_b = rd_strdup(tsprintf("%s_b", base_topic));
- char *topic_c = rd_strdup(tsprintf("%s_c", base_topic));
- char *topic_d = rd_strdup(tsprintf("%s_d", base_topic));
- char *topic_e = rd_strdup(tsprintf("%s_e", base_topic));
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *queue;
-
- /**
- * Regex test:
- * - Create topic b
- * - Subscribe to b & d & e
- * - Verify b assignment
- * - Create topic c
- * - Verify no rebalance
- * - Create topic d
- * - Verify b & d assignment
- */
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
-
- /* Decrease metadata interval to speed up topic change discovery. */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
- rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL);
- queue = rd_kafka_queue_get_consumer(rk);
-
- TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b);
- test_create_topic(NULL, topic_b, 2, 1);
- rd_sleep(1); // FIXME: do check&wait loop instead
-
- TEST_SAY("Regex: Subscribing to %s & %s & %s\n", topic_b, topic_d,
- topic_e);
- test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic));
-
- await_assignment("Regex: just one topic exists", rk, queue, 1, topic_b,
- 2);
-
- TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c);
- test_create_topic(NULL, topic_c, 4, 1);
-
- /* Should not see a rebalance since no topics are matched. */
- await_no_rebalance("Regex: empty", rk, queue, 10000);
-
- TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d);
- test_create_topic(NULL, topic_d, 1, 1);
-
- await_revoke("Regex: rebalance after topic creation", rk, queue);
-
- await_assignment("Regex: two topics exist", rk, queue, 2, topic_b, 2,
- topic_d, 1);
-
- test_consumer_close(rk);
- rd_kafka_queue_destroy(queue);
- rd_kafka_destroy(rk);
-
- rd_free(base_topic);
- rd_free(topic_b);
- rd_free(topic_c);
- rd_free(topic_d);
- rd_free(topic_e);
-
- SUB_TEST_PASS();
-}
-
-/**
- * @remark Requires scenario=noautocreate.
- */
-static void do_test_topic_remove(void) {
- char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
- char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
- int parts_f = 5;
- int parts_g = 9;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *queue;
- rd_kafka_topic_partition_list_t *topics;
- rd_kafka_resp_err_t err;
-
- /**
- * Topic removal test:
- * - Create topic f & g
- * - Subscribe to f & g
- * - Verify f & g assignment
- * - Remove topic f
- * - Verify g assignment
- * - Remove topic g
- * - Verify empty assignment
- */
-
- SUB_TEST("Topic removal testing");
-
- test_conf_init(&conf, NULL, 60);
-
- /* Decrease metadata interval to speed up topic change discovery. */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
- rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL);
- queue = rd_kafka_queue_get_consumer(rk);
-
- TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
- test_create_topic(NULL, topic_f, parts_f, 1);
-
- TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
- test_create_topic(NULL, topic_g, parts_g, 1);
-
- rd_sleep(1); // FIXME: do check&wait loop instead
-
- TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g);
- topics = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(topics, topic_f,
- RD_KAFKA_PARTITION_UA);
- rd_kafka_topic_partition_list_add(topics, topic_g,
- RD_KAFKA_PARTITION_UA);
- err = rd_kafka_subscribe(rk, topics);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, "%s",
- rd_kafka_err2str(err));
- rd_kafka_topic_partition_list_destroy(topics);
-
- await_assignment("Topic removal: both topics exist", rk, queue, 2,
- topic_f, parts_f, topic_g, parts_g);
-
- TEST_SAY("Topic removal: removing %s\n", topic_f);
- test_kafka_topics("--delete --topic %s", topic_f);
-
- await_revoke("Topic removal: rebalance after topic removal", rk, queue);
-
- await_assignment("Topic removal: one topic exists", rk, queue, 1,
- topic_g, parts_g);
-
- TEST_SAY("Topic removal: removing %s\n", topic_g);
- test_kafka_topics("--delete --topic %s", topic_g);
-
- await_revoke("Topic removal: rebalance after 2nd topic removal", rk,
- queue);
-
- /* Should not see another rebalance since all topics now removed */
- await_no_rebalance("Topic removal: empty", rk, queue, 10000);
-
- test_consumer_close(rk);
- rd_kafka_queue_destroy(queue);
- rd_kafka_destroy(rk);
-
- rd_free(topic_f);
- rd_free(topic_g);
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Subscribe to a regex and continually create a lot of matching topics,
- * triggering many rebalances.
- *
- * This is using the mock cluster.
- *
- */
-static void do_test_regex_many_mock(const char *assignment_strategy,
- rd_bool_t lots_of_topics) {
- const char *base_topic = "topic";
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
- int topic_cnt = lots_of_topics ? 300 : 50;
- int await_assignment_every = lots_of_topics ? 150 : 15;
- int i;
-
- SUB_TEST("%s with %d topics", assignment_strategy, topic_cnt);
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
- test_conf_init(&conf, NULL, 60 * 5);
-
- test_conf_set(conf, "security.protocol", "plaintext");
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "partition.assignment.strategy",
- assignment_strategy);
- /* Decrease metadata interval to speed up topic change discovery. */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "3000");
-
- rk = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(rk, tsprintf("^%s_.*", base_topic));
-
- for (i = 0; i < topic_cnt; i++) {
- char topic[256];
-
- rd_snprintf(topic, sizeof(topic), "%s_%d", base_topic, i);
-
-
- TEST_SAY("Creating topic %s\n", topic);
- TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, topic,
- 1 + (i % 8), 1));
-
- test_consumer_poll_no_msgs("POLL", rk, 0,
- lots_of_topics ? 100 : 300);
-
- /* Wait for an assignment to let the consumer catch up on
- * all rebalancing. */
- if (i % await_assignment_every == await_assignment_every - 1)
- test_consumer_wait_assignment(rk, rd_true /*poll*/);
- else if (!lots_of_topics)
- rd_usleep(100 * 1000, NULL);
- }
-
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-
-
-int main_0045_subscribe_update(int argc, char **argv) {
-
- if (!test_can_create_topics(1))
- return 0;
-
- do_test_regex();
-
- return 0;
-}
-
-int main_0045_subscribe_update_non_exist_and_partchange(int argc, char **argv) {
-
- do_test_non_exist_and_partchange();
-
- return 0;
-}
-
-int main_0045_subscribe_update_topic_remove(int argc, char **argv) {
-
- if (!test_can_create_topics(1))
- return 0;
-
- do_test_topic_remove();
-
- return 0;
-}
-
-
-int main_0045_subscribe_update_mock(int argc, char **argv) {
- do_test_regex_many_mock("range", rd_false);
- do_test_regex_many_mock("cooperative-sticky", rd_false);
- do_test_regex_many_mock("cooperative-sticky", rd_true);
-
- return 0;
-}
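
The regex subscriptions used throughout this file rely on a librdkafka convention: a topic name beginning with "^" in the subscription list is treated as a regular expression and is re-evaluated as topic metadata changes. Minimal sketch, assuming an existing consumer handle `rk`:

    rd_kafka_topic_partition_list_t *subs =
            rd_kafka_topic_partition_list_new(1);

    /* Matches topics that exist now or are created later. */
    rd_kafka_topic_partition_list_add(subs, "^topic_[bde]$",
                                      RD_KAFKA_PARTITION_UA);
    rd_kafka_subscribe(rk, subs);
    rd_kafka_topic_partition_list_destroy(subs);
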
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c
deleted file mode 100644
index 541c03037..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Issue #345, #821
- * Test that topic_new() + topic_destroy() can be used as a topic-lookup cache,
- * i.e., as long as the app topic refcount stays above 1 the app can call
- * new() and destroy() any number of times (symmetrically).
- */
-
-
-int main_0046_rkt_cache(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- const char *topic = test_mk_topic_name(__FUNCTION__, 0);
- int i;
-
- rk = test_create_producer();
-
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- for (i = 0; i < 100; i++) {
- rd_kafka_topic_t *rkt2;
-
- rkt2 = rd_kafka_topic_new(rk, topic, NULL);
- TEST_ASSERT(rkt2 != NULL);
-
- rd_kafka_topic_destroy(rkt2);
- }
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c
deleted file mode 100644
index d90004a3a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Issue #756
- *
- * Partially sent buffers that time out would cause the next request sent
- * to appear inside the partially sent buffer, eventually leading to an
- * InvalidReceiveException exception on the broker.
- *
- * This is easily triggered by:
- * - decrease socket buffers
- * - decrease message timeout
- * - produce a bunch of large messages that will need to be partially sent
- * - requests should time out, which should cause the connection to be
- *   closed by librdkafka.
- *
- * How do we monitor for correctness?
- * - the broker shall not close the connection (but we might)
- */
-
-static int got_timeout_err = 0;
-
-static void
-my_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
- got_timeout_err += (err == RD_KAFKA_RESP_ERR__TIMED_OUT);
-
- if (err == RD_KAFKA_RESP_ERR__TIMED_OUT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
- TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
- reason);
- else
- TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
- reason);
-}
-
-int main_0047_partial_buf_tmout(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- const char *topic = test_mk_topic_name(__FUNCTION__, 0);
- rd_kafka_conf_t *conf;
- const size_t msg_size = 10000;
- int msgcounter = 0;
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "socket.send.buffer.bytes", "1000");
- test_conf_set(conf, "batch.num.messages", "100");
- test_conf_set(conf, "queue.buffering.max.messages", "10000000");
- rd_kafka_conf_set_error_cb(conf, my_error_cb);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", "300",
- NULL);
-
- while (got_timeout_err == 0) {
- test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0,
- 10000, NULL, msg_size, 0, &msgcounter);
- rd_kafka_flush(rk, 100);
- }
-
- TEST_ASSERT(got_timeout_err > 0);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c
deleted file mode 100644
index 84efee7db..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Various partitioner tests
- *
- * - Issue #797 - deadlock on failed partitioning
- * - Verify that partitioning works across partitioners.
- */
-
-int32_t my_invalid_partitioner(const rd_kafka_topic_t *rkt,
- const void *keydata,
- size_t keylen,
- int32_t partition_cnt,
- void *rkt_opaque,
- void *msg_opaque) {
- int32_t partition = partition_cnt + 10;
- TEST_SAYL(4, "partition \"%.*s\" to %" PRId32 "\n", (int)keylen,
- (const char *)keydata, partition);
- return partition;
-}
-
-
-/* FIXME: This doesn't seem to trigger the bug in #797.
- * Still a useful test though. */
-static void do_test_failed_partitioning(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- rd_kafka_topic_conf_t *tconf;
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- int i;
- int msgcnt = test_quick ? 100 : 10000;
-
- test_conf_init(&conf, &tconf, 0);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_conf_set(conf, "sticky.partitioning.linger.ms", "0");
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rd_kafka_topic_conf_set_partitioner_cb(tconf, my_invalid_partitioner);
- test_topic_conf_set(tconf, "message.timeout.ms",
- tsprintf("%d", tmout_multip(10000)));
- rkt = rd_kafka_topic_new(rk, topic, tconf);
- TEST_ASSERT(rkt != NULL, "%s", rd_kafka_err2str(rd_kafka_last_error()));
-
- /* Produce some messages (to p 0) to create topic */
- test_produce_msgs(rk, rkt, 0, 0, 0, 2, NULL, 0);
-
- /* Now use partitioner */
- for (i = 0; i < msgcnt; i++) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, 0, NULL, 0,
- NULL, 0, NULL) == -1)
- err = rd_kafka_last_error();
- if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
- TEST_FAIL(
- "produce(): "
- "Expected UNKNOWN_PARTITION, got %s\n",
- rd_kafka_err2str(err));
- }
- test_flush(rk, 5000);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-}
-
-
-static void part_dr_msg_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque) {
- int32_t *partp = rkmessage->_private;
- int *remainsp = opaque;
-
- if (rkmessage->err) {
- /* Will fail later */
- TEST_WARN("Delivery failed: %s\n",
- rd_kafka_err2str(rkmessage->err));
- *partp = -1;
- } else {
- *partp = rkmessage->partition;
- }
-
- (*remainsp)--;
-}
-
-/**
- * @brief Test single \p partitioner
- */
-static void do_test_partitioner(const char *topic,
- const char *partitioner,
- int msgcnt,
- const char **keys,
- const int32_t *exp_part) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- int i;
- int32_t *parts;
- int remains = msgcnt;
- int randcnt = 0;
- int fails = 0;
-
- TEST_SAY(_C_MAG "Test partitioner \"%s\"\n", partitioner);
-
- test_conf_init(&conf, NULL, 30);
- rd_kafka_conf_set_opaque(conf, &remains);
- rd_kafka_conf_set_dr_msg_cb(conf, part_dr_msg_cb);
- test_conf_set(conf, "partitioner", partitioner);
- test_conf_set(conf, "sticky.partitioning.linger.ms", "0");
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- parts = malloc(msgcnt * sizeof(*parts));
- for (i = 0; i < msgcnt; i++)
- parts[i] = -1;
-
- /*
- * Produce messages
- */
- for (i = 0; i < msgcnt; i++) {
- rd_kafka_resp_err_t err;
-
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_KEY(keys[i], keys[i] ? strlen(keys[i]) : 0),
- RD_KAFKA_V_OPAQUE(&parts[i]), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() failed: %s",
- rd_kafka_err2str(err));
-
- randcnt += exp_part[i] == -1;
- }
-
- rd_kafka_flush(rk, tmout_multip(10000));
-
- TEST_ASSERT(remains == 0, "Expected remains=%d, not %d for %d messages",
- 0, remains, msgcnt);
-
- /*
- * Verify produced partitions to expected partitions.
- */
-
- /* First look for produce failures */
- for (i = 0; i < msgcnt; i++) {
- if (parts[i] == -1) {
- TEST_WARN("Message #%d (exp part %" PRId32
- ") "
- "was not successfully produced\n",
- i, exp_part[i]);
- fails++;
- }
- }
-
- TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
-
-
- if (randcnt == msgcnt) {
- /* If all expected partitions are random make sure
- * the produced partitions have some form of
- * random distribution */
- int32_t last_part = parts[0];
- int samecnt = 0;
-
- for (i = 0; i < msgcnt; i++) {
- samecnt += parts[i] == last_part;
- last_part = parts[i];
- }
-
- TEST_ASSERT(samecnt < msgcnt,
- "No random distribution, all on partition %" PRId32,
- last_part);
- } else {
- for (i = 0; i < msgcnt; i++) {
- if (exp_part[i] != -1 && parts[i] != exp_part[i]) {
- TEST_WARN(
- "Message #%d expected partition "
- "%" PRId32 " but got %" PRId32 ": %s\n",
- i, exp_part[i], parts[i], keys[i]);
- fails++;
- }
- }
-
-
- TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
- }
-
- free(parts);
-
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN "Test partitioner \"%s\": PASS\n", partitioner);
-}
-
-extern uint32_t rd_crc32(const char *, size_t);
-
-/**
- * @brief Test all builtin partitioners
- */
-static void do_test_partitioners(void) {
- int part_cnt = test_quick ? 7 : 17;
-#define _MSG_CNT 5
- const char *unaligned = "123456";
- /* Message keys */
- const char *keys[_MSG_CNT] = {
- NULL,
- "", // empty
- unaligned + 1,
- "this is another string with more length to it perhaps", "hejsan"};
- struct {
- const char *partitioner;
- /* Expected partition per message (see keys above) */
- int32_t exp_part[_MSG_CNT];
- } ptest[] = {{"random", {-1, -1, -1, -1, -1}},
- {"consistent",
- {/* These constants were acquired using
- * the 'crc32' command on OSX */
- 0x0 % part_cnt, 0x0 % part_cnt, 0xb1b451d7 % part_cnt,
- 0xb0150df7 % part_cnt, 0xd077037e % part_cnt}},
- {"consistent_random",
- {-1, -1, 0xb1b451d7 % part_cnt, 0xb0150df7 % part_cnt,
- 0xd077037e % part_cnt}},
- {"murmur2",
- {/* .. using tests/java/Murmur2Cli */
- 0x106e08d9 % part_cnt, 0x106e08d9 % part_cnt,
- 0x058d780f % part_cnt, 0x4f7703da % part_cnt,
- 0x5ec19395 % part_cnt}},
- {"murmur2_random",
- {-1, 0x106e08d9 % part_cnt, 0x058d780f % part_cnt,
- 0x4f7703da % part_cnt, 0x5ec19395 % part_cnt}},
- {"fnv1a",
- {/* .. using https://play.golang.org/p/hRkA4xtYyJ6 */
- 0x7ee3623b % part_cnt, 0x7ee3623b % part_cnt,
- 0x27e6f469 % part_cnt, 0x155e3e5f % part_cnt,
- 0x17b1e27a % part_cnt}},
- {"fnv1a_random",
- {-1, 0x7ee3623b % part_cnt, 0x27e6f469 % part_cnt,
- 0x155e3e5f % part_cnt, 0x17b1e27a % part_cnt}},
- {NULL}};
- int pi;
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
-
- test_create_topic(NULL, topic, part_cnt, 1);
-
- for (pi = 0; ptest[pi].partitioner; pi++) {
- do_test_partitioner(topic, ptest[pi].partitioner, _MSG_CNT,
- keys, ptest[pi].exp_part);
- }
-}
-
-int main_0048_partitioner(int argc, char **argv) {
- if (test_can_create_topics(0))
- do_test_partitioners();
- do_test_failed_partitioning();
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c
deleted file mode 100644
index 6083a1a76..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#if WITH_SOCKEM
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Verify that consumption continues after broker connectivity failure.
- */
-
-static int simulate_network_down = 0;
-
-/**
- * @brief Sockem connect, called from **internal librdkafka thread** through
- * librdkafka's connect_cb
- */
-static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
- int r;
-
- TEST_LOCK();
- r = simulate_network_down;
- TEST_UNLOCK();
-
- if (r) {
- sockem_close(skm);
- return ECONNREFUSED;
- } else {
- /* Let it go real slow so we don't consume all
- * the messages right away. */
- sockem_set(skm, "rx.thruput", 100000, NULL);
- }
- return 0;
-}
-
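-/* Hedged note (added comment): sockem_set() takes name/value pairs
- * terminated by NULL; "rx.thruput" above appears to throttle receive
- * throughput to roughly 100 kB/s so the connection can be cut while
- * consumption is still in progress. */
-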
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- /* Ignore connectivity errors since we'll be bringing down
- * the connectivity on purpose.
- * The SASL authenticator would otherwise interpret a connection-down
- * event in the auth state as the broker not supporting SASL PLAIN. */
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
- err == RD_KAFKA_RESP_ERR__AUTHENTICATION)
- return 0;
- return 1;
-}
-
-
-int main_0049_consume_conn_close(int argc, char **argv) {
- rd_kafka_t *rk;
- const char *topic = test_mk_topic_name("0049_consume_conn_close", 1);
- uint64_t testid;
- int msgcnt = test_quick ? 100 : 10000;
- test_msgver_t mv;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_topic_partition_list_t *assignment;
- rd_kafka_resp_err_t err;
-
- if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
- TEST_SKIP(
- "KNOWN ISSUE: ApiVersionRequest+SaslHandshake "
- "will not play well with sudden disconnects\n");
- return 0;
- }
-
- test_conf_init(&conf, &tconf, 60);
- /* Want an even number so it is divisible by two without surprises */
- msgcnt = (msgcnt / (int)test_timeout_multiplier) & ~1;
-
- testid = test_id_generate();
- test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
-
-
- test_socket_enable(conf);
- test_curr->connect_cb = connect_cb;
- test_curr->is_fatal_cb = is_fatal_cb;
-
- test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
-
- rk = test_create_consumer(topic, NULL, conf, tconf);
-
- test_consumer_subscribe(rk, topic);
-
- test_msgver_init(&mv, testid);
-
- test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt / 2, &mv);
-
- err = rd_kafka_assignment(rk, &assignment);
- TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err));
- TEST_ASSERT(assignment->cnt > 0, "empty assignment");
-
- TEST_SAY("Bringing down the network\n");
-
- TEST_LOCK();
- simulate_network_down = 1;
- TEST_UNLOCK();
- test_socket_close_all(test_curr, 1 /*reinit*/);
-
- TEST_SAY("Waiting for session timeout to expire (6s), and then some\n");
-
- /* Commit an offset, which should fail, to trigger the offset commit
- * callback fallback (CONSUMER_ERR) */
- assignment->elems[0].offset = 123456789;
- TEST_SAY("Committing offsets while down, should fail eventually\n");
- err = rd_kafka_commit(rk, assignment, 1 /*async*/);
- TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err));
- rd_kafka_topic_partition_list_destroy(assignment);
-
- rd_sleep(10);
-
- TEST_SAY("Bringing network back up\n");
- TEST_LOCK();
- simulate_network_down = 0;
- TEST_UNLOCK();
-
- TEST_SAY("Continuing to consume..\n");
- test_consumer_poll("consume.up2", rk, testid, -1, msgcnt / 2,
- msgcnt / 2, &mv);
-
- test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP,
- 0, msgcnt);
-
- test_msgver_clear(&mv);
-
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- return 0;
-}
-
-
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c
deleted file mode 100644
index d55e6e09a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Verify that quick subscription additions work.
- * * Create topics T1,T2,T3
- * * Create consumer
- * * Subscribe to T1
- * * Subscribe to T1,T2
- * * Subscribe to T1,T2,T3
- * * Verify that all messages from all three topics are consumed
- * * Subscribe to T1,T3
- * * Verify that there were no duplicate messages.
- */
-
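-/* Hedged sketch (added; not part of the original test): the incremental
- * subscription pattern exercised below. rd_kafka_subscribe() always takes
- * the full desired topic set, so "adding" a topic means re-subscribing
- * with the old set plus the new one. Illustrative only; not called here. */
-static void example_incremental_subscribe(rd_kafka_t *rk) {
-        rd_kafka_topic_partition_list_t *l =
-            rd_kafka_topic_partition_list_new(2);
-
-        /* Subscribe to T1 only. */
-        rd_kafka_topic_partition_list_add(l, "T1", RD_KAFKA_PARTITION_UA);
-        rd_kafka_subscribe(rk, l);
-
-        /* Re-subscribe with T1+T2: triggers a rebalance, no consumer
-         * restart needed. */
-        rd_kafka_topic_partition_list_add(l, "T2", RD_KAFKA_PARTITION_UA);
-        rd_kafka_subscribe(rk, l);
-
-        rd_kafka_topic_partition_list_destroy(l);
-}
-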
-int main_0050_subscribe_adds(int argc, char **argv) {
- rd_kafka_t *rk;
-#define TOPIC_CNT 3
- char *topic[TOPIC_CNT] = {
- rd_strdup(test_mk_topic_name("0050_subscribe_adds_1", 1)),
- rd_strdup(test_mk_topic_name("0050_subscribe_adds_2", 1)),
- rd_strdup(test_mk_topic_name("0050_subscribe_adds_3", 1)),
- };
- uint64_t testid;
- int msgcnt = test_quick ? 100 : 10000;
- test_msgver_t mv;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- int i;
- rd_kafka_topic_partition_list_t *tlist;
- rd_kafka_resp_err_t err;
-
- msgcnt = (msgcnt / TOPIC_CNT) * TOPIC_CNT;
- testid = test_id_generate();
-
- rk = test_create_producer();
- for (i = 0; i < TOPIC_CNT; i++) {
- rd_kafka_topic_t *rkt;
-
- rkt = test_create_producer_topic(rk, topic[i], NULL);
-
- test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
- (msgcnt / TOPIC_CNT) * i,
- (msgcnt / TOPIC_CNT), NULL, 1000);
-
- rd_kafka_topic_destroy(rkt);
- }
-
- rd_kafka_destroy(rk);
-
- test_conf_init(&conf, &tconf, 60);
- test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
-
- rk = test_create_consumer(topic[0], NULL, conf, tconf);
-
- tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT);
- for (i = 0; i < TOPIC_CNT; i++) {
- rd_kafka_topic_partition_list_add(tlist, topic[i],
- RD_KAFKA_PARTITION_UA);
- TEST_SAY("Subscribe to %d topic(s):\n", tlist->cnt);
- test_print_partition_list(tlist);
-
- err = rd_kafka_subscribe(rk, tlist);
- TEST_ASSERT(!err, "subscribe() failed: %s",
- rd_kafka_err2str(err));
- }
-
- test_msgver_init(&mv, testid);
-
- test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv);
-
- /* Now remove T2 */
- rd_kafka_topic_partition_list_del(tlist, topic[1],
- RD_KAFKA_PARTITION_UA);
- err = rd_kafka_subscribe(rk, tlist);
- TEST_ASSERT(!err, "subscribe() failed: %s", rd_kafka_err2str(err));
-
- test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5));
-
-
- test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP,
- 0, msgcnt);
-
- test_msgver_clear(&mv);
-
- rd_kafka_topic_partition_list_destroy(tlist);
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- for (i = 0; i < TOPIC_CNT; i++)
- rd_free(topic[i]);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c
deleted file mode 100644
index 6f97b2ee4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Verify that quick assignment additions work.
- * * Create topics T1,T2,T3
- * * Create consumer
- * * Assign T1
- * * Assign T1,T2
- * * Assign T1,T2,T3
- * * Verify that all messages from all three topics are consumed
- * * Assign T1,T3
- * * Verify that there were no duplicate messages.
- */
-
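-/* Hedged sketch (added; not part of the original test): unlike
- * subscribe(), rd_kafka_assign() takes explicit (topic, partition) pairs
- * and bypasses the group rebalance protocol; each call replaces the
- * previous assignment. Illustrative only; not called here. */
-static rd_kafka_resp_err_t example_assign_pair(rd_kafka_t *rk,
-                                               const char *t1,
-                                               const char *t3) {
-        rd_kafka_topic_partition_list_t *l =
-            rd_kafka_topic_partition_list_new(2);
-        rd_kafka_resp_err_t err;
-
-        rd_kafka_topic_partition_list_add(l, t1, 0);
-        rd_kafka_topic_partition_list_add(l, t3, 0);
-        err = rd_kafka_assign(rk, l);
-        rd_kafka_topic_partition_list_destroy(l);
-        return err;
-}
-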
-int main_0051_assign_adds(int argc, char **argv) {
- rd_kafka_t *rk;
-#define TOPIC_CNT 3
- char *topic[TOPIC_CNT] = {
- rd_strdup(test_mk_topic_name("0051_assign_adds_1", 1)),
- rd_strdup(test_mk_topic_name("0051_assign_adds_2", 1)),
- rd_strdup(test_mk_topic_name("0051_assign_adds_3", 1)),
- };
- uint64_t testid;
- int msgcnt = test_quick ? 100 : 1000;
- test_msgver_t mv;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- int i;
- rd_kafka_topic_partition_list_t *tlist;
- rd_kafka_resp_err_t err;
-
- msgcnt = (msgcnt / TOPIC_CNT) * TOPIC_CNT;
- testid = test_id_generate();
-
- rk = test_create_producer();
- for (i = 0; i < TOPIC_CNT; i++) {
- rd_kafka_topic_t *rkt;
-
- rkt = test_create_producer_topic(rk, topic[i], NULL);
-
- test_produce_msgs(rk, rkt, testid, 0, (msgcnt / TOPIC_CNT) * i,
- (msgcnt / TOPIC_CNT), NULL, 100);
-
- rd_kafka_topic_destroy(rkt);
- }
-
- rd_kafka_destroy(rk);
-
- test_conf_init(&conf, &tconf, 60);
- test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
-
- rk = test_create_consumer(topic[0], NULL, conf, tconf);
-
- tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT);
- for (i = 0; i < TOPIC_CNT; i++) {
- rd_kafka_topic_partition_list_add(tlist, topic[i], 0);
- TEST_SAY("Assign %d topic(s):\n", tlist->cnt);
- test_print_partition_list(tlist);
-
- err = rd_kafka_assign(rk, tlist);
- TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err));
- }
-
- test_msgver_init(&mv, testid);
-
- TEST_SAY("Expecting to consume all %d messages from %d topics\n",
- msgcnt, TOPIC_CNT);
-
- test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv);
-
- /* Now remove T2 */
- rd_kafka_topic_partition_list_del(tlist, topic[1], 0);
- err = rd_kafka_assign(rk, tlist);
- TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err));
-
- TEST_SAY(
- "Should not see any messages for session.timeout.ms+some more\n");
- test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5));
-
- test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP,
- 0, msgcnt);
-
- test_msgver_clear(&mv);
-
- rd_kafka_topic_partition_list_destroy(tlist);
-
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- for (i = 0; i < TOPIC_CNT; i++)
- rd_free(topic[i]);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c
deleted file mode 100644
index ef9b89878..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * Verify message timestamp behaviour on supporting brokers (>=0.10.0.0).
- * Issue #858
- */
-struct timestamp_range {
- int64_t min;
- int64_t max;
-};
-
-static const struct timestamp_range invalid_timestamp = {-1, -1};
-static struct timestamp_range broker_timestamp;
-static struct timestamp_range my_timestamp;
-
-static void prepare_timestamps(void) {
- struct timeval ts;
- rd_gettimeofday(&ts, NULL);
-
- /* broker timestamps expected to be within 600 seconds */
- broker_timestamp.min = (int64_t)ts.tv_sec * 1000LLU;
- broker_timestamp.max = broker_timestamp.min + (600 * 1000LLU);
-
- /* client timestamps: convert seconds to milliseconds and set
- * 24 hours in the future to be outside of broker timestamps */
- my_timestamp.min = my_timestamp.max =
- ((int64_t)ts.tv_sec * 1000LL) + (24 * 3600 * 1000LL);
-}
-
-/**
- * @brief Produce messages using the compression codec \p codec
- */
-static void produce_msgs(const char *topic,
- int partition,
- uint64_t testid,
- int msgcnt,
- const char *broker_version,
- const char *codec) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- int i;
- char key[128], buf[100];
- int msgcounter = msgcnt;
-
- test_conf_init(&conf, NULL, 0);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_conf_set(conf, "compression.codec", codec);
- test_conf_set(conf, "broker.version.fallback", broker_version);
- if (!strncmp(broker_version, "0.8", 3) ||
- !strncmp(broker_version, "0.9", 3)) {
- test_conf_set(conf, "api.version.request", "false");
- test_conf_set(conf, "enable.idempotence", "false");
- }
-
- /* Make sure to trigger a bunch of MessageSets */
- test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt / 5));
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- for (i = 0; i < msgcnt; i++) {
- rd_kafka_resp_err_t err;
-
- test_prepare_msg(testid, partition, i, buf, sizeof(buf), key,
- sizeof(key));
-
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_VALUE(buf, sizeof(buf)),
- RD_KAFKA_V_KEY(key, sizeof(key)),
- RD_KAFKA_V_TIMESTAMP(my_timestamp.min),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
- if (err)
- TEST_FAIL("producev() failed at msg #%d/%d: %s", i,
- msgcnt, rd_kafka_err2str(err));
- }
-
- TEST_SAY("Waiting for %d messages to be produced\n", msgcounter);
- while (msgcounter > 0)
- rd_kafka_poll(rk, 100);
-
- rd_kafka_destroy(rk);
-}
-
-static void
-consume_msgs_verify_timestamps(const char *topic,
- int partition,
- uint64_t testid,
- int msgcnt,
- const struct timestamp_range *exp_timestamp) {
- test_msgver_t mv;
-
- test_msgver_init(&mv, testid);
- test_consume_msgs_easy_mv(topic, topic, -1, testid, -1, msgcnt, NULL,
- &mv);
-
- test_msgver_verify0(
- __FUNCTION__, __LINE__, topic, &mv,
- TEST_MSGVER_RANGE | TEST_MSGVER_BY_MSGID | TEST_MSGVER_BY_TIMESTAMP,
- (struct test_mv_vs) {.msg_base = 0,
- .exp_cnt = msgcnt,
- .timestamp_min = exp_timestamp->min,
- .timestamp_max = exp_timestamp->max});
-
- test_msgver_clear(&mv);
-}
-
-
-
-static void test_timestamps(const char *broker_tstype,
- const char *broker_version,
- const char *codec,
- const struct timestamp_range *exp_timestamps) {
- const char *topic =
- test_mk_topic_name(tsprintf("0052_msg_timestamps_%s_%s_%s",
- broker_tstype, broker_version, codec),
- 1);
- const int msgcnt = 20;
- uint64_t testid = test_id_generate();
-
- if ((!strncmp(broker_version, "0.9", 3) ||
- !strncmp(broker_version, "0.8", 3)) &&
- !test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
- TEST_SAY(_C_YEL
- "Skipping %s, %s test: "
- "SaslHandshake not supported by broker v%s" _C_CLR
- "\n",
- broker_tstype, codec, broker_version);
- return;
- }
-
- TEST_SAY(_C_MAG "Timestamp test using %s\n", topic);
- test_timeout_set(30);
-
- test_kafka_topics(
- "--create --topic \"%s\" "
- "--replication-factor 1 --partitions 1 "
- "--config message.timestamp.type=%s",
- topic, broker_tstype);
-
- TEST_SAY(_C_MAG "Producing %d messages to %s\n", msgcnt, topic);
- produce_msgs(topic, 0, testid, msgcnt, broker_version, codec);
-
- TEST_SAY(_C_MAG
- "Consuming and verifying %d messages from %s "
- "with expected timestamps %" PRId64 "..%" PRId64 "\n",
- msgcnt, topic, exp_timestamps->min, exp_timestamps->max);
-
- consume_msgs_verify_timestamps(topic, 0, testid, msgcnt,
- exp_timestamps);
-}
-
-
-int main_0052_msg_timestamps(int argc, char **argv) {
-
- if (!test_can_create_topics(1))
- return 0;
-
- if (test_needs_auth()) {
- TEST_SKIP("Test cluster requires authentication/SSL\n");
- return 0;
- }
-
- /* The broker version limits the producer's feature set:
- * for 0.9.0.0 no timestamp will be transmitted,
- * but for 0.10.1.0 or newer (where api.version.request is true)
- * the producer will set the timestamp.
- * In all cases we want a reasonable timestamp back.
- *
- * An explicit broker LogAppendTime setting will overwrite
- * any producer-provided timestamp.
- *
- * Using the old non-timestamp-aware protocol without
- * LogAppendTime will cause unset/invalid timestamps.
- *
- * Any other combination should honour the producer-created
- * timestamps.
- */
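- /* Hedged summary (added comment) of the expectations exercised below:
- *
- *   msg.timestamp.type   protocol (fallback)   expected timestamp
- *   CreateTime           0.10.1.0 (new)        producer's (my_timestamp)
- *   LogAppendTime        any                   broker's (broker_timestamp)
- *   CreateTime           0.9.0.0 (old)         invalid (-1)
- */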
- prepare_timestamps();
-
- test_timestamps("CreateTime", "0.10.1.0", "none", &my_timestamp);
- test_timestamps("LogAppendTime", "0.10.1.0", "none", &broker_timestamp);
- test_timestamps("CreateTime", "0.9.0.0", "none", &invalid_timestamp);
- test_timestamps("LogAppendTime", "0.9.0.0", "none", &broker_timestamp);
-#if WITH_ZLIB
- test_timestamps("CreateTime", "0.10.1.0", "gzip", &my_timestamp);
- test_timestamps("LogAppendTime", "0.10.1.0", "gzip", &broker_timestamp);
- test_timestamps("CreateTime", "0.9.0.0", "gzip", &invalid_timestamp);
- test_timestamps("LogAppendTime", "0.9.0.0", "gzip", &broker_timestamp);
-#endif
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp
deleted file mode 100644
index a61755c30..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp
+++ /dev/null
@@ -1,535 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <fstream>
-#include <iterator>
-#include <string>
-#include "testcpp.h"
-
-#if WITH_RAPIDJSON
-#include <rapidjson/document.h>
-#include <rapidjson/schema.h>
-#include <rapidjson/filereadstream.h>
-#include <rapidjson/stringbuffer.h>
-#include <rapidjson/error/en.h>
-#include <rapidjson/prettywriter.h>
-#endif
-
-static const char *stats_schema_path = "../src/statistics_schema.json";
-
-#if WITH_RAPIDJSON
-/**
- * @brief Statistics schema validator
- */
-class TestSchemaValidator {
- public:
- /* NULL-init members so the destructor is safe on a
- * default-constructed instance. */
- TestSchemaValidator() : sd_(NULL), schema_(NULL), validator_(NULL) {
- }
- TestSchemaValidator(const std::string schema_path) {
- /* Read schema from file */
- schema_path_ = schema_path;
-
- std::ifstream f(schema_path.c_str());
- if (!f.is_open())
- Test::Fail(tostr() << "Failed to open schema " << schema_path << ": "
- << strerror(errno));
- std::string schema_str((std::istreambuf_iterator<char>(f)),
- (std::istreambuf_iterator<char>()));
-
- /* Parse schema */
- sd_ = new rapidjson::Document();
- if (sd_->Parse(schema_str.c_str()).HasParseError())
- Test::Fail(tostr() << "Failed to parse statistics schema: "
- << rapidjson::GetParseError_En(sd_->GetParseError())
- << " at " << sd_->GetErrorOffset());
-
- schema_ = new rapidjson::SchemaDocument(*sd_);
- validator_ = new rapidjson::SchemaValidator(*schema_);
- }
-
- ~TestSchemaValidator() {
- if (sd_)
- delete sd_;
- if (schema_)
- delete schema_;
- if (validator_)
- delete validator_;
- }
-
- void validate(const std::string &json_doc) {
- /* Parse JSON to validate */
- rapidjson::Document d;
- if (d.Parse(json_doc.c_str()).HasParseError())
- Test::Fail(tostr() << "Failed to parse stats JSON: "
- << rapidjson::GetParseError_En(d.GetParseError())
- << " at " << d.GetErrorOffset());
-
- /* Validate using schema */
- if (!d.Accept(*validator_)) {
- rapidjson::StringBuffer sb;
-
- validator_->GetInvalidSchemaPointer().StringifyUriFragment(sb);
- Test::Say(tostr() << "Schema: " << sb.GetString() << "\n");
- Test::Say(tostr() << "Invalid keyword: "
- << validator_->GetInvalidSchemaKeyword() << "\n");
- sb.Clear();
-
- validator_->GetInvalidDocumentPointer().StringifyUriFragment(sb);
- Test::Say(tostr() << "Invalid document: " << sb.GetString() << "\n");
- sb.Clear();
-
- Test::Fail(tostr() << "JSON validation using schema " << schema_path_
- << " failed");
- }
-
- Test::Say(3, "JSON document validated using schema " + schema_path_ + "\n");
- }
-
- private:
- std::string schema_path_;
- rapidjson::Document *sd_;
- rapidjson::SchemaDocument *schema_;
- rapidjson::SchemaValidator *validator_;
-};
-
-
-#else
-
-/* Dummy validator doing nothing when RapidJSON is unavailable */
-class TestSchemaValidator {
- public:
- TestSchemaValidator() {
- }
- TestSchemaValidator(const std::string schema_path) {
- }
-
- ~TestSchemaValidator() {
- }
-
- void validate(const std::string &json_doc) {
- }
-};
-
-#endif
-
-class myEventCb : public RdKafka::EventCb {
- public:
- myEventCb(const std::string schema_path) :
- validator_(TestSchemaValidator(schema_path)) {
- stats_cnt = 0;
- }
-
- int stats_cnt;
- std::string last; /**< Last stats document */
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_STATS:
- if (!(stats_cnt % 10))
- Test::Say(tostr() << "Stats (#" << stats_cnt << "): " << event.str()
- << "\n");
- if (event.str().length() > 20)
- stats_cnt += 1;
- validator_.validate(event.str());
- last = event.str();
- break;
- default:
- break;
- }
- }
-
- private:
- TestSchemaValidator validator_;
-};
-
-
-/**
- * @brief Verify that stats are emitted according to statistics.interval.ms
- */
-void test_stats_timing() {
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
- myEventCb my_event = myEventCb(stats_schema_path);
- std::string errstr;
-
- if (conf->set("statistics.interval.ms", "100", errstr) !=
- RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- if (conf->set("event_cb", &my_event, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- int64_t t_start = test_clock();
-
- while (my_event.stats_cnt < 12)
- p->poll(1000);
-
- int elapsed = (int)((test_clock() - t_start) / 1000);
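- /* 12 stats events at statistics.interval.ms=100 should take ~1200ms. */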
- const int expected_time = 1200;
-
- Test::Say(tostr() << my_event.stats_cnt
- << " (expected 12) stats callbacks received in " << elapsed
- << "ms (expected " << expected_time << "ms +-25%)\n");
-
- if (elapsed < expected_time * 0.75 || elapsed > expected_time * 1.25) {
- /* We can't rely on CIs giving our test job enough CPU to finish
- * in time, so don't error out even if the time is outside the window */
- if (test_on_ci)
- Test::Say(tostr() << "WARNING: Elapsed time " << elapsed
- << "ms outside +-25% window (" << expected_time
- << "ms), cnt " << my_event.stats_cnt);
- else
- Test::Fail(tostr() << "Elapsed time " << elapsed
- << "ms outside +-25% window (" << expected_time
- << "ms), cnt " << my_event.stats_cnt);
- }
- delete p;
-}
-
-
-
-#if WITH_RAPIDJSON
-
-/**
- * @brief Expected partition stats
- */
-struct exp_part_stats {
- std::string topic; /**< Topic */
- int32_t part; /**< Partition id */
- int msgcnt; /**< Expected message count */
- int msgsize; /**< Expected per message size.
- * This includes both key and value lengths */
-
- /* Calculated */
- int64_t totsize; /**< Message size sum */
-};
-
-/**
- * @brief Verify end-to-end producer and consumer stats.
- */
-static void verify_e2e_stats(const std::string &prod_stats,
- const std::string &cons_stats,
- struct exp_part_stats *exp_parts,
- int partcnt) {
- /**
- * Parse JSON stats
- * These documents are already validated in the Event callback.
- */
- rapidjson::Document p;
- if (p.Parse<rapidjson::kParseValidateEncodingFlag>(prod_stats.c_str())
- .HasParseError())
- Test::Fail(tostr() << "Failed to parse producer stats JSON: "
- << rapidjson::GetParseError_En(p.GetParseError())
- << " at " << p.GetErrorOffset());
-
- rapidjson::Document c;
- if (c.Parse<rapidjson::kParseValidateEncodingFlag>(cons_stats.c_str())
- .HasParseError())
- Test::Fail(tostr() << "Failed to parse consumer stats JSON: "
- << rapidjson::GetParseError_En(c.GetParseError())
- << " at " << c.GetErrorOffset());
-
- assert(p.HasMember("name"));
- assert(c.HasMember("name"));
- assert(p.HasMember("type"));
- assert(c.HasMember("type"));
-
- Test::Say(tostr() << "Verifying stats from Producer " << p["name"].GetString()
- << " and Consumer " << c["name"].GetString() << "\n");
-
- assert(!strcmp(p["type"].GetString(), "producer"));
- assert(!strcmp(c["type"].GetString(), "consumer"));
-
- int64_t exp_tot_txmsgs = 0;
- int64_t exp_tot_txmsg_bytes = 0;
- int64_t exp_tot_rxmsgs = 0;
- int64_t exp_tot_rxmsg_bytes = 0;
-
- for (int part = 0; part < partcnt; part++) {
- /*
- * Find partition stats.
- */
-
- /* Construct the partition path. */
- char path[256];
- rd_snprintf(path, sizeof(path), "/topics/%s/partitions/%d",
- exp_parts[part].topic.c_str(), exp_parts[part].part);
- Test::Say(tostr() << "Looking up partition " << exp_parts[part].part
- << " with path " << path << "\n");
-
- /* Even though GetValueByPointer() takes a "char[]" it can only be used
- * with perfectly sized char buffers or string literals since it
- * does not respect NUL terminators.
- * So instead convert the path to a Pointer. */
- rapidjson::Pointer jpath((const char *)path);
-
- rapidjson::Value *pp = rapidjson::GetValueByPointer(p, jpath);
- if (!pp)
- Test::Fail(tostr() << "Producer: could not find " << path << " in "
- << prod_stats << "\n");
-
- rapidjson::Value *cp = rapidjson::GetValueByPointer(c, jpath);
- if (!cp)
- Test::Fail(tostr() << "Consumer: could not find " << path << " in "
- << cons_stats << "\n");
-
- assert(pp->HasMember("partition"));
- assert(pp->HasMember("txmsgs"));
- assert(pp->HasMember("txbytes"));
-
- assert(cp->HasMember("partition"));
- assert(cp->HasMember("rxmsgs"));
- assert(cp->HasMember("rxbytes"));
-
- Test::Say(tostr() << "partition: " << (*pp)["partition"].GetInt() << "\n");
-
- int64_t txmsgs = (*pp)["txmsgs"].GetInt();
- int64_t txbytes = (*pp)["txbytes"].GetInt();
- int64_t rxmsgs = (*cp)["rxmsgs"].GetInt();
- int64_t rxbytes = (*cp)["rxbytes"].GetInt();
-
- exp_tot_txmsgs += txmsgs;
- exp_tot_txmsg_bytes += txbytes;
- exp_tot_rxmsgs += rxmsgs;
- exp_tot_rxmsg_bytes += rxbytes;
-
- Test::Say(tostr() << "Producer partition: " << (*pp)["partition"].GetInt()
- << ": "
- << "txmsgs: " << txmsgs << " vs "
- << exp_parts[part].msgcnt << ", "
- << "txbytes: " << txbytes << " vs "
- << exp_parts[part].totsize << "\n");
- Test::Say(tostr() << "Consumer partition: " << (*cp)["partition"].GetInt()
- << ": "
- << "rxmsgs: " << rxmsgs << " vs "
- << exp_parts[part].msgcnt << ", "
- << "rxbytes: " << rxbytes << " vs "
- << exp_parts[part].totsize << "\n");
- }
-
- /* Check top-level total stats */
-
- assert(p.HasMember("txmsgs"));
- assert(p.HasMember("txmsg_bytes"));
- assert(p.HasMember("rxmsgs"));
- assert(p.HasMember("rxmsg_bytes"));
-
- int64_t tot_txmsgs = p["txmsgs"].GetInt();
- int64_t tot_txmsg_bytes = p["txmsg_bytes"].GetInt();
- int64_t tot_rxmsgs = c["rxmsgs"].GetInt();
- int64_t tot_rxmsg_bytes = c["rxmsg_bytes"].GetInt();
-
- Test::Say(tostr() << "Producer total: "
- << "txmsgs: " << tot_txmsgs << " vs " << exp_tot_txmsgs
- << ", "
- << "txbytes: " << tot_txmsg_bytes << " vs "
- << exp_tot_txmsg_bytes << "\n");
- Test::Say(tostr() << "Consumer total: "
- << "rxmsgs: " << tot_rxmsgs << " vs " << exp_tot_rxmsgs
- << ", "
- << "rxbytes: " << tot_rxmsg_bytes << " vs "
- << exp_tot_rxmsg_bytes << "\n");
-}
-
-/**
- * @brief Verify stats JSON structure and individual metric fields.
- *
- * To capture as much verifiable data as possible we run a full
- * producer - consumer end to end test and verify that counters
- * and states are emitted accordingly.
- *
- * Requires RapidJSON (for parsing the stats).
- */
-static void test_stats() {
- std::string errstr;
- RdKafka::Conf *conf;
- myEventCb producer_event(stats_schema_path);
- myEventCb consumer_event(stats_schema_path);
-
- std::string topic = Test::mk_topic_name("0053_stats", 1);
-
- const int partcnt = 2;
- int msgcnt = (test_quick ? 10 : 100) * partcnt;
- const int msgsize = 6 * 1024;
-
- /*
- * Common config for producer and consumer
- */
- Test::conf_init(&conf, NULL, 60);
- if (conf->set("statistics.interval.ms", "1000", errstr) !=
- RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
-
- /*
- * Create Producer
- */
- if (conf->set("event_cb", &producer_event, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
-
- /*
- * Create Consumer
- */
- conf->set("group.id", topic, errstr);
- conf->set("auto.offset.reset", "earliest", errstr);
- conf->set("enable.partition.eof", "false", errstr);
- if (conf->set("event_cb", &consumer_event, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /*
- * Set up consumer assignment (but assign after producing
- * since there will be no topics now) and expected partitions
- * for later verification.
- */
- std::vector<RdKafka::TopicPartition *> toppars;
- struct exp_part_stats exp_parts[partcnt] = {};
-
- for (int32_t part = 0; part < (int32_t)partcnt; part++) {
- toppars.push_back(RdKafka::TopicPartition::create(
- topic, part, RdKafka::Topic::OFFSET_BEGINNING));
- exp_parts[part].topic = topic;
- exp_parts[part].part = part;
- exp_parts[part].msgcnt = msgcnt / partcnt;
- exp_parts[part].msgsize = msgsize;
- exp_parts[part].totsize = 0;
- }
-
- /*
- * Produce messages
- */
- uint64_t testid = test_id_generate();
-
- char key[256];
- char *buf = (char *)malloc(msgsize);
-
- for (int32_t part = 0; part < (int32_t)partcnt; part++) {
- for (int i = 0; i < msgcnt / partcnt; i++) {
- test_prepare_msg(testid, part, i, buf, msgsize, key, sizeof(key));
- RdKafka::ErrorCode err =
- p->produce(topic, part, RdKafka::Producer::RK_MSG_COPY, buf, msgsize,
- key, sizeof(key), -1, NULL);
- if (err)
- Test::Fail("Produce failed: " + RdKafka::err2str(err));
- exp_parts[part].totsize += msgsize + sizeof(key);
- p->poll(0);
- }
- }
-
- free(buf);
-
- Test::Say("Waiting for final message delivery\n");
- /* Wait for delivery */
- p->flush(15 * 1000);
-
- /*
- * Start consuming partitions
- */
- c->assign(toppars);
- RdKafka::TopicPartition::destroy(toppars);
-
- /*
- * Consume the messages
- */
- int recvcnt = 0;
- Test::Say(tostr() << "Consuming " << msgcnt << " messages\n");
- while (recvcnt < msgcnt) {
- RdKafka::Message *msg = c->consume(-1);
- if (msg->err())
- Test::Fail("Consume failed: " + msg->errstr());
-
- int msgid;
- TestMessageVerify(testid, -1, &msgid, msg);
- recvcnt++;
- delete msg;
- }
-
- /*
- * Producer:
- * Wait for one last stats emit when all messages have been delivered.
- */
- int prev_cnt = producer_event.stats_cnt;
- while (prev_cnt == producer_event.stats_cnt) {
- Test::Say("Waiting for final producer stats event\n");
- p->poll(100);
- }
-
- /*
- * Consumer:
- * Wait for one last stats emit when all messages have been received;
- * since previous stats may have been enqueued but not yet served,
- * we skip the first two.
- */
- prev_cnt = consumer_event.stats_cnt;
- while (prev_cnt + 2 >= consumer_event.stats_cnt) {
- Test::Say(tostr() << "Waiting for final consumer stats event: "
- << consumer_event.stats_cnt << "\n");
- c->poll(100);
- }
-
-
- verify_e2e_stats(producer_event.last, consumer_event.last, exp_parts,
- partcnt);
-
-
- c->close();
-
- delete p;
- delete c;
-}
-#endif
-
-extern "C" {
-int main_0053_stats_timing(int argc, char **argv) {
- test_stats_timing();
- return 0;
-}
-
-int main_0053_stats(int argc, char **argv) {
-#if WITH_RAPIDJSON
- test_stats();
-#else
- Test::Skip("RapidJSON >=1.1.0 not available\n");
-#endif
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp
deleted file mode 100644
index 58c88b4a1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-/**
- * Test offset_for_times (KIP-79): time-based offset lookups.
- */
-
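-/* Hedged sketch (added; not part of the original test): minimal
- * offsetsForTimes() usage. The partition's offset field carries a
- * timestamp (ms) on input and, on success, returns the first offset with
- * a timestamp >= that value. Illustrative only; not called by the test. */
-static int64_t example_offset_for_time(RdKafka::Producer *p,
-                                       const std::string &topic,
-                                       int64_t ts_ms) {
-  std::vector<RdKafka::TopicPartition *> parts;
-  parts.push_back(RdKafka::TopicPartition::create(topic, 0, ts_ms));
-  RdKafka::ErrorCode err = p->offsetsForTimes(parts, tmout_multip(5000));
-  int64_t offset = err ? -1 : parts[0]->offset();
-  RdKafka::TopicPartition::destroy(parts);
-  return offset;
-}
-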
-
-static int verify_offset(const RdKafka::TopicPartition *tp,
- int64_t timestamp,
- int64_t exp_offset,
- RdKafka::ErrorCode exp_err) {
- int fails = 0;
- if (tp->err() != exp_err) {
- Test::FailLater(tostr()
- << " " << tp->topic() << " [" << tp->partition() << "] "
- << "expected error " << RdKafka::err2str(exp_err)
- << ", got " << RdKafka::err2str(tp->err()) << "\n");
- fails++;
- }
-
- if (!exp_err && tp->offset() != exp_offset) {
- Test::FailLater(tostr()
- << " " << tp->topic() << " [" << tp->partition() << "] "
- << "expected offset " << exp_offset << " for timestamp "
- << timestamp << ", got " << tp->offset() << "\n");
- fails++;
- }
-
- return fails;
-}
-
-
-static void test_offset_time(void) {
- std::vector<RdKafka::TopicPartition *> query_parts;
- std::string topic = Test::mk_topic_name("0054-offset_time", 1);
- RdKafka::Conf *conf, *tconf;
- int64_t timestamps[] = {
- /* timestamp, expected offset */
- 1234,
- 0,
- 999999999999,
- 1,
- };
- const int timestamp_cnt = 2; /* (timestamp, expected offset) pairs above */
- int fails = 0;
- std::string errstr;
-
- Test::conf_init(&conf, &tconf, 0);
-
- /* Need acks=all to make sure OffsetRequest correctly reads fully
- * written Produce record. */
- Test::conf_set(tconf, "acks", "all");
- Test::conf_set(conf, "api.version.request", "true");
- conf->set("dr_cb", &Test::DrCb, errstr);
- conf->set("default_topic_conf", tconf, errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 97, timestamps[0]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 98, timestamps[0]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 99, timestamps[0]));
-
- /* First query timestamps before topic exists, should fail. */
- Test::Say("Attempting first offsetsForTimes() query (should fail)\n");
- RdKafka::ErrorCode err = p->offsetsForTimes(query_parts, tmout_multip(10000));
- Test::Say("offsetsForTimes #1 with non-existing partitions returned " +
- RdKafka::err2str(err) + "\n");
- Test::print_TopicPartitions("offsetsForTimes #1", query_parts);
-
- if (err != RdKafka::ERR__UNKNOWN_PARTITION)
- Test::Fail(
- "offsetsForTimes #1 should have failed with UNKNOWN_PARTITION, "
- "not " +
- RdKafka::err2str(err));
-
- Test::Say("Producing to " + topic + "\n");
- for (int partition = 0; partition < 2; partition++) {
- for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
- err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
- (void *)topic.c_str(), topic.size(), NULL, 0,
- timestamps[ti], NULL);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("Produce failed: " + RdKafka::err2str(err));
- }
- }
-
- if (p->flush(tmout_multip(5000)) != 0)
- Test::Fail("Not all messages flushed");
-
-
- for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
- RdKafka::TopicPartition::destroy(query_parts);
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
-
- Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp "
- << timestamps[ti] << "\n");
- err = p->offsetsForTimes(query_parts, tmout_multip(5000));
- Test::print_TopicPartitions("offsetsForTimes", query_parts);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err));
-
- fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1],
- RdKafka::ERR_NO_ERROR);
- fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1],
- RdKafka::ERR_NO_ERROR);
- }
-
- /* repeat test with -1 timeout */
- for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
- RdKafka::TopicPartition::destroy(query_parts);
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
-
- Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp "
- << timestamps[ti] << " with a timeout of -1\n");
- err = p->offsetsForTimes(query_parts, -1);
- Test::print_TopicPartitions("offsetsForTimes", query_parts);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err));
-
- fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1],
- RdKafka::ERR_NO_ERROR);
- fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1],
- RdKafka::ERR_NO_ERROR);
- }
-
- /* And a negative test with a request that should timeout instantly. */
- for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
- RdKafka::TopicPartition::destroy(query_parts);
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
-
- Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp "
- << timestamps[ti]
- << " with minimal timeout (should fail)\n");
- err = p->offsetsForTimes(query_parts, 0);
- Test::print_TopicPartitions("offsetsForTimes", query_parts);
- if (err != RdKafka::ERR__TIMED_OUT)
- Test::Fail(
- "expected offsetsForTimes(timeout=0) to fail with TIMED_OUT, not " +
- RdKafka::err2str(err));
- }
-
- /* Include non-existent partitions */
- for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
- RdKafka::TopicPartition::destroy(query_parts);
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 2, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 20, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 3, timestamps[ti]));
- query_parts.push_back(
- RdKafka::TopicPartition::create(topic, 21, timestamps[ti]));
- Test::Say("Attempting offsetsForTimes() with non-existent partitions\n");
- err = p->offsetsForTimes(query_parts, -1);
- Test::print_TopicPartitions("offsetsForTimes", query_parts);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("expected offsetsForTimes(timeout=0) to succeed, not " +
- RdKafka::err2str(err));
- fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1],
- RdKafka::ERR_NO_ERROR);
- fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1],
- RdKafka::ERR_NO_ERROR);
- fails += verify_offset(query_parts[2], timestamps[ti], -1,
- RdKafka::ERR_NO_ERROR);
- fails += verify_offset(query_parts[3], timestamps[ti], -1,
- RdKafka::ERR__UNKNOWN_PARTITION);
- fails += verify_offset(query_parts[4], timestamps[ti], -1,
- RdKafka::ERR_NO_ERROR);
- fails += verify_offset(query_parts[5], timestamps[ti], -1,
- RdKafka::ERR__UNKNOWN_PARTITION);
- }
-
-
- if (fails > 0)
- Test::Fail(tostr() << "See " << fails << " previous error(s)");
-
- RdKafka::TopicPartition::destroy(query_parts);
-
- delete p;
- delete conf;
- delete tconf;
-}
-
-extern "C" {
-int main_0054_offset_time(int argc, char **argv) {
- test_offset_time();
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c
deleted file mode 100644
index e0244cec9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-#define _MSG_COUNT 10
-struct latconf {
- const char *name;
- const char *conf[16];
- int min; /* Minimum expected latency */
- int max; /* Maximum expected latency */
-
- float rtt; /* Network+broker latency */
-
-
- char linger_ms_conf[32]; /**< Read back to show actual value */
-
- /* Result vector */
- rd_bool_t passed;
- float latency[_MSG_COUNT];
- float sum;
- int cnt;
- int wakeups;
-};
-
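-/* Hedged note (added comment): the .conf array is a flat list of
- * key/value string pairs terminated by NULL, e.g. {"linger.ms", "0", NULL},
- * applied in order by test_producer_latency() below. */
-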
-static int tot_wakeups = 0;
-
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- struct latconf *latconf = opaque;
- int64_t *ts_send = (int64_t *)rkmessage->_private;
- float delivery_time;
-
- if (rkmessage->err)
- TEST_FAIL("%s: delivery failed: %s\n", latconf->name,
- rd_kafka_err2str(rkmessage->err));
-
- if (!rkmessage->_private)
- return; /* Priming message, ignore. */
-
- delivery_time = (float)(test_clock() - *ts_send) / 1000.0f;
-
- free(ts_send);
-
- TEST_ASSERT(latconf->cnt < _MSG_COUNT, "");
-
- TEST_SAY("%s: Message %d delivered in %.3fms\n", latconf->name,
- latconf->cnt, delivery_time);
-
- latconf->latency[latconf->cnt++] = delivery_time;
- latconf->sum += delivery_time;
-}
-
-
-/**
- * @brief A stats callback to get the per-broker wakeup counts.
- *
- * The JSON "parsing" here is crude..
- */
-static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
- const char *t = json;
- int cnt = 0;
- int total = 0;
-
- /* Since we're only producing to one partition there will only be
- * one broker, the leader, whose wakeup counts we're interested in, but
- * we also want to know that other broker threads aren't spinning
- * like crazy. So just summarize all the wakeups from all brokers. */
- while ((t = strstr(t, "\"wakeups\":"))) {
- int wakeups;
- const char *next;
-
- t += strlen("\"wakeups\":");
- while (isspace((int)*t))
- t++;
- wakeups = strtol(t, (char **)&next, 0);
-
- TEST_ASSERT(t != next, "No wakeup number found at \"%.*s...\"",
- 16, t);
-
- total += wakeups;
- cnt++;
-
- t = next;
- }
-
- TEST_ASSERT(cnt > 0, "No brokers found in stats");
-
- tot_wakeups = total;
-
- return 0;
-}
-
-
-static int verify_latency(struct latconf *latconf) {
- float avg;
- int fails = 0;
- double ext_overhead =
- latconf->rtt + 5.0 /* broker ProduceRequest handling time, maybe */;
-
- ext_overhead *= test_timeout_multiplier;
-
- avg = latconf->sum / (float)latconf->cnt;
-
- TEST_SAY(
- "%s: average latency %.3fms, allowed range %d..%d +%.0fms, "
- "%d wakeups\n",
- latconf->name, avg, latconf->min, latconf->max, ext_overhead,
- tot_wakeups);
-
- if (avg < (float)latconf->min ||
- avg > (float)latconf->max + ext_overhead) {
- TEST_FAIL_LATER(
- "%s: average latency %.3fms is "
- "outside range %d..%d +%.0fms",
- latconf->name, avg, latconf->min, latconf->max,
- ext_overhead);
- fails++;
- }
-
- latconf->wakeups = tot_wakeups;
- if (latconf->wakeups < 10 || latconf->wakeups > 1000) {
- TEST_FAIL_LATER(
- "%s: broker wakeups out of range: %d, "
- "expected 10..1000",
- latconf->name, latconf->wakeups);
- fails++;
- }
-
-
- return fails;
-}
-
-static void measure_rtt(struct latconf *latconf, rd_kafka_t *rk) {
- rd_kafka_resp_err_t err;
- const struct rd_kafka_metadata *md;
- int64_t ts = test_clock();
-
- err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- latconf->rtt = (float)(test_clock() - ts) / 1000.0f;
-
- TEST_SAY("%s: broker base RTT is %.3fms\n", latconf->name,
- latconf->rtt);
- rd_kafka_metadata_destroy(md);
-}
-
-
-
-static void test_producer_latency(const char *topic, struct latconf *latconf) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_resp_err_t err;
- int i;
- size_t sz;
- rd_bool_t with_transactions = rd_false;
-
- SUB_TEST("%s (linger.ms=%d)", latconf->name);
-
- test_conf_init(&conf, NULL, 60);
-
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
- rd_kafka_conf_set_opaque(conf, latconf);
- rd_kafka_conf_set_stats_cb(conf, stats_cb);
- test_conf_set(conf, "statistics.interval.ms", "100");
- tot_wakeups = 0;
-
- for (i = 0; latconf->conf[i]; i += 2) {
- TEST_SAY("%s: set conf %s = %s\n", latconf->name,
- latconf->conf[i], latconf->conf[i + 1]);
- test_conf_set(conf, latconf->conf[i], latconf->conf[i + 1]);
- if (!strcmp(latconf->conf[i], "transactional.id"))
- with_transactions = rd_true;
- }
-
- sz = sizeof(latconf->linger_ms_conf);
- rd_kafka_conf_get(conf, "linger.ms", latconf->linger_ms_conf, &sz);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- if (with_transactions) {
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 10 * 1000));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
- }
-
- TEST_SAY("%s: priming producer\n", latconf->name);
- /* Send a priming message to make sure everything is up
- * and functional before starting measurements */
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("priming", 7),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
- if (err)
- TEST_FAIL("%s: priming producev failed: %s", latconf->name,
- rd_kafka_err2str(err));
-
- if (with_transactions) {
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- } else {
- /* Await delivery */
- rd_kafka_flush(rk, tmout_multip(5000));
- }
-
- /* Get a network+broker round-trip-time base time. */
- measure_rtt(latconf, rk);
-
- TEST_SAY("%s: producing %d messages\n", latconf->name, _MSG_COUNT);
- for (i = 0; i < _MSG_COUNT; i++) {
- int64_t *ts_send;
- int pre_cnt = latconf->cnt;
-
- if (with_transactions)
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- ts_send = malloc(sizeof(*ts_send));
- *ts_send = test_clock();
-
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_OPAQUE(ts_send), RD_KAFKA_V_END);
- if (err)
- TEST_FAIL("%s: producev #%d failed: %s", latconf->name,
- i, rd_kafka_err2str(err));
-
- /* Await delivery */
- while (latconf->cnt == pre_cnt)
- rd_kafka_poll(rk, 5000);
-
- if (with_transactions) {
- test_timing_t timing;
- TIMING_START(&timing, "commit_transaction");
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- TIMING_ASSERT_LATER(&timing, 0,
- (int)(latconf->rtt + 50.0));
- }
- }
-
- while (tot_wakeups == 0)
- rd_kafka_poll(rk, 100); /* Get final stats_cb */
-
- rd_kafka_destroy(rk);
-
- if (verify_latency(latconf))
- return; /* verify_latency() has already
- * called TEST_FAIL_LATER() */
-
-
- latconf->passed = rd_true;
-
- SUB_TEST_PASS();
-}
-
-
-static float find_min(const struct latconf *latconf) {
- int i;
- float v = 1000000;
-
- for (i = 0; i < latconf->cnt; i++)
- if (latconf->latency[i] < v)
- v = latconf->latency[i];
-
- return v;
-}
-
-static float find_max(const struct latconf *latconf) {
- int i;
- float v = 0;
-
- for (i = 0; i < latconf->cnt; i++)
- if (latconf->latency[i] > v)
- v = latconf->latency[i];
-
- return v;
-}
-
-int main_0055_producer_latency(int argc, char **argv) {
- const char *topic = test_mk_topic_name("0055_producer_latency", 1);
- struct latconf latconfs[] = {
- {"standard settings", {NULL}, 5, 5}, /* default is now 5ms */
- {"low linger.ms (0ms)", {"linger.ms", "0", NULL}, 0, 0},
- {"microsecond linger.ms (0.001ms)",
- {"linger.ms", "0.001", NULL},
- 0,
- 1},
- {"high linger.ms (3000ms)",
- {"linger.ms", "3000", NULL},
- 3000,
- 3100},
- {"linger.ms < 1000 (500ms)", /* internal block_max_ms */
- {"linger.ms", "500", NULL},
- 500,
- 600},
- {"no acks (0ms)",
- {"linger.ms", "0", "acks", "0", "enable.idempotence", "false",
- NULL},
- 0,
- 0},
- {"idempotence (10ms)",
- {"linger.ms", "10", "enable.idempotence", "true", NULL},
- 10,
- 10},
- {"transactions (35ms)",
- {"linger.ms", "35", "transactional.id", topic, NULL},
- 35,
- 50 + 35 /* extra time for AddPartitions..*/},
- {NULL}};
- struct latconf *latconf;
-
- if (test_on_ci) {
- TEST_SKIP("Latency measurements not reliable on CI\n");
- return 0;
- }
-
- /* Create topic without replicas to keep broker-side latency down */
- test_create_topic(NULL, topic, 1, 1);
-
- for (latconf = latconfs; latconf->name; latconf++)
- test_producer_latency(topic, latconf);
-
- TEST_SAY(_C_YEL "Latency tests summary:\n" _C_CLR);
- TEST_SAY("%-40s %9s %6s..%-6s %7s %9s %9s %9s %8s\n", "Name",
- "linger.ms", "MinExp", "MaxExp", "RTT", "Min", "Average",
- "Max", "Wakeups");
-
- for (latconf = latconfs; latconf->name; latconf++)
- TEST_SAY("%-40s %9s %6d..%-6d %7g %9g %9g %9g %8d%s\n",
- latconf->name, latconf->linger_ms_conf, latconf->min,
- latconf->max, latconf->rtt, find_min(latconf),
- latconf->sum / latconf->cnt, find_max(latconf),
- latconf->wakeups,
- latconf->passed ? "" : _C_RED " FAILED");
-
-
- TEST_LATER_CHECK("");
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c
deleted file mode 100644
index e6205ddb6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-/**
- * KafkaConsumer balanced group with multithreading tests
- *
- * Runs a consumer subscribing to a topic with multiple partitions and farms
- * out consumption of each partition to a separate thread.
- */
-
-#define MAX_THRD_CNT 4
-
-static int assign_cnt = 0;
-static int consumed_msg_cnt = 0;
-static int consumers_running = 0;
-static int exp_msg_cnt;
-
-static mtx_t lock;
-static thrd_t tids[MAX_THRD_CNT];
-
-typedef struct part_consume_info_s {
- rd_kafka_queue_t *rkqu;
- int partition;
-} part_consume_info_t;
-
-static int is_consuming(void) {
- int result;
- mtx_lock(&lock);
- result = consumers_running;
- mtx_unlock(&lock);
- return result;
-}
-
-static int partition_consume(void *args) {
- part_consume_info_t *info = (part_consume_info_t *)args;
- rd_kafka_queue_t *rkqu = info->rkqu;
- int partition = info->partition;
- int64_t ts_start = test_clock();
- int max_time = (test_session_timeout_ms + 3000) * 1000;
- int running = 1;
-
- free(args); /* Free the parameter struct dynamically allocated for us */
-
- while (ts_start + max_time > test_clock() && running &&
- is_consuming()) {
- rd_kafka_message_t *rkmsg;
-
- rkmsg = rd_kafka_consume_queue(rkqu, 500);
-
- if (!rkmsg)
- continue;
- else if (rkmsg->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
- running = 0;
- else if (rkmsg->err) {
- mtx_lock(&lock);
- TEST_FAIL(
- "Message error "
- "(at offset %" PRId64
- " after "
- "%d/%d messages and %dms): %s",
- rkmsg->offset, consumed_msg_cnt, exp_msg_cnt,
- (int)(test_clock() - ts_start) / 1000,
- rd_kafka_message_errstr(rkmsg));
- mtx_unlock(&lock);
- } else {
- if (rkmsg->partition != partition) {
- mtx_lock(&lock);
- TEST_FAIL(
- "Message consumed has partition %d "
- "but we expected partition %d.",
- rkmsg->partition, partition);
- mtx_unlock(&lock);
- }
- }
- rd_kafka_message_destroy(rkmsg);
-
- mtx_lock(&lock);
- if (running && ++consumed_msg_cnt >= exp_msg_cnt) {
- TEST_SAY("All messages consumed\n");
- running = 0;
- }
- mtx_unlock(&lock);
- }
-
- rd_kafka_queue_destroy(rkqu);
-
- return thrd_success;
-}
-
-static thrd_t spawn_thread(rd_kafka_queue_t *rkqu, int partition) {
- thrd_t thr;
- part_consume_info_t *info = malloc(sizeof(part_consume_info_t));
-
- info->rkqu = rkqu;
- info->partition = partition;
-
- if (thrd_create(&thr, &partition_consume, info) != thrd_success) {
- TEST_FAIL("Failed to create consumer thread.");
- }
- return thr;
-}
-
-static int rebalanced = 0;
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque) {
- int i;
- char *memberid = rd_kafka_memberid(rk);
-
- TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
- rd_kafka_name(rk), memberid, rd_kafka_err2str(err));
-
- if (memberid)
- free(memberid);
-
- test_print_partition_list(partitions);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- assign_cnt++;
-
- rd_kafka_assign(rk, partitions);
- mtx_lock(&lock);
- consumers_running = 1;
- mtx_unlock(&lock);
-
- for (i = 0; i < partitions->cnt && i < MAX_THRD_CNT; ++i) {
- rd_kafka_topic_partition_t part = partitions->elems[i];
- rd_kafka_queue_t *rkqu;
-                        /* This queue is released in partition_consume(). */
- rkqu = rd_kafka_queue_get_partition(rk, part.topic,
- part.partition);
-
- rd_kafka_queue_forward(rkqu, NULL);
- tids[part.partition] =
- spawn_thread(rkqu, part.partition);
- }
-
- rebalanced = 1;
-
- break;
-
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- if (assign_cnt == 0)
-                        TEST_FAIL("asymmetric rebalance_cb");
- assign_cnt--;
- rd_kafka_assign(rk, NULL);
- mtx_lock(&lock);
- consumers_running = 0;
- mtx_unlock(&lock);
-
- break;
-
- default:
- TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err));
- break;
- }
-}
-
-static void get_assignment(rd_kafka_t *rk_c) {
- while (!rebalanced) {
- rd_kafka_message_t *rkmsg;
- rkmsg = rd_kafka_consumer_poll(rk_c, 500);
- if (rkmsg)
- rd_kafka_message_destroy(rkmsg);
- }
-}
-
-int main_0056_balanced_group_mt(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- rd_kafka_t *rk_p, *rk_c;
- rd_kafka_topic_t *rkt_p;
- int msg_cnt = test_quick ? 100 : 1000;
- int msg_base = 0;
- int partition_cnt = 2;
- int partition;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *default_topic_conf;
- rd_kafka_topic_partition_list_t *sub, *topics;
- rd_kafka_resp_err_t err;
- test_timing_t t_assign, t_close, t_consume;
- int i;
-
- exp_msg_cnt = msg_cnt * partition_cnt;
-
- testid = test_id_generate();
-
- /* Produce messages */
- rk_p = test_create_producer();
- rkt_p = test_create_producer_topic(rk_p, topic, NULL);
-
- for (partition = 0; partition < partition_cnt; partition++) {
- test_produce_msgs(rk_p, rkt_p, testid, partition,
- msg_base + (partition * msg_cnt), msg_cnt,
- NULL, 0);
- }
-
- rd_kafka_topic_destroy(rkt_p);
- rd_kafka_destroy(rk_p);
-
- if (mtx_init(&lock, mtx_plain) != thrd_success)
- TEST_FAIL("Cannot create mutex.");
-
- test_conf_init(&conf, &default_topic_conf,
- (test_session_timeout_ms * 3) / 1000);
-
- test_conf_set(conf, "enable.partition.eof", "true");
-
- test_topic_conf_set(default_topic_conf, "auto.offset.reset",
- "smallest");
-
- /* Fill in topic subscription set */
- topics = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);
-
- /* Create consumers and start subscription */
- rk_c = test_create_consumer(topic /*group_id*/, rebalance_cb, conf,
- default_topic_conf);
-
- test_consumer_subscribe(rk_c, topic);
-
- rd_kafka_topic_partition_list_destroy(topics);
-
- /* Wait for both consumers to get an assignment */
- TIMING_START(&t_assign, "WAIT.ASSIGN");
- get_assignment(rk_c);
- TIMING_STOP(&t_assign);
-
- TIMING_START(&t_consume, "CONSUME.WAIT");
- for (i = 0; i < MAX_THRD_CNT; ++i) {
- int res;
- if (tids[i] != 0)
- thrd_join(tids[i], &res);
- }
- TIMING_STOP(&t_consume);
-
- TEST_SAY("Closing remaining consumers\n");
- /* Query subscription */
- err = rd_kafka_subscription(rk_c, &sub);
- TEST_ASSERT(!err, "%s: subscription () failed: %s", rd_kafka_name(rk_c),
- rd_kafka_err2str(err));
- TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c), sub->cnt);
- for (i = 0; i < sub->cnt; ++i)
- TEST_SAY(" %s\n", sub->elems[i].topic);
- rd_kafka_topic_partition_list_destroy(sub);
-
- /* Run an explicit unsubscribe () (async) prior to close ()
- * to trigger race condition issues on termination. */
- TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c));
- err = rd_kafka_unsubscribe(rk_c);
- TEST_ASSERT(!err, "%s: unsubscribe failed: %s", rd_kafka_name(rk_c),
- rd_kafka_err2str(err));
-
- TEST_SAY("Closing %s\n", rd_kafka_name(rk_c));
- TIMING_START(&t_close, "CONSUMER.CLOSE");
- err = rd_kafka_consumer_close(rk_c);
- TIMING_STOP(&t_close);
- TEST_ASSERT(!err, "consumer_close failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_destroy(rk_c);
- rk_c = NULL;
-
- TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, exp_msg_cnt);
- TEST_ASSERT(consumed_msg_cnt >= exp_msg_cnt,
- "Only %d/%d messages were consumed", consumed_msg_cnt,
- exp_msg_cnt);
-
- if (consumed_msg_cnt > exp_msg_cnt)
- TEST_SAY(
- "At least %d/%d messages were consumed "
- "multiple times\n",
- consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt);
-
- mtx_destroy(&lock);
-
- return 0;
-}
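
The heart of the test above is the per-partition queue fan-out: detach a partition's fetch queue from the common consumer queue so a dedicated thread can serve it. A minimal sketch of that idiom, assuming rk is a consumer that has already been assigned the partition; the topic name is illustrative:

/* Refcounted handle to the partition's queue; stop forwarding it to the
 * consumer queue so messages are only reachable through rkqu. */
rd_kafka_queue_t *rkqu = rd_kafka_queue_get_partition(rk, "mytopic", 0);
rd_kafka_queue_forward(rkqu, NULL);

for (;;) {
        rd_kafka_message_t *rkm = rd_kafka_consume_queue(rkqu, 500);
        if (!rkm)
                continue; /* poll timeout */
        if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                rd_kafka_message_destroy(rkm);
                break; /* requires enable.partition.eof=true */
        }
        /* ... process rkm->payload (rkm->len bytes) ... */
        rd_kafka_message_destroy(rkm);
}

rd_kafka_queue_destroy(rkqu); /* drop our queue reference */
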
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp
deleted file mode 100644
index 0b50b40ad..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-/**
- * Proper handling of invalid topic names, not by local client enforcement
- * but by proper propagation of broker errors.
- *
- * E.g.: producing messages to an invalid topic should fail quickly, not by timeout.
- */
-
-
-
-#define check_err(ERR, EXP) \
- do { \
- if ((ERR) != (EXP)) \
- Test::Fail(tostr() << __FUNCTION__ << ":" << __LINE__ << ": " \
- << "Expected " << RdKafka::err2str(EXP) << ", got " \
- << RdKafka::err2str(ERR)); \
- } while (0)
-
-class DrCb0057 : public RdKafka::DeliveryReportCb {
- public:
- void dr_cb(RdKafka::Message &msg) {
- std::string val((const char *)msg.payload());
-
- Test::Say(tostr() << "DeliveryReport for " << val << " message on "
- << msg.topic_name() << " [" << msg.partition()
- << "]: " << msg.errstr() << "\n");
-
- if (val == "good")
- check_err(msg.err(), RdKafka::ERR_NO_ERROR);
- else if (val == "bad") {
- if (test_broker_version >= TEST_BRKVER(0, 8, 2, 2))
- check_err(msg.err(), RdKafka::ERR_TOPIC_EXCEPTION);
- else
- check_err(msg.err(), RdKafka::ERR_UNKNOWN);
- }
- }
-};
-
-static void test_invalid_topic(void) {
- std::string topic_bad = Test::mk_topic_name("0057-invalid_topic$#!", 1);
- std::string topic_good = Test::mk_topic_name("0057-invalid_topic_good", 1);
- RdKafka::Conf *conf;
- std::string errstr;
-
- Test::conf_init(&conf, NULL, 0);
-
- DrCb0057 MyDr;
- conf->set("dr_cb", &MyDr, errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- RdKafka::ErrorCode err;
-
- for (int i = -1; i < 3; i++) {
- err = p->produce(topic_bad, i, RdKafka::Producer::RK_MSG_COPY,
- (void *)"bad", 4, NULL, 0, 0, NULL);
- if (err) /* Error is probably delayed until delivery report */
- check_err(err, RdKafka::ERR_TOPIC_EXCEPTION);
-
- err = p->produce(topic_good, i, RdKafka::Producer::RK_MSG_COPY,
- (void *)"good", 5, NULL, 0, 0, NULL);
- check_err(err, RdKafka::ERR_NO_ERROR);
- }
-
- p->flush(tmout_multip(10000));
-
- if (p->outq_len() > 0)
- Test::Fail(tostr() << "Expected producer to be flushed, " << p->outq_len()
- << " messages remain");
-
- delete p;
- delete conf;
-}
-
-extern "C" {
-int main_0057_invalid_topic(int argc, char **argv) {
- test_invalid_topic();
- return 0;
-}
-}
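
The same broker-side rejection can be observed from the C API, where produce() itself typically succeeds and the error surfaces in the delivery report. A hedged fragment (stdio.h and rdkafka.h assumed; registration via rd_kafka_conf_set_dr_msg_cb() elided):

static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkm,
                      void *opaque) {
        /* Brokers >= 0.8.2.2 reject illegal topic names with
         * TOPIC_EXCEPTION; older brokers returned UNKNOWN. */
        if (rkm->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
                fprintf(stderr, "broker rejected topic name: %s\n",
                        rd_kafka_err2str(rkm->err));
        else if (rkm->err)
                fprintf(stderr, "unexpected delivery error: %s\n",
                        rd_kafka_err2str(rkm->err));
}
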
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp
deleted file mode 100644
index 4da46e7f7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-
-/**
- * @brief Test log callbacks and log queues
- */
-
-class myLogCb : public RdKafka::EventCb {
- private:
- enum { _EXP_NONE, _EXP_LOG } state_;
- int cnt_;
-
- public:
- myLogCb() : state_(_EXP_NONE), cnt_(0) {
- }
- void expecting(bool b) {
- state_ = b ? _EXP_LOG : _EXP_NONE;
- }
- int count() {
- return cnt_;
- }
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_LOG:
- cnt_++;
- Test::Say(tostr() << "Log: "
- << "level " << event.severity() << ", facility "
- << event.fac() << ", str " << event.str() << "\n");
- if (state_ != _EXP_LOG)
- Test::Fail(
- "Received unexpected "
- "log message");
- break;
- default:
- break;
- }
- }
-};
-
-static void test_log(std::string what, bool main_queue) {
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
- myLogCb my_log;
- std::string errstr;
-
- Test::conf_set(conf, "client.id", test_curr_name());
- Test::conf_set(conf, "debug", "generic"); // generate some logs
- Test::conf_set(conf, "log.queue", "true");
-
- if (conf->set("event_cb", &my_log, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- Test::Say(what + "Creating producer, not expecting any log messages\n");
- my_log.expecting(false);
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail(what + "Failed to create Producer: " + errstr);
- delete conf;
-
- RdKafka::Queue *queue = NULL;
- if (!main_queue) {
- queue = RdKafka::Queue::create(p);
- queue->poll(1000);
- } else {
- p->poll(1000);
- }
-
- Test::Say(what + "Setting log queue\n");
-  p->set_log_queue(queue); /* Redirect logs: to the created queue, or main queue if NULL */
-
- Test::Say(what + "Expecting at least one log message\n");
- my_log.expecting(true);
- if (queue)
- queue->poll(1000);
- else
- p->poll(1000); /* Should not spontaneously call logs */
-
- Test::Say(tostr() << what << "Saw " << my_log.count() << " logs\n");
- if (my_log.count() < 1)
- Test::Fail(what +
- "No logs seen: expected at least one broker "
- "failure");
-
- if (queue)
- delete queue;
- delete (p);
-}
-
-extern "C" {
-int main_0058_log(int argc, char **argv) {
- test_log("main.queue: ", true);
- test_log("local.queue: ", false);
- return 0;
-}
-}
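
The C API has the same log-queue mechanism as the C++ test above. A minimal sketch, assuming the debug-level logs generated by the handle itself are enough (no broker required); all calls are standard librdkafka APIs:

char errstr[512];
rd_kafka_conf_t *conf = rd_kafka_conf_new();
rd_kafka_conf_set(conf, "log.queue", "true", NULL, 0);
rd_kafka_conf_set(conf, "debug", "generic", NULL, 0); /* generate some logs */
rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_LOG);

rd_kafka_t *rk =
    rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));

/* Route log events to a private queue and poll it explicitly. */
rd_kafka_queue_t *logq = rd_kafka_queue_new(rk);
rd_kafka_set_log_queue(rk, logq);

rd_kafka_event_t *ev;
while ((ev = rd_kafka_queue_poll(logq, 1000))) {
        if (rd_kafka_event_type(ev) == RD_KAFKA_EVENT_LOG) {
                const char *fac, *str;
                int level;
                rd_kafka_event_log(ev, &fac, &str, &level);
                printf("LOG-%d-%s: %s\n", level, fac, str);
        }
        rd_kafka_event_destroy(ev);
}

rd_kafka_queue_destroy(logq);
rd_kafka_destroy(rk);
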
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp
deleted file mode 100644
index 67508ff82..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-/**
- * Binary search by timestamp: exercises KafkaConsumer's seek() API.
- */
-
-
-static std::string topic;
-static const int partition = 0;
-static int64_t golden_timestamp = -1;
-static int64_t golden_offset = -1;
-
-/**
- * @brief Seek to offset and consume that message.
- *
- * Asserts on failure.
- */
-static RdKafka::Message *get_msg(RdKafka::KafkaConsumer *c,
- int64_t offset,
- bool use_seek) {
- RdKafka::TopicPartition *next =
- RdKafka::TopicPartition::create(topic, partition, offset);
- RdKafka::ErrorCode err;
-
- /* Since seek() can only be used to change the currently consumed
- * offset we need to start consuming the first time we run this
- * loop by calling assign() */
-
- test_timing_t t_seek;
- TIMING_START(&t_seek, "seek");
- if (!use_seek) {
- std::vector<RdKafka::TopicPartition *> parts;
- parts.push_back(next);
- err = c->assign(parts);
- if (err)
- Test::Fail("assign() failed: " + RdKafka::err2str(err));
- } else {
- err = c->seek(*next, tmout_multip(5000));
- if (err)
- Test::Fail("seek() failed: " + RdKafka::err2str(err));
- }
- TIMING_STOP(&t_seek);
- delete next;
-
- test_timing_t t_consume;
- TIMING_START(&t_consume, "consume");
-
- RdKafka::Message *msg = c->consume(tmout_multip(5000));
- if (!msg)
- Test::Fail("consume() returned NULL");
- TIMING_STOP(&t_consume);
-
- if (msg->err())
- Test::Fail("consume() returned error: " + msg->errstr());
-
- if (msg->offset() != offset)
- Test::Fail(tostr() << "seek()ed to offset " << offset
- << " but consume() returned offset " << msg->offset());
-
- return msg;
-}
-
-class MyDeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
- void dr_cb(RdKafka::Message &msg) {
- if (msg.err())
- Test::Fail("Delivery failed: " + msg.errstr());
-
- if (!msg.msg_opaque())
- return;
-
- RdKafka::MessageTimestamp ts = msg.timestamp();
- if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
- Test::Fail(tostr() << "Dr msg timestamp type wrong: " << ts.type);
-
- golden_timestamp = ts.timestamp;
- golden_offset = msg.offset();
- }
-};
-
-static void do_test_bsearch(void) {
- RdKafka::Conf *conf, *tconf;
- int msgcnt = 1000;
- int64_t timestamp;
- std::string errstr;
- RdKafka::ErrorCode err;
- MyDeliveryReportCb my_dr;
-
- topic = Test::mk_topic_name("0059-bsearch", 1);
- Test::conf_init(&conf, &tconf, 0);
- Test::conf_set(tconf, "acks", "all");
- Test::conf_set(conf, "api.version.request", "true");
- conf->set("dr_cb", &my_dr, errstr);
- conf->set("default_topic_conf", tconf, errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
- delete tconf;
-
- timestamp = 1000;
- for (int i = 0; i < msgcnt; i++) {
- err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
- (void *)topic.c_str(), topic.size(), NULL, 0, timestamp,
- i == 357 ? (void *)1 /*golden*/ : NULL);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("Produce failed: " + RdKafka::err2str(err));
- timestamp += 100 + (timestamp % 9);
- }
-
- if (p->flush(tmout_multip(5000)) != 0)
- Test::Fail("Not all messages flushed");
-
- Test::Say(tostr() << "Produced " << msgcnt << " messages, "
- << "golden message with timestamp " << golden_timestamp
- << " at offset " << golden_offset << "\n");
-
- delete p;
-
- /*
- * Now find the golden message using bsearch
- */
-
- /* Create consumer */
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "group.id", topic);
- Test::conf_set(conf, "api.version.request", "true");
- Test::conf_set(conf, "fetch.wait.max.ms", "1");
- Test::conf_set(conf, "fetch.error.backoff.ms", "1");
- Test::conf_set(conf, "queued.min.messages", "1");
- Test::conf_set(conf, "enable.auto.commit", "false");
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- Test::Say("Find initial middle offset\n");
- int64_t low, high;
- test_timing_t t_qr;
- TIMING_START(&t_qr, "query_watermark_offsets");
- err = c->query_watermark_offsets(topic, partition, &low, &high,
- tmout_multip(5000));
- TIMING_STOP(&t_qr);
- if (err)
- Test::Fail("query_watermark_offsets failed: " + RdKafka::err2str(err));
-
- /* Divide and conquer */
- test_timing_t t_bsearch;
- TIMING_START(&t_bsearch, "actual bsearch");
- int itcnt = 0;
- do {
- int64_t mid;
-
- mid = low + ((high - low) / 2);
-
- Test::Say(1, tostr() << "Get message at mid point of " << low << ".."
- << high << " -> " << mid << "\n");
-
- RdKafka::Message *msg = get_msg(c, mid,
- /* use assign() on first iteration,
- * then seek() */
- itcnt > 0);
-
- RdKafka::MessageTimestamp ts = msg->timestamp();
- if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
- Test::Fail(tostr() << "Expected CreateTime timestamp, not " << ts.type
- << " at offset " << msg->offset());
-
- Test::Say(1, tostr() << "Message at offset " << msg->offset()
- << " with timestamp " << ts.timestamp << "\n");
-
- if (ts.timestamp == golden_timestamp) {
- Test::Say(1, tostr() << "Found golden timestamp " << ts.timestamp
- << " at offset " << msg->offset() << " in "
- << itcnt + 1 << " iterations\n");
- delete msg;
- break;
- }
-
- if (low == high) {
- Test::Fail(tostr() << "Search exhausted at offset " << msg->offset()
- << " with timestamp " << ts.timestamp
- << " without finding golden timestamp "
- << golden_timestamp << " at offset " << golden_offset);
-
- } else if (ts.timestamp < golden_timestamp)
- low = msg->offset() + 1;
- else if (ts.timestamp > golden_timestamp)
- high = msg->offset() - 1;
-
- delete msg;
- itcnt++;
- } while (true);
- TIMING_STOP(&t_bsearch);
-
- c->close();
-
- delete c;
-}
-
-extern "C" {
-int main_0059_bsearch(int argc, char **argv) {
- do_test_bsearch();
- return 0;
-}
-}
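
The manual bisection above deliberately exercises seek(); outside of a test, brokers >= 0.10 can resolve a timestamp to an offset in a single call with rd_kafka_offsets_for_times(). A sketch, where rk, the topic name and the query timestamp are assumptions:

#include <inttypes.h>

rd_kafka_topic_partition_list_t *parts =
    rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_t *p =
    rd_kafka_topic_partition_list_add(parts, "mytopic", 0);
p->offset = 1500000000000; /* input: query timestamp in milliseconds */

/* On success each element's offset is replaced by the earliest offset
 * whose timestamp is >= the query (or -1 if there is no such message). */
rd_kafka_resp_err_t err = rd_kafka_offsets_for_times(rk, parts, 5000);
if (!err)
        printf("offset for timestamp: %" PRId64 "\n",
               parts->elems[0].offset);
rd_kafka_topic_partition_list_destroy(parts);
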
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp
deleted file mode 100644
index 156b8a57a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-/**
- * Verify prioritization of non-message ops.
- * MO:
- *
- * - Seed topic with 1000 messages
- * - Start consumer with auto offset commit disabled,
- * but with commit and stats callbacks registered,
- * - Consume one message
- * - Commit that message manually
- * - Consume one message per second
- * - The commit callback should be fired within reasonable time, long before
- *   all messages are consumed.
- * - The stats callback should behave the same.
- */
-
-
-
-class MyCbs : public RdKafka::OffsetCommitCb, public RdKafka::EventCb {
- public:
- int seen_commit;
- int seen_stats;
-
- void offset_commit_cb(RdKafka::ErrorCode err,
- std::vector<RdKafka::TopicPartition *> &offsets) {
- if (err)
- Test::Fail("Offset commit failed: " + RdKafka::err2str(err));
-
- seen_commit++;
- Test::Say("Got commit callback!\n");
- }
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_STATS:
- Test::Say("Got stats callback!\n");
- seen_stats++;
- break;
- default:
- break;
- }
- }
-};
-
-
-
-static void do_test_commit_cb(void) {
- const int msgcnt = test_quick ? 100 : 1000;
- std::string errstr;
- RdKafka::ErrorCode err;
- std::string topic = Test::mk_topic_name("0060-op_prio", 1);
-
- test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);
-
- /*
- * Create consumer
- */
-
- /* Create consumer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "group.id", topic);
- Test::conf_set(conf, "socket.timeout.ms", "10000");
- Test::conf_set(conf, "enable.auto.commit", "false");
- Test::conf_set(conf, "enable.partition.eof", "false");
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- Test::conf_set(conf, "statistics.interval.ms", "1000");
-
- MyCbs cbs;
- cbs.seen_commit = 0;
- cbs.seen_stats = 0;
- if (conf->set("offset_commit_cb", (RdKafka::OffsetCommitCb *)&cbs, errstr) !=
- RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to set commit callback: " + errstr);
- if (conf->set("event_cb", (RdKafka::EventCb *)&cbs, errstr) !=
- RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to set event callback: " + errstr);
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Subscribe */
- std::vector<std::string> topics;
- topics.push_back(topic);
- if ((err = c->subscribe(topics)))
- Test::Fail("subscribe failed: " + RdKafka::err2str(err));
-
- /* Wait for messages and commit callback. */
- Test::Say("Consuming topic " + topic + "\n");
- int cnt = 0;
- while (!cbs.seen_commit || !cbs.seen_stats) {
- RdKafka::Message *msg = c->consume(tmout_multip(1000));
- if (!msg->err()) {
- cnt++;
- Test::Say(tostr() << "Received message #" << cnt << "\n");
- if (cnt > 10)
- Test::Fail(tostr() << "Should've seen the "
- "offset commit ("
- << cbs.seen_commit
- << ") and "
- "stats callbacks ("
- << cbs.seen_stats << ") by now");
-
- /* Commit the first message to trigger the offset commit_cb */
- if (cnt == 1) {
- err = c->commitAsync(msg);
- if (err)
- Test::Fail("commitAsync() failed: " + RdKafka::err2str(err));
- rd_sleep(1); /* Sleep to simulate slow processing, making sure
- * that the offset commit callback op gets
- * inserted on the consume queue in front of
- * the messages. */
- }
-
- } else if (msg->err() == RdKafka::ERR__TIMED_OUT)
-      ; /* Still rebalancing? */
- else
- Test::Fail("consume() failed: " + msg->errstr());
- delete msg;
- }
-
- c->close();
- delete c;
-}
-
-extern "C" {
-int main_0060_op_prio(int argc, char **argv) {
- do_test_commit_cb();
- return 0;
-}
-}
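
For reference, the C-API shape of the commitAsync()-plus-callback flow the test exercises: with auto commit disabled, an asynchronous commit causes the commit result to be enqueued as an op on the consume queue, where it fires the registered callback on a later poll. A fragment (conf and consumer creation elided; rkm is a previously consumed message):

static void offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                             rd_kafka_topic_partition_list_t *offsets,
                             void *opaque) {
        fprintf(stderr, "offset commit result: %s\n",
                rd_kafka_err2str(err));
}

/* During configuration: */
rd_kafka_conf_set(conf, "enable.auto.commit", "false", NULL, 0);
rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);

/* After consuming rkm: commit its offset+1 asynchronously. The callback
 * is then served by a subsequent rd_kafka_consumer_poll(). */
rd_kafka_commit_message(rk, rkm, 1 /*async*/);
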
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp
deleted file mode 100644
index 759541583..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-/**
- * Verify consumer_lag
- */
-
-static std::string topic;
-
-class StatsCb : public RdKafka::EventCb {
- public:
- int64_t calc_lag; // calculated lag
- int lag_valid; // number of times lag has been valid
-
- StatsCb() {
- calc_lag = -1;
- lag_valid = 0;
- }
-
- /**
- * @brief Event callback
- */
- void event_cb(RdKafka::Event &event) {
- if (event.type() == RdKafka::Event::EVENT_LOG) {
- Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac()
- << ": " << event.str() << "\n");
- return;
- } else if (event.type() != RdKafka::Event::EVENT_STATS) {
- Test::Say(tostr() << "Dropping event " << event.type() << "\n");
- return;
- }
-
- int64_t consumer_lag = parse_json(event.str().c_str());
-
- Test::Say(3, tostr() << "Stats: consumer_lag is " << consumer_lag << "\n");
- if (consumer_lag == -1) {
- Test::Say(2, "Skipping old stats with invalid consumer_lag\n");
- return; /* Old stats generated before first message consumed */
- } else if (consumer_lag != calc_lag)
- Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag
- << ", expected " << calc_lag << "\n");
- else
- lag_valid++;
- }
-
-
- /**
- * @brief Naive JSON parsing: find the consumer_lag_stored for partition 0
- * and return it.
- */
- static int64_t parse_json(const char *json_doc) {
- const std::string match_topic(std::string("\"") + topic + "\":");
- const char *search[] = {
- "\"topics\":", match_topic.c_str(), "\"partitions\":",
- "\"0\":", "\"consumer_lag_stored\":", NULL};
- const char *remain = json_doc;
-
- for (const char **sp = search; *sp; sp++) {
- const char *t = strstr(remain, *sp);
- if (!t)
-        Test::Fail(tostr() << "Couldn't find " << *sp
- << " in remaining stats output:\n"
- << remain << "\n====================\n"
- << json_doc << "\n");
- remain = t + strlen(*sp);
- }
-
- while (*remain == ' ')
- remain++;
-
- if (!*remain)
- Test::Fail("Nothing following consumer_lag");
-
- int64_t lag = strtoull(remain, NULL, 0);
- if (lag == -1) {
- Test::Say(tostr() << "Consumer lag " << lag << " is invalid, stats:\n");
- Test::Say(3, tostr() << json_doc << "\n");
- }
- return lag;
- }
-};
-
-
-/**
- * @brief Produce \p msgcnt in a transaction that is aborted.
- */
-static void produce_aborted_txns(const std::string &topic,
- int32_t partition,
- int msgcnt) {
- RdKafka::Producer *p;
- RdKafka::Conf *conf;
- RdKafka::Error *error;
-
- Test::Say(tostr() << "Producing " << msgcnt << " transactional messages "
- << "which will be aborted\n");
- Test::conf_init(&conf, NULL, 0);
-
- Test::conf_set(conf, "transactional.id", "txn_id_" + topic);
-
- std::string errstr;
- p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- error = p->init_transactions(-1);
- if (error)
- Test::Fail("init_transactions() failed: " + error->str());
-
- error = p->begin_transaction();
- if (error)
- Test::Fail("begin_transaction() failed: " + error->str());
-
- for (int i = 0; i < msgcnt; i++) {
- RdKafka::ErrorCode err;
-
- err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, &i,
- sizeof(i), NULL, 0, 0, NULL);
- if (err)
- Test::Fail("produce() failed: " + RdKafka::err2str(err));
- }
-
- /* Flush is typically not needed for transactions since
- * commit_transaction() will do it automatically, but in the case of
- * abort_transaction() nothing might have been sent to the broker yet,
- * so call flush() here so we know the messages are sent and the
- * partitions are added to the transaction, so that a control(abort)
- * message is written to the partition. */
- p->flush(-1);
-
- error = p->abort_transaction(-1);
- if (error)
- Test::Fail("abort_transaction() failed: " + error->str());
-
- delete p;
-}
-
-
-static void do_test_consumer_lag(bool with_txns) {
- int msgcnt = test_quick ? 5 : 10;
- int txn_msgcnt = 3;
- int addcnt = 0;
- std::string errstr;
- RdKafka::ErrorCode err;
-
- SUB_TEST("Test consumer lag %s transactions", with_txns ? "with" : "without");
-
- topic = Test::mk_topic_name("0061-consumer_lag", 1);
-
- test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);
-
- if (with_txns) {
- /* After the standard messages have been produced,
- * produce some transactional messages that are aborted to advance
- * the end offset with control messages. */
- produce_aborted_txns(topic, 0, txn_msgcnt);
- addcnt = txn_msgcnt + 1 /* ctrl msg */;
- }
-
- /*
- * Create consumer
- */
-
- /* Create consumer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 40);
- StatsCb stats;
- if (conf->set("event_cb", &stats, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail("set event_cb failed: " + errstr);
- Test::conf_set(conf, "group.id", topic);
- Test::conf_set(conf, "enable.auto.commit", "false");
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- Test::conf_set(conf, "statistics.interval.ms", "100");
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Assign partitions */
- std::vector<RdKafka::TopicPartition *> parts;
- parts.push_back(RdKafka::TopicPartition::create(topic, 0));
- if ((err = c->assign(parts)))
- Test::Fail("assign failed: " + RdKafka::err2str(err));
- RdKafka::TopicPartition::destroy(parts);
-
- /* Start consuming */
- Test::Say("Consuming topic " + topic + "\n");
- int cnt = 0;
- while (cnt < msgcnt + addcnt) {
- RdKafka::Message *msg = c->consume(1000);
-
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- if (with_txns && cnt >= msgcnt && stats.calc_lag == 0)
- addcnt = 0; /* done */
- break;
- case RdKafka::ERR__PARTITION_EOF:
-      Test::Fail(tostr() << "Unexpected PARTITION_EOF (not enabled) after "
- << cnt << "/" << msgcnt
- << " messages: " << msg->errstr());
- break;
-
- case RdKafka::ERR_NO_ERROR:
- /* Proper message. Update calculated lag for later
- * checking in stats callback */
- if (msg->offset() + 1 >= msgcnt && with_txns)
- stats.calc_lag = 0;
- else
- stats.calc_lag = (msgcnt + addcnt) - (msg->offset() + 1);
- cnt++;
- Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt
- << " at offset " << msg->offset() << " (calc lag "
- << stats.calc_lag << ")\n");
- /* Slow down message "processing" to make sure we get
- * at least one stats callback per message. */
- if (cnt < msgcnt)
- rd_sleep(1);
- break;
-
- default:
- Test::Fail("Consume error: " + msg->errstr());
- break;
- }
-
- delete msg;
- }
- Test::Say(tostr() << "Done, lag was valid " << stats.lag_valid << " times\n");
- if (stats.lag_valid == 0)
- Test::Fail("No valid consumer_lag in statistics seen");
-
- c->close();
- delete c;
-
- SUB_TEST_PASS();
-}
-
-extern "C" {
-int main_0061_consumer_lag(int argc, char **argv) {
- do_test_consumer_lag(false /*no txns*/);
- if (test_broker_version >= TEST_BRKVER(0, 11, 0, 0))
- do_test_consumer_lag(true /*txns*/);
- return 0;
-}
-}
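
The calc_lag the test maintains can be cross-checked against broker watermarks: lag is the high watermark minus the current consume position (with aborted transactions, as above, the watermark-based figure also counts control messages). A sketch, assuming rk is an assigned consumer and the topic name is illustrative:

#include <inttypes.h>

int64_t lo, hi;
if (!rd_kafka_get_watermark_offsets(rk, "mytopic", 0, &lo, &hi)) {
        rd_kafka_topic_partition_list_t *pos =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(pos, "mytopic", 0);

        /* rd_kafka_position() fills in the next offset to be consumed. */
        if (!rd_kafka_position(rk, pos) &&
            pos->elems[0].offset != RD_KAFKA_OFFSET_INVALID)
                printf("lag: %" PRId64 "\n", hi - pos->elems[0].offset);

        rd_kafka_topic_partition_list_destroy(pos);
}
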
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c
deleted file mode 100644
index bdddda5e0..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2017, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests that statistics events are emitted via the event API.
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int stats_count = 0;
-
-/**
- * Handle stats
- */
-static void handle_stats(rd_kafka_event_t *rkev) {
- const char *stats_json = NULL;
- stats_json = rd_kafka_event_stats(rkev);
- if (stats_json != NULL) {
- TEST_SAY("Stats: %s\n", stats_json);
- stats_count++;
- } else {
- TEST_FAIL("Stats: failed to get stats\n");
- }
-}
-
-int main_0062_stats_event(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- test_timing_t t_delivery;
- rd_kafka_queue_t *eventq;
- const int iterations = 5;
- int i;
- test_conf_init(NULL, NULL, 10);
-
- /* Set up a global config object */
- conf = rd_kafka_conf_new();
- rd_kafka_conf_set(conf, "statistics.interval.ms", "100", NULL, 0);
-
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- eventq = rd_kafka_queue_get_main(rk);
-
- /* Wait for stats event */
- for (i = 0; i < iterations; i++) {
- TIMING_START(&t_delivery, "STATS_EVENT");
- stats_count = 0;
- while (stats_count == 0) {
- rd_kafka_event_t *rkev;
- rkev = rd_kafka_queue_poll(eventq, 100);
- switch (rd_kafka_event_type(rkev)) {
- case RD_KAFKA_EVENT_STATS:
- TEST_SAY("%s event\n",
- rd_kafka_event_name(rkev));
- handle_stats(rkev);
- break;
- case RD_KAFKA_EVENT_NONE:
- break;
- default:
- TEST_SAY("Ignore event: %s\n",
- rd_kafka_event_name(rkev));
- break;
- }
- rd_kafka_event_destroy(rkev);
- }
- TIMING_STOP(&t_delivery);
-
- if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.5 ||
- TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.5) {
- /* CIs and valgrind are too flaky/slow to
- * make this failure meaningful. */
- if (!test_on_ci && !strcmp(test_mode, "bare")) {
- TEST_FAIL(
- "Stats duration %.3fms is >= 50%% "
- "outside statistics.interval.ms 100",
- (float)TIMING_DURATION(&t_delivery) /
- 1000.0f);
- } else {
- TEST_WARN(
- "Stats duration %.3fms is >= 50%% "
- "outside statistics.interval.ms 100\n",
- (float)TIMING_DURATION(&t_delivery) /
- 1000.0f);
- }
- }
- }
-
- rd_kafka_queue_destroy(eventq);
-
- rd_kafka_destroy(rk);
-
- return 0;
-}
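
The event-based delivery above has a callback-based sibling chosen at configuration time. A sketch of that variant; the callback fires from rd_kafka_poll() at each statistics.interval.ms tick:

/* Returning 0 tells librdkafka to free the JSON buffer itself;
 * return 1 instead to take ownership of the buffer. */
static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len,
                    void *opaque) {
        fprintf(stderr, "STATS (%u bytes): %.*s\n",
                (unsigned)json_len, (int)json_len, json);
        return 0;
}

/* During configuration: */
rd_kafka_conf_set(conf, "statistics.interval.ms", "100", NULL, 0);
rd_kafka_conf_set_stats_cb(conf, stats_cb);
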
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp
deleted file mode 100644
index dda8d6ddb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-/**
- * Test Handle::clusterid() and Handle::controllerid()
- */
-
-static void do_test_clusterid(void) {
- Test::Say("[ do_test_clusterid ]\n");
-
- /*
- * Create client with appropriate protocol support for
- * retrieving clusterid
- */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "api.version.request", "true");
- std::string errstr;
- RdKafka::Producer *p_good = RdKafka::Producer::create(conf, errstr);
- if (!p_good)
- Test::Fail("Failed to create client: " + errstr);
- delete conf;
-
- /*
- * Create client with lacking protocol support.
- */
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "api.version.request", "false");
- Test::conf_set(conf, "broker.version.fallback", "0.9.0");
- RdKafka::Producer *p_bad = RdKafka::Producer::create(conf, errstr);
- if (!p_bad)
- Test::Fail("Failed to create client: " + errstr);
- delete conf;
-
-
- std::string clusterid;
-
- /*
- * good producer, give the first call a timeout to allow time
- * for background metadata requests to finish.
- */
- std::string clusterid_good_1 = p_good->clusterid(tmout_multip(2000));
- if (clusterid_good_1.empty())
- Test::Fail("good producer(w timeout): ClusterId is empty");
- Test::Say("good producer(w timeout): ClusterId " + clusterid_good_1 + "\n");
-
- /* Then retrieve a cached copy. */
- std::string clusterid_good_2 = p_good->clusterid(0);
- if (clusterid_good_2.empty())
- Test::Fail("good producer(0): ClusterId is empty");
- Test::Say("good producer(0): ClusterId " + clusterid_good_2 + "\n");
-
- if (clusterid_good_1 != clusterid_good_2)
- Test::Fail("Good ClusterId mismatch: " + clusterid_good_1 +
- " != " + clusterid_good_2);
-
- /*
- * Try bad producer, should return empty string.
- */
- std::string clusterid_bad_1 = p_bad->clusterid(tmout_multip(2000));
- if (!clusterid_bad_1.empty())
- Test::Fail("bad producer(w timeout): ClusterId should be empty, not " +
- clusterid_bad_1);
- std::string clusterid_bad_2 = p_bad->clusterid(0);
- if (!clusterid_bad_2.empty())
- Test::Fail("bad producer(0): ClusterId should be empty, not " +
- clusterid_bad_2);
-
- delete p_good;
- delete p_bad;
-}
-
-
-/**
- * @brief controllerid() testing.
- * This instantiates its own client to avoid having the value cached
- * from do_test_clusterid(), but they are basically the same tests.
- */
-static void do_test_controllerid(void) {
- Test::Say("[ do_test_controllerid ]\n");
-
- /*
- * Create client with appropriate protocol support for
- * retrieving controllerid
- */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "api.version.request", "true");
- std::string errstr;
- RdKafka::Producer *p_good = RdKafka::Producer::create(conf, errstr);
- if (!p_good)
- Test::Fail("Failed to create client: " + errstr);
- delete conf;
-
- /*
- * Create client with lacking protocol support.
- */
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "api.version.request", "false");
- Test::conf_set(conf, "broker.version.fallback", "0.9.0");
- RdKafka::Producer *p_bad = RdKafka::Producer::create(conf, errstr);
- if (!p_bad)
- Test::Fail("Failed to create client: " + errstr);
- delete conf;
-
- /*
- * good producer, give the first call a timeout to allow time
- * for background metadata requests to finish.
- */
- int32_t controllerid_good_1 = p_good->controllerid(tmout_multip(2000));
- if (controllerid_good_1 == -1)
- Test::Fail("good producer(w timeout): Controllerid is -1");
- Test::Say(tostr() << "good producer(w timeout): Controllerid "
- << controllerid_good_1 << "\n");
-
- /* Then retrieve a cached copy. */
- int32_t controllerid_good_2 = p_good->controllerid(0);
- if (controllerid_good_2 == -1)
- Test::Fail("good producer(0): Controllerid is -1");
- Test::Say(tostr() << "good producer(0): Controllerid " << controllerid_good_2
- << "\n");
-
- if (controllerid_good_1 != controllerid_good_2)
- Test::Fail(tostr() << "Good Controllerid mismatch: " << controllerid_good_1
- << " != " << controllerid_good_2);
-
- /*
- * Try bad producer, should return -1
- */
- int32_t controllerid_bad_1 = p_bad->controllerid(tmout_multip(2000));
- if (controllerid_bad_1 != -1)
- Test::Fail(
- tostr() << "bad producer(w timeout): Controllerid should be -1, not "
- << controllerid_bad_1);
- int32_t controllerid_bad_2 = p_bad->controllerid(0);
- if (controllerid_bad_2 != -1)
- Test::Fail(tostr() << "bad producer(0): Controllerid should be -1, not "
- << controllerid_bad_2);
-
- delete p_good;
- delete p_bad;
-}
-
-extern "C" {
-int main_0063_clusterid(int argc, char **argv) {
- do_test_clusterid();
- do_test_controllerid();
- return 0;
-}
-}
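
The C API counterparts behave the same way: a generous timeout allows the initial metadata round-trip, a zero timeout consults only the cache, and brokers without the needed protocol support yield NULL / -1. A sketch, assuming rk is an existing handle:

#include <inttypes.h>

char *cid = rd_kafka_clusterid(rk, 2000 /* allow a metadata round-trip */);
if (cid) {
        printf("ClusterId: %s\n", cid);
        rd_kafka_mem_free(rk, cid); /* caller owns the returned string */
}

int32_t ctrl = rd_kafka_controllerid(rk, 0 /* cached value only */);
if (ctrl != -1)
        printf("ControllerId: %" PRId32 "\n", ctrl);
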
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c
deleted file mode 100644
index e5c5b047a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2017, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-#include <ctype.h>
-
-/**
- * Verify interceptor functionality.
- *
- * Producer MO:
- * - create a chain of N interceptors
- * - allocate a state struct with unique id for each message produced,
- * provide as msg_opaque and reference from payload.
- * - in on_send: verify expected interceptor order by counting number
- * of consecutive bits.
- * - in on_acknowledge: same
- * - produce a message to an invalid topic, which should trigger on_send+on_ack..
- * from within produce().
- *
- * Consumer MO:
- * - create a chain of M interceptors
- * - subscribe to the previously produced topic
- * - in on_consume: find message by id, verify expected order by bit counting.
- * - in on_commit: just count order per on_commit chain run.
- */
-
-
-#define msgcnt 100
-static const int producer_ic_cnt = 5;
-static const int consumer_ic_cnt = 10;
-
-/* The base values help differentiating opaque values between interceptors */
-static const int on_send_base = 1 << 24;
-static const int on_ack_base = 1 << 25;
-static const int on_consume_base = 1 << 26;
-static const int on_commit_base = 1 << 27;
-static const int base_mask = 0xff << 24;
-
-#define _ON_SEND 0
-#define _ON_ACK 1
-#define _ON_CONSUME 2
-#define _ON_CNT 3
-struct msg_state {
- int id;
- int bits[_ON_CNT]; /* Bit field, one bit per interceptor */
- mtx_t lock;
-};
-
-/* Per-message state */
-static struct msg_state msgs[msgcnt];
-
-/* on_commit bits */
-static int on_commit_bits = 0;
-
-/**
- * @brief Verify that \p bits matches the number of expected interceptor
- * call cnt.
- *
- * Verify interceptor order: the lower bits of ic_id
- * denotes the order in which interceptors were added and it
- * must be reflected here, meaning that all lower bits must be set,
- * and no higher ones.
- */
-static void msg_verify_ic_cnt(const struct msg_state *msg,
- const char *what,
- int bits,
- int exp_cnt) {
- int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0;
-
- TEST_ASSERT(bits == exp_bits,
- "msg #%d: %s: expected bits 0x%x (%d), got 0x%x", msg->id,
- what, exp_bits, exp_cnt, bits);
-}
-
-/*
- * @brief Same as msg_verify_ic_cnt() without the msg reliance
- */
-static void verify_ic_cnt(const char *what, int bits, int exp_cnt) {
- int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0;
-
- TEST_ASSERT(bits == exp_bits, "%s: expected bits 0x%x (%d), got 0x%x",
- what, exp_bits, exp_cnt, bits);
-}
-
-
-
-static void verify_msg(const char *what,
- int base,
- int bitid,
- rd_kafka_message_t *rkmessage,
- void *ic_opaque) {
- const char *id_str = rkmessage->key;
- struct msg_state *msg;
- int id;
- int ic_id = (int)(intptr_t)ic_opaque;
-
- /* Verify opaque (base | ic id) */
- TEST_ASSERT((ic_id & base_mask) == base);
- ic_id &= ~base_mask;
-
- /* Find message by id */
- TEST_ASSERT(rkmessage->key && rkmessage->key_len > 0 &&
- id_str[(int)rkmessage->key_len - 1] == '\0' &&
- strlen(id_str) > 0 && isdigit(*id_str));
- id = atoi(id_str);
- TEST_ASSERT(id >= 0 && id < msgcnt, "%s: bad message id %s", what,
- id_str);
- msg = &msgs[id];
-
- mtx_lock(&msg->lock);
-
- TEST_ASSERT(msg->id == id, "expected msg #%d has wrong id %d", id,
- msg->id);
-
- /* Verify message opaque */
- if (!strcmp(what, "on_send") || !strncmp(what, "on_ack", 6))
- TEST_ASSERT(rkmessage->_private == (void *)msg);
-
- TEST_SAYL(3, "%s: interceptor #%d called for message #%d (%d)\n", what,
- ic_id, id, msg->id);
-
- msg_verify_ic_cnt(msg, what, msg->bits[bitid], ic_id);
-
- /* Set this interceptor's bit */
- msg->bits[bitid] |= 1 << ic_id;
-
- mtx_unlock(&msg->lock);
-}
-
-
-static rd_kafka_resp_err_t
-on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- TEST_ASSERT(ic_opaque != NULL);
- verify_msg("on_send", on_send_base, _ON_SEND, rkmessage, ic_opaque);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static rd_kafka_resp_err_t
-on_ack(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- TEST_ASSERT(ic_opaque != NULL);
- verify_msg("on_ack", on_ack_base, _ON_ACK, rkmessage, ic_opaque);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static rd_kafka_resp_err_t
-on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- TEST_ASSERT(ic_opaque != NULL);
- verify_msg("on_consume", on_consume_base, _ON_CONSUME, rkmessage,
- ic_opaque);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static rd_kafka_resp_err_t
-on_commit(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_resp_err_t err,
- void *ic_opaque) {
- int ic_id = (int)(intptr_t)ic_opaque;
-
- /* Since on_commit is triggered a bit randomly and not per
- * message we only try to make sure it gets fully set at least once. */
- TEST_ASSERT(ic_opaque != NULL);
-
- /* Verify opaque (base | ic id) */
- TEST_ASSERT((ic_id & base_mask) == on_commit_base);
- ic_id &= ~base_mask;
-
- TEST_ASSERT(ic_opaque != NULL);
-
- TEST_SAYL(3, "on_commit: interceptor #%d called: %s\n", ic_id,
- rd_kafka_err2str(err));
- if (test_level >= 4)
- test_print_partition_list(offsets);
-
- /* Check for rollover where a previous on_commit stint was
-         * successful and it just now started over */
- if (on_commit_bits > 0 && ic_id == 0) {
- /* Verify completeness of previous stint */
- verify_ic_cnt("on_commit", on_commit_bits, consumer_ic_cnt);
- /* Reset */
- on_commit_bits = 0;
- }
-
- verify_ic_cnt("on_commit", on_commit_bits, ic_id);
-
- /* Set this interceptor's bit */
- on_commit_bits |= 1 << ic_id;
-
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static void do_test_produce(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int msgid,
- int exp_fail,
- int exp_ic_cnt) {
- rd_kafka_resp_err_t err;
- char key[16];
- struct msg_state *msg = &msgs[msgid];
- int i;
-
- /* Message state should be empty, no interceptors should have
- * been called yet.. */
- for (i = 0; i < _ON_CNT; i++)
- TEST_ASSERT(msg->bits[i] == 0);
-
- mtx_init(&msg->lock, mtx_plain);
- msg->id = msgid;
- rd_snprintf(key, sizeof(key), "%d", msgid);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(partition),
- RD_KAFKA_V_KEY(key, strlen(key) + 1),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_OPAQUE(msg), RD_KAFKA_V_END);
-
- mtx_lock(&msg->lock);
- msg_verify_ic_cnt(msg, "on_send", msg->bits[_ON_SEND], exp_ic_cnt);
-
- if (err) {
- msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK],
- exp_ic_cnt);
- TEST_ASSERT(exp_fail, "producev() failed: %s",
- rd_kafka_err2str(err));
- } else {
- msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0);
- TEST_ASSERT(!exp_fail,
- "expected produce failure for msg #%d, not %s",
- msgid, rd_kafka_err2str(err));
- }
- mtx_unlock(&msg->lock);
-}
-
-
-
-static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- int i;
-
- for (i = 0; i < producer_ic_cnt; i++) {
- rd_kafka_resp_err_t err;
-
- err = rd_kafka_interceptor_add_on_send(
- rk, tsprintf("on_send:%d", i), on_send,
- (void *)(intptr_t)(on_send_base | i));
- TEST_ASSERT(!err, "add_on_send failed: %s",
- rd_kafka_err2str(err));
-
- err = rd_kafka_interceptor_add_on_acknowledgement(
- rk, tsprintf("on_acknowledgement:%d", i), on_ack,
- (void *)(intptr_t)(on_ack_base | i));
-                TEST_ASSERT(!err, "add_on_acknowledgement failed: %s",
-                            rd_kafka_err2str(err));
-
-
- /* Add consumer interceptors as well to make sure
- * they are not called. */
- err = rd_kafka_interceptor_add_on_consume(
- rk, tsprintf("on_consume:%d", i), on_consume, NULL);
- TEST_ASSERT(!err, "add_on_consume failed: %s",
- rd_kafka_err2str(err));
-
-
- err = rd_kafka_interceptor_add_on_commit(
- rk, tsprintf("on_commit:%d", i), on_commit, NULL);
- TEST_ASSERT(!err, "add_on_commit failed: %s",
- rd_kafka_err2str(err));
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static void do_test_producer(const char *topic) {
- rd_kafka_conf_t *conf;
- int i;
- rd_kafka_t *rk;
-
- TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(&conf, NULL, 0);
-
-        rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
- on_new_producer, NULL);
-
- /* Create producer */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- for (i = 0; i < msgcnt - 1; i++)
- do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0,
- producer_ic_cnt);
-
- /* Wait for messages to be delivered */
- test_flush(rk, -1);
-
- /* Now send a message that will fail in produce()
- * due to bad partition */
- do_test_produce(rk, topic, 1234, i, 1, producer_ic_cnt);
-
-
- /* Verify acks */
- for (i = 0; i < msgcnt; i++) {
- struct msg_state *msg = &msgs[i];
- mtx_lock(&msg->lock);
- msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK],
- producer_ic_cnt);
- mtx_unlock(&msg->lock);
- }
-
- rd_kafka_destroy(rk);
-}
-
-
-static rd_kafka_resp_err_t on_new_consumer(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- int i;
-
- for (i = 0; i < consumer_ic_cnt; i++) {
- rd_kafka_interceptor_add_on_consume(
- rk, tsprintf("on_consume:%d", i), on_consume,
- (void *)(intptr_t)(on_consume_base | i));
-
- rd_kafka_interceptor_add_on_commit(
- rk, tsprintf("on_commit:%d", i), on_commit,
- (void *)(intptr_t)(on_commit_base | i));
-
- /* Add producer interceptors as well to make sure they
- * are not called. */
- rd_kafka_interceptor_add_on_send(rk, tsprintf("on_send:%d", i),
- on_send, NULL);
-
- rd_kafka_interceptor_add_on_acknowledgement(
- rk, tsprintf("on_acknowledgement:%d", i), on_ack, NULL);
- }
-
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static void do_test_consumer(const char *topic) {
-
- rd_kafka_conf_t *conf;
- int i;
- rd_kafka_t *rk;
-
- TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);
-
- test_conf_init(&conf, NULL, 0);
-
- rd_kafka_conf_interceptor_add_on_new(conf, "on_new_consumer",
- on_new_consumer, NULL);
-
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
-        /* Create consumer */
- rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(rk, topic);
-
- /* Consume messages (-1 for the one that failed producing) */
- test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt - 1,
- NULL);
-
- /* Verify on_consume */
- for (i = 0; i < msgcnt - 1; i++) {
- struct msg_state *msg = &msgs[i];
- mtx_lock(&msg->lock);
- msg_verify_ic_cnt(msg, "on_consume", msg->bits[_ON_CONSUME],
- consumer_ic_cnt);
- mtx_unlock(&msg->lock);
- }
-
- /* Verify that the produce-failed message didnt have
- * interceptors called */
- mtx_lock(&msgs[msgcnt - 1].lock);
- msg_verify_ic_cnt(&msgs[msgcnt - 1], "on_consume",
- msgs[msgcnt - 1].bits[_ON_CONSUME], 0);
- mtx_unlock(&msgs[msgcnt - 1].lock);
-
- test_consumer_close(rk);
-
- verify_ic_cnt("on_commit", on_commit_bits, consumer_ic_cnt);
-
- rd_kafka_destroy(rk);
-}
-
-/**
- * @brief Interceptors must not be copied automatically by conf_dup()
- * unless the interceptors have added on_conf_dup().
- * This behaviour makes sure an interceptor's instance
- * is not duplicated without the interceptor's knowledge or
- * assistance.
- */
-static void do_test_conf_copy(const char *topic) {
- rd_kafka_conf_t *conf, *conf2;
- int i;
- rd_kafka_t *rk;
-
- TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);
-
- memset(&msgs[0], 0, sizeof(msgs));
-
- test_conf_init(&conf, NULL, 0);
-
- rd_kafka_conf_interceptor_add_on_new(conf, "on_new_conf_copy",
- on_new_producer, NULL);
-
- /* Now copy the configuration to verify that interceptors are
- * NOT copied. */
- conf2 = conf;
- conf = rd_kafka_conf_dup(conf2);
- rd_kafka_conf_destroy(conf2);
-
- /* Create producer */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- for (i = 0; i < msgcnt - 1; i++)
- do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, 0);
-
- /* Wait for messages to be delivered */
- test_flush(rk, -1);
-
- /* Verify acks */
- for (i = 0; i < msgcnt; i++) {
- struct msg_state *msg = &msgs[i];
- mtx_lock(&msg->lock);
- msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0);
- mtx_unlock(&msg->lock);
- }
-
- rd_kafka_destroy(rk);
-}
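
The behaviour verified above can be distilled to a short sketch: an interceptor registered via on_new on the original conf does not survive rd_kafka_conf_dup(). The callback name here is hypothetical and only a no-op stand-in:

#include <librdkafka/rdkafka.h>

/* Hypothetical no-op on_new interceptor used for illustration only. */
static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
                                     const rd_kafka_conf_t *conf,
                                     void *ic_opaque,
                                     char *errstr, size_t errstr_size) {
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

/* Sketch: conf_dup() does not carry interceptors over unless they also
 * registered an on_conf_dup() hook. */
static rd_kafka_conf_t *dup_drops_interceptors(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_conf_interceptor_add_on_new(conf, "my_ic", my_on_new, NULL);

        rd_kafka_conf_t *copy = rd_kafka_conf_dup(conf); /* "my_ic" dropped */
        rd_kafka_conf_destroy(conf);
        return copy; /* clients created from `copy` get no interceptor */
}
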
-
-
-int main_0064_interceptors(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
-
- do_test_producer(topic);
-
- do_test_consumer(topic);
-
- do_test_conf_copy(topic);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp
deleted file mode 100644
index 6f2dbb0ac..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-/**
- * Verify that yield() works.
- *
- * In two iterations, do:
- * - Register a DR callback that counts the number of messages and
- * calls yield() in iteration 1, and not in iteration 2.
- * - Produce 100 messages quickly (to ensure same-batch)
- * - Verify that only one DR callback is triggered per poll() call
- * in iteration 1, and all messages in iteration 2.
- */
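
The pattern under test, sketched with the C API (the test itself uses the C++ wrapper): a delivery report callback that calls rd_kafka_yield() makes the surrounding poll() return after serving just that one callback.

#include <librdkafka/rdkafka.h>

/* Sketch: one delivery report per rd_kafka_poll() call. */
static void dr_yield_cb(rd_kafka_t *rk,
                        const rd_kafka_message_t *rkmessage,
                        void *opaque) {
        int *cnt = (int *)opaque;
        (*cnt)++;
        rd_kafka_yield(rk); /* abort the rest of the current poll() */
}
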
-
-class DrCb0065 : public RdKafka::DeliveryReportCb {
- public:
- int cnt; // dr messages seen
- bool do_yield; // whether to yield for each message or not
- RdKafka::Producer *p;
-
- DrCb0065(bool yield) : cnt(0), do_yield(yield), p(NULL) {
- }
-
- void dr_cb(RdKafka::Message &message) {
- if (message.err())
- Test::Fail("DR: message failed: " + RdKafka::err2str(message.err()));
-
- Test::Say(3, tostr() << "DR #" << cnt << "\n");
- cnt++;
-
- if (do_yield)
- p->yield();
- }
-};
-
-
-static void do_test_producer(bool do_yield) {
- int msgcnt = 100;
- std::string errstr;
- RdKafka::ErrorCode err;
- std::string topic = Test::mk_topic_name("0065_yield", 1);
-
- /*
- * Create Producer
- */
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
- DrCb0065 dr(do_yield);
- conf->set("dr_cb", &dr, errstr);
- /* Make sure messages are produced in batches of 100 */
- conf->set("batch.num.messages", "100", errstr);
- conf->set("linger.ms", "10000", errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create producer: " + errstr);
- delete conf;
-
- dr.p = p;
-
-  Test::Say(tostr() << (do_yield ? "Yield: " : "Don't Yield: ") << "Producing "
- << msgcnt << " messages to " << topic << "\n");
-
- for (int i = 0; i < msgcnt; i++) {
- err = p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"hi", 2,
- NULL, 0, 0, NULL);
- if (err)
- Test::Fail("produce() failed: " + RdKafka::err2str(err));
- }
-
-
- int exp_msgs_per_poll = do_yield ? 1 : msgcnt;
-
- while (dr.cnt < msgcnt) {
- int pre_cnt = dr.cnt;
- p->poll(1000);
-
- int this_dr_cnt = dr.cnt - pre_cnt;
- if (this_dr_cnt == 0) {
- /* Other callbacks may cause poll() to return early
- * before DRs are available, ignore these. */
- Test::Say(3, "Zero DRs called, ignoring\n");
- continue;
- }
-
- if (this_dr_cnt != exp_msgs_per_poll)
- Test::Fail(tostr() << "Expected " << exp_msgs_per_poll
- << " DRs per poll() call, got " << this_dr_cnt);
- else
- Test::Say(3, tostr() << dr.cnt << "/" << msgcnt << "\n");
- }
-
- if (dr.cnt != msgcnt)
- Test::Fail(tostr() << "Expected " << msgcnt << " DRs, got " << dr.cnt);
-
-  Test::Say(tostr() << (do_yield ? "Yield: " : "Don't Yield: ")
- << "Success: " << dr.cnt << " DRs received in batches of "
- << exp_msgs_per_poll << "\n");
-
- delete p;
-}
-
-extern "C" {
-int main_0065_yield(int argc, char **argv) {
-  do_test_producer(true /*yield*/);
-  do_test_producer(false /*don't yield*/);
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp
deleted file mode 100644
index 9f9f31240..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-#ifdef _WIN32
-#include <direct.h>
-#endif
-
-
-extern "C" {
-#include "interceptor_test/interceptor_test.h"
-
-struct ictest ictest;
-};
-
-
-/**
- * Verify plugin.library.paths and interceptors
- * using interceptor_test/...
- *
- */
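
For reference, loading such a plugin requires no code beyond setting a single configuration property; a sketch (path taken from the test fixture, error handling abbreviated):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: librdkafka dlopen()s the library named by
 * plugin.library.paths and runs its conf_init() entry point. */
static void load_interceptor_plugin(rd_kafka_conf_t *conf) {
        char errstr[512];
        if (rd_kafka_conf_set(conf, "plugin.library.paths",
                              "interceptor_test/interceptor_test",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fprintf(stderr, "plugin load failed: %s\n", errstr);
}
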
-
-
-static void do_test_plugin() {
- std::string errstr;
- std::string topic = Test::mk_topic_name("0066_plugins", 1);
- static const char *config[] = {
- "session.timeout.ms",
- "6000", /* Before plugin */
- "plugin.library.paths",
- "interceptor_test/interceptor_test",
- "socket.timeout.ms",
- "12", /* After plugin */
- "interceptor_test.config1",
- "one",
- "interceptor_test.config2",
- "two",
- "topic.metadata.refresh.interval.ms",
- "1234",
- NULL,
- };
-
- char cwd[512], *pcwd;
-#ifdef _WIN32
- pcwd = _getcwd(cwd, sizeof(cwd) - 1);
-#else
- pcwd = getcwd(cwd, sizeof(cwd) - 1);
-#endif
- if (pcwd)
- Test::Say(tostr() << "running test from cwd " << cwd << "\n");
-
- /* Interceptor back-channel config */
- ictest_init(&ictest);
- ictest_cnt_init(&ictest.conf_init, 1, 1000);
- ictest_cnt_init(&ictest.on_new, 1, 1);
-
- /* Config for intercepted client */
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
- for (int i = 0; config[i]; i += 2) {
- Test::Say(tostr() << "set(" << config[i] << ", " << config[i + 1] << ")\n");
- if (conf->set(config[i], config[i + 1], errstr))
- Test::Fail(tostr() << "set(" << config[i] << ") failed: " << errstr);
- }
-
- /* Create producer */
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create producer: " + errstr);
-
- if (ictest.on_new.cnt < ictest.on_new.min ||
- ictest.on_new.cnt > ictest.on_new.max)
- Test::Fail(tostr() << "on_new.cnt " << ictest.on_new.cnt
- << " not within range " << ictest.on_new.min << ".."
- << ictest.on_new.max);
-
- /* Verification */
- if (!ictest.config1 || strcmp(ictest.config1, "one"))
- Test::Fail(tostr() << "config1 was " << ictest.config1);
- if (!ictest.config2 || strcmp(ictest.config2, "two"))
- Test::Fail(tostr() << "config2 was " << ictest.config2);
- if (!ictest.session_timeout_ms || strcmp(ictest.session_timeout_ms, "6000"))
- Test::Fail(tostr() << "session.timeout.ms was "
- << ictest.session_timeout_ms);
- if (!ictest.socket_timeout_ms || strcmp(ictest.socket_timeout_ms, "12"))
- Test::Fail(tostr() << "socket.timeout.ms was " << ictest.socket_timeout_ms);
-
- delete conf;
-
- delete p;
-
- ictest_free(&ictest);
-}
-
-extern "C" {
-int main_0066_plugins(int argc, char **argv) {
- do_test_plugin();
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp
deleted file mode 100644
index f71489fa1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-
-
-/**
- * Issue #1306
- *
- * Consume from an empty topic using Consumer and KafkaConsumer.
- */
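
The distinction the test depends on, sketched with the C API: with enable.partition.eof=true an empty partition first yields a PARTITION_EOF event, after which consume calls simply time out.

#include <librdkafka/rdkafka.h>

/* Sketch: returns 1 if the next event is an EOF, i.e. end of log was
 * reached without any message being delivered. */
static int saw_partition_eof(rd_kafka_t *consumer, int timeout_ms) {
        rd_kafka_message_t *m = rd_kafka_consumer_poll(consumer, timeout_ms);
        int eof = m && m->err == RD_KAFKA_RESP_ERR__PARTITION_EOF;
        if (m)
                rd_kafka_message_destroy(m);
        return eof;
}
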
-
-
-static void do_test_empty_topic_consumer() {
- std::string errstr;
- std::string topic = Test::mk_topic_name("0067_empty_topic", 1);
- const int32_t partition = 0;
-
- RdKafka::Conf *conf;
-
- Test::conf_init(&conf, NULL, 0);
-
- Test::conf_set(conf, "enable.partition.eof", "true");
- Test::conf_set(conf, "allow.auto.create.topics", "true");
-
- /* Create simple consumer */
- RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
- if (!consumer)
- Test::Fail("Failed to create Consumer: " + errstr);
-
- RdKafka::Topic *rkt = RdKafka::Topic::create(consumer, topic, NULL, errstr);
- if (!rkt)
- Test::Fail("Simple Topic failed: " + errstr);
-
-
- /* Create the topic through a metadata request. */
- Test::Say("Creating empty topic " + topic + "\n");
- RdKafka::Metadata *md;
- RdKafka::ErrorCode err =
- consumer->metadata(false, rkt, &md, tmout_multip(10 * 1000));
- if (err)
- Test::Fail("Failed to create topic " + topic + ": " +
- RdKafka::err2str(err));
- delete md;
-
- /* Start consumer */
- err = consumer->start(rkt, partition, RdKafka::Topic::OFFSET_BEGINNING);
- if (err)
- Test::Fail("Consume start() failed: " + RdKafka::err2str(err));
-
- /* Consume using legacy consumer, should give an EOF and nothing else. */
- Test::Say("Simple Consumer: consuming\n");
- RdKafka::Message *msg =
- consumer->consume(rkt, partition, tmout_multip(10 * 1000));
- if (msg->err() != RdKafka::ERR__PARTITION_EOF)
- Test::Fail("Simple consume() expected EOF, got " +
- RdKafka::err2str(msg->err()));
- delete msg;
-
- /* Nothing else should come now, just a consume() timeout */
- msg = consumer->consume(rkt, partition, 1 * 1000);
- if (msg->err() != RdKafka::ERR__TIMED_OUT)
- Test::Fail("Simple consume() expected timeout, got " +
- RdKafka::err2str(msg->err()));
- delete msg;
-
- consumer->stop(rkt, partition);
-
- delete rkt;
- delete consumer;
-
-
- /*
- * Now do the same thing using the high-level KafkaConsumer.
- */
-
- Test::conf_set(conf, "group.id", topic);
-
- Test::conf_set(conf, "enable.partition.eof", "true");
- Test::conf_set(conf, "allow.auto.create.topics", "true");
-
- RdKafka::KafkaConsumer *kconsumer =
- RdKafka::KafkaConsumer::create(conf, errstr);
- if (!kconsumer)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
-
- std::vector<RdKafka::TopicPartition *> part;
- part.push_back(RdKafka::TopicPartition::create(topic, partition));
-
- err = kconsumer->assign(part);
- if (err)
- Test::Fail("assign() failed: " + RdKafka::err2str(err));
-
- RdKafka::TopicPartition::destroy(part);
-
- Test::Say("KafkaConsumer: consuming\n");
- msg = kconsumer->consume(tmout_multip(5 * 1000));
- if (msg->err() != RdKafka::ERR__PARTITION_EOF)
- Test::Fail("KafkaConsumer consume() expected EOF, got " +
- RdKafka::err2str(msg->err()));
- delete msg;
-
- /* Nothing else should come now, just a consume() timeout */
- msg = kconsumer->consume(1 * 1000);
- if (msg->err() != RdKafka::ERR__TIMED_OUT)
- Test::Fail("KafkaConsumer consume() expected timeout, got " +
- RdKafka::err2str(msg->err()));
- delete msg;
-
- kconsumer->close();
-
- delete kconsumer;
- delete conf;
-}
-
-extern "C" {
-int main_0067_empty_topic(int argc, char **argv) {
- do_test_empty_topic_consumer();
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c
deleted file mode 100644
index a7ad37e16..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#if WITH_SOCKEM
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * Force produce requests to timeout to test error handling.
- */
-
-/**
- * @brief Sockem connect, called from **internal librdkafka thread** through
- * librdkafka's connect_cb
- */
-static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
-
- /* Let delay be high to trigger the local timeout */
- sockem_set(skm, "delay", 10000, NULL);
- return 0;
-}
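
With every connection delayed by 10 s and message.timeout.ms set to 2000 below, no produce request can be acknowledged in time, so each message fails locally. The status check performed by the dr_msg_cb further down can be distilled to this sketch:

/* Sketch: a timed-out message whose request may have reached the
 * broker is reported as "possibly persisted". */
static int timed_out_possibly_persisted(const rd_kafka_message_t *rkmessage) {
        return rkmessage->err == RD_KAFKA_RESP_ERR__MSG_TIMED_OUT &&
               rd_kafka_message_status(rkmessage) ==
                   RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
}
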
-
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
-        /* Ignore connectivity errors since we'll be bringing down
-         * connectivity on purpose.
-         * The SASL authenticator would otherwise treat a connection-down
-         * event in the auth state as the broker not supporting SASL PLAIN. */
- TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
- err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
- err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- return 0;
- return 1;
-}
-
-static int msg_dr_cnt = 0;
-static int msg_dr_fail_cnt = 0;
-
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- msg_dr_cnt++;
- if (rkmessage->err != RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
- TEST_FAIL_LATER(
- "Expected message to fail with MSG_TIMED_OUT, "
- "got: %s",
- rd_kafka_err2str(rkmessage->err));
- else {
- TEST_ASSERT_LATER(rd_kafka_message_status(rkmessage) ==
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED,
- "Message should have status "
- "PossiblyPersisted (%d), not %d",
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED,
- rd_kafka_message_status(rkmessage));
- msg_dr_fail_cnt++;
- }
-}
-
-
-
-int main_0068_produce_timeout(int argc, char **argv) {
- rd_kafka_t *rk;
- const char *topic = test_mk_topic_name("0068_produce_timeout", 1);
- uint64_t testid;
- const int msgcnt = 10;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- int msgcounter = 0;
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
-
- test_socket_enable(conf);
- test_curr->connect_cb = connect_cb;
- test_curr->is_fatal_cb = is_fatal_cb;
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_producer_topic(rk, topic, "message.timeout.ms",
- "2000", NULL);
-
- TEST_SAY("Auto-creating topic %s\n", topic);
- test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
-
- TEST_SAY("Producing %d messages that should timeout\n", msgcnt);
- test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0, 0,
- &msgcounter);
-
-
- TEST_SAY("Flushing..\n");
- rd_kafka_flush(rk, 10000);
-
-        TEST_SAY("%d/%d delivery reports, of which %d with the expected error\n",
-                 msg_dr_cnt, msgcnt, msg_dr_fail_cnt);
-
- TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt,
- msg_dr_cnt);
- TEST_ASSERT(msg_dr_fail_cnt == msgcnt, "expected %d, got %d", msgcnt,
- msg_dr_fail_cnt);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
-
-
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c
deleted file mode 100644
index 933e53775..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/**
- * Issue #1371:
- * Run two consumers in the same group for a 2-partition topic,
- * alter the topic to have 4 partitions, kill off the first consumer,
- * the second consumer will segfault.
- */
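
The partition-count change is done through the test_create_partitions() helper below; with the plain Admin API it would look roughly like this (sketch, result handling elided):

#include <librdkafka/rdkafka.h>

/* Sketch: grow `topic` to `total` partitions via CreatePartitions. */
static void grow_topic(rd_kafka_t *rk, const char *topic, size_t total) {
        char errstr[512];
        rd_kafka_NewPartitions_t *np = rd_kafka_NewPartitions_new(
            topic, total, errstr, sizeof(errstr));
        rd_kafka_queue_t *q = rd_kafka_queue_new(rk);

        rd_kafka_CreatePartitions(rk, &np, 1, NULL, q);
        /* ... await the RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT on q ... */

        rd_kafka_NewPartitions_destroy(np);
        rd_kafka_queue_destroy(q);
}
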
-
-#include "rdkafka.h"
-
-
-static rd_kafka_t *c1, *c2;
-static rd_kafka_resp_err_t state1, state2;
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- rd_kafka_resp_err_t *statep = NULL;
-
- if (rk == c1)
- statep = &state1;
- else if (rk == c2)
- statep = &state2;
- else
- TEST_FAIL("Invalid rk %p", rk);
-
- TEST_SAY("Rebalance for %s: %s:\n", rd_kafka_name(rk),
- rd_kafka_err2str(err));
- test_print_partition_list(parts);
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- rd_kafka_assign(rk, parts);
- else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
- rd_kafka_assign(rk, NULL);
-
- *statep = err;
-}
-
-
-int main_0069_consumer_add_parts(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
- int64_t ts_start;
- int wait_sec;
-
- test_conf_init(NULL, NULL, 60);
-
- TEST_SAY("Creating 2 consumers\n");
- c1 = test_create_consumer(topic, rebalance_cb, NULL, NULL);
- c2 = test_create_consumer(topic, rebalance_cb, NULL, NULL);
-
- TEST_SAY("Creating topic %s with 2 partitions\n", topic);
- test_create_topic(c1, topic, 2, 1);
-
- test_wait_topic_exists(c1, topic, 10 * 1000);
-
- TEST_SAY("Subscribing\n");
- test_consumer_subscribe(c1, topic);
- test_consumer_subscribe(c2, topic);
-
-
- TEST_SAY("Waiting for initial assignment for both consumers\n");
- while (state1 != RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ||
- state2 != RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- test_consumer_poll_no_msgs("wait-rebalance", c1, 0, 1000);
- test_consumer_poll_no_msgs("wait-rebalance", c2, 0, 1000);
- }
-
-
- TEST_SAY("Changing partition count for topic %s\n", topic);
- test_create_partitions(NULL, topic, 4);
-
- TEST_SAY(
- "Closing consumer 1 (to quickly trigger rebalance with new "
- "partitions)\n");
- test_consumer_close(c1);
- rd_kafka_destroy(c1);
-
-        wait_sec = test_quick ? 5 : 10;
-        TEST_SAY("Waiting %d seconds to verify that consumer 2 does not crash\n",
-                 wait_sec);
-        ts_start = test_clock();
- do {
- test_consumer_poll_no_msgs("wait-stable", c2, 0, 1000);
- } while (test_clock() < ts_start + (wait_sec * 1000000));
-
- TEST_ASSERT(state2 == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- "Expected consumer 2 to have assignment, not in state %s",
- rd_kafka_err2str(state2));
-
- test_consumer_close(c2);
- rd_kafka_destroy(c2);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp
deleted file mode 100644
index fac48185c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "testcpp.h"
-#include <cstring>
-
-/**
- * Verification of difference between empty and null Key and Value
- */
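
On the wire the difference is pointer vs. length: a NULL pointer produces a Null key/value, while a non-NULL pointer with length 0 produces an Empty one. A C-API sketch (topic name hypothetical):

#include <librdkafka/rdkafka.h>

/* Sketch: Null vs Empty key and value. */
static void produce_null_vs_empty(rd_kafka_t *rk) {
        /* Null key and Null value */
        rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                          RD_KAFKA_V_KEY(NULL, 0),
                          RD_KAFKA_V_VALUE(NULL, 0), RD_KAFKA_V_END);
        /* Empty (zero-length, non-NULL) key and value */
        rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                          RD_KAFKA_V_KEY("", 0),
                          RD_KAFKA_V_VALUE("", 0), RD_KAFKA_V_END);
}
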
-
-
-static int check_equal(const char *exp,
- const char *actual,
- size_t len,
- std::string what) {
- size_t exp_len = exp ? strlen(exp) : 0;
- int failures = 0;
-
- if (!actual && len != 0) {
- Test::FailLater(tostr()
- << what << ": expected length 0 for Null, not " << len);
- failures++;
- }
-
- if (exp) {
- if (!actual) {
- Test::FailLater(tostr()
- << what << ": expected \"" << exp << "\", not Null");
- failures++;
-
- } else if (len != exp_len || strncmp(exp, actual, exp_len)) {
- Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not \""
- << actual << "\" (" << len << " bytes)");
- failures++;
- }
-
- } else {
- if (actual) {
- Test::FailLater(tostr() << what << ": expected Null, not \"" << actual
- << "\" (" << len << " bytes)");
- failures++;
- }
- }
-
- if (!failures)
- Test::Say(3, tostr() << what << ": matched expectation\n");
-
- return failures;
-}
-
-
-static void do_test_null_empty(bool api_version_request) {
- std::string topic = Test::mk_topic_name("0070_null_empty", 1);
- const int partition = 0;
-
- Test::Say(tostr() << "Testing with api.version.request="
- << api_version_request << " on topic " << topic
- << " partition " << partition << "\n");
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 0);
- Test::conf_set(conf, "api.version.request",
- api_version_request ? "true" : "false");
- Test::conf_set(conf, "acks", "all");
-
-
- std::string errstr;
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- const int msgcnt = 8;
- static const char *msgs[msgcnt * 2] = {NULL, NULL, "key2", NULL, "key3",
- "val3", NULL, "val4", "", NULL,
- NULL, "", "", ""};
-
- RdKafka::ErrorCode err;
-
- for (int i = 0; i < msgcnt * 2; i += 2) {
- Test::Say(3, tostr() << "Produce message #" << (i / 2) << ": key=\""
- << (msgs[i] ? msgs[i] : "Null") << "\", value=\""
- << (msgs[i + 1] ? msgs[i + 1] : "Null") << "\"\n");
- err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
- /* Value */
- (void *)msgs[i + 1], msgs[i + 1] ? strlen(msgs[i + 1]) : 0,
- /* Key */
- (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0, 0, NULL);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("Produce failed: " + RdKafka::err2str(err));
- }
-
- if (p->flush(tmout_multip(3 * 5000)) != 0)
- Test::Fail("Not all messages flushed");
-
- Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic
- << "\n");
-
- delete p;
-
- /*
- * Now consume messages from the beginning, making sure they match
- * what was produced.
- */
-
- /* Create consumer */
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "group.id", topic);
- Test::conf_set(conf, "api.version.request",
- api_version_request ? "true" : "false");
- Test::conf_set(conf, "enable.auto.commit", "false");
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Assign the partition */
- std::vector<RdKafka::TopicPartition *> parts;
- parts.push_back(RdKafka::TopicPartition::create(
- topic, partition, RdKafka::Topic::OFFSET_BEGINNING));
- err = c->assign(parts);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("assign() failed: " + RdKafka::err2str(err));
- RdKafka::TopicPartition::destroy(parts);
-
- /* Start consuming */
- int failures = 0;
- for (int i = 0; i < msgcnt * 2; i += 2) {
- RdKafka::Message *msg = c->consume(tmout_multip(5000));
- if (msg->err())
- Test::Fail(tostr() << "consume() failed at message " << (i / 2) << ": "
- << msg->errstr());
-
- /* verify key */
- failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL,
- msg->key_len(),
- tostr() << "message #" << (i / 2) << " (offset "
- << msg->offset() << ") key");
-    /* verify the key_pointer() API too */
- failures +=
- check_equal(msgs[i], (const char *)msg->key_pointer(), msg->key_len(),
- tostr() << "message #" << (i / 2) << " (offset "
- << msg->offset() << ") key");
-
- /* verify value */
- failures +=
- check_equal(msgs[i + 1], (const char *)msg->payload(), msg->len(),
- tostr() << "message #" << (i / 2) << " (offset "
- << msg->offset() << ") value");
- delete msg;
- }
-
- Test::Say(tostr() << "Done consuming, closing. " << failures
- << " test failures\n");
- if (failures)
- Test::Fail(tostr() << "See " << failures << " previous test failure(s)");
-
- c->close();
- delete c;
-}
-
-
-extern "C" {
-int main_0070_null_empty(int argc, char **argv) {
- if (test_broker_version >= TEST_BRKVER(0, 10, 0, 0))
- do_test_null_empty(true);
- do_test_null_empty(false);
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c
deleted file mode 100644
index 0576d611a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * Local (no broker) unit-like tests of Message Headers
- */
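
The header API exercised below, in one compact sketch: a name may carry several values, insertion order is preserved, and get_last() returns the most recently added value.

#include <librdkafka/rdkafka.h>

/* Sketch of the basic rd_kafka_headers_t operations. */
static void headers_basics(void) {
        rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
        const void *value;
        size_t size;

        rd_kafka_header_add(hdrs, "multi", -1, "a", -1);
        rd_kafka_header_add(hdrs, "multi", -1, "b", -1);
        rd_kafka_header_get_last(hdrs, "multi", &value, &size); /* "b" */
        rd_kafka_header_remove(hdrs, "multi"); /* removes all "multi" */
        rd_kafka_headers_destroy(hdrs);
}
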
-
-
-
-static int exp_msgid = 0;
-
-struct expect {
- const char *name;
- const char *value;
-};
-
-/**
- * @brief Returns the message id.
- */
-static int expect_check(const char *what,
- const struct expect *expected,
- const rd_kafka_message_t *rkmessage) {
- const struct expect *exp;
- rd_kafka_resp_err_t err;
- size_t idx = 0;
- const char *name;
- const char *value;
- size_t size;
- rd_kafka_headers_t *hdrs;
- int msgid;
-
- if (rkmessage->len != sizeof(msgid))
- TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)",
- what, rkmessage->len);
-
- memcpy(&msgid, rkmessage->payload, rkmessage->len);
-
- if ((err = rd_kafka_message_headers(rkmessage, &hdrs))) {
- if (msgid == 0)
- return 0; /* No headers expected for first message */
-
- TEST_FAIL("%s: Expected headers in message %d: %s", what, msgid,
- rd_kafka_err2str(err));
- } else {
- TEST_ASSERT(msgid != 0,
- "%s: first message should have no headers", what);
- }
-
-        /* msgid is always the first header and has a variable value,
-         * so it is hard to match against the expect struct. */
- for (idx = 0, exp = expected; !rd_kafka_header_get_all(
- hdrs, idx, &name, (const void **)&value, &size);
- idx++, exp++) {
-
- TEST_SAYL(3,
- "%s: Msg #%d: "
- "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n",
- what, msgid, idx, name, value ? value : "(NULL)",
- exp->name, exp->value ? exp->value : "(NULL)");
-
- if (strcmp(name, exp->name))
- TEST_FAIL("%s: Expected header %s at idx #%" PRIusz
- ", not %s",
-                                  what, exp->name, idx, name);
-
- if (!strcmp(name, "msgid")) {
- int vid;
-
- /* Special handling: compare msgid header value
- * to message body, should be identical */
- if (size != rkmessage->len || size != sizeof(int))
- TEST_FAIL(
- "%s: "
- "Expected msgid/int-sized payload "
- "%" PRIusz ", got %" PRIusz,
- what, size, rkmessage->len);
-
- /* Copy to avoid unaligned access (by cast) */
- memcpy(&vid, value, size);
-
- if (vid != msgid)
- TEST_FAIL("%s: Header msgid %d != payload %d",
- what, vid, msgid);
-
- if (exp_msgid != vid)
- TEST_FAIL("%s: Expected msgid %d, not %d", what,
- exp_msgid, vid);
- continue;
- }
-
- if (!exp->value) {
- /* Expected NULL value */
- TEST_ASSERT(!value,
- "%s: Expected NULL value for %s, got %s",
- what, exp->name, value);
-
- } else {
- TEST_ASSERT(value,
- "%s: "
- "Expected non-NULL value for %s, got NULL",
- what, exp->name);
-
- TEST_ASSERT(size == strlen(exp->value),
- "%s: Expected size %" PRIusz
- " for %s, "
- "not %" PRIusz,
- what, strlen(exp->value), exp->name, size);
-
- TEST_ASSERT(value[size] == '\0',
- "%s: "
- "Expected implicit null-terminator for %s",
- what, exp->name);
-
- TEST_ASSERT(!strcmp(exp->value, value),
- "%s: "
- "Expected value %s for %s, not %s",
- what, exp->value, exp->name, value);
- }
- }
-
- TEST_ASSERT(exp->name == NULL,
- "%s: Expected the expected, but stuck at %s which was "
- "unexpected",
- what, exp->name);
-
- return msgid;
-}
-
-
-/**
- * @brief Delivery report callback
- */
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- const struct expect expected[] = {
- {"msgid", NULL}, /* special handling */
- {"static", "hey"}, {"null", NULL}, {"empty", ""},
- {"send1", "1"}, {"multi", "multi5"}, {NULL}};
- const struct expect replace_expected[] = {
- {"msgid", NULL}, {"new", "one"},
- {"this is the", NULL}, {"replaced headers\"", ""},
- {"new", "right?"}, {NULL}};
- const struct expect *exp;
- rd_kafka_headers_t *new_hdrs;
- int msgid;
-
- TEST_ASSERT(rkmessage->err == RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
- "Expected message to fail with MSG_TIMED_OUT, not %s",
- rd_kafka_err2str(rkmessage->err));
-
- msgid = expect_check(__FUNCTION__, expected, rkmessage);
-
- /* Replace entire headers list */
- if (msgid > 0) {
- new_hdrs = rd_kafka_headers_new(1);
- rd_kafka_header_add(new_hdrs, "msgid", -1, &msgid,
- sizeof(msgid));
- for (exp = &replace_expected[1]; exp->name; exp++)
- rd_kafka_header_add(new_hdrs, exp->name, -1, exp->value,
- -1);
-
- rd_kafka_message_set_headers((rd_kafka_message_t *)rkmessage,
- new_hdrs);
-
- expect_check(__FUNCTION__, replace_expected, rkmessage);
- }
-
- exp_msgid++;
-}
-
-static void expect_iter(const char *what,
- const rd_kafka_headers_t *hdrs,
- const char *name,
- const char **expected,
- size_t cnt) {
- size_t idx;
- rd_kafka_resp_err_t err;
- const void *value;
- size_t size;
-
- for (idx = 0;
- !(err = rd_kafka_header_get(hdrs, idx, name, &value, &size));
- idx++) {
- TEST_ASSERT(idx < cnt,
- "%s: too many headers matching '%s', "
- "expected %" PRIusz,
- what, name, cnt);
- TEST_SAYL(3,
- "%s: get(%" PRIusz
- ", '%s') "
- "expecting '%s' =? '%s'\n",
- what, idx, name, expected[idx], (const char *)value);
-
-
- TEST_ASSERT(
- !strcmp((const char *)value, expected[idx]),
- "%s: get(%" PRIusz ", '%s') expected '%s', not '%s'", what,
- idx, name, expected[idx], (const char *)value);
- }
-
- TEST_ASSERT(idx == cnt,
- "%s: expected %" PRIusz
- " headers matching '%s', not %" PRIusz,
- what, cnt, name, idx);
-}
-
-
-
-/**
- * @brief First on_send() interceptor
- */
-static rd_kafka_resp_err_t
-on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- const struct expect expected[] = {
- {"msgid", NULL}, /* special handling */
- {"static", "hey"},
- {"multi", "multi1"},
- {"multi", "multi2"},
- {"multi", "multi3"},
- {"null", NULL},
- {"empty", ""},
- {NULL}};
- const char *expect_iter_multi[4] = {
- "multi1", "multi2", "multi3", "multi4" /* added below */
- };
- const char *expect_iter_static[1] = {"hey"};
- rd_kafka_headers_t *hdrs;
- size_t header_cnt;
- rd_kafka_resp_err_t err;
- const void *value;
- size_t size;
-
- expect_check(__FUNCTION__, expected, rkmessage);
-
- err = rd_kafka_message_headers(rkmessage, &hdrs);
- if (err) /* First message has no headers. */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-        TEST_ASSERT(header_cnt == 7, "Expected header count 7, got %" PRIusz,
-                    header_cnt);
-
- rd_kafka_header_add(hdrs, "multi", -1, "multi4", -1);
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-        TEST_ASSERT(header_cnt == 8, "Expected header count 8, got %" PRIusz,
-                    header_cnt);
-
- /* test iter() */
- expect_iter(__FUNCTION__, hdrs, "multi", expect_iter_multi, 4);
- expect_iter(__FUNCTION__, hdrs, "static", expect_iter_static, 1);
- expect_iter(__FUNCTION__, hdrs, "notexists", NULL, 0);
-
- rd_kafka_header_add(hdrs, "send1", -1, "1", -1);
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-        TEST_ASSERT(header_cnt == 9, "Expected header count 9, got %" PRIusz,
-                    header_cnt);
-
- rd_kafka_header_remove(hdrs, "multi");
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-        TEST_ASSERT(header_cnt == 5, "Expected header count 5, got %" PRIusz,
-                    header_cnt);
-
- rd_kafka_header_add(hdrs, "multi", -1, "multi5", -1);
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-        TEST_ASSERT(header_cnt == 6, "Expected header count 6, got %" PRIusz,
-                    header_cnt);
-
- /* test get_last() */
- err = rd_kafka_header_get_last(hdrs, "multi", &value, &size);
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- TEST_ASSERT(size == strlen("multi5") &&
- !strcmp((const char *)value, "multi5"),
- "expected 'multi5', not '%s'", (const char *)value);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Second on_send() interceptor
- */
-static rd_kafka_resp_err_t
-on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- const struct expect expected[] = {
- {"msgid", NULL}, /* special handling */
- {"static", "hey"}, {"null", NULL}, {"empty", ""},
- {"send1", "1"}, {"multi", "multi5"}, {NULL}};
-
- expect_check(__FUNCTION__, expected, rkmessage);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief on_new() interceptor to set up message interceptors
- * from rd_kafka_new().
- */
-static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL);
- rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-int main_0072_headers_ut(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 0);
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- int i;
- size_t header_cnt;
- const int msgcnt = 10;
- rd_kafka_resp_err_t err;
-
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "message.timeout.ms", "1");
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
-
- rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, NULL);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* First message is without headers (negative testing) */
- i = 0;
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_VALUE(&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err));
- exp_msgid++;
-
- for (i = 1; i < msgcnt; i++, exp_msgid++) {
- /* Use headers list on one message */
- if (i == 3) {
- rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-                        TEST_ASSERT(header_cnt == 0,
-                                    "Expected header count 0, got %" PRIusz,
-                                    header_cnt);
-
- rd_kafka_headers_t *copied;
-
- rd_kafka_header_add(hdrs, "msgid", -1, &i, sizeof(i));
- rd_kafka_header_add(hdrs, "static", -1, "hey", -1);
- rd_kafka_header_add(hdrs, "multi", -1, "multi1", -1);
- rd_kafka_header_add(hdrs, "multi", -1, "multi2", 6);
- rd_kafka_header_add(hdrs, "multi", -1, "multi3",
- strlen("multi3"));
- rd_kafka_header_add(hdrs, "null", -1, NULL, 0);
-
- /* Make a copy of the headers to verify copy() */
- copied = rd_kafka_headers_copy(hdrs);
-
- header_cnt = rd_kafka_header_cnt(hdrs);
-                        TEST_ASSERT(header_cnt == 6,
-                                    "Expected header count 6, got %" PRIusz,
-                                    header_cnt);
-
- rd_kafka_headers_destroy(hdrs);
-
- /* Last header ("empty") is added below */
-
- /* Try unsupported _V_HEADER() and _V_HEADERS() mix,
- * must fail with CONFLICT */
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_VALUE(&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_HEADER("will_be_removed", "yep", -1),
- RD_KAFKA_V_HEADERS(copied),
- RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__CONFLICT,
- "producev(): expected CONFLICT, got %s",
- rd_kafka_err2str(err));
-
- /* Proper call using only _V_HEADERS() */
- rd_kafka_header_add(copied, "empty", -1, "", -1);
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_VALUE(&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_HEADERS(copied), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() failed: %s",
- rd_kafka_err2str(err));
-
- } else {
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_VALUE(&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)),
- RD_KAFKA_V_HEADER("static", "hey", -1),
- RD_KAFKA_V_HEADER("multi", "multi1", -1),
- RD_KAFKA_V_HEADER("multi", "multi2", 6),
- RD_KAFKA_V_HEADER("multi", "multi3",
- strlen("multi3")),
- RD_KAFKA_V_HEADER("null", NULL, 0),
- RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() failed: %s",
- rd_kafka_err2str(err));
- }
- }
-
- /* Reset expected message id for dr */
- exp_msgid = 0;
-
- /* Wait for timeouts and delivery reports */
- rd_kafka_flush(rk, 5000);
-
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c
deleted file mode 100644
index e7e5c4074..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * Message Headers end-to-end tests
- */
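
One ownership rule this test exercises deserves a note: rd_kafka_message_headers() returns a list still owned by the message, whereas rd_kafka_message_detach_headers() transfers ownership to the caller. A sketch:

#include <librdkafka/rdkafka.h>

/* Sketch: detaching headers so they outlive the message. */
static rd_kafka_headers_t *take_headers(rd_kafka_message_t *rkm) {
        rd_kafka_headers_t *hdrs;
        if (rd_kafka_message_detach_headers(rkm, &hdrs))
                return NULL; /* no headers on this message */
        return hdrs; /* caller must rd_kafka_headers_destroy() this */
}
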
-
-
-
-static int exp_msgid = 0;
-
-struct expect {
- const char *name;
- const char *value;
-};
-
-
-
-static void expect_check(const char *what,
- const struct expect *expected,
- rd_kafka_message_t *rkmessage,
- int is_const) {
- const struct expect *exp;
- rd_kafka_resp_err_t err;
- size_t idx = 0;
- const char *name;
- const char *value;
- size_t size;
- rd_kafka_headers_t *hdrs;
- int msgid;
-
- if (rkmessage->len != sizeof(msgid))
- TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)",
- what, rkmessage->len);
-
- memcpy(&msgid, rkmessage->payload, rkmessage->len);
-
- if ((err = rd_kafka_message_headers(rkmessage, &hdrs))) {
- if (msgid == 0) {
- rd_kafka_resp_err_t err2;
- TEST_SAYL(3, "%s: Msg #%d: no headers, good\n", what,
- msgid);
-
- err2 =
- rd_kafka_message_detach_headers(rkmessage, &hdrs);
- TEST_ASSERT(err == err2,
- "expected detach_headers() error %s "
- "to match headers() error %s",
- rd_kafka_err2str(err2),
- rd_kafka_err2str(err));
-
- return; /* No headers expected for first message */
- }
-
- TEST_FAIL("%s: Expected headers in message %d: %s", what, msgid,
- rd_kafka_err2str(err));
- } else {
- TEST_ASSERT(msgid != 0,
- "%s: first message should have no headers", what);
- }
-
- test_headers_dump(what, 3, hdrs);
-
- for (idx = 0, exp = expected; !rd_kafka_header_get_all(
- hdrs, idx, &name, (const void **)&value, &size);
- idx++, exp++) {
-
- TEST_SAYL(3,
- "%s: Msg #%d: "
- "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n",
- what, msgid, idx, name, value ? value : "(NULL)",
- exp->name, exp->value ? exp->value : "(NULL)");
-
- if (strcmp(name, exp->name))
- TEST_FAIL(
- "%s: Msg #%d: "
- "Expected header %s at idx #%" PRIusz
- ", not '%s' (%" PRIusz ")",
- what, msgid, exp->name, idx, name, strlen(name));
-
- if (!strcmp(name, "msgid")) {
- int vid;
-
- /* Special handling: compare msgid header value
- * to message body, should be identical */
- if (size != rkmessage->len || size != sizeof(int))
- TEST_FAIL(
- "%s: "
- "Expected msgid/int-sized payload "
- "%" PRIusz ", got %" PRIusz,
- what, size, rkmessage->len);
-
- /* Copy to avoid unaligned access (by cast) */
- memcpy(&vid, value, size);
-
- if (vid != msgid)
- TEST_FAIL("%s: Header msgid %d != payload %d",
- what, vid, msgid);
-
- if (exp_msgid != vid)
- TEST_FAIL("%s: Expected msgid %d, not %d", what,
- exp_msgid, vid);
- continue;
- }
-
- if (!exp->value) {
- /* Expected NULL value */
- TEST_ASSERT(!value,
- "%s: Expected NULL value for %s, got %s",
- what, exp->name, value);
-
- } else {
- TEST_ASSERT(value,
- "%s: "
- "Expected non-NULL value for %s, got NULL",
- what, exp->name);
-
- TEST_ASSERT(size == strlen(exp->value),
- "%s: Expected size %" PRIusz
- " for %s, "
- "not %" PRIusz,
- what, strlen(exp->value), exp->name, size);
-
- TEST_ASSERT(value[size] == '\0',
- "%s: "
- "Expected implicit null-terminator for %s",
- what, exp->name);
-
- TEST_ASSERT(!strcmp(exp->value, value),
- "%s: "
- "Expected value %s for %s, not %s",
- what, exp->value, exp->name, value);
- }
- }
-
- TEST_ASSERT(exp->name == NULL,
- "%s: Expected the expected, but stuck at %s which was "
- "unexpected",
- what, exp->name);
-
- if (!strcmp(what, "handle_consumed_msg") && !is_const &&
- (msgid % 3) == 0) {
- rd_kafka_headers_t *dhdrs;
-
- err = rd_kafka_message_detach_headers(rkmessage, &dhdrs);
- TEST_ASSERT(!err, "detach_headers() should not fail, got %s",
- rd_kafka_err2str(err));
- TEST_ASSERT(hdrs == dhdrs);
-
- /* Verify that a new headers object can be obtained */
- err = rd_kafka_message_headers(rkmessage, &hdrs);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR);
- TEST_ASSERT(hdrs != dhdrs);
- rd_kafka_headers_destroy(dhdrs);
-
- expect_check("post_detach_headers", expected, rkmessage,
- is_const);
- }
-}
-
-
-/**
- * @brief Final (as in no more header modifications) message check.
- */
-static void
-msg_final_check(const char *what, rd_kafka_message_t *rkmessage, int is_const) {
- const struct expect expected[] = {
- {"msgid", NULL}, /* special handling */
- {"static", "hey"}, {"null", NULL}, {"empty", ""},
- {"send1", "1"}, {"multi", "multi5"}, {NULL}};
-
- expect_check(what, expected, rkmessage, is_const);
-
- exp_msgid++;
-}
-
-/**
- * @brief Handle consumed message, must be identical to dr_msg_cb
- */
-static void handle_consumed_msg(rd_kafka_message_t *rkmessage) {
- msg_final_check(__FUNCTION__, rkmessage, 0);
-}
-
-/**
- * @brief Delivery report callback
- */
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- TEST_ASSERT(!rkmessage->err, "Message delivery failed: %s",
- rd_kafka_err2str(rkmessage->err));
-
- msg_final_check(__FUNCTION__, (rd_kafka_message_t *)rkmessage, 1);
-}
-
-
-/**
- * @brief First on_send() interceptor
- */
-static rd_kafka_resp_err_t
-on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- const struct expect expected[] = {
- {"msgid", NULL}, /* special handling */
- {"static", "hey"},
- {"multi", "multi1"},
- {"multi", "multi2"},
- {"multi", "multi3"},
- {"null", NULL},
- {"empty", ""},
- {NULL}};
- rd_kafka_headers_t *hdrs;
- rd_kafka_resp_err_t err;
-
- expect_check(__FUNCTION__, expected, rkmessage, 0);
-
- err = rd_kafka_message_headers(rkmessage, &hdrs);
- if (err) /* First message has no headers. */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- rd_kafka_header_add(hdrs, "multi", -1, "multi4", -1);
- rd_kafka_header_add(hdrs, "send1", -1, "1", -1);
- rd_kafka_header_remove(hdrs, "multi");
- rd_kafka_header_add(hdrs, "multi", -1, "multi5", -1);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Second on_send() interceptor
- */
-static rd_kafka_resp_err_t
-on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- const struct expect expected[] = {
- {"msgid", NULL}, /* special handling */
- {"static", "hey"}, {"null", NULL}, {"empty", ""},
- {"send1", "1"}, {"multi", "multi5"}, {NULL}};
-
- expect_check(__FUNCTION__, expected, rkmessage, 0);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief on_new() interceptor to set up message interceptors
- * from rd_kafka_new().
- */
-static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL);
- rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static void do_produce(const char *topic, int msgcnt) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- int i;
- rd_kafka_resp_err_t err;
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "acks", "all");
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
-
- rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, NULL);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* First message is without headers (negative testing) */
- i = 0;
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE(&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err));
- exp_msgid++;
-
- for (i = 1; i < msgcnt; i++, exp_msgid++) {
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE(&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)),
- RD_KAFKA_V_HEADER("static", "hey", -1),
- RD_KAFKA_V_HEADER("multi", "multi1", -1),
- RD_KAFKA_V_HEADER("multi", "multi2", 6),
- RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")),
- RD_KAFKA_V_HEADER("null", NULL, 0),
- RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() failed: %s",
- rd_kafka_err2str(err));
- }
-
- /* Reset expected message id for dr */
- exp_msgid = 0;
-
- /* Wait for timeouts and delivery reports */
- rd_kafka_flush(rk, tmout_multip(5000));
-
- rd_kafka_destroy(rk);
-}
-
-static void do_consume(const char *topic, int msgcnt) {
- rd_kafka_t *rk;
- rd_kafka_topic_partition_list_t *parts;
-
- rk = test_create_consumer(topic, NULL, NULL, NULL);
-
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, 0)->offset =
- RD_KAFKA_OFFSET_BEGINNING;
-
- test_consumer_assign("assign", rk, parts);
-
- rd_kafka_topic_partition_list_destroy(parts);
-
- exp_msgid = 0;
-
- while (exp_msgid < msgcnt) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(rk, 1000);
- if (!rkm)
- continue;
-
- if (rkm->err)
- TEST_FAIL(
- "consume error while expecting msgid %d/%d: "
- "%s",
- exp_msgid, msgcnt, rd_kafka_message_errstr(rkm));
-
- handle_consumed_msg(rkm);
-
- rd_kafka_message_destroy(rkm);
- }
-
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-}
-
-
-int main_0073_headers(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
- const int msgcnt = 10;
-
- do_produce(topic, msgcnt);
- do_consume(topic, msgcnt);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c
deleted file mode 100644
index 544a84734..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * @brief Simple producev() and produceva() verification
- */
-
-/**
- * @brief Verify #1478: The internal shared rkt reference was not destroyed
- * when producev() failed.
- */
-static void do_test_srkt_leak(void) {
- rd_kafka_conf_t *conf;
- char buf[2000];
- rd_kafka_t *rk;
- rd_kafka_resp_err_t err;
- rd_kafka_error_t *error;
- rd_kafka_vu_t vus[3];
-
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "message.max.bytes", "1000");
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"),
- RD_KAFKA_V_VALUE(buf, sizeof(buf)),
- RD_KAFKA_V_END);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
- "expected MSG_SIZE_TOO_LARGE, not %s",
- rd_kafka_err2str(err));
-
- vus[0].vtype = RD_KAFKA_VTYPE_TOPIC;
- vus[0].u.cstr = "test";
- vus[1].vtype = RD_KAFKA_VTYPE_VALUE;
- vus[1].u.mem.ptr = buf;
- vus[1].u.mem.size = sizeof(buf);
- vus[2].vtype = RD_KAFKA_VTYPE_HEADER;
- vus[2].u.header.name = "testheader";
- vus[2].u.header.val = "test value";
- vus[2].u.header.size = -1;
-
- error = rd_kafka_produceva(rk, vus, 3);
- TEST_ASSERT(error, "expected failure");
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
- "expected MSG_SIZE_TOO_LARGE, not %s",
- rd_kafka_error_string(error));
- TEST_SAY("produceva() error (expected): %s\n",
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_destroy(rk);
-}
-
-
-int main_0074_producev(int argc, char **argv) {
- do_test_srkt_leak();
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c
deleted file mode 100644
index 7e1e4f0f5..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#if WITH_SOCKEM
-#include "rdkafka.h"
-
-#include <stdarg.h>
-#include <errno.h>
-
-/**
- * Request retry testing
- */
-
-/* Hang on to the first broker socket we see in connect_cb,
- * reject all the rest (connection refused) to make sure we're only
- * playing with one single broker for this test. */
-static struct {
- mtx_t lock;
- cnd_t cnd;
- sockem_t *skm;
- thrd_t thrd;
- struct {
- int64_t ts_at; /* to ctrl thread: at this time, set delay */
- int delay;
- int ack; /* from ctrl thread: new delay acked */
- } cmd;
- struct {
- int64_t ts_at; /* to ctrl thread: at this time, set delay */
- int delay;
-
- } next;
- int term;
-} ctrl;
-
-static int ctrl_thrd_main(void *arg) {
-
-
- mtx_lock(&ctrl.lock);
- while (!ctrl.term) {
- int64_t now;
-
- cnd_timedwait_ms(&ctrl.cnd, &ctrl.lock, 10);
-
- if (ctrl.cmd.ts_at) {
- ctrl.next.ts_at = ctrl.cmd.ts_at;
- ctrl.next.delay = ctrl.cmd.delay;
- ctrl.cmd.ts_at = 0;
- ctrl.cmd.ack = 1;
- printf(_C_CYA
- "## %s: sockem: "
-                               "received command to set delay "
- "to %d in %dms\n" _C_CLR,
- __FILE__, ctrl.next.delay,
- (int)(ctrl.next.ts_at - test_clock()) / 1000);
- }
-
- now = test_clock();
- if (ctrl.next.ts_at && now > ctrl.next.ts_at) {
- assert(ctrl.skm);
- printf(_C_CYA
- "## %s: "
- "sockem: setting socket delay to %d\n" _C_CLR,
- __FILE__, ctrl.next.delay);
- sockem_set(ctrl.skm, "delay", ctrl.next.delay, NULL);
- ctrl.next.ts_at = 0;
- cnd_signal(&ctrl.cnd); /* signal back to caller */
- }
- }
- mtx_unlock(&ctrl.lock);
-
- return 0;
-}
-
-
-/**
- * @brief Sockem connect, called from **internal librdkafka thread** through
- * librdkafka's connect_cb
- */
-static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
-
- mtx_lock(&ctrl.lock);
- if (ctrl.skm) {
- /* Reject all but the first connect */
- mtx_unlock(&ctrl.lock);
- return ECONNREFUSED;
- }
-
- ctrl.skm = skm;
-
- /* signal wakeup to main thread */
- cnd_broadcast(&ctrl.cnd);
- mtx_unlock(&ctrl.lock);
-
- return 0;
-}
-
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
-        /* Ignore connectivity errors since we'll be bringing down
-         * connectivity on purpose.
-         * The SASL authenticator would otherwise interpret a
-         * connection-down event in the auth state as meaning the
-         * broker doesn't support SASL PLAIN. */
- TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
- err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
- err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- return 0;
- return 1;
-}
-
-/**
- * @brief Set socket delay to kick in after \p after ms
- */
-static void set_delay(int after, int delay) {
- TEST_SAY("Set delay to %dms (after %dms)\n", delay, after);
-
- mtx_lock(&ctrl.lock);
- ctrl.cmd.ts_at = test_clock() + (after * 1000);
- ctrl.cmd.delay = delay;
- ctrl.cmd.ack = 0;
- cnd_broadcast(&ctrl.cnd);
-
- /* Wait for ack from sockem thread */
- while (!ctrl.cmd.ack) {
- TEST_SAY("Waiting for sockem control ack\n");
- cnd_timedwait_ms(&ctrl.cnd, &ctrl.lock, 1000);
- }
- mtx_unlock(&ctrl.lock);
-}
-
-/**
- * @brief Test that Metadata requests are retried properly when
- * timing out due to high broker rtt.
- */
-static void do_test_low_socket_timeout(const char *topic) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- rd_kafka_resp_err_t err;
- const struct rd_kafka_metadata *md;
- int res;
-
- mtx_init(&ctrl.lock, mtx_plain);
- cnd_init(&ctrl.cnd);
-
- TEST_SAY("Test Metadata request retries on timeout\n");
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "socket.timeout.ms", "1000");
- test_conf_set(conf, "socket.max.fails", "12345");
- test_conf_set(conf, "retry.backoff.ms", "5000");
-        /* Keep api version requests (which have their own timeout) from
-         * getting in the way of our test. */
- test_conf_set(conf, "api.version.request", "false");
- test_socket_enable(conf);
- test_curr->connect_cb = connect_cb;
- test_curr->is_fatal_cb = is_fatal_cb;
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- TEST_SAY("Waiting for sockem connect..\n");
- mtx_lock(&ctrl.lock);
- while (!ctrl.skm)
- cnd_wait(&ctrl.cnd, &ctrl.lock);
- mtx_unlock(&ctrl.lock);
-
- TEST_SAY(
-            "Connected, fire off an undelayed metadata() to "
- "make sure connection is up\n");
-
- err = rd_kafka_metadata(rk, 0, rkt, &md, tmout_multip(2000));
- TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
- rd_kafka_err2str(err));
- rd_kafka_metadata_destroy(md);
-
- if (thrd_create(&ctrl.thrd, ctrl_thrd_main, NULL) != thrd_success)
- TEST_FAIL("Failed to create sockem ctrl thread");
-
- set_delay(0, 3000); /* Takes effect immediately */
-
- /* After two retries, remove the delay, the third retry
- * should kick in and work. */
- set_delay(
- ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) -
- 2000,
- 0);
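-        /* Arithmetic behind the timing above: each timed-out attempt
-         * costs roughly socket.timeout.ms (1000) + retry.backoff.ms
-         * (5000) = 6000ms, so the delay is lifted at
-         * (6000 * 2) - 2000 = 10000ms, comfortably before the retry
-         * that is expected to succeed fires at ~12000ms. */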
-
- TEST_SAY(
- "Calling metadata() again which should succeed after "
- "3 internal retries\n");
- /* Metadata should be returned after the third retry */
- err = rd_kafka_metadata(
- rk, 0, rkt, &md,
- ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) +
- 5000);
- TEST_SAY("metadata() returned %s\n", rd_kafka_err2str(err));
-        TEST_ASSERT(!err, "metadata(with delay) failed: %s",
- rd_kafka_err2str(err));
- rd_kafka_metadata_destroy(md);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- /* Join controller thread */
- mtx_lock(&ctrl.lock);
- ctrl.term = 1;
- mtx_unlock(&ctrl.lock);
- thrd_join(ctrl.thrd, &res);
-
- cnd_destroy(&ctrl.cnd);
- mtx_destroy(&ctrl.lock);
-}
-
-int main_0075_retry(int argc, char **argv) {
- const char *topic = test_mk_topic_name("0075_retry", 1);
-
- do_test_low_socket_timeout(topic);
-
- return 0;
-}
-
-
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c
deleted file mode 100644
index 16d6f602c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#include <stdarg.h>
-#include <errno.h>
-
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
-        /* Ignore connectivity errors since we'll be bringing down
-         * connectivity on purpose.
-         * The SASL authenticator would otherwise interpret a
-         * connection-down event in the auth state as meaning the
-         * broker doesn't support SASL PLAIN. */
- TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
- err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
- err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- return 0;
- return 1;
-}
-
-
-#if WITH_SOCKEM
-/**
- * Producer message retry testing
- */
-
-/* Hang on to the first broker socket we see in connect_cb,
- * reject all the rest (connection refused) to make sure we're only
- * playing with one single broker for this test. */
-
-#include "sockem_ctrl.h"
-
-
-/**
- * @brief Test produce retries.
- *
- * @param should_fail If true, do negative testing which should fail.
- */
-static void do_test_produce_retries(const char *topic,
- int idempotence,
- int try_fail,
- int should_fail) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- uint64_t testid;
- rd_kafka_resp_err_t err;
- int msgcnt = 1;
- sockem_ctrl_t ctrl;
-
- TEST_SAY(_C_BLU
- "Test produce retries "
- "(idempotence=%d,try_fail=%d,should_fail=%d)\n",
- idempotence, try_fail, should_fail);
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
-
- if (should_fail &&
- !strcmp(test_conf_get(conf, "enable.sparse.connections"), "true")) {
- rd_kafka_conf_destroy(conf);
- TEST_SAY(_C_YEL
- "Sparse connections enabled: "
- "skipping connection-timing related test\n");
- return;
- }
-
- sockem_ctrl_init(&ctrl);
-
- test_conf_set(conf, "socket.timeout.ms", "1000");
- /* Avoid disconnects on request timeouts */
- test_conf_set(conf, "socket.max.fails", "100");
- test_conf_set(conf, "enable.idempotence",
- idempotence ? "true" : "false");
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED;
- if (!try_fail) {
- test_conf_set(conf, "retries", "5");
- } else {
-                /* enable.idempotence=true requires retries >= 1, which
-                 * makes the test pass. Adjust expected error accordingly. */
- if (idempotence)
- test_conf_set(conf, "retries", "5");
- else
- test_conf_set(conf, "retries", "0");
- if (should_fail) {
- test_curr->exp_dr_err =
- RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
- test_curr->exp_dr_status =
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
- }
- }
- test_conf_set(conf, "retry.backoff.ms", "5000");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_socket_enable(conf);
- test_curr->is_fatal_cb = is_fatal_cb;
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- /* Create the topic to make sure connections are up and ready. */
- err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
- TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
-
- /* Set initial delay to 3s */
- sockem_ctrl_set_delay(&ctrl, 0, 3000); /* Takes effect immediately */
-
- /* After two retries, remove the delay, the third retry
- * should kick in and work. */
- sockem_ctrl_set_delay(
- &ctrl,
- ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) -
- 2000,
- 0);
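-        /* I.e. lift the delay ~2000ms before the next retry would fire:
-         * two timed-out attempts cost roughly
-         * 2 * (socket.timeout.ms + retry.backoff.ms) = 12000ms. */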
-
- test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0, msgcnt,
- NULL, 0);
-
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- if (!should_fail) {
- TEST_SAY("Verifying messages with consumer\n");
- test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL);
- }
-
- sockem_ctrl_term(&ctrl);
-
- TEST_SAY(_C_GRN
- "Test produce retries "
- "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n",
- idempotence, try_fail, should_fail);
-}
-#endif
-
-
-
-/**
- * @brief on_request_sent interceptor that disconnects the socket
- *        when the first ProduceRequest is seen.
- *        Subsequent ProduceRequests will not trigger a disconnect,
- *        allowing retries to proceed.
- */
-static mtx_t produce_disconnect_lock;
-static int produce_disconnects = 0;
-static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- void *ic_opaque) {
-
- /* Ignore if not a ProduceRequest */
- if (ApiKey != 0)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- mtx_lock(&produce_disconnect_lock);
- if (produce_disconnects == 0) {
- char buf[512];
- ssize_t r;
- printf(_C_CYA "%s:%d: shutting down socket %d (%s)\n" _C_CLR,
- __FILE__, __LINE__, sockfd, brokername);
-#ifdef _WIN32
- closesocket(sockfd);
-#else
- close(sockfd);
-#endif
- /* There is a chance the broker responded in the
- * time it took us to get here, so purge the
- * socket recv buffer to make sure librdkafka does not see
- * the response. */
- while ((r = recv(sockfd, buf, sizeof(buf), 0)) > 0)
-                        printf(_C_CYA
-                               "%s:%d: "
-                               "purged %" PRIdsz " bytes from socket\n" _C_CLR,
-                               __FILE__, __LINE__, r);
- produce_disconnects = 1;
- }
- mtx_unlock(&produce_disconnect_lock);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- return rd_kafka_interceptor_add_on_request_sent(
- rk, "disconnect_on_send", on_request_sent, NULL);
-}
-
-/**
- * @brief Test produce retries by disconnecting right after ProduceRequest
- * has been sent.
- *
- * @param should_fail If true, do negative testing which should fail.
- */
-static void do_test_produce_retries_disconnect(const char *topic,
- int idempotence,
- int try_fail,
- int should_fail) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- uint64_t testid;
- rd_kafka_resp_err_t err;
- int msgcnt = 1;
- int partition_cnt;
-
- TEST_SAY(_C_BLU
- "Test produce retries by disconnect "
- "(idempotence=%d,try_fail=%d,should_fail=%d)\n",
- idempotence, try_fail, should_fail);
-
- test_curr->is_fatal_cb = is_fatal_cb;
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_conf_set(conf, "socket.timeout.ms", test_quick ? "3000" : "10000");
- test_conf_set(conf, "message.timeout.ms",
- test_quick ? "9000" : "30000");
- test_conf_set(conf, "enable.idempotence",
- idempotence ? "true" : "false");
- if (!try_fail) {
- test_conf_set(conf, "retries", "1");
- } else {
-                /* enable.idempotence=true requires retries >= 1, which
-                 * makes the test pass. */
- if (!idempotence)
- test_conf_set(conf, "retries", "0");
- }
-
- mtx_init(&produce_disconnect_lock, mtx_plain);
- produce_disconnects = 0;
-
- rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
- on_new_producer, NULL);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- err = test_produce_sync(rk, rkt, testid, 0);
-
- if (should_fail) {
- if (!err)
- TEST_FAIL("Expected produce to fail\n");
- else
- TEST_SAY("Produced message failed as expected: %s\n",
- rd_kafka_err2str(err));
- } else {
- if (err)
- TEST_FAIL("Produced message failed: %s\n",
- rd_kafka_err2str(err));
- else
- TEST_SAY("Produced message delivered\n");
- }
-
- mtx_lock(&produce_disconnect_lock);
- TEST_ASSERT(produce_disconnects == 1, "expected %d disconnects, not %d",
- 1, produce_disconnects);
- mtx_unlock(&produce_disconnect_lock);
-
-
- partition_cnt = test_get_partition_count(rk, topic, tmout_multip(5000));
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY("Verifying messages with consumer\n");
- test_consume_msgs_easy(NULL, topic, testid, partition_cnt,
-                               /* Since we don't know how many messages
-                                * got through on the socket before the
-                                * disconnect, we can't require the
-                                * expected message count to be 0 when
-                                * should_fail is set, so instead ignore
-                                * the message count (-1). */
- should_fail ? -1 : msgcnt, NULL);
-
- TEST_SAY(_C_GRN
- "Test produce retries by disconnect "
- "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n",
- idempotence, try_fail, should_fail);
-}
-
-
-int main_0076_produce_retry(int argc, char **argv) {
- const char *topic = test_mk_topic_name("0076_produce_retry", 1);
- const rd_bool_t has_idempotence =
- test_broker_version >= TEST_BRKVER(0, 11, 0, 0);
-
-#if WITH_SOCKEM
- if (has_idempotence) {
- /* Idempotence, no try fail, should succeed. */
- do_test_produce_retries(topic, 1, 0, 0);
- /* Idempotence, try fail, should succeed. */
- do_test_produce_retries(topic, 1, 1, 0);
- }
- /* No idempotence, try fail, should fail. */
- do_test_produce_retries(topic, 0, 1, 1);
-#endif
-
- if (has_idempotence) {
- /* Idempotence, no try fail, should succeed. */
- do_test_produce_retries_disconnect(topic, 1, 0, 0);
- /* Idempotence, try fail, should succeed. */
- do_test_produce_retries_disconnect(topic, 1, 1, 0);
- }
- /* No idempotence, try fail, should fail. */
- do_test_produce_retries_disconnect(topic, 0, 1, 1);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c
deleted file mode 100644
index 01667114c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * @brief Verify handling of compacted topics.
- *
- * General idea:
- * - create a compacted topic with a low cleanup interval to promote quick
- * compaction.
- * - produce messages for 3 keys and interleave with unkeyed messages.
- * interleave tombstones for k1 and k2, but not k3.
- * - consume before compaction - verify all messages in place
- * - wait for compaction
- * - consume after compaction - verify expected messages.
- */
-
-
-
-/**
- * @brief Get low watermark in partition, we use this see if compaction
- * has kicked in.
- */
-static int64_t
-get_low_wmark(rd_kafka_t *rk, const char *topic, int32_t partition) {
- rd_kafka_resp_err_t err;
- int64_t low, high;
-
- err = rd_kafka_query_watermark_offsets(rk, topic, partition, &low,
- &high, tmout_multip(10000));
-
-        TEST_ASSERT(!err, "query_watermark_offsets(%s, %d) failed: %s", topic,
- (int)partition, rd_kafka_err2str(err));
-
- return low;
-}
-
-
-/**
- * @brief Wait for compaction by checking for
- * partition low-watermark increasing */
-static void wait_compaction(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t low_offset,
- int timeout_ms) {
- int64_t low = -1;
- int64_t ts_start = test_clock();
-
- TEST_SAY(
- "Waiting for compaction to kick in and increase the "
-            "low watermark offset from %" PRId64 " on %s [%" PRId32 "]\n",
- low_offset, topic, partition);
-
- while (1) {
- low = get_low_wmark(rk, topic, partition);
-
- TEST_SAY("Low watermark offset for %s [%" PRId32
- "] is "
- "%" PRId64 " (want > %" PRId64 ")\n",
- topic, partition, low, low_offset);
-
- if (low > low_offset)
- break;
-
- if (ts_start + (timeout_ms * 1000) < test_clock())
- break;
-
- rd_sleep(5);
- }
-}
-
-static void produce_compactable_msgs(const char *topic,
- int32_t partition,
- uint64_t testid,
- int msgcnt,
- size_t msgsize) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- int i;
- char *val;
- char key[16];
- rd_kafka_resp_err_t err;
- int msgcounter = msgcnt;
-
- if (!testid)
- testid = test_id_generate();
-
- test_str_id_generate(key, sizeof(key));
-
- val = calloc(1, msgsize);
-
- TEST_SAY("Producing %d messages (total of %" PRIusz
- " bytes) of "
- "compactable messages\n",
- msgcnt, (size_t)msgcnt * msgsize);
-
- test_conf_init(&conf, NULL, 0);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- /* Make sure batch size does not exceed segment.bytes since that
- * will make the ProduceRequest fail. */
- test_conf_set(conf, "batch.num.messages", "1");
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- for (i = 0; i < msgcnt - 1; i++) {
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(partition),
- RD_KAFKA_V_KEY(key, sizeof(key) - 1),
- RD_KAFKA_V_VALUE(val, msgsize),
- RD_KAFKA_V_OPAQUE(&msgcounter),
- RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev(): %s", rd_kafka_err2str(err));
- }
-
- /* Final message is the tombstone */
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(partition),
- RD_KAFKA_V_KEY(key, sizeof(key) - 1),
- RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev(): %s", rd_kafka_err2str(err));
-
- test_flush(rk, tmout_multip(10000));
- TEST_ASSERT(msgcounter == 0, "%d messages unaccounted for", msgcounter);
-
- rd_kafka_destroy(rk);
-
- free(val);
-}
-
-
-
-static void do_test_compaction(int msgs_per_key, const char *compression) {
- const char *topic = test_mk_topic_name(__FILE__, 1);
-#define _KEY_CNT 4
- const char *keys[_KEY_CNT] = {"k1", "k2", "k3",
- NULL /*generate unique*/};
- int msgcnt = msgs_per_key * _KEY_CNT;
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- uint64_t testid;
- int32_t partition = 0;
- int cnt = 0;
- test_msgver_t mv;
- test_msgver_t mv_correct;
- int msgcounter = 0;
- const int fillcnt = 20;
-
- testid = test_id_generate();
-
- TEST_SAY(
- _C_MAG
- "Test compaction on topic %s with %s compression (%d messages)\n",
- topic, compression ? compression : "no", msgcnt);
-
- test_kafka_topics(
- "--create --topic \"%s\" "
- "--partitions %d "
- "--replication-factor 1 "
- "--config cleanup.policy=compact "
- "--config segment.ms=10000 "
- "--config segment.bytes=10000 "
- "--config min.cleanable.dirty.ratio=0.01 "
- "--config delete.retention.ms=86400 "
- "--config file.delete.delay.ms=10000 "
- "--config max.compaction.lag.ms=100",
- topic, partition + 1);
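-
-        /* The topic configuration above is chosen so that compaction can
-         * kick in quickly: tiny segments (segment.bytes/segment.ms) roll
-         * over frequently, min.cleanable.dirty.ratio=0.01 makes nearly
-         * any dirty data eligible for cleaning, and
-         * max.compaction.lag.ms=100 forces the log cleaner to act
-         * promptly. */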
-
- test_conf_init(&conf, NULL, 120);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- if (compression)
- test_conf_set(conf, "compression.codec", compression);
- /* Limit max batch size below segment.bytes to avoid messages
- * to accumulate into a batch that will be rejected by the broker. */
- test_conf_set(conf, "message.max.bytes", "6000");
- test_conf_set(conf, "linger.ms", "10");
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = rd_kafka_topic_new(rk, topic, NULL);
-
-        /* The low watermark is not updated on message deletion (compaction)
-         * but on segment deletion, so fill up the first segment with
-         * random messages eligible for prompt compaction. */
- produce_compactable_msgs(topic, 0, partition, fillcnt, 1000);
-
- /* Populate a correct msgver for later comparison after compact. */
- test_msgver_init(&mv_correct, testid);
-
- TEST_SAY("Producing %d messages for %d keys\n", msgcnt, _KEY_CNT);
- for (cnt = 0; cnt < msgcnt;) {
- int k;
-
- for (k = 0; k < _KEY_CNT; k++) {
- rd_kafka_resp_err_t err;
- int is_last = cnt + _KEY_CNT >= msgcnt;
- /* Let keys[0] have some tombstones */
- int is_tombstone = (k == 0 && (is_last || !(cnt % 7)));
- char *valp;
- size_t valsize;
- char rdk_msgid[256];
- char unique_key[16];
- const void *key;
- size_t keysize;
- int64_t offset = fillcnt + cnt;
-
- test_msg_fmt(rdk_msgid, sizeof(rdk_msgid), testid,
- partition, cnt);
-
- if (is_tombstone) {
- valp = NULL;
- valsize = 0;
- } else {
- valp = rdk_msgid;
- valsize = strlen(valp);
- }
-
- if (!(key = keys[k])) {
- rd_snprintf(unique_key, sizeof(unique_key),
- "%d", cnt);
- key = unique_key;
- }
- keysize = strlen(key);
-
- /* All unique-key messages should remain intact
- * after compaction. */
- if (!keys[k] || is_last) {
- TEST_SAYL(4,
- "Add to correct msgvec: "
- "msgid: %d: %s is_last=%d, "
- "is_tomb=%d\n",
- cnt, (const char *)key, is_last,
- is_tombstone);
- test_msgver_add_msg00(
- __FUNCTION__, __LINE__, rd_kafka_name(rk),
- &mv_correct, testid, topic, partition,
- offset, -1, -1, 0, cnt);
- }
-
-
- msgcounter++;
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_KEY(key, keysize),
- RD_KAFKA_V_VALUE(valp, valsize),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_HEADER("rdk_msgid", rdk_msgid, -1),
- /* msgcounter as msg_opaque is used
- * by test delivery report callback to
- * count number of messages. */
- RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev(#%d) failed: %s", cnt,
- rd_kafka_err2str(err));
-
- cnt++;
- }
- }
-
- TEST_ASSERT(cnt == msgcnt, "cnt %d != msgcnt %d", cnt, msgcnt);
-
- msgcounter = cnt;
- test_wait_delivery(rk, &msgcounter);
-
-        /* Trigger compaction by filling up the segment with dummy messages.
-         * Do it in chunks so that compression does not work too well,
-         * which would otherwise keep the segments from filling up.
-         * We can't reuse the existing producer instance because it
-         * might be using compression, which makes it hard to know how
-         * much data we need to produce to trigger compaction. */
- produce_compactable_msgs(topic, 0, partition, 20, 1024);
-
- /* Wait for compaction:
- * this doesn't really work because the low watermark offset
- * is not updated on compaction if the first segment is not deleted.
- * But it serves as a pause to let compaction kick in
- * which is triggered by the dummy produce above. */
- wait_compaction(rk, topic, partition, 0, 20 * 1000);
-
- TEST_SAY(_C_YEL "Verify messages after compaction\n");
- /* After compaction we expect the following messages:
- * last message for each of k1, k2, k3, all messages for unkeyed. */
- test_msgver_init(&mv, testid);
- mv.msgid_hdr = "rdk_msgid";
- test_consume_msgs_easy_mv(NULL, topic, -1, testid, 1, -1, NULL, &mv);
- test_msgver_verify_compare("post-compaction", &mv, &mv_correct,
- TEST_MSGVER_BY_MSGID |
- TEST_MSGVER_BY_OFFSET);
- test_msgver_clear(&mv);
-
- test_msgver_clear(&mv_correct);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN "Compaction test with %s compression: PASS\n",
- compression ? compression : "no");
-}
-
-int main_0077_compaction(int argc, char **argv) {
-
- if (!test_can_create_topics(1))
- return 0;
-
- if (test_needs_auth()) {
- TEST_SKIP("Test cluster requires authentication/SSL\n");
- return 0;
- }
-
- do_test_compaction(10, NULL);
-
- if (test_quick) {
- TEST_SAY(
- "Skipping further compaction tests "
- "due to quick mode\n");
- return 0;
- }
-
- do_test_compaction(1000, NULL);
-#if WITH_SNAPPY
- do_test_compaction(10, "snappy");
-#endif
-#if WITH_ZSTD
- do_test_compaction(10, "zstd");
-#endif
-#if WITH_ZLIB
- do_test_compaction(10000, "gzip");
-#endif
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp
deleted file mode 100644
index 41d6886cb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "rdkafka.h" /* Include before rdkafkacpp.h (from testcpp.h) */
-#include "testcpp.h"
-#include <cstring>
-
-/**
- * @name Verify that the c_ptr()'s returned from C++ can be used
- * to interact directly with the C API.
- */
-
-
-extern "C" {
-int main_0078_c_from_cpp(int argc, char **argv) {
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
- std::string errstr;
-
- if (conf->set("client.id", "myclient", errstr))
- Test::Fail("conf->set() failed: " + errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- delete conf;
-
- /*
- * Acquire rd_kafka_t and compare its name to the configured client.id
- */
- rd_kafka_t *rk = p->c_ptr();
- if (!rk)
- Test::Fail("Failed to acquire c_ptr");
-
- std::string name = p->name();
- std::string c_name = rd_kafka_name(rk);
-
- Test::Say("Compare C name " + c_name + " to C++ name " + name + "\n");
- if (c_name != name)
- Test::Fail("Expected C client name " + c_name + " to match C++ " + name);
-
- /*
- * Create topic object, acquire rd_kafka_topic_t and compare
- * its topic name.
- */
-
- RdKafka::Topic *topic = RdKafka::Topic::create(p, "mytopic", NULL, errstr);
- if (!topic)
- Test::Fail("Failed to create Topic: " + errstr);
-
-                int64_t ts_at; /* ctrl thread local: when to apply delay */
- if (!rkt)
- Test::Fail("Failed to acquire topic c_ptr");
-
- std::string topicname = topic->name();
- std::string c_topicname = rd_kafka_topic_name(rkt);
-
- Test::Say("Compare C topic " + c_topicname + " to C++ topic " + topicname +
- "\n");
- if (c_topicname != topicname)
- Test::Fail("Expected C topic " + c_topicname + " to match C++ topic " +
- topicname);
-
- delete topic;
- delete p;
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c
deleted file mode 100644
index 506dd62a3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#ifndef _WIN32
-#include <unistd.h>
-#include <sys/wait.h>
-#endif
-
-/**
- * @brief Forking a threaded process will not transfer threads (such as
- * librdkafka's background threads) to the child process.
- * There is no way such a forked client instance will work
- * in the child process, but it should not crash on destruction: #1674
- */
-
-int main_0079_fork(int argc, char **argv) {
-
-#if __SANITIZE_ADDRESS__
- TEST_SKIP(
- "AddressSanitizer is enabled: this test leaks memory (due to "
- "fork())\n");
- return 0;
-#endif
-#ifdef _WIN32
- TEST_SKIP("No fork() support on Windows");
- return 0;
-#else
- pid_t pid;
- rd_kafka_t *rk;
- int status;
-
- rk = test_create_producer();
-
- rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
-
- pid = fork();
-        TEST_ASSERT(pid != -1, "fork() failed: %s", strerror(errno));
-
- if (pid == 0) {
- /* Child process */
-
- /* This call will enqueue the message on a queue
- * which is not served by any thread, but it should not crash */
- rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"),
- RD_KAFKA_V_VALUE("hello", 5), RD_KAFKA_V_END);
-
- /* Don't crash on us */
- rd_kafka_destroy(rk);
-
- exit(0);
- }
-
- /* Parent process, wait for child to exit cleanly. */
- if (waitpid(pid, &status, 0) == -1)
- TEST_FAIL("waitpid(%d) failed: %s", (int)pid, strerror(errno));
-
- if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
- TEST_FAIL("child exited with status %d", WEXITSTATUS(status));
-
- rd_kafka_destroy(rk);
-
- return 0;
-#endif
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c
deleted file mode 100644
index 9d049e5b1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c
+++ /dev/null
@@ -1,2535 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * @brief Admin API local dry-run unit-tests.
- */
-
-#define MY_SOCKET_TIMEOUT_MS 100
-#define MY_SOCKET_TIMEOUT_MS_STR "100"
-
-
-
-static mtx_t last_event_lock;
-static cnd_t last_event_cnd;
-static rd_kafka_event_t *last_event = NULL;
-
-/**
- * @brief The background event callback is called automatically
- * by librdkafka from a background thread.
- */
-static void
-background_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque) {
- mtx_lock(&last_event_lock);
- TEST_ASSERT(!last_event,
- "Multiple events seen in background_event_cb "
- "(existing %s, new %s)",
- rd_kafka_event_name(last_event), rd_kafka_event_name(rkev));
- last_event = rkev;
- mtx_unlock(&last_event_lock);
- cnd_broadcast(&last_event_cnd);
- rd_sleep(1);
-}
-
-static rd_kafka_event_t *wait_background_event_cb(void) {
- rd_kafka_event_t *rkev;
- mtx_lock(&last_event_lock);
- while (!(rkev = last_event))
- cnd_wait(&last_event_cnd, &last_event_lock);
- last_event = NULL;
- mtx_unlock(&last_event_lock);
-
- return rkev;
-}
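-
-/* Together these implement a one-slot mailbox between the librdkafka
- * background thread and the test thread: the callback deposits a single
- * event and the test thread consumes it. The rd_sleep(1) in the callback
- * presumably paces delivery so the single-event assertion above does not
- * trip on back-to-back events. */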
-
-
-/**
- * @brief CreateTopics tests
- *
- *
- *
- */
-static void do_test_CreateTopics(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_background_event_cb,
- int with_options) {
- rd_kafka_queue_t *q;
-#define MY_NEW_TOPICS_CNT 6
- rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_CreateTopics_result_t *res;
- const rd_kafka_topic_result_t **restopics;
- size_t restopic_cnt;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s CreateTopics with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /**
-         * Construct the NewTopic array with different properties for
-         * each of the topics.
- */
- for (i = 0; i < MY_NEW_TOPICS_CNT; i++) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- int num_parts = i * 51 + 1;
- int num_replicas = jitter(1, MY_NEW_TOPICS_CNT - 1);
- int set_config = (i & 2);
- int set_replicas = !(i % 1);
-
- new_topics[i] = rd_kafka_NewTopic_new(
- topic, num_parts, set_replicas ? -1 : num_replicas, NULL,
- 0);
-
- if (set_config) {
- /*
- * Add various (unverified) configuration properties
- */
- err = rd_kafka_NewTopic_set_config(new_topics[i],
- "dummy.doesntexist",
- "butThere'sNothing "
- "to verify that");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- err = rd_kafka_NewTopic_set_config(
- new_topics[i], "try.a.null.value", NULL);
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- err = rd_kafka_NewTopic_set_config(new_topics[i],
- "or.empty", "");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- if (set_replicas) {
- int32_t p;
- int32_t replicas[MY_NEW_TOPICS_CNT];
- int j;
-
- for (j = 0; j < num_replicas; j++)
- replicas[j] = j;
-
- /*
- * Set valid replica assignments
- */
- for (p = 0; p < num_parts; p++) {
-                                /* Try setting the replica assignment out
-                                 * of order (p+1 before p), should fail */
- if (p == 1) {
- err =
- rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], p + 1, replicas,
- num_replicas, errstr,
- sizeof(errstr));
- TEST_ASSERT(
- err ==
- RD_KAFKA_RESP_ERR__INVALID_ARG,
- "%s", rd_kafka_err2str(err));
- }
-
- err = rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], p, replicas, num_replicas,
- errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
- }
-
- /* Try to add an existing partition, should fail */
- err = rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], 0, replicas, num_replicas, NULL, 0);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s",
- rd_kafka_err2str(err));
-
- } else {
- int32_t dummy_replicas[1] = {1};
-
- /* Test invalid partition */
- err = rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], num_parts + 1, dummy_replicas, 1,
- errstr, sizeof(errstr));
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "%s: %s", rd_kafka_err2str(err),
- err == RD_KAFKA_RESP_ERR_NO_ERROR ? ""
- : errstr);
-
-                        /* Setting replicas when the default replication
-                         * factor != -1 is an error. */
- err = rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], 0, dummy_replicas, 1, errstr,
- sizeof(errstr));
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "%s: %s", rd_kafka_err2str(err),
- err == RD_KAFKA_RESP_ERR_NO_ERROR ? ""
- : errstr);
- }
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- my_opaque = (void *)123;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
-
- TIMING_START(&timing, "CreateTopics");
- TEST_SAY("Call CreateTopics, timeout is %dms\n", exp_timeout);
- rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (with_background_event_cb) {
- /* Result event will be triggered by callback from
- * librdkafka background queue thread. */
- TIMING_START(&timing, "CreateTopics.wait_background_event_cb");
- rkev = wait_background_event_cb();
- } else {
- /* Poll result queue */
- TIMING_START(&timing, "CreateTopics.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- }
-
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("CreateTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_CreateTopics_result(rkev);
- TEST_ASSERT(res, "expected CreateTopics_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected CreateTopics to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- /* Attempt to extract topics anyway, should return NULL. */
- restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);
- TEST_ASSERT(!restopics && restopic_cnt == 0,
- "expected no result_topics, got %p cnt %" PRIusz, restopics,
- restopic_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_NewTopic_destroy_array(new_topics, MY_NEW_TOPICS_CNT);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief DeleteTopics tests
- *
- *
- *
- */
-static void do_test_DeleteTopics(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options) {
- rd_kafka_queue_t *q;
-#define MY_DEL_TOPICS_CNT 4
- rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteTopics_result_t *res;
- const rd_kafka_topic_result_t **restopics;
- size_t restopic_cnt;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s DeleteTopics with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < MY_DEL_TOPICS_CNT; i++)
- del_topics[i] = rd_kafka_DeleteTopic_new(
- test_mk_topic_name(__FUNCTION__, 1));
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)456;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- TIMING_START(&timing, "DeleteTopics");
- TEST_SAY("Call DeleteTopics, timeout is %dms\n", exp_timeout);
- rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /* Poll result queue */
- TIMING_START(&timing, "DeleteTopics.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DeleteTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteTopics_result(rkev);
- TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected DeleteTopics to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- /* Attempt to extract topics anyway, should return NULL. */
- restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);
- TEST_ASSERT(!restopics && restopic_cnt == 0,
- "expected no result_topics, got %p cnt %" PRIusz, restopics,
- restopic_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_DeleteTopic_destroy_array(del_topics, MY_DEL_TOPICS_CNT);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-#undef MY_DEL_TOPICS_CNT
-
-        SUB_TEST_PASS();
-}
-
-/**
- * @brief DeleteGroups tests
- *
- *
- *
- */
-static void do_test_DeleteGroups(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options,
- rd_bool_t destroy) {
- rd_kafka_queue_t *q;
-#define MY_DEL_GROUPS_CNT 4
- char *group_names[MY_DEL_GROUPS_CNT];
- rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteGroups_result_t *res;
- const rd_kafka_group_result_t **resgroups;
- size_t resgroup_cnt;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s DeleteGroups with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
- group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- del_groups[i] = rd_kafka_DeleteGroup_new(group_names[i]);
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)456;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- TIMING_START(&timing, "DeleteGroups");
- TEST_SAY("Call DeleteGroups, timeout is %dms\n", exp_timeout);
- rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (destroy)
- goto destroy;
-
- /* Poll result queue */
- TIMING_START(&timing, "DeleteGroups.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DeleteGroups: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteGroups_result(rkev);
- TEST_ASSERT(res, "expected DeleteGroups_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting no error (errors will be per-group) */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
- "expected DeleteGroups to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- /* Extract groups, should return MY_DEL_GROUPS_CNT groups. */
- resgroups = rd_kafka_DeleteGroups_result_groups(res, &resgroup_cnt);
- TEST_ASSERT(resgroups && resgroup_cnt == MY_DEL_GROUPS_CNT,
- "expected %d result_groups, got %p cnt %" PRIusz,
- MY_DEL_GROUPS_CNT, resgroups, resgroup_cnt);
-
- /* The returned groups should be in the original order, and
- * should all have timed out. */
- for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
- TEST_ASSERT(!strcmp(group_names[i],
- rd_kafka_group_result_name(resgroups[i])),
- "expected group '%s' at position %d, not '%s'",
- group_names[i], i,
- rd_kafka_group_result_name(resgroups[i]));
- TEST_ASSERT(rd_kafka_error_code(rd_kafka_group_result_error(
- resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected group '%s' to have timed out, got %s",
- group_names[i],
- rd_kafka_error_string(
- rd_kafka_group_result_error(resgroups[i])));
- }
-
- rd_kafka_event_destroy(rkev);
-
-destroy:
- for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
- rd_kafka_DeleteGroup_destroy(del_groups[i]);
- rd_free(group_names[i]);
- }
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-#undef MY_DEL_GROUPS_CNT
-
- SUB_TEST_PASS();
-}
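-
-/* The local Admin API tests in this file share one pattern: build the
- * request objects, invoke the API on a client with no brokers, poll the
- * reply queue until the request times out locally, then check the event
- * type, opaque and per-item errors. Illustrative sketch (not executed;
- * error handling elided):
- *
- *   rd_kafka_DeleteGroup_t *grp = rd_kafka_DeleteGroup_new("mygroup");
- *   rd_kafka_DeleteGroups(rk, &grp, 1, NULL, q);
- *   rd_kafka_event_t *ev = rd_kafka_queue_poll(q, timeout_ms);
- *   const rd_kafka_DeleteGroups_result_t *r =
- *           rd_kafka_event_DeleteGroups_result(ev);
- *   rd_kafka_DeleteGroup_destroy(grp);
- */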
-
-/**
- * @brief ListConsumerGroups tests.
- *
- * Local test: verifies the duplicate state-filter error and the error
- * array returned when the request times out.
- */
-static void do_test_ListConsumerGroups(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options,
- rd_bool_t destroy) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_ListConsumerGroups_result_t *res;
- const rd_kafka_error_t **errors;
- size_t errors_cnt, valid_cnt;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s ListConsumerGroups with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (with_options) {
- rd_kafka_consumer_group_state_t duplicate[2] = {
- RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY,
- RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY};
-
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
-
- /* Test duplicate error on match states */
- rd_kafka_error_t *error =
- rd_kafka_AdminOptions_set_match_consumer_group_states(
- options, duplicate, 2);
- TEST_ASSERT(error && rd_kafka_error_code(error), "%s",
- "Expected error on duplicate states,"
- " got no error");
- rd_kafka_error_destroy(error);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr)));
-
- if (useq) {
- my_opaque = (void *)456;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- TIMING_START(&timing, "ListConsumerGroups");
- TEST_SAY("Call ListConsumerGroups, timeout is %dms\n", exp_timeout);
- rd_kafka_ListConsumerGroups(rk, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (destroy)
- goto destroy;
-
- /* Poll result queue */
- TIMING_START(&timing, "ListConsumerGroups.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("ListConsumerGroups: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_ListConsumerGroups_result(rkev);
- TEST_ASSERT(res, "expected ListConsumerGroups_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting no error here, the real error will be in the error array */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(
- err == RD_KAFKA_RESP_ERR_NO_ERROR,
- "expected ListConsumerGroups to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- errors = rd_kafka_ListConsumerGroups_result_errors(res, &errors_cnt);
- TEST_ASSERT(errors_cnt == 1, "expected one error, got %" PRIusz,
- errors_cnt);
- rd_kafka_ListConsumerGroups_result_valid(res, &valid_cnt);
- TEST_ASSERT(valid_cnt == 0, "expected zero valid groups, got %" PRIusz,
- valid_cnt);
-
- err = rd_kafka_error_code(errors[0]);
- errstr2 = rd_kafka_error_string(errors[0]);
- TEST_ASSERT(
- err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected ListConsumerGroups to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- rd_kafka_event_destroy(rkev);
-
-destroy:
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
- SUB_TEST_PASS();
-}
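-
-/* For contrast with the duplicate-state check above, a valid filter uses
- * distinct states; sketch only, not executed by this test:
- *
- *   rd_kafka_consumer_group_state_t states[2] = {
- *           RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY,
- *           RD_KAFKA_CONSUMER_GROUP_STATE_STABLE};
- *   error = rd_kafka_AdminOptions_set_match_consumer_group_states(
- *           options, states, 2);   (returns NULL on success)
- */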
-
-/**
- * @brief DescribeConsumerGroups tests.
- *
- * Local test: verifies argument handling and the per-group timeout
- * errors returned when no broker is available.
- */
-static void do_test_DescribeConsumerGroups(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options,
- rd_bool_t destroy) {
- rd_kafka_queue_t *q;
-#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4
- const char *group_names[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_DescribeConsumerGroups_result_t *res;
- const rd_kafka_ConsumerGroupDescription_t **resgroups;
- size_t resgroup_cnt;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
- group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)456;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- TIMING_START(&timing, "DescribeConsumerGroups");
- TEST_SAY("Call DescribeConsumerGroups, timeout is %dms\n", exp_timeout);
- rd_kafka_DescribeConsumerGroups(
- rk, group_names, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (destroy)
- goto destroy;
-
- /* Poll result queue */
- TIMING_START(&timing, "DescribeConsumerGroups.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DescribeConsumerGroups: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DescribeConsumerGroups_result(rkev);
- TEST_ASSERT(res, "expected DescribeConsumerGroups_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting no error (errors will be per-group) */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(
- err == RD_KAFKA_RESP_ERR_NO_ERROR,
- "expected DescribeConsumerGroups to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- /* Extract groups, should return TEST_DESCRIBE_CONSUMER_GROUPS_CNT
- * groups. */
- resgroups =
- rd_kafka_DescribeConsumerGroups_result_groups(res, &resgroup_cnt);
- TEST_ASSERT(resgroups &&
- resgroup_cnt == TEST_DESCRIBE_CONSUMER_GROUPS_CNT,
- "expected %d result_groups, got %p cnt %" PRIusz,
- TEST_DESCRIBE_CONSUMER_GROUPS_CNT, resgroups, resgroup_cnt);
-
- /* The returned groups should be in the original order, and
- * should all have timed out. */
- for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
- TEST_ASSERT(
- !strcmp(group_names[i],
- rd_kafka_ConsumerGroupDescription_group_id(
- resgroups[i])),
- "expected group '%s' at position %d, not '%s'",
- group_names[i], i,
- rd_kafka_ConsumerGroupDescription_group_id(resgroups[i]));
- TEST_ASSERT(
- rd_kafka_error_code(rd_kafka_ConsumerGroupDescription_error(
- resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected group '%s' to have timed out, got %s",
- group_names[i],
- rd_kafka_error_string(
- rd_kafka_ConsumerGroupDescription_error(resgroups[i])));
- }
-
- rd_kafka_event_destroy(rkev);
-
-destroy:
- for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
- rd_free((char *)group_names[i]);
- }
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT
-
- SUB_TEST_PASS();
-}
-
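-/**
- * @brief DeleteRecords tests.
- *
- * Local test: expects the request to fail since no broker (and thus no
- * partition leader) can be reached. Note that the offset field of each
- * entry passed to rd_kafka_DeleteRecords_new() is the truncation point;
- * RD_KAFKA_OFFSET_END, as used here, requests deletion of all records
- * currently in the partition.
- */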
-static void do_test_DeleteRecords(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options,
- rd_bool_t destroy) {
- rd_kafka_queue_t *q;
-#define MY_DEL_RECORDS_CNT 4
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_topic_partition_list_t *offsets = NULL;
- rd_kafka_DeleteRecords_t *del_records;
- const rd_kafka_DeleteRecords_result_t *res;
- char *topics[MY_DEL_RECORDS_CNT];
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s DeleteRecords with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
- topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)4567;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- offsets = rd_kafka_topic_partition_list_new(MY_DEL_RECORDS_CNT);
-
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++)
- rd_kafka_topic_partition_list_add(offsets, topics[i], i)
- ->offset = RD_KAFKA_OFFSET_END;
-
- del_records = rd_kafka_DeleteRecords_new(offsets);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- TIMING_START(&timing, "DeleteRecords");
- TEST_SAY("Call DeleteRecords, timeout is %dms\n", exp_timeout);
- rd_kafka_DeleteRecords(rk, &del_records, 1, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 10);
-
- rd_kafka_DeleteRecords_destroy(del_records);
-
- if (destroy)
- goto destroy;
-
- /* Poll result queue */
- TIMING_START(&timing, "DeleteRecords.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DeleteRecords: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteRecords_result(rkev);
- TEST_ASSERT(res, "expected DeleteRecords_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error (pre-fanout leader_req will fail) */
- err = rd_kafka_event_error(rkev);
- TEST_ASSERT(err, "expected DeleteRecords to fail");
-
- rd_kafka_event_destroy(rkev);
-
-destroy:
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++)
- rd_free(topics[i]);
-
-#undef MY_DEL_RECORDS_CNT
-
- SUB_TEST_PASS();
-}
-
-
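-/**
- * @brief DeleteConsumerGroupOffsets tests.
- *
- * Local test: expects the request to fail with a timeout since there is
- * no broker to respond.
- */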
-static void do_test_DeleteConsumerGroupOffsets(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options) {
- rd_kafka_queue_t *q;
-#define MY_DEL_CGRPOFFS_CNT 1
- rd_kafka_AdminOptions_t *options = NULL;
- const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
- rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets[MY_DEL_CGRPOFFS_CNT];
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s DeleteConsumerGroupOffsets with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < MY_DEL_CGRPOFFS_CNT; i++) {
- rd_kafka_topic_partition_list_t *partitions =
- rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
- rd_kafka_topic_partition_list_add(partitions, "topic3", 15);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 1);
- cgoffsets[i] = rd_kafka_DeleteConsumerGroupOffsets_new(
- "mygroup", partitions);
- rd_kafka_topic_partition_list_destroy(partitions);
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)99981;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- TIMING_START(&timing, "DeleteConsumerGroupOffsets");
- TEST_SAY("Call DeleteConsumerGroupOffsets, timeout is %dms\n",
- exp_timeout);
- rd_kafka_DeleteConsumerGroupOffsets(rk, cgoffsets, MY_DEL_CGRPOFFS_CNT,
- options, q);
- TIMING_ASSERT_LATER(&timing, 0, 10);
-
- /* Poll result queue */
- TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected DeleteConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- TEST_ASSERT(err, "expected DeleteConsumerGroupOffsets to fail");
-
- rd_kafka_event_destroy(rkev);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- rd_kafka_DeleteConsumerGroupOffsets_destroy_array(cgoffsets,
- MY_DEL_CGRPOFFS_CNT);
-
-#undef MY_DEL_CGRPOFFS_CNT
-
- SUB_TEST_PASS();
-}
-
-/**
- * @brief AclBinding tests.
- *
- * Verifies the argument validation performed by rd_kafka_AclBinding_new().
- */
-static void do_test_AclBinding(void) {
- int i;
- char errstr[512];
- rd_kafka_AclBinding_t *new_acl;
-
- rd_bool_t valid_resource_types[] = {rd_false, rd_false, rd_true,
- rd_true, rd_true, rd_false};
- rd_bool_t valid_resource_pattern_types[] = {
- rd_false, rd_false, rd_false, rd_true, rd_true, rd_false};
- rd_bool_t valid_acl_operation[] = {
- rd_false, rd_false, rd_true, rd_true, rd_true, rd_true, rd_true,
- rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false};
- rd_bool_t valid_acl_permission_type[] = {rd_false, rd_false, rd_true,
- rd_true, rd_false};
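-
- /* The tables above are indexed by the corresponding enum value (e.g.
- * RD_KAFKA_RESOURCE_UNKNOWN = 0, _ANY = 1, _TOPIC = 2, ...), with one
- * trailing slot for the _CNT sentinel; rd_true marks the values that
- * rd_kafka_AclBinding_new() must accept. */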
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const char *principal = "User:test";
- const char *host = "*";
-
- SUB_TEST_QUICK();
-
- /* Valid ACL binding */
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- principal, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(new_acl, "expected AclBinding");
- rd_kafka_AclBinding_destroy(new_acl);
-
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- principal, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid resource name"),
- "expected error string \"Invalid resource name\", not %s",
- errstr);
-
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- NULL, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid principal"),
- "expected error string \"Invalid principal\", not %s",
- errstr);
-
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- principal, NULL, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid host"),
- "expected error string \"Invalid host\", not %s", errstr);
-
- for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) {
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal,
- host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- if (i >= 0 && valid_resource_types[i]) {
- TEST_ASSERT(new_acl, "expected AclBinding");
- rd_kafka_AclBinding_destroy(new_acl);
- } else
- TEST_ASSERT(
- !new_acl &&
- !strcmp(errstr, "Invalid resource type"),
- "expected error string \"Invalid resource type\", "
- "not %s",
- errstr);
- }
- for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) {
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host,
- RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- if (i >= 0 && valid_resource_pattern_types[i]) {
- TEST_ASSERT(new_acl, "expected AclBinding");
- rd_kafka_AclBinding_destroy(new_acl);
- } else
- TEST_ASSERT(
- !new_acl &&
- !strcmp(errstr,
- "Invalid resource pattern type"),
- "expected error string \"Invalid resource pattern "
- "type\", not %s",
- errstr);
- }
- for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) {
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- if (i >= 0 && valid_acl_operation[i]) {
- TEST_ASSERT(new_acl, "expected AclBinding");
- rd_kafka_AclBinding_destroy(new_acl);
- } else
- TEST_ASSERT(!new_acl &&
- !strcmp(errstr, "Invalid operation"),
- "expected error string \"Invalid "
- "operation\", not %s",
- errstr);
- }
- for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) {
- *errstr = '\0';
- new_acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
- RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr));
- if (i >= 0 && valid_acl_permission_type[i]) {
- TEST_ASSERT(new_acl, "expected AclBinding");
- rd_kafka_AclBinding_destroy(new_acl);
- } else
- TEST_ASSERT(
- !new_acl &&
- !strcmp(errstr, "Invalid permission type"),
- "expected error string \"permission type\", not %s",
- errstr);
- }
-
- SUB_TEST_PASS();
-}
-
-/**
- * @brief AclBindingFilter tests.
- *
- * Verifies the argument validation performed by
- * rd_kafka_AclBindingFilter_new().
- */
-static void do_test_AclBindingFilter(void) {
- int i;
- char errstr[512];
- rd_kafka_AclBindingFilter_t *new_acl_filter;
-
- rd_bool_t valid_resource_types[] = {rd_false, rd_true, rd_true,
- rd_true, rd_true, rd_false};
- rd_bool_t valid_resource_pattern_types[] = {
- rd_false, rd_true, rd_true, rd_true, rd_true, rd_false};
- rd_bool_t valid_acl_operation[] = {
- rd_false, rd_true, rd_true, rd_true, rd_true, rd_true, rd_true,
- rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false};
- rd_bool_t valid_acl_permission_type[] = {rd_false, rd_true, rd_true,
- rd_true, rd_false};
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const char *principal = "User:test";
- const char *host = "*";
-
- SUB_TEST_QUICK();
-
- /* Valid ACL binding filter */
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- principal, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
-
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- principal, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
-
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- NULL, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
-
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- principal, NULL, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
-
- for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) {
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal,
- host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- if (i >= 0 && valid_resource_types[i]) {
- TEST_ASSERT(new_acl_filter,
- "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
- } else
- TEST_ASSERT(
- !new_acl_filter &&
- !strcmp(errstr, "Invalid resource type"),
- "expected error string \"Invalid resource type\", "
- "not %s",
- errstr);
- }
- for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) {
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host,
- RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- if (i >= 0 && valid_resource_pattern_types[i]) {
- TEST_ASSERT(new_acl_filter,
- "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
- } else
- TEST_ASSERT(
- !new_acl_filter &&
- !strcmp(errstr,
- "Invalid resource pattern type"),
- "expected error string \"Invalid resource pattern "
- "type\", not %s",
- errstr);
- }
- for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) {
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- if (i >= 0 && valid_acl_operation[i]) {
- TEST_ASSERT(new_acl_filter,
- "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
- } else
- TEST_ASSERT(!new_acl_filter &&
- !strcmp(errstr, "Invalid operation"),
- "expected error string \"Invalid "
- "operation\", not %s",
- errstr);
- }
- for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) {
- *errstr = '\0';
- new_acl_filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
- RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr));
- if (i >= 0 && valid_acl_permission_type[i]) {
- TEST_ASSERT(new_acl_filter,
- "expected AclBindingFilter");
- rd_kafka_AclBinding_destroy(new_acl_filter);
- } else
- TEST_ASSERT(
- !new_acl_filter &&
- !strcmp(errstr, "Invalid permission type"),
- "expected error string \"permission type\", not %s",
- errstr);
- }
-
- SUB_TEST_PASS();
-}
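-
-/* Unlike bindings, binding filters accept NULL resource name, principal
- * and host (meaning match-any), and additionally allow the ANY and MATCH
- * resource pattern types; that is what the wider rd_true ranges in the
- * tables above encode. */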
-
-
-/**
- * @brief CreateAcls tests.
- *
- * Local test: expects a timeout error and an empty result set.
- */
-static void do_test_CreateAcls(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- rd_bool_t with_background_event_cb,
- rd_bool_t with_options) {
- rd_kafka_queue_t *q;
-#define MY_NEW_ACLS_CNT 2
- rd_kafka_AclBinding_t *new_acls[MY_NEW_ACLS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_CreateAcls_result_t *res;
- const rd_kafka_acl_result_t **resacls;
- size_t resacls_cnt;
- void *my_opaque = NULL, *opaque;
- const char *principal = "User:test";
- const char *host = "*";
-
- SUB_TEST_QUICK("%s CreaetAcls with %s, timeout %dms", rd_kafka_name(rk),
- what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /* Construct AclBinding array */
- for (i = 0; i < MY_NEW_ACLS_CNT; i++) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- new_acls[i] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
- RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- my_opaque = (void *)123;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
-
- TIMING_START(&timing, "CreateAcls");
- TEST_SAY("Call CreateAcls, timeout is %dms\n", exp_timeout);
- rd_kafka_CreateAcls(rk, new_acls, MY_NEW_ACLS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (with_background_event_cb) {
- /* Result event will be triggered by callback from
- * librdkafka background queue thread. */
- TIMING_START(&timing, "CreateAcls.wait_background_event_cb");
- rkev = wait_background_event_cb();
- } else {
- /* Poll result queue */
- TIMING_START(&timing, "CreateAcls.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- }
-
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("CreateAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_CreateAcls_result(rkev);
- TEST_ASSERT(res, "expected CreateAcls_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected CreateAcls to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- /* Attempt to extract acls results anyway, should return NULL. */
- resacls = rd_kafka_CreateAcls_result_acls(res, &resacls_cnt);
- TEST_ASSERT(!resacls && resacls_cnt == 0,
- "expected no acl result, got %p cnt %" PRIusz, resacls,
- resacls_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_AclBinding_destroy_array(new_acls, MY_NEW_ACLS_CNT);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
-#undef MY_NEW_ACLS_CNT
-
- SUB_TEST_PASS();
-}
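-
-/* When with_background_event_cb is set, the result event is delivered via
- * librdkafka's background queue thread instead of by polling; the
- * wait_background_event_cb() helper used above (defined elsewhere in this
- * test file) blocks until that callback has fired and returns the event. */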
-
-/**
- * @brief DescribeAcls tests.
- *
- * Local test: expects a timeout error and an empty result set.
- */
-static void do_test_DescribeAcls(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- rd_bool_t with_background_event_cb,
- rd_bool_t with_options) {
- rd_kafka_queue_t *q;
- rd_kafka_AclBindingFilter_t *describe_acls;
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_DescribeAcls_result_t *res;
- const rd_kafka_AclBinding_t **res_acls;
- size_t res_acls_cnt;
- void *my_opaque = NULL, *opaque;
- const char *principal = "User:test";
- const char *host = "*";
-
- SUB_TEST_QUICK("%s DescribeAcls with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /* Construct AclBindingFilter */
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- describe_acls = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_PREFIXED,
- principal, host, RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- my_opaque = (void *)123;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
-
- TIMING_START(&timing, "DescribeAcls");
- TEST_SAY("Call DescribeAcls, timeout is %dms\n", exp_timeout);
- rd_kafka_DescribeAcls(rk, describe_acls, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (with_background_event_cb) {
- /* Result event will be triggered by callback from
- * librdkafka background queue thread. */
- TIMING_START(&timing, "DescribeAcls.wait_background_event_cb");
- rkev = wait_background_event_cb();
- } else {
- /* Poll result queue */
- TIMING_START(&timing, "DescribeAcls.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- }
-
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DescribeAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DescribeAcls_result(rkev);
- TEST_ASSERT(res, "expected DescribeAcls_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected DescribeAcls to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- /* Attempt to extract result acls anyway, should return NULL. */
- res_acls = rd_kafka_DescribeAcls_result_acls(res, &res_acls_cnt);
- TEST_ASSERT(!res_acls && res_acls_cnt == 0,
- "expected no result acls, got %p cnt %" PRIusz, res_acls,
- res_acls_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_AclBinding_destroy(describe_acls);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief DeleteAcls tests.
- *
- * Local test: expects a timeout error and an empty result set.
- */
-static void do_test_DeleteAcls(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- rd_bool_t with_background_event_cb,
- rd_bool_t with_options) {
-#define DELETE_ACLS_FILTERS_CNT 2
- rd_kafka_queue_t *q;
- rd_kafka_AclBindingFilter_t *delete_acls[DELETE_ACLS_FILTERS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteAcls_result_t *res;
- const rd_kafka_DeleteAcls_result_response_t **res_response;
- size_t res_response_cnt;
- void *my_opaque = NULL, *opaque;
- const char *principal = "User:test";
- const char *host = "*";
-
- SUB_TEST_QUICK("%s DeleteAcls with %s, timeout %dms", rd_kafka_name(rk),
- what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /* Construct AclBindingFilter array */
- for (i = 0; i < DELETE_ACLS_FILTERS_CNT; i++) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- delete_acls[i] = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic,
- RD_KAFKA_RESOURCE_PATTERN_PREFIXED, principal, host,
- RD_KAFKA_ACL_OPERATION_ALL,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- my_opaque = (void *)123;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
-
- TIMING_START(&timing, "DeleteAcls");
- TEST_SAY("Call DeleteAcls, timeout is %dms\n", exp_timeout);
- rd_kafka_DeleteAcls(rk, delete_acls, DELETE_ACLS_FILTERS_CNT, options,
- q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- if (with_background_event_cb) {
- /* Result event will be triggered by callback from
- * librdkafka background queue thread. */
- TIMING_START(&timing, "DeleteAcls.wait_background_event_cb");
- rkev = wait_background_event_cb();
- } else {
- /* Poll result queue */
- TIMING_START(&timing, "DeleteAcls.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- }
-
- TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("DeleteAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteAcls_result(rkev);
- TEST_ASSERT(res, "expected DeleteAcls_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected DeleteAcls to return error %s, not %s (%s)",
- rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- /* Attempt to extract result responses anyway, should return NULL. */
- res_response =
- rd_kafka_DeleteAcls_result_responses(res, &res_response_cnt);
- TEST_ASSERT(!res_response && res_response_cnt == 0,
- "expected no result response, got %p cnt %" PRIusz,
- res_response, res_response_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_AclBinding_destroy_array(delete_acls, DELETE_ACLS_FILTERS_CNT);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
-#undef DELETE_ACLS_FILTERS_CNT
-
- SUB_TEST_PASS();
-}
-
-
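-/**
- * @brief AlterConsumerGroupOffsets tests.
- *
- * Local test: verifies validation of empty partition lists, negative
- * offsets and duplicate partitions, plus the timeout path for a
- * well-formed request.
- */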
-static void do_test_AlterConsumerGroupOffsets(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options) {
- rd_kafka_queue_t *q;
-#define MY_ALTER_CGRPOFFS_CNT 1
- rd_kafka_AdminOptions_t *options = NULL;
- const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
- rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets[MY_ALTER_CGRPOFFS_CNT];
- rd_kafka_AlterConsumerGroupOffsets_t
- *cgoffsets_empty[MY_ALTER_CGRPOFFS_CNT];
- rd_kafka_AlterConsumerGroupOffsets_t
- *cgoffsets_negative[MY_ALTER_CGRPOFFS_CNT];
- rd_kafka_AlterConsumerGroupOffsets_t
- *cgoffsets_duplicate[MY_ALTER_CGRPOFFS_CNT];
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- void *my_opaque = NULL, *opaque;
-
- SUB_TEST_QUICK("%s AlterConsumerGroupOffsets with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < MY_ALTER_CGRPOFFS_CNT; i++) {
- /* Call with three correct topic partitions. */
- rd_kafka_topic_partition_list_t *partitions =
- rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 9)
- ->offset = 9;
- rd_kafka_topic_partition_list_add(partitions, "topic3", 15)
- ->offset = 15;
- rd_kafka_topic_partition_list_add(partitions, "topic1", 1)
- ->offset = 1;
- cgoffsets[i] = rd_kafka_AlterConsumerGroupOffsets_new(
- "mygroup", partitions);
- rd_kafka_topic_partition_list_destroy(partitions);
-
- /* Call with empty topic-partition list. */
- rd_kafka_topic_partition_list_t *partitions_empty =
- rd_kafka_topic_partition_list_new(0);
- cgoffsets_empty[i] = rd_kafka_AlterConsumerGroupOffsets_new(
- "mygroup", partitions_empty);
- rd_kafka_topic_partition_list_destroy(partitions_empty);
-
- /* Call with a topic-partition having negative offset. */
- rd_kafka_topic_partition_list_t *partitions_negative =
- rd_kafka_topic_partition_list_new(4);
- rd_kafka_topic_partition_list_add(partitions_negative, "topic1",
- 9)
- ->offset = 9;
- rd_kafka_topic_partition_list_add(partitions_negative, "topic3",
- 15)
- ->offset = 15;
- rd_kafka_topic_partition_list_add(partitions_negative, "topic1",
- 1)
- ->offset = 1;
- rd_kafka_topic_partition_list_add(partitions_negative, "topic1",
- 2)
- ->offset = -3;
- cgoffsets_negative[i] = rd_kafka_AlterConsumerGroupOffsets_new(
- "mygroup", partitions_negative);
- rd_kafka_topic_partition_list_destroy(partitions_negative);
-
- /* Call with duplicate partitions. */
- rd_kafka_topic_partition_list_t *partitions_duplicate =
- rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(partitions_duplicate,
- "topic1", 9)
- ->offset = 9;
- rd_kafka_topic_partition_list_add(partitions_duplicate,
- "topic3", 15)
- ->offset = 15;
- rd_kafka_topic_partition_list_add(partitions_duplicate,
- "topic1", 9)
- ->offset = 1;
-
- cgoffsets_duplicate[i] = rd_kafka_AlterConsumerGroupOffsets_new(
- "mygroup", partitions_duplicate);
- rd_kafka_topic_partition_list_destroy(partitions_duplicate);
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)99981;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- /* Empty topic-partition list */
- TIMING_START(&timing, "AlterConsumerGroupOffsets");
- TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
- exp_timeout);
- rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_empty,
- MY_ALTER_CGRPOFFS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 10);
- rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_empty,
- MY_ALTER_CGRPOFFS_CNT);
-
- /* Poll result queue */
- TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, 0, 10);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
- /* Convert event to proper result */
- res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- const char *event_errstr_empty = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s",
- rd_kafka_err2name(err));
- TEST_ASSERT(strcmp(event_errstr_empty,
- "Non-empty topic partition list must be present") ==
- 0,
- "expected \"Non-empty topic partition list must be "
- "present\", not \"%s\"",
- event_errstr_empty);
- rd_kafka_event_destroy(rkev);
-
- /* Negative topic-partition offset */
- TIMING_START(&timing, "AlterConsumerGroupOffsets");
- TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
- exp_timeout);
- rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_negative,
- MY_ALTER_CGRPOFFS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 10);
- rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_negative,
- MY_ALTER_CGRPOFFS_CNT);
- /* Poll result queue */
- TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, 0, 10);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
- /* Convert event to proper result */
- res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- const char *event_errstr_negative = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s",
- rd_kafka_err2name(err));
- TEST_ASSERT(
- strcmp(event_errstr_negative,
- "All topic-partition offsets must be >= 0") == 0,
- "expected \"All topic-partition offsets must be >= 0\", not \"%s\"",
- event_errstr_negative);
- rd_kafka_event_destroy(rkev);
-
- /* Duplicate topic-partition offset */
- TIMING_START(&timing, "AlterConsumerGroupOffsets");
- TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
- exp_timeout);
- rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_duplicate,
- MY_ALTER_CGRPOFFS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 10);
- rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_duplicate,
- MY_ALTER_CGRPOFFS_CNT);
- /* Poll result queue */
- TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, 0, 10);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
- /* Convert event to proper result */
- res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- const char *event_errstr_duplicate = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
- "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s",
- rd_kafka_err2name(err));
- TEST_ASSERT(strcmp(event_errstr_duplicate,
- "Duplicate partitions not allowed") == 0,
- "expected \"Duplicate partitions not allowed\", not \"%s\"",
- event_errstr_duplicate);
- rd_kafka_event_destroy(rkev);
-
- /* Correct topic-partition list, local timeout */
- TIMING_START(&timing, "AlterConsumerGroupOffsets");
- TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
- exp_timeout);
- rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets, MY_ALTER_CGRPOFFS_CNT,
- options, q);
- TIMING_ASSERT_LATER(&timing, 0, 10);
- /* Poll result queue */
- TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
- /* Convert event to proper result */
- res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- const char *event_errstr = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected RD_KAFKA_RESP_ERR__TIMED_OUT, not %s",
- rd_kafka_err2name(err));
- TEST_ASSERT(strcmp(event_errstr,
- "Failed while waiting for response from broker: "
- "Local: Timed out") == 0,
- "expected \"Failed while waiting for response from broker: "
- "Local: Timed out\", not \"%s\"",
- event_errstr);
- rd_kafka_event_destroy(rkev);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets,
- MY_ALTER_CGRPOFFS_CNT);
-
-#undef MY_ALTER_CGRPOFFS_CNT
-
- SUB_TEST_PASS();
-}
-
-
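-/**
- * @brief ListConsumerGroupOffsets tests.
- *
- * Local test: verifies validation of empty and duplicate partition lists
- * and the timeout path, with either a NULL or an explicit partition list.
- */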
-static void do_test_ListConsumerGroupOffsets(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int with_options,
- rd_bool_t null_toppars) {
- rd_kafka_queue_t *q;
-#define MY_LIST_CGRPOFFS_CNT 1
- rd_kafka_AdminOptions_t *options = NULL;
- const rd_kafka_ListConsumerGroupOffsets_result_t *res;
- rd_kafka_ListConsumerGroupOffsets_t *cgoffsets[MY_LIST_CGRPOFFS_CNT];
- rd_kafka_ListConsumerGroupOffsets_t
- *cgoffsets_empty[MY_LIST_CGRPOFFS_CNT];
- rd_kafka_ListConsumerGroupOffsets_t
- *cgoffsets_duplicate[MY_LIST_CGRPOFFS_CNT];
- int exp_timeout = MY_SOCKET_TIMEOUT_MS;
- int i;
- char errstr[512];
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- void *my_opaque = NULL, *opaque;
- const char *errstr_ptr;
-
- SUB_TEST_QUICK("%s ListConsumerGroupOffsets with %s, timeout %dms",
- rd_kafka_name(rk), what, exp_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- for (i = 0; i < MY_LIST_CGRPOFFS_CNT; i++) {
- rd_kafka_topic_partition_list_t *partitions =
- rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
- rd_kafka_topic_partition_list_add(partitions, "topic3", 15);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 1);
- if (null_toppars) {
- cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new(
- "mygroup", NULL);
- } else {
- cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new(
- "mygroup", partitions);
- }
- rd_kafka_topic_partition_list_destroy(partitions);
-
- rd_kafka_topic_partition_list_t *partitions_empty =
- rd_kafka_topic_partition_list_new(0);
- cgoffsets_empty[i] = rd_kafka_ListConsumerGroupOffsets_new(
- "mygroup", partitions_empty);
- rd_kafka_topic_partition_list_destroy(partitions_empty);
-
- partitions = rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
- rd_kafka_topic_partition_list_add(partitions, "topic3", 15);
- rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
- cgoffsets_duplicate[i] = rd_kafka_ListConsumerGroupOffsets_new(
- "mygroup", partitions);
- rd_kafka_topic_partition_list_destroy(partitions);
- }
-
- if (with_options) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);
-
- exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, exp_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (useq) {
- my_opaque = (void *)99981;
- rd_kafka_AdminOptions_set_opaque(options, my_opaque);
- }
- }
-
- TEST_SAY(
- "Call ListConsumerGroupOffsets with empty topic-partition list.\n");
- rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_empty,
- MY_LIST_CGRPOFFS_CNT, options, q);
- rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_empty,
- MY_LIST_CGRPOFFS_CNT);
- /* Poll result queue */
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TEST_SAY("ListConsumerGroupOffsets: got %s\n",
- rd_kafka_event_name(rkev));
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail");
-
- errstr_ptr = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(
- !strcmp(errstr_ptr,
- "NULL or non-empty topic partition list must be passed"),
- "expected error string \"NULL or non-empty topic partition list "
- "must be passed\", not %s",
- errstr_ptr);
-
- rd_kafka_event_destroy(rkev);
-
-
- TEST_SAY(
- "Call ListConsumerGroupOffsets with topic-partition list "
- "containing duplicates.\n");
- rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_duplicate,
- MY_LIST_CGRPOFFS_CNT, options, q);
- rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_duplicate,
- MY_LIST_CGRPOFFS_CNT);
- /* Poll result queue */
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TEST_SAY("ListConsumerGroupOffsets: got %s\n",
- rd_kafka_event_name(rkev));
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail");
-
- errstr_ptr = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!strcmp(errstr_ptr, "Duplicate partitions not allowed"),
- "expected error string \"Duplicate partitions not allowed\""
- ", not %s",
- errstr_ptr);
-
- rd_kafka_event_destroy(rkev);
-
-
- TIMING_START(&timing, "ListConsumerGroupOffsets");
- TEST_SAY("Call ListConsumerGroupOffsets, timeout is %dms\n",
- exp_timeout);
- rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets, MY_LIST_CGRPOFFS_CNT,
- options, q);
- rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets,
- MY_LIST_CGRPOFFS_CNT);
- TIMING_ASSERT_LATER(&timing, 0, 10);
-
- /* Poll result queue */
- TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll");
- rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
- TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
- TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
- TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fs\n",
- rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
-
- /* Convert event to proper result */
- res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
-
- opaque = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
- my_opaque, opaque);
-
- /* Expecting error */
- err = rd_kafka_event_error(rkev);
- TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail");
-
- errstr_ptr = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!strcmp(errstr_ptr,
- "Failed while waiting for response from broker: "
- "Local: Timed out"),
- "expected error string \"Failed while waiting for response "
- "from broker: Local: Timed out\", not %s",
- errstr_ptr);
-
- rd_kafka_event_destroy(rkev);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
-#undef MY_LIST_CGRPOFFS_CNT
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test a mix of APIs using the same replyq.
- *
- * - Create topics A,B
- * - Delete topic B
- * - Create topic C
- * - Delete groups A,B,C
- * - Delete records from A,B,C
- * - Create extra partitions for topic D
- * - Delete consumer group offsets for "mygroup" (and once with NULL args)
- * - Create topics A,B again, with broker-default partition counts
- */
-static void do_test_mix(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
- char *topics[] = {"topicA", "topicB", "topicC"};
- int cnt = 0;
- struct waiting {
- rd_kafka_event_type_t evtype;
- int seen;
- };
- struct waiting id1 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
- struct waiting id2 = {RD_KAFKA_EVENT_DELETETOPICS_RESULT};
- struct waiting id3 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
- struct waiting id4 = {RD_KAFKA_EVENT_DELETEGROUPS_RESULT};
- struct waiting id5 = {RD_KAFKA_EVENT_DELETERECORDS_RESULT};
- struct waiting id6 = {RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT};
- struct waiting id7 = {RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT};
- struct waiting id8 = {RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT};
- struct waiting id9 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
- rd_kafka_topic_partition_list_t *offsets;
-
-
- SUB_TEST_QUICK();
-
- offsets = rd_kafka_topic_partition_list_new(3);
- rd_kafka_topic_partition_list_add(offsets, topics[0], 0)->offset =
- RD_KAFKA_OFFSET_END;
- rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset =
- RD_KAFKA_OFFSET_END;
- rd_kafka_topic_partition_list_add(offsets, topics[2], 0)->offset =
- RD_KAFKA_OFFSET_END;
-
- test_CreateTopics_simple(rk, rkqu, topics, 2, 1, &id1);
- test_DeleteTopics_simple(rk, rkqu, &topics[1], 1, &id2);
- test_CreateTopics_simple(rk, rkqu, &topics[2], 1, 1, &id3);
- test_DeleteGroups_simple(rk, rkqu, topics, 3, &id4);
- test_DeleteRecords_simple(rk, rkqu, offsets, &id5);
- test_CreatePartitions_simple(rk, rkqu, "topicD", 15, &id6);
- test_DeleteConsumerGroupOffsets_simple(rk, rkqu, "mygroup", offsets,
- &id7);
- test_DeleteConsumerGroupOffsets_simple(rk, rkqu, NULL, NULL, &id8);
- /* Use broker-side defaults for partition count */
- test_CreateTopics_simple(rk, rkqu, topics, 2, -1, &id9);
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
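- /* Each test_*_simple() call above received one of the struct waiting
- * instances as its opaque: poll until all nine results have come back,
- * matching each against its expected event type. */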
- while (cnt < 9) {
- rd_kafka_event_t *rkev;
- struct waiting *w;
-
- rkev = rd_kafka_queue_poll(rkqu, -1);
- TEST_ASSERT(rkev);
-
- TEST_SAY("Got event %s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- w = rd_kafka_event_opaque(rkev);
- TEST_ASSERT(w);
-
- TEST_ASSERT(w->evtype == rd_kafka_event_type(rkev),
- "Expected evtype %d, not %d (%s)", w->evtype,
- rd_kafka_event_type(rkev),
- rd_kafka_event_name(rkev));
-
- TEST_ASSERT(w->seen == 0, "Duplicate results");
-
- w->seen++;
- cnt++;
-
- rd_kafka_event_destroy(rkev);
- }
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test AlterConfigs and DescribeConfigs
- */
-static void do_test_configs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
-#define MY_CONFRES_CNT (RD_KAFKA_RESOURCE__CNT + 2)
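- /* One ConfigResource per resource type, including the _CNT sentinel
- * and one value past it, so unknown types are exercised too. */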
- rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
- rd_kafka_AdminOptions_t *options;
- rd_kafka_event_t *rkev;
- rd_kafka_resp_err_t err;
- const rd_kafka_AlterConfigs_result_t *res;
- const rd_kafka_ConfigResource_t **rconfigs;
- size_t rconfig_cnt;
- char errstr[128];
- int i;
-
- SUB_TEST_QUICK();
-
- /* Check invalids */
- configs[0] = rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)-1,
- "something");
- TEST_ASSERT(!configs[0]);
-
- configs[0] =
- rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)0, NULL);
- TEST_ASSERT(!configs[0]);
-
-
- for (i = 0; i < MY_CONFRES_CNT; i++) {
- int set_config = !(i % 2);
-
- /* librdkafka shall not limit the use of illogical
- * or unknown settings, they are enforced by the broker. */
- configs[i] = rd_kafka_ConfigResource_new(
- (rd_kafka_ResourceType_t)i, "3");
- TEST_ASSERT(configs[i] != NULL);
-
- if (set_config) {
- rd_kafka_ConfigResource_set_config(configs[i],
- "some.conf",
- "which remains "
- "unchecked");
- rd_kafka_ConfigResource_set_config(
- configs[i], "some.conf.null", NULL);
- }
- }
-
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
- err = rd_kafka_AdminOptions_set_request_timeout(options, 1000, errstr,
- sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
- /* AlterConfigs */
- rd_kafka_AlterConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu);
-
- rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
- 2000);
-
- TEST_ASSERT(rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected timeout, not %s",
- rd_kafka_event_error_string(rkev));
-
- res = rd_kafka_event_AlterConfigs_result(rkev);
- TEST_ASSERT(res);
-
- rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt);
- TEST_ASSERT(!rconfigs && !rconfig_cnt,
- "Expected no result resources, got %" PRIusz, rconfig_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- /* DescribeConfigs: reuse same configs and options */
- rd_kafka_DescribeConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu);
-
- rd_kafka_AdminOptions_destroy(options);
- rd_kafka_ConfigResource_destroy_array(configs, MY_CONFRES_CNT);
-
- rkev = test_wait_admin_result(
- rkqu, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 2000);
-
- TEST_ASSERT(rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected timeout, not %s",
- rd_kafka_event_error_string(rkev));
-
- res = rd_kafka_event_DescribeConfigs_result(rkev);
- TEST_ASSERT(res);
-
- rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
- TEST_ASSERT(!rconfigs && !rconfig_cnt,
- "Expected no result resources, got %" PRIusz, rconfig_cnt);
-
- rd_kafka_event_destroy(rkev);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Verify that an unclean rd_kafka_destroy() does not hang or crash.
- */
-static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) {
- rd_kafka_t *rk;
- char errstr[512];
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *q;
- rd_kafka_event_t *rkev;
- rd_kafka_DeleteTopic_t *topic;
- test_timing_t t_destroy;
-
- SUB_TEST_QUICK("Test unclean destroy using %s",
- with_mainq ? "mainq" : "tempq");
-
- test_conf_init(&conf, NULL, 0);
- /* Remove brokers, if any, since this is a local test and we
- * rely on the controller not being found. */
- test_conf_set(conf, "bootstrap.servers", "");
- test_conf_set(conf, "socket.timeout.ms", "60000");
-
- rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);
-
- if (with_mainq)
- q = rd_kafka_queue_get_main(rk);
- else
- q = rd_kafka_queue_new(rk);
-
- topic = rd_kafka_DeleteTopic_new("test");
- rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q);
- rd_kafka_DeleteTopic_destroy(topic);
-
- /* We're not expecting a result yet since DeleteTopics will attempt
- * to look up the controller for socket.timeout.ms (1 minute). */
- rkev = rd_kafka_queue_poll(q, 100);
- TEST_ASSERT(!rkev, "Did not expect result: %s",
- rd_kafka_event_name(rkev));
-
- rd_kafka_queue_destroy(q);
-
-        TEST_SAY(
-            "Giving rd_kafka_destroy() 5s to finish, "
-            "despite the Admin API request still being in flight\n");
- test_timeout_set(5);
- TIMING_START(&t_destroy, "rd_kafka_destroy()");
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_destroy);
-
- SUB_TEST_PASS();
-
- /* Restore timeout */
- test_timeout_set(60);
-}
-
-
-/**
- * @brief Test AdminOptions
- */
-static void do_test_options(rd_kafka_t *rk) {
-#define _all_apis \
- { \
- RD_KAFKA_ADMIN_OP_CREATETOPICS, \
- RD_KAFKA_ADMIN_OP_DELETETOPICS, \
- RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \
- RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \
- RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \
- RD_KAFKA_ADMIN_OP_DELETERECORDS, \
- RD_KAFKA_ADMIN_OP_CREATEACLS, \
- RD_KAFKA_ADMIN_OP_DESCRIBEACLS, \
- RD_KAFKA_ADMIN_OP_DELETEACLS, \
- RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, \
- RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, \
- RD_KAFKA_ADMIN_OP_DELETEGROUPS, \
- RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, \
- RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, \
- RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \
- RD_KAFKA_ADMIN_OP_ANY /* Must be last */ \
- }
- struct {
- const char *setter;
- const rd_kafka_admin_op_t valid_apis[16];
- } matrix[] = {
- {"request_timeout", _all_apis},
- {"operation_timeout",
- {RD_KAFKA_ADMIN_OP_CREATETOPICS, RD_KAFKA_ADMIN_OP_DELETETOPICS,
- RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
- RD_KAFKA_ADMIN_OP_DELETERECORDS}},
- {"validate_only",
- {RD_KAFKA_ADMIN_OP_CREATETOPICS,
- RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
- RD_KAFKA_ADMIN_OP_ALTERCONFIGS}},
- {"broker", _all_apis},
- {"require_stable_offsets",
- {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS}},
- {"match_consumer_group_states",
- {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS}},
- {"opaque", _all_apis},
- {NULL},
- };
- int i;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_consumer_group_state_t state[1] = {
- RD_KAFKA_CONSUMER_GROUP_STATE_STABLE};
-
- SUB_TEST_QUICK();
-
- for (i = 0; matrix[i].setter; i++) {
- static const rd_kafka_admin_op_t all_apis[] = _all_apis;
- const rd_kafka_admin_op_t *for_api;
-
- for (for_api = all_apis;; for_api++) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_resp_err_t exp_err =
- RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_error_t *error = NULL;
- char errstr[512];
- int fi;
-
- options = rd_kafka_AdminOptions_new(rk, *for_api);
- TEST_ASSERT(options, "AdminOptions_new(%d) failed",
- *for_api);
-
- if (!strcmp(matrix[i].setter, "request_timeout"))
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, 1234, errstr, sizeof(errstr));
- else if (!strcmp(matrix[i].setter, "operation_timeout"))
- err =
- rd_kafka_AdminOptions_set_operation_timeout(
- options, 12345, errstr, sizeof(errstr));
- else if (!strcmp(matrix[i].setter, "validate_only"))
- err = rd_kafka_AdminOptions_set_validate_only(
- options, 1, errstr, sizeof(errstr));
- else if (!strcmp(matrix[i].setter, "broker"))
- err = rd_kafka_AdminOptions_set_broker(
- options, 5, errstr, sizeof(errstr));
- else if (!strcmp(matrix[i].setter,
- "require_stable_offsets"))
- error =
- rd_kafka_AdminOptions_set_require_stable_offsets(
- options, 0);
- else if (!strcmp(matrix[i].setter,
- "match_consumer_group_states"))
- error =
- rd_kafka_AdminOptions_set_match_consumer_group_states(
- options, state, 1);
- else if (!strcmp(matrix[i].setter, "opaque")) {
- rd_kafka_AdminOptions_set_opaque(
- options, (void *)options);
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
- } else
- TEST_FAIL("Invalid setter: %s",
- matrix[i].setter);
-
- if (error) {
- err = rd_kafka_error_code(error);
- snprintf(errstr, sizeof(errstr), "%s",
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
- }
-
-
- TEST_SAYL(3,
- "AdminOptions_set_%s on "
- "RD_KAFKA_ADMIN_OP_%d options "
- "returned %s: %s\n",
- matrix[i].setter, *for_api,
- rd_kafka_err2name(err),
- err ? errstr : "success");
-
- /* Scan matrix valid_apis to see if this
- * setter should be accepted or not. */
- if (exp_err) {
- /* An expected error is already set */
- } else if (*for_api != RD_KAFKA_ADMIN_OP_ANY) {
- exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
-
- for (fi = 0; matrix[i].valid_apis[fi]; fi++) {
- if (matrix[i].valid_apis[fi] ==
- *for_api)
- exp_err =
- RD_KAFKA_RESP_ERR_NO_ERROR;
- }
- } else {
- exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (err != exp_err)
- TEST_FAIL_LATER(
- "Expected AdminOptions_set_%s "
- "for RD_KAFKA_ADMIN_OP_%d "
- "options to return %s, "
- "not %s",
- matrix[i].setter, *for_api,
- rd_kafka_err2name(exp_err),
- rd_kafka_err2name(err));
-
- rd_kafka_AdminOptions_destroy(options);
-
- if (*for_api == RD_KAFKA_ADMIN_OP_ANY)
- break; /* This was the last one */
- }
- }
-
- /* Try an invalid for_api */
- options = rd_kafka_AdminOptions_new(rk, (rd_kafka_admin_op_t)1234);
- TEST_ASSERT(!options,
- "Expected AdminOptions_new() to fail "
- "with an invalid for_api, didn't.");
-
- TEST_LATER_CHECK();
-
- SUB_TEST_PASS();
-}
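
The matrix above encodes a simple contract: a setter applied to an AdminOptions object created for an operation it does not support fails with RD_KAFKA_RESP_ERR__INVALID_ARG, while options created for RD_KAFKA_ADMIN_OP_ANY accept every setter. A small sketch of that contract, with `rk` assumed to be any valid client handle:

    #include <assert.h>

    static void options_contract_example(rd_kafka_t *rk) {
            char errstr[256];
            rd_kafka_AdminOptions_t *options;
            rd_kafka_resp_err_t err;

            /* validate_only is only valid for CreateTopics,
             * CreatePartitions and AlterConfigs, so on DeleteTopics
             * options it should be rejected. */
            options = rd_kafka_AdminOptions_new(
                rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
            err = rd_kafka_AdminOptions_set_validate_only(
                options, 1, errstr, sizeof(errstr));
            assert(err == RD_KAFKA_RESP_ERR__INVALID_ARG);

            rd_kafka_AdminOptions_destroy(options);
    }
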
-
-
-static rd_kafka_t *create_admin_client(rd_kafka_type_t cltype) {
- rd_kafka_t *rk;
- char errstr[512];
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, NULL, 0);
- /* Remove brokers, if any, since this is a local test and we
- * rely on the controller not being found. */
- test_conf_set(conf, "bootstrap.servers", "");
- test_conf_set(conf, "socket.timeout.ms", MY_SOCKET_TIMEOUT_MS_STR);
- /* For use with the background queue */
- rd_kafka_conf_set_background_event_cb(conf, background_event_cb);
-
- rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);
-
- return rk;
-}
-
-
-static void do_test_apis(rd_kafka_type_t cltype) {
- rd_kafka_t *rk;
- rd_kafka_queue_t *mainq, *backgroundq;
-
- mtx_init(&last_event_lock, mtx_plain);
- cnd_init(&last_event_cnd);
-
- do_test_unclean_destroy(cltype, 0 /*tempq*/);
- do_test_unclean_destroy(cltype, 1 /*mainq*/);
-
- rk = create_admin_client(cltype);
-
- mainq = rd_kafka_queue_get_main(rk);
- backgroundq = rd_kafka_queue_get_background(rk);
-
- do_test_options(rk);
-
- do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0);
- do_test_CreateTopics("temp queue, no options, background_event_cb", rk,
- backgroundq, 1, 0);
- do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1);
- do_test_CreateTopics("main queue, options", rk, mainq, 0, 1);
-
- do_test_DeleteTopics("temp queue, no options", rk, NULL, 0);
- do_test_DeleteTopics("temp queue, options", rk, NULL, 1);
- do_test_DeleteTopics("main queue, options", rk, mainq, 1);
-
- do_test_ListConsumerGroups("temp queue, no options", rk, NULL, 0,
- rd_false);
- do_test_ListConsumerGroups("temp queue, options", rk, NULL, 1,
- rd_false);
- do_test_ListConsumerGroups("main queue", rk, mainq, 0, rd_false);
-
- do_test_DescribeConsumerGroups("temp queue, no options", rk, NULL, 0,
- rd_false);
- do_test_DescribeConsumerGroups("temp queue, options", rk, NULL, 1,
- rd_false);
- do_test_DescribeConsumerGroups("main queue, options", rk, mainq, 1,
- rd_false);
-
- do_test_DeleteGroups("temp queue, no options", rk, NULL, 0, rd_false);
- do_test_DeleteGroups("temp queue, options", rk, NULL, 1, rd_false);
- do_test_DeleteGroups("main queue, options", rk, mainq, 1, rd_false);
-
- do_test_DeleteRecords("temp queue, no options", rk, NULL, 0, rd_false);
- do_test_DeleteRecords("temp queue, options", rk, NULL, 1, rd_false);
- do_test_DeleteRecords("main queue, options", rk, mainq, 1, rd_false);
-
- do_test_DeleteConsumerGroupOffsets("temp queue, no options", rk, NULL,
- 0);
- do_test_DeleteConsumerGroupOffsets("temp queue, options", rk, NULL, 1);
- do_test_DeleteConsumerGroupOffsets("main queue, options", rk, mainq, 1);
-
- do_test_AclBinding();
- do_test_AclBindingFilter();
-
- do_test_CreateAcls("temp queue, no options", rk, NULL, rd_false,
- rd_false);
- do_test_CreateAcls("temp queue, options", rk, NULL, rd_false, rd_true);
- do_test_CreateAcls("main queue, options", rk, mainq, rd_false, rd_true);
-
- do_test_DescribeAcls("temp queue, no options", rk, NULL, rd_false,
- rd_false);
- do_test_DescribeAcls("temp queue, options", rk, NULL, rd_false,
- rd_true);
- do_test_DescribeAcls("main queue, options", rk, mainq, rd_false,
- rd_true);
-
- do_test_DeleteAcls("temp queue, no options", rk, NULL, rd_false,
- rd_false);
- do_test_DeleteAcls("temp queue, options", rk, NULL, rd_false, rd_true);
- do_test_DeleteAcls("main queue, options", rk, mainq, rd_false, rd_true);
-
- do_test_AlterConsumerGroupOffsets("temp queue, no options", rk, NULL,
- 0);
- do_test_AlterConsumerGroupOffsets("temp queue, options", rk, NULL, 1);
- do_test_AlterConsumerGroupOffsets("main queue, options", rk, mainq, 1);
-
- do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0,
- rd_false);
- do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1,
- rd_false);
- do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1,
- rd_false);
- do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0,
- rd_true);
- do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1,
- rd_true);
- do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1,
- rd_true);
-
- do_test_mix(rk, mainq);
-
- do_test_configs(rk, mainq);
-
- rd_kafka_queue_destroy(backgroundq);
- rd_kafka_queue_destroy(mainq);
-
- rd_kafka_destroy(rk);
-
- /*
-         * Tests which each require a fresh, unused client instance.
- */
- rk = create_admin_client(cltype);
- mainq = rd_kafka_queue_get_main(rk);
- do_test_DeleteRecords("main queue, options, destroy", rk, mainq, 1,
- rd_true /*destroy instance before finishing*/);
- rd_kafka_queue_destroy(mainq);
- rd_kafka_destroy(rk);
-
- rk = create_admin_client(cltype);
- mainq = rd_kafka_queue_get_main(rk);
- do_test_DeleteGroups("main queue, options, destroy", rk, mainq, 1,
- rd_true /*destroy instance before finishing*/);
- rd_kafka_queue_destroy(mainq);
- rd_kafka_destroy(rk);
-
-
- /* Done */
- mtx_destroy(&last_event_lock);
- cnd_destroy(&last_event_cnd);
-}
-
-
-int main_0080_admin_ut(int argc, char **argv) {
- do_test_apis(RD_KAFKA_PRODUCER);
- do_test_apis(RD_KAFKA_CONSUMER);
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c
deleted file mode 100644
index 7da2dff15..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c
+++ /dev/null
@@ -1,3797 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-#include "../src/rdstring.h"
-
-/**
- * @brief Admin API integration tests.
- */
-
-
-static int32_t *avail_brokers;
-static size_t avail_broker_cnt;
-
-
-
-static void do_test_CreateTopics(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int op_timeout,
- rd_bool_t validate_only) {
- rd_kafka_queue_t *q;
-#define MY_NEW_TOPICS_CNT 7
- char *topics[MY_NEW_TOPICS_CNT];
- rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0};
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- /* Expected topics in metadata */
- rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
- int exp_mdtopic_cnt = 0;
- /* Not expected topics in metadata */
- rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
- int exp_not_mdtopic_cnt = 0;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_CreateTopics_result_t *res;
- const rd_kafka_topic_result_t **restopics;
- size_t restopic_cnt;
- int metadata_tmout;
- int num_replicas = (int)avail_broker_cnt;
- int32_t *replicas;
-
- SUB_TEST_QUICK(
- "%s CreateTopics with %s, "
- "op_timeout %d, validate_only %d",
- rd_kafka_name(rk), what, op_timeout, validate_only);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /* Set up replicas */
- replicas = rd_alloca(sizeof(*replicas) * num_replicas);
- for (i = 0; i < num_replicas; i++)
- replicas[i] = avail_brokers[i];
-
- /**
-         * Construct NewTopic array with different properties for
-         * each topic.
- */
- for (i = 0; i < MY_NEW_TOPICS_CNT; i++) {
- char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- int use_defaults =
- i == 6 && test_broker_version >= TEST_BRKVER(2, 4, 0, 0);
- int num_parts = !use_defaults ? (i * 7 + 1) : -1;
- int set_config = (i & 1);
- int add_invalid_config = (i == 1);
- int set_replicas = !use_defaults && !(i % 3);
- rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- topics[i] = topic;
- new_topics[i] = rd_kafka_NewTopic_new(
- topic, num_parts, set_replicas ? -1 : num_replicas, NULL,
- 0);
-
- if (set_config) {
- /*
- * Add various configuration properties
- */
- err = rd_kafka_NewTopic_set_config(
- new_topics[i], "compression.type", "lz4");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- err = rd_kafka_NewTopic_set_config(
- new_topics[i], "delete.retention.ms", "900");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
- if (add_invalid_config) {
- /* Add invalid config property */
- err = rd_kafka_NewTopic_set_config(
- new_topics[i], "dummy.doesntexist",
- "broker is verifying this");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG;
- }
-
- TEST_SAY(
- "Expecting result for topic #%d: %s "
- "(set_config=%d, add_invalid_config=%d, "
- "set_replicas=%d, use_defaults=%d)\n",
- i, rd_kafka_err2name(this_exp_err), set_config,
- add_invalid_config, set_replicas, use_defaults);
-
- if (set_replicas) {
- int32_t p;
-
- /*
- * Set valid replica assignments
- */
- for (p = 0; p < num_parts; p++) {
- err = rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], p, replicas, num_replicas,
- errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
- }
- }
-
- if (this_exp_err || validate_only) {
- exp_topicerr[i] = this_exp_err;
- exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
-
- } else {
- exp_mdtopics[exp_mdtopic_cnt].topic = topic;
- exp_mdtopics[exp_mdtopic_cnt].partition_cnt = num_parts;
- exp_mdtopic_cnt++;
- }
- }
-
- if (op_timeout != -1 || validate_only) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
-
- if (op_timeout != -1) {
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, op_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
- if (validate_only) {
- err = rd_kafka_AdminOptions_set_validate_only(
- options, validate_only, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
- }
-
- TIMING_START(&timing, "CreateTopics");
- TEST_SAY("Call CreateTopics\n");
- rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /* Poll result queue for CreateTopics result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- TIMING_START(&timing, "CreateTopics.queue_poll");
- do {
- rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
- TEST_SAY("CreateTopics: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
- } while (rd_kafka_event_type(rkev) !=
- RD_KAFKA_EVENT_CREATETOPICS_RESULT);
-
- /* Convert event to proper result */
- res = rd_kafka_event_CreateTopics_result(rkev);
- TEST_ASSERT(res, "expected CreateTopics_result, not %s",
- rd_kafka_event_name(rkev));
-
-        /* Check the overall operation result against the expected error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == exp_err,
- "expected CreateTopics to return %s, not %s (%s)",
- rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- TEST_SAY("CreateTopics: returned %s (%s)\n", rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- /* Extract topics */
- restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);
-
-
- /* Scan topics for proper fields and expected failures. */
- for (i = 0; i < (int)restopic_cnt; i++) {
- const rd_kafka_topic_result_t *terr = restopics[i];
-
- /* Verify that topic order matches our request. */
- if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
- TEST_FAIL_LATER(
- "Topic result order mismatch at #%d: "
- "expected %s, got %s",
- i, topics[i], rd_kafka_topic_result_name(terr));
-
- TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", i,
- rd_kafka_topic_result_name(terr),
- rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
- rd_kafka_topic_result_error_string(terr));
- if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
- TEST_FAIL_LATER("Expected %s, not %d: %s",
- rd_kafka_err2name(exp_topicerr[i]),
- rd_kafka_topic_result_error(terr),
- rd_kafka_err2name(
- rd_kafka_topic_result_error(terr)));
- }
-
- /**
-         * Verify that the expected topics are created and the non-expected
- * are not. Allow it some time to propagate.
- */
- if (validate_only) {
- /* No topics should have been created, give it some time
- * before checking. */
- rd_sleep(2);
- metadata_tmout = 5 * 1000;
- } else {
- if (op_timeout > 0)
- metadata_tmout = op_timeout + 1000;
- else
- metadata_tmout = 10 * 1000;
- }
-
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt,
- exp_not_mdtopics, exp_not_mdtopic_cnt,
- metadata_tmout);
-
- rd_kafka_event_destroy(rkev);
-
- for (i = 0; i < MY_NEW_TOPICS_CNT; i++) {
- rd_kafka_NewTopic_destroy(new_topics[i]);
- rd_free(topics[i]);
- }
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef MY_NEW_TOPICS_CNT
-
- SUB_TEST_PASS();
-}
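
Stripped of the verification scaffolding, the core CreateTopics usage exercised above reduces to a few calls. A sketch under the same assumptions (valid `rk` and result queue `q`, illustrative names; error returns left unchecked for brevity):

    static void create_topic_example(rd_kafka_t *rk, rd_kafka_queue_t *q) {
            char errstr[256];
            rd_kafka_NewTopic_t *nt;

            nt = rd_kafka_NewTopic_new("mytopic", 3 /*partitions*/,
                                       1 /*replication factor*/, errstr,
                                       sizeof(errstr));
            rd_kafka_NewTopic_set_config(nt, "compression.type", "lz4");

            /* NULL options: default timeouts, no validate_only. */
            rd_kafka_CreateTopics(rk, &nt, 1, NULL, q);
            rd_kafka_NewTopic_destroy(nt);

            /* Next: poll q for RD_KAFKA_EVENT_CREATETOPICS_RESULT and
             * inspect rd_kafka_CreateTopics_result_topics(). */
    }
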
-
-
-
-/**
- * @brief Test deletion of topics.
- */
-static void do_test_DeleteTopics(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int op_timeout) {
- rd_kafka_queue_t *q;
- const int skip_topic_cnt = 2;
-#define MY_DEL_TOPICS_CNT 9
- char *topics[MY_DEL_TOPICS_CNT];
- rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0};
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- /* Expected topics in metadata */
- rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
- int exp_mdtopic_cnt = 0;
- /* Not expected topics in metadata */
- rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
- int exp_not_mdtopic_cnt = 0;
- int i;
- char errstr[512];
- const char *errstr2;
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteTopics_result_t *res;
- const rd_kafka_topic_result_t **restopics;
- size_t restopic_cnt;
- int metadata_tmout;
-
- SUB_TEST_QUICK("%s DeleteTopics with %s, op_timeout %d",
- rd_kafka_name(rk), what, op_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /**
- * Construct DeleteTopic array
- */
- for (i = 0; i < MY_DEL_TOPICS_CNT; i++) {
- char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt;
-
- topics[i] = topic;
-
- del_topics[i] = rd_kafka_DeleteTopic_new(topic);
-
- if (notexist_topic)
- exp_topicerr[i] =
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else {
- exp_topicerr[i] = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
- }
-
- exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
- }
-
- if (op_timeout != -1) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, op_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- /* Create the topics first, minus the skip count. */
- test_CreateTopics_simple(rk, NULL, topics,
- MY_DEL_TOPICS_CNT - skip_topic_cnt,
- 2 /*num_partitions*/, NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
- 15 * 1000);
-
- TIMING_START(&timing, "DeleteTopics");
- TEST_SAY("Call DeleteTopics\n");
- rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /* Poll result queue for DeleteTopics result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- TIMING_START(&timing, "DeleteTopics.queue_poll");
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
- TEST_SAY("DeleteTopics: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_DELETETOPICS_RESULT)
- break;
-
- rd_kafka_event_destroy(rkev);
- }
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteTopics_result(rkev);
- TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
- rd_kafka_event_name(rkev));
-
-        /* Check the overall operation result against the expected error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == exp_err,
- "expected DeleteTopics to return %s, not %s (%s)",
- rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- TEST_SAY("DeleteTopics: returned %s (%s)\n", rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- /* Extract topics */
- restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);
-
-
- /* Scan topics for proper fields and expected failures. */
- for (i = 0; i < (int)restopic_cnt; i++) {
- const rd_kafka_topic_result_t *terr = restopics[i];
-
- /* Verify that topic order matches our request. */
- if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
- TEST_FAIL_LATER(
- "Topic result order mismatch at #%d: "
- "expected %s, got %s",
- i, topics[i], rd_kafka_topic_result_name(terr));
-
- TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", i,
- rd_kafka_topic_result_name(terr),
- rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
- rd_kafka_topic_result_error_string(terr));
- if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
- TEST_FAIL_LATER("Expected %s, not %d: %s",
- rd_kafka_err2name(exp_topicerr[i]),
- rd_kafka_topic_result_error(terr),
- rd_kafka_err2name(
- rd_kafka_topic_result_error(terr)));
- }
-
- /**
- * Verify that the expected topics are deleted and the non-expected
- * are not. Allow it some time to propagate.
- */
- if (op_timeout > 0)
- metadata_tmout = op_timeout + 1000;
- else
- metadata_tmout = 10 * 1000;
-
- test_wait_metadata_update(rk, NULL, 0, exp_not_mdtopics,
- exp_not_mdtopic_cnt, metadata_tmout);
-
- rd_kafka_event_destroy(rkev);
-
- for (i = 0; i < MY_DEL_TOPICS_CNT; i++) {
- rd_kafka_DeleteTopic_destroy(del_topics[i]);
- rd_free(topics[i]);
- }
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef MY_DEL_TOPICS_CNT
-
- SUB_TEST_PASS();
-}
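
The DeleteTopics request itself is considerably smaller than the test around it; note that per-topic errors (such as UNKNOWN_TOPIC_OR_PART above) arrive in the result event, not as a return value of the request call. A brief sketch, names illustrative:

    static void delete_topic_example(rd_kafka_t *rk, rd_kafka_queue_t *q) {
            rd_kafka_DeleteTopic_t *dt = rd_kafka_DeleteTopic_new("mytopic");

            rd_kafka_DeleteTopics(rk, &dt, 1, NULL /*options*/, q);
            rd_kafka_DeleteTopic_destroy(dt);

            /* Poll q for RD_KAFKA_EVENT_DELETETOPICS_RESULT, then check
             * rd_kafka_topic_result_error() for each returned topic. */
    }
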
-
-
-
-/**
- * @brief Test creation of partitions.
- */
-static void do_test_CreatePartitions(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int op_timeout) {
- rd_kafka_queue_t *q;
-#define MY_CRP_TOPICS_CNT 9
- char *topics[MY_CRP_TOPICS_CNT];
- rd_kafka_NewTopic_t *new_topics[MY_CRP_TOPICS_CNT];
- rd_kafka_NewPartitions_t *crp_topics[MY_CRP_TOPICS_CNT];
- rd_kafka_AdminOptions_t *options = NULL;
- /* Expected topics in metadata */
- rd_kafka_metadata_topic_t exp_mdtopics[MY_CRP_TOPICS_CNT] = {{0}};
- rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}};
- int exp_mdtopic_cnt = 0;
- int i;
- char errstr[512];
- rd_kafka_resp_err_t err;
- test_timing_t timing;
- int metadata_tmout;
- int num_replicas = (int)avail_broker_cnt;
-
- SUB_TEST_QUICK("%s CreatePartitions with %s, op_timeout %d",
- rd_kafka_name(rk), what, op_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- /* Set up two expected partitions with different replication sets
- * so they can be matched by the metadata checker later.
- * Even partitions use exp_mdparts[0] while odd partitions
- * use exp_mdparts[1]. */
-
-        /* Set valid replica assignments (in broker order for even
-         * partitions, reversed for odd partitions) */
- exp_mdparts[0].replicas =
- rd_alloca(sizeof(*exp_mdparts[0].replicas) * num_replicas);
- exp_mdparts[1].replicas =
- rd_alloca(sizeof(*exp_mdparts[1].replicas) * num_replicas);
- exp_mdparts[0].replica_cnt = num_replicas;
- exp_mdparts[1].replica_cnt = num_replicas;
- for (i = 0; i < num_replicas; i++) {
- exp_mdparts[0].replicas[i] = avail_brokers[i];
- exp_mdparts[1].replicas[i] =
- avail_brokers[num_replicas - i - 1];
- }
-
- /**
- * Construct CreatePartitions array
- */
- for (i = 0; i < MY_CRP_TOPICS_CNT; i++) {
- char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- int initial_part_cnt = 1 + (i * 2);
- int new_part_cnt = 1 + (i / 2);
- int final_part_cnt = initial_part_cnt + new_part_cnt;
- int set_replicas = !(i % 2);
- int pi;
-
- topics[i] = topic;
-
- /* Topic to create with initial partition count */
- new_topics[i] = rd_kafka_NewTopic_new(
- topic, initial_part_cnt, set_replicas ? -1 : num_replicas,
- NULL, 0);
-
- /* .. and later add more partitions to */
- crp_topics[i] = rd_kafka_NewPartitions_new(
- topic, final_part_cnt, errstr, sizeof(errstr));
-
- if (set_replicas) {
- exp_mdtopics[exp_mdtopic_cnt].partitions = rd_alloca(
- final_part_cnt *
- sizeof(*exp_mdtopics[exp_mdtopic_cnt].partitions));
-
- for (pi = 0; pi < final_part_cnt; pi++) {
- const rd_kafka_metadata_partition_t *exp_mdp =
- &exp_mdparts[pi & 1];
-
- exp_mdtopics[exp_mdtopic_cnt].partitions[pi] =
- *exp_mdp; /* copy */
-
- exp_mdtopics[exp_mdtopic_cnt]
- .partitions[pi]
- .id = pi;
-
- if (pi < initial_part_cnt) {
- /* Set replica assignment
- * for initial partitions */
- err =
- rd_kafka_NewTopic_set_replica_assignment(
- new_topics[i], pi,
- exp_mdp->replicas,
- (size_t)exp_mdp->replica_cnt,
- errstr, sizeof(errstr));
- TEST_ASSERT(!err,
- "NewTopic_set_replica_"
- "assignment: %s",
- errstr);
- } else {
- /* Set replica assignment for new
- * partitions */
- err =
- rd_kafka_NewPartitions_set_replica_assignment(
- crp_topics[i],
- pi - initial_part_cnt,
- exp_mdp->replicas,
- (size_t)exp_mdp->replica_cnt,
- errstr, sizeof(errstr));
- TEST_ASSERT(!err,
- "NewPartitions_set_replica_"
- "assignment: %s",
- errstr);
- }
- }
- }
-
- TEST_SAY(_C_YEL
- "Topic %s with %d initial partitions will grow "
- "by %d to %d total partitions with%s replicas set\n",
- topics[i], initial_part_cnt, new_part_cnt,
- final_part_cnt, set_replicas ? "" : "out");
-
- exp_mdtopics[exp_mdtopic_cnt].topic = topic;
- exp_mdtopics[exp_mdtopic_cnt].partition_cnt = final_part_cnt;
-
- exp_mdtopic_cnt++;
- }
-
- if (op_timeout != -1) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, op_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
- /*
- * Create topics with initial partition count
- */
- TIMING_START(&timing, "CreateTopics");
- TEST_SAY("Creating topics with initial partition counts\n");
- rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, 15000);
- TEST_ASSERT(!err, "CreateTopics failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_NewTopic_destroy_array(new_topics, MY_CRP_TOPICS_CNT);
-
-
- /*
- * Create new partitions
- */
- TIMING_START(&timing, "CreatePartitions");
- TEST_SAY("Creating partitions\n");
- rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, options,
- q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, 15000);
- TEST_ASSERT(!err, "CreatePartitions failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_NewPartitions_destroy_array(crp_topics, MY_CRP_TOPICS_CNT);
-
-
- /**
-         * Verify that the topics now have the expected total partition
-         * counts. Allow the metadata some time to propagate.
- */
- if (op_timeout > 0)
- metadata_tmout = op_timeout + 1000;
- else
- metadata_tmout = 10 * 1000;
-
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
- metadata_tmout);
-
- for (i = 0; i < MY_CRP_TOPICS_CNT; i++)
- rd_free(topics[i]);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef MY_CRP_TOPICS_CNT
-
- SUB_TEST_PASS();
-}
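
Note that rd_kafka_NewPartitions_new() takes the desired total partition count, not the number of partitions to add, which is why the test computes final_part_cnt above. A minimal sketch of growing a topic (illustrative names, errors unchecked):

    static void grow_topic_example(rd_kafka_t *rk, rd_kafka_queue_t *q) {
            char errstr[256];
            rd_kafka_NewPartitions_t *np;

            /* Grow "mytopic" to 6 partitions in total. */
            np = rd_kafka_NewPartitions_new("mytopic", 6, errstr,
                                            sizeof(errstr));
            rd_kafka_CreatePartitions(rk, &np, 1, NULL /*options*/, q);
            rd_kafka_NewPartitions_destroy(np);
    }
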
-
-
-
-/**
- * @brief Print the ConfigEntrys in the provided array.
- */
-static void test_print_ConfigEntry_array(const rd_kafka_ConfigEntry_t **entries,
- size_t entry_cnt,
- unsigned int depth) {
- const char *indent = &" "[4 - (depth > 4 ? 4 : depth)];
- size_t ei;
-
- for (ei = 0; ei < entry_cnt; ei++) {
- const rd_kafka_ConfigEntry_t *e = entries[ei];
- const rd_kafka_ConfigEntry_t **syns;
- size_t syn_cnt;
-
- syns = rd_kafka_ConfigEntry_synonyms(e, &syn_cnt);
-
-#define YN(v) ((v) ? "y" : "n")
- TEST_SAYL(
- 3,
- "%s#%" PRIusz "/%" PRIusz
- ": Source %s (%d): \"%s\"=\"%s\" "
- "[is read-only=%s, default=%s, sensitive=%s, "
- "synonym=%s] with %" PRIusz " synonym(s)\n",
- indent, ei, entry_cnt,
- rd_kafka_ConfigSource_name(rd_kafka_ConfigEntry_source(e)),
- rd_kafka_ConfigEntry_source(e),
- rd_kafka_ConfigEntry_name(e),
- rd_kafka_ConfigEntry_value(e)
- ? rd_kafka_ConfigEntry_value(e)
- : "(NULL)",
- YN(rd_kafka_ConfigEntry_is_read_only(e)),
- YN(rd_kafka_ConfigEntry_is_default(e)),
- YN(rd_kafka_ConfigEntry_is_sensitive(e)),
- YN(rd_kafka_ConfigEntry_is_synonym(e)), syn_cnt);
-#undef YN
-
- if (syn_cnt > 0)
- test_print_ConfigEntry_array(syns, syn_cnt, depth + 1);
- }
-}
-
-
-/**
- * @brief Test AlterConfigs
- */
-static void do_test_AlterConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
-#define MY_CONFRES_CNT 3
- char *topics[MY_CONFRES_CNT];
- rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
- rd_kafka_AdminOptions_t *options;
- rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
- rd_kafka_event_t *rkev;
- rd_kafka_resp_err_t err;
- const rd_kafka_AlterConfigs_result_t *res;
- const rd_kafka_ConfigResource_t **rconfigs;
- size_t rconfig_cnt;
- char errstr[128];
- const char *errstr2;
- int ci = 0;
- int i;
- int fails = 0;
-
- SUB_TEST_QUICK();
-
- /*
-         * Only create one topic; the others will be non-existent.
- */
- for (i = 0; i < MY_CONFRES_CNT; i++)
- rd_strdupa(&topics[i], test_mk_topic_name(__FUNCTION__, 1));
-
- test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);
-
- test_wait_topic_exists(rk, topics[0], 10000);
-
- /*
- * ConfigResource #0: valid topic config
- */
- configs[ci] =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
-
- err = rd_kafka_ConfigResource_set_config(configs[ci],
- "compression.type", "gzip");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- err = rd_kafka_ConfigResource_set_config(configs[ci], "flush.ms",
- "12345678");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
- ci++;
-
-
- if (test_broker_version >= TEST_BRKVER(1, 1, 0, 0)) {
- /*
- * ConfigResource #1: valid broker config
- */
- configs[ci] = rd_kafka_ConfigResource_new(
- RD_KAFKA_RESOURCE_BROKER,
- tsprintf("%" PRId32, avail_brokers[0]));
-
- err = rd_kafka_ConfigResource_set_config(
- configs[ci], "sasl.kerberos.min.time.before.relogin",
- "58000");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
- ci++;
- } else {
- TEST_WARN(
- "Skipping RESOURCE_BROKER test on unsupported "
- "broker version\n");
- }
-
- /*
- * ConfigResource #2: valid topic config, non-existent topic
- */
- configs[ci] =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
-
- err = rd_kafka_ConfigResource_set_config(configs[ci],
- "compression.type", "lz4");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- err = rd_kafka_ConfigResource_set_config(
- configs[ci], "offset.metadata.max.bytes", "12345");
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
-
- if (test_broker_version >= TEST_BRKVER(2, 7, 0, 0))
- exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else
- exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN;
- ci++;
-
-
- /*
- * Timeout options
- */
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ALTERCONFIGS);
- err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
- sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
-
- /*
- * Fire off request
- */
- rd_kafka_AlterConfigs(rk, configs, ci, options, rkqu);
-
- rd_kafka_AdminOptions_destroy(options);
-
- /*
- * Wait for result
- */
- rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
- 10000 + 1000);
-
- /*
- * Extract result
- */
- res = rd_kafka_event_AlterConfigs_result(rkev);
- TEST_ASSERT(res, "Expected AlterConfigs result, not %s",
- rd_kafka_event_name(rkev));
-
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!err, "Expected success, not %s: %s",
- rd_kafka_err2name(err), errstr2);
-
- rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt);
- TEST_ASSERT((int)rconfig_cnt == ci,
- "Expected %d result resources, got %" PRIusz "\n", ci,
- rconfig_cnt);
-
- /*
- * Verify status per resource
- */
- for (i = 0; i < (int)rconfig_cnt; i++) {
- const rd_kafka_ConfigEntry_t **entries;
- size_t entry_cnt;
-
- err = rd_kafka_ConfigResource_error(rconfigs[i]);
- errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);
-
- entries =
- rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt);
-
- TEST_SAY(
- "ConfigResource #%d: type %s (%d), \"%s\": "
- "%" PRIusz " ConfigEntries, error %s (%s)\n",
- i,
- rd_kafka_ResourceType_name(
- rd_kafka_ConfigResource_type(rconfigs[i])),
- rd_kafka_ConfigResource_type(rconfigs[i]),
- rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt,
- rd_kafka_err2name(err), errstr2 ? errstr2 : "");
-
- test_print_ConfigEntry_array(entries, entry_cnt, 1);
-
- if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
- rd_kafka_ConfigResource_type(configs[i]) ||
- strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
- rd_kafka_ConfigResource_name(configs[i]))) {
- TEST_FAIL_LATER(
- "ConfigResource #%d: "
- "expected type %s name %s, "
- "got type %s name %s",
- i,
- rd_kafka_ResourceType_name(
- rd_kafka_ConfigResource_type(configs[i])),
- rd_kafka_ConfigResource_name(configs[i]),
- rd_kafka_ResourceType_name(
- rd_kafka_ConfigResource_type(rconfigs[i])),
- rd_kafka_ConfigResource_name(rconfigs[i]));
- fails++;
- continue;
- }
-
-
- if (err != exp_err[i]) {
- TEST_FAIL_LATER(
- "ConfigResource #%d: "
- "expected %s (%d), got %s (%s)",
- i, rd_kafka_err2name(exp_err[i]), exp_err[i],
- rd_kafka_err2name(err), errstr2 ? errstr2 : "");
- fails++;
- }
- }
-
- TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_ConfigResource_destroy_array(configs, ci);
-
- TEST_LATER_CHECK();
-#undef MY_CONFRES_CNT
-
- SUB_TEST_PASS();
-}
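
As the verification loop above shows, a successful event-level error code does not imply that every resource succeeded: each ConfigResource in the result carries its own error. A condensed sketch of that per-resource check, assuming `res` was obtained from an ALTERCONFIGS result event and stdio is available:

    #include <stdio.h>

    const rd_kafka_ConfigResource_t **rc;
    size_t cnt, i;

    rc = rd_kafka_AlterConfigs_result_resources(res, &cnt);
    for (i = 0; i < cnt; i++) {
            /* Each resource reports its own error independently of the
             * event-level error. */
            if (rd_kafka_ConfigResource_error(rc[i]))
                    fprintf(stderr, "%s: %s\n",
                            rd_kafka_ConfigResource_name(rc[i]),
                            rd_kafka_ConfigResource_error_string(rc[i]));
    }

A real caller would likely use the test log macros rather than fprintf.
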
-
-
-
-/**
- * @brief Test DescribeConfigs
- */
-static void do_test_DescribeConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
-#define MY_CONFRES_CNT 3
- char *topics[MY_CONFRES_CNT];
- rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
- rd_kafka_AdminOptions_t *options;
- rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
- rd_kafka_event_t *rkev;
- rd_kafka_resp_err_t err;
- const rd_kafka_DescribeConfigs_result_t *res;
- const rd_kafka_ConfigResource_t **rconfigs;
- size_t rconfig_cnt;
- char errstr[128];
- const char *errstr2;
- int ci = 0;
- int i;
- int fails = 0;
- int max_retry_describe = 3;
-
- SUB_TEST_QUICK();
-
- /*
-         * Only create one topic; the others will be non-existent.
- */
- rd_strdupa(&topics[0], test_mk_topic_name("DescribeConfigs_exist", 1));
- for (i = 1; i < MY_CONFRES_CNT; i++)
- rd_strdupa(&topics[i],
- test_mk_topic_name("DescribeConfigs_notexist", 1));
-
- test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);
-
- /*
- * ConfigResource #0: topic config, no config entries.
- */
- configs[ci] =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
- exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
- ci++;
-
- /*
-         * ConfigResource #1: broker config, no config entries
- */
- configs[ci] = rd_kafka_ConfigResource_new(
- RD_KAFKA_RESOURCE_BROKER, tsprintf("%" PRId32, avail_brokers[0]));
-
- exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
- ci++;
-
- /*
-         * ConfigResource #2: topic config, non-existent topic, no config
-         * entries.
- */
- configs[ci] =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
-        /* FIXME: This is a bug in the broker (<v2.0.0): it returns a full
- * response for unknown topics.
- * https://issues.apache.org/jira/browse/KAFKA-6778
- */
- if (test_broker_version < TEST_BRKVER(2, 0, 0, 0))
- exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
- else
- exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- ci++;
-
-
-retry_describe:
- /*
- * Timeout options
- */
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
- err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
- sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
-
- /*
- * Fire off request
- */
- rd_kafka_DescribeConfigs(rk, configs, ci, options, rkqu);
-
- rd_kafka_AdminOptions_destroy(options);
-
- /*
- * Wait for result
- */
- rkev = test_wait_admin_result(
- rkqu, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 10000 + 1000);
-
- /*
- * Extract result
- */
- res = rd_kafka_event_DescribeConfigs_result(rkev);
- TEST_ASSERT(res, "Expected DescribeConfigs result, not %s",
- rd_kafka_event_name(rkev));
-
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!err, "Expected success, not %s: %s",
- rd_kafka_err2name(err), errstr2);
-
- rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
- TEST_ASSERT((int)rconfig_cnt == ci,
- "Expected %d result resources, got %" PRIusz "\n", ci,
- rconfig_cnt);
-
- /*
- * Verify status per resource
- */
- for (i = 0; i < (int)rconfig_cnt; i++) {
- const rd_kafka_ConfigEntry_t **entries;
- size_t entry_cnt;
-
- err = rd_kafka_ConfigResource_error(rconfigs[i]);
- errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);
-
- entries =
- rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt);
-
- TEST_SAY(
- "ConfigResource #%d: type %s (%d), \"%s\": "
- "%" PRIusz " ConfigEntries, error %s (%s)\n",
- i,
- rd_kafka_ResourceType_name(
- rd_kafka_ConfigResource_type(rconfigs[i])),
- rd_kafka_ConfigResource_type(rconfigs[i]),
- rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt,
- rd_kafka_err2name(err), errstr2 ? errstr2 : "");
-
- test_print_ConfigEntry_array(entries, entry_cnt, 1);
-
- if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
- rd_kafka_ConfigResource_type(configs[i]) ||
- strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
- rd_kafka_ConfigResource_name(configs[i]))) {
- TEST_FAIL_LATER(
- "ConfigResource #%d: "
- "expected type %s name %s, "
- "got type %s name %s",
- i,
- rd_kafka_ResourceType_name(
- rd_kafka_ConfigResource_type(configs[i])),
- rd_kafka_ConfigResource_name(configs[i]),
- rd_kafka_ResourceType_name(
- rd_kafka_ConfigResource_type(rconfigs[i])),
- rd_kafka_ConfigResource_name(rconfigs[i]));
- fails++;
- continue;
- }
-
-
- if (err != exp_err[i]) {
- if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART &&
- max_retry_describe-- > 0) {
- TEST_WARN(
- "ConfigResource #%d: "
- "expected %s (%d), got %s (%s): "
- "this is typically a temporary "
- "error while the new resource "
- "is propagating: retrying",
- i, rd_kafka_err2name(exp_err[i]),
- exp_err[i], rd_kafka_err2name(err),
- errstr2 ? errstr2 : "");
- rd_kafka_event_destroy(rkev);
- rd_sleep(1);
- goto retry_describe;
- }
-
- TEST_FAIL_LATER(
- "ConfigResource #%d: "
- "expected %s (%d), got %s (%s)",
- i, rd_kafka_err2name(exp_err[i]), exp_err[i],
- rd_kafka_err2name(err), errstr2 ? errstr2 : "");
- fails++;
- }
- }
-
- TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_ConfigResource_destroy_array(configs, ci);
-
- TEST_LATER_CHECK();
-#undef MY_CONFRES_CNT
-
- SUB_TEST_PASS();
-}
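
DescribeConfigs reuses the same ConfigResource type as AlterConfigs; for broker resources the resource name is the broker id formatted as a string, as the tsprintf("%" PRId32, ...) calls above illustrate. A short sketch (the broker id "1" is illustrative):

    static void describe_broker_example(rd_kafka_t *rk, rd_kafka_queue_t *q) {
            rd_kafka_ConfigResource_t *cr = rd_kafka_ConfigResource_new(
                RD_KAFKA_RESOURCE_BROKER, "1");

            rd_kafka_DescribeConfigs(rk, &cr, 1, NULL /*options*/, q);
            rd_kafka_ConfigResource_destroy(cr);

            /* Poll q for RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT and walk
             * the entries with rd_kafka_ConfigResource_configs(). */
    }
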
-
-/**
- * @brief Test CreateAcls
- */
-static void
-do_test_CreateAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
- rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
- size_t resacl_cnt;
- test_timing_t timing;
- rd_kafka_resp_err_t err;
- char errstr[128];
- const char *errstr2;
- const char *user_test1 = "User:test1";
- const char *user_test2 = "User:test2";
- const char *base_topic_name;
- char topic1_name[512];
- char topic2_name[512];
- rd_kafka_AclBinding_t *acl_bindings[2];
- rd_kafka_ResourcePatternType_t pattern_type_first_topic =
- RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
- rd_kafka_AdminOptions_t *admin_options;
- rd_kafka_event_t *rkev_acl_create;
- const rd_kafka_CreateAcls_result_t *acl_res;
- const rd_kafka_acl_result_t **acl_res_acls;
- unsigned int i;
-
- SUB_TEST_QUICK();
-
- if (version == 0)
- pattern_type_first_topic = RD_KAFKA_RESOURCE_PATTERN_LITERAL;
-
- base_topic_name = test_mk_topic_name(__FUNCTION__, 1);
-
- rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name);
- rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name);
-
-
- acl_bindings[0] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_first_topic,
- user_test1, "*", RD_KAFKA_ACL_OPERATION_READ,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, NULL, 0);
- acl_bindings[1] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic2_name,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, "*",
- RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- NULL, 0);
-
-
- admin_options =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS);
- err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000,
- errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
- TIMING_START(&timing, "CreateAcls");
- TEST_SAY("Call CreateAcls\n");
- rd_kafka_CreateAcls(rk, acl_bindings, 2, admin_options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /*
- * Wait for result
- */
- rkev_acl_create = test_wait_admin_result(
- q, RD_KAFKA_EVENT_CREATEACLS_RESULT, 10000 + 1000);
-
- err = rd_kafka_event_error(rkev_acl_create);
- errstr2 = rd_kafka_event_error_string(rkev_acl_create);
-
- if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "Expected unsupported feature, not: %s",
- rd_kafka_err2name(err));
- TEST_ASSERT(!strcmp(errstr2,
- "ACLs Admin API (KIP-140) not supported "
- "by broker, requires broker "
- "version >= 0.11.0.0"),
- "Expected a different message, not: %s", errstr2);
- TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err));
- }
-
- if (version > 0 && test_broker_version < TEST_BRKVER(2, 0, 0, 0)) {
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "Expected unsupported feature, not: %s",
- rd_kafka_err2name(err));
- TEST_ASSERT(!strcmp(errstr2,
- "Broker only supports LITERAL "
- "resource pattern types"),
- "Expected a different message, not: %s", errstr2);
- TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err));
- }
-
- TEST_ASSERT(!err, "Expected success, not %s: %s",
- rd_kafka_err2name(err), errstr2);
-
- /*
- * Extract result
- */
- acl_res = rd_kafka_event_CreateAcls_result(rkev_acl_create);
- TEST_ASSERT(acl_res, "Expected CreateAcls result, not %s",
- rd_kafka_event_name(rkev_acl_create));
-
- acl_res_acls = rd_kafka_CreateAcls_result_acls(acl_res, &resacl_cnt);
- TEST_ASSERT(resacl_cnt == 2, "Expected 2, not %zu", resacl_cnt);
-
- for (i = 0; i < resacl_cnt; i++) {
- const rd_kafka_acl_result_t *acl_res_acl = *(acl_res_acls + i);
- const rd_kafka_error_t *error =
- rd_kafka_acl_result_error(acl_res_acl);
-
- TEST_ASSERT(!error,
- "Expected RD_KAFKA_RESP_ERR_NO_ERROR, not %s",
- rd_kafka_error_string(error));
- }
-
- rd_kafka_AdminOptions_destroy(admin_options);
- rd_kafka_event_destroy(rkev_acl_create);
- rd_kafka_AclBinding_destroy_array(acl_bindings, 2);
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- SUB_TEST_PASS();
-}
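
An AclBinding bundles resource, principal, host, operation and permission into a single object. The essential creation path from the test above, reduced to a sketch (principal and topic purely illustrative):

    static void create_acl_example(rd_kafka_t *rk, rd_kafka_queue_t *q) {
            rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
                RD_KAFKA_RESOURCE_TOPIC, "mytopic",
                RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice",
                "*" /*any host*/, RD_KAFKA_ACL_OPERATION_READ,
                RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, NULL, 0);

            rd_kafka_CreateAcls(rk, &acl, 1, NULL /*options*/, q);
            rd_kafka_AclBinding_destroy(acl);

            /* Poll q for RD_KAFKA_EVENT_CREATEACLS_RESULT and check each
             * rd_kafka_acl_result_error(). */
    }
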
-
-/**
- * @brief Test DescribeAcls
- */
-static void
-do_test_DescribeAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
- rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
- size_t acl_binding_results_cntp;
- test_timing_t timing;
- rd_kafka_resp_err_t err;
- uint32_t i;
- char errstr[128];
- const char *errstr2;
- const char *user_test1 = "User:test1";
- const char *user_test2 = "User:test2";
- const char *any_host = "*";
- const char *topic_name;
- rd_kafka_AclBinding_t *acl_bindings_create[2];
- rd_kafka_AclBinding_t *acl_bindings_describe;
- rd_kafka_AclBinding_t *acl;
- const rd_kafka_DescribeAcls_result_t *acl_describe_result;
- const rd_kafka_AclBinding_t **acl_binding_results;
- rd_kafka_ResourcePatternType_t pattern_type_first_topic_create;
- rd_bool_t broker_version1 =
- test_broker_version >= TEST_BRKVER(2, 0, 0, 0);
- rd_kafka_resp_err_t create_err;
- rd_kafka_AdminOptions_t *admin_options;
- rd_kafka_event_t *rkev_acl_describe;
- const rd_kafka_error_t *error;
-
- SUB_TEST_QUICK();
-
- if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
- SUB_TEST_SKIP(
- "Skipping DESCRIBE_ACLS test on unsupported "
- "broker version\n");
- return;
- }
-
- pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
- if (!broker_version1)
- pattern_type_first_topic_create =
- RD_KAFKA_RESOURCE_PATTERN_LITERAL;
-
- topic_name = test_mk_topic_name(__FUNCTION__, 1);
-
- acl_bindings_create[0] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic_name,
- pattern_type_first_topic_create, user_test1, any_host,
- RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- NULL, 0);
- acl_bindings_create[1] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic_name,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
- RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- NULL, 0);
-
- create_err =
- test_CreateAcls_simple(rk, NULL, acl_bindings_create, 2, NULL);
-
- TEST_ASSERT(!create_err, "create error: %s",
- rd_kafka_err2str(create_err));
-
- acl_bindings_describe = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic_name,
- RD_KAFKA_RESOURCE_PATTERN_MATCH, NULL, NULL,
- RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL,
- 0);
-
- admin_options =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS);
-        err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000,
-                                                        errstr, sizeof(errstr));
-        TEST_ASSERT(!err, "%s", errstr);
-
- TIMING_START(&timing, "DescribeAcls");
- TEST_SAY("Call DescribeAcls\n");
- rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /*
- * Wait for result
- */
- rkev_acl_describe = test_wait_admin_result(
- q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
-
- err = rd_kafka_event_error(rkev_acl_describe);
- errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
-
- if (!broker_version1) {
- TEST_ASSERT(
- err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "expected RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, not %s",
- rd_kafka_err2str(err));
- TEST_ASSERT(strcmp(errstr2,
- "Broker only supports LITERAL and ANY "
- "resource pattern types") == 0,
- "expected another message, not %s", errstr2);
- } else {
- TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
- errstr2);
- }
-
- if (!err) {
-
- acl_describe_result =
- rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
-
- TEST_ASSERT(acl_describe_result,
- "acl_describe_result should not be NULL");
-
- acl_binding_results_cntp = 0;
- acl_binding_results = rd_kafka_DescribeAcls_result_acls(
- acl_describe_result, &acl_binding_results_cntp);
-
- TEST_ASSERT(acl_binding_results_cntp == 2,
- "acl_binding_results_cntp should be 2, not %zu",
- acl_binding_results_cntp);
-
- for (i = 0; i < acl_binding_results_cntp; i++) {
- acl = (rd_kafka_AclBinding_t *)acl_binding_results[i];
-
- if (strcmp(rd_kafka_AclBinding_principal(acl),
- user_test1) == 0) {
- TEST_ASSERT(
- rd_kafka_AclBinding_restype(acl) ==
- RD_KAFKA_RESOURCE_TOPIC,
- "acl->restype should be "
- "RD_KAFKA_RESOURCE_TOPIC, not %s",
- rd_kafka_ResourceType_name(
- rd_kafka_AclBinding_restype(acl)));
- TEST_ASSERT(
- strcmp(rd_kafka_AclBinding_name(acl),
- topic_name) == 0,
- "acl->name should be %s, not %s",
- topic_name, rd_kafka_AclBinding_name(acl));
- TEST_ASSERT(
- rd_kafka_AclBinding_resource_pattern_type(
- acl) == pattern_type_first_topic_create,
- "acl->resource_pattern_type should be %s, "
- "not %s",
- rd_kafka_ResourcePatternType_name(
- pattern_type_first_topic_create),
- rd_kafka_ResourcePatternType_name(
- rd_kafka_AclBinding_resource_pattern_type(
- acl)));
- TEST_ASSERT(
- strcmp(rd_kafka_AclBinding_principal(acl),
- user_test1) == 0,
- "acl->principal should be %s, not %s",
- user_test1,
- rd_kafka_AclBinding_principal(acl));
-
- TEST_ASSERT(
- strcmp(rd_kafka_AclBinding_host(acl),
- any_host) == 0,
- "acl->host should be %s, not %s", any_host,
- rd_kafka_AclBinding_host(acl));
-
- TEST_ASSERT(
- rd_kafka_AclBinding_operation(acl) ==
- RD_KAFKA_ACL_OPERATION_READ,
- "acl->operation should be %s, not %s",
- rd_kafka_AclOperation_name(
- RD_KAFKA_ACL_OPERATION_READ),
- rd_kafka_AclOperation_name(
- rd_kafka_AclBinding_operation(acl)));
-
- TEST_ASSERT(
- rd_kafka_AclBinding_permission_type(acl) ==
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- "acl->permission_type should be %s, not %s",
- rd_kafka_AclPermissionType_name(
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
- rd_kafka_AclPermissionType_name(
- rd_kafka_AclBinding_permission_type(
- acl)));
-
- error = rd_kafka_AclBinding_error(acl);
- TEST_ASSERT(!error,
- "acl->error should be NULL, not %s",
- rd_kafka_error_string(error));
-
- } else {
- TEST_ASSERT(
- rd_kafka_AclBinding_restype(acl) ==
- RD_KAFKA_RESOURCE_TOPIC,
- "acl->restype should be "
- "RD_KAFKA_RESOURCE_TOPIC, not %s",
- rd_kafka_ResourceType_name(
- rd_kafka_AclBinding_restype(acl)));
- TEST_ASSERT(
- strcmp(rd_kafka_AclBinding_name(acl),
- topic_name) == 0,
- "acl->name should be %s, not %s",
- topic_name, rd_kafka_AclBinding_name(acl));
- TEST_ASSERT(
- rd_kafka_AclBinding_resource_pattern_type(
- acl) ==
- RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- "acl->resource_pattern_type should be %s, "
- "not %s",
- rd_kafka_ResourcePatternType_name(
- RD_KAFKA_RESOURCE_PATTERN_LITERAL),
- rd_kafka_ResourcePatternType_name(
- rd_kafka_AclBinding_resource_pattern_type(
- acl)));
- TEST_ASSERT(
- strcmp(rd_kafka_AclBinding_principal(acl),
- user_test2) == 0,
- "acl->principal should be %s, not %s",
- user_test2,
- rd_kafka_AclBinding_principal(acl));
-
- TEST_ASSERT(
- strcmp(rd_kafka_AclBinding_host(acl),
- any_host) == 0,
- "acl->host should be %s, not %s", any_host,
- rd_kafka_AclBinding_host(acl));
-
- TEST_ASSERT(
- rd_kafka_AclBinding_operation(acl) ==
- RD_KAFKA_ACL_OPERATION_WRITE,
- "acl->operation should be %s, not %s",
- rd_kafka_AclOperation_name(
- RD_KAFKA_ACL_OPERATION_WRITE),
- rd_kafka_AclOperation_name(
- rd_kafka_AclBinding_operation(acl)));
-
- TEST_ASSERT(
- rd_kafka_AclBinding_permission_type(acl) ==
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- "acl->permission_type should be %s, not %s",
- rd_kafka_AclPermissionType_name(
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
- rd_kafka_AclPermissionType_name(
- rd_kafka_AclBinding_permission_type(
- acl)));
-
-
- error = rd_kafka_AclBinding_error(acl);
- TEST_ASSERT(!error,
- "acl->error should be NULL, not %s",
- rd_kafka_error_string(error));
- }
- }
- }
-
- rd_kafka_AclBinding_destroy(acl_bindings_describe);
- rd_kafka_event_destroy(rkev_acl_describe);
-
- acl_bindings_describe = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic_name,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
- RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
- NULL, 0);
-
- TIMING_START(&timing, "DescribeAcls");
- rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /*
- * Wait for result
- */
- rkev_acl_describe = test_wait_admin_result(
- q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
-
- err = rd_kafka_event_error(rkev_acl_describe);
- errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
-
- TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
- errstr2);
-
- acl_describe_result =
- rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
-
- TEST_ASSERT(acl_describe_result,
- "acl_describe_result should not be NULL");
-
- acl_binding_results_cntp = 0;
- acl_binding_results = rd_kafka_DescribeAcls_result_acls(
- acl_describe_result, &acl_binding_results_cntp);
-
- TEST_ASSERT(acl_binding_results_cntp == 1,
- "acl_binding_results_cntp should be 1, not %zu",
- acl_binding_results_cntp);
-
- acl = (rd_kafka_AclBinding_t *)acl_binding_results[0];
-
- TEST_ASSERT(
- rd_kafka_AclBinding_restype(acl) == RD_KAFKA_RESOURCE_TOPIC,
- "acl->restype should be RD_KAFKA_RESOURCE_TOPIC, not %s",
- rd_kafka_ResourceType_name(rd_kafka_AclBinding_restype(acl)));
- TEST_ASSERT(strcmp(rd_kafka_AclBinding_name(acl), topic_name) == 0,
- "acl->name should be %s, not %s", topic_name,
- rd_kafka_AclBinding_name(acl));
- TEST_ASSERT(rd_kafka_AclBinding_resource_pattern_type(acl) ==
- RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- "acl->resource_pattern_type should be %s, not %s",
- rd_kafka_ResourcePatternType_name(
- RD_KAFKA_RESOURCE_PATTERN_LITERAL),
- rd_kafka_ResourcePatternType_name(
- rd_kafka_AclBinding_resource_pattern_type(acl)));
- TEST_ASSERT(strcmp(rd_kafka_AclBinding_principal(acl), user_test2) == 0,
- "acl->principal should be %s, not %s", user_test2,
- rd_kafka_AclBinding_principal(acl));
-
- TEST_ASSERT(strcmp(rd_kafka_AclBinding_host(acl), any_host) == 0,
- "acl->host should be %s, not %s", any_host,
- rd_kafka_AclBinding_host(acl));
-
- TEST_ASSERT(
- rd_kafka_AclBinding_permission_type(acl) ==
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- "acl->permission_type should be %s, not %s",
- rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
- rd_kafka_AclPermissionType_name(
- rd_kafka_AclBinding_permission_type(acl)));
-
- error = rd_kafka_AclBinding_error(acl);
- TEST_ASSERT(!error, "acl->error should be NULL, not %s",
- rd_kafka_error_string(error));
-
- rd_kafka_AclBinding_destroy(acl_bindings_describe);
- rd_kafka_event_destroy(rkev_acl_describe);
- rd_kafka_AdminOptions_destroy(admin_options);
- rd_kafka_AclBinding_destroy_array(acl_bindings_create, 2);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- SUB_TEST_PASS();
-}
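
Describe queries take an AclBindingFilter, which is constructed like a binding but additionally allows NULL and *_ANY wildcard fields. A filter matching every ACL in the cluster might look like this sketch:

    static void describe_all_acls_example(rd_kafka_t *rk,
                                          rd_kafka_queue_t *q) {
            rd_kafka_AclBindingFilter_t *filter =
                rd_kafka_AclBindingFilter_new(
                    RD_KAFKA_RESOURCE_ANY, NULL /*any name*/,
                    RD_KAFKA_RESOURCE_PATTERN_ANY, NULL /*any principal*/,
                    NULL /*any host*/, RD_KAFKA_ACL_OPERATION_ANY,
                    RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL, 0);

            rd_kafka_DescribeAcls(rk, filter, NULL /*options*/, q);
            /* Filters are destroyed like bindings. */
            rd_kafka_AclBinding_destroy(filter);
    }
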
-
-/**
- * @brief Count the ACLs matching a given ACL binding filter.
- */
-static size_t
-do_test_acls_count(rd_kafka_t *rk,
- rd_kafka_AclBindingFilter_t *acl_bindings_describe,
- rd_kafka_queue_t *q) {
- char errstr[128];
- rd_kafka_resp_err_t err;
- rd_kafka_AdminOptions_t *admin_options_describe;
- rd_kafka_event_t *rkev_acl_describe;
- const rd_kafka_DescribeAcls_result_t *acl_describe_result;
- const char *errstr2;
- size_t acl_binding_results_cntp;
-
- admin_options_describe =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS);
- rd_kafka_AdminOptions_set_request_timeout(admin_options_describe, 10000,
- errstr, sizeof(errstr));
-
- rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options_describe,
- q);
- /*
- * Wait for result
- */
- rkev_acl_describe = test_wait_admin_result(
- q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
-
- err = rd_kafka_event_error(rkev_acl_describe);
- errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
-
- TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
- errstr2);
-
- acl_describe_result =
- rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
-
- TEST_ASSERT(acl_describe_result,
- "acl_describe_result should not be NULL");
-
- acl_binding_results_cntp = 0;
- rd_kafka_DescribeAcls_result_acls(acl_describe_result,
- &acl_binding_results_cntp);
- rd_kafka_event_destroy(rkev_acl_describe);
- rd_kafka_AdminOptions_destroy(admin_options_describe);
-
- return acl_binding_results_cntp;
-}
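-
-/*
- * Illustrative sketch (hypothetical helper, not called by the tests):
- * count every ACL on a topic by passing an ANY/ANY filter to
- * do_test_acls_count() above. topic_name is a caller-supplied placeholder.
- */
-static size_t sketch_count_all_topic_acls(rd_kafka_t *rk,
- rd_kafka_queue_t *q,
- const char *topic_name) {
- size_t cnt;
- rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic_name,
- RD_KAFKA_RESOURCE_PATTERN_ANY, NULL /*any principal*/,
- NULL /*any host*/, RD_KAFKA_ACL_OPERATION_ANY,
- RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL, 0);
-
- cnt = do_test_acls_count(rk, filter, q);
- rd_kafka_AclBinding_destroy(filter);
-
- return cnt;
-}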
-
-/**
- * @brief Test DeleteAcls
- */
-static void
-do_test_DeleteAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
- rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
- test_timing_t timing;
- uint32_t i;
- char errstr[128];
- const char *user_test1 = "User:test1";
- const char *user_test2 = "User:test2";
- const char *any_host = "*";
- const char *base_topic_name;
- char topic1_name[512];
- char topic2_name[512];
- size_t acl_binding_results_cntp;
- size_t DeleteAcls_result_responses_cntp;
- size_t matching_acls_cntp;
- rd_kafka_AclBinding_t *acl_bindings_create[3];
- rd_kafka_AclBindingFilter_t *acl_bindings_describe;
- rd_kafka_AclBindingFilter_t *acl_bindings_delete;
- rd_kafka_event_t *rkev_acl_delete;
- rd_kafka_AdminOptions_t *admin_options_delete;
- const rd_kafka_DeleteAcls_result_t *acl_delete_result;
- const rd_kafka_DeleteAcls_result_response_t *
- *DeleteAcls_result_responses;
- const rd_kafka_DeleteAcls_result_response_t *DeleteAcls_result_response;
- const rd_kafka_AclBinding_t **matching_acls;
- const rd_kafka_AclBinding_t *matching_acl;
- rd_kafka_ResourcePatternType_t pattern_type_first_topic_create;
- rd_kafka_ResourcePatternType_t pattern_type_delete;
- rd_bool_t broker_version1 =
- test_broker_version >= TEST_BRKVER(2, 0, 0, 0);
- rd_kafka_resp_err_t create_err;
- rd_kafka_ResourceType_t restype;
- rd_kafka_ResourcePatternType_t resource_pattern_type;
- rd_kafka_AclOperation_t operation;
- rd_kafka_AclPermissionType_t permission_type;
- const char *name;
- const char *principal;
- const rd_kafka_error_t *error;
-
- SUB_TEST_QUICK();
-
- if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
- SUB_TEST_SKIP(
- "Skipping DELETE_ACLS test on unsupported "
- "broker version\n");
- return;
- }
-
- pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
- pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_MATCH;
- if (!broker_version1) {
- pattern_type_first_topic_create =
- RD_KAFKA_RESOURCE_PATTERN_LITERAL;
- pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_LITERAL;
- }
-
- base_topic_name = test_mk_topic_name(__FUNCTION__, 1);
-
- rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name);
- rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name);
-
- acl_bindings_create[0] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic1_name,
- pattern_type_first_topic_create, user_test1, any_host,
- RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- NULL, 0);
- acl_bindings_create[1] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic1_name,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
- RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- NULL, 0);
- acl_bindings_create[2] = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic2_name,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
- RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- NULL, 0);
-
- acl_bindings_delete = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_delete, NULL,
- NULL, RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
- NULL, 0);
-
- acl_bindings_describe = acl_bindings_delete;
-
- create_err =
- test_CreateAcls_simple(rk, NULL, acl_bindings_create, 3, NULL);
-
- TEST_ASSERT(!create_err, "create error: %s",
- rd_kafka_err2str(create_err));
-
- admin_options_delete =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEACLS);
- rd_kafka_AdminOptions_set_request_timeout(admin_options_delete, 10000,
- errstr, sizeof(errstr));
-
- acl_binding_results_cntp =
- do_test_acls_count(rk, acl_bindings_describe, q);
- TEST_ASSERT(acl_binding_results_cntp == 2,
- "acl_binding_results_cntp should not be 2, not %zu\n",
- acl_binding_results_cntp);
-
- TIMING_START(&timing, "DeleteAcls");
- rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete,
- q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /*
- * Wait for result
- */
- rkev_acl_delete = test_wait_admin_result(
- q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000);
-
- acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete);
-
- TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL");
-
- DeleteAcls_result_responses_cntp = 0;
- DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses(
- acl_delete_result, &DeleteAcls_result_responses_cntp);
-
- TEST_ASSERT(DeleteAcls_result_responses_cntp == 1,
- "DeleteAcls_result_responses_cntp should be 1, not %zu\n",
- DeleteAcls_result_responses_cntp);
-
- DeleteAcls_result_response = DeleteAcls_result_responses[0];
-
- TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error(
- DeleteAcls_result_response));
-
- matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
- DeleteAcls_result_response, &matching_acls_cntp);
-
- TEST_ASSERT(matching_acls_cntp == 2,
- "matching_acls_cntp should be 2, not %zu\n",
- matching_acls_cntp);
-
- for (i = 0; i < matching_acls_cntp; i++) {
- rd_kafka_ResourceType_t restype;
- rd_kafka_ResourcePatternType_t resource_pattern_type;
- rd_kafka_AclOperation_t operation;
- rd_kafka_AclPermissionType_t permission_type;
- const char *name;
- const char *principal;
-
- matching_acl = matching_acls[i];
- error = rd_kafka_AclBinding_error(matching_acl);
- restype = rd_kafka_AclBinding_restype(matching_acl);
- name = rd_kafka_AclBinding_name(matching_acl);
- resource_pattern_type =
- rd_kafka_AclBinding_resource_pattern_type(matching_acl);
- principal = rd_kafka_AclBinding_principal(matching_acl);
- operation = rd_kafka_AclBinding_operation(matching_acl);
- permission_type =
- rd_kafka_AclBinding_permission_type(matching_acl);
-
- TEST_ASSERT(!error, "expected success, not %s",
- rd_kafka_error_string(error));
- TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC,
- "expected RD_KAFKA_RESOURCE_TOPIC not %s",
- rd_kafka_ResourceType_name(restype));
- TEST_ASSERT(strcmp(name, topic1_name) == 0,
- "expected %s not %s", topic1_name, name);
- TEST_ASSERT(permission_type ==
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- "expected %s not %s",
- rd_kafka_AclPermissionType_name(
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
- rd_kafka_AclPermissionType_name(permission_type));
-
- if (strcmp(user_test1, principal) == 0) {
- TEST_ASSERT(resource_pattern_type ==
- pattern_type_first_topic_create,
- "expected %s not %s",
- rd_kafka_ResourcePatternType_name(
- pattern_type_first_topic_create),
- rd_kafka_ResourcePatternType_name(
- resource_pattern_type));
-
- TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_READ,
- "expected %s not %s",
- rd_kafka_AclOperation_name(
- RD_KAFKA_ACL_OPERATION_READ),
- rd_kafka_AclOperation_name(operation));
-
- } else {
- TEST_ASSERT(resource_pattern_type ==
- RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- "expected %s not %s",
- rd_kafka_ResourcePatternType_name(
- RD_KAFKA_RESOURCE_PATTERN_LITERAL),
- rd_kafka_ResourcePatternType_name(
- resource_pattern_type));
-
- TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE,
- "expected %s not %s",
- rd_kafka_AclOperation_name(
- RD_KAFKA_ACL_OPERATION_WRITE),
- rd_kafka_AclOperation_name(operation));
- }
- }
-
- acl_binding_results_cntp =
- do_test_acls_count(rk, acl_bindings_describe, q);
- TEST_ASSERT(acl_binding_results_cntp == 0,
- "acl_binding_results_cntp should be 0, not %zu\n",
- acl_binding_results_cntp);
-
- rd_kafka_event_destroy(rkev_acl_delete);
- rd_kafka_AclBinding_destroy(acl_bindings_delete);
-
- acl_bindings_delete = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic2_name,
- RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
- RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL,
- 0);
- acl_bindings_describe = acl_bindings_delete;
-
- TIMING_START(&timing, "DeleteAcls");
- rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete,
- q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- /*
- * Wait for result
- */
- rkev_acl_delete = test_wait_admin_result(
- q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000);
-
- acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete);
-
- TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL");
-
- DeleteAcls_result_responses_cntp = 0;
- DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses(
- acl_delete_result, &DeleteAcls_result_responses_cntp);
-
- TEST_ASSERT(DeleteAcls_result_responses_cntp == 1,
- "DeleteAcls_result_responses_cntp should be 1, not %zu\n",
- DeleteAcls_result_responses_cntp);
-
- DeleteAcls_result_response = DeleteAcls_result_responses[0];
-
- TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error(
- DeleteAcls_result_response));
-
- matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
- DeleteAcls_result_response, &matching_acls_cntp);
-
- TEST_ASSERT(matching_acls_cntp == 1,
- "matching_acls_cntp should be 1, not %zu\n",
- matching_acls_cntp);
-
- matching_acl = matching_acls[0];
- error = rd_kafka_AclBinding_error(matching_acl);
- restype = rd_kafka_AclBinding_restype(matching_acl);
- name = rd_kafka_AclBinding_name(matching_acl);
- resource_pattern_type =
- rd_kafka_AclBinding_resource_pattern_type(matching_acl);
- principal = rd_kafka_AclBinding_principal(matching_acl);
- operation = rd_kafka_AclBinding_operation(matching_acl);
- permission_type = rd_kafka_AclBinding_permission_type(matching_acl);
-
- TEST_ASSERT(!error, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
- rd_kafka_error_string(error));
- TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC,
- "expected RD_KAFKA_RESOURCE_TOPIC not %s",
- rd_kafka_ResourceType_name(restype));
- TEST_ASSERT(strcmp(name, topic2_name) == 0, "expected %s not %s",
- topic2_name, name);
- TEST_ASSERT(
- permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
- "expected %s not %s",
- rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
- rd_kafka_AclPermissionType_name(permission_type));
- TEST_ASSERT(strcmp(user_test2, principal) == 0, "expected %s not %s",
- user_test2, principal);
- TEST_ASSERT(resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- "expected %s not %s",
- rd_kafka_ResourcePatternType_name(
- RD_KAFKA_RESOURCE_PATTERN_LITERAL),
- rd_kafka_ResourcePatternType_name(resource_pattern_type));
-
- TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE,
- "expected %s not %s",
- rd_kafka_AclOperation_name(RD_KAFKA_ACL_OPERATION_WRITE),
- rd_kafka_AclOperation_name(operation));
-
- acl_binding_results_cntp =
- do_test_acls_count(rk, acl_bindings_describe, q);
- TEST_ASSERT(acl_binding_results_cntp == 0,
- "acl_binding_results_cntp should be 0, not %zu\n",
- acl_binding_results_cntp);
-
- rd_kafka_AclBinding_destroy(acl_bindings_delete);
- rd_kafka_event_destroy(rkev_acl_delete);
- rd_kafka_AdminOptions_destroy(admin_options_delete);
-
- rd_kafka_AclBinding_destroy_array(acl_bindings_create, 3);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- SUB_TEST_PASS();
-}
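-
-/*
- * Condensed, hypothetical sketch of the DeleteAcls flow exercised above:
- * on brokers >= 2.0.0 a MATCH filter removes both LITERAL ACLs with the
- * exact resource name and PREFIXED ACLs whose prefix matches it.
- * Result inspection and error handling are omitted for brevity.
- */
-static void sketch_delete_matching_acls(rd_kafka_t *rk,
- rd_kafka_queue_t *q,
- const char *topic_name) {
- rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
- RD_KAFKA_RESOURCE_TOPIC, topic_name,
- RD_KAFKA_RESOURCE_PATTERN_MATCH, NULL, NULL,
- RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
- NULL, 0);
- rd_kafka_event_t *rkev;
-
- rd_kafka_DeleteAcls(rk, &filter, 1, NULL /*default options*/, q);
-
- rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DELETEACLS_RESULT,
- 10000);
- rd_kafka_event_destroy(rkev);
- rd_kafka_AclBinding_destroy(filter);
-}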
-
-/**
- * @brief Verify that an unclean rd_kafka_destroy() does not hang.
- */
-static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) {
- rd_kafka_t *rk;
- char errstr[512];
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *q;
- rd_kafka_NewTopic_t *topic;
- test_timing_t t_destroy;
-
- SUB_TEST_QUICK("Test unclean destroy using %s",
- with_mainq ? "mainq" : "tempq");
-
- test_conf_init(&conf, NULL, 0);
-
- rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);
-
- if (with_mainq)
- q = rd_kafka_queue_get_main(rk);
- else
- q = rd_kafka_queue_new(rk);
-
- topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1), 3, 1,
- NULL, 0);
- rd_kafka_CreateTopics(rk, &topic, 1, NULL, q);
- rd_kafka_NewTopic_destroy(topic);
-
- rd_kafka_queue_destroy(q);
-
- TEST_SAY(
- "Giving rd_kafka_destroy() 5s to finish, "
- "despite Admin API request being processed\n");
- test_timeout_set(5);
- TIMING_START(&t_destroy, "rd_kafka_destroy()");
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_destroy);
-
- SUB_TEST_PASS();
-
- /* Restore timeout */
- test_timeout_set(60);
-}
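-
-/*
- * Minimal sketch of the bounded-shutdown pattern verified above: fail the
- * test if destroy takes longer than max_secs (values are illustrative).
- */
-static void sketch_bounded_destroy(rd_kafka_t *rk, int max_secs) {
- test_timing_t t_destroy;
-
- test_timeout_set(max_secs);
- TIMING_START(&t_destroy, "rd_kafka_destroy()");
- rd_kafka_destroy(rk);
- TIMING_STOP(&t_destroy);
- test_timeout_set(60); /* restore the default test timeout */
-}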
-
-
-
-/**
- * @brief Test deletion of records.
- */
-static void do_test_DeleteRecords(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int op_timeout) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_topic_partition_list_t *offsets = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- char errstr[512];
- const char *errstr2;
-#define MY_DEL_RECORDS_CNT 3
- rd_kafka_topic_partition_list_t *results = NULL;
- int i;
- const int partitions_cnt = 3;
- const int msgs_cnt = 100;
- char *topics[MY_DEL_RECORDS_CNT];
- rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_RECORDS_CNT] = {{0}};
- int exp_mdtopic_cnt = 0;
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_DeleteRecords_t *del_records;
- const rd_kafka_DeleteRecords_result_t *res;
-
- SUB_TEST_QUICK("%s DeleteRecords with %s, op_timeout %d",
- rd_kafka_name(rk), what, op_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (op_timeout != -1) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, op_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
- char pfx[32];
- char *topic;
-
- rd_snprintf(pfx, sizeof(pfx), "DeleteRecords-topic%d", i);
- topic = rd_strdup(test_mk_topic_name(pfx, 1));
-
- topics[i] = topic;
- exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
- }
-
- /* Create the topics first. */
- test_CreateTopics_simple(rk, NULL, topics, MY_DEL_RECORDS_CNT,
- partitions_cnt /*num_partitions*/, NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
- 15 * 1000);
-
- /* Produce 100 msgs / partition */
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
- int32_t partition;
- for (partition = 0; partition < partitions_cnt; partition++) {
- test_produce_msgs_easy(topics[i], 0, partition,
- msgs_cnt);
- }
- }
-
- offsets = rd_kafka_topic_partition_list_new(10);
-
- /* Wipe all data from topic 0 */
- for (i = 0; i < partitions_cnt; i++)
- rd_kafka_topic_partition_list_add(offsets, topics[0], i)
- ->offset = RD_KAFKA_OFFSET_END;
-
- /* Wipe all data from partition 0 in topic 1 */
- rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset =
- RD_KAFKA_OFFSET_END;
-
- /* Wipe some data from partition 2 in topic 1 */
- rd_kafka_topic_partition_list_add(offsets, topics[1], 2)->offset =
- msgs_cnt / 2;
-
- /* Not changing the offset (out of range) for topic 2 partition 0 */
- rd_kafka_topic_partition_list_add(offsets, topics[2], 0);
-
- /* Offset out of range for topic 2 partition 1 */
- rd_kafka_topic_partition_list_add(offsets, topics[2], 1)->offset =
- msgs_cnt + 1;
-
- del_records = rd_kafka_DeleteRecords_new(offsets);
-
- TIMING_START(&timing, "DeleteRecords");
- TEST_SAY("Call DeleteRecords\n");
- rd_kafka_DeleteRecords(rk, &del_records, 1, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- rd_kafka_DeleteRecords_destroy(del_records);
-
- TIMING_START(&timing, "DeleteRecords.queue_poll");
-
- /* Poll result queue for DeleteRecords result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
- TEST_SAY("DeleteRecords: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_DELETERECORDS_RESULT) {
- break;
- }
-
- rd_kafka_event_destroy(rkev);
- }
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteRecords_result(rkev);
- TEST_ASSERT(res, "expected DeleteRecords_result, not %s",
- rd_kafka_event_name(rkev));
-
- /* Verify expected error code */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == exp_err,
- "expected DeleteRecords to return %s, not %s (%s)",
- rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- TEST_SAY("DeleteRecords: returned %s (%s)\n", rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- results = rd_kafka_topic_partition_list_copy(
- rd_kafka_DeleteRecords_result_offsets(res));
-
- /* Sort both input and output list */
- rd_kafka_topic_partition_list_sort(offsets, NULL, NULL);
- rd_kafka_topic_partition_list_sort(results, NULL, NULL);
-
- TEST_SAY("Input partitions:\n");
- test_print_partition_list(offsets);
- TEST_SAY("Result partitions:\n");
- test_print_partition_list(results);
-
- TEST_ASSERT(offsets->cnt == results->cnt,
- "expected DeleteRecords_result_offsets to return %d items, "
- "not %d",
- offsets->cnt, results->cnt);
-
- for (i = 0; i < results->cnt; i++) {
- const rd_kafka_topic_partition_t *input = &offsets->elems[i];
- const rd_kafka_topic_partition_t *output = &results->elems[i];
- int64_t expected_offset = input->offset;
- rd_kafka_resp_err_t expected_err = 0;
-
- if (expected_offset == RD_KAFKA_OFFSET_END)
- expected_offset = msgs_cnt;
-
- /* Expect OFFSET_OUT_OF_RANGE for offsets below
- * RD_KAFKA_OFFSET_END or beyond the high watermark */
- if (input->offset < RD_KAFKA_OFFSET_END ||
- input->offset > msgs_cnt)
- expected_err = RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE;
-
- TEST_SAY("DeleteRecords Returned %s for %s [%" PRId32
- "] "
- "low-watermark = %d\n",
- rd_kafka_err2name(output->err), output->topic,
- output->partition, (int)output->offset);
-
- if (strcmp(output->topic, input->topic))
- TEST_FAIL_LATER(
- "Result order mismatch at #%d: "
- "expected topic %s, got %s",
- i, input->topic, output->topic);
-
- if (output->partition != input->partition)
- TEST_FAIL_LATER(
- "Result order mismatch at #%d: "
- "expected partition %d, got %d",
- i, input->partition, output->partition);
-
- if (output->err != expected_err)
- TEST_FAIL_LATER(
- "%s [%" PRId32
- "]: "
- "expected error code %d (%s), "
- "got %d (%s)",
- output->topic, output->partition, expected_err,
- rd_kafka_err2str(expected_err), output->err,
- rd_kafka_err2str(output->err));
-
- if (output->err == 0 && output->offset != expected_offset)
- TEST_FAIL_LATER("%s [%" PRId32
- "]: "
- "expected offset %" PRId64
- ", "
- "got %" PRId64,
- output->topic, output->partition,
- expected_offset, output->offset);
- }
-
- /* Check watermarks for partitions */
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
- int32_t partition;
- for (partition = 0; partition < partitions_cnt; partition++) {
- const rd_kafka_topic_partition_t *del =
- rd_kafka_topic_partition_list_find(
- results, topics[i], partition);
- int64_t expected_low = 0;
- int64_t expected_high = msgs_cnt;
- int64_t low, high;
-
- if (del && del->err == 0) {
- expected_low = del->offset;
- }
-
- err = rd_kafka_query_watermark_offsets(
- rk, topics[i], partition, &low, &high,
- tmout_multip(10000));
- if (err)
- TEST_FAIL(
- "query_watermark_offsets failed: "
- "%s\n",
- rd_kafka_err2str(err));
-
- if (low != expected_low)
- TEST_FAIL_LATER("For %s [%" PRId32
- "] expected "
- "a low watermark of %" PRId64
- ", got %" PRId64,
- topics[i], partition,
- expected_low, low);
-
- if (high != expected_high)
- TEST_FAIL_LATER("For %s [%" PRId32
- "] expected "
- "a high watermark of %" PRId64
- ", got %" PRId64,
- topics[i], partition,
- expected_high, high);
- }
- }
-
- rd_kafka_event_destroy(rkev);
-
- for (i = 0; i < MY_DEL_RECORDS_CNT; i++)
- rd_free(topics[i]);
-
- if (results)
- rd_kafka_topic_partition_list_destroy(results);
-
- if (offsets)
- rd_kafka_topic_partition_list_destroy(offsets);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef MY_DEL_RECORDS_CNT
-
- SUB_TEST_PASS();
-}
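-
-/*
- * Minimal sketch (placeholder arguments, no result verification) of the
- * DeleteRecords pattern used above: truncate a single partition so that
- * its low watermark becomes before_offset.
- */
-static void sketch_truncate_partition(rd_kafka_t *rk,
- rd_kafka_queue_t *q,
- const char *topic,
- int32_t partition,
- int64_t before_offset) {
- rd_kafka_topic_partition_list_t *offsets =
- rd_kafka_topic_partition_list_new(1);
- rd_kafka_DeleteRecords_t *del_records;
- rd_kafka_event_t *rkev;
-
- /* Records with offsets below before_offset are deleted. */
- rd_kafka_topic_partition_list_add(offsets, topic, partition)->offset =
- before_offset;
-
- del_records = rd_kafka_DeleteRecords_new(offsets);
- rd_kafka_DeleteRecords(rk, &del_records, 1, NULL, q);
- rd_kafka_DeleteRecords_destroy(del_records);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DELETERECORDS_RESULT,
- 10000);
- rd_kafka_event_destroy(rkev);
-}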
-
-/**
- * @brief Test deletion of groups.
- */
-
-typedef struct expected_group_result {
- char *group;
- rd_kafka_resp_err_t err;
-} expected_group_result_t;
-
-static void do_test_DeleteGroups(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int request_timeout) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- char errstr[512];
- const char *errstr2;
-#define MY_DEL_GROUPS_CNT 4
- int known_groups = MY_DEL_GROUPS_CNT - 1;
- int i;
- const int partitions_cnt = 1;
- const int msgs_cnt = 100;
- char *topic;
- rd_kafka_metadata_topic_t exp_mdtopic = {0};
- int64_t testid = test_id_generate();
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- const rd_kafka_group_result_t **results = NULL;
- expected_group_result_t expected[MY_DEL_GROUPS_CNT] = {{0}};
- rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT];
- const rd_kafka_DeleteGroups_result_t *res;
-
- SUB_TEST_QUICK("%s DeleteGroups with %s, request_timeout %d",
- rd_kafka_name(rk), what, request_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (request_timeout != -1) {
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, request_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- exp_mdtopic.topic = topic;
-
- /* Create the topics first. */
- test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000);
-
- /* Produce 100 msgs */
- test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
-
- for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
- char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- if (i < known_groups) {
- test_consume_msgs_easy(group, topic, testid, -1,
- msgs_cnt, NULL);
- expected[i].group = group;
- expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
- } else {
- expected[i].group = group;
- expected[i].err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
- }
- del_groups[i] = rd_kafka_DeleteGroup_new(group);
- }
-
- TIMING_START(&timing, "DeleteGroups");
- TEST_SAY("Call DeleteGroups\n");
- rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- TIMING_START(&timing, "DeleteGroups.queue_poll");
-
- /* Poll result queue for DeleteGroups result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
- TEST_SAY("DeleteGroups: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_DELETEGROUPS_RESULT) {
- break;
- }
-
- rd_kafka_event_destroy(rkev);
- }
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteGroups_result(rkev);
- TEST_ASSERT(res, "expected DeleteGroups_result, not %s",
- rd_kafka_event_name(rkev));
-
- /* Verify expected error code */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == exp_err,
- "expected DeleteGroups to return %s, not %s (%s)",
- rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- TEST_SAY("DeleteGroups: returned %s (%s)\n", rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- size_t cnt = 0;
- results = rd_kafka_DeleteGroups_result_groups(res, &cnt);
-
- TEST_ASSERT(MY_DEL_GROUPS_CNT == cnt,
- "expected DeleteGroups_result_groups to return %d items, "
- "not %" PRIusz,
- MY_DEL_GROUPS_CNT, cnt);
-
- for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
- const expected_group_result_t *exp = &expected[i];
- rd_kafka_resp_err_t exp_err = exp->err;
- const rd_kafka_group_result_t *act = results[i];
- rd_kafka_resp_err_t act_err =
- rd_kafka_error_code(rd_kafka_group_result_error(act));
- TEST_ASSERT(
- strcmp(exp->group, rd_kafka_group_result_name(act)) == 0,
- "Result order mismatch at #%d: expected group name to be "
- "%s, not %s",
- i, exp->group, rd_kafka_group_result_name(act));
- TEST_ASSERT(exp_err == act_err,
- "expected err=%d for group %s, not %d (%s)",
- exp_err, exp->group, act_err,
- rd_kafka_err2str(act_err));
- }
-
- rd_kafka_event_destroy(rkev);
-
- for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
- rd_kafka_DeleteGroup_destroy(del_groups[i]);
- rd_free(expected[i].group);
- }
-
- rd_free(topic);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef MY_DEL_GROUPS_CNT
-
- SUB_TEST_PASS();
-}
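-
-/*
- * Hypothetical single-group sketch of the DeleteGroups flow above. A group
- * id that never committed offsets is expected to fail with
- * GROUP_ID_NOT_FOUND, as the unknown group in the test demonstrates.
- */
-static void sketch_delete_one_group(rd_kafka_t *rk,
- rd_kafka_queue_t *q,
- const char *group) {
- rd_kafka_DeleteGroup_t *del_group = rd_kafka_DeleteGroup_new(group);
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteGroups_result_t *res;
- const rd_kafka_group_result_t **gres;
- size_t gres_cnt;
-
- rd_kafka_DeleteGroups(rk, &del_group, 1, NULL, q);
-
- rkev = test_wait_admin_result(q, RD_KAFKA_EVENT_DELETEGROUPS_RESULT,
- 10000);
- res = rd_kafka_event_DeleteGroups_result(rkev);
- gres = rd_kafka_DeleteGroups_result_groups(res, &gres_cnt);
- TEST_ASSERT(gres_cnt == 1, "expected 1 group result, not %" PRIusz,
- gres_cnt);
- if (rd_kafka_group_result_error(gres[0]))
- TEST_SAY("DeleteGroups(%s) failed: %s\n", group,
- rd_kafka_error_string(
- rd_kafka_group_result_error(gres[0])));
-
- rd_kafka_event_destroy(rkev);
- rd_kafka_DeleteGroup_destroy(del_group);
-}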
-
-/**
- * @brief Test ListConsumerGroups: create consumers for a set of groups,
- *        then list them and delete them at the end.
- */
-static void do_test_ListConsumerGroups(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int request_timeout,
- rd_bool_t match_states) {
-#define TEST_LIST_CONSUMER_GROUPS_CNT 4
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- size_t valid_cnt, error_cnt;
- rd_bool_t is_simple_consumer_group;
- rd_kafka_consumer_group_state_t state;
- char errstr[512];
- const char *errstr2, *group_id;
- char *list_consumer_groups[TEST_LIST_CONSUMER_GROUPS_CNT];
- const int partitions_cnt = 1;
- const int msgs_cnt = 100;
- size_t i, found;
- char *topic;
- rd_kafka_metadata_topic_t exp_mdtopic = {0};
- int64_t testid = test_id_generate();
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- const rd_kafka_ListConsumerGroups_result_t *res;
- const rd_kafka_ConsumerGroupListing_t **groups;
- rd_bool_t has_match_states =
- test_broker_version >= TEST_BRKVER(2, 7, 0, 0);
-
- SUB_TEST_QUICK(
- "%s ListConsumerGroups with %s, request_timeout %d"
- ", match_states %s",
- rd_kafka_name(rk), what, request_timeout, RD_STR_ToF(match_states));
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (request_timeout != -1) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
-
- if (match_states) {
- rd_kafka_consumer_group_state_t empty =
- RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY;
-
- TEST_CALL_ERROR__(
- rd_kafka_AdminOptions_set_match_consumer_group_states(
- options, &empty, 1));
- }
-
- TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
- options, request_timeout, errstr, sizeof(errstr)));
- }
-
-
- topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- exp_mdtopic.topic = topic;
-
- /* Create the topics first. */
- test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000);
-
- /* Produce 100 msgs */
- test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
-
- for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) {
- char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- test_consume_msgs_easy(group, topic, testid, -1, msgs_cnt,
- NULL);
- list_consumer_groups[i] = group;
- }
-
- TIMING_START(&timing, "ListConsumerGroups");
- TEST_SAY("Call ListConsumerGroups\n");
- rd_kafka_ListConsumerGroups(rk, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- TIMING_START(&timing, "ListConsumerGroups.queue_poll");
-
- /* Poll result queue for ListConsumerGroups result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
- TEST_SAY("ListConsumerGroups: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
- break;
- }
-
- rd_kafka_event_destroy(rkev);
- }
- /* Convert event to proper result */
- res = rd_kafka_event_ListConsumerGroups_result(rkev);
- TEST_ASSERT(res, "expected ListConsumerGroups_result, got %s",
- rd_kafka_event_name(rkev));
-
- /* Verify expected error code */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == exp_err,
- "expected ListConsumerGroups to return %s, got %s (%s)",
- rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- TEST_SAY("ListConsumerGroups: returned %s (%s)\n",
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- groups = rd_kafka_ListConsumerGroups_result_valid(res, &valid_cnt);
- rd_kafka_ListConsumerGroups_result_errors(res, &error_cnt);
-
- /* Other tests could be running */
- TEST_ASSERT(valid_cnt >= TEST_LIST_CONSUMER_GROUPS_CNT,
- "expected ListConsumerGroups to return at least %" PRId32
- " valid groups,"
- " got %zu",
- TEST_LIST_CONSUMER_GROUPS_CNT, valid_cnt);
-
- TEST_ASSERT(error_cnt == 0,
- "expected ListConsumerGroups to return 0 errors,"
- " got %zu",
- error_cnt);
-
- found = 0;
- for (i = 0; i < valid_cnt; i++) {
- int j;
- const rd_kafka_ConsumerGroupListing_t *group;
- group = groups[i];
- group_id = rd_kafka_ConsumerGroupListing_group_id(group);
- is_simple_consumer_group =
- rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
- group);
- state = rd_kafka_ConsumerGroupListing_state(group);
- for (j = 0; j < TEST_LIST_CONSUMER_GROUPS_CNT; j++) {
- if (!strcmp(list_consumer_groups[j], group_id)) {
- found++;
- TEST_ASSERT(!is_simple_consumer_group,
- "expected a normal group,"
- " got a simple group");
-
- if (!has_match_states)
- break;
-
- TEST_ASSERT(
- state ==
- RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY,
- "expected an Empty state,"
- " got state %s",
- rd_kafka_consumer_group_state_name(state));
- break;
- }
- }
- }
- TEST_ASSERT(found == TEST_LIST_CONSUMER_GROUPS_CNT,
- "expected to find %d"
- " started groups,"
- " got %" PRIusz,
- TEST_LIST_CONSUMER_GROUPS_CNT, found);
-
- rd_kafka_event_destroy(rkev);
-
- test_DeleteGroups_simple(rk, NULL, (char **)list_consumer_groups,
- TEST_LIST_CONSUMER_GROUPS_CNT, NULL);
-
- for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) {
- rd_free(list_consumer_groups[i]);
- }
-
- rd_free(topic);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef TEST_LIST_CONSUMER_GROUPS_CNT
-
- SUB_TEST_PASS();
-}
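-
-/*
- * Hypothetical sketch: list only groups in the Stable state with the same
- * match-states option used above. State matching is only supported by
- * brokers >= 2.7.0, mirroring the has_match_states check in the test.
- */
-static void sketch_list_stable_groups(rd_kafka_t *rk, rd_kafka_queue_t *q) {
- rd_kafka_consumer_group_state_t stable =
- RD_KAFKA_CONSUMER_GROUP_STATE_STABLE;
- rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
- rd_kafka_event_t *rkev;
- rd_kafka_error_t *error;
-
- error = rd_kafka_AdminOptions_set_match_consumer_group_states(
- options, &stable, 1);
- if (error)
- rd_kafka_error_destroy(error); /* option rejected */
-
- rd_kafka_ListConsumerGroups(rk, options, q);
-
- rkev = test_wait_admin_result(
- q, RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, 10000);
- rd_kafka_event_destroy(rkev);
- rd_kafka_AdminOptions_destroy(options);
-}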
-
-typedef struct expected_DescribeConsumerGroups_result {
- char *group_id;
- rd_kafka_resp_err_t err;
-} expected_DescribeConsumerGroups_result_t;
-
-
-/**
- * @brief Test DescribeConsumerGroups: create consumers for a set of groups,
- *        then describe them and delete them at the end.
- */
-static void do_test_DescribeConsumerGroups(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int request_timeout) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- char errstr[512];
- const char *errstr2;
-#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4
- int known_groups = TEST_DESCRIBE_CONSUMER_GROUPS_CNT - 1;
- int i;
- const int partitions_cnt = 1;
- const int msgs_cnt = 100;
- char *topic;
- rd_kafka_metadata_topic_t exp_mdtopic = {0};
- int64_t testid = test_id_generate();
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- const rd_kafka_ConsumerGroupDescription_t **results = NULL;
- expected_DescribeConsumerGroups_result_t
- expected[TEST_DESCRIBE_CONSUMER_GROUPS_CNT] = RD_ZERO_INIT;
- const char *describe_groups[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
- char group_instance_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512];
- char client_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512];
- rd_kafka_t *rks[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
- const rd_kafka_DescribeConsumerGroups_result_t *res;
- rd_bool_t has_group_instance_id =
- test_broker_version >= TEST_BRKVER(2, 4, 0, 0);
-
- SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, request_timeout %d",
- rd_kafka_name(rk), what, request_timeout);
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (request_timeout != -1) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, request_timeout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- exp_mdtopic.topic = topic;
-
- /* Create the topics first. */
- test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000);
-
- /* Produce 100 msgs */
- test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
-
- for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
- rd_kafka_conf_t *conf;
- char *group_id = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
- if (i < known_groups) {
- snprintf(group_instance_ids[i],
- sizeof(group_instance_ids[i]),
- "group_instance_id_%" PRId32, i);
- snprintf(client_ids[i], sizeof(client_ids[i]),
- "client_id_%" PRId32, i);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "client.id", client_ids[i]);
- test_conf_set(conf, "group.instance.id",
- group_instance_ids[i]);
- test_conf_set(conf, "session.timeout.ms", "5000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- rks[i] =
- test_create_consumer(group_id, NULL, conf, NULL);
- test_consumer_subscribe(rks[i], topic);
- /* Consume messages */
- test_consumer_poll("consumer", rks[i], testid, -1, -1,
- msgs_cnt, NULL);
- }
- expected[i].group_id = group_id;
- expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
- describe_groups[i] = group_id;
- }
-
- TIMING_START(&timing, "DescribeConsumerGroups");
- TEST_SAY("Call DescribeConsumerGroups\n");
- rd_kafka_DescribeConsumerGroups(
- rk, describe_groups, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- TIMING_START(&timing, "DescribeConsumerGroups.queue_poll");
-
- /* Poll result queue for DescribeConsumerGroups result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
- TEST_SAY("DescribeConsumerGroups: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) {
- break;
- }
-
- rd_kafka_event_destroy(rkev);
- }
- /* Convert event to proper result */
- res = rd_kafka_event_DescribeConsumerGroups_result(rkev);
- TEST_ASSERT(res, "expected DescribeConsumerGroups_result, got %s",
- rd_kafka_event_name(rkev));
-
- /* Verify expected error code */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(err == exp_err,
- "expected DescribeConsumerGroups to return %s, got %s (%s)",
- rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
- err ? errstr2 : "n/a");
-
- TEST_SAY("DescribeConsumerGroups: returned %s (%s)\n",
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- size_t cnt = 0;
- results = rd_kafka_DescribeConsumerGroups_result_groups(res, &cnt);
-
- TEST_ASSERT(
- TEST_DESCRIBE_CONSUMER_GROUPS_CNT == cnt,
- "expected DescribeConsumerGroups_result_groups to return %d items, "
- "got %" PRIusz,
- TEST_DESCRIBE_CONSUMER_GROUPS_CNT, cnt);
-
- for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
- expected_DescribeConsumerGroups_result_t *exp = &expected[i];
- rd_kafka_resp_err_t exp_err = exp->err;
- const rd_kafka_ConsumerGroupDescription_t *act = results[i];
- rd_kafka_resp_err_t act_err = rd_kafka_error_code(
- rd_kafka_ConsumerGroupDescription_error(act));
- rd_kafka_consumer_group_state_t state =
- rd_kafka_ConsumerGroupDescription_state(act);
- TEST_ASSERT(
- strcmp(exp->group_id,
- rd_kafka_ConsumerGroupDescription_group_id(act)) ==
- 0,
- "Result order mismatch at #%d: expected group id to be "
- "%s, got %s",
- i, exp->group_id,
- rd_kafka_ConsumerGroupDescription_group_id(act));
- if (i < known_groups) {
- int member_count;
- const rd_kafka_MemberDescription_t *member;
- const rd_kafka_MemberAssignment_t *assignment;
- const char *client_id;
- const char *group_instance_id;
- const rd_kafka_topic_partition_list_t *partitions;
-
- TEST_ASSERT(state ==
- RD_KAFKA_CONSUMER_GROUP_STATE_STABLE,
- "Expected Stable state, got %s.",
- rd_kafka_consumer_group_state_name(state));
-
- TEST_ASSERT(
- !rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
- act),
- "Expected a normal consumer group, got a simple "
- "one.");
-
- member_count =
- rd_kafka_ConsumerGroupDescription_member_count(act);
- TEST_ASSERT(member_count == 1,
- "Expected one member, got %d.",
- member_count);
-
- member =
- rd_kafka_ConsumerGroupDescription_member(act, 0);
-
- client_id =
- rd_kafka_MemberDescription_client_id(member);
- TEST_ASSERT(!strcmp(client_id, client_ids[i]),
- "Expected client id \"%s\","
- " got \"%s\".",
- client_ids[i], client_id);
-
- if (has_group_instance_id) {
- group_instance_id =
- rd_kafka_MemberDescription_group_instance_id(
- member);
- TEST_ASSERT(!strcmp(group_instance_id,
- group_instance_ids[i]),
- "Expected group instance id \"%s\","
- " got \"%s\".",
- group_instance_ids[i],
- group_instance_id);
- }
-
- assignment =
- rd_kafka_MemberDescription_assignment(member);
- TEST_ASSERT(assignment != NULL,
- "Expected non-NULL member assignment");
-
- partitions =
- rd_kafka_MemberAssignment_partitions(assignment);
- TEST_ASSERT(partitions != NULL,
- "Expected non-NULL member partitions");
-
- TEST_SAY(
- "Member client.id=\"%s\", "
- "group.instance.id=\"%s\", "
- "consumer_id=\"%s\", "
- "host=\"%s\", assignment:\n",
- rd_kafka_MemberDescription_client_id(member),
- rd_kafka_MemberDescription_group_instance_id(
- member),
- rd_kafka_MemberDescription_consumer_id(member),
- rd_kafka_MemberDescription_host(member));
- /* This is just to make sure the returned memory
- * is valid. */
- test_print_partition_list(partitions);
- } else {
- TEST_ASSERT(state == RD_KAFKA_CONSUMER_GROUP_STATE_DEAD,
- "Expected Dead state, got %s.",
- rd_kafka_consumer_group_state_name(state));
- }
- TEST_ASSERT(exp_err == act_err,
- "expected err=%d for group %s, got %d (%s)",
- exp_err, exp->group_id, act_err,
- rd_kafka_err2str(act_err));
- }
-
- rd_kafka_event_destroy(rkev);
-
- for (i = 0; i < known_groups; i++) {
- test_consumer_close(rks[i]);
- rd_kafka_destroy(rks[i]);
- }
-
- /* Wait the 5s session timeout plus 1s: with static group membership
- * the members are not removed until the session times out. */
- rd_sleep(6);
-
- test_DeleteGroups_simple(rk, NULL, (char **)describe_groups,
- known_groups, NULL);
-
- for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
- rd_free(expected[i].group_id);
- }
-
- rd_free(topic);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT
-
- SUB_TEST_PASS();
-}
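-
-/*
- * Illustrative helper sketch: print the members of one group description,
- * mirroring the member traversal in the verification loop above.
- */
-static void sketch_print_group_members(
- const rd_kafka_ConsumerGroupDescription_t *desc) {
- size_t i;
- size_t member_cnt =
- rd_kafka_ConsumerGroupDescription_member_count(desc);
-
- for (i = 0; i < member_cnt; i++) {
- const rd_kafka_MemberDescription_t *member =
- rd_kafka_ConsumerGroupDescription_member(desc, i);
-
- TEST_SAY("member #%d: client.id=\"%s\", consumer_id=\"%s\", "
- "host=\"%s\"\n",
- (int)i, rd_kafka_MemberDescription_client_id(member),
- rd_kafka_MemberDescription_consumer_id(member),
- rd_kafka_MemberDescription_host(member));
- }
-}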
-
-/**
- * @brief Test deletion of committed offsets.
- */
-static void do_test_DeleteConsumerGroupOffsets(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int req_timeout_ms,
- rd_bool_t sub_consumer) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_delete,
- *committed, *deleted, *subscription = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- char errstr[512];
- const char *errstr2;
-#define MY_TOPIC_CNT 3
- int i;
- const int partitions_cnt = 3;
- char *topics[MY_TOPIC_CNT];
- rd_kafka_metadata_topic_t exp_mdtopics[MY_TOPIC_CNT] = {{0}};
- int exp_mdtopic_cnt = 0;
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets;
- const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
- const rd_kafka_group_result_t **gres;
- size_t gres_cnt;
- rd_kafka_t *consumer;
- char *groupid;
-
- SUB_TEST_QUICK(
- "%s DeleteConsumerGroupOffsets with %s, req_timeout_ms %d%s",
- rd_kafka_name(rk), what, req_timeout_ms,
- sub_consumer ? ", with subscribing consumer" : "");
-
- if (sub_consumer)
- exp_err = RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC;
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (req_timeout_ms != -1) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, req_timeout_ms, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- subscription = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT);
-
- for (i = 0; i < MY_TOPIC_CNT; i++) {
- char pfx[64];
- char *topic;
-
- rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i);
- topic = rd_strdup(test_mk_topic_name(pfx, 1));
-
- topics[i] = topic;
- exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
-
- rd_kafka_topic_partition_list_add(subscription, topic,
- RD_KAFKA_PARTITION_UA);
- }
-
- groupid = topics[0];
-
- /* Create the topics first. */
- test_CreateTopics_simple(rk, NULL, topics, MY_TOPIC_CNT, partitions_cnt,
- NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
- 15 * 1000);
-
- rd_sleep(1); /* Additional wait time for cluster propagation */
-
- consumer = test_create_consumer(groupid, NULL, NULL, NULL);
-
- if (sub_consumer) {
- TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription));
- test_consumer_wait_assignment(consumer, rd_true);
- }
-
- /* Commit some offsets */
- orig_offsets = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT * 2);
- for (i = 0; i < MY_TOPIC_CNT * 2; i++)
- rd_kafka_topic_partition_list_add(orig_offsets, topics[i / 2],
- i % MY_TOPIC_CNT)
- ->offset = (i + 1) * 10;
-
- TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
-
- /* Verify committed offsets match */
- committed = rd_kafka_topic_partition_list_copy(orig_offsets);
- TEST_CALL_ERR__(
- rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
-
- if (test_partition_list_and_offsets_cmp(committed, orig_offsets)) {
- TEST_SAY("commit() list:\n");
- test_print_partition_list(orig_offsets);
- TEST_SAY("committed() list:\n");
- test_print_partition_list(committed);
- TEST_FAIL("committed offsets don't match");
- }
-
- rd_kafka_topic_partition_list_destroy(committed);
-
- /* Now delete second half of the commits */
- offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
- to_delete = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
- for (i = 0; i < orig_offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar;
- if (i < orig_offsets->cnt / 2) {
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- rktpar->offset = orig_offsets->elems[i].offset;
- } else {
- rktpar = rd_kafka_topic_partition_list_add(
- to_delete, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- rktpar->offset = RD_KAFKA_OFFSET_INVALID;
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- rktpar->offset = RD_KAFKA_OFFSET_INVALID;
- }
- }
-
- cgoffsets = rd_kafka_DeleteConsumerGroupOffsets_new(groupid, to_delete);
-
- TIMING_START(&timing, "DeleteConsumerGroupOffsets");
- TEST_SAY("Call DeleteConsumerGroupOffsets\n");
- rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets);
-
- TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll");
- /* Poll result queue for DeleteConsumerGroupOffsets result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
- TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT)
- break;
-
- rd_kafka_event_destroy(rkev);
- }
-
- /* Convert event to proper result */
- res = rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected DeleteConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
-
- /* Expecting no error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!err,
- "expected DeleteConsumerGroupOffsets to succeed, "
- "got %s (%s)",
- rd_kafka_err2name(err), err ? errstr2 : "n/a");
-
- TEST_SAY("DeleteConsumerGroupOffsets: returned %s (%s)\n",
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- gres =
- rd_kafka_DeleteConsumerGroupOffsets_result_groups(res, &gres_cnt);
- TEST_ASSERT(gres && gres_cnt == 1,
- "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
-
- deleted = rd_kafka_topic_partition_list_copy(
- rd_kafka_group_result_partitions(gres[0]));
-
- if (test_partition_list_and_offsets_cmp(deleted, to_delete)) {
- TEST_SAY("Result list:\n");
- test_print_partition_list(deleted);
- TEST_SAY("Partitions passed to DeleteConsumerGroupOffsets:\n");
- test_print_partition_list(to_delete);
- TEST_FAIL("deleted/requested offsets don't match");
- }
-
- /* Verify expected errors */
- for (i = 0; i < deleted->cnt; i++) {
- TEST_ASSERT_LATER(deleted->elems[i].err == exp_err,
- "Result %s [%" PRId32
- "] has error %s, "
- "expected %s",
- deleted->elems[i].topic,
- deleted->elems[i].partition,
- rd_kafka_err2name(deleted->elems[i].err),
- rd_kafka_err2name(exp_err));
- }
-
- TEST_LATER_CHECK();
-
- rd_kafka_topic_partition_list_destroy(deleted);
- rd_kafka_topic_partition_list_destroy(to_delete);
-
- rd_kafka_event_destroy(rkev);
-
-
- /* Verify committed offsets match */
- committed = rd_kafka_topic_partition_list_copy(orig_offsets);
- TEST_CALL_ERR__(
- rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
-
- TEST_SAY("Original committed offsets:\n");
- test_print_partition_list(orig_offsets);
-
- TEST_SAY("Committed offsets after delete:\n");
- test_print_partition_list(committed);
-
- rd_kafka_topic_partition_list_t *expected = offsets;
- if (sub_consumer)
- expected = orig_offsets;
-
- if (test_partition_list_and_offsets_cmp(committed, expected)) {
- TEST_SAY("expected list:\n");
- test_print_partition_list(expected);
- TEST_SAY("committed() list:\n");
- test_print_partition_list(committed);
- TEST_FAIL("committed offsets don't match");
- }
-
- rd_kafka_topic_partition_list_destroy(committed);
- rd_kafka_topic_partition_list_destroy(offsets);
- rd_kafka_topic_partition_list_destroy(orig_offsets);
- rd_kafka_topic_partition_list_destroy(subscription);
-
- for (i = 0; i < MY_TOPIC_CNT; i++)
- rd_free(topics[i]);
-
- rd_kafka_destroy(consumer);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef MY_TOPIC_CNT
-
- SUB_TEST_PASS();
-}
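-
-/*
- * Minimal sketch of the DeleteConsumerGroupOffsets call above for a single
- * partition; all identifiers are caller-supplied placeholders. The request
- * fails with GROUP_SUBSCRIBED_TO_TOPIC while a consumer in the group still
- * subscribes to the topic, as the sub_consumer variant verifies.
- */
-static void sketch_delete_committed_offset(rd_kafka_t *rk,
- rd_kafka_queue_t *q,
- const char *group,
- const char *topic,
- int32_t partition) {
- rd_kafka_topic_partition_list_t *parts =
- rd_kafka_topic_partition_list_new(1);
- rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets;
- rd_kafka_event_t *rkev;
-
- rd_kafka_topic_partition_list_add(parts, topic, partition);
-
- cgoffsets = rd_kafka_DeleteConsumerGroupOffsets_new(group, parts);
- rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, 1, NULL, q);
- rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets);
- rd_kafka_topic_partition_list_destroy(parts);
-
- rkev = test_wait_admin_result(
- q, RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, 10000);
- rd_kafka_event_destroy(rkev);
-}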
-
-
-/**
- * @brief Test altering of committed offsets.
- */
-static void do_test_AlterConsumerGroupOffsets(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int req_timeout_ms,
- rd_bool_t sub_consumer,
- rd_bool_t create_topics) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_alter,
- *committed, *alterd, *subscription = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- char errstr[512];
- const char *errstr2;
-#define TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3
- int i;
- const int partitions_cnt = 3;
- char *topics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT];
- rd_kafka_metadata_topic_t
- exp_mdtopics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}};
- int exp_mdtopic_cnt = 0;
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets;
- const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
- const rd_kafka_group_result_t **gres;
- size_t gres_cnt;
- rd_kafka_t *consumer = NULL;
- char *group_id;
-
- SUB_TEST_QUICK(
- "%s AlterConsumerGroupOffsets with %s, "
- "request_timeout %d%s",
- rd_kafka_name(rk), what, req_timeout_ms,
- sub_consumer ? ", with subscribing consumer" : "");
-
- if (!create_topics)
- exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
- else if (sub_consumer)
- exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
-
- if (sub_consumer && !create_topics)
- TEST_FAIL(
- "Can't set sub_consumer without also setting "
- "create_topics");
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (req_timeout_ms != -1) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, req_timeout_ms, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- subscription = rd_kafka_topic_partition_list_new(
- TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
-
- for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) {
- char pfx[64];
- char *topic;
-
- rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i);
- topic = rd_strdup(test_mk_topic_name(pfx, 1));
-
- topics[i] = topic;
- exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
-
- rd_kafka_topic_partition_list_add(subscription, topic,
- RD_KAFKA_PARTITION_UA);
- }
-
- group_id = topics[0];
-
- /* Create the topics first if needed. */
- if (create_topics) {
- test_CreateTopics_simple(
- rk, NULL, topics,
- TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT, partitions_cnt,
- NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt,
- NULL, 0, 15 * 1000);
-
- rd_sleep(1); /* Additional wait time for cluster propagation */
-
- consumer = test_create_consumer(group_id, NULL, NULL, NULL);
-
- if (sub_consumer) {
- TEST_CALL_ERR__(
- rd_kafka_subscribe(consumer, subscription));
- test_consumer_wait_assignment(consumer, rd_true);
- }
- }
-
- orig_offsets = rd_kafka_topic_partition_list_new(
- TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt);
- for (i = 0;
- i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt;
- i++) {
- rd_kafka_topic_partition_t *rktpar;
- rktpar = rd_kafka_topic_partition_list_add(
- orig_offsets, topics[i / partitions_cnt],
- i % partitions_cnt);
- rktpar->offset = (i + 1) * 10;
- rd_kafka_topic_partition_set_leader_epoch(rktpar, 1);
- }
-
- /* Commit some offsets, if the topics exist */
- if (create_topics) {
- TEST_CALL_ERR__(
- rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
-
- /* Verify committed offsets match */
- committed = rd_kafka_topic_partition_list_copy(orig_offsets);
- TEST_CALL_ERR__(rd_kafka_committed(consumer, committed,
- tmout_multip(5 * 1000)));
-
- if (test_partition_list_and_offsets_cmp(committed,
- orig_offsets)) {
- TEST_SAY("commit() list:\n");
- test_print_partition_list(orig_offsets);
- TEST_SAY("committed() list:\n");
- test_print_partition_list(committed);
- TEST_FAIL("committed offsets don't match");
- }
- rd_kafka_topic_partition_list_destroy(committed);
- }
-
- /* Now alter second half of the commits */
- offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
- to_alter = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
- for (i = 0; i < orig_offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar;
- if (i < orig_offsets->cnt / 2) {
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- rktpar->offset = orig_offsets->elems[i].offset;
- rd_kafka_topic_partition_set_leader_epoch(
- rktpar, rd_kafka_topic_partition_get_leader_epoch(
- &orig_offsets->elems[i]));
- } else {
- rktpar = rd_kafka_topic_partition_list_add(
- to_alter, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- rktpar->offset = 5;
- rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
- rktpar = rd_kafka_topic_partition_list_add(
- offsets, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- rktpar->offset = 5;
- rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
- }
- }
-
- cgoffsets = rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter);
-
- TIMING_START(&timing, "AlterConsumerGroupOffsets");
- TEST_SAY("Call AlterConsumerGroupOffsets\n");
- rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets);
-
- TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
- /* Poll result queue for AlterConsumerGroupOffsets result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
- TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
- break;
-
- rd_kafka_event_destroy(rkev);
- }
-
- /* Convert event to proper result */
- res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
-
- /* Expecting no error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!err,
- "expected AlterConsumerGroupOffsets to succeed, "
- "got %s (%s)",
- rd_kafka_err2name(err), err ? errstr2 : "n/a");
-
- TEST_SAY("AlterConsumerGroupOffsets: returned %s (%s)\n",
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- gres = rd_kafka_AlterConsumerGroupOffsets_result_groups(res, &gres_cnt);
- TEST_ASSERT(gres && gres_cnt == 1,
- "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
-
- alterd = rd_kafka_topic_partition_list_copy(
- rd_kafka_group_result_partitions(gres[0]));
-
- if (test_partition_list_and_offsets_cmp(alterd, to_alter)) {
- TEST_SAY("Result list:\n");
- test_print_partition_list(alterd);
- TEST_SAY("Partitions passed to AlterConsumerGroupOffsets:\n");
- test_print_partition_list(to_alter);
- TEST_FAIL("altered/requested offsets don't match");
- }
-
- /* Verify expected errors */
- for (i = 0; i < alterd->cnt; i++) {
- TEST_ASSERT_LATER(alterd->elems[i].err == exp_err,
- "Result %s [%" PRId32
- "] has error %s, "
- "expected %s",
- alterd->elems[i].topic,
- alterd->elems[i].partition,
- rd_kafka_err2name(alterd->elems[i].err),
- rd_kafka_err2name(exp_err));
- }
-
- TEST_LATER_CHECK();
-
- rd_kafka_topic_partition_list_destroy(alterd);
- rd_kafka_topic_partition_list_destroy(to_alter);
-
- rd_kafka_event_destroy(rkev);
-
-
- /* Verify committed offsets match, if topics exist. */
- if (create_topics) {
- committed = rd_kafka_topic_partition_list_copy(orig_offsets);
- TEST_CALL_ERR__(rd_kafka_committed(consumer, committed,
- tmout_multip(5 * 1000)));
-
- rd_kafka_topic_partition_list_t *expected = offsets;
- if (sub_consumer) {
- /* Alter fails with an active consumer */
- expected = orig_offsets;
- }
- TEST_SAY("Original committed offsets:\n");
- test_print_partition_list(orig_offsets);
-
- TEST_SAY("Committed offsets after alter:\n");
- test_print_partition_list(committed);
-
- if (test_partition_list_and_offsets_cmp(committed, expected)) {
- TEST_SAY("expected list:\n");
- test_print_partition_list(expected);
- TEST_SAY("committed() list:\n");
- test_print_partition_list(committed);
- TEST_FAIL("committed offsets don't match");
- }
- rd_kafka_topic_partition_list_destroy(committed);
- }
-
- rd_kafka_topic_partition_list_destroy(offsets);
- rd_kafka_topic_partition_list_destroy(orig_offsets);
- rd_kafka_topic_partition_list_destroy(subscription);
-
- for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++)
- rd_free(topics[i]);
-
- if (create_topics) /* consumer is created only if topics are. */
- rd_kafka_destroy(consumer);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-#undef TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT
-
- SUB_TEST_PASS();
-}
-
-/**
- * @brief Test listing of committed offsets.
- */
-static void do_test_ListConsumerGroupOffsets(const char *what,
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- int req_timeout_ms,
- rd_bool_t sub_consumer,
- rd_bool_t null_toppars) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_topic_partition_list_t *orig_offsets, *to_list, *committed,
- *listd, *subscription = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t err;
- char errstr[512];
- const char *errstr2;
-#define TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3
- int i;
- const int partitions_cnt = 3;
- char *topics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT];
- rd_kafka_metadata_topic_t
- exp_mdtopics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}};
- int exp_mdtopic_cnt = 0;
- test_timing_t timing;
- rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- rd_kafka_ListConsumerGroupOffsets_t *cgoffsets;
- const rd_kafka_ListConsumerGroupOffsets_result_t *res;
- const rd_kafka_group_result_t **gres;
- size_t gres_cnt;
- rd_kafka_t *consumer;
- char *group_id;
-
- SUB_TEST_QUICK(
- "%s ListConsumerGroupOffsets with %s, "
- "request timeout %d%s",
- rd_kafka_name(rk), what, req_timeout_ms,
- sub_consumer ? ", with subscribing consumer" : "");
-
- q = useq ? useq : rd_kafka_queue_new(rk);
-
- if (req_timeout_ms != -1) {
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, req_timeout_ms, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- }
-
-
- subscription = rd_kafka_topic_partition_list_new(
- TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
-
- for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) {
- char pfx[64];
- char *topic;
-
- rd_snprintf(pfx, sizeof(pfx), "LCGO-topic%d", i);
- topic = rd_strdup(test_mk_topic_name(pfx, 1));
-
- topics[i] = topic;
- exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
-
- rd_kafka_topic_partition_list_add(subscription, topic,
- RD_KAFKA_PARTITION_UA);
- }
-
- group_id = topics[0];
-
- /* Create the topics first. */
- test_CreateTopics_simple(rk, NULL, topics,
- TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT,
- partitions_cnt, NULL);
-
- /* Verify that topics are reported by metadata */
- test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
- 15 * 1000);
-
- rd_sleep(1); /* Additional wait time for cluster propagation */
-
- consumer = test_create_consumer(group_id, NULL, NULL, NULL);
-
- if (sub_consumer) {
- TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription));
- test_consumer_wait_assignment(consumer, rd_true);
- }
-
- /* Commit some offsets */
- orig_offsets = rd_kafka_topic_partition_list_new(
- TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2);
- for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2; i++) {
- rd_kafka_topic_partition_t *rktpar;
- rktpar = rd_kafka_topic_partition_list_add(
- orig_offsets, topics[i / 2],
- i % TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
- rktpar->offset = (i + 1) * 10;
- rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
- }
-
- TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
-
- /* Verify committed offsets match */
- committed = rd_kafka_topic_partition_list_copy(orig_offsets);
- TEST_CALL_ERR__(
- rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
-
- if (test_partition_list_and_offsets_cmp(committed, orig_offsets)) {
- TEST_SAY("commit() list:\n");
- test_print_partition_list(orig_offsets);
- TEST_SAY("committed() list:\n");
- test_print_partition_list(committed);
- TEST_FAIL("committed offsets don't match");
- }
-
- rd_kafka_topic_partition_list_destroy(committed);
-
- to_list = rd_kafka_topic_partition_list_new(orig_offsets->cnt);
- for (i = 0; i < orig_offsets->cnt; i++) {
- rd_kafka_topic_partition_list_add(
- to_list, orig_offsets->elems[i].topic,
- orig_offsets->elems[i].partition);
- }
-
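- /* Passing NULL instead of a partition list asks the broker for
- * all committed offsets of the group (per the Admin API contract),
- * rather than only the listed topic+partitions. */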
- if (null_toppars) {
- cgoffsets =
- rd_kafka_ListConsumerGroupOffsets_new(group_id, NULL);
- } else {
- cgoffsets =
- rd_kafka_ListConsumerGroupOffsets_new(group_id, to_list);
- }
-
- TIMING_START(&timing, "ListConsumerGroupOffsets");
- TEST_SAY("Call ListConsumerGroupOffsets\n");
- rd_kafka_ListConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
- TIMING_ASSERT_LATER(&timing, 0, 50);
-
- rd_kafka_ListConsumerGroupOffsets_destroy(cgoffsets);
-
- TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll");
- /* Poll result queue for ListConsumerGroupOffsets result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
- TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fms\n",
- rd_kafka_event_name(rkev),
- TIMING_DURATION(&timing) / 1000.0f);
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)
- break;
-
- rd_kafka_event_destroy(rkev);
- }
-
- /* Convert event to proper result */
- res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
-
- /* Expecting no request-level error */
- err = rd_kafka_event_error(rkev);
- errstr2 = rd_kafka_event_error_string(rkev);
- TEST_ASSERT(!err,
- "expected ListConsumerGroupOffsets to succeed, "
- "got %s (%s)",
- rd_kafka_err2name(err), err ? errstr2 : "n/a");
-
- TEST_SAY("ListConsumerGroupOffsets: returned %s (%s)\n",
- rd_kafka_err2str(err), err ? errstr2 : "n/a");
-
- gres = rd_kafka_ListConsumerGroupOffsets_result_groups(res, &gres_cnt);
- TEST_ASSERT(gres && gres_cnt == 1,
- "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
-
- listd = rd_kafka_topic_partition_list_copy(
- rd_kafka_group_result_partitions(gres[0]));
-
- if (test_partition_list_and_offsets_cmp(listd, orig_offsets)) {
- TEST_SAY("Result list:\n");
- test_print_partition_list(listd);
- TEST_SAY("Partitions passed to ListConsumerGroupOffsets:\n");
- test_print_partition_list(orig_offsets);
- TEST_FAIL("listd/requested offsets don't match");
- }
-
- /* Verify expected errors */
- for (i = 0; i < listd->cnt; i++) {
- TEST_ASSERT_LATER(listd->elems[i].err == exp_err,
- "Result %s [%" PRId32
- "] has error %s, "
- "expected %s",
- listd->elems[i].topic,
- listd->elems[i].partition,
- rd_kafka_err2name(listd->elems[i].err),
- rd_kafka_err2name(exp_err));
- }
-
- TEST_LATER_CHECK();
-
- rd_kafka_topic_partition_list_destroy(listd);
- rd_kafka_topic_partition_list_destroy(to_list);
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_topic_partition_list_destroy(orig_offsets);
- rd_kafka_topic_partition_list_destroy(subscription);
-
- for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++)
- rd_free(topics[i]);
-
- rd_kafka_destroy(consumer);
-
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- if (!useq)
- rd_kafka_queue_destroy(q);
-
- TEST_LATER_CHECK();
-
-#undef TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT
-
- SUB_TEST_PASS();
-}
-
-static void do_test_apis(rd_kafka_type_t cltype) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *mainq;
-
- /* Get the available brokers, but use a separate rd_kafka_t instance
- * so we don't jinx the tests by having up-to-date metadata. */
- avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt);
- TEST_SAY("%" PRIusz
- " brokers in cluster "
- "which will be used for replica sets\n",
- avail_broker_cnt);
-
- do_test_unclean_destroy(cltype, 0 /*tempq*/);
- do_test_unclean_destroy(cltype, 1 /*mainq*/);
-
- test_conf_init(&conf, NULL, 180);
- test_conf_set(conf, "socket.timeout.ms", "10000");
- rk = test_create_handle(cltype, conf);
-
- mainq = rd_kafka_queue_get_main(rk);
-
- /* Create topics */
- do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, 0);
- do_test_CreateTopics("temp queue, op timeout 15000", rk, NULL, 15000,
- 0);
- do_test_CreateTopics(
- "temp queue, op timeout 300, "
- "validate only",
- rk, NULL, 300, rd_true);
- do_test_CreateTopics("temp queue, op timeout 9000, validate_only", rk,
- NULL, 9000, rd_true);
- do_test_CreateTopics("main queue, options", rk, mainq, -1, 0);
-
- /* Delete topics */
- do_test_DeleteTopics("temp queue, op timeout 0", rk, NULL, 0);
- do_test_DeleteTopics("main queue, op timeout 15000", rk, mainq, 1500);
-
- if (test_broker_version >= TEST_BRKVER(1, 0, 0, 0)) {
- /* Create Partitions */
- do_test_CreatePartitions("temp queue, op timeout 6500", rk,
- NULL, 6500);
- do_test_CreatePartitions("main queue, op timeout 0", rk, mainq,
- 0);
- }
-
- /* CreateAcls */
- do_test_CreateAcls(rk, mainq, 0);
- do_test_CreateAcls(rk, mainq, 1);
-
- /* DescribeAcls */
- do_test_DescribeAcls(rk, mainq, 0);
- do_test_DescribeAcls(rk, mainq, 1);
-
- /* DeleteAcls */
- do_test_DeleteAcls(rk, mainq, 0);
- do_test_DeleteAcls(rk, mainq, 1);
-
- /* AlterConfigs */
- do_test_AlterConfigs(rk, mainq);
-
- /* DescribeConfigs */
- do_test_DescribeConfigs(rk, mainq);
-
- /* Delete records */
- do_test_DeleteRecords("temp queue, op timeout 0", rk, NULL, 0);
- do_test_DeleteRecords("main queue, op timeout 1500", rk, mainq, 1500);
-
- /* List groups */
- do_test_ListConsumerGroups("temp queue", rk, NULL, -1, rd_false);
- do_test_ListConsumerGroups("main queue", rk, mainq, 1500, rd_true);
-
- /* Describe groups */
- do_test_DescribeConsumerGroups("temp queue", rk, NULL, -1);
- do_test_DescribeConsumerGroups("main queue", rk, mainq, 1500);
-
- /* Delete groups */
- do_test_DeleteGroups("temp queue", rk, NULL, -1);
- do_test_DeleteGroups("main queue", rk, mainq, 1500);
-
- if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0)) {
- /* Delete committed offsets */
- do_test_DeleteConsumerGroupOffsets("temp queue", rk, NULL, -1,
- rd_false);
- do_test_DeleteConsumerGroupOffsets("main queue", rk, mainq,
- 1500, rd_false);
- do_test_DeleteConsumerGroupOffsets(
- "main queue", rk, mainq, 1500,
- rd_true /*with subscribing consumer*/);
-
- /* Alter committed offsets */
- do_test_AlterConsumerGroupOffsets("temp queue", rk, NULL, -1,
- rd_false, rd_true);
- do_test_AlterConsumerGroupOffsets("main queue", rk, mainq, 1500,
- rd_false, rd_true);
- do_test_AlterConsumerGroupOffsets(
- "main queue, nonexistent topics", rk, mainq, 1500, rd_false,
- rd_false /* don't create topics */);
- do_test_AlterConsumerGroupOffsets(
- "main queue", rk, mainq, 1500,
- rd_true, /*with subscribing consumer*/
- rd_true);
-
- /* List committed offsets */
- do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1,
- rd_false, rd_false);
- do_test_ListConsumerGroupOffsets(
- "main queue, op timeout "
- "1500",
- rk, mainq, 1500, rd_false, rd_false);
- do_test_ListConsumerGroupOffsets(
- "main queue", rk, mainq, 1500,
- rd_true /*with subscribing consumer*/, rd_false);
- do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1,
- rd_false, rd_true);
- do_test_ListConsumerGroupOffsets("main queue", rk, mainq, 1500,
- rd_false, rd_true);
- do_test_ListConsumerGroupOffsets(
- "main queue", rk, mainq, 1500,
- rd_true /*with subscribing consumer*/, rd_true);
- }
-
- rd_kafka_queue_destroy(mainq);
-
- rd_kafka_destroy(rk);
-
- free(avail_brokers);
-}
-
-
-int main_0081_admin(int argc, char **argv) {
-
- do_test_apis(RD_KAFKA_PRODUCER);
-
- if (test_quick) {
- TEST_SAY("Skipping further 0081 tests due to quick mode\n");
- return 0;
- }
-
- do_test_apis(RD_KAFKA_CONSUMER);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp
deleted file mode 100644
index 16eb5a21a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-/**
- * @brief Test fetch.max.bytes
- *
- * - Produce 30 messages of ~900 KB each to each of 3 partitions
- * - Set max.partition.fetch.bytes to 20 Meg
- * - Set fetch.max.bytes to 1 Meg
- * - Verify all messages are consumed without error.
- */
-
-
-static void do_test_fetch_max_bytes(void) {
- const int partcnt = 3;
- int msgcnt = 10 * partcnt;
- const int msgsize = 900 * 1024; /* Less than 1 Meg to account
- * for batch overhead */
- std::string errstr;
- RdKafka::ErrorCode err;
-
- std::string topic = Test::mk_topic_name("0082-fetch_max_bytes", 1);
-
- /* Produce messages to partitions */
- for (int32_t p = 0; p < (int32_t)partcnt; p++)
- test_produce_msgs_easy_size(topic.c_str(), 0, p, msgcnt, msgsize);
-
- /* Create consumer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "group.id", topic);
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- /* We try to fetch 20 Megs per partition, but only allow 1 Meg as total
- * response size, this ends up serving the first batch from the
- * first partition.
- * receive.message.max.bytes is set low to trigger the original bug,
- * but this value is now adjusted upwards automatically by rd_kafka_new()
- * to hold both fetch.max.bytes and the protocol / batching overhead.
- * Prior to the introduction of fetch.max.bytes the fetcher code
- * would use receive.message.max.bytes to limit the total Fetch response,
- * but due to batching overhead it would result in situations where
- * the consumer asked for 1000000 bytes and got 1000096 bytes batch, which
- * was higher than the 1000000 limit.
- * See https://github.com/edenhill/librdkafka/issues/1616
- *
- * With the added configuration strictness checks, a user-supplied
- * value is no longer over-written:
- * receive.message.max.bytes must be configured to be at least 512 bytes
- * larger than fetch.max.bytes.
- */
- Test::conf_set(conf, "max.partition.fetch.bytes", "20000000"); /* ~20MB */
- Test::conf_set(conf, "fetch.max.bytes", "1000000"); /* ~1MB */
- Test::conf_set(conf, "receive.message.max.bytes", "1000512"); /* ~1MB+512 */
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Subscribe */
- std::vector<std::string> topics;
- topics.push_back(topic);
- if ((err = c->subscribe(topics)))
- Test::Fail("subscribe failed: " + RdKafka::err2str(err));
-
- /* Start consuming */
- Test::Say("Consuming topic " + topic + "\n");
- int cnt = 0;
- while (cnt < msgcnt) {
- RdKafka::Message *msg = c->consume(tmout_multip(1000));
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- break;
-
- case RdKafka::ERR_NO_ERROR:
- cnt++;
- break;
-
- default:
- Test::Fail("Consume error: " + msg->errstr());
- break;
- }
-
- delete msg;
- }
- Test::Say("Done\n");
-
- c->close();
- delete c;
-}
-
-extern "C" {
-int main_0082_fetch_max_bytes(int argc, char **argv) {
- if (test_quick) {
- Test::Skip("Test skipped due to quick mode\n");
- return 0;
- }
-
- do_test_fetch_max_bytes();
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c
deleted file mode 100644
index 23ce79820..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests the queue callback IO event signalling.
- */
-
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * @brief Thread safe event counter */
-static struct {
- mtx_t lock;
- int count;
-} event_receiver;
-
-/**
- * @brief Event callback function. Check the opaque pointer and
- * increase the count of received events. */
-static void event_cb(rd_kafka_t *rk_p, void *opaque) {
- TEST_ASSERT(opaque == (void *)0x1234,
- "Opaque pointer is not as expected (got: %p)", opaque);
- mtx_lock(&event_receiver.lock);
- event_receiver.count += 1;
- mtx_unlock(&event_receiver.lock);
-}
-
-/**
- * @brief Wait for one or more events to be received.
- * Return 0 if no event was received within the timeout. */
-static int wait_event_cb(int timeout_secs) {
- int event_count = 0;
- for (; timeout_secs >= 0; timeout_secs--) {
- mtx_lock(&event_receiver.lock);
- event_count = event_receiver.count;
- event_receiver.count = 0;
- mtx_unlock(&event_receiver.lock);
- if (event_count > 0 || timeout_secs == 0)
- return event_count;
- rd_sleep(1);
- }
- return 0;
-}
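-
-/* A minimal usage sketch (illustration only, not part of the test):
- * rd_kafka_queue_cb_event_enable() registers a callback that is fired
- * from an internal librdkafka thread when events are enqueued on the
- * queue. The callback must only signal the application (as event_cb
- * above does with a mutex-protected counter) and must not call back
- * into librdkafka; the application then drains the queue itself.
- * Disabling by passing a NULL callback is assumed here. */
-#if 0 /* sketch, not compiled */
-static void example_queue_cb_usage(rd_kafka_t *rk_c) {
- rd_kafka_queue_t *queue = rd_kafka_queue_get_consumer(rk_c);
- rd_kafka_event_t *rkev;
-
- rd_kafka_queue_cb_event_enable(queue, event_cb, (void *)0x1234);
-
- /* ... once signalled, drain from the application thread: */
- while ((rkev = rd_kafka_queue_poll(queue, 0)))
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_queue_cb_event_enable(queue, NULL, NULL); /* disable */
- rd_kafka_queue_destroy(queue);
-}
-#endif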
-
-
-int main_0083_cb_event(int argc, char **argv) {
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *tconf;
- rd_kafka_t *rk_p, *rk_c;
- const char *topic;
- rd_kafka_topic_t *rkt_p;
- rd_kafka_queue_t *queue;
- uint64_t testid;
- int msgcnt = 100;
- int recvd = 0;
- int wait_multiplier = 1;
- rd_kafka_resp_err_t err;
- enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE;
- int callback_event_count;
- rd_kafka_event_t *rkev;
- int eventcnt = 0;
-
- mtx_init(&event_receiver.lock, mtx_plain);
-
- testid = test_id_generate();
- topic = test_mk_topic_name(__FUNCTION__, 1);
-
- rk_p = test_create_producer();
- rkt_p = test_create_producer_topic(rk_p, topic, NULL);
- err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000));
- TEST_ASSERT(!err, "Topic auto creation failed: %s",
- rd_kafka_err2str(err));
-
- test_conf_init(&conf, &tconf, 0);
- rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "enable.partition.eof", "false");
- /* Speed up propagation of new topics */
- test_conf_set(conf, "metadata.max.age.ms", "5000");
- test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
- rk_c = test_create_consumer(topic, NULL, conf, tconf);
-
- queue = rd_kafka_queue_get_consumer(rk_c);
-
- test_consumer_subscribe(rk_c, topic);
-
- rd_kafka_queue_cb_event_enable(queue, event_cb, (void *)0x1234);
-
- /**
- * 1) Wait for rebalance event
- * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
- * 3) Produce half the messages
- * 4) Expect CB
- * 5) Consume the available messages
- * 6) Wait 1 interval expecting no CB.
- * 7) Produce remaining half
- * 8) Expect CB
- * 9) Done.
- */
- while (recvd < msgcnt) {
- TEST_SAY("Waiting for event\n");
- callback_event_count = wait_event_cb(1 * wait_multiplier);
- TEST_ASSERT(callback_event_count <= 1,
- "Event cb called %d times", callback_event_count);
-
- if (callback_event_count == 1) {
- TEST_SAY("Events received: %d\n", callback_event_count);
-
- while ((rkev = rd_kafka_queue_poll(queue, 0))) {
- eventcnt++;
- switch (rd_kafka_event_type(rkev)) {
- case RD_KAFKA_EVENT_REBALANCE:
- TEST_SAY(
- "Got %s: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_err2str(
- rd_kafka_event_error(rkev)));
- if (expecting_io != _REBALANCE)
- TEST_FAIL(
- "Got Rebalance when "
- "expecting message\n");
- if (rd_kafka_event_error(rkev) ==
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- rd_kafka_assign(
- rk_c,
- rd_kafka_event_topic_partition_list(
- rkev));
- expecting_io = _NOPE;
- } else
- rd_kafka_assign(rk_c, NULL);
- break;
-
- case RD_KAFKA_EVENT_FETCH:
- if (expecting_io != _YEP)
- TEST_FAIL(
- "Did not expect more "
- "messages at %d/%d\n",
- recvd, msgcnt);
- recvd++;
- if (recvd == (msgcnt / 2) ||
- recvd == msgcnt)
- expecting_io = _NOPE;
- break;
-
- case RD_KAFKA_EVENT_ERROR:
- TEST_FAIL(
- "Error: %s\n",
- rd_kafka_event_error_string(rkev));
- break;
-
- default:
- TEST_SAY("Ignoring event %s\n",
- rd_kafka_event_name(rkev));
- }
-
- rd_kafka_event_destroy(rkev);
- }
- TEST_SAY("%d events, Consumed %d/%d messages\n",
- eventcnt, recvd, msgcnt);
-
- wait_multiplier = 1;
-
- } else {
- if (expecting_io == _REBALANCE) {
- continue;
- } else if (expecting_io == _YEP) {
- TEST_FAIL(
- "Did not see expected IO after %d/%d "
- "msgs\n",
- recvd, msgcnt);
- }
-
- TEST_SAY("Event wait timeout (good)\n");
- TEST_SAY("Got idle period, producing\n");
- test_produce_msgs(rk_p, rkt_p, testid, 0, recvd,
- msgcnt / 2, NULL, 10);
-
- expecting_io = _YEP;
- /* When running slowly (e.g., valgrind) it might take
- * some time before the first message is received
- * after producing. */
- wait_multiplier = 3;
- }
- }
- TEST_SAY("Done\n");
-
- rd_kafka_topic_destroy(rkt_p);
- rd_kafka_destroy(rk_p);
-
- rd_kafka_queue_destroy(queue);
- rd_kafka_consumer_close(rk_c);
- rd_kafka_destroy(rk_c);
-
- mtx_destroy(&event_receiver.lock);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c
deleted file mode 100644
index cd8bbf7de..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @name Test rd_kafka_destroy_flags()
- */
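-
-/* A minimal usage sketch (illustration only): rd_kafka_destroy_flags()
- * with RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE skips the final
- * leave-group/close sequence for a consumer handle; `rk` is an
- * assumed, already-created consumer. */
-#if 0 /* sketch, not compiled */
-rd_kafka_destroy_flags(rk, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
-#endif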
-
-
-#include "test.h"
-
-
-static RD_TLS int rebalance_cnt = 0;
-
-static void destroy_flags_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- rebalance_cnt++;
-
- TEST_SAY("rebalance_cb: %s with %d partition(s)\n",
- rd_kafka_err2str(err), parts->cnt);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- test_consumer_assign("rebalance", rk, parts);
- break;
-
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- test_consumer_unassign("rebalance", rk);
- break;
-
- default:
- TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
- }
-}
-
-struct df_args {
- rd_kafka_type_t client_type;
- int produce_cnt;
- int consumer_subscribe;
- int consumer_unsubscribe;
-};
-
-static void do_test_destroy_flags(const char *topic,
- int destroy_flags,
- int local_mode,
- const struct df_args *args) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- test_timing_t t_destroy;
-
- TEST_SAY(_C_MAG
- "[ test destroy_flags 0x%x for client_type %d, "
- "produce_cnt %d, subscribe %d, unsubscribe %d, "
- "%s mode ]\n" _C_CLR,
- destroy_flags, args->client_type, args->produce_cnt,
- args->consumer_subscribe, args->consumer_unsubscribe,
- local_mode ? "local" : "broker");
-
- test_conf_init(&conf, NULL, 20);
-
- if (local_mode)
- test_conf_set(conf, "bootstrap.servers", "");
-
- if (args->client_type == RD_KAFKA_PRODUCER) {
-
- rk = test_create_handle(args->client_type, conf);
-
- if (args->produce_cnt > 0) {
- rd_kafka_topic_t *rkt;
- int msgcounter = 0;
-
- rkt = test_create_producer_topic(rk, topic, NULL);
- test_produce_msgs_nowait(
- rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0,
- args->produce_cnt, NULL, 100, 0, &msgcounter);
- rd_kafka_topic_destroy(rkt);
- }
-
- } else {
- int i;
-
- TEST_ASSERT(args->client_type == RD_KAFKA_CONSUMER);
-
- rk = test_create_consumer(topic, destroy_flags_rebalance_cb,
- conf, NULL);
-
- if (args->consumer_subscribe) {
- test_consumer_subscribe(rk, topic);
-
- if (!local_mode) {
- TEST_SAY("Waiting for assignment\n");
- while (rebalance_cnt == 0)
- test_consumer_poll_once(rk, NULL, 1000);
- }
- }
-
- for (i = 0; i < 5; i++)
- test_consumer_poll_once(rk, NULL, 100);
-
- if (args->consumer_unsubscribe) {
- /* Test that calling rd_kafka_unsubscribe immediately
- * prior to rd_kafka_destroy_flags doesn't cause the
- * latter to hang. */
- TEST_SAY(_C_YEL "Calling rd_kafka_unsubscribe\n"_C_CLR);
- rd_kafka_unsubscribe(rk);
- }
- }
-
- rebalance_cnt = 0;
- TEST_SAY(_C_YEL "Calling rd_kafka_destroy_flags(0x%x)\n" _C_CLR,
- destroy_flags);
- TIMING_START(&t_destroy, "rd_kafka_destroy_flags(0x%x)", destroy_flags);
- rd_kafka_destroy_flags(rk, destroy_flags);
- TIMING_STOP(&t_destroy);
-
- if (destroy_flags & RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE)
- TIMING_ASSERT_LATER(&t_destroy, 0, 200);
- else
- TIMING_ASSERT_LATER(&t_destroy, 0, 1000);
-
- if (args->consumer_subscribe &&
- !(destroy_flags & RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE)) {
- if (!local_mode)
- TEST_ASSERT(rebalance_cnt > 0,
- "expected final rebalance callback");
- } else
- TEST_ASSERT(rebalance_cnt == 0,
- "expected no rebalance callbacks, got %d",
- rebalance_cnt);
-
- TEST_SAY(_C_GRN
- "[ test destroy_flags 0x%x for client_type %d, "
- "produce_cnt %d, subscribe %d, unsubscribe %d, "
- "%s mode: PASS ]\n" _C_CLR,
- destroy_flags, args->client_type, args->produce_cnt,
- args->consumer_subscribe, args->consumer_unsubscribe,
- local_mode ? "local" : "broker");
-}
-
-
-/**
- * @brief Destroy with flags
- */
-static void destroy_flags(int local_mode) {
- const struct df_args args[] = {
- {RD_KAFKA_PRODUCER, 0, 0, 0},
- {RD_KAFKA_PRODUCER, test_quick ? 100 : 10000, 0, 0},
- {RD_KAFKA_CONSUMER, 0, 1, 0},
- {RD_KAFKA_CONSUMER, 0, 1, 1},
- {RD_KAFKA_CONSUMER, 0, 0, 0}};
- const int flag_combos[] = {0, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE};
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- const rd_bool_t can_subscribe =
- test_broker_version >= TEST_BRKVER(0, 9, 0, 0);
- int i, j;
-
- /* Create the topic to avoid not-yet-auto-created-topics being
- * subscribed to (and thus raising an error). */
- if (!local_mode) {
- test_create_topic(NULL, topic, 3, 1);
- test_wait_topic_exists(NULL, topic, 5000);
- }
-
- for (i = 0; i < (int)RD_ARRAYSIZE(args); i++) {
- for (j = 0; j < (int)RD_ARRAYSIZE(flag_combos); j++) {
- if (!can_subscribe && (args[i].consumer_subscribe ||
- args[i].consumer_unsubscribe))
- continue;
- do_test_destroy_flags(topic, flag_combos[j], local_mode,
- &args[i]);
- }
- }
-}
-
-
-
-int main_0084_destroy_flags_local(int argc, char **argv) {
- destroy_flags(1 /*no brokers*/);
- return 0;
-}
-
-int main_0084_destroy_flags(int argc, char **argv) {
- destroy_flags(0 /*with brokers*/);
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp
deleted file mode 100644
index a342478c1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-
-static RdKafka::Producer *producer;
-static RdKafka::KafkaConsumer *consumer;
-static std::string topic;
-
-static void assert_all_headers_match(RdKafka::Headers *actual,
- const RdKafka::Headers *expected) {
- if (!actual) {
- Test::Fail("Expected RdKafka::Message to contain headers");
- }
- if (actual->size() != expected->size()) {
- Test::Fail(tostr() << "Expected headers length to equal "
- << expected->size() << " instead equals "
- << actual->size() << "\n");
- }
-
- std::vector<RdKafka::Headers::Header> actual_headers = actual->get_all();
- std::vector<RdKafka::Headers::Header> expected_headers = expected->get_all();
- Test::Say(3, tostr() << "Header size " << actual_headers.size() << "\n");
- for (size_t i = 0; i < actual_headers.size(); i++) {
- RdKafka::Headers::Header actual_header = actual_headers[i];
- const RdKafka::Headers::Header expected_header = expected_headers[i];
- std::string actual_key = actual_header.key();
- std::string actual_value =
- std::string(actual_header.value_string(), actual_header.value_size());
- std::string expected_key = expected_header.key();
- std::string expected_value =
- std::string(expected_header.value_string(), expected_header.value_size());
-
- Test::Say(3, tostr() << "Expected Key " << expected_key << ", Expected val "
- << expected_value << ", Actual key " << actual_key
- << ", Actual val " << actual_value << "\n");
-
- if (actual_key != expected_key) {
- Test::Fail(tostr() << "Header key does not match, expected '"
- << actual_key << "' but got '" << expected_key
- << "'\n");
- }
- if (actual_value != expected_value) {
- Test::Fail(tostr() << "Header value does not match, expected '"
- << actual_value << "' but got '" << expected_value
- << "'\n");
- }
- }
-}
-
-static void test_headers(RdKafka::Headers *produce_headers,
- const RdKafka::Headers *compare_headers) {
- RdKafka::ErrorCode err;
-
- err = producer->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY,
- (void *)"message", 7, (void *)"key", 3, 0,
- produce_headers, NULL);
- if (err)
- Test::Fail("produce() failed: " + RdKafka::err2str(err));
-
- producer->flush(tmout_multip(10 * 1000));
-
- if (producer->outq_len() > 0)
- Test::Fail(tostr() << "Expected producer to be flushed, "
- << producer->outq_len() << " messages remain");
-
- int cnt = 0;
- bool running = true;
-
- while (running) {
- RdKafka::Message *msg = consumer->consume(10 * 1000);
-
- if (msg->err() == RdKafka::ERR_NO_ERROR) {
- cnt++;
- RdKafka::Headers *headers = msg->headers();
- if (compare_headers->size() > 0) {
- assert_all_headers_match(headers, compare_headers);
- } else {
- if (headers != 0) {
- Test::Fail("Expected headers to return a NULL pointer");
- }
- }
- running = false;
- } else {
- Test::Fail("consume() failed: " + msg->errstr());
- }
- delete msg;
- }
-}
-
-static void test_headers(int num_hdrs) {
- Test::Say(tostr() << "Test " << num_hdrs
- << " headers in consumed message.\n");
- RdKafka::Headers *produce_headers = RdKafka::Headers::create();
- RdKafka::Headers *compare_headers = RdKafka::Headers::create();
- for (int i = 0; i < num_hdrs; ++i) {
- std::stringstream key_s;
- key_s << "header_" << i;
- std::string key = key_s.str();
-
- if ((i % 4) == 0) {
- /* NULL value */
- produce_headers->add(key, NULL, 0);
- compare_headers->add(key, NULL, 0);
- } else if ((i % 5) == 0) {
- /* Empty value, use different methods for produce
- * and compare to make sure they behave the same way. */
- std::string val = "";
- produce_headers->add(key, val);
- compare_headers->add(key, "", 0);
- } else if ((i % 6) == 0) {
- /* Binary value (no nul-term) */
- produce_headers->add(key, "binary", 6);
- compare_headers->add(key, "binary"); /* auto-nul-terminated */
- } else {
- /* Standard string value */
- std::stringstream val_s;
- val_s << "value_" << i;
- std::string val = val_s.str();
- produce_headers->add(key, val);
- compare_headers->add(key, val);
- }
- }
- test_headers(produce_headers, compare_headers);
- delete compare_headers;
-}
-
-static void test_duplicate_keys() {
- Test::Say("Test multiple headers with duplicate keys.\n");
- int num_hdrs = 4;
- RdKafka::Headers *produce_headers = RdKafka::Headers::create();
- RdKafka::Headers *compare_headers = RdKafka::Headers::create();
- for (int i = 0; i < num_hdrs; ++i) {
- std::string dup_key = "dup_key";
- std::stringstream val_s;
- val_s << "value_" << i;
- std::string val = val_s.str();
- produce_headers->add(dup_key, val);
- compare_headers->add(dup_key, val);
- }
- test_headers(produce_headers, compare_headers);
- delete compare_headers;
-}
-
-static void test_remove_after_add() {
- Test::Say("Test removing after adding headers.\n");
- RdKafka::Headers *headers = RdKafka::Headers::create();
-
- // Add one unique key
- std::string key_one = "key1";
- std::string val_one = "val_one";
- headers->add(key_one, val_one);
-
- // Add a second unique key
- std::string key_two = "key2";
- std::string val_two = "val_two";
- headers->add(key_two, val_one);
-
- // Assert header length is 2
- size_t expected_size = 2;
- if (headers->size() != expected_size) {
- Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
- << ", instead got " << headers->size() << "\n");
- }
-
- // Remove key_one and assert headers == 1
- headers->remove(key_one);
- size_t expected_remove_size = 1;
- if (headers->size() != expected_remove_size) {
- Test::Fail(tostr() << "Expected header->size() to equal "
- << expected_remove_size << ", instead got "
- << headers->size() << "\n");
- }
-
- delete headers;
-}
-
-static void test_remove_all_duplicate_keys() {
- Test::Say("Test removing duplicate keys removes all headers.\n");
- RdKafka::Headers *headers = RdKafka::Headers::create();
-
- // Add one unique key
- std::string key_one = "key1";
- std::string val_one = "val_one";
- headers->add(key_one, val_one);
-
- // Add 2 duplicate keys
- std::string dup_key = "dup_key";
- std::string val_two = "val_two";
- headers->add(dup_key, val_one);
- headers->add(dup_key, val_two);
-
- // Assert header length is 3
- size_t expected_size = 3;
- if (headers->size() != expected_size) {
- Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
- << ", instead got " << headers->size() << "\n");
- }
-
- // Remove dup_key and assert headers == 1
- headers->remove(dup_key);
- size_t expected_size_remove = 1;
- if (headers->size() != expected_size_remove) {
- Test::Fail(tostr() << "Expected header->size() to equal "
- << expected_size_remove << ", instead got "
- << headers->size() << "\n");
- }
-
- delete headers;
-}
-
-static void test_get_last_gives_last_added_val() {
- Test::Say("Test get_last returns the last added value of duplicate keys.\n");
- RdKafka::Headers *headers = RdKafka::Headers::create();
-
- // Add two duplicate keys
- std::string dup_key = "dup_key";
- std::string val_one = "val_one";
- std::string val_two = "val_two";
- std::string val_three = "val_three";
- headers->add(dup_key, val_one);
- headers->add(dup_key, val_two);
- headers->add(dup_key, val_three);
-
- // Assert header length is 3
- size_t expected_size = 3;
- if (headers->size() != expected_size) {
- Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
- << ", instead got " << headers->size() << "\n");
- }
-
- // Get last of duplicate key and assert it equals val_three
- RdKafka::Headers::Header last = headers->get_last(dup_key);
- std::string value = std::string(last.value_string());
- if (value != val_three) {
- Test::Fail(tostr() << "Expected get_last to return " << val_two
- << " as the value of the header instead got " << value
- << "\n");
- }
-
- delete headers;
-}
-
-static void test_get_of_key_returns_all() {
- Test::Say("Test get returns all the headers of a duplicate key.\n");
- RdKafka::Headers *headers = RdKafka::Headers::create();
-
- // Add two duplicate keys
- std::string unique_key = "unique";
- std::string dup_key = "dup_key";
- std::string val_one = "val_one";
- std::string val_two = "val_two";
- std::string val_three = "val_three";
- headers->add(unique_key, val_one);
- headers->add(dup_key, val_one);
- headers->add(dup_key, val_two);
- headers->add(dup_key, val_three);
-
- // Assert header length is 4
- size_t expected_size = 4;
- if (headers->size() != expected_size) {
- Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
- << ", instead got " << headers->size() << "\n");
- }
-
- // Get all of the duplicate key
- std::vector<RdKafka::Headers::Header> get = headers->get(dup_key);
- size_t expected_get_size = 3;
- if (get.size() != expected_get_size) {
- Test::Fail(tostr() << "Expected header->size() to equal "
- << expected_get_size << ", instead got "
- << headers->size() << "\n");
- }
-
- delete headers;
-}
-
-static void test_failed_produce() {
- RdKafka::Headers *headers = RdKafka::Headers::create();
- headers->add("my", "header");
-
- RdKafka::ErrorCode err;
-
- err = producer->produce(topic, 999 /* invalid partition */,
- RdKafka::Producer::RK_MSG_COPY, (void *)"message", 7,
- (void *)"key", 3, 0, headers, NULL);
- if (!err)
- Test::Fail("Expected produce() to fail");
-
- delete headers;
-}
-
-static void test_assignment_op() {
- Test::Say("Test Header assignment operator\n");
-
- RdKafka::Headers *headers = RdKafka::Headers::create();
-
- headers->add("abc", "123");
- headers->add("def", "456");
-
- RdKafka::Headers::Header h = headers->get_last("abc");
- h = headers->get_last("def");
- RdKafka::Headers::Header h2 = h;
- h = headers->get_last("nope");
- RdKafka::Headers::Header h3 = h;
- h = headers->get_last("def");
-
- delete headers;
-}
-
-
-extern "C" {
-int main_0085_headers(int argc, char **argv) {
- topic = Test::mk_topic_name("0085-headers", 1);
-
- RdKafka::Conf *conf;
- std::string errstr;
-
- Test::conf_init(&conf, NULL, 0);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- Test::conf_set(conf, "group.id", topic);
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
-
- delete conf;
-
- std::vector<RdKafka::TopicPartition *> parts;
- parts.push_back(RdKafka::TopicPartition::create(
- topic, 0, RdKafka::Topic::OFFSET_BEGINNING));
- RdKafka::ErrorCode err = c->assign(parts);
- if (err != RdKafka::ERR_NO_ERROR)
- Test::Fail("assign() failed: " + RdKafka::err2str(err));
- RdKafka::TopicPartition::destroy(parts);
-
- producer = p;
- consumer = c;
-
- test_headers(0);
- test_headers(1);
- test_headers(261);
- test_duplicate_keys();
- test_remove_after_add();
- test_remove_all_duplicate_keys();
- test_get_last_gives_last_added_val();
- test_get_of_key_returns_all();
- test_failed_produce();
- test_assignment_op();
-
- c->close();
- delete c;
- delete p;
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c
deleted file mode 100644
index 4dbf937f3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "../src/rdkafka_protocol.h"
-
-/**
- * @name Test rd_kafka_purge()
- *
- * Local test:
- * - produce 29 messages (that will be held up in queues),
- * for specific partitions and UA.
- * - purge(INFLIGHT) => no change in len()
- * - purge(QUEUE) => len() should drop to 0, dr errs should be ERR__PURGE_QUEUE
- *
- * Remote test (WITH_SOCKEM):
- * - Limit in-flight messages to 10
- * - Produce 29 messages to the same partition, in batches of 10.
- * - First batch succeeds, then sets a 50 s delay
- * - Second batch times out in flight
- * - Third batch isn't completed and times out in queue
- * - purge(QUEUE) => len should drop to 10, dr err ERR__PURGE_QUEUE
- * - purge(INFLIGHT|QUEUE) => len should drop to 0, ERR__PURGE_INFLIGHT
- */
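-
-/* A minimal usage sketch (illustration only): purging a producer's
- * outstanding messages before destroying it. The flags and error
- * codes are the public librdkafka API; the handle setup is assumed. */
-#if 0 /* sketch, not compiled */
-static void example_purge(rd_kafka_t *rk) {
- rd_kafka_resp_err_t err;
-
- /* Purge both queued and in-flight messages. */
- err = rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE |
- RD_KAFKA_PURGE_F_INFLIGHT);
- if (err)
- TEST_FAIL("purge failed: %s", rd_kafka_err2str(err));
-
- /* Serve the resulting delivery reports: queued messages carry
- * ERR__PURGE_QUEUE, in-flight messages ERR__PURGE_INFLIGHT. */
- rd_kafka_poll(rk, 0);
-}
-#endif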
-
-
-static const int msgcnt = 29;
-struct waitmsgs {
- rd_kafka_resp_err_t exp_err[29];
- int cnt;
-};
-
-static mtx_t produce_req_lock;
-static cnd_t produce_req_cnd;
-static int produce_req_cnt = 0;
-
-
-#if WITH_SOCKEM
-
-int test_sockfd = 0;
-
-static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- void *ic_opaque) {
-
- /* Save the socket fd from the first (ApiVersion) request so the
- * Produce connection can be delayed later */
- if (ApiKey == RD_KAFKAP_ApiVersion) {
- test_sockfd = sockfd;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static rd_kafka_resp_err_t on_response_received(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err,
- void *ic_opaque) {
- /* Delay the saved socket once the first Produce response
- * has been received */
- if (ApiKey == RD_KAFKAP_Produce) {
- mtx_lock(&produce_req_lock);
- produce_req_cnt++;
- cnd_broadcast(&produce_req_cnd);
- mtx_unlock(&produce_req_lock);
- test_socket_sockem_set(test_sockfd, "delay", 50000);
- }
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_resp_err_t err;
- err = rd_kafka_interceptor_add_on_request_sent(rk, "catch_producer_req",
- on_request_sent, NULL);
- if (!err) {
- rd_kafka_interceptor_add_on_response_received(
- rk, "catch_api_version_resp", on_response_received, NULL);
- }
- return err;
-}
-#endif
-
-
-
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- int msgid;
- struct waitmsgs *waitmsgs = rkmessage->_private;
-
- TEST_ASSERT(waitmsgs->cnt > 0, "wait_msg_cnt is zero on DR");
-
- waitmsgs->cnt--;
-
- TEST_ASSERT(rkmessage->len == sizeof(msgid),
- "invalid message size %" PRIusz ", expected sizeof(int)",
- rkmessage->len);
-
- memcpy(&msgid, rkmessage->payload, rkmessage->len);
-
- TEST_ASSERT(msgid >= 0 && msgid < msgcnt, "msgid %d out of range 0..%d",
- msgid, msgcnt - 1);
-
- TEST_ASSERT((int)waitmsgs->exp_err[msgid] != 12345,
- "msgid %d delivered twice", msgid);
-
- TEST_SAY("DeliveryReport for msg #%d: %s\n", msgid,
- rd_kafka_err2name(rkmessage->err));
-
- if (rkmessage->err != waitmsgs->exp_err[msgid]) {
- TEST_FAIL_LATER("Expected message #%d to fail with %s, not %s",
- msgid,
- rd_kafka_err2str(waitmsgs->exp_err[msgid]),
- rd_kafka_err2str(rkmessage->err));
- }
-
- /* Indicate already seen */
- waitmsgs->exp_err[msgid] = (rd_kafka_resp_err_t)12345;
-}
-
-
-
-static void purge_and_expect(const char *what,
- int line,
- rd_kafka_t *rk,
- int purge_flags,
- struct waitmsgs *waitmsgs,
- int exp_remain,
- const char *reason) {
- test_timing_t t_purge;
- rd_kafka_resp_err_t err;
-
- TEST_SAY(
- "%s:%d: purge(0x%x): %s: "
- "expecting %d messages to remain when done\n",
- what, line, purge_flags, reason, exp_remain);
- TIMING_START(&t_purge, "%s:%d: purge(0x%x)", what, line, purge_flags);
- err = rd_kafka_purge(rk, purge_flags);
- TIMING_STOP(&t_purge);
-
- TEST_ASSERT(!err, "purge(0x%x) at %d failed: %s", purge_flags, line,
- rd_kafka_err2str(err));
-
- rd_kafka_poll(rk, 0);
- TEST_ASSERT(waitmsgs->cnt == exp_remain,
- "%s:%d: expected %d messages remaining, not %d", what, line,
- exp_remain, waitmsgs->cnt);
-}
-
-
-/**
- * @brief Don't treat ERR__GAPLESS_GUARANTEE as a fatal error
- */
-static int gapless_is_not_fatal_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason) {
- return err != RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE;
-}
-
-static void
-do_test_purge(const char *what, int remote, int idempotence, int gapless) {
- const char *topic = test_mk_topic_name("0086_purge", 0);
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- int i;
- rd_kafka_resp_err_t err;
- struct waitmsgs waitmsgs = RD_ZERO_INIT;
-
-#if !WITH_SOCKEM
- if (remote) {
- TEST_SKIP("No sockem support\n");
- return;
- }
-#endif
-
- TEST_SAY(_C_MAG "Test rd_kafka_purge(): %s\n" _C_CLR, what);
-
- test_conf_init(&conf, NULL, 20);
-
- test_conf_set(conf, "batch.num.messages", "10");
- test_conf_set(conf, "max.in.flight", "1");
- test_conf_set(conf, "linger.ms", "5000");
- test_conf_set(conf, "enable.idempotence",
- idempotence ? "true" : "false");
- test_conf_set(conf, "enable.gapless.guarantee",
- gapless ? "true" : "false");
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
-
- if (remote) {
-#if WITH_SOCKEM
- test_socket_enable(conf);
- rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
- on_new_producer, NULL);
-#endif
-
- if (idempotence && !gapless)
- test_curr->is_fatal_cb = gapless_is_not_fatal_cb;
-
- mtx_init(&produce_req_lock, mtx_plain);
- cnd_init(&produce_req_cnd);
- } else {
- test_conf_set(conf, "bootstrap.servers", NULL);
- }
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("Producing %d messages to topic %s\n", msgcnt, topic);
-
- for (i = 0; i < msgcnt; i++) {
- int32_t partition;
-
- if (remote) {
- /* We need all messages in the same partition
- * so that remaining messages are queued
- * up behind the first messageset */
- partition = 0;
- } else {
- partition = (i < 20 ? i % 3 : RD_KAFKA_PARTITION_UA);
- }
-
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(partition),
- RD_KAFKA_V_VALUE((void *)&i, sizeof(i)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_OPAQUE(&waitmsgs), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev(#%d) failed: %s", i,
- rd_kafka_err2str(err));
-
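- /* Expected outcome per message: in remote mode the first 10 are
- * delivered before the delay kicks in, the next 10 are purged
- * while in flight, and the rest are purged from the queue.
- * In local mode everything is purged from the queue. */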
- waitmsgs.exp_err[i] =
- (remote && i < 10
- ? RD_KAFKA_RESP_ERR_NO_ERROR
- : remote && i < 20 ? RD_KAFKA_RESP_ERR__PURGE_INFLIGHT
- : RD_KAFKA_RESP_ERR__PURGE_QUEUE);
-
- waitmsgs.cnt++;
- }
-
-
- if (remote) {
- /* Wait for ProduceRequest to be sent */
- mtx_lock(&produce_req_lock);
- cnd_timedwait_ms(&produce_req_cnd, &produce_req_lock,
- 15 * 1000);
- TEST_ASSERT(produce_req_cnt > 0,
- "First Produce request should've been sent by now");
- mtx_unlock(&produce_req_lock);
-
- purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_QUEUE,
- &waitmsgs, 10,
- "in-flight messages should not be purged");
-
- purge_and_expect(
- what, __LINE__, rk,
- RD_KAFKA_PURGE_F_INFLIGHT | RD_KAFKA_PURGE_F_QUEUE,
- &waitmsgs, 0, "all messages should have been purged");
- } else {
- purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_INFLIGHT,
- &waitmsgs, msgcnt,
- "no messagess should have been purged");
-
- purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_QUEUE,
- &waitmsgs, 0,
- "no messagess should have been purged");
- }
-
-
- rd_kafka_destroy(rk);
-
- TEST_LATER_CHECK();
-}
-
-
-int main_0086_purge_remote(int argc, char **argv) {
- const rd_bool_t has_idempotence =
- test_broker_version >= TEST_BRKVER(0, 11, 0, 0);
-
- do_test_purge("remote", 1 /*remote*/, 0 /*idempotence*/,
- 0 /*!gapless*/);
-
- if (has_idempotence) {
- do_test_purge("remote,idempotence", 1 /*remote*/,
- 1 /*idempotence*/, 0 /*!gapless*/);
- do_test_purge("remote,idempotence,gapless", 1 /*remote*/,
- 1 /*idempotence*/, 1 /*gapless*/);
- }
- return 0;
-}
-
-
-int main_0086_purge_local(int argc, char **argv) {
- do_test_purge("local", 0 /*local*/, 0, 0);
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c
deleted file mode 100644
index c71b5a69f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#if WITH_SOCKEM
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * @name Verify #1985:
- *
- * Previously known topic transitions to UNKNOWN when metadata times out,
- * new messages are put on UA, when brokers come up again and metadata
- * is retrieved the UA messages must be produced.
- */
-
-static rd_atomic32_t refuse_connect;
-
-
-/**
- * @brief Sockem connect, called from **internal librdkafka thread** through
- * librdkafka's connect_cb
- */
-static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
- if (rd_atomic32_get(&refuse_connect) > 0)
- return -1;
- else
- return 0;
-}
-
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- /* Ignore connectivity errors since we'll be bringing down
- * connectivity on purpose.
- * The SASL authenticator would otherwise interpret a connection
- * going down in the auth state as the broker not supporting
- * SASL PLAIN. */
- TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
- err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
- err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- return 0;
- return 1;
-}
-
-static int msg_dr_cnt = 0;
-static int msg_dr_fail_cnt = 0;
-
-static void
-dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
- msg_dr_cnt++;
- TEST_SAYL(3, "Delivery for message %.*s: %s\n", (int)rkmessage->len,
- (const char *)rkmessage->payload,
- rd_kafka_err2name(rkmessage->err));
-
- if (rkmessage->err) {
- TEST_FAIL_LATER("Expected message to succeed, got %s",
- rd_kafka_err2str(rkmessage->err));
- msg_dr_fail_cnt++;
- }
-}
-
-
-
-int main_0088_produce_metadata_timeout(int argc, char **argv) {
- int64_t testid;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- const char *topic =
- test_mk_topic_name("0088_produce_metadata_timeout", 1);
- int msgcnt = 0;
- rd_kafka_conf_t *conf;
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
- rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
- test_conf_set(conf, "metadata.max.age.ms", "10000");
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "-1");
- test_conf_set(conf, "linger.ms", "5000");
- test_conf_set(conf, "batch.num.messages", "5");
-
- test_socket_enable(conf);
- test_curr->connect_cb = connect_cb;
- test_curr->is_fatal_cb = is_fatal_cb;
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* Create topic with single partition, for simplicity. */
- test_create_topic(rk, topic, 1, 1);
-
- rkt = rd_kafka_topic_new(rk, topic, NULL);
-
- /* Produce first set of messages and wait for delivery */
- test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt,
- 20, NULL, 0, 0, &msgcnt);
- while (msg_dr_cnt < 5)
- rd_kafka_poll(rk, 1000);
-
- TEST_SAY(_C_YEL
- "Disconnecting sockets and "
- "refusing future connections\n");
- rd_atomic32_set(&refuse_connect, 1);
- test_socket_close_all(test_curr, 1 /*reinit*/);
-
-
- /* Wait for metadata timeout */
- TEST_SAY("Waiting for metadata timeout\n");
- rd_sleep(10 + 5);
-
- /* These messages will be put on the UA queue */
- test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt,
- 20, NULL, 0, 0, &msgcnt);
-
- /* Restore the connection(s) when metadata has timed out. */
- TEST_SAY(_C_YEL "Allowing connections\n");
- rd_atomic32_set(&refuse_connect, 0);
-
- rd_sleep(3);
- test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt,
- 20, NULL, 0, 0, &msgcnt);
-
- test_flush(rk, 2 * 5 * 1000); /* linger.ms * 2 */
-
- TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt,
- msg_dr_cnt);
- TEST_ASSERT(msg_dr_fail_cnt == 0, "expected %d dr failures, got %d", 0,
- msg_dr_fail_cnt);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return 0;
-}
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c
deleted file mode 100644
index 3d7cbf66f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * Verify that a long-processing consumer leaves the group during
- * processing, with or without a log queue.
- *
- * MO:
- * - produce messages to a single partition topic.
- * - create two consumers, c1 and c2.
- * - process first message slowly (2 * max.poll.interval.ms)
- * - verify in other consumer that group rebalances after max.poll.interval.ms
- * and the partition is assigned to the other consumer.
- */
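-
-/* Mechanism (informal): each rd_kafka_consumer_poll() call marks the
- * application as alive; spending longer than max.poll.interval.ms
- * between polls makes the consumer leave the group, and a later poll
- * returns RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED. Hypothetical loop shape,
- * where process_one() stands in for application processing:
- */
-#if 0
-static void poll_loop(rd_kafka_t *rk) {
- while (1) {
- rd_kafka_message_t *m = rd_kafka_consumer_poll(rk, 100);
- if (!m)
- continue;
- process_one(m); /* must return within max.poll.interval.ms */
- rd_kafka_message_destroy(m);
- }
-}
-#endif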
-
-/**
- * @brief Test max.poll.interval.ms without any additional polling.
- */
-static void do_test(void) {
- const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
- uint64_t testid;
- const int msgcnt = 10;
- rd_kafka_t *c[2];
- rd_kafka_conf_t *conf;
- int64_t ts_next[2] = {0, 0};
- int64_t ts_exp_msg[2] = {0, 0};
- int cmsgcnt = 0;
- int i;
- int bad = -1;
-
- SUB_TEST();
-
- testid = test_id_generate();
-
- test_create_topic(NULL, topic, 1, 1);
-
- test_produce_msgs_easy(topic, testid, -1, msgcnt);
-
- test_conf_init(&conf, NULL, 60);
-
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
- c[1] = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(c[0], topic);
- test_consumer_subscribe(c[1], topic);
-
- while (1) {
- for (i = 0; i < 2; i++) {
- int64_t now;
- rd_kafka_message_t *rkm;
-
- /* Consumer is "processing" */
- if (ts_next[i] > test_clock())
- continue;
-
- rkm = rd_kafka_consumer_poll(c[i], 100);
- if (!rkm)
- continue;
-
- if (rkm->err) {
- TEST_WARN(
- "Consumer %d error: %s: "
- "ignoring\n",
- i, rd_kafka_message_errstr(rkm));
- continue;
- }
-
- now = test_clock();
-
- cmsgcnt++;
-
- TEST_SAY(
- "Consumer %d received message (#%d) "
- "at offset %" PRId64 "\n",
- i, cmsgcnt, rkm->offset);
-
- if (ts_exp_msg[i]) {
- /* This consumer is expecting a message
- * after a certain time, namely after the
- * rebalance following max.poll.. being
- * exceeded in the other consumer */
- TEST_ASSERT(
- now > ts_exp_msg[i],
- "Consumer %d: did not expect "
- "message for at least %dms",
- i, (int)((ts_exp_msg[i] - now) / 1000));
- TEST_ASSERT(
- now < ts_exp_msg[i] + 10000 * 1000,
- "Consumer %d: expected message "
- "within 10s, not after %dms",
- i, (int)((now - ts_exp_msg[i]) / 1000));
- TEST_SAY(
- "Consumer %d: received message "
- "at offset %" PRId64 " after rebalance\n",
- i, rkm->offset);
-
- rd_kafka_message_destroy(rkm);
- goto done;
-
- } else if (cmsgcnt == 1) {
- /* Process this message for 20s */
- ts_next[i] = now + (20000 * 1000);
-
- /* Exp message on other consumer after
- * max.poll.interval.ms */
- ts_exp_msg[i ^ 1] = now + (10000 * 1000);
-
- /* This is the bad consumer */
- bad = i;
-
- TEST_SAY(
- "Consumer %d processing message at "
- "offset %" PRId64 "\n",
- i, rkm->offset);
- rd_kafka_message_destroy(rkm);
- } else {
- rd_kafka_message_destroy(rkm);
-
- TEST_FAIL(
- "Consumer %d did not expect "
- "a message",
- i);
- }
- }
- }
-
-done:
-
- TEST_ASSERT(bad != -1, "Bad consumer not set");
-
- /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
- while (1) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(c[bad], 1000);
- TEST_ASSERT(rkm, "Expected consumer result within 1s");
-
- TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");
-
- TEST_SAY("Consumer error: %s: %s\n",
- rd_kafka_err2name(rkm->err),
- rd_kafka_message_errstr(rkm));
-
- if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
- rd_kafka_message_destroy(rkm);
- break;
- }
-
- rd_kafka_message_destroy(rkm);
- }
-
-
- for (i = 0; i < 2; i++)
- rd_kafka_destroy_flags(c[i],
- RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test max.poll.interval.ms while polling log queue.
- */
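-/* Note: polling the log queue with rd_kafka_queue_poll() is not a
- * consumer poll and is thus not expected to reset the
- * max.poll.interval.ms timer; the slow consumer must still be evicted. */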
-static void do_test_with_log_queue(void) {
- const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
- uint64_t testid;
- const int msgcnt = 10;
- rd_kafka_t *c[2];
- rd_kafka_conf_t *conf;
- rd_kafka_queue_t *logq[2];
- int64_t ts_next[2] = {0, 0};
- int64_t ts_exp_msg[2] = {0, 0};
- int cmsgcnt = 0;
- int i;
- int bad = -1;
- char errstr[512];
-
- SUB_TEST();
-
- testid = test_id_generate();
-
- test_create_topic(NULL, topic, 1, 1);
-
- test_produce_msgs_easy(topic, testid, -1, msgcnt);
-
- test_conf_init(&conf, NULL, 60);
-
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "log.queue", "true");
-
- c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
- c[1] = test_create_consumer(topic, NULL, conf, NULL);
-
-
- for (i = 0; i < 2; i++) {
- logq[i] = rd_kafka_queue_new(c[i]);
- TEST_CALL__(rd_kafka_set_log_queue(c[i], logq[i]));
- test_consumer_subscribe(c[i], topic);
- }
-
- while (1) {
- for (i = 0; i < 2; i++) {
- int64_t now;
- rd_kafka_message_t *rkm;
-
- /* Consumer is "processing".
- * When we are "processing", we poll the log queue. */
- if (ts_next[i] > test_clock()) {
- rd_kafka_event_destroy(
- rd_kafka_queue_poll(logq[i], 100));
- continue;
- }
-
- rkm = rd_kafka_consumer_poll(c[i], 100);
- if (!rkm)
- continue;
-
- if (rkm->err) {
- TEST_WARN(
- "Consumer %d error: %s: "
- "ignoring\n",
- i, rd_kafka_message_errstr(rkm));
- continue;
- }
-
- now = test_clock();
-
- cmsgcnt++;
-
- TEST_SAY(
- "Consumer %d received message (#%d) "
- "at offset %" PRId64 "\n",
- i, cmsgcnt, rkm->offset);
-
- if (ts_exp_msg[i]) {
- /* This consumer is expecting a message
- * after a certain time, namely after the
- * rebalance following max.poll.. being
- * exceeded in the other consumer */
- TEST_ASSERT(
- now > ts_exp_msg[i],
- "Consumer %d: did not expect "
- "message for at least %dms",
- i, (int)((ts_exp_msg[i] - now) / 1000));
- TEST_ASSERT(
- now < ts_exp_msg[i] + 10000 * 1000,
- "Consumer %d: expected message "
- "within 10s, not after %dms",
- i, (int)((now - ts_exp_msg[i]) / 1000));
- TEST_SAY(
- "Consumer %d: received message "
- "at offset %" PRId64 " after rebalance\n",
- i, rkm->offset);
-
- rd_kafka_message_destroy(rkm);
- goto done;
-
- } else if (cmsgcnt == 1) {
- /* Process this message for 20s */
- ts_next[i] = now + (20000 * 1000);
-
- /* Exp message on other consumer after
- * max.poll.interval.ms */
- ts_exp_msg[i ^ 1] = now + (10000 * 1000);
-
- /* This is the bad consumer */
- bad = i;
-
- TEST_SAY(
- "Consumer %d processing message at "
- "offset %" PRId64 "\n",
- i, rkm->offset);
- rd_kafka_message_destroy(rkm);
- } else {
- rd_kafka_message_destroy(rkm);
-
- TEST_FAIL(
- "Consumer %d did not expect "
- "a message",
- i);
- }
- }
- }
-
-done:
-
- TEST_ASSERT(bad != -1, "Bad consumer not set");
-
- /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
- while (1) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(c[bad], 1000);
- TEST_ASSERT(rkm, "Expected consumer result within 1s");
-
- TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");
-
- TEST_SAY("Consumer error: %s: %s\n",
- rd_kafka_err2name(rkm->err),
- rd_kafka_message_errstr(rkm));
-
- if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
- rd_kafka_message_destroy(rkm);
- break;
- }
-
- rd_kafka_message_destroy(rkm);
- }
-
-
- for (i = 0; i < 2; i++) {
- rd_kafka_destroy_flags(c[i],
- RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
- rd_kafka_queue_destroy(logq[i]);
- }
-
- SUB_TEST_PASS();
-}
-
-int main_0089_max_poll_interval(int argc, char **argv) {
- do_test();
- do_test_with_log_queue();
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c
deleted file mode 100644
index 02d16df56..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-#include <stdarg.h>
-
-/**
- * @name Idempotent Producer tests
- *
- */
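-
-/* "Implicit ack" (informal): when a retried batch's sequence falls
- * within the broker's window of recently persisted batches, the broker
- * responds successfully without appending again, implicitly
- * acknowledging the earlier write whose response was lost. */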
-
-static struct {
- int batch_cnt;
- int initial_fail_batch_cnt;
- rd_atomic32_t produce_cnt;
-} state;
-
-
-
-/**
- * @brief This is called prior to parsing the ProduceResponse,
- * we use it to inject errors.
- *
- * @locality an internal rdkafka thread
- */
-static rd_kafka_resp_err_t handle_ProduceResponse(rd_kafka_t *rk,
- int32_t brokerid,
- uint64_t msgseq,
- rd_kafka_resp_err_t err) {
- rd_kafka_resp_err_t new_err = err;
- int n;
-
- if (err == RD_KAFKA_RESP_ERR__RETRY)
- return err; /* Skip internal retries, such as triggered by
- * rd_kafka_broker_bufq_purge_by_toppar() */
-
- n = rd_atomic32_add(&state.produce_cnt, 1);
-
- /* Fail the initial ProduceRequests with a request timeout,
- * but always let the very first request through. */
- if (n > 1 && n <= state.initial_fail_batch_cnt) {
- if (err)
- TEST_WARN(
- "First %d ProduceRequests should not "
- "have failed, this is #%d with error %s for "
- "brokerid %" PRId32 " and msgseq %" PRIu64 "\n",
- state.initial_fail_batch_cnt, n,
- rd_kafka_err2name(err), brokerid, msgseq);
- assert(!err &&
- *"First N ProduceRequests should not have failed");
- new_err = RD_KAFKA_RESP_ERR__TIMED_OUT;
- }
-
- TEST_SAY("handle_ProduceResponse(broker %" PRId32 ", MsgSeq %" PRId64
- ", Error %s) -> new Error %s\n",
- brokerid, msgseq, rd_kafka_err2name(err),
- rd_kafka_err2name(new_err));
-
- return new_err;
-}
-
-
-/**
- * @brief Test handling of implicit acks.
- *
- * @param batch_cnt Total number of batches, ProduceRequests, sent.
- * @param initial_fail_batch_cnt How many of the initial batches should
- * fail with an emulated network timeout.
- */
-static void do_test_implicit_ack(const char *what,
- int batch_cnt,
- int initial_fail_batch_cnt) {
- rd_kafka_t *rk;
- const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
- const int32_t partition = 0;
- uint64_t testid;
- int msgcnt = 10 * batch_cnt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- test_msgver_t mv;
-
- TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);
-
- rd_atomic32_init(&state.produce_cnt, 0);
- state.batch_cnt = batch_cnt;
- state.initial_fail_batch_cnt = initial_fail_batch_cnt;
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- test_conf_set(conf, "enable.idempotence", "true");
- test_conf_set(conf, "batch.num.messages", "10");
- test_conf_set(conf, "linger.ms", "500");
- test_conf_set(conf, "retry.backoff.ms", "10");
-
- /* The ProduceResponse handler will inject timed-out-in-flight
- * errors for the first N ProduceRequests, which will trigger retries
- * that in turn will result in OutOfSequence errors. */
- test_conf_set(conf, "ut_handle_ProduceResponse",
- (char *)handle_ProduceResponse);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_create_topic(rk, topic, 1, 1);
-
- rkt = test_create_producer_topic(rk, topic, NULL);
-
-
- TEST_SAY("Producing %d messages\n", msgcnt);
- test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);
-
- TEST_SAY("Flushing..\n");
- rd_kafka_flush(rk, 10000);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- TEST_SAY("Verifying messages with consumer\n");
- test_msgver_init(&mv, testid);
- test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, msgcnt,
- NULL, &mv);
- test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
- test_msgver_clear(&mv);
-
- TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
-}
-
-
-int main_0090_idempotence(int argc, char **argv) {
- /* The broker maintains a window of the N last ProduceRequests
- * per partition and producer to allow ProduceRequest retries
- * for previously successful requests to return a non-error response.
- * This limit is currently (AK 2.0) hard coded at 5. */
- const int broker_req_window = 5;
-
- do_test_implicit_ack("within broker request window",
- broker_req_window * 2, broker_req_window);
-
- do_test_implicit_ack("outside broker request window",
- broker_req_window + 3, broker_req_window + 3);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c
deleted file mode 100644
index c1506afd9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-/**
- * Verify that a long-processing consumer does not leave the group during
- * processing when processing time < max.poll.interval.ms but
- * max.poll.interval.ms > socket.timeout.ms.
- *
- * MO:
- * - produce N*.. messages to two partitions
- * - create two consumers, c0 and c1.
- * - subscribe c0, wait for rebalance, poll first message.
- * - subscribe c1
- * - have both consumers poll messages and spend T seconds processing
- * each message.
- * - wait until both consumers have received N messages each.
- * - check that no errors (disconnects, etc) or extra rebalances were raised.
- */
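-
-/* The tension being exercised (informal): a JoinGroup request may
- * legitimately stay in-flight for up to max.poll.interval.ms (20s here),
- * which exceeds socket.timeout.ms (15s here), so such long-lived
- * requests must not be failed as timed out while members are slowly
- * processing. */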
-
-
-const int64_t processing_time = 31 * 1000 * 1000; /*31s*/
-
-struct _consumer {
- rd_kafka_t *rk;
- int64_t last;
- int cnt;
- int rebalance_cnt;
- int max_rebalance_cnt;
-};
-
-static void do_consume(struct _consumer *cons, int timeout_s) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(cons->rk, timeout_s * 1000);
- if (!rkm)
- return;
-
- TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)",
- rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm),
- (int)((test_clock() - cons->last) / 1000));
-
- TEST_SAY(
- "%s: processing message #%d from "
- "partition %" PRId32 " at offset %" PRId64 "\n",
- rd_kafka_name(cons->rk), cons->cnt, rkm->partition, rkm->offset);
-
- rd_kafka_message_destroy(rkm);
-
- cons->cnt++;
- cons->last = test_clock();
-
- TEST_SAY("%s: simulate processing by sleeping for %ds\n",
- rd_kafka_name(cons->rk), timeout_s);
- rd_sleep(timeout_s);
-}
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- struct _consumer *cons = opaque;
-
- cons->rebalance_cnt++;
-
- TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n",
- rd_kafka_name(cons->rk), cons->rebalance_cnt,
- cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt);
-
- TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt,
- "%s rebalanced %d times, max was %d",
- rd_kafka_name(cons->rk), cons->rebalance_cnt,
- cons->max_rebalance_cnt);
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- rd_kafka_assign(rk, parts);
- else
- rd_kafka_assign(rk, NULL);
-}
-
-
-#define _CONSUMER_CNT 2
-static void do_test_with_subscribe(const char *topic) {
- int64_t testid;
- const int msgcnt = 3;
- struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
- rd_kafka_conf_t *conf;
-
- TEST_SAY(_C_MAG "[ Test max.poll.interval.ms with subscribe() ]\n");
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL,
- 10 + (int)(processing_time / 1000000) * msgcnt);
-
- /* Produce extra messages since we can't fully rely on the
- * random partitioner to provide exact distribution. */
- test_produce_msgs_easy(topic, testid, -1, msgcnt * _CONSUMER_CNT * 2);
- test_produce_msgs_easy(topic, testid, 1, msgcnt / 2);
-
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "max.poll.interval.ms", "20000" /*20s*/);
- test_conf_set(conf, "socket.timeout.ms", "15000" /*15s*/);
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.partition.eof", "false");
- /* Trigger other requests often */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "1000");
- rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
-
- rd_kafka_conf_set_opaque(conf, &c[0]);
- c[0].rk =
- test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
-
- rd_kafka_conf_set_opaque(conf, &c[1]);
- c[1].rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(c[0].rk, topic);
-
- /* c0: assign, (c1 joins) revoke, assign */
- c[0].max_rebalance_cnt = 3;
- /* c1: assign */
- c[1].max_rebalance_cnt = 1;
-
- /* Wait for assignment */
- while (1) {
- rd_kafka_topic_partition_list_t *parts = NULL;
-
- do_consume(&c[0], 1 /*1s*/);
-
- if (rd_kafka_assignment(c[0].rk, &parts) !=
- RD_KAFKA_RESP_ERR_NO_ERROR ||
- !parts || parts->cnt == 0) {
- if (parts)
- rd_kafka_topic_partition_list_destroy(parts);
- continue;
- }
-
- TEST_SAY("%s got assignment of %d partition(s)\n",
- rd_kafka_name(c[0].rk), parts->cnt);
- rd_kafka_topic_partition_list_destroy(parts);
- break;
- }
-
- test_consumer_subscribe(c[1].rk, topic);
-
- /* Poll until both consumers have finished reading N messages */
- while (c[0].cnt < msgcnt && c[1].cnt < msgcnt) {
- do_consume(&c[0], 0);
- do_consume(&c[1], 10 /*10s*/);
- }
-
- /* Allow the extra revoke rebalance on close() */
- c[0].max_rebalance_cnt++;
- c[1].max_rebalance_cnt++;
-
- test_consumer_close(c[0].rk);
- test_consumer_close(c[1].rk);
-
- rd_kafka_destroy(c[0].rk);
- rd_kafka_destroy(c[1].rk);
-
- TEST_SAY(_C_GRN
- "[ Test max.poll.interval.ms with subscribe(): PASS ]\n");
-}
-
-
-/**
- * @brief Verify that max.poll.interval.ms does NOT kick in
- * when just using assign() and not subscribe().
- */
-static void do_test_with_assign(const char *topic) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_message_t *rkm;
-
- TEST_SAY(_C_MAG "[ Test max.poll.interval.ms with assign() ]\n");
-
- test_conf_init(&conf, NULL, 60);
-
- test_create_topic(NULL, topic, 2, 1);
-
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "max.poll.interval.ms", "7000" /*7s*/);
-
- rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_assign_partition("ASSIGN", rk, topic, 0,
- RD_KAFKA_OFFSET_END);
-
-
- /* Sleep for longer than max.poll.interval.ms */
- rd_sleep(10);
-
- /* Make sure no error was raised */
- while ((rkm = rd_kafka_consumer_poll(rk, 0))) {
- TEST_ASSERT(!rkm->err, "Unexpected consumer error: %s: %s",
- rd_kafka_err2name(rkm->err),
- rd_kafka_message_errstr(rkm));
-
- rd_kafka_message_destroy(rkm);
- }
-
-
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN "[ Test max.poll.interval.ms with assign(): PASS ]\n");
-}
-
-
-/**
- * @brief Verify that max.poll.interval.ms kicks in even if
- * the application hasn't called poll once.
- */
-static void do_test_no_poll(const char *topic) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_message_t *rkm;
- rd_bool_t raised = rd_false;
-
- TEST_SAY(_C_MAG "[ Test max.poll.interval.ms without calling poll ]\n");
-
- test_conf_init(&conf, NULL, 60);
-
- test_create_topic(NULL, topic, 2, 1);
-
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "max.poll.interval.ms", "7000" /*7s*/);
-
- rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(rk, topic);
-
- /* Sleep for longer than max.poll.interval.ms */
- rd_sleep(10);
-
- /* Make sure the error is raised */
- while ((rkm = rd_kafka_consumer_poll(rk, 0))) {
- if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
- raised = rd_true;
-
- rd_kafka_message_destroy(rkm);
- }
-
- TEST_ASSERT(raised, "Expected to have seen ERR__MAX_POLL_EXCEEDED");
-
- test_consumer_close(rk);
- rd_kafka_destroy(rk);
-
- TEST_SAY(_C_GRN
- "[ Test max.poll.interval.ms without calling poll: PASS ]\n");
-}
-
-
-int main_0091_max_poll_interval_timeout(int argc, char **argv) {
- const char *topic =
- test_mk_topic_name("0091_max_poll_interval_tmout", 1);
-
- test_create_topic(NULL, topic, 2, 1);
-
- do_test_with_subscribe(topic);
-
- do_test_with_assign(topic);
-
- do_test_no_poll(topic);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c
deleted file mode 100644
index 46308ddf4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * @name Mixed MsgVersions.
- *
- * - Create producer.
- * - Produce N/2 messages (with MsgVer2).
- * - Change the topic message.format.version to a MsgVer1 version.
- * - Produce the remaining N/2 messages (stored as MsgVer1).
- * - Consume the messages to verify all can be read.
- */
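-
-/* Background (informal): brokers with message.format.version >= 0.11.0
- * store MsgVer2 record batches; downgrading the topic to 0.10.0.0
- * switches it to MsgVer1, so the log ends up containing both formats. */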
-
-
-
-int main_0092_mixed_msgver(int argc, char **argv) {
- rd_kafka_t *rk;
- const char *topic = test_mk_topic_name("0092_mixed_msgver", 1);
- int32_t partition = 0;
- const int msgcnt = 60;
- int cnt;
- int64_t testid;
- int msgcounter = msgcnt;
-
- if (test_idempotent_producer) {
- TEST_SKIP("Idempotent producer requires MsgVersion >= 2\n");
- return 0;
- }
-
- testid = test_id_generate();
-
- rk = test_create_producer();
-
- /* Produce messages */
- for (cnt = 0; cnt < msgcnt; cnt++) {
- rd_kafka_resp_err_t err;
- char buf[230];
-
- test_msg_fmt(buf, sizeof(buf), testid, partition, cnt);
-
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(partition),
- RD_KAFKA_V_VALUE(buf, sizeof(buf)),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev() #%d failed: %s", cnt,
- rd_kafka_err2str(err));
-
- /* One message per batch */
- rd_kafka_flush(rk, 30 * 1000);
-
- if (cnt == msgcnt / 2) {
- const char *msgconf[] = {"message.format.version",
- "0.10.0.0"};
- TEST_SAY("Changing message.format.version\n");
- err = test_AlterConfigs_simple(
- rk, RD_KAFKA_RESOURCE_TOPIC, topic, msgconf, 1);
- TEST_ASSERT(!err, "AlterConfigs failed: %s",
- rd_kafka_err2str(err));
- }
- }
-
- rd_kafka_destroy(rk);
-
- /* Consume messages */
- test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c
deleted file mode 100644
index 366deca32..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-
-/**
- * @brief Attempt to verify head-of-line-blocking behaviour.
- *
- * - Create two high-level consumers with socket.timeout.ms=low,
- * max.poll.interval.ms=high, and metadata refresh interval=low.
- * - Have first consumer join the group (subscribe()), should finish quickly.
- * - Have second consumer join the group, but don't call poll on
- * the first consumer for some time to have the second consumer
- * block on JoinGroup.
- * - Verify that no errors (e.g. from timed out Metadata requests) are raised.
- */
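-
-/* The blocking scenario (informal): the broker handles requests on a
- * connection in order, so a JoinGroup that blocks server-side until the
- * rebalance completes can delay requests queued behind it on the same
- * connection past socket.timeout.ms (3s here). */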
-
-struct _consumer {
- rd_kafka_t *rk;
- int64_t last;
- int cnt;
- int rebalance_cnt;
- int max_rebalance_cnt;
-};
-
-static void do_consume(struct _consumer *cons, int timeout_s) {
- rd_kafka_message_t *rkm;
-
- rkm = rd_kafka_consumer_poll(cons->rk, 100 + (timeout_s * 1000));
- if (!rkm)
- return;
-
- TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)",
- rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm),
- (int)((test_clock() - cons->last) / 1000));
-
- rd_kafka_message_destroy(rkm);
-
- cons->cnt++;
- cons->last = test_clock();
-
- if (timeout_s > 0) {
- TEST_SAY("%s: simulate processing by sleeping for %ds\n",
- rd_kafka_name(cons->rk), timeout_s);
- rd_sleep(timeout_s);
- }
-}
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- struct _consumer *cons = opaque;
-
- cons->rebalance_cnt++;
-
- TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n",
- rd_kafka_name(cons->rk), cons->rebalance_cnt,
- cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt);
-
- TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt,
- "%s rebalanced %d times, max was %d",
- rd_kafka_name(cons->rk), cons->rebalance_cnt,
- cons->max_rebalance_cnt);
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
- rd_kafka_assign(rk, parts);
- else
- rd_kafka_assign(rk, NULL);
-}
-
-
-#define _CONSUMER_CNT 2
-int main_0093_holb_consumer(int argc, char **argv) {
- const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
- int64_t testid;
- const int msgcnt = 100;
- struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
- rd_kafka_conf_t *conf;
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
-
- test_create_topic(NULL, topic, 1, 1);
-
- test_produce_msgs_easy(topic, testid, 0, msgcnt);
-
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "max.poll.interval.ms", "20000");
- test_conf_set(conf, "socket.timeout.ms", "3000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- /* Trigger other requests often */
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
- rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
-
- rd_kafka_conf_set_opaque(conf, &c[0]);
- c[0].rk =
- test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
-
- rd_kafka_conf_set_opaque(conf, &c[1]);
- c[1].rk = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(c[0].rk, topic);
-
- /* c0: assign */
- c[0].max_rebalance_cnt = 1;
-
- /* c1: none, hasn't joined yet */
- c[1].max_rebalance_cnt = 0;
-
- TEST_SAY("Waiting for c[0] assignment\n");
- while (1) {
- rd_kafka_topic_partition_list_t *parts = NULL;
-
- do_consume(&c[0], 1 /*1s*/);
-
- if (rd_kafka_assignment(c[0].rk, &parts) !=
- RD_KAFKA_RESP_ERR_NO_ERROR ||
- !parts || parts->cnt == 0) {
- if (parts)
- rd_kafka_topic_partition_list_destroy(parts);
- continue;
- }
-
- TEST_SAY("%s got assignment of %d partition(s)\n",
- rd_kafka_name(c[0].rk), parts->cnt);
- rd_kafka_topic_partition_list_destroy(parts);
- break;
- }
-
- TEST_SAY("c[0] got assignment, consuming..\n");
- do_consume(&c[0], 5 /*5s*/);
-
- TEST_SAY("Joining second consumer\n");
- test_consumer_subscribe(c[1].rk, topic);
-
- /* Just poll second consumer for 10s, the rebalance will not
- * finish until the first consumer polls */
- do_consume(&c[1], 10 /*10s*/);
-
- /* c0: the next call to do_consume/poll will trigger
- * its rebalance callback, first revoke then assign. */
- c[0].max_rebalance_cnt += 2;
- /* c1: first rebalance */
- c[1].max_rebalance_cnt++;
-
- TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
- c[0].rebalance_cnt, c[0].max_rebalance_cnt, c[1].rebalance_cnt,
- c[1].max_rebalance_cnt);
-
- /* Let rebalances kick in, then consume messages. */
- while (c[0].cnt + c[1].cnt < msgcnt) {
- do_consume(&c[0], 0);
- do_consume(&c[1], 0);
- }
-
- /* Allow the extra revoke rebalance on close() */
- c[0].max_rebalance_cnt++;
- c[1].max_rebalance_cnt++;
-
- test_consumer_close(c[0].rk);
- test_consumer_close(c[1].rk);
-
- rd_kafka_destroy(c[0].rk);
- rd_kafka_destroy(c[1].rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c
deleted file mode 100644
index 8704adc09..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-#if WITH_SOCKEM
-/**
- * @name Test handling of message timeouts with the idempotent producer.
- *
- * - Set message timeout low.
- * - Set low socket send buffer, promote batching, and use large messages
- * to make sure requests are partially sent.
- * - Produce a steady flow of messages
- * - After some time, set the sockem delay higher than the message timeout.
- * - Shortly after, remove the sockem delay.
- * - Verify that all messages were successfully produced in order.
- *
- * https://github.com/confluentinc/confluent-kafka-dotnet/issues/704
- */
-
-/*
- * Scenario:
- *
- * MsgSets: [ 1 | 2 | 3 | 4 | 5 | 6 ]
- *
- * 1. Producer sends MsgSets 1,2,3,4,5.
- * 2. Producer receives ack for MsgSet 1.
- * 3. Connection to broker goes down.
- * 4. The messages in MsgSet 2 are timed out by the producer's timeout scanner.
- * 5. Connection to broker comes back up.
- * 6. Producer choices:
- * 6a. Reset the epoch and start producing MsgSet 3 with reset sequence 0.
- * Pros: instant recovery.
- * Cons: a. If MsgSet 2 was persisted by the broker we now have a desynch
- * between producer and broker: the producer thinks the messages
- * failed, while the broker wrote them to the log.
- * b. If MsgSets 3,.. were also persisted then there will be duplicates
- * as MsgSet 3 is produced with a reset sequence of 0.
- * 6b. Try to recover within the current epoch: the broker is expecting
- * sequence 2, 3, 4, or 5, depending on what it managed to persist
- * before the connection went down.
- * The producer should produce msg 2, but it no longer exists since it
- * timed out. If lucky, only 2 was persisted by the broker, which means
- * the producer can successfully produce 3.
- * If 3 was persisted the producer would get a DuplicateSequence error
- * back, indicating that it was already produced; this would get
- * the producer back in synch.
- * If 2+ was not persisted an OutOfOrderSeq would be returned when 3
- * is produced. The producer should be able to bump the epoch and
- * start with Msg 3 as reset sequence 0 without risking loss or duplication.
- * 6c. Try to recover within the current epoch by draining the toppar
- * and then adjusting its base msgid to the head-of-line message in
- * the producer queue (after timed-out messages were removed).
- * This avoids bumping the epoch (which grinds all partitions to a halt
- * while draining, and requires an extra roundtrip).
- * It is tricky to get the adjustment value correct though.
- * 6d. Drain all partitions and then bump the epoch, resetting the base
- * sequence to the first message in the queue.
- * Pros: simple.
- * Cons: will grind all partitions to a halt while draining.
- *
- * We chose to go with option 6d.
- */
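-
-/* Worked example of option 6d (hypothetical numbers): say MsgSet 2
- * covered seqs 10..19 and was timed out locally, while MsgSets 3..5
- * (seqs 20..49) remain queued. The producer drains all partitions,
- * bumps the epoch (0 -> 1) and resets the base sequence, so MsgSet 3 is
- * produced as epoch 1, base seq 0 and the broker accepts it as a fresh
- * sequence stream. */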
-
-
-#include <stdarg.h>
-#include <errno.h>
-
-#include "sockem_ctrl.h"
-
-static struct {
- int dr_ok;
- int dr_fail;
- test_msgver_t mv_delivered;
-} counters;
-
-
-static void my_dr_msg_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque) {
-
- if (rd_kafka_message_status(rkmessage) >=
- RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED)
- test_msgver_add_msg(rk, &counters.mv_delivered,
- (rd_kafka_message_t *)rkmessage);
-
- if (rkmessage->err) {
- counters.dr_fail++;
- } else {
- counters.dr_ok++;
- }
-}
-
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- /* Ignore connectivity errors since we'll be bringing down
- * .. connectivity.
- * The SASL authenticator will think a connection-down event in the
- * auth state means the broker doesn't support SASL PLAIN. */
- TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
- if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
- err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
- err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- return 0;
- return 1;
-}
-
-
-static void do_test_produce_timeout(const char *topic, const int msgrate) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_t *rkt;
- uint64_t testid;
- rd_kafka_resp_err_t err;
- const int partition = RD_KAFKA_PARTITION_UA;
- int msgcnt = msgrate * 20;
- const int msgsize = 100 * 1000;
- sockem_ctrl_t ctrl;
- int msgcounter = 0;
- test_msgver_t mv;
-
- TEST_SAY(_C_BLU
- "Test idempotent producer "
- "with message timeouts (%d msgs/s)\n",
- msgrate);
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 60);
- test_msgver_init(&counters.mv_delivered, testid);
- sockem_ctrl_init(&ctrl);
-
- test_conf_set(conf, "enable.idempotence", "true");
- test_conf_set(conf, "linger.ms", "300");
- test_conf_set(conf, "reconnect.backoff.ms", "2000");
- test_conf_set(conf, "socket.send.buffer.bytes", "10000");
- rd_kafka_conf_set_dr_msg_cb(conf, my_dr_msg_cb);
-
- test_socket_enable(conf);
- test_curr->is_fatal_cb = is_fatal_cb;
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- rkt = test_create_producer_topic(rk, topic, "message.timeout.ms",
- "5000", NULL);
-
- /* Create the topic to make sure connections are up and ready. */
- err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
- TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
-
- /* After 1 second, set socket delay to 2*message.timeout.ms */
- sockem_ctrl_set_delay(&ctrl, 1000, 2 * 5000);
-
- /* After 3*message.timeout.ms seconds, remove delay. */
- sockem_ctrl_set_delay(&ctrl, 3 * 5000, 0);
-
- test_produce_msgs_nowait(rk, rkt, testid, partition, 0, msgcnt, NULL,
- msgsize, msgrate, &msgcounter);
-
- test_flush(rk, 3 * 5000);
-
- TEST_SAY("%d/%d messages produced, %d delivered, %d failed\n",
- msgcounter, msgcnt, counters.dr_ok, counters.dr_fail);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- sockem_ctrl_term(&ctrl);
-
- TEST_SAY("Verifying %d delivered messages with consumer\n",
- counters.dr_ok);
-
- test_msgver_init(&mv, testid);
- test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, -1, NULL,
- &mv);
- test_msgver_verify_compare("delivered", &mv, &counters.mv_delivered,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
- TEST_MSGVER_BY_MSGID |
- TEST_MSGVER_SUBSET);
- test_msgver_clear(&mv);
- test_msgver_clear(&counters.mv_delivered);
-
-
- TEST_SAY(_C_GRN
- "Test idempotent producer "
- "with message timeouts (%d msgs/s): SUCCESS\n",
- msgrate);
-}
-
-int main_0094_idempotence_msg_timeout(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
-
- do_test_produce_timeout(topic, 10);
-
- if (test_quick) {
- TEST_SAY("Skipping further tests due to quick mode\n");
- return 0;
- }
-
- do_test_produce_timeout(topic, 100);
-
- return 0;
-}
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp
deleted file mode 100644
index 6ebd5f500..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-
-class errorEventCb : public RdKafka::EventCb {
- public:
- errorEventCb() : error_seen(false) {
- }
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_ERROR:
- Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": "
- << event.str() << "\n");
- if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
- error_seen = true;
- break;
-
- case RdKafka::Event::EVENT_LOG:
- Test::Say(tostr() << "Log: " << event.str() << "\n");
- break;
-
- default:
- break;
- }
- }
-
- bool error_seen;
-};
-
-
-extern "C" {
-int main_0095_all_brokers_down(int argc, char **argv) {
- RdKafka::Conf *conf;
- std::string errstr;
-
- Test::conf_init(&conf, NULL, 20);
- /* Two broker addresses that will quickly reject the connection */
- Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2");
-
- /*
- * First test producer
- */
- errorEventCb pEvent = errorEventCb();
-
- if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- Test::Say("Test Producer\n");
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- /* Wait for all brokers down */
- while (!pEvent.error_seen)
- p->poll(1000);
-
- delete p;
-
-
- /*
- * Test high-level consumer that has a logical broker (group coord),
- * which has caused AllBrokersDown generation problems (#2259)
- */
- errorEventCb cEvent = errorEventCb();
-
- Test::conf_set(conf, "group.id", "test");
-
- if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- Test::Say("Test KafkaConsumer\n");
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
-
- delete conf;
-
- /* Wait for all brokers down */
- while (!cEvent.error_seen) {
- RdKafka::Message *m = c->consume(1000);
- if (m)
- delete m;
- }
-
- c->close();
-
- delete c;
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp
deleted file mode 100644
index 8a3a0bce5..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include <fstream>
-#include <streambuf>
-#include "testcpp.h"
-#include "tinycthread.h"
-
-static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = {
- /* [RdKafka::CERT_PUBLIC_KEY] = */
- {
- "SSL_pkcs",
- "SSL_pub_der",
- "SSL_pub_pem",
- },
- /* [RdKafka::CERT_PRIVATE_KEY] = */
- {
- "SSL_pkcs",
- "SSL_priv_der",
- "SSL_priv_pem",
- },
- /* [RdKafka::CERT_CA] = */
- {
- "SSL_pkcs",
- "SSL_ca_der",
- "SSL_all_cas_pem" /* Contains multiple CA certs */,
- }};
-
-
-static std::vector<char> read_file(const std::string path) {
- std::ifstream ifs(path.c_str(), std::ios::binary | std::ios::ate);
- if (ifs.fail())
- Test::Fail("Failed to open " + path + ": " + strerror(errno));
- int size = (int)ifs.tellg();
- ifs.seekg(0, std::ifstream::beg);
- std::vector<char> buffer;
- buffer.resize(size);
- ifs.read(buffer.data(), size);
- ifs.close();
- return buffer;
-}
-
-
-/**
- * @name SslCertVerifyCb verification.
- *
- * Requires security.protocol=*SSL
- */
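-
-/* Contract exercised below (informal): the callback is invoked once per
- * certificate in the chain; returning true accepts the certificate,
- * while returning false and setting *x509_error and errstr fails the
- * TLS handshake. */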
-
-class TestVerifyCb : public RdKafka::SslCertificateVerifyCb {
- public:
- bool verify_ok;
- int cnt; ///< Verify callbacks triggered.
- mtx_t lock;
-
- TestVerifyCb(bool verify_ok) : verify_ok(verify_ok), cnt(0) {
- mtx_init(&lock, mtx_plain);
- }
-
- ~TestVerifyCb() {
- mtx_destroy(&lock);
- }
-
- bool ssl_cert_verify_cb(const std::string &broker_name,
- int32_t broker_id,
- int *x509_error,
- int depth,
- const char *buf,
- size_t size,
- std::string &errstr) {
- mtx_lock(&lock);
-
- Test::Say(tostr() << "ssl_cert_verify_cb #" << cnt << ": broker_name="
- << broker_name << ", broker_id=" << broker_id
- << ", x509_error=" << *x509_error << ", depth=" << depth
- << ", buf size=" << size << ", verify_ok=" << verify_ok
- << "\n");
-
- cnt++;
- mtx_unlock(&lock);
-
- if (verify_ok)
- return true;
-
- errstr = "This test triggered a verification failure";
- *x509_error = 26; /*X509_V_ERR_INVALID_PURPOSE*/
-
- return false;
- }
-};
-
-
-/**
- * @brief Set SSL PEM cert/key using configuration property.
- *
- * The cert/key is loaded from environment variables set up by trivup.
- *
- * @param loc_prop ssl.X.location property that will be cleared.
- * @param pem_prop ssl.X.pem property that will be set.
- * @param cert_type Certificate type.
- */
-static void conf_location_to_pem(RdKafka::Conf *conf,
- std::string loc_prop,
- std::string pem_prop,
- RdKafka::CertificateType cert_type) {
- std::string loc;
-
- std::string errstr;
- if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to reset " + loc_prop + ": " + errstr);
-
- const char *p;
- p = test_getenv(envname[cert_type][RdKafka::CERT_ENC_PEM].c_str(), NULL);
- if (!p)
- Test::Fail(
- "Invalid test environment: "
- "Missing " +
- envname[cert_type][RdKafka::CERT_ENC_PEM] +
- " env variable: make sure trivup is up to date");
-
- loc = p;
-
-
- /* Read file */
- std::ifstream ifs(loc.c_str());
- std::string pem((std::istreambuf_iterator<char>(ifs)),
- std::istreambuf_iterator<char>());
-
- Test::Say("Read env " + envname[cert_type][RdKafka::CERT_ENC_PEM] + "=" +
- loc + " from disk and changed to in-memory " + pem_prop +
- " string\n");
-
- if (conf->set(pem_prop, pem, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to set " + pem_prop + ": " + errstr);
-}
-
-/**
- * @brief Set SSL cert/key using set_ssl_cert() rather than
- * config string property \p loc_prop (which will be cleared)
- *
- * @remark Requires a bunch of SSL_.. env vars to point out where
- * certs are found. These are set up by trivup.
- */
-static void conf_location_to_setter(RdKafka::Conf *conf,
- std::string loc_prop,
- RdKafka::CertificateType cert_type,
- RdKafka::CertificateEncoding encoding) {
- std::string loc;
- static const std::string encnames[] = {
- "PKCS#12",
- "DER",
- "PEM",
- };
-
- /* Clear the config property (e.g., ssl.key.location) */
- std::string errstr;
- if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to reset " + loc_prop);
-
- const char *p;
- p = test_getenv(envname[cert_type][encoding].c_str(), NULL);
- if (!p)
- Test::Fail(
- "Invalid test environment: "
- "Missing " +
- envname[cert_type][encoding] +
- " env variable: make sure trivup is up to date");
-
- loc = p;
-
- Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << " as "
- << encnames[encoding] << " from env "
- << envname[cert_type][encoding] << "\n");
-
- /* Read file */
- std::ifstream ifs(loc.c_str(), std::ios::binary | std::ios::ate);
- if (ifs.fail())
- Test::Fail("Failed to open " + loc + ": " + strerror(errno));
- int size = (int)ifs.tellg();
- ifs.seekg(0, std::ifstream::beg);
- std::vector<char> buffer;
- buffer.resize(size);
- ifs.read(buffer.data(), size);
- ifs.close();
-
- if (conf->set_ssl_cert(cert_type, encoding, buffer.data(), size, errstr) !=
- RdKafka::Conf::CONF_OK)
- Test::Fail(tostr() << "Failed to set " << loc_prop << " from " << loc
- << " as cert type " << cert_type << " with encoding "
- << encoding << ": " << errstr << "\n");
-}
-
-
-typedef enum {
- USE_LOCATION, /* use ssl.X.location */
- USE_CONF, /* use ssl.X.pem */
- USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */
-} cert_load_t;
-
-static const std::string load_names[] = {
- "location",
- "conf",
- "setter",
-};
-
-
-static void do_test_verify(const int line,
- bool verify_ok,
- cert_load_t load_key,
- RdKafka::CertificateEncoding key_enc,
- cert_load_t load_pub,
- RdKafka::CertificateEncoding pub_enc,
- cert_load_t load_ca,
- RdKafka::CertificateEncoding ca_enc) {
- /*
- * Create any type of client
- */
- std::string teststr = tostr() << line << ": "
- << "SSL cert verify: verify_ok=" << verify_ok
- << ", load_key=" << load_names[load_key]
- << ", load_pub=" << load_names[load_pub]
- << ", load_ca=" << load_names[load_ca];
-
- Test::Say(_C_BLU "[ " + teststr + " ]\n" _C_CLR);
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
-
- std::string val;
- if (conf->get("ssl.key.location", val) != RdKafka::Conf::CONF_OK ||
- val.empty()) {
- Test::Skip("Test requires SSL to be configured\n");
- delete conf;
- return;
- }
-
- /* Get ssl.key.location, read its contents, and replace with
- * ssl.key.pem. Same with ssl.certificate.location -> ssl.certificate.pem. */
- if (load_key == USE_CONF)
- conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem",
- RdKafka::CERT_PRIVATE_KEY);
- else if (load_key == USE_SETTER)
- conf_location_to_setter(conf, "ssl.key.location", RdKafka::CERT_PRIVATE_KEY,
- key_enc);
-
- if (load_pub == USE_CONF)
- conf_location_to_pem(conf, "ssl.certificate.location",
- "ssl.certificate.pem", RdKafka::CERT_PUBLIC_KEY);
- else if (load_pub == USE_SETTER)
- conf_location_to_setter(conf, "ssl.certificate.location",
- RdKafka::CERT_PUBLIC_KEY, pub_enc);
-
- if (load_ca == USE_CONF)
- conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem",
- RdKafka::CERT_CA);
- else if (load_ca == USE_SETTER)
- conf_location_to_setter(conf, "ssl.ca.location", RdKafka::CERT_CA, ca_enc);
-
-
- std::string errstr;
- conf->set("debug", "security", errstr);
-
- TestVerifyCb verifyCb(verify_ok);
- if (conf->set("ssl_cert_verify_cb", &verifyCb, errstr) !=
- RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to set verifyCb: " + errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create producer: " + errstr);
- delete conf;
-
- bool run = true;
- for (int i = 0; run && i < 10; i++) {
- p->poll(1000);
-
- mtx_lock(&verifyCb.lock);
- if ((verify_ok && verifyCb.cnt > 0) || (!verify_ok && verifyCb.cnt > 3))
- run = false;
- mtx_unlock(&verifyCb.lock);
- }
-
- mtx_lock(&verifyCb.lock);
- if (!verifyCb.cnt)
- Test::Fail("Expected at least one verifyCb invocation");
- mtx_unlock(&verifyCb.lock);
-
- /* Retrieving the clusterid allows us to easily check if a
- * connection could be made. Match this to the expected outcome of
- * this test. */
- std::string cluster = p->clusterid(1000);
-
- if (verify_ok == cluster.empty())
- Test::Fail("Expected connection to " +
- (std::string)(verify_ok ? "succeed" : "fail") +
- ", but got clusterid '" + cluster + "'");
-
- delete p;
-
- Test::Say(_C_GRN "[ PASSED: " + teststr + " ]\n" _C_CLR);
-}
-
-
-/**
- * @brief Verification that some bad combinations of calls behave as expected.
- * This is simply to verify #2904.
- */
-static void do_test_bad_calls() {
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
- std::string errstr;
-
- if (conf->set("enable.ssl.certificate.verification", "false", errstr))
- Test::Fail(errstr);
-
- if (conf->set("security.protocol", "SSL", errstr))
- Test::Fail(errstr);
-
- if (conf->set("ssl.key.password", test_getenv("SSL_password", NULL), errstr))
- Test::Fail(errstr);
-
- std::vector<char> certBuffer = read_file(test_getenv(
- envname[RdKafka::CERT_CA][RdKafka::CERT_ENC_PEM].c_str(), NULL));
-
- if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM,
- certBuffer.data(), certBuffer.size(), errstr))
- Test::Fail(errstr);
-
-  /* Set the public key as CA (overwriting the previous one) */
- std::vector<char> userBuffer = read_file(test_getenv(
- envname[RdKafka::CERT_PUBLIC_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL));
-
- if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM,
- userBuffer.data(), userBuffer.size(), errstr))
- Test::Fail(errstr);
-
- std::vector<char> keyBuffer = read_file(test_getenv(
- envname[RdKafka::CERT_PRIVATE_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL));
-
- if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, RdKafka::CERT_ENC_PEM,
- keyBuffer.data(), keyBuffer.size(), errstr))
- Test::Fail(errstr);
-
- // Create Kafka producer
- RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
- delete conf;
- if (producer)
- Test::Fail("Expected producer creation to fail");
-
- if (errstr.find("Private key check failed") == std::string::npos)
- Test::Fail("Expected 'Private key check failed' error, not " + errstr);
-
-  Test::Say("Producer creation failed as expected: " + errstr + "\n");
-}
-
-extern "C" {
-int main_0097_ssl_verify(int argc, char **argv) {
- if (!test_check_builtin("ssl")) {
- Test::Skip("Test requires SSL support\n");
- return 0;
- }
-
- if (!test_getenv("SSL_pkcs", NULL)) {
- Test::Skip("Test requires SSL_* env-vars set up by trivup\n");
- return 0;
- }
-
-
- do_test_bad_calls();
-
- do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
- USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION,
- RdKafka::CERT_ENC_PEM);
- do_test_verify(__LINE__, false, USE_LOCATION, RdKafka::CERT_ENC_PEM,
- USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION,
- RdKafka::CERT_ENC_PEM);
-
- /* Verify various priv and pub key and CA input formats */
- do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF,
- RdKafka::CERT_ENC_PEM, USE_LOCATION, RdKafka::CERT_ENC_PEM);
- do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF,
- RdKafka::CERT_ENC_PEM, USE_CONF, RdKafka::CERT_ENC_PEM);
- do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PEM, USE_SETTER,
- RdKafka::CERT_ENC_PEM, USE_SETTER, RdKafka::CERT_ENC_PKCS12);
- do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
- USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER,
- RdKafka::CERT_ENC_DER);
- do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
- USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER,
- RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */
- do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
- USE_SETTER, RdKafka::CERT_ENC_DER, USE_CONF,
- RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */
- do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PKCS12,
- USE_SETTER, RdKafka::CERT_ENC_PKCS12, USE_SETTER,
- RdKafka::CERT_ENC_PKCS12);
-
- return 0;
-}
-
-
-int main_0097_ssl_verify_local(int argc, char **argv) {
- if (!test_check_builtin("ssl")) {
- Test::Skip("Test requires SSL support\n");
- return 0;
- }
-
-
- /* Check that creating a client with an invalid PEM string fails. */
- const std::string props[] = {"ssl.ca.pem", "ssl.key.pem",
- "ssl.certificate.pem", ""};
-
- for (int i = 0; props[i] != ""; i++) {
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
- std::string errstr;
-
- if (conf->set("security.protocol", "SSL", errstr))
- Test::Fail(errstr);
- conf->set("debug", "security", errstr);
- if (conf->set(props[i], "this is \n not a \t PEM!", errstr))
-      Test::Fail("Setting " + props[i] +
-                 " to junk should succeed; failure is "
-                 "expected later, at client creation");
-
- RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
- delete conf;
- if (producer)
- Test::Fail("Expected producer creation to fail with " + props[i] +
- " set to junk");
- else
- Test::Say("Failed to create producer with junk " + props[i] +
- " (as expected): " + errstr + "\n");
- }
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp
deleted file mode 100644
index 1bdb46d0b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp
+++ /dev/null
@@ -1,1218 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "testcpp.h"
-
-#if WITH_RAPIDJSON
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include <assert.h>
-#include <sstream>
-#include <string>
-#include <map>
-
-#include <rapidjson/document.h>
-#include <rapidjson/schema.h>
-#include <rapidjson/filereadstream.h>
-#include <rapidjson/stringbuffer.h>
-#include <rapidjson/error/en.h>
-#include <rapidjson/prettywriter.h>
-
-
-/**
- * @name Consumer Transactions.
- *
- * - Uses the TransactionProducerCli Java application to produce messages
- * that are part of abort and commit transactions in various combinations
- * and tests that librdkafka consumes them as expected. Refer to
- * TransactionProducerCli.java for scenarios covered.
- */
-
-
-class TestEventCb : public RdKafka::EventCb {
- public:
- static bool should_capture_stats;
- static bool has_captured_stats;
- static int64_t partition_0_hi_offset;
- static int64_t partition_0_ls_offset;
- static std::string topic;
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_STATS:
- if (should_capture_stats) {
- partition_0_hi_offset = -1;
- partition_0_ls_offset = -1;
-
- has_captured_stats = true;
- should_capture_stats = false;
- char path[256];
-
- /* Parse JSON to validate */
- rapidjson::Document d;
- if (d.Parse(event.str().c_str()).HasParseError())
- Test::Fail(tostr() << "Failed to parse stats JSON: "
- << rapidjson::GetParseError_En(d.GetParseError())
- << " at " << d.GetErrorOffset());
-
- rd_snprintf(path, sizeof(path), "/topics/%s/partitions/0",
- topic.c_str());
-
- rapidjson::Pointer jpath((const char *)path);
- rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
- if (pp == NULL)
- return;
-
- TEST_ASSERT(pp->HasMember("hi_offset"), "hi_offset not found in stats");
- TEST_ASSERT(pp->HasMember("ls_offset"), "ls_offset not found in stats");
-
- partition_0_hi_offset = (*pp)["hi_offset"].GetInt();
- partition_0_ls_offset = (*pp)["ls_offset"].GetInt();
- }
- break;
-
- case RdKafka::Event::EVENT_LOG:
- std::cerr << event.str() << "\n";
- break;
-
- default:
- break;
- }
- }
-};
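-
-/* In the statistics JSON, hi_offset is the partition's high watermark and
- * ls_offset its last stable offset; while a transaction is left open the
- * two are expected to diverge, which is what the tests below assert on. */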
-
-bool TestEventCb::should_capture_stats;
-bool TestEventCb::has_captured_stats;
-int64_t TestEventCb::partition_0_hi_offset;
-int64_t TestEventCb::partition_0_ls_offset;
-std::string TestEventCb::topic;
-
-static TestEventCb ex_event_cb;
-
-
-static void execute_java_produce_cli(std::string &bootstrapServers,
- const std::string &topic,
- const std::string &testidstr,
- const char **cmds,
- size_t cmd_cnt) {
- const std::string topicCmd = "topic," + topic;
- const std::string testidCmd = "testid," + testidstr;
- const char **argv;
- size_t i = 0;
-
- argv = (const char **)rd_alloca(sizeof(*argv) * (1 + 1 + 1 + cmd_cnt + 1));
- argv[i++] = bootstrapServers.c_str();
- argv[i++] = topicCmd.c_str();
- argv[i++] = testidCmd.c_str();
-
- for (size_t j = 0; j < cmd_cnt; j++)
- argv[i++] = cmds[j];
-
- argv[i] = NULL;
-
- int pid = test_run_java("TransactionProducerCli", (const char **)argv);
- test_waitpid(pid);
-}
-
-static std::vector<RdKafka::Message *>
-consume_messages(RdKafka::KafkaConsumer *c, std::string topic, int partition) {
- RdKafka::ErrorCode err;
-
- /* Assign partitions */
- std::vector<RdKafka::TopicPartition *> parts;
- parts.push_back(RdKafka::TopicPartition::create(topic, partition));
- if ((err = c->assign(parts)))
- Test::Fail("assign failed: " + RdKafka::err2str(err));
- RdKafka::TopicPartition::destroy(parts);
-
- Test::Say(tostr() << "Consuming from topic " << topic << " partition "
- << partition << "\n");
- std::vector<RdKafka::Message *> result = std::vector<RdKafka::Message *>();
-
- while (true) {
- RdKafka::Message *msg = c->consume(tmout_multip(1000));
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- delete msg;
- continue;
- case RdKafka::ERR__PARTITION_EOF:
- delete msg;
- break;
- case RdKafka::ERR_NO_ERROR:
- result.push_back(msg);
- continue;
- default:
- Test::Fail("Error consuming from topic " + topic + ": " + msg->errstr());
- delete msg;
- break;
- }
- break;
- }
-
- Test::Say("Read all messages from topic: " + topic + "\n");
-
- TestEventCb::should_capture_stats = true;
-
- /* rely on the test timeout to prevent an infinite loop in
- * the (unlikely) event that the statistics callback isn't
- * called. */
- while (!TestEventCb::has_captured_stats) {
- RdKafka::Message *msg = c->consume(tmout_multip(500));
- delete msg;
- }
-
- Test::Say("Captured consumer statistics event\n");
-
- return result;
-}
-
-
-static void delete_messages(std::vector<RdKafka::Message *> &messages) {
- for (size_t i = 0; i < messages.size(); ++i)
- delete messages[i];
-}
-
-
-static std::string get_bootstrap_servers() {
- RdKafka::Conf *conf;
- std::string bootstrap_servers;
- Test::conf_init(&conf, NULL, 40);
- conf->get("bootstrap.servers", bootstrap_servers);
- delete conf;
- return bootstrap_servers;
-}
-
-
-static RdKafka::KafkaConsumer *create_consumer(std::string &topic_name,
- const char *isolation_level) {
- RdKafka::Conf *conf;
- std::string errstr;
-
- Test::conf_init(&conf, NULL, 40);
- Test::conf_set(conf, "group.id", topic_name);
- Test::conf_set(conf, "enable.auto.commit", "false");
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- Test::conf_set(conf, "enable.partition.eof", "true");
- Test::conf_set(conf, "isolation.level", isolation_level);
- Test::conf_set(conf, "statistics.interval.ms", "1000");
- conf->set("event_cb", &ex_event_cb, errstr);
- TestEventCb::should_capture_stats = false;
- TestEventCb::has_captured_stats = false;
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
-
- delete conf;
-
- return c;
-}
-
-
-static std::vector<std::string> csv_split(const std::string &input) {
- std::stringstream ss(input);
- std::vector<std::string> res;
-
- while (ss.good()) {
- std::string substr;
- std::getline(ss, substr, ',');
- /* Trim */
- substr.erase(0, substr.find_first_not_of(' '));
- substr.erase(substr.find_last_not_of(' ') + 1);
- res.push_back(substr);
- }
-
- return res;
-}
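-
-/* E.g. csv_split("producer1, -1, 0x0") yields {"producer1", "-1", "0x0"}:
- * the input is split on ',' and surrounding spaces are trimmed. */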
-
-
-
-enum TransactionType {
- TransactionType_None,
- TransactionType_BeginAbort,
- TransactionType_BeginCommit,
- TransactionType_BeginOpen,
- TransactionType_ContinueAbort,
- TransactionType_ContinueCommit,
- TransactionType_ContinueOpen
-};
-
-static TransactionType TransactionType_from_string(std::string str) {
-#define _CHKRET(NAME) \
- if (!str.compare(#NAME)) \
- return TransactionType_##NAME
-
- _CHKRET(None);
- _CHKRET(BeginAbort);
- _CHKRET(BeginCommit);
- _CHKRET(BeginOpen);
- _CHKRET(ContinueAbort);
- _CHKRET(ContinueCommit);
- _CHKRET(ContinueOpen);
-
- Test::Fail("Unknown TransactionType: " + str);
-
- return TransactionType_None; /* NOTREACHED */
-}
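-
-/* E.g. TransactionType_from_string("BeginCommit") returns
- * TransactionType_BeginCommit; any unknown name fails the test. */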
-
-
-static void txn_producer_makeTestMessages(RdKafka::Producer *producer,
- const std::string &topic,
- const std::string &testidstr,
- int partition,
- int idStart,
- int msgcount,
- TransactionType tt,
- bool do_flush) {
- RdKafka::Error *error;
-
- if (tt != TransactionType_None && tt != TransactionType_ContinueOpen &&
- tt != TransactionType_ContinueCommit &&
- tt != TransactionType_ContinueAbort) {
- error = producer->begin_transaction();
- if (error) {
- Test::Fail("begin_transaction() failed: " + error->str());
- delete error;
- }
- }
-
- for (int i = 0; i < msgcount; i++) {
- char key[] = {(char)((i + idStart) & 0xff)};
- char payload[] = {0x10, 0x20, 0x30, 0x40};
- RdKafka::ErrorCode err;
-
- err = producer->produce(topic, partition, producer->RK_MSG_COPY, payload,
- sizeof(payload), key, sizeof(key), 0, NULL);
- if (err)
- Test::Fail("produce() failed: " + RdKafka::err2str(err));
- }
-
- if (do_flush)
- producer->flush(-1);
-
- switch (tt) {
- case TransactionType_BeginAbort:
- case TransactionType_ContinueAbort:
- error = producer->abort_transaction(30 * 1000);
- if (error) {
- Test::Fail("abort_transaction() failed: " + error->str());
- delete error;
- }
- break;
-
- case TransactionType_BeginCommit:
- case TransactionType_ContinueCommit:
- error = producer->commit_transaction(30 * 1000);
- if (error) {
- Test::Fail("commit_transaction() failed: " + error->str());
- delete error;
- }
- break;
-
- default:
- break;
- }
-}
-
-
-class txnDeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
- void dr_cb(RdKafka::Message &msg) {
- switch (msg.err()) {
- case RdKafka::ERR__PURGE_QUEUE:
- case RdKafka::ERR__PURGE_INFLIGHT:
- /* These are expected when transactions are aborted */
- break;
-
- case RdKafka::ERR_NO_ERROR:
- break;
-
- default:
- Test::Fail("Delivery failed: " + msg.errstr());
- break;
- }
- }
-};
-
-
-/**
- * @brief Transactional producer, performing the commands in \p cmds.
- * This is the librdkafka counterpart of
- * java/TransactionProducerCli.java
- */
-static void txn_producer(const std::string &brokers,
- const std::string &topic,
- const std::string &testidstr,
- const char **cmds,
- size_t cmd_cnt) {
- RdKafka::Conf *conf;
- txnDeliveryReportCb txn_dr;
-
- Test::conf_init(&conf, NULL, 0);
- Test::conf_set(conf, "bootstrap.servers", brokers);
-
-
- std::map<std::string, RdKafka::Producer *> producers;
-
- for (size_t i = 0; i < cmd_cnt; i++) {
- std::string cmdstr = std::string(cmds[i]);
-
- Test::Say(_C_CLR "rdkafka txn producer command: " + cmdstr + "\n");
-
- std::vector<std::string> cmd = csv_split(cmdstr);
-
- if (!cmd[0].compare("sleep")) {
- rd_usleep(atoi(cmd[1].c_str()) * 1000, NULL);
-
- } else if (!cmd[0].compare("exit")) {
-      break; /* We can't really simulate the Java exit behaviour
-              * in-process. */
-
- } else if (cmd[0].find("producer") == 0) {
- TransactionType txntype = TransactionType_from_string(cmd[4]);
-
- std::map<std::string, RdKafka::Producer *>::iterator it =
- producers.find(cmd[0]);
-
- RdKafka::Producer *producer;
-
- if (it == producers.end()) {
- /* Create producer if it doesn't exist */
- std::string errstr;
-
- Test::Say(tostr() << "Creating producer " << cmd[0]
- << " with transactiontype " << txntype << " '"
- << cmd[4] << "'\n");
-
- /* Config */
- Test::conf_set(conf, "enable.idempotence", "true");
- if (txntype != TransactionType_None)
- Test::conf_set(conf, "transactional.id",
- "test-transactional-id-c-" + testidstr + "-" + cmd[0]);
- else
- Test::conf_set(conf, "transactional.id", "");
- Test::conf_set(conf, "linger.ms", "5"); /* ensure batching */
- conf->set("dr_cb", &txn_dr, errstr);
-
- /* Create producer */
- producer = RdKafka::Producer::create(conf, errstr);
- if (!producer)
- Test::Fail("Failed to create producer " + cmd[0] + ": " + errstr);
-
- /* Init transactions if producer is transactional */
- if (txntype != TransactionType_None) {
- RdKafka::Error *error = producer->init_transactions(20 * 1000);
- if (error) {
- Test::Fail("init_transactions() failed: " + error->str());
- delete error;
- }
- }
-
-
- producers[cmd[0]] = producer;
- } else {
- producer = it->second;
- }
-
- txn_producer_makeTestMessages(
- producer, /* producer */
- topic, /* topic */
- testidstr, /* testid */
- atoi(cmd[1].c_str()), /* partition */
- (int)strtol(cmd[2].c_str(), NULL, 0), /* idStart */
- atoi(cmd[3].c_str()), /* msg count */
- txntype, /* TransactionType */
- !cmd[5].compare("DoFlush") /* Flush */);
-
- } else {
- Test::Fail("Unknown command: " + cmd[0]);
- }
- }
-
- delete conf;
-
- for (std::map<std::string, RdKafka::Producer *>::iterator it =
- producers.begin();
- it != producers.end(); it++)
- delete it->second;
-}
-
-
-
-static void do_test_consumer_txn_test(bool use_java_producer) {
- std::string errstr;
- std::string topic_name;
- RdKafka::KafkaConsumer *c;
- std::vector<RdKafka::Message *> msgs;
- std::string testidstr = test_str_id_generate_tmp();
-
- std::string bootstrap_servers = get_bootstrap_servers();
-
- Test::Say(tostr() << _C_BLU "[ Consumer transaction tests using "
- << (use_java_producer ? "java" : "librdkafka")
- << " producer with testid " << testidstr << "]\n" _C_CLR);
-
-#define run_producer(CMDS...) \
- do { \
- const char *_cmds[] = {CMDS}; \
- size_t _cmd_cnt = sizeof(_cmds) / sizeof(*_cmds); \
- if (use_java_producer) \
- execute_java_produce_cli(bootstrap_servers, topic_name, testidstr, \
- _cmds, _cmd_cnt); \
- else \
- txn_producer(bootstrap_servers, topic_name, testidstr, _cmds, _cmd_cnt); \
- } while (0)
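-
-  /* Each producer command string has the form
-   *   "<producer name>, <partition>, <idStart>, <msg count>,
-   *    <TransactionType>, <DoFlush|DontFlush>"
-   * as parsed by txn_producer() above and mirrored by
-   * TransactionProducerCli.java for the Java producer. */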
-
- if (test_quick) {
- Test::Say("Skipping consumer_txn tests 0->4 due to quick mode\n");
- goto test5;
- }
-
-
- Test::Say(_C_BLU "Test 0 - basic commit + abort\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x0, 5, BeginCommit, DoFlush",
- "producer1, -1, 0x10, 5, BeginAbort, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 5,
- "Consumed unexpected number of messages. "
- "Expected 5, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
- c->close();
- delete c;
-
-#define expect_msgcnt(msgcnt) \
- TEST_ASSERT(msgs.size() == msgcnt, "Expected %d messages, got %d", \
-              msgcnt, (int)msgs.size())
-
-#define expect_key(msgidx, value) \
- do { \
- TEST_ASSERT(msgs.size() > msgidx, \
- "Expected at least %d message(s), only got %d", msgidx + 1, \
- (int)msgs.size()); \
- TEST_ASSERT(msgs[msgidx]->key_len() == 1, \
- "Expected msg #%d key to be of size 1, not %d\n", msgidx, \
- (int)msgs[msgidx]->key_len()); \
- TEST_ASSERT(value == (int)msgs[msgidx]->key()->c_str()[0], \
- "Expected msg #%d key 0x%x, not 0x%x", msgidx, value, \
- (int)msgs[msgidx]->key()->c_str()[0]); \
- } while (0)
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- expect_msgcnt(10);
- expect_key(0, 0x0);
- expect_key(4, 0x4);
- expect_key(5, 0x10);
- expect_key(9, 0x14);
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 0.1\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-0.1", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x0, 5, BeginCommit, DontFlush",
- "producer1, -1, 0x10, 5, BeginAbort, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 5,
- "Consumed unexpected number of messages. "
- "Expected 5, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 10,
- "Consumed unexpected number of messages. "
- "Expected 10, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x10 == msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x14 == msgs[9]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 0.2\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-0.2", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x10, 5, BeginAbort, DoFlush",
- "producer1, -1, 0x30, 5, BeginCommit, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 5,
- "Consumed unexpected number of messages. "
- "Expected 5, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x30 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x34 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 10,
- "Consumed unexpected number of messages. "
- "Expected 10, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x30 == msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x34 == msgs[9]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 1 - mixed with non-transactional.\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-1", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
- TestEventCb::topic = topic_name;
-
- run_producer("producer3, -1, 0x10, 5, None, DoFlush",
- "producer1, -1, 0x50, 5, BeginCommit, DoFlush",
- "producer1, -1, 0x80, 5, BeginAbort, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
-
- TEST_ASSERT(TestEventCb::partition_0_ls_offset != -1 &&
- TestEventCb::partition_0_ls_offset ==
- TestEventCb::partition_0_hi_offset,
- "Expected hi_offset to equal ls_offset but "
- "got hi_offset: %" PRId64 ", ls_offset: %" PRId64,
- TestEventCb::partition_0_hi_offset,
- TestEventCb::partition_0_ls_offset);
-
- TEST_ASSERT(msgs.size() == 10,
- "Consumed unexpected number of messages. "
- "Expected 10, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x50 == msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x54 == msgs[9]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
- Test::Say(_C_BLU "Test 1.1\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-1.1", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x30, 5, BeginAbort, DoFlush",
- "producer3, -1, 0x40, 5, None, DoFlush",
- "producer1, -1, 0x60, 5, BeginCommit, DoFlush");
-
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 10,
- "Consumed unexpected number of messages. "
- "Expected 10, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x40 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x44 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x60 == msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x64 == msgs[9]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 1.2\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-1.2", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x10, 5, BeginCommit, DoFlush",
- "producer1, -1, 0x20, 5, BeginAbort, DoFlush",
- "producer3, -1, 0x30, 5, None, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 10,
- "Consumed unexpected number of messages. "
- "Expected 10, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x30 == msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x34 == msgs[9]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 2 - rapid abort / committing.\n" _C_CLR);
- // note: aborted records never seem to make it to the broker when not flushed.
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-2", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x10, 1, BeginAbort, DontFlush",
- "producer1, -1, 0x20, 1, BeginCommit, DontFlush",
- "producer1, -1, 0x30, 1, BeginAbort, DontFlush",
- "producer1, -1, 0x40, 1, BeginCommit, DontFlush",
- "producer1, -1, 0x50, 1, BeginAbort, DontFlush",
- "producer1, -1, 0x60, 1, BeginCommit, DontFlush",
- "producer1, -1, 0x70, 1, BeginAbort, DontFlush",
- "producer1, -1, 0x80, 1, BeginCommit, DontFlush",
- "producer1, -1, 0x90, 1, BeginAbort, DontFlush",
- "producer1, -1, 0xa0, 1, BeginCommit, DoFlush",
- "producer3, -1, 0xb0, 1, None, DontFlush",
- "producer3, -1, 0xc0, 1, None, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 7,
- "Consumed unexpected number of messages. "
- "Expected 7, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 &&
- 0x20 == (unsigned char)msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[1]->key_len() >= 1 &&
- 0x40 == (unsigned char)msgs[1]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[2]->key_len() >= 1 &&
- 0x60 == (unsigned char)msgs[2]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[3]->key_len() >= 1 &&
- 0x80 == (unsigned char)msgs[3]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 &&
- 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 &&
- 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[6]->key_len() >= 1 &&
- 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 2.1\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-2.1", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, -1, 0x10, 1, BeginAbort, DoFlush",
- "producer1, -1, 0x20, 1, BeginCommit, DoFlush",
- "producer1, -1, 0x30, 1, BeginAbort, DoFlush",
- "producer1, -1, 0x40, 1, BeginCommit, DoFlush",
- "producer1, -1, 0x50, 1, BeginAbort, DoFlush",
- "producer1, -1, 0x60, 1, BeginCommit, DoFlush",
- "producer1, -1, 0x70, 1, BeginAbort, DoFlush",
- "producer1, -1, 0x80, 1, BeginCommit, DoFlush",
- "producer1, -1, 0x90, 1, BeginAbort, DoFlush",
- "producer1, -1, 0xa0, 1, BeginCommit, DoFlush",
- "producer3, -1, 0xb0, 1, None, DoFlush",
- "producer3, -1, 0xc0, 1, None, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 7,
- "Consumed unexpected number of messages. "
- "Expected 7, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 &&
- 0x20 == (unsigned char)msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[1]->key_len() >= 1 &&
- 0x40 == (unsigned char)msgs[1]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[2]->key_len() >= 1 &&
- 0x60 == (unsigned char)msgs[2]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[3]->key_len() >= 1 &&
- 0x80 == (unsigned char)msgs[3]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 &&
- 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 &&
- 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[6]->key_len() >= 1 &&
- 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 12,
- "Consumed unexpected number of messages. "
- "Expected 12, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 &&
- 0x10 == (unsigned char)msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[1]->key_len() >= 1 &&
- 0x20 == (unsigned char)msgs[1]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[2]->key_len() >= 1 &&
- 0x30 == (unsigned char)msgs[2]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[3]->key_len() >= 1 &&
- 0x40 == (unsigned char)msgs[3]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 &&
- 0x50 == (unsigned char)msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 &&
- 0x60 == (unsigned char)msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[6]->key_len() >= 1 &&
- 0x70 == (unsigned char)msgs[6]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 3 - cross partition (simple).\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-3", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 2, 3);
-
- run_producer("producer1, 0, 0x10, 3, BeginOpen, DoFlush",
- "producer1, 1, 0x20, 3, ContinueOpen, DoFlush",
- "producer1, 0, 0x30, 3, ContinueCommit, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 6,
- "Consumed unexpected number of messages. "
- "Expected 6, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- msgs = consume_messages(c, topic_name, 1);
- TEST_ASSERT(msgs.size() == 3,
- "Consumed unexpected number of messages. "
- "Expected 3, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 6,
- "Consumed unexpected number of messages. "
- "Expected 6, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- msgs = consume_messages(c, topic_name, 1);
- TEST_ASSERT(msgs.size() == 3,
- "Consumed unexpected number of messages. "
- "Expected 3, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 3.1\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-3.1", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 2, 3);
-
- run_producer("producer1, 0, 0x55, 1, BeginCommit, DoFlush",
- "producer1, 0, 0x10, 3, BeginOpen, DoFlush",
- "producer1, 1, 0x20, 3, ContinueOpen, DoFlush",
- "producer1, 0, 0x30, 3, ContinueAbort, DoFlush",
- "producer3, 0, 0x00, 1, None, DoFlush",
- "producer1, 1, 0x44, 1, BeginCommit, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 2,
- "Consumed unexpected number of messages. "
- "Expected 2, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 &&
- 0x55 == (unsigned char)msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[1]->key_len() >= 1 &&
- 0x00 == (unsigned char)msgs[1]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
- msgs = consume_messages(c, topic_name, 1);
- TEST_ASSERT(msgs.size() == 1,
- "Consumed unexpected number of messages. "
- "Expected 1, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 &&
- 0x44 == (unsigned char)msgs[0]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 4 - simultaneous transactions (simple).\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-4", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer3, 0, 0x10, 1, None, DoFlush",
- "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
- "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
- "producer1, 0, 0x40, 3, ContinueCommit, DoFlush",
- "producer2, 0, 0x50, 3, ContinueAbort, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 7,
- "Consumed unexpected number of messages. "
- "Expected 7, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 13,
- "Consumed unexpected number of messages. "
- "Expected 13, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 4.1\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-4.1", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer3, 0, 0x10, 1, None, DoFlush",
- "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
- "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
- "producer1, 0, 0x40, 3, ContinueAbort, DoFlush",
- "producer2, 0, 0x50, 3, ContinueCommit, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 7,
- "Consumed unexpected number of messages. "
- "Expected 7, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 13,
- "Consumed unexpected number of messages. "
- "Expected 13, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 4.2\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-4.2", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer3, 0, 0x10, 1, None, DoFlush",
- "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
- "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
- "producer1, 0, 0x40, 3, ContinueCommit, DoFlush",
- "producer2, 0, 0x50, 3, ContinueCommit, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 13,
- "Consumed unexpected number of messages. "
-              "Expected 13, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 13,
- "Consumed unexpected number of messages. "
- "Expected 13, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 4.3\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-4.3", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer3, 0, 0x10, 1, None, DoFlush",
- "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
- "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
- "producer1, 0, 0x40, 3, ContinueAbort, DoFlush",
- "producer2, 0, 0x50, 3, ContinueAbort, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 1,
- "Consumed unexpected number of messages. "
-              "Expected 1, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
- c->close();
- delete c;
-
- c = create_consumer(topic_name, "READ_UNCOMMITTED");
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 13,
- "Consumed unexpected number of messages. "
- "Expected 13, got: %d",
- (int)msgs.size());
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
-
- Test::Say(_C_BLU "Test 5 - split transaction across message sets.\n" _C_CLR);
-
-test5:
- topic_name = Test::mk_topic_name("0098-consumer_txn-5", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
-
- run_producer("producer1, 0, 0x10, 2, BeginOpen, DontFlush", "sleep,200",
- "producer1, 0, 0x20, 2, ContinueAbort, DontFlush",
- "producer1, 0, 0x30, 2, BeginOpen, DontFlush", "sleep,200",
- "producer1, 0, 0x40, 2, ContinueCommit, DontFlush",
- "producer1, 0, 0x50, 2, BeginOpen, DontFlush", "sleep,200",
- "producer1, 0, 0x60, 2, ContinueAbort, DontFlush",
- "producer1, 0, 0xa0, 2, BeginOpen, DontFlush", "sleep,200",
- "producer1, 0, 0xb0, 2, ContinueCommit, DontFlush",
- "producer3, 0, 0x70, 1, None, DoFlush");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 9,
- "Consumed unexpected number of messages. "
- "Expected 9, got: %d",
- (int)msgs.size());
- TEST_ASSERT(msgs[0]->key_len() >= 1 &&
- 0x30 == (unsigned char)msgs[0]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[1]->key_len() >= 1 &&
- 0x31 == (unsigned char)msgs[1]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[2]->key_len() >= 1 &&
- 0x40 == (unsigned char)msgs[2]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[3]->key_len() >= 1 &&
- 0x41 == (unsigned char)msgs[3]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[4]->key_len() >= 1 &&
- 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[5]->key_len() >= 1 &&
- 0xa1 == (unsigned char)msgs[5]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[6]->key_len() >= 1 &&
- 0xb0 == (unsigned char)msgs[6]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[7]->key_len() >= 1 &&
- 0xb1 == (unsigned char)msgs[7]->key()->c_str()[0],
- "Unexpected key");
- TEST_ASSERT(msgs[8]->key_len() >= 1 &&
- 0x70 == (unsigned char)msgs[8]->key()->c_str()[0],
- "Unexpected key");
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-
-
- Test::Say(_C_BLU "Test 6 - transaction left open\n" _C_CLR);
-
- topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1);
- c = create_consumer(topic_name, "READ_COMMITTED");
- Test::create_topic(c, topic_name.c_str(), 1, 3);
- TestEventCb::topic = topic_name;
-
- run_producer("producer3, 0, 0x10, 1, None, DoFlush",
- "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
- // prevent abort control message from being written.
- "exit,0");
-
- msgs = consume_messages(c, topic_name, 0);
- TEST_ASSERT(msgs.size() == 1,
- "Consumed unexpected number of messages. "
- "Expected 1, got: %d",
- (int)msgs.size());
-
- TEST_ASSERT(TestEventCb::partition_0_ls_offset + 3 ==
- TestEventCb::partition_0_hi_offset,
- "Expected hi_offset to be 3 greater than ls_offset "
- "but got hi_offset: %" PRId64 ", ls_offset: %" PRId64,
- TestEventCb::partition_0_hi_offset,
- TestEventCb::partition_0_ls_offset);
-
- delete_messages(msgs);
-
- Test::delete_topic(c, topic_name.c_str());
-
- c->close();
- delete c;
-}
-#endif
-
-
-extern "C" {
-int main_0098_consumer_txn(int argc, char **argv) {
- if (test_needs_auth()) {
- Test::Skip(
- "Authentication or security configuration "
- "required on client: not supported in "
- "Java transactional producer: skipping tests\n");
- return 0;
- }
-#if WITH_RAPIDJSON
- do_test_consumer_txn_test(true /* with java producer */);
- do_test_consumer_txn_test(false /* with librdkafka producer */);
-#else
- Test::Skip("RapidJSON >=1.1.0 not available\n");
-#endif
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c
deleted file mode 100644
index 902849fb2..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-static RD_UNUSED void
-print_toppar_list(const rd_kafka_topic_partition_list_t *list) {
- int i;
-
- TEST_SAY("List count: %d\n", list->cnt);
-
- for (i = 0; i < list->cnt; i++) {
- const rd_kafka_topic_partition_t *a = &list->elems[i];
-
- TEST_SAY(
- " #%d/%d: "
- "%s [%" PRId32 "] @ %" PRId64
- ": "
-		    "(%" PRIusz ") \"%.*s\"\n",
- i, list->cnt, a->topic, a->partition, a->offset,
- a->metadata_size, (int)a->metadata_size,
- (const char *)a->metadata);
- }
-}
-
-
-static void compare_toppar_lists(const rd_kafka_topic_partition_list_t *lista,
- const rd_kafka_topic_partition_list_t *listb) {
- int i;
-
- TEST_ASSERT(lista->cnt == listb->cnt,
- "different list lengths: %d != %d", lista->cnt, listb->cnt);
-
- for (i = 0; i < lista->cnt; i++) {
- const rd_kafka_topic_partition_t *a = &lista->elems[i];
- const rd_kafka_topic_partition_t *b = &listb->elems[i];
-
- if (a->offset != b->offset ||
- a->metadata_size != b->metadata_size ||
- memcmp(a->metadata, b->metadata, a->metadata_size))
- TEST_FAIL_LATER(
- "Lists did not match at element %d/%d:\n"
- " a: %s [%" PRId32 "] @ %" PRId64
- ": "
- "(%" PRIusz
-			    ") \"%.*s\"\n"
- " b: %s [%" PRId32 "] @ %" PRId64
- ": "
-			    "(%" PRIusz ") \"%.*s\"",
- i, lista->cnt, a->topic, a->partition, a->offset,
- a->metadata_size, (int)a->metadata_size,
- (const char *)a->metadata, b->topic, b->partition,
- b->offset, b->metadata_size, (int)b->metadata_size,
- (const char *)b->metadata);
- }
-
- TEST_LATER_CHECK();
-}
-
-
-static int commit_cb_cnt = 0;
-
-static void offset_commit_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *list,
- void *opaque) {
- commit_cb_cnt++;
- TEST_ASSERT(!err, "offset_commit_cb failure: %s",
- rd_kafka_err2str(err));
-}
-
-
-static void
-commit_metadata(const char *group_id,
- const rd_kafka_topic_partition_list_t *toppar_to_commit) {
- rd_kafka_resp_err_t err;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, NULL, 20 /*timeout*/);
-
- test_conf_set(conf, "group.id", group_id);
-
- rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- TEST_SAY("Committing:\n");
- print_toppar_list(toppar_to_commit);
-
- err = rd_kafka_commit(rk, toppar_to_commit, 0);
- TEST_ASSERT(!err, "rd_kafka_commit failed: %s", rd_kafka_err2str(err));
-
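-	/* The commit above is synchronous (async=0), but the
-	 * offset_commit_cb is still served from poll(), so poll until it
-	 * has fired once. */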
- while (commit_cb_cnt == 0)
- rd_kafka_poll(rk, 1000);
-
- rd_kafka_destroy(rk);
-}
-
-
-static void
-get_committed_metadata(const char *group_id,
- const rd_kafka_topic_partition_list_t *toppar_to_check,
- const rd_kafka_topic_partition_list_t *expected_toppar) {
- rd_kafka_resp_err_t err;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_partition_list_t *committed_toppar;
-
- test_conf_init(&conf, NULL, 20 /*timeout*/);
-
- test_conf_set(conf, "group.id", group_id);
-
- committed_toppar = rd_kafka_topic_partition_list_copy(toppar_to_check);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- err = rd_kafka_committed(rk, committed_toppar, tmout_multip(5000));
- TEST_ASSERT(!err, "rd_kafka_committed failed: %s",
- rd_kafka_err2str(err));
-
- compare_toppar_lists(committed_toppar, expected_toppar);
-
- rd_kafka_topic_partition_list_destroy(committed_toppar);
-
- rd_kafka_destroy(rk);
-}
-
-int main_0099_commit_metadata(int argc, char **argv) {
- rd_kafka_topic_partition_list_t *origin_toppar;
- rd_kafka_topic_partition_list_t *expected_toppar;
- const char *topic = test_mk_topic_name("0099-commit_metadata", 0);
- char group_id[16];
-
- test_conf_init(NULL, NULL, 20 /*timeout*/);
-
- test_str_id_generate(group_id, sizeof(group_id));
-
- test_create_topic(NULL, topic, 1, 1);
-
- origin_toppar = rd_kafka_topic_partition_list_new(1);
-
- rd_kafka_topic_partition_list_add(origin_toppar, topic, 0);
-
- expected_toppar = rd_kafka_topic_partition_list_copy(origin_toppar);
-
- expected_toppar->elems[0].offset = 42;
- expected_toppar->elems[0].metadata = rd_strdup("Hello world!");
- expected_toppar->elems[0].metadata_size =
- strlen(expected_toppar->elems[0].metadata);
-
- get_committed_metadata(group_id, origin_toppar, origin_toppar);
-
- commit_metadata(group_id, expected_toppar);
-
- get_committed_metadata(group_id, origin_toppar, expected_toppar);
-
- rd_kafka_topic_partition_list_destroy(origin_toppar);
- rd_kafka_topic_partition_list_destroy(expected_toppar);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp
deleted file mode 100644
index a34ccac98..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include "testcpp.h"
-
-extern "C" {
-#include "rdkafka.h" /* For interceptor interface */
-#include "../src/tinycthread.h" /* For mutexes */
-}
-
-class myThreadCb {
- public:
- myThreadCb() : startCnt_(0), exitCnt_(0) {
- mtx_init(&lock_, mtx_plain);
- }
- ~myThreadCb() {
- mtx_destroy(&lock_);
- }
- int startCount() {
- int cnt;
- mtx_lock(&lock_);
- cnt = startCnt_;
- mtx_unlock(&lock_);
- return cnt;
- }
- int exitCount() {
- int cnt;
- mtx_lock(&lock_);
- cnt = exitCnt_;
- mtx_unlock(&lock_);
- return cnt;
- }
- virtual void thread_start_cb(const char *threadname) {
- Test::Say(tostr() << "Started thread: " << threadname << "\n");
- mtx_lock(&lock_);
- startCnt_++;
- mtx_unlock(&lock_);
- }
- virtual void thread_exit_cb(const char *threadname) {
- Test::Say(tostr() << "Exiting from thread: " << threadname << "\n");
- mtx_lock(&lock_);
- exitCnt_++;
- mtx_unlock(&lock_);
- }
-
- private:
- int startCnt_;
- int exitCnt_;
- mtx_t lock_;
-};
-
-
-/**
- * @brief C to C++ callback trampoline.
- */
-static rd_kafka_resp_err_t on_thread_start_trampoline(
- rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type,
- const char *threadname,
- void *ic_opaque) {
- myThreadCb *threadcb = (myThreadCb *)ic_opaque;
-
- Test::Say(tostr() << "on_thread_start(" << thread_type << ", " << threadname
- << ") called\n");
-
- threadcb->thread_start_cb(threadname);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief C to C++ callback trampoline.
- */
-static rd_kafka_resp_err_t on_thread_exit_trampoline(
- rd_kafka_t *rk,
- rd_kafka_thread_type_t thread_type,
- const char *threadname,
- void *ic_opaque) {
- myThreadCb *threadcb = (myThreadCb *)ic_opaque;
-
- Test::Say(tostr() << "on_thread_exit(" << thread_type << ", " << threadname
- << ") called\n");
-
- threadcb->thread_exit_cb(threadname);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief This interceptor is called when a new client instance is created
- * prior to any threads being created.
- * We use it to set up the instance's thread interceptors.
- */
-static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- Test::Say("on_new() interceptor called\n");
- rd_kafka_interceptor_add_on_thread_start(
- rk, "test:0100", on_thread_start_trampoline, ic_opaque);
- rd_kafka_interceptor_add_on_thread_exit(rk, "test:0100",
- on_thread_exit_trampoline, ic_opaque);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief The on_conf_dup() interceptor lets us add the on_new interceptor
- * in case the config object is copied, since interceptors are not
- * automatically copied.
- */
-static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf,
- const rd_kafka_conf_t *old_conf,
- size_t filter_cnt,
- const char **filter,
- void *ic_opaque) {
- Test::Say("on_conf_dup() interceptor called\n");
- return rd_kafka_conf_interceptor_add_on_new(new_conf, "test:0100", on_new,
- ic_opaque);
-}
-
-
-
-static void test_thread_cbs() {
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
- std::string errstr;
- rd_kafka_conf_t *c_conf;
- myThreadCb my_threads;
-
- Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1");
-
-  /* Interceptors are not supported in the C++ API; instead use the C API:
- * 1. Extract the C conf_t object
- * 2. Set up an on_new() interceptor
- * 3. Set up an on_conf_dup() interceptor to add interceptors in the
- * case the config object is copied (which the C++ Conf always does).
- * 4. In the on_new() interceptor, add the thread interceptors. */
- c_conf = conf->c_ptr_global();
- rd_kafka_conf_interceptor_add_on_new(c_conf, "test:0100", on_new,
- &my_threads);
- rd_kafka_conf_interceptor_add_on_conf_dup(c_conf, "test:0100", on_conf_dup,
- &my_threads);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- p->poll(500);
- delete conf;
- delete p;
-
- Test::Say(tostr() << my_threads.startCount() << " thread start calls, "
- << my_threads.exitCount() << " thread exit calls seen\n");
-
- /* 3 = rdkafka main thread + internal broker + bootstrap broker */
- if (my_threads.startCount() < 3)
- Test::Fail("Did not catch enough thread start callback calls");
- if (my_threads.exitCount() < 3)
- Test::Fail("Did not catch enough thread exit callback calls");
- if (my_threads.startCount() != my_threads.exitCount())
- Test::Fail("Did not catch same number of start and exit callback calls");
-}
-
-
-extern "C" {
-int main_0100_thread_interceptors(int argc, char **argv) {
- test_thread_cbs();
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp
deleted file mode 100644
index 342ec4f8f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "testcpp.h"
-
-#if WITH_RAPIDJSON
-
-#include <iostream>
-#include <cstring>
-#include <cstdlib>
-#include <assert.h>
-#include <sstream>
-#include <string>
-#include <map>
-#include <set>
-#include "rdkafka.h"
-
-#include <rapidjson/document.h>
-#include <rapidjson/schema.h>
-#include <rapidjson/filereadstream.h>
-#include <rapidjson/stringbuffer.h>
-#include <rapidjson/error/en.h>
-#include <rapidjson/prettywriter.h>
-
-
-/**
- * @brief A basic test of fetch-from-follower functionality:
- * - produces a bunch of messages to a replicated topic.
- * - configures the consumer such that `client.rack` differs from the
- * broker's `broker.rack` (and uses
- * org.apache.kafka.common.replica.RackAwareReplicaSelector).
- * - consumes the messages and checks they are as expected.
- * - uses rxbytes from the statistics event to confirm that the
- * messages were retrieved from the replica broker (not the leader).
- */
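-
-/* Condensed sketch (added for illustration, not part of the original test):
- * the client side of fetch-from-follower is a single configuration
- * property. Assuming the brokers run with `broker.rack` set and
- * replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector,
- * a consumer opts in via `client.rack`; the group id and rack id below
- * are hypothetical. */
-static rd_kafka_t *sketch_create_rack_aware_consumer(void) {
- rd_kafka_conf_t *conf = rd_kafka_conf_new();
- char errstr[512];
-
- rd_kafka_conf_set(conf, "group.id", "sketch-group", errstr, sizeof(errstr));
- /* Fetches are served by the replica whose broker.rack matches
- * client.rack when the broker-side selector allows it; otherwise by
- * the leader. */
- rd_kafka_conf_set(conf, "client.rack", "RACK2", errstr, sizeof(errstr));
-
- /* On success rd_kafka_new() takes ownership of conf. */
- return rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
-}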
-
-
-/* Note: unlike a real assert this only logs the failure,
- * it does not abort the test. */
-#define test_assert(cond, msg) \
- do { \
- if (!(cond)) \
- Test::Say(msg); \
- } while (0)
-
-
-class TestEvent2Cb : public RdKafka::EventCb {
- public:
- static bool should_capture_stats;
- static bool has_captured_stats;
- static std::map<int32_t, int64_t> rxbytes;
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_LOG:
- Test::Say(event.str() + "\n");
- break;
- case RdKafka::Event::EVENT_STATS:
- if (should_capture_stats) {
- rapidjson::Document d;
- if (d.Parse(event.str().c_str()).HasParseError())
- Test::Fail(tostr() << "Failed to parse stats JSON: "
- << rapidjson::GetParseError_En(d.GetParseError())
- << " at " << d.GetErrorOffset());
-
- /* iterate over brokers. */
- rapidjson::Pointer jpath((const char *)"/brokers");
- rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
- if (pp == NULL)
- return;
-
- for (rapidjson::Value::ConstMemberIterator itr = pp->MemberBegin();
- itr != pp->MemberEnd(); ++itr) {
- std::string broker_name = itr->name.GetString();
- size_t broker_id_idx = broker_name.rfind('/');
- if (broker_id_idx == std::string::npos)
- continue;
- std::string broker_id = broker_name.substr(
- broker_id_idx + 1, broker_name.size() - broker_id_idx - 1);
-
- int64_t broker_rxbytes =
- itr->value.FindMember("rxbytes")->value.GetInt64();
- rxbytes[atoi(broker_id.c_str())] = broker_rxbytes;
- }
-
- has_captured_stats = true;
- break;
- }
- default:
- break;
- }
- }
-};
-
-bool TestEvent2Cb::should_capture_stats;
-bool TestEvent2Cb::has_captured_stats;
-std::map<int32_t, int64_t> TestEvent2Cb::rxbytes;
-static TestEvent2Cb ex_event_cb;
-
-
-static void get_brokers_info(std::string &topic_str,
- int32_t *leader,
- std::vector<int> &brokers) {
- std::string errstr;
- RdKafka::ErrorCode err;
- class RdKafka::Metadata *metadata;
-
- /* Determine the ids of the brokers that the partition has replicas
- * on and which one of those is the leader.
- */
- RdKafka::Conf *pConf;
- Test::conf_init(&pConf, NULL, 10);
- RdKafka::Producer *p = RdKafka::Producer::create(pConf, errstr);
- delete pConf;
- test_assert(p, tostr() << "Failed to create producer: " << errstr);
-
- RdKafka::Topic *topic = RdKafka::Topic::create(p, topic_str, NULL, errstr);
- test_assert(topic, tostr() << "Failed to create topic: " << errstr);
-
- err = p->metadata(0, topic, &metadata, tmout_multip(5000));
- test_assert(
- err == RdKafka::ERR_NO_ERROR,
- tostr() << "%% Failed to acquire metadata: " << RdKafka::err2str(err));
-
- test_assert(metadata->topics()->size() == 1,
- tostr() << "expecting metadata for exactly one topic. "
- << "have metadata for " << metadata->topics()->size()
- << "topics");
-
- RdKafka::Metadata::TopicMetadataIterator topicMetadata =
- metadata->topics()->begin();
- RdKafka::TopicMetadata::PartitionMetadataIterator partitionMetadata =
- (*topicMetadata)->partitions()->begin();
-
- *leader = (*partitionMetadata)->leader();
-
- RdKafka::PartitionMetadata::ReplicasIterator replicasIterator;
- for (replicasIterator = (*partitionMetadata)->replicas()->begin();
- replicasIterator != (*partitionMetadata)->replicas()->end();
- ++replicasIterator) {
- brokers.push_back(*replicasIterator);
- }
-
- delete metadata;
- delete topic;
- delete p;
-}
-
-
-/**
- * @brief Wait for up to \p tmout for any type of admin result.
- * @returns the event
- */
-rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
- rd_kafka_event_type_t evtype,
- int tmout) {
- rd_kafka_event_t *rkev;
-
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout);
- if (!rkev)
- Test::Fail(tostr() << "Timed out waiting for admin result (" << evtype
- << ")\n");
-
- if (rd_kafka_event_type(rkev) == evtype)
- return rkev;
-
- if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) {
- Test::Say(tostr() << "Received error event while waiting for " << evtype
- << ": " << rd_kafka_event_error_string(rkev)
- << ": ignoring");
- rd_kafka_event_destroy(rkev);
- continue;
- }
-
- test_assert(rd_kafka_event_type(rkev) == evtype,
- tostr() << "Expected event type " << evtype << ", got "
- << rd_kafka_event_type(rkev) << " ("
- << rd_kafka_event_name(rkev) << ")");
- /* Not the event we were waiting for: release it and keep polling. */
- rd_kafka_event_destroy(rkev);
-
- return NULL;
-}
-
-
-/**
- * @returns the number of broker.rack values configured across all brokers.
- */
-static int get_broker_rack_count(std::vector<int> &replica_ids) {
- std::string errstr;
- RdKafka::Conf *pConf;
- Test::conf_init(&pConf, NULL, 10);
- RdKafka::Producer *p = RdKafka::Producer::create(pConf, errstr);
- delete pConf;
-
- rd_kafka_queue_t *mainq = rd_kafka_queue_get_main(p->c_ptr());
-
- std::set<std::string> racks;
- for (size_t i = 0; i < replica_ids.size(); ++i) {
- std::string name = tostr() << replica_ids[i];
-
- rd_kafka_ConfigResource_t *config =
- rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_BROKER, &name[0]);
-
- rd_kafka_AdminOptions_t *options;
- char cerrstr[128];
- options = rd_kafka_AdminOptions_new(p->c_ptr(), RD_KAFKA_ADMIN_OP_ANY);
- rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout(
- options, 10000, cerrstr, sizeof(cerrstr));
- test_assert(!err, cerrstr);
-
- rd_kafka_DescribeConfigs(p->c_ptr(), &config, 1, options, mainq);
- rd_kafka_ConfigResource_destroy(config);
- rd_kafka_AdminOptions_destroy(options);
- rd_kafka_event_t *rkev = test_wait_admin_result(
- mainq, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 5000);
-
- const rd_kafka_DescribeConfigs_result_t *res =
- rd_kafka_event_DescribeConfigs_result(rkev);
- test_assert(res, "expecting describe config results to be not NULL");
-
- err = rd_kafka_event_error(rkev);
- const char *errstr2 = rd_kafka_event_error_string(rkev);
- test_assert(!err, tostr() << "Expected success, not "
- << rd_kafka_err2name(err) << ": " << errstr2);
-
- size_t rconfig_cnt;
- const rd_kafka_ConfigResource_t **rconfigs =
- rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
- test_assert(rconfig_cnt == 1,
- tostr() << "Expecting 1 resource, got " << rconfig_cnt);
-
- err = rd_kafka_ConfigResource_error(rconfigs[0]);
- errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[0]);
-
- size_t entry_cnt;
- const rd_kafka_ConfigEntry_t **entries =
- rd_kafka_ConfigResource_configs(rconfigs[0], &entry_cnt);
-
- for (size_t j = 0; j < entry_cnt; ++j) {
- const rd_kafka_ConfigEntry_t *e = entries[j];
- const char *cname = rd_kafka_ConfigEntry_name(e);
- if (!strcmp(cname, "broker.rack")) {
- const char *val = rd_kafka_ConfigEntry_value(e)
- ? rd_kafka_ConfigEntry_value(e)
- : "(NULL)";
- racks.insert(std::string(val));
- }
- }
-
- rd_kafka_event_destroy(rkev);
- }
-
- rd_kafka_queue_destroy(mainq);
- delete p;
-
- return (int)racks.size();
-}
-
-
-static void do_fff_test(void) {
- /* Produce some messages to a single partition topic
- * with 3 replicas.
- */
- int msgcnt = 1000;
- const int msgsize = 100;
- std::string topic_str = Test::mk_topic_name("0101-fetch-from-follower", 1);
- test_create_topic(NULL, topic_str.c_str(), 1, 3);
- test_produce_msgs_easy_size(topic_str.c_str(), 0, 0, msgcnt, msgsize);
-
- int leader_id;
- std::vector<int> replica_ids;
- get_brokers_info(topic_str, &leader_id, replica_ids);
- test_assert(replica_ids.size() == 3,
- tostr() << "expecting three replicas, but " << replica_ids.size()
- << " were reported.");
- Test::Say(tostr() << topic_str << " leader id: " << leader_id
- << ", all replica ids: [" << replica_ids[0] << ", "
- << replica_ids[1] << ", " << replica_ids[2] << "]\n");
-
- if (get_broker_rack_count(replica_ids) != 3) {
- Test::Skip("unexpected broker.rack configuration: skipping test.\n");
- return;
- }
-
- /* arrange for the consumer's client.rack to align with a broker that is not
- * the leader. */
- int client_rack_id = -1;
- size_t i;
- for (i = 0; i < replica_ids.size(); ++i) {
- if (replica_ids[i] != leader_id) {
- client_rack_id = replica_ids[i];
- break;
- }
- }
-
- std::string client_rack = tostr() << "RACK" << client_rack_id;
- Test::Say("client.rack: " + client_rack + "\n");
-
- std::string errstr;
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 10);
- Test::conf_set(conf, "group.id", topic_str);
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- Test::conf_set(conf, "enable.auto.commit", "false");
- Test::conf_set(conf, "statistics.interval.ms", "1000");
- conf->set("event_cb", &ex_event_cb, errstr);
- Test::conf_set(conf, "client.rack", client_rack);
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- test_assert(c, "Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Subscribe */
- std::vector<std::string> topics;
- topics.push_back(topic_str);
- RdKafka::ErrorCode err;
- if ((err = c->subscribe(topics)))
- Test::Fail("subscribe failed: " + RdKafka::err2str(err));
-
- /* Start consuming */
- Test::Say("Consuming topic " + topic_str + "\n");
- int cnt = 0;
- while (cnt < msgcnt) {
- RdKafka::Message *msg = c->consume(tmout_multip(1000));
-
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- break;
-
- case RdKafka::ERR_NO_ERROR: {
- test_assert(msg->len() == 100, "expecting message value size to be 100");
- char *cnt_str_start_ptr = strstr((char *)msg->payload(), "msg=");
- test_assert(cnt_str_start_ptr, "expecting 'msg=' in message payload");
- cnt_str_start_ptr += 4; /* skip past "msg=" */
- char *cnt_str_end_ptr = strstr(cnt_str_start_ptr, "\n");
- test_assert(cnt_str_end_ptr,
- "expecting '\n' following 'msg=' in message payload");
- *cnt_str_end_ptr = '\0';
- int msg_cnt = atoi(cnt_str_start_ptr);
- test_assert(msg_cnt == cnt, "message consumed out of order");
- cnt++;
- } break;
-
- default:
- Test::Fail("Consume error: " + msg->errstr());
- break;
- }
-
- delete msg;
- }
-
- /* rely on the test timeout to prevent an infinite loop in
- * the (unlikely) event that the statistics callback isn't
- * called. */
- Test::Say("Capturing rxbytes statistics\n");
- TestEvent2Cb::should_capture_stats = true;
- while (!TestEvent2Cb::has_captured_stats) {
- RdKafka::Message *msg = c->consume(tmout_multip(500));
- delete msg;
- }
-
- for (i = 0; i < replica_ids.size(); ++i)
- Test::Say(
- tostr() << _C_YEL << "rxbytes for replica on broker " << replica_ids[i]
- << ": " << TestEvent2Cb::rxbytes[replica_ids[i]]
- << (replica_ids[i] == leader_id ? " (leader)" : "")
- << (replica_ids[i] == client_rack_id ? " (preferred replica)"
- : "")
- << "\n");
-
- for (i = 0; i < replica_ids.size(); ++i)
- if (replica_ids[i] != client_rack_id)
- test_assert(
- TestEvent2Cb::rxbytes[replica_ids[i]] <
- TestEvent2Cb::rxbytes[client_rack_id],
- "rxbytes was not highest on broker corresponding to client.rack.");
-
- test_assert(
- TestEvent2Cb::rxbytes[client_rack_id] > msgcnt * msgsize,
- tostr() << "expecting rxbytes of client.rack broker to be at least "
- << msgcnt * msgsize << " but it was "
- << TestEvent2Cb::rxbytes[client_rack_id]);
-
- Test::Say("Done\n");
-
- // Manual test 1:
- // - change the lease period from 5 minutes to 5 seconds (modify
- // rdkafka_partition.c)
- // - change the max lease grant period from 1 minute to 10 seconds (modify
- // rdkafka_broker.c)
- // - add infinite consume loop to the end of this test.
- // - observe:
- // - the partition gets delegated to the preferred replica.
- // - the messages get consumed.
- // - the lease expires.
- // - the partition is reverted to the leader.
- // - the toppar is backed off, and debug message noting the faster than
- // expected delegation to a replica.
-
- // Manual test 2:
- // - same modifications as above.
- // - add Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "3000");
- // - observe:
- // - that metadata being periodically received and not interfering with
- // anything.
-
- c->close();
- delete c;
-}
-#endif
-
-extern "C" {
-int main_0101_fetch_from_follower(int argc, char **argv) {
-#if WITH_RAPIDJSON
- do_fff_test();
-#else
- Test::Skip("RapidJSON >=1.1.0 not available\n");
-#endif
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c
deleted file mode 100644
index 231a09065..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c
+++ /dev/null
@@ -1,535 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * @name KafkaConsumer static membership tests
- *
- * Runs two consumers subscribing to multiple topics simulating various
- * rebalance scenarios with static group membership enabled.
- */
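-
-/* Condensed sketch (added for illustration, not part of the original tests):
- * static membership is enabled purely through configuration. Each consumer
- * instance gets a unique, stable `group.instance.id` so that a restart
- * within `session.timeout.ms` resumes the old assignment without a
- * rebalance. The id and timeout below are hypothetical. */
-static rd_kafka_conf_t *sketch_static_member_conf(void) {
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, NULL, 30);
- /* Stable identity: reusing this id after a restart resumes the
- * previous member's assignment instead of rebalancing the group. */
- test_conf_set(conf, "group.instance.id", "consumer1");
- /* Eviction still happens if the member is gone longer than this. */
- test_conf_set(conf, "session.timeout.ms", "6000");
- return conf;
-}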
-
-#define _CONSUMER_CNT 2
-
-typedef struct _consumer_s {
- rd_kafka_t *rk;
- test_msgver_t *mv;
- int64_t assigned_at;
- int64_t revoked_at;
- int partition_cnt;
- rd_kafka_resp_err_t expected_rb_event;
- int curr_line;
-} _consumer_t;
-
-
-/**
- * @brief Call poll until a rebalance has been triggered
- */
-static int static_member_wait_rebalance0(int line,
- _consumer_t *c,
- int64_t start,
- int64_t *target,
- int timeout_ms) {
- int64_t tmout = test_clock() + (timeout_ms * 1000);
- test_timing_t t_time;
-
- c->curr_line = line;
-
- TEST_SAY("line %d: %s awaiting %s event\n", line, rd_kafka_name(c->rk),
- rd_kafka_err2name(c->expected_rb_event));
-
- TIMING_START(&t_time, "wait_rebalance");
- while (timeout_ms < 0 ? 1 : test_clock() <= tmout) {
- if (*target > start) {
- c->curr_line = 0;
- return 1;
- }
- test_consumer_poll_once(c->rk, c->mv, 1000);
- }
- TIMING_STOP(&t_time);
-
- c->curr_line = 0;
-
- TEST_SAY("line %d: %s timed out awaiting %s event\n", line,
- rd_kafka_name(c->rk), rd_kafka_err2name(c->expected_rb_event));
-
- return 0;
-}
-
-#define static_member_expect_rebalance(C, START, TARGET, TIMEOUT_MS) \
- do { \
- if (!static_member_wait_rebalance0(__LINE__, C, START, TARGET, \
- TIMEOUT_MS)) \
- TEST_FAIL("%s: timed out waiting for %s event", \
- rd_kafka_name((C)->rk), \
- rd_kafka_err2name((C)->expected_rb_event)); \
- } while (0)
-
-#define static_member_wait_rebalance(C, START, TARGET, TIMEOUT_MS) \
- static_member_wait_rebalance0(__LINE__, C, START, TARGET, TIMEOUT_MS)
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- _consumer_t *c = opaque;
-
- TEST_ASSERT(c->expected_rb_event == err,
- "line %d: %s: Expected rebalance event %s got %s\n",
- c->curr_line, rd_kafka_name(rk),
- rd_kafka_err2name(c->expected_rb_event),
- rd_kafka_err2name(err));
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- TEST_SAY("line %d: %s Assignment (%d partition(s)):\n",
- c->curr_line, rd_kafka_name(rk), parts->cnt);
- test_print_partition_list(parts);
-
- c->partition_cnt = parts->cnt;
- c->assigned_at = test_clock();
- rd_kafka_assign(rk, parts);
-
- break;
-
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- c->revoked_at = test_clock();
- rd_kafka_assign(rk, NULL);
- TEST_SAY("line %d: %s revoked %d partitions\n", c->curr_line,
- rd_kafka_name(c->rk), parts->cnt);
-
- break;
-
- default:
- TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err));
- break;
- }
-
- /* Reset error */
- c->expected_rb_event = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- /* prevent poll from triggering more than one rebalance event */
- rd_kafka_yield(rk);
-}
-
-
-static void do_test_static_group_rebalance(void) {
- rd_kafka_conf_t *conf;
- test_msgver_t mv;
- int64_t rebalance_start;
- _consumer_t c[_CONSUMER_CNT] = RD_ZERO_INIT;
- const int msgcnt = 100;
- uint64_t testid = test_id_generate();
- const char *topic =
- test_mk_topic_name("0102_static_group_rebalance", 1);
- char *topics = rd_strdup(tsprintf("^%s.*", topic));
- test_timing_t t_close;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 70);
- test_msgver_init(&mv, testid);
- c[0].mv = &mv;
- c[1].mv = &mv;
-
- test_create_topic(NULL, topic, 3, 1);
- test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
-
- test_conf_set(conf, "max.poll.interval.ms", "9000");
- test_conf_set(conf, "session.timeout.ms", "6000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
- test_conf_set(conf, "metadata.max.age.ms", "5000");
- test_conf_set(conf, "enable.partition.eof", "true");
- test_conf_set(conf, "group.instance.id", "consumer1");
-
- rd_kafka_conf_set_opaque(conf, &c[0]);
- c[0].rk = test_create_consumer(topic, rebalance_cb,
- rd_kafka_conf_dup(conf), NULL);
-
- rd_kafka_conf_set_opaque(conf, &c[1]);
- test_conf_set(conf, "group.instance.id", "consumer2");
- c[1].rk = test_create_consumer(topic, rebalance_cb,
- rd_kafka_conf_dup(conf), NULL);
- rd_kafka_conf_destroy(conf);
-
- test_wait_topic_exists(c[1].rk, topic, 5000);
-
- test_consumer_subscribe(c[0].rk, topics);
- test_consumer_subscribe(c[1].rk, topics);
-
- /*
- * Static members enforce `max.poll.interval.ms`, which may prompt
- * an unwanted rebalance while the other consumer awaits its assignment.
- * These members remain in the member list, however, so we must
- * interleave poll calls while awaiting our assignment to avoid
- * triggering unexpected rebalances.
- */
- rebalance_start = test_clock();
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- while (!static_member_wait_rebalance(&c[0], rebalance_start,
- &c[0].assigned_at, 1000)) {
- /* keep consumer 2 alive while consumer 1 awaits
- * its assignment
- */
- c[1].curr_line = __LINE__;
- test_consumer_poll_once(c[1].rk, &mv, 0);
- }
-
- static_member_expect_rebalance(&c[1], rebalance_start,
- &c[1].assigned_at, -1);
-
- /*
- * Consume all the messages so we can watch for duplicates
- * after rejoin/rebalance operations.
- */
- c[0].curr_line = __LINE__;
- test_consumer_poll("serve.queue", c[0].rk, testid, c[0].partition_cnt,
- 0, -1, &mv);
- c[1].curr_line = __LINE__;
- test_consumer_poll("serve.queue", c[1].rk, testid, c[1].partition_cnt,
- 0, -1, &mv);
-
- test_msgver_verify("first.verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
-
- TEST_SAY("== Testing consumer restart ==\n");
- conf = rd_kafka_conf_dup(rd_kafka_conf(c[1].rk));
-
- /* Only c[1] should exhibit rebalance behavior */
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- TIMING_START(&t_close, "consumer restart");
- test_consumer_close(c[1].rk);
- rd_kafka_destroy(c[1].rk);
-
- c[1].rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
- rd_kafka_poll_set_consumer(c[1].rk);
-
- test_consumer_subscribe(c[1].rk, topics);
-
- /* Await assignment */
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- rebalance_start = test_clock();
- while (!static_member_wait_rebalance(&c[1], rebalance_start,
- &c[1].assigned_at, 1000)) {
- c[0].curr_line = __LINE__;
- test_consumer_poll_once(c[0].rk, &mv, 0);
- }
- TIMING_STOP(&t_close);
-
- /* Should complete before `session.timeout.ms` */
- TIMING_ASSERT(&t_close, 0, 6000);
-
-
- TEST_SAY("== Testing subscription expansion ==\n");
-
- /*
- * New topics matching the subscription pattern should cause
- * group rebalance
- */
- test_create_topic(c[0].rk, tsprintf("%snew", topic), 1, 1);
-
- /* Await revocation */
- rebalance_start = test_clock();
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- while (!static_member_wait_rebalance(&c[0], rebalance_start,
- &c[0].revoked_at, 1000)) {
- c[1].curr_line = __LINE__;
- test_consumer_poll_once(c[1].rk, &mv, 0);
- }
-
- static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
- -1);
-
- /* Await assignment */
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- while (!static_member_wait_rebalance(&c[0], rebalance_start,
- &c[0].assigned_at, 1000)) {
- c[1].curr_line = __LINE__;
- test_consumer_poll_once(c[1].rk, &mv, 0);
- }
-
- static_member_expect_rebalance(&c[1], rebalance_start,
- &c[1].assigned_at, -1);
-
- TEST_SAY("== Testing consumer unsubscribe ==\n");
-
- /* Unsubscribe should send a LeaveGroupRequest invoking a rebalance */
-
- /* Send LeaveGroup incrementing generation by 1 */
- rebalance_start = test_clock();
- rd_kafka_unsubscribe(c[1].rk);
-
- /* Await revocation */
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
- -1);
- static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at,
- -1);
-
- /* New cgrp generation with 1 member, c[0] */
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- static_member_expect_rebalance(&c[0], rebalance_start,
- &c[0].assigned_at, -1);
-
- /* Send JoinGroup bumping generation by 1 */
- rebalance_start = test_clock();
- test_consumer_subscribe(c[1].rk, topics);
-
- /* End previous single member generation */
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at,
- -1);
-
- /* Await assignment */
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- while (!static_member_wait_rebalance(&c[1], rebalance_start,
- &c[1].assigned_at, 1000)) {
- c[0].curr_line = __LINE__;
- test_consumer_poll_once(c[0].rk, &mv, 0);
- }
-
- static_member_expect_rebalance(&c[0], rebalance_start,
- &c[0].assigned_at, -1);
-
- TEST_SAY("== Testing max poll violation ==\n");
- /* max.poll.interval.ms should still be enforced by the consumer */
-
- /*
- * Block long enough for consumer 2 to be evicted from the group
- * `max.poll.interval.ms` + `session.timeout.ms`
- */
- rebalance_start = test_clock();
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- c[0].curr_line = __LINE__;
- test_consumer_poll_no_msgs("wait.max.poll", c[0].rk, testid,
- 6000 + 9000);
- c[1].curr_line = __LINE__;
- test_consumer_poll_expect_err(c[1].rk, testid, 1000,
- RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED);
-
- /* Await revocation */
- while (!static_member_wait_rebalance(&c[0], rebalance_start,
- &c[0].revoked_at, 1000)) {
- c[1].curr_line = __LINE__;
- test_consumer_poll_once(c[1].rk, &mv, 0);
- }
-
- static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
- -1);
-
- /* Await assignment */
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- while (!static_member_wait_rebalance(&c[1], rebalance_start,
- &c[1].assigned_at, 1000)) {
- c[0].curr_line = __LINE__;
- test_consumer_poll_once(c[0].rk, &mv, 0);
- }
-
- static_member_expect_rebalance(&c[0], rebalance_start,
- &c[0].assigned_at, -1);
-
- TEST_SAY("== Testing `session.timeout.ms` member eviction ==\n");
-
- rebalance_start = test_clock();
- c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- TIMING_START(&t_close, "consumer close");
- test_consumer_close(c[0].rk);
- rd_kafka_destroy(c[0].rk);
-
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
- 2 * 7000);
-
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
- static_member_expect_rebalance(&c[1], rebalance_start,
- &c[1].assigned_at, 2000);
-
- /* Should take at least as long as `session.timeout.ms` but less than
- * `max.poll.interval.ms`. Since we can't really know when
- * the last Heartbeat or SyncGroup request was sent we need to
- * allow some leeway on the minimum side (4s), and also some on
- * the maximum side (1s) for slow runtimes. */
- TIMING_ASSERT(&t_close, 6000 - 4000, 9000 + 1000);
-
- c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- test_consumer_close(c[1].rk);
- rd_kafka_destroy(c[1].rk);
-
- test_msgver_verify("final.validation", &mv, TEST_MSGVER_ALL, 0, msgcnt);
- test_msgver_clear(&mv);
- free(topics);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Await a non-empty assignment for all consumers in \p c
- */
-static void await_assignment_multi(const char *what, rd_kafka_t **c, int cnt) {
- rd_kafka_topic_partition_list_t *parts;
- int assignment_cnt;
-
- TEST_SAY("%s\n", what);
-
- do {
- int i;
- int timeout_ms = 1000;
-
- assignment_cnt = 0;
-
- for (i = 0; i < cnt; i++) {
- test_consumer_poll_no_msgs("poll", c[i], 0, timeout_ms);
- timeout_ms = 100;
-
- if (!rd_kafka_assignment(c[i], &parts) && parts) {
- TEST_SAY("%s has %d partition(s) assigned\n",
- rd_kafka_name(c[i]), parts->cnt);
- if (parts->cnt > 0)
- assignment_cnt++;
- rd_kafka_topic_partition_list_destroy(parts);
- }
- }
-
- } while (assignment_cnt < cnt);
-}
-
-
-static const rd_kafka_t *valid_fatal_rk;
-/**
- * @brief Tells test harness that fatal error should not fail the current test
- */
-static int
-is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- return rk != valid_fatal_rk;
-}
-
-/**
- * @brief Test that consumer fencing raises a fatal error
- */
-static void do_test_fenced_member(void) {
- rd_kafka_t *c[3]; /* 0: consumer2b, 1: consumer1, 2: consumer2a */
- rd_kafka_conf_t *conf;
- const char *topic =
- test_mk_topic_name("0102_static_group_rebalance", 1);
- rd_kafka_message_t *rkm;
- char errstr[512];
- rd_kafka_resp_err_t err;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 30);
-
- test_create_topic(NULL, topic, 3, 1);
-
- test_conf_set(conf, "group.instance.id", "consumer1");
- c[1] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
-
- test_conf_set(conf, "group.instance.id", "consumer2");
- c[2] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
-
- test_wait_topic_exists(c[2], topic, 5000);
-
- test_consumer_subscribe(c[1], topic);
- test_consumer_subscribe(c[2], topic);
-
- await_assignment_multi("Awaiting initial assignments", &c[1], 2);
-
- /* Create conflicting consumer */
- TEST_SAY("Creating conflicting consumer2 instance\n");
- test_conf_set(conf, "group.instance.id", "consumer2");
- c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
- rd_kafka_conf_destroy(conf);
-
- test_curr->is_fatal_cb = is_fatal_cb;
- valid_fatal_rk = c[2]; /* consumer2a is the consumer that should fail */
-
- test_consumer_subscribe(c[0], topic);
-
- /* consumer1 should not be affected (other than a rebalance which
- * we ignore here)... */
- test_consumer_poll_no_msgs("consumer1", c[1], 0, 5000);
-
- /* .. but consumer2a should now have been fenced off by consumer2b */
- rkm = rd_kafka_consumer_poll(c[2], 5000);
- TEST_ASSERT(rkm != NULL, "Expected error, not timeout");
- TEST_ASSERT(rkm->err == RD_KAFKA_RESP_ERR__FATAL,
- "Expected ERR__FATAL, not %s: %s",
- rd_kafka_err2str(rkm->err), rd_kafka_message_errstr(rkm));
- TEST_SAY("Fenced consumer returned expected: %s: %s\n",
- rd_kafka_err2name(rkm->err), rd_kafka_message_errstr(rkm));
- rd_kafka_message_destroy(rkm);
-
-
- /* Read the actual error */
- err = rd_kafka_fatal_error(c[2], errstr, sizeof(errstr));
- TEST_SAY("%s fatal error: %s: %s\n", rd_kafka_name(c[2]),
- rd_kafka_err2name(err), errstr);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
- "Expected ERR_FENCED_INSTANCE_ID as fatal error, not %s",
- rd_kafka_err2name(err));
-
- TEST_SAY("close\n");
- /* Close consumer2a, should also return a fatal error */
- err = rd_kafka_consumer_close(c[2]);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL,
- "Expected close on %s to return ERR__FATAL, not %s",
- rd_kafka_name(c[2]), rd_kafka_err2name(err));
-
- rd_kafka_destroy(c[2]);
-
- /* consumer2b and consumer1 should be fine and get their
- * assignments */
- await_assignment_multi("Awaiting post-fencing assignment", c, 2);
-
- rd_kafka_destroy(c[0]);
- rd_kafka_destroy(c[1]);
-
- SUB_TEST_PASS();
-}
-
-
-
-int main_0102_static_group_rebalance(int argc, char **argv) {
-
- do_test_static_group_rebalance();
-
- do_test_fenced_member();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c
deleted file mode 100644
index eaab2f217..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c
+++ /dev/null
@@ -1,1297 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-/**
- * @name Producer transaction tests
- *
- */
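-
-/* Condensed sketch (added for illustration, not part of the original tests):
- * the canonical transactional producer lifecycle exercised below. Assumes
- * \p p was created with `transactional.id` set; the topic name and
- * timeouts are hypothetical. */
-static void sketch_txn_lifecycle(rd_kafka_t *p) {
- rd_kafka_resp_err_t err;
- char val[] = "hello";
-
- /* One-time: fences older instances with the same transactional.id
- * and acquires the producer id (PID). */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
-
- /* Per transaction: begin, produce, then commit (or abort). */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
- err = rd_kafka_producev(p, RD_KAFKA_V_TOPIC("sketch-topic"),
- RD_KAFKA_V_VALUE(val, sizeof(val) - 1),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_END);
- TEST_ASSERT(!err, "producev failed: %s", rd_kafka_err2str(err));
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, 30 * 1000));
-}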
-
-
-/**
- * @brief Produce messages using batch interface.
- */
-void do_produce_batch(rd_kafka_t *rk,
- const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt) {
- rd_kafka_message_t *messages;
- rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic, NULL);
- int i;
- int ret;
- int remains = cnt;
-
- TEST_SAY("Batch-producing %d messages to partition %" PRId32 "\n", cnt,
- partition);
-
- messages = rd_calloc(cnt, sizeof(*messages));
- for (i = 0; i < cnt; i++) {
- char key[128];
- char value[128];
-
- test_prepare_msg(testid, partition, msg_base + i, value,
- sizeof(value), key, sizeof(key));
- messages[i].key = rd_strdup(key);
- messages[i].key_len = strlen(key);
- messages[i].payload = rd_strdup(value);
- messages[i].len = strlen(value);
- messages[i]._private = &remains;
- }
-
- ret = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_COPY,
- messages, cnt);
-
- rd_kafka_topic_destroy(rkt);
-
- TEST_ASSERT(ret == cnt,
- "Failed to batch-produce: %d/%d messages produced", ret,
- cnt);
-
- for (i = 0; i < cnt; i++) {
- TEST_ASSERT(!messages[i].err, "Failed to produce message: %s",
- rd_kafka_err2str(messages[i].err));
- rd_free(messages[i].key);
- rd_free(messages[i].payload);
- }
- rd_free(messages);
-
- /* Wait for deliveries */
- test_wait_delivery(rk, &remains);
-}
-
-
-
-/**
- * @brief Basic producer transaction testing without consumed input
- * (only consumed output for verification).
- * e.g., no consumer offsets to commit with transaction.
- */
-static void do_test_basic_producer_txn(rd_bool_t enable_compression) {
- const char *topic = test_mk_topic_name("0103_transactions", 1);
- const int partition_cnt = 4;
-#define _TXNCNT 6
- struct {
- const char *desc;
- uint64_t testid;
- int msgcnt;
- rd_bool_t abort;
- rd_bool_t sync;
- rd_bool_t batch;
- rd_bool_t batch_any;
- } txn[_TXNCNT] = {
- {"Commit transaction, sync producing", 0, 100, rd_false, rd_true},
- {"Commit transaction, async producing", 0, 1000, rd_false,
- rd_false},
- {"Commit transaction, sync batch producing to any partition", 0,
- 100, rd_false, rd_true, rd_true, rd_true},
- {"Abort transaction, sync producing", 0, 500, rd_true, rd_true},
- {"Abort transaction, async producing", 0, 5000, rd_true, rd_false},
- {"Abort transaction, sync batch producing to one partition", 0, 500,
- rd_true, rd_true, rd_true, rd_false},
-
- };
- rd_kafka_t *p, *c;
- rd_kafka_conf_t *conf, *p_conf, *c_conf;
- int i;
-
- /* Mark one of the run modes as quick so we don't run both when
- * in a hurry. */
- SUB_TEST0(enable_compression /* quick */, "with%s compression",
- enable_compression ? "" : "out");
-
- test_conf_init(&conf, NULL, 30);
-
- /* Create producer */
- p_conf = rd_kafka_conf_dup(conf);
- rd_kafka_conf_set_dr_msg_cb(p_conf, test_dr_msg_cb);
- test_conf_set(p_conf, "transactional.id", topic);
- if (enable_compression)
- test_conf_set(p_conf, "compression.type", "lz4");
- p = test_create_handle(RD_KAFKA_PRODUCER, p_conf);
-
- // FIXME: add testing where the txn id is reused (and thus fails)
-
- /* Create topic */
- test_create_topic(p, topic, partition_cnt, 3);
-
- /* Create consumer */
- c_conf = conf;
- test_conf_set(c_conf, "auto.offset.reset", "earliest");
- /* Make sure default isolation.level is transaction aware */
- TEST_ASSERT(
- !strcmp(test_conf_get(c_conf, "isolation.level"), "read_committed"),
- "expected isolation.level=read_committed, not %s",
- test_conf_get(c_conf, "isolation.level"));
-
- c = test_create_consumer(topic, NULL, c_conf, NULL);
-
- /* Wait for topic to propagate to avoid test flakiness */
- test_wait_topic_exists(c, topic, tmout_multip(5000));
-
- /* Subscribe to topic */
- test_consumer_subscribe(c, topic);
-
- /* Wait for assignment to make sure consumer is fetching messages
- * below, so we can use the poll_no_msgs() timeout to
- * determine that messages were indeed aborted. */
- test_consumer_wait_assignment(c, rd_true);
-
- /* Init transactions */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
-
- for (i = 0; i < _TXNCNT; i++) {
- int wait_msgcnt = 0;
-
- TEST_SAY(_C_BLU "txn[%d]: Begin transaction: %s\n" _C_CLR, i,
- txn[i].desc);
-
- /* Begin a transaction */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
-
- /* If the transaction is aborted it is okay if
- * messages fail producing, since they'll be
- * purged from queues. */
- test_curr->ignore_dr_err = txn[i].abort;
-
- /* Produce messages */
- txn[i].testid = test_id_generate();
- TEST_SAY(
- "txn[%d]: Produce %d messages %ssynchronously "
- "with testid %" PRIu64 "\n",
- i, txn[i].msgcnt, txn[i].sync ? "" : "a", txn[i].testid);
-
- if (!txn[i].batch) {
- if (txn[i].sync)
- test_produce_msgs2(p, topic, txn[i].testid,
- RD_KAFKA_PARTITION_UA, 0,
- txn[i].msgcnt, NULL, 0);
- else
- test_produce_msgs2_nowait(
- p, topic, txn[i].testid,
- RD_KAFKA_PARTITION_UA, 0, txn[i].msgcnt,
- NULL, 0, &wait_msgcnt);
- } else if (txn[i].batch_any) {
- /* Batch: use any partition */
- do_produce_batch(p, topic, txn[i].testid,
- RD_KAFKA_PARTITION_UA, 0,
- txn[i].msgcnt);
- } else {
- /* Batch: specific partition */
- do_produce_batch(p, topic, txn[i].testid,
- 1 /* partition */, 0, txn[i].msgcnt);
- }
-
-
- /* Abort or commit transaction */
- TEST_SAY("txn[%d]: %s" _C_CLR " transaction\n", i,
- txn[i].abort ? _C_RED "Abort" : _C_GRN "Commit");
- if (txn[i].abort) {
- test_curr->ignore_dr_err = rd_true;
- TEST_CALL_ERROR__(
- rd_kafka_abort_transaction(p, 30 * 1000));
- } else {
- test_curr->ignore_dr_err = rd_false;
- TEST_CALL_ERROR__(
- rd_kafka_commit_transaction(p, 30 * 1000));
- }
-
- if (!txn[i].sync)
- /* Wait for delivery reports */
- test_wait_delivery(p, &wait_msgcnt);
-
- /* Consume messages */
- if (txn[i].abort)
- test_consumer_poll_no_msgs(txn[i].desc, c,
- txn[i].testid, 3000);
- else
- test_consumer_poll(txn[i].desc, c, txn[i].testid,
- partition_cnt, 0, txn[i].msgcnt,
- NULL);
-
- TEST_SAY(_C_GRN "txn[%d]: Finished successfully: %s\n" _C_CLR,
- i, txn[i].desc);
- }
-
- rd_kafka_destroy(p);
-
- test_consumer_close(c);
- rd_kafka_destroy(c);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Consumes \p cnt messages and returns them in the provided array
- * which must be pre-allocated.
- */
-static void
-consume_messages(rd_kafka_t *c, rd_kafka_message_t **msgs, int msgcnt) {
- int i = 0;
- while (i < msgcnt) {
- msgs[i] = rd_kafka_consumer_poll(c, 1000);
- if (!msgs[i])
- continue;
-
- if (msgs[i]->err) {
- TEST_SAY("%s consumer error: %s\n", rd_kafka_name(c),
- rd_kafka_message_errstr(msgs[i]));
- rd_kafka_message_destroy(msgs[i]);
- continue;
- }
-
- TEST_SAYL(3, "%s: consumed message %s [%d] @ %" PRId64 "\n",
- rd_kafka_name(c), rd_kafka_topic_name(msgs[i]->rkt),
- msgs[i]->partition, msgs[i]->offset);
-
-
- i++;
- }
-}
-
-static void destroy_messages(rd_kafka_message_t **msgs, int msgcnt) {
- while (msgcnt-- > 0)
- rd_kafka_message_destroy(msgs[msgcnt]);
-}
-
-
-/**
- * @brief Test a transactional consumer + transactional producer combo,
- * mimicking a streams job. (A condensed sketch of the offset-commit
- * step follows this function.)
- *
- * One input topic produced to by transactional producer 1,
- * consumed by transactional consumer 1, which forwards messages
- * to transactional producer 2 that writes messages to output topic,
- * which is consumed and verified by transactional consumer 2.
- *
- * Every 3rd transaction is aborted.
- */
-void do_test_consumer_producer_txn(void) {
- char *input_topic =
- rd_strdup(test_mk_topic_name("0103-transactions-input", 1));
- char *output_topic =
- rd_strdup(test_mk_topic_name("0103-transactions-output", 1));
- const char *c1_groupid = input_topic;
- const char *c2_groupid = output_topic;
- rd_kafka_t *p1, *p2, *c1, *c2;
- rd_kafka_conf_t *conf, *tmpconf;
- uint64_t testid;
-#define _MSGCNT (10 * 30)
- const int txncnt = 10;
- const int msgcnt = _MSGCNT;
- int txn;
- int committed_msgcnt = 0;
- test_msgver_t expect_mv, actual_mv;
-
- SUB_TEST_QUICK("transactional test with %d transactions", txncnt);
-
- test_conf_init(&conf, NULL, 30);
-
- testid = test_id_generate();
-
- /*
- *
- * Producer 1
- * |
- * v
- * input topic
- * |
- * v
- * Consumer 1 }
- * | } transactional streams job
- * v }
- * Producer 2 }
- * |
- * v
- * output topic
- * |
- * v
- * Consumer 2
- */
-
-
- /* Create Producer 1 and seed input topic */
- tmpconf = rd_kafka_conf_dup(conf);
- test_conf_set(tmpconf, "transactional.id", input_topic);
- rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb);
- p1 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf);
-
- /* Create input and output topics */
- test_create_topic(p1, input_topic, 4, 3);
- test_create_topic(p1, output_topic, 4, 3);
-
- /* Seed input topic with messages */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1));
- test_produce_msgs2(p1, input_topic, testid, RD_KAFKA_PARTITION_UA, 0,
- msgcnt, NULL, 0);
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p1, 30 * 1000));
-
- rd_kafka_destroy(p1);
-
- /* Create Consumer 1: reading msgs from input_topic (Producer 1) */
- tmpconf = rd_kafka_conf_dup(conf);
- test_conf_set(tmpconf, "isolation.level", "read_committed");
- test_conf_set(tmpconf, "auto.offset.reset", "earliest");
- test_conf_set(tmpconf, "enable.auto.commit", "false");
- c1 = test_create_consumer(c1_groupid, NULL, tmpconf, NULL);
- test_consumer_subscribe(c1, input_topic);
-
- /* Create Producer 2 */
- tmpconf = rd_kafka_conf_dup(conf);
- test_conf_set(tmpconf, "transactional.id", output_topic);
- rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb);
- p2 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf);
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000));
-
- /* Create Consumer 2: reading msgs from output_topic (Producer 2) */
- tmpconf = rd_kafka_conf_dup(conf);
- test_conf_set(tmpconf, "isolation.level", "read_committed");
- test_conf_set(tmpconf, "auto.offset.reset", "earliest");
- c2 = test_create_consumer(c2_groupid, NULL, tmpconf, NULL);
- test_consumer_subscribe(c2, output_topic);
-
- /* Keep track of what messages to expect on the output topic */
- test_msgver_init(&expect_mv, testid);
-
- for (txn = 0; txn < txncnt; txn++) {
- int msgcnt2 = 10 * (1 + (txn % 3));
- rd_kafka_message_t *msgs[_MSGCNT];
- int i;
- rd_bool_t do_abort = !(txn % 3);
- rd_bool_t recreate_consumer =
- (do_abort && txn == 3) || (!do_abort && txn == 2);
- rd_kafka_topic_partition_list_t *offsets,
- *expected_offsets = NULL;
- rd_kafka_resp_err_t err;
- rd_kafka_consumer_group_metadata_t *c1_cgmetadata;
- int remains = msgcnt2;
-
- TEST_SAY(_C_BLU
- "Begin transaction #%d/%d "
- "(msgcnt=%d, do_abort=%s, recreate_consumer=%s)\n",
- txn, txncnt, msgcnt2, do_abort ? "true" : "false",
- recreate_consumer ? "true" : "false");
-
- consume_messages(c1, msgs, msgcnt2);
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p2));
-
- for (i = 0; i < msgcnt2; i++) {
- rd_kafka_message_t *msg = msgs[i];
-
- if (!do_abort) {
- /* The expected msgver based on the input topic
- * will be compared to the actual msgver based
- * on the output topic, so we need to
- * override the topic name to match
- * the actual msgver's output topic. */
- test_msgver_add_msg0(
- __FUNCTION__, __LINE__, rd_kafka_name(p2),
- &expect_mv, msg, output_topic);
- committed_msgcnt++;
- }
-
- err = rd_kafka_producev(
- p2, RD_KAFKA_V_TOPIC(output_topic),
- RD_KAFKA_V_KEY(msg->key, msg->key_len),
- RD_KAFKA_V_VALUE(msg->payload, msg->len),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_OPAQUE(&remains), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s",
- rd_kafka_err2str(err));
-
- rd_kafka_poll(p2, 0);
- }
-
- destroy_messages(msgs, msgcnt2);
-
- err = rd_kafka_assignment(c1, &offsets);
- TEST_ASSERT(!err, "failed to get consumer assignment: %s",
- rd_kafka_err2str(err));
-
- err = rd_kafka_position(c1, offsets);
- TEST_ASSERT(!err, "failed to get consumer position: %s",
- rd_kafka_err2str(err));
-
- c1_cgmetadata = rd_kafka_consumer_group_metadata(c1);
- TEST_ASSERT(c1_cgmetadata != NULL,
- "failed to get consumer group metadata");
-
- TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
- p2, offsets, c1_cgmetadata, -1));
-
- if (recreate_consumer && !do_abort) {
- expected_offsets =
- rd_kafka_topic_partition_list_new(offsets->cnt);
-
- /* Cannot use rd_kafka_topic_partition_list_copy
- * as it needs to be destroyed before closing the
- * consumer, because of the _private field holding
- * a reference to the internal toppar */
- for (i = 0; i < offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar =
- &offsets->elems[i];
- rd_kafka_topic_partition_t *rktpar_new;
- rktpar_new = rd_kafka_topic_partition_list_add(
- expected_offsets, rktpar->topic,
- rktpar->partition);
- rktpar_new->offset = rktpar->offset;
- rd_kafka_topic_partition_set_leader_epoch(
- rktpar_new,
- rd_kafka_topic_partition_get_leader_epoch(
- rktpar));
- }
- }
-
- rd_kafka_consumer_group_metadata_destroy(c1_cgmetadata);
-
- rd_kafka_topic_partition_list_destroy(offsets);
-
-
- if (do_abort) {
- test_curr->ignore_dr_err = rd_true;
- TEST_CALL_ERROR__(
- rd_kafka_abort_transaction(p2, 30 * 1000));
- } else {
- test_curr->ignore_dr_err = rd_false;
- TEST_CALL_ERROR__(
- rd_kafka_commit_transaction(p2, 30 * 1000));
- }
-
- TEST_ASSERT(remains == 0,
- "expected no remaining messages "
- "in-flight/in-queue, got %d",
- remains);
-
-
- if (recreate_consumer) {
- /* Recreate the consumer to pick up
- * on the committed offset. */
- TEST_SAY("Recreating consumer 1\n");
- rd_kafka_consumer_close(c1);
- rd_kafka_destroy(c1);
-
- tmpconf = rd_kafka_conf_dup(conf);
- test_conf_set(tmpconf, "isolation.level",
- "read_committed");
- test_conf_set(tmpconf, "auto.offset.reset", "earliest");
- test_conf_set(tmpconf, "enable.auto.commit", "false");
- c1 = test_create_consumer(c1_groupid, NULL, tmpconf,
- NULL);
- test_consumer_subscribe(c1, input_topic);
-
-
- if (expected_offsets) {
- rd_kafka_topic_partition_list_t
- *committed_offsets =
- rd_kafka_topic_partition_list_copy(
- expected_offsets);
- /* Set committed offsets and epochs to a
- * different value before requesting them. */
- for (i = 0; i < committed_offsets->cnt; i++) {
- rd_kafka_topic_partition_t *rktpar =
- &committed_offsets->elems[i];
- rktpar->offset = -100;
- rd_kafka_topic_partition_set_leader_epoch(
- rktpar, -100);
- }
-
- TEST_CALL_ERR__(rd_kafka_committed(
- c1, committed_offsets, -1));
-
- if (test_partition_list_and_offsets_cmp(
- expected_offsets, committed_offsets)) {
- TEST_SAY("expected list:\n");
- test_print_partition_list(
- expected_offsets);
- TEST_SAY("committed() list:\n");
- test_print_partition_list(
- committed_offsets);
- TEST_FAIL(
- "committed offsets don't match");
- }
-
- rd_kafka_topic_partition_list_destroy(
- committed_offsets);
-
- rd_kafka_topic_partition_list_destroy(
- expected_offsets);
- }
- }
- }
-
- rd_kafka_conf_destroy(conf);
-
- test_msgver_init(&actual_mv, testid);
-
- test_consumer_poll("Verify output topic", c2, testid, -1, 0,
- committed_msgcnt, &actual_mv);
-
- test_msgver_verify_compare("Verify output topic", &actual_mv,
- &expect_mv, TEST_MSGVER_ALL);
-
- test_msgver_clear(&actual_mv);
- test_msgver_clear(&expect_mv);
-
- rd_kafka_consumer_close(c1);
- rd_kafka_consumer_close(c2);
- rd_kafka_destroy(c1);
- rd_kafka_destroy(c2);
- rd_kafka_destroy(p2);
-
- rd_free(input_topic);
- rd_free(output_topic);
-
- SUB_TEST_PASS();
-}
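-
-/* Condensed sketch (added for illustration, not part of the original test):
- * the offset-commit step from the transaction loop above. Committing the
- * consumer's offsets through the producer's transaction is what makes the
- * read and the write atomic; \p p and \p c are assumed to be the
- * transactional producer and the input consumer as above. */
-static void sketch_commit_offsets_in_txn(rd_kafka_t *p, rd_kafka_t *c) {
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmd;
-
- /* Snapshot the consumer's current positions.. */
- TEST_CALL_ERR__(rd_kafka_assignment(c, &offsets));
- TEST_CALL_ERR__(rd_kafka_position(c, offsets));
- cgmd = rd_kafka_consumer_group_metadata(c);
-
- /* ..and commit them as part of the producer's transaction. */
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(p, offsets, cgmd, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmd);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, 30 * 1000));
-}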
-
-
-/**
- * @brief Testing misuse of the transaction API.
- */
-static void do_test_misuse_txn(void) {
- const char *topic = test_mk_topic_name("0103-test_misuse_txn", 1);
- rd_kafka_t *p;
- rd_kafka_conf_t *conf;
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t fatal_err;
- char errstr[512];
- int i;
-
- /*
- * transaction.timeout.ms out of range (from broker's point of view)
- */
- SUB_TEST_QUICK();
-
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "transactional.id", topic);
- test_conf_set(conf, "transaction.timeout.ms", "2147483647");
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- error = rd_kafka_init_transactions(p, 10 * 1000);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
- "Expected error ERR_INVALID_TRANSACTION_TIMEOUT, "
- "not %s: %s",
- rd_kafka_error_name(error),
- error ? rd_kafka_error_string(error) : "");
- TEST_ASSERT(rd_kafka_error_is_fatal(error),
- "Expected error to have is_fatal() set");
- rd_kafka_error_destroy(error);
- /* Check that a fatal error is raised */
- fatal_err = rd_kafka_fatal_error(p, errstr, sizeof(errstr));
- TEST_ASSERT(fatal_err == RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
- "Expected fatal error ERR_INVALID_TRANSACTION_TIMEOUT, "
- "not %s: %s",
- rd_kafka_err2name(fatal_err), fatal_err ? errstr : "");
-
- rd_kafka_destroy(p);
-
-
- /*
- * Multiple calls to init_transactions(): finish on first.
- */
- TEST_SAY("[ Test multiple init_transactions(): finish on first ]\n");
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "transactional.id", topic);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
-
- error = rd_kafka_init_transactions(p, 1);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE,
- "Expected ERR__STATE error, not %s",
- rd_kafka_error_name(error));
- rd_kafka_error_destroy(error);
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
-
- error = rd_kafka_init_transactions(p, 3 * 1000);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE,
- "Expected ERR__STATE error, not %s",
- rd_kafka_error_name(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_destroy(p);
-
-
- /*
- * Multiple calls to init_transactions(): timeout on first.
- */
- TEST_SAY("[ Test multiple init_transactions(): timeout on first ]\n");
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "transactional.id", topic);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- error = rd_kafka_init_transactions(p, 1);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_SAY("error: %s, %d\n", rd_kafka_error_string(error),
- rd_kafka_error_is_retriable(error));
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected ERR__TIMED_OUT, not %s: %s",
- rd_kafka_error_name(error), rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_is_retriable(error),
- "Expected error to be retriable");
- rd_kafka_error_destroy(error);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
-
- rd_kafka_destroy(p);
-
-
- /*
- * Multiple calls to init_transactions(): hysterical amounts
- */
- TEST_SAY("[ Test multiple init_transactions(): hysterical amounts ]\n");
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "transactional.id", topic);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* Call until init succeeds */
- for (i = 0; i < 5000; i++) {
- if (!(error = rd_kafka_init_transactions(p, 1)))
- break;
-
- TEST_ASSERT(rd_kafka_error_is_retriable(error),
- "Expected error to be retriable");
- rd_kafka_error_destroy(error);
-
- error = rd_kafka_begin_transaction(p);
- TEST_ASSERT(error, "Expected begin_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR__CONFLICT,
- "Expected begin_transactions() to fail "
- "with CONFLICT, not %s",
- rd_kafka_error_name(error));
-
- rd_kafka_error_destroy(error);
- }
-
- TEST_ASSERT(i < 5000,
- "init_transactions() did not succeed after %d calls\n", i);
-
- TEST_SAY("init_transactions() succeeded after %d call(s)\n", i + 1);
-
- /* Make sure a sub-sequent init call fails. */
- error = rd_kafka_init_transactions(p, 5 * 1000);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE,
- "Expected init_transactions() to fail with STATE, not %s",
- rd_kafka_error_name(error));
- rd_kafka_error_destroy(error);
-
- /* But begin.. should work now */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
-
- rd_kafka_destroy(p);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief is_fatal_cb for fenced_txn test.
- */
-static int fenced_txn_is_fatal_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason) {
- TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
- if (err == RD_KAFKA_RESP_ERR__FENCED) {
- TEST_SAY("Saw the expected fatal error\n");
- return 0;
- }
- return 1;
-}
-
-
-/**
- * @brief Check that transaction fencing is handled correctly
- * (a condensed sketch of the fencing sequence follows this function).
- */
-static void do_test_fenced_txn(rd_bool_t produce_after_fence) {
- const char *topic = test_mk_topic_name("0103_fenced_txn", 1);
- rd_kafka_conf_t *conf;
- rd_kafka_t *p1, *p2;
- rd_kafka_error_t *error;
- uint64_t testid;
-
- SUB_TEST_QUICK("%sproduce after fence",
- produce_after_fence ? "" : "do not ");
-
- if (produce_after_fence)
- test_curr->is_fatal_cb = fenced_txn_is_fatal_cb;
-
- test_curr->ignore_dr_err = rd_false;
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 30);
-
- test_conf_set(conf, "transactional.id", topic);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
- p2 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
- rd_kafka_conf_destroy(conf);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000));
-
- /* Begin a transaction */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1));
-
- /* Produce some messages */
- test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0, 10,
- NULL, 0);
-
- /* Initialize transactions on producer 2, this should
- * fence off producer 1. */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000));
-
- if (produce_after_fence) {
- /* This will fail hard since the epoch was bumped. */
- TEST_SAY("Producing after producing fencing\n");
- test_curr->ignore_dr_err = rd_true;
- test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0,
- 10, NULL, 0);
- }
-
-
- error = rd_kafka_commit_transaction(p1, 30 * 1000);
-
- TEST_ASSERT(error, "Expected commit to fail");
- TEST_ASSERT(rd_kafka_fatal_error(p1, NULL, 0),
- "Expected a fatal error to have been raised");
- TEST_ASSERT(error, "Expected commit_transaction() to fail");
- TEST_ASSERT(rd_kafka_error_is_fatal(error),
- "Expected commit_transaction() to return a "
- "fatal error");
- TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error),
- "Expected commit_transaction() not to return an "
- "abortable error");
- TEST_ASSERT(!rd_kafka_error_is_retriable(error),
- "Expected commit_transaction() not to return a "
- "retriable error");
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__FENCED,
- "Expected commit_transaction() to return %s, "
- "not %s: %s",
- rd_kafka_err2name(RD_KAFKA_RESP_ERR__FENCED),
- rd_kafka_error_name(error), rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_destroy(p1);
- rd_kafka_destroy(p2);
-
- /* Make sure no messages were committed. */
- test_consume_txn_msgs_easy(
- topic, topic, testid,
- test_get_partition_count(NULL, topic, 10 * 1000), 0, NULL);
-
- SUB_TEST_PASS();
-}
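-
-/* Condensed sketch (added for illustration, not part of the original test):
- * the fencing sequence exercised above. \p p1 and \p p2 are assumed to
- * share the same `transactional.id`; initializing the newer instance bumps
- * the producer epoch and the older instance fails fatally with
- * ERR__FENCED. */
-static void sketch_fencing(rd_kafka_t *p1, rd_kafka_t *p2) {
- rd_kafka_error_t *error;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1));
-
- /* Initializing p2 fences p1: p1's epoch is now stale. */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000));
-
- /* Any further transactional operation on p1 fails fatally. */
- error = rd_kafka_commit_transaction(p1, 30 * 1000);
- TEST_ASSERT(error &&
- rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__FENCED,
- "expected commit to fail with ERR__FENCED");
- rd_kafka_error_destroy(error);
-}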
-
-
-
-/**
- * @brief Check that fatal idempotent producer errors are also fatal
- * transactional errors when KIP-360 is not supported.
- */
-static void do_test_fatal_idempo_error_without_kip360(void) {
- const char *topic = test_mk_topic_name("0103_fatal_idempo", 1);
- const int32_t partition = 0;
- rd_kafka_conf_t *conf, *c_conf;
- rd_kafka_t *p, *c;
- rd_kafka_error_t *error;
- uint64_t testid;
- const int msgcnt[3] = {6, 4, 1};
- rd_kafka_topic_partition_list_t *records;
- test_msgver_t expect_mv, actual_mv;
- /* This test triggers UNKNOWN_PRODUCER_ID on AK <2.4 and >=2.5, but
- * not on AK 2.4.
- * On AK <2.5 (pre KIP-360) these errors are unrecoverable,
- * on AK >=2.5 (with KIP-360) we can recover.
- * Since 2.4 does not behave like the other releases we skip it here. */
- rd_bool_t expect_fail = test_broker_version < TEST_BRKVER(2, 5, 0, 0);
-
- SUB_TEST_QUICK(
- "%s", expect_fail ? "expecting failure since broker is < 2.5"
- : "not expecting failure since broker is >= 2.5");
-
- if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0) &&
- test_broker_version < TEST_BRKVER(2, 5, 0, 0))
- SUB_TEST_SKIP("can't trigger UNKNOWN_PRODUCER_ID on AK 2.4");
-
- if (expect_fail)
- test_curr->is_fatal_cb = test_error_is_not_fatal_cb;
- test_curr->ignore_dr_err = expect_fail;
-
- testid = test_id_generate();
-
- /* Keep track of what messages to expect on the output topic */
- test_msgver_init(&expect_mv, testid);
-
- test_conf_init(&conf, NULL, 30);
-
- test_conf_set(conf, "transactional.id", topic);
- test_conf_set(conf, "batch.num.messages", "1");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_create_topic(p, topic, 1, 3);
-
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
-
- /*
- * 3 transactions:
- * 1. Produce some messages, commit.
- * 2. Produce some messages, then delete the messages from txn 1 and
- * then produce some more messages: UNKNOWN_PRODUCER_ID should be
- * raised as a fatal error.
- * 3. Start a new transaction, produce and commit some new messages.
- * (this step is only performed when expect_fail is false).
- */
-
- /*
- * Transaction 1
- */
- TEST_SAY(_C_BLU "Transaction 1: %d msgs\n", msgcnt[0]);
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
- test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[0], NULL, 0);
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
-
-
- /*
- * Transaction 2
- */
- TEST_SAY(_C_BLU "Transaction 2: %d msgs\n", msgcnt[1]);
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
-
- /* Now delete the messages from txn1 */
- TEST_SAY("Deleting records < %s [%" PRId32 "] offset %d+1\n", topic,
- partition, msgcnt[0]);
- records = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(records, topic, partition)->offset =
- msgcnt[0]; /* include the control message too */
-
- TEST_CALL_ERR__(test_DeleteRecords_simple(p, NULL, records, NULL));
- rd_kafka_topic_partition_list_destroy(records);
-
- /* Wait for deletes to propagate */
- rd_sleep(2);
-
- if (!expect_fail)
- test_curr->dr_mv = &expect_mv;
-
- /* Produce more messages, should now fail */
- test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[1], NULL, 0);
-
- error = rd_kafka_commit_transaction(p, -1);
-
- TEST_SAY_ERROR(error, "commit_transaction() returned: ");
-
- if (expect_fail) {
- TEST_ASSERT(error != NULL, "Expected transaction to fail");
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "Expected abortable error");
- rd_kafka_error_destroy(error);
-
- /* Now abort transaction, which should raise the fatal error
- * since it is the abort that performs the PID reinitialization.
- */
- error = rd_kafka_abort_transaction(p, -1);
- TEST_SAY_ERROR(error, "abort_transaction() returned: ");
- TEST_ASSERT(error != NULL, "Expected abort to fail");
- TEST_ASSERT(rd_kafka_error_is_fatal(error),
- "Expecting fatal error");
- TEST_ASSERT(!rd_kafka_error_is_retriable(error),
- "Did not expect retriable error");
- TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error),
- "Did not expect abortable error");
-
- rd_kafka_error_destroy(error);
-
- } else {
- TEST_ASSERT(!error, "Did not expect commit to fail: %s",
- rd_kafka_error_string(error));
- }
-
-
- if (!expect_fail) {
- /*
- * Transaction 3
- */
- TEST_SAY(_C_BLU "Transaction 3: %d msgs\n", msgcnt[2]);
- test_curr->dr_mv = &expect_mv;
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
- test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[2],
- NULL, 0);
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
- }
-
- rd_kafka_destroy(p);
-
- /* Consume messages.
- * On AK<2.5 (expect_fail=true) we do not expect to see any messages
- * since the producer will have failed with a fatal error.
- * On AK>=2.5 (expect_fail=false) we should only see messages from
- * txn 3 which are sent after the producer has recovered.
- */
-
- test_conf_init(&c_conf, NULL, 0);
- test_conf_set(c_conf, "enable.partition.eof", "true");
- c = test_create_consumer(topic, NULL, c_conf, NULL);
- test_consumer_assign_partition("consume", c, topic, partition,
- RD_KAFKA_OFFSET_BEGINNING);
-
- test_msgver_init(&actual_mv, testid);
- test_msgver_ignore_eof(&actual_mv);
-
- test_consumer_poll("Verify output topic", c, testid, 1, 0, -1,
- &actual_mv);
-
- test_msgver_verify_compare("Verify output topic", &actual_mv,
- &expect_mv, TEST_MSGVER_ALL);
-
- test_msgver_clear(&actual_mv);
- test_msgver_clear(&expect_mv);
-
- rd_kafka_destroy(c);
-
- SUB_TEST_PASS();
-}
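-
-
-/* A minimal sketch (illustrative, not part of the test suite) of the
- * triage order the asserts above encode: check the rd_kafka_error_t
- * flags fatal-first, since a fatal error invalidates the whole producer,
- * then abortable, then retriable. */
-static void triage_txn_error_sketch(rd_kafka_t *rk, rd_kafka_error_t *error) {
-        if (rd_kafka_error_is_fatal(error)) {
-                /* Unrecoverable: destroy this producer instance and
-                 * start over with a new one. */
-        } else if (rd_kafka_error_txn_requires_abort(error)) {
-                /* The transaction is doomed but the producer is usable:
-                 * abort and begin a new transaction. */
-                rd_kafka_error_t *abort_error =
-                    rd_kafka_abort_transaction(rk, -1);
-                if (abort_error)
-                        rd_kafka_error_destroy(abort_error);
-        } else if (rd_kafka_error_is_retriable(error)) {
-                /* Transient: the failed call may simply be retried. */
-        }
-        rd_kafka_error_destroy(error);
-}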
-
-
-/**
- * @brief Check that empty transactions, with no messages produced, work
- * as expected.
- */
-static void do_test_empty_txn(rd_bool_t send_offsets, rd_bool_t do_commit) {
- const char *topic = test_mk_topic_name("0103_empty_txn", 1);
- rd_kafka_conf_t *conf, *c_conf;
- rd_kafka_t *p, *c;
- uint64_t testid;
- const int msgcnt = 10;
- rd_kafka_topic_partition_list_t *committed;
- int64_t offset;
-
- SUB_TEST_QUICK("%ssend offsets, %s", send_offsets ? "" : "don't ",
- do_commit ? "commit" : "abort");
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 30);
- c_conf = rd_kafka_conf_dup(conf);
-
- test_conf_set(conf, "transactional.id", topic);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_create_topic(p, topic, 1, 3);
-
- /* Produce some non-txn messages for the consumer to read and commit */
- test_produce_msgs_easy(topic, testid, 0, msgcnt);
-
- /* Create consumer and subscribe to the topic */
- test_conf_set(c_conf, "auto.offset.reset", "earliest");
- test_conf_set(c_conf, "enable.auto.commit", "false");
- c = test_create_consumer(topic, NULL, c_conf, NULL);
- test_consumer_subscribe(c, topic);
- test_consumer_wait_assignment(c, rd_false);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
-
- /* send_offsets? Consume messages and send those offsets to the txn */
- if (send_offsets) {
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- test_consumer_poll("consume", c, testid, -1, 0, msgcnt, NULL);
-
- TEST_CALL_ERR__(rd_kafka_assignment(c, &offsets));
- TEST_CALL_ERR__(rd_kafka_position(c, offsets));
-
- cgmetadata = rd_kafka_consumer_group_metadata(c);
- TEST_ASSERT(cgmetadata != NULL,
- "failed to get consumer group metadata");
-
- TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
- p, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
-
- rd_kafka_topic_partition_list_destroy(offsets);
- }
-
-
- if (do_commit)
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
- else
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(p, -1));
-
- /* Wait before checking the committed offsets (Kafka < 2.5.0) */
- if (test_broker_version < TEST_BRKVER(2, 5, 0, 0))
- rd_usleep(tmout_multip(5000 * 1000), NULL);
-
- /* Get the committed offsets */
- TEST_CALL_ERR__(rd_kafka_assignment(c, &committed));
- TEST_CALL_ERR__(rd_kafka_committed(c, committed, 10 * 1000));
-
- TEST_ASSERT(committed->cnt == 1,
- "expected one committed offset, not %d", committed->cnt);
- offset = committed->elems[0].offset;
- TEST_SAY("Committed offset is %" PRId64 "\n", offset);
-
- if (do_commit && send_offsets)
- TEST_ASSERT(offset >= msgcnt,
- "expected committed offset >= %d, got %" PRId64,
- msgcnt, offset);
- else
- TEST_ASSERT(offset < 0,
- "expected no committed offset, got %" PRId64,
- offset);
-
- rd_kafka_topic_partition_list_destroy(committed);
-
- rd_kafka_destroy(c);
- rd_kafka_destroy(p);
-
- SUB_TEST_PASS();
-}
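-
-/* A minimal sketch (illustrative, not part of the test suite) of the
- * committed-offset check performed above, for a single partition:
- * rd_kafka_committed() fills in the offsets in place, and a negative
- * offset (e.g. RD_KAFKA_OFFSET_INVALID) means nothing has been
- * committed yet. */
-static int64_t committed_offset_sketch(rd_kafka_t *c,
-                                       const char *topic,
-                                       int32_t partition) {
-        rd_kafka_topic_partition_list_t *parts;
-        int64_t offset = RD_KAFKA_OFFSET_INVALID;
-
-        parts = rd_kafka_topic_partition_list_new(1);
-        rd_kafka_topic_partition_list_add(parts, topic, partition);
-
-        if (!rd_kafka_committed(c, parts, 10 * 1000))
-                offset = parts->elems[0].offset;
-
-        rd_kafka_topic_partition_list_destroy(parts);
-        return offset;
-}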
-
-/**
- * @returns the high watermark for the given partition.
- */
-int64_t
-query_hi_wmark0(int line, rd_kafka_t *c, const char *topic, int32_t partition) {
- rd_kafka_resp_err_t err;
- int64_t lo = -1, hi = -1;
-
- err = rd_kafka_query_watermark_offsets(c, topic, partition, &lo, &hi,
- tmout_multip(5 * 1000));
- TEST_ASSERT(!err, "%d: query_watermark_offsets(%s) failed: %s", line,
- topic, rd_kafka_err2str(err));
-
- return hi;
-}
-#define query_hi_wmark(c, topic, part) query_hi_wmark0(__LINE__, c, topic, part)
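-
-/* The wrapper above uses a pattern common throughout this suite: the
- * macro captures __LINE__ at the call site so that failures point at
- * the caller rather than the helper. The same pattern in a standalone
- * form (illustrative; check0()/check() are hypothetical helpers):
- */
-static void check0(int line, int cond, const char *what) {
-        if (!cond)
-                TEST_FAIL("line %d: check failed: %s", line, what);
-}
-#define check(cond, what) check0(__LINE__, cond, what)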
-
-/**
- * @brief Check that isolation.level works as expected for query_watermark..().
- */
-static void do_test_wmark_isolation_level(void) {
- const char *topic = test_mk_topic_name("0103_wmark_isol", 1);
- rd_kafka_conf_t *conf, *c_conf;
- rd_kafka_t *p, *c1, *c2;
- uint64_t testid;
- int64_t hw_uncommitted, hw_committed;
-
- SUB_TEST_QUICK();
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 30);
- c_conf = rd_kafka_conf_dup(conf);
-
- test_conf_set(conf, "transactional.id", topic);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- p = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
-
- test_create_topic(p, topic, 1, 3);
-
- /* Produce some non-txn messages to avoid 0 as the committed hwmark */
- test_produce_msgs_easy(topic, testid, 0, 100);
-
- /* Create consumer and subscribe to the topic */
- test_conf_set(c_conf, "isolation.level", "read_committed");
- c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(c_conf), NULL);
- test_conf_set(c_conf, "isolation.level", "read_uncommitted");
- c2 = test_create_consumer(topic, NULL, c_conf, NULL);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
-
- /* Produce some txn messages */
- test_produce_msgs2(p, topic, testid, 0, 0, 100, NULL, 0);
-
- test_flush(p, 10 * 1000);
-
- hw_committed = query_hi_wmark(c1, topic, 0);
- hw_uncommitted = query_hi_wmark(c2, topic, 0);
-
- TEST_SAY("Pre-commit hwmarks: committed %" PRId64
- ", uncommitted %" PRId64 "\n",
- hw_committed, hw_uncommitted);
-
- TEST_ASSERT(hw_committed > 0 && hw_committed < hw_uncommitted,
- "Committed hwmark %" PRId64
- " should be lower than "
- "uncommitted hwmark %" PRId64 " for %s [0]",
- hw_committed, hw_uncommitted, topic);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
-
- /* Re-create the producer and re-init transactions to make
- * sure the transaction is fully committed in the cluster. */
- rd_kafka_destroy(p);
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
- TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
- rd_kafka_destroy(p);
-
-
- /* Now query wmarks again */
- hw_committed = query_hi_wmark(c1, topic, 0);
- hw_uncommitted = query_hi_wmark(c2, topic, 0);
-
- TEST_SAY("Post-commit hwmarks: committed %" PRId64
- ", uncommitted %" PRId64 "\n",
- hw_committed, hw_uncommitted);
-
- TEST_ASSERT(hw_committed == hw_uncommitted,
- "Committed hwmark %" PRId64
- " should be equal to "
- "uncommitted hwmark %" PRId64 " for %s [0]",
- hw_committed, hw_uncommitted, topic);
-
- rd_kafka_destroy(c1);
- rd_kafka_destroy(c2);
-
- SUB_TEST_PASS();
-}
-
-
-
-int main_0103_transactions(int argc, char **argv) {
-
- do_test_misuse_txn();
- do_test_basic_producer_txn(rd_false /* without compression */);
- do_test_basic_producer_txn(rd_true /* with compression */);
- do_test_consumer_producer_txn();
- do_test_fenced_txn(rd_false /* no produce after fencing */);
- do_test_fenced_txn(rd_true /* produce after fencing */);
- do_test_fatal_idempo_error_without_kip360();
- do_test_empty_txn(rd_false /*don't send offsets*/, rd_true /*commit*/);
- do_test_empty_txn(rd_false /*don't send offsets*/, rd_false /*abort*/);
- do_test_empty_txn(rd_true /*send offsets*/, rd_true /*commit*/);
- do_test_empty_txn(rd_true /*send offsets*/, rd_false /*abort*/);
- do_test_wmark_isolation_level();
- return 0;
-}
-
-
-
-/**
- * @brief Transaction tests that don't require a broker.
- */
-static void do_test_txn_local(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *p;
- rd_kafka_error_t *error;
- test_timing_t t_init;
- int timeout_ms = 7 * 1000;
-
- SUB_TEST_QUICK();
-
- /*
- * No transactional.id, init_transactions() should fail.
- */
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", NULL);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- error = rd_kafka_init_transactions(p, 10);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(
- rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
- "Expected ERR__NOT_CONFIGURED, not %s", rd_kafka_error_name(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_destroy(p);
-
-
- /*
- * No brokers, init_transactions() should time out according
- * to the timeout.
- */
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", NULL);
- test_conf_set(conf, "transactional.id", "test");
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- TEST_SAY("Waiting for init_transactions() timeout %d ms\n", timeout_ms);
-
- test_timeout_set((timeout_ms + 2000) / 1000);
-
- TIMING_START(&t_init, "init_transactions()");
- error = rd_kafka_init_transactions(p, timeout_ms);
- TIMING_STOP(&t_init);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected RD_KAFKA_RESP_ERR__TIMED_OUT, "
- "not %s: %s",
- rd_kafka_error_name(error), rd_kafka_error_string(error));
-
- TEST_SAY("init_transactions() failed as expected: %s\n",
- rd_kafka_error_string(error));
-
- rd_kafka_error_destroy(error);
-
- TIMING_ASSERT(&t_init, timeout_ms - 2000, timeout_ms + 5000);
-
- rd_kafka_destroy(p);
-
- SUB_TEST_PASS();
-}
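-
-
-/* A minimal sketch (illustrative, not part of the test suite): the
- * timeout error above is retriable, so a real application would
- * typically retry init_transactions() a bounded number of times rather
- * than give up on the first timeout. */
-static rd_kafka_error_t *init_transactions_retry_sketch(rd_kafka_t *rk,
-                                                        int attempts) {
-        while (1) {
-                rd_kafka_error_t *error =
-                    rd_kafka_init_transactions(rk, 30 * 1000);
-                if (!error || !rd_kafka_error_is_retriable(error) ||
-                    --attempts <= 0)
-                        return error; /* NULL on success */
-                rd_kafka_error_destroy(error);
-        }
-}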
-
-
-int main_0103_transactions_local(int argc, char **argv) {
-
- do_test_txn_local();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c
deleted file mode 100644
index 1ecf99da3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c
+++ /dev/null
@@ -1,617 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * @name Fetch from follower tests using the mock broker.
- */
-
-static int allowed_error;
-
-/**
- * @brief Decide what error_cb's will cause the test to fail.
- */
-static int
-error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- if (err == allowed_error ||
- /* If transport errors are allowed then it is likely
- * that we'll also see ALL_BROKERS_DOWN. */
- (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
- TEST_SAY("Ignoring allowed error: %s: %s\n",
- rd_kafka_err2name(err), reason);
- return 0;
- }
- return 1;
-}
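-
-/* The is_fatal_cb above is test-framework glue; in a plain librdkafka
- * application the equivalent hook is the standard error callback,
- * registered on the conf object before the handle is created
- * (illustrative sketch): */
-static void
-sketch_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
-        /* Most errors delivered here are informational and the client
-         * recovers on its own. */
-        fprintf(stderr, "error_cb: %s: %s\n",
-                rd_kafka_err2name((rd_kafka_resp_err_t)err), reason);
-}
-/* Usage: rd_kafka_conf_set_error_cb(conf, sketch_error_cb); */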
-
-
-/**
- * @brief Test offset reset when fetching from replica.
- * Since the highwatermark is in sync with the leader the
- * ERR_OFFSET_OUT_OF_RANGE is trusted by the consumer and
- * a reset is performed. See do_test_offset_reset_lag()
- * for the case where the replica is lagging and can't be trusted.
- */
-static void do_test_offset_reset(const char *auto_offset_reset) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 1000;
- const size_t msgsize = 1000;
-
- TEST_SAY(_C_MAG "[ Test FFF auto.offset.reset=%s ]\n",
- auto_offset_reset);
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "10", NULL);
-
- /* Set partition leader to broker 1, follower to broker 2 */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", auto_offset_reset);
- /* Make sure we don't consume the entire partition in one Fetch */
- test_conf_set(conf, "fetch.message.max.bytes", "100");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- /* The first fetch will go to the leader which will redirect
- * the consumer to the follower; the second and subsequent fetches
- * will go to the follower. We want the third fetch, the second one on
- * the follower, to fail and trigger an offset reset. */
- rd_kafka_mock_push_request_errors(
- mcluster, 1 /*FetchRequest*/, 3,
- RD_KAFKA_RESP_ERR_NO_ERROR /*leader*/,
- RD_KAFKA_RESP_ERR_NO_ERROR /*follower*/,
- RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE /*follower: fail*/);
-
- test_consumer_assign_partition(auto_offset_reset, c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- if (!strcmp(auto_offset_reset, "latest"))
- test_consumer_poll_no_msgs(auto_offset_reset, c, 0, 5000);
- else
- test_consumer_poll(auto_offset_reset, c, 0, 1, 0, msgcnt, NULL);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test FFF auto.offset.reset=%s PASSED ]\n",
- auto_offset_reset);
-}
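-
-/* A standalone instance (illustrative, not part of the test) of the
- * error injection used above: errors pushed onto the mock cluster are
- * consumed FIFO, one per matching request type, so here the first Fetch
- * succeeds and the second fails. */
-static void inject_fetch_errors_sketch(rd_kafka_mock_cluster_t *mcluster) {
-        rd_kafka_mock_push_request_errors(
-            mcluster, 1 /*FetchRequest*/, 2, RD_KAFKA_RESP_ERR_NO_ERROR,
-            RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE);
-}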
-
-
-/**
- * @brief Test offset reset when fetching from a lagging replica
- * whose high-watermark is behind the leader, which means
- * an offset reset should not be triggered.
- */
-static void do_test_offset_reset_lag(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 10;
- const int lag = 3;
- const size_t msgsize = 1000;
-
- TEST_SAY(_C_MAG "[ Test lagging FFF offset reset ]\n");
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "1", NULL);
-
- /* Set partition leader to broker 1, follower to broker 2 */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
-
- /* Make follower lag by some messages
- * ( .. -1 because offsets start at 0) */
- rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1,
- msgcnt - lag - 1);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- /* Make sure we don't consume the entire partition in one Fetch */
- test_conf_set(conf, "fetch.message.max.bytes", "100");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- test_consumer_assign_partition("lag", c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- /* Should receive all messages up to the follower's hwmark */
- test_consumer_poll("up to wmark", c, 0, 0, 0, msgcnt - lag, NULL);
-
- /* And then nothing.. as the consumer waits for the replica to
- * catch up. */
- test_consumer_poll_no_msgs("no msgs", c, 0, 3000);
-
- /* Catch up the replica, consumer should now get the
- * remaining messages */
- rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1, -1);
- test_consumer_poll("remaining", c, 0, 1, msgcnt - lag, lag, NULL);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test lagging FFF offset reset PASSED ]\n");
-}
-
-
-/**
- * @brief Test delegating the consumer to a follower that does not exist:
- * the consumer should not be able to consume any messages (which
- * is questionable behavior, but left for a later PR). Then change to a valid
- * replica and verify messages can be consumed.
- */
-static void do_test_unknown_follower(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 1000;
- const size_t msgsize = 1000;
- test_msgver_t mv;
-
- TEST_SAY(_C_MAG "[ Test unknown follower ]\n");
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "10", NULL);
-
- /* Set partition leader to broker 1, follower
- * to non-existent broker 19 */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 19);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- /* Make sure we don't consume the entire partition in one Fetch */
- test_conf_set(conf, "fetch.message.max.bytes", "100");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- test_consumer_assign_partition("unknown follower", c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- test_consumer_poll_no_msgs("unknown follower", c, 0, 5000);
-
- /* Set a valid follower (broker 3) */
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 3);
- test_msgver_init(&mv, 0);
- test_consumer_poll("proper follower", c, 0, 1, 0, msgcnt, &mv);
- /* Verify messages were indeed received from broker 3 */
- test_msgver_verify0(
- __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
- (struct test_mv_vs) {
- .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3});
- test_msgver_clear(&mv);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test unknown follower PASSED ]\n");
-}
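-
-/* Outside the msgver framework, the broker that served a message can be
- * checked per message (illustrative sketch; rd_kafka_message_broker_id()
- * returns -1 when the broker is not known): */
-static void check_serving_broker_sketch(const rd_kafka_message_t *rkm,
-                                        int32_t expected_broker) {
-        int32_t broker = rd_kafka_message_broker_id(rkm);
-        TEST_ASSERT(broker == expected_broker,
-                    "message served by broker %" PRId32 ", expected %" PRId32,
-                    broker, expected_broker);
-}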
-
-
-/**
- * @brief Issue #2955: Verify that fetch does not stall until next
- * periodic metadata timeout when leader broker is no longer
- * a replica.
- */
-static void do_test_replica_not_available(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 1000;
-
- TEST_SAY(_C_MAG "[ Test REPLICA_NOT_AVAILABLE ]\n");
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "10", NULL);
-
- /* Set partition leader to broker 1. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
- test_conf_set(conf, "fetch.error.backoff.ms", "1000");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
- RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0);
-
-
- test_consumer_assign_partition("REPLICA_NOT_AVAILABLE", c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- test_consumer_poll_no_msgs("Wait initial metadata", c, 0, 2000);
-
- /* Switch leader to broker 2 so that metadata is updated,
- * causing the consumer to start fetching from the new leader. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
-
- test_consumer_poll("Consume", c, 0, 1, 0, msgcnt, NULL);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test REPLICA_NOT_AVAILABLE PASSED ]\n");
-}
-
-/**
- * @brief On an error \p err on a Fetch request, the consumer should query
- * for the new leader or preferred replica and refresh metadata.
- */
-static void do_test_delegate_to_leader_on_error(rd_kafka_resp_err_t err) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 1000;
- const char *errstr = rd_kafka_err2name(err);
-
- TEST_SAY(_C_MAG "[ Test %s ]\n", errstr);
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "10", NULL);
-
- /* Set partition leader to broker 1. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
- test_conf_set(conf, "fetch.error.backoff.ms", "1000");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10, err, 0, err, 0,
- err, 0, err, 0, err, 0, err, 0, err, 0, err, 0, err, 0, err, 0);
-
-
- test_consumer_assign_partition(errstr, c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- test_consumer_poll_no_msgs("Wait initial metadata", c, 0, 2000);
-
- /* Switch leader to broker 2 so that metadata is updated,
- * causing the consumer to start fetching from the new leader. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
-
- test_consumer_poll_timeout("Consume", c, 0, 1, 0, msgcnt, NULL, 2000);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test %s ]\n", errstr);
-}
-
-/**
- * @brief Test when the preferred replica is no longer a follower of the
- * partition leader. We should try fetch from the leader instead.
- */
-static void do_test_not_leader_or_follower(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 10;
-
- TEST_SAY(_C_MAG "[ Test NOT_LEADER_OR_FOLLOWER ]\n");
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
- /* Set partition leader to broker 1. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
- test_conf_set(conf, "fetch.error.backoff.ms", "1000");
- test_conf_set(conf, "fetch.message.max.bytes", "10");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- test_consumer_assign_partition("NOT_LEADER_OR_FOLLOWER", c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- /* Since there are no messages, this poll only waits for metadata, and
- * then sets the preferred replica after the first fetch request. */
- test_consumer_poll_no_msgs("Initial metadata and preferred replica set",
- c, 0, 2000);
-
- /* Change the follower, so that the preferred replica is no longer the
- * leader or follower. */
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, -1);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "10", NULL);
-
- /* On getting a NOT_LEADER_OR_FOLLOWER error, we should change to the
- * leader and fetch from there without timing out. */
- test_msgver_t mv;
- test_msgver_init(&mv, 0);
- test_consumer_poll_timeout("from leader", c, 0, 1, 0, msgcnt, &mv,
- 2000);
- test_msgver_verify0(
- __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
- (struct test_mv_vs) {
- .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 1});
- test_msgver_clear(&mv);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test NOT_LEADER_OR_FOLLOWER PASSED ]\n");
-}
-
-
-/**
- * @brief Test when the preferred replica broker goes down. When a broker is
- * going down, we should delegate all its partitions to their leaders.
- */
-static void do_test_follower_down(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 10;
-
- TEST_SAY(_C_MAG "[ Test with follower down ]\n");
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
- /* Set partition leader to broker 1. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "client.rack", "myrack");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
- test_conf_set(conf, "fetch.error.backoff.ms", "1000");
- test_conf_set(conf, "fetch.message.max.bytes", "10");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- test_consumer_assign_partition("follower down", c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- /* Since there are no messages, this poll only waits for metadata, and
- * then sets the preferred replica after the first fetch request. */
- test_consumer_poll_no_msgs("Initial metadata and preferred replica set",
- c, 0, 2000);
-
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "10", NULL);
-
- /* Set the follower down. When a follower is DOWN we also expect
- * the cluster itself to know this and not point our preferred
- * replica at the downed broker. To facilitate this, we simply
- * change the follower from 2 to 3. */
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
- test_curr->is_fatal_cb = error_is_fatal_cb;
- rd_kafka_mock_broker_set_down(mcluster, 2);
- rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 3);
-
- /* We should change to the new follower when the old one goes down,
- * and fetch from there without timing out. */
- test_msgver_t mv;
- test_msgver_init(&mv, 0);
- test_consumer_poll_timeout("from other follower", c, 0, 1, 0, msgcnt,
- &mv, 2000);
- test_msgver_verify0(
- __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
- (struct test_mv_vs) {
- .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3});
- test_msgver_clear(&mv);
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_SAY(_C_GRN "[ Test with follower down PASSED ]\n");
-}
-
-
-/**
- * @brief When a seek is done with a leader epoch,
- * the expected behavior is to validate it and
- * start fetching from the end offset of that epoch if
- * it is less than the current offset.
- * This is possible in case of external group offsets storage,
- * associated with an unclean leader election.
- */
-static void do_test_seek_to_offset_with_previous_epoch(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *topic = "test";
- const int msgcnt = 10;
- const size_t msgsize = 1000;
- rd_kafka_topic_partition_list_t *rktpars;
- rd_kafka_topic_partition_t *rktpar;
-
- SUB_TEST_QUICK();
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
- "bootstrap.servers", bootstraps, NULL);
-
- test_conf_init(&conf, NULL, 0);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- c = test_create_consumer("mygroup", NULL, conf, NULL);
-
- test_consumer_assign_partition("zero", c, topic, 0,
- RD_KAFKA_OFFSET_INVALID);
-
- test_consumer_poll("first", c, 0, 0, msgcnt, msgcnt, NULL);
-
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
- "bootstrap.servers", bootstraps, NULL);
-
- test_consumer_poll("second", c, 0, 0, msgcnt, msgcnt, NULL);
-
- rktpars = rd_kafka_topic_partition_list_new(1);
- rktpar = rd_kafka_topic_partition_list_add(rktpars, topic, 0);
- rktpar->offset = msgcnt * 2;
- /* Will validate the offset and start fetching again
- * from offset 'msgcnt'. */
- rd_kafka_topic_partition_set_leader_epoch(rktpar, 0);
- rd_kafka_seek_partitions(c, rktpars, -1);
- rd_kafka_topic_partition_list_destroy(rktpars);
-
- test_consumer_poll("third", c, 0, 0, msgcnt, msgcnt, NULL);
-
- test_consumer_close(c);
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
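-
-/* The epoch-aware seek used above, reduced to a helper (illustrative
- * sketch, not part of the test): attaching a leader epoch to the offset
- * makes the client validate the position against that epoch before it
- * resumes fetching. */
-static void seek_with_epoch_sketch(rd_kafka_t *c,
-                                   const char *topic,
-                                   int64_t offset,
-                                   int32_t leader_epoch) {
-        rd_kafka_topic_partition_list_t *parts =
-            rd_kafka_topic_partition_list_new(1);
-        rd_kafka_topic_partition_t *rktpar =
-            rd_kafka_topic_partition_list_add(parts, topic, 0);
-        rd_kafka_error_t *error;
-
-        rktpar->offset = offset;
-        rd_kafka_topic_partition_set_leader_epoch(rktpar, leader_epoch);
-
-        error = rd_kafka_seek_partitions(c, parts, -1);
-        TEST_ASSERT(!error, "seek_partitions() failed: %s",
-                    rd_kafka_error_string(error));
-
-        rd_kafka_topic_partition_list_destroy(parts);
-}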
-
-
-int main_0104_fetch_from_follower_mock(int argc, char **argv) {
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- do_test_offset_reset("earliest");
- do_test_offset_reset("latest");
-
- do_test_offset_reset_lag();
-
- do_test_unknown_follower();
-
- do_test_replica_not_available();
-
- do_test_delegate_to_leader_on_error(
- RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE);
-
- do_test_not_leader_or_follower();
-
- do_test_follower_down();
-
- do_test_seek_to_offset_with_previous_epoch();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c
deleted file mode 100644
index 014642df1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c
+++ /dev/null
@@ -1,3926 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2019, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-#include "../src/rdkafka_proto.h"
-#include "../src/rdstring.h"
-#include "../src/rdunittest.h"
-
-#include <stdarg.h>
-
-
-/**
- * @name Producer transaction tests using the mock cluster
- *
- */
-
-
-static int allowed_error;
-
-/**
- * @brief Decide what error_cb's will cause the test to fail.
- */
-static int
-error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- if (err == allowed_error ||
- /* If transport errors are allowed then it is likely
- * that we'll also see ALL_BROKERS_DOWN. */
- (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
- TEST_SAY("Ignoring allowed error: %s: %s\n",
- rd_kafka_err2name(err), reason);
- return 0;
- }
- return 1;
-}
-
-
-static rd_kafka_resp_err_t (*on_response_received_cb)(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err,
- void *ic_opaque);
-
-/**
- * @brief Simple on_response_received interceptor that simply calls the
- * sub-test's on_response_received_cb function, if set.
- */
-static rd_kafka_resp_err_t
-on_response_received_trampoline(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err,
- void *ic_opaque) {
- TEST_ASSERT(on_response_received_cb != NULL,
- "on_response_received_cb must be set");
- return on_response_received_cb(rk, sockfd, brokername, brokerid, ApiKey,
- ApiVersion, CorrId, size, rtt, err,
- ic_opaque);
-}
-
-
-/**
- * @brief on_new interceptor to add an on_response_received interceptor.
- */
-static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- if (on_response_received_cb)
- err = rd_kafka_interceptor_add_on_response_received(
- rk, "on_response_received", on_response_received_trampoline,
- ic_opaque);
-
- return err;
-}
-
-
-/**
- * @brief Create a transactional producer and a mock cluster.
- *
- * The var-arg list is a NULL-terminated list of
- * (const char *key, const char *value) config properties.
- *
- * Special keys:
- * "on_response_received", "" - enable the on_response_received_cb
- * interceptor,
- * which must be assigned prior to
- * calling create_txn_producer().
- */
-static RD_SENTINEL rd_kafka_t *
-create_txn_producer(rd_kafka_mock_cluster_t **mclusterp,
- const char *transactional_id,
- int broker_cnt,
- ...) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- char numstr[8];
- va_list ap;
- const char *key;
- rd_bool_t add_interceptors = rd_false;
-
- rd_snprintf(numstr, sizeof(numstr), "%d", broker_cnt);
-
- test_conf_init(&conf, NULL, 60);
-
- test_conf_set(conf, "transactional.id", transactional_id);
- /* When mock brokers are set to down state they're still binding
- * the port, just not listening to it, which makes connection attempts
- * stall until socket.connection.setup.timeout.ms expires.
- * To speed up detection of brokers being down we reduce this timeout
- * to just a couple of seconds. */
- test_conf_set(conf, "socket.connection.setup.timeout.ms", "5000");
- /* Speed up reconnects */
- test_conf_set(conf, "reconnect.backoff.max.ms", "2000");
- test_conf_set(conf, "test.mock.num.brokers", numstr);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- test_curr->ignore_dr_err = rd_false;
-
- va_start(ap, broker_cnt);
- while ((key = va_arg(ap, const char *))) {
- if (!strcmp(key, "on_response_received")) {
- add_interceptors = rd_true;
- (void)va_arg(ap, const char *);
- } else {
- test_conf_set(conf, key, va_arg(ap, const char *));
- }
- }
- va_end(ap);
-
- /* Add the on_new interceptor */
- if (add_interceptors)
- rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
- on_new_producer, NULL);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- if (mclusterp) {
- *mclusterp = rd_kafka_handle_mock_cluster(rk);
- TEST_ASSERT(*mclusterp, "failed to create mock cluster");
-
- /* Create some of the common consumer "input" topics
- * that we must be able to commit to with
- * send_offsets_to_transaction().
- * The number in the topic name denotes its partition count. */
- TEST_CALL_ERR__(
- rd_kafka_mock_topic_create(*mclusterp, "srctopic4", 4, 1));
- TEST_CALL_ERR__(rd_kafka_mock_topic_create(
- *mclusterp, "srctopic64", 64, 1));
- }
-
- return rk;
-}
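-
-
-/* Typical usage of the helper above (illustrative): the var-arg tail is
- * (key, value) string pairs terminated by NULL, and the special
- * "on_response_received" key (with an empty value) enables the response
- * interceptor, provided on_response_received_cb was assigned first. */
-static rd_kafka_t *
-create_txn_producer_example(rd_kafka_mock_cluster_t **mclusterp) {
-        return create_txn_producer(mclusterp, "myTxnId", 3,
-                                   "batch.num.messages", "1", NULL);
-}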
-
-
-/**
- * @brief Test recoverable errors using mock broker error injections
- * and code coverage checks.
- */
-static void do_test_txn_recoverable_errors(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- const char *groupid = "myGroupId";
- const char *txnid = "myTxnId";
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
- NULL);
-
- /* Make sure transaction and group coordinators are different.
- * This verifies that AddOffsetsToTxnRequest isn't sent to the
- * transaction coordinator but the group coordinator. */
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 2);
-
- /*
- * Inject some InitProducerId errors that cause retries
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_InitProducerId, 3,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- (void)RD_UT_COVERAGE_CHECK(0); /* idemp_request_pid_failed(retry) */
- (void)RD_UT_COVERAGE_CHECK(1); /* txn_idemp_state_change(READY) */
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
- /* Produce a message without error first */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- rd_kafka_flush(rk, -1);
-
- /*
- * Produce a message, let it fail with a non-idempo/non-txn
- * retryable error
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS);
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- /* Make sure messages are produced */
- rd_kafka_flush(rk, -1);
-
- /*
- * Send some arbitrary offsets, first with some failures, then
- * succeed.
- */
- offsets = rd_kafka_topic_partition_list_new(4);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64", 39)->offset =
- 999999111;
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
- 999;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64", 19)->offset =
- 123456789;
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- /*
- * Commit transaction, first with some failures, then succeed.
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_EndTxn, 3,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
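-
-/* rd_kafka_producev(), used throughout these tests, is the var-arg
- * produce API: each RD_KAFKA_V_.. token contributes one message field
- * and RD_KAFKA_V_END terminates the list. A minimal sketch with a key
- * added (illustrative, not part of the test): */
-static rd_kafka_resp_err_t produce_one_sketch(rd_kafka_t *rk) {
-        return rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
-                                 RD_KAFKA_V_PARTITION(0),
-                                 RD_KAFKA_V_KEY("key", 3),
-                                 RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
-}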
-
-
-/**
- * @brief KIP-360: Test that fatal idempotence errors trigger abortable
- * transaction errors and that the producer can recover.
- */
-static void do_test_txn_fatal_idempo_errors(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- const char *txnid = "myTxnId";
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
- NULL);
-
- test_curr->ignore_dr_err = rd_true;
- test_curr->is_fatal_cb = error_is_fatal_cb;
- allowed_error = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
- /* Produce a message without error first */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- /* Produce a message, let it fail with a fatal idempo error. */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- /* Commit the transaction, should fail */
- error = rd_kafka_commit_transaction(rk, -1);
- TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
-
- TEST_SAY("commit_transaction() failed (expectedly): %s\n",
- rd_kafka_error_string(error));
-
- TEST_ASSERT(!rd_kafka_error_is_fatal(error),
- "Did not expect fatal error");
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "Expected abortable error");
- rd_kafka_error_destroy(error);
-
- /* Abort the transaction */
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- /* Run a new transaction without errors to verify that the
- * producer can recover. */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief KIP-360: Test that fatal idempotence errors trigger abortable
- * transaction errors, but let the broker-side bumping of the
- * producer PID take longer than the remaining transaction timeout,
- * which should raise a retriable error from abort_transaction().
- *
- * @param with_sleep After the first abort, sleep longer than it takes to
- * re-init the PID so that the internal state transitions
- * automatically.
- */
-static void do_test_txn_slow_reinit(rd_bool_t with_sleep) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- int32_t txn_coord = 2;
- const char *txnid = "myTxnId";
- test_timing_t timing;
-
- SUB_TEST("%s sleep", with_sleep ? "with" : "without");
-
- rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
- NULL);
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
- txn_coord);
-
- test_curr->ignore_dr_err = rd_true;
- test_curr->is_fatal_cb = NULL;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
- /* Produce a message without error first */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- test_flush(rk, -1);
-
- /* Set transaction coordinator latency higher than
- * the abort_transaction() call timeout so that the automatic
- * PID re-init takes longer than abort_transaction(). */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1,
- RD_KAFKA_RESP_ERR_NO_ERROR, 10000 /*10s*/);
-
- /* Produce a message, let it fail with a fatal idempo error. */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
-
- /* Commit the transaction, should fail */
- TIMING_START(&timing, "commit_transaction(-1)");
- error = rd_kafka_commit_transaction(rk, -1);
- TIMING_STOP(&timing);
- TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
-
- TEST_SAY("commit_transaction() failed (expectedly): %s\n",
- rd_kafka_error_string(error));
-
- TEST_ASSERT(!rd_kafka_error_is_fatal(error),
- "Did not expect fatal error");
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "Expected abortable error");
- rd_kafka_error_destroy(error);
-
- /* Abort the transaction, should fail with retriable (timeout) error */
- TIMING_START(&timing, "abort_transaction(100)");
- error = rd_kafka_abort_transaction(rk, 100);
- TIMING_STOP(&timing);
- TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
-
- TEST_SAY("First abort_transaction() failed: %s\n",
- rd_kafka_error_string(error));
- TEST_ASSERT(!rd_kafka_error_is_fatal(error),
- "Did not expect fatal error");
- TEST_ASSERT(rd_kafka_error_is_retriable(error),
- "Expected retriable error");
- rd_kafka_error_destroy(error);
-
- if (with_sleep)
- rd_sleep(12);
-
- /* Retry abort, should now finish. */
- TEST_SAY("Retrying abort\n");
- TIMING_START(&timing, "abort_transaction(-1)");
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
- TIMING_STOP(&timing);
-
- /* Run a new transaction without errors to verify that the
- * producer can recover. */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief KIP-360: Test that fatal idempotence errors trigger abortable
- * transaction errors, but let the broker-side bumping of the
- * producer PID fail with a fencing error.
- * Should raise a fatal error.
- *
- * @param error_code Which error code InitProducerIdRequest should fail with.
- * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
- * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
- */
-static void do_test_txn_fenced_reinit(rd_kafka_resp_err_t error_code) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- int32_t txn_coord = 2;
- const char *txnid = "myTxnId";
- char errstr[512];
- rd_kafka_resp_err_t fatal_err;
-
- SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));
-
- rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
- NULL);
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
- txn_coord);
-
- test_curr->ignore_dr_err = rd_true;
- test_curr->is_fatal_cb = error_is_fatal_cb;
- allowed_error = RD_KAFKA_RESP_ERR__FENCED;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
- /* Produce a message without error first */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- test_flush(rk, -1);
-
- /* Fail the PID reinit */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);
-
- /* Produce a message, let it fail with a fatal idempo error. */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- test_flush(rk, -1);
-
- /* Abort the transaction, should fail with a fatal error */
- error = rd_kafka_abort_transaction(rk, -1);
- TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
-
- TEST_SAY("abort_transaction() failed: %s\n",
- rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error");
- rd_kafka_error_destroy(error);
-
- fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
- TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised");
- TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr);
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test EndTxn errors.
- */
-static void do_test_txn_endtxn_errors(void) {
- rd_kafka_t *rk = NULL;
- rd_kafka_mock_cluster_t *mcluster = NULL;
- rd_kafka_resp_err_t err;
- struct {
- size_t error_cnt;
- rd_kafka_resp_err_t errors[4];
- rd_kafka_resp_err_t exp_err;
- rd_bool_t exp_retriable;
- rd_bool_t exp_abortable;
- rd_bool_t exp_fatal;
- rd_bool_t exp_successful_abort;
- } scenario[] = {
- /* This list of errors is from the EndTxnResponse handler in
- * AK clients/.../TransactionManager.java */
- {
- /* #0 */
- 2,
- {RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE},
- /* Should auto-recover */
- RD_KAFKA_RESP_ERR_NO_ERROR,
- },
- {
- /* #1 */
- 2,
- {RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR},
- /* Should auto-recover */
- RD_KAFKA_RESP_ERR_NO_ERROR,
- },
- {
- /* #2 */
- 1,
- {RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS},
- /* Should auto-recover */
- RD_KAFKA_RESP_ERR_NO_ERROR,
- },
- {
- /* #3 */
- 3,
- {RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS},
- /* Should auto-recover */
- RD_KAFKA_RESP_ERR_NO_ERROR,
- },
- {
- /* #4: the abort is auto-recovering through an epoch bump */
- 1,
- {RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID},
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
- rd_false /* !retriable */,
- rd_true /* abortable */,
- rd_false /* !fatal */,
- rd_true /* successful abort */
- },
- {
- /* #5: the abort is auto-recovering through an epoch bump */
- 1,
- {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING},
- RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
- rd_false /* !retriable */,
- rd_true /* abortable */,
- rd_false /* !fatal */,
- rd_true /* successful abort */
- },
- {
- /* #6 */
- 1,
- {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH},
- /* This error is normalized */
- RD_KAFKA_RESP_ERR__FENCED,
- rd_false /* !retriable */,
- rd_false /* !abortable */,
- rd_true /* fatal */
- },
- {
- /* #7 */
- 1,
- {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
- /* This error is normalized */
- RD_KAFKA_RESP_ERR__FENCED,
- rd_false /* !retriable */,
- rd_false /* !abortable */,
- rd_true /* fatal */
- },
- {
- /* #8 */
- 1,
- {RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED},
- RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
- rd_false /* !retriable */,
- rd_false /* !abortable */,
- rd_true /* fatal */
- },
- {
- /* #9 */
- 1,
- {RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED},
- RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
- rd_false /* !retriable */,
- rd_true /* abortable */,
- rd_false /* !fatal */
- },
- {
- /* #10 */
- /* Any other error should raise a fatal error */
- 1,
- {RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE},
- RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
- rd_false /* !retriable */,
- rd_true /* abortable */,
- rd_false /* !fatal */,
- },
- {
- /* #11 */
- 1,
- {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
- /* This error is normalized */
- RD_KAFKA_RESP_ERR__FENCED,
- rd_false /* !retriable */,
- rd_false /* !abortable */,
- rd_true /* fatal */
- },
- {0},
- };
- int i;
-
- SUB_TEST_QUICK();
-
- for (i = 0; scenario[i].error_cnt > 0; i++) {
- int j;
- /* For each scenario, test:
- * commit_transaction()
- * flush() + commit_transaction()
- * abort_transaction()
- * flush() + abort_transaction()
- */
- for (j = 0; j < (2 + 2); j++) {
- rd_bool_t commit = j < 2;
- rd_bool_t with_flush = j & 1;
- rd_bool_t exp_successful_abort =
- !commit && scenario[i].exp_successful_abort;
- const char *commit_str =
- commit ? (with_flush ? "commit&flush" : "commit")
- : (with_flush ? "abort&flush" : "abort");
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- rd_kafka_error_t *error;
- test_timing_t t_call;
-
- TEST_SAY("Testing scenario #%d %s with %" PRIusz
- " injected erorrs, expecting %s\n",
- i, commit_str, scenario[i].error_cnt,
- exp_successful_abort
- ? "successful abort"
- : rd_kafka_err2name(scenario[i].exp_err));
-
- if (!rk) {
- const char *txnid = "myTxnId";
- rk = create_txn_producer(&mcluster, txnid, 3,
- NULL);
- TEST_CALL_ERROR__(
- rd_kafka_init_transactions(rk, 5000));
- }
-
- /*
- * Start transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /* Transaction aborts will cause DR errors:
- * ignore them. */
- test_curr->ignore_dr_err = !commit;
-
- /*
- * Produce a message.
- */
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s",
- rd_kafka_err2str(err));
-
- if (with_flush)
- test_flush(rk, -1);
-
- /*
- * Send some arbitrary offsets.
- */
- offsets = rd_kafka_topic_partition_list_new(4);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4",
- 3)
- ->offset = 12;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64",
- 60)
- ->offset = 99999;
-
- cgmetadata =
- rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
- rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- /*
- * Commit transaction, first with some failures,
- * then succeed.
- */
- rd_kafka_mock_push_request_errors_array(
- mcluster, RD_KAFKAP_EndTxn, scenario[i].error_cnt,
- scenario[i].errors);
-
- TIMING_START(&t_call, "%s", commit_str);
- if (commit)
- error = rd_kafka_commit_transaction(
- rk, tmout_multip(5000));
- else
- error = rd_kafka_abort_transaction(
- rk, tmout_multip(5000));
- TIMING_STOP(&t_call);
-
- if (error)
- TEST_SAY(
- "Scenario #%d %s failed: %s: %s "
- "(retriable=%s, req_abort=%s, "
- "fatal=%s)\n",
- i, commit_str, rd_kafka_error_name(error),
- rd_kafka_error_string(error),
- RD_STR_ToF(
- rd_kafka_error_is_retriable(error)),
- RD_STR_ToF(
- rd_kafka_error_txn_requires_abort(
- error)),
- RD_STR_ToF(rd_kafka_error_is_fatal(error)));
- else
- TEST_SAY("Scenario #%d %s succeeded\n", i,
- commit_str);
-
- if (!scenario[i].exp_err || exp_successful_abort) {
- TEST_ASSERT(!error,
- "Expected #%d %s to succeed, "
- "got %s",
- i, commit_str,
- rd_kafka_error_string(error));
- continue;
- }
-
-
- TEST_ASSERT(error != NULL, "Expected #%d %s to fail", i,
- commit_str);
- TEST_ASSERT(scenario[i].exp_err ==
- rd_kafka_error_code(error),
- "Scenario #%d: expected %s, not %s", i,
- rd_kafka_err2name(scenario[i].exp_err),
- rd_kafka_error_name(error));
- TEST_ASSERT(
- scenario[i].exp_retriable ==
- (rd_bool_t)rd_kafka_error_is_retriable(error),
- "Scenario #%d: retriable mismatch", i);
- TEST_ASSERT(
- scenario[i].exp_abortable ==
- (rd_bool_t)rd_kafka_error_txn_requires_abort(
- error),
- "Scenario #%d: abortable mismatch", i);
- TEST_ASSERT(
- scenario[i].exp_fatal ==
- (rd_bool_t)rd_kafka_error_is_fatal(error),
- "Scenario #%d: fatal mismatch", i);
-
- /* Handle errors according to the error flags */
- if (rd_kafka_error_is_fatal(error)) {
- TEST_SAY("Fatal error, destroying producer\n");
- rd_kafka_error_destroy(error);
- rd_kafka_destroy(rk);
- rk = NULL; /* Will be re-created on the next
- * loop iteration. */
-
- } else if (rd_kafka_error_txn_requires_abort(error)) {
- rd_kafka_error_destroy(error);
- TEST_SAY(
- "Abortable error, "
- "aborting transaction\n");
- TEST_CALL_ERROR__(
- rd_kafka_abort_transaction(rk, -1));
-
- } else if (rd_kafka_error_is_retriable(error)) {
- rd_kafka_error_destroy(error);
- TEST_SAY("Retriable error, retrying %s once\n",
- commit_str);
- if (commit)
- TEST_CALL_ERROR__(
- rd_kafka_commit_transaction(rk,
- 5000));
- else
- TEST_CALL_ERROR__(
- rd_kafka_abort_transaction(rk,
- 5000));
- } else {
- TEST_FAIL(
- "Scenario #%d %s: "
- "Permanent error without enough "
- "hints to proceed: %s\n",
- i, commit_str,
- rd_kafka_error_string(error));
- }
- }
- }
-
- /* All done */
- if (rk)
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
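-
-/*
- * A minimal sketch (example_endtxn_triage() is a hypothetical helper, not
- * part of the test suite) of the canonical error triage the scenarios above
- * exercise: fatal errors require re-creating the producer, abortable errors
- * require abort_transaction(), and retriable errors allow retrying the
- * same call.
- */
-static void example_endtxn_triage(rd_kafka_t *rk, rd_kafka_error_t *error) {
- if (!error)
- return; /* commit/abort succeeded */
-
- if (rd_kafka_error_is_fatal(error)) {
- /* Unrecoverable: the producer must be destroyed and re-created. */
- rd_kafka_error_destroy(error);
- rd_kafka_destroy(rk);
- } else if (rd_kafka_error_txn_requires_abort(error)) {
- /* The transaction is poisoned: abort it and start over. */
- rd_kafka_error_t *abort_error;
- rd_kafka_error_destroy(error);
- abort_error = rd_kafka_abort_transaction(rk, -1);
- if (abort_error)
- rd_kafka_error_destroy(abort_error);
- } else if (rd_kafka_error_is_retriable(error)) {
- /* Transient: the same end-transaction call may be retried. */
- rd_kafka_error_destroy(error);
- }
-}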
-
-
-/**
- * @brief Test that commit/abort works properly with an infinite timeout.
- */
-static void do_test_txn_endtxn_infinite(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster = NULL;
- const char *txnid = "myTxnId";
- int i;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, NULL);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- for (i = 0; i < 2; i++) {
- rd_bool_t commit = i == 0;
- const char *commit_str = commit ? "commit" : "abort";
- rd_kafka_error_t *error;
- test_timing_t t_call;
-
- /* Messages will fail as the transaction fails,
- * ignore the DR error */
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_END));
-
- /*
- * Commit/abort transaction, first with some retriable failures,
- * then success.
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_EndTxn, 10,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
-
- rd_sleep(1);
-
- TIMING_START(&t_call, "%s_transaction()", commit_str);
- if (commit)
- error = rd_kafka_commit_transaction(rk, -1);
- else
- error = rd_kafka_abort_transaction(rk, -1);
- TIMING_STOP(&t_call);
-
- TEST_SAY("%s returned %s\n", commit_str,
- error ? rd_kafka_error_string(error) : "success");
-
- TEST_ASSERT(!error, "Expected %s to succeed, got %s",
- commit_str, rd_kafka_error_string(error));
- }
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Test that the commit/abort user timeout is honoured.
- */
-static void do_test_txn_endtxn_timeout(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster = NULL;
- const char *txnid = "myTxnId";
- int i;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, NULL);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- for (i = 0; i < 2; i++) {
- rd_bool_t commit = i == 0;
- const char *commit_str = commit ? "commit" : "abort";
- rd_kafka_error_t *error;
- test_timing_t t_call;
-
- /* Messages will fail as the transaction fails,
- * ignore the DR error */
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_END));
-
- /*
- * Commit/abort transaction, first with some retriable failures
- * whose retries exceed the user timeout.
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_EndTxn, 10,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
-
- rd_sleep(1);
-
- TIMING_START(&t_call, "%s_transaction()", commit_str);
- if (commit)
- error = rd_kafka_commit_transaction(rk, 100);
- else
- error = rd_kafka_abort_transaction(rk, 100);
- TIMING_STOP(&t_call);
-
- TEST_SAY_ERROR(error, "%s returned: ", commit_str);
- TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
- TEST_ASSERT(
- rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected %s to fail with timeout, not %s: %s", commit_str,
- rd_kafka_error_name(error), rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_is_retriable(error),
- "%s failure should raise a retriable error",
- commit_str);
- rd_kafka_error_destroy(error);
-
- /* Now call it again with an infinite timeout, should work. */
- TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
- if (commit)
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- else
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
- TIMING_STOP(&t_call);
- }
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
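-
-/*
- * A minimal sketch (example_commit_with_retry() is a hypothetical helper)
- * of the pattern verified above: a commit that exceeds the user timeout
- * fails with a retriable ERR__TIMED_OUT and may simply be retried, e.g.
- * with an infinite timeout.
- */
-static rd_kafka_error_t *example_commit_with_retry(rd_kafka_t *rk,
- int timeout_ms) {
- rd_kafka_error_t *error = rd_kafka_commit_transaction(rk, timeout_ms);
-
- if (error &&
- rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT &&
- rd_kafka_error_is_retriable(error)) {
- rd_kafka_error_destroy(error);
- /* Retry the commit, this time without a deadline. */
- error = rd_kafka_commit_transaction(rk, -1);
- }
-
- return error; /* NULL on success */
-}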
-
-
-
-/**
- * @brief Test commit/abort inflight timeout behaviour, which should result
- * in a retriable error.
- */
-static void do_test_txn_endtxn_timeout_inflight(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster = NULL;
- const char *txnid = "myTxnId";
- int32_t coord_id = 1;
- int i;
-
- SUB_TEST();
-
- allowed_error = RD_KAFKA_RESP_ERR__TIMED_OUT;
- test_curr->is_fatal_cb = error_is_fatal_cb;
-
- rk = create_txn_producer(&mcluster, txnid, 1, "transaction.timeout.ms",
- "5000", NULL);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- for (i = 0; i < 2; i++) {
- rd_bool_t commit = i == 0;
- const char *commit_str = commit ? "commit" : "abort";
- rd_kafka_error_t *error;
- test_timing_t t_call;
-
- /* Messages will fail as the transaction fails,
- * ignore the DR error */
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_END));
-
- /* Let EndTxn & EndTxn retry timeout */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_EndTxn, 2,
- RD_KAFKA_RESP_ERR_NO_ERROR, 10000,
- RD_KAFKA_RESP_ERR_NO_ERROR, 10000);
-
- rd_sleep(1);
-
- TIMING_START(&t_call, "%s_transaction()", commit_str);
- if (commit)
- error = rd_kafka_commit_transaction(rk, 4000);
- else
- error = rd_kafka_abort_transaction(rk, 4000);
- TIMING_STOP(&t_call);
-
- TEST_SAY_ERROR(error, "%s returned: ", commit_str);
- TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
- TEST_ASSERT(
- rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected %s to fail with timeout, not %s: %s", commit_str,
- rd_kafka_error_name(error), rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_is_retriable(error),
- "%s failure should raise a retriable error",
- commit_str);
- rd_kafka_error_destroy(error);
-
- /* Now call it again with an infinite timeout, should work. */
- TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
- if (commit)
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- else
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
- TIMING_STOP(&t_call);
- }
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->is_fatal_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Test that EndTxn is properly sent for aborted transactions
- * even if AddOffsetsToTxnRequest was retried.
- * This is a check for a txn_req_cnt bug.
- */
-static void do_test_txn_req_cnt(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- const char *txnid = "myTxnId";
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, NULL);
-
- /* Messages will fail on abort(), ignore the DR error */
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /*
- * Send some arbitrary offsets, first with some failures, then
- * succeed.
- */
- offsets = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64", 40)->offset =
- 999999111;
-
- rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_AddOffsetsToTxn,
- 2,
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test abortable errors using mock broker error injections
- * and code coverage checks.
- */
-static void do_test_txn_requires_abort_errors(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- int r;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /*
- * 1. Fail on produce
- */
- TEST_SAY("1. Fail on produce\n");
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- /* Wait for messages to fail */
- test_flush(rk, 5000);
-
- /* Any other transactional API should now raise an error */
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- error =
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
- TEST_ASSERT(error, "expected error");
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "expected abortable error, not %s",
- rd_kafka_error_string(error));
- TEST_SAY("Error %s: %s\n", rd_kafka_error_name(error),
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- /*
- * 2. Restart transaction and fail on AddPartitionsToTxn
- */
- TEST_SAY("2. Fail on AddPartitionsToTxn\n");
-
- /* First refresh proper Metadata to clear the topic's auth error,
- * otherwise the produce() below will fail immediately. */
- r = test_get_partition_count(rk, "mytopic", 5000);
- TEST_ASSERT(r > 0, "Expected topic %s to exist", "mytopic");
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- error = rd_kafka_commit_transaction(rk, 5000);
- TEST_ASSERT(error, "commit_transaction should have failed");
- TEST_SAY("commit_transaction() error %s: %s\n",
- rd_kafka_error_name(error), rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- /*
- * 3. Restart transaction and fail on AddOffsetsToTxn
- */
- TEST_SAY("3. Fail on AddOffsetsToTxn\n");
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_AddOffsetsToTxn, 1,
- RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED);
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- error =
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
- TEST_ASSERT(error, "Expected send_offsets..() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
- "expected send_offsets_to_transaction() to fail with "
- "group auth error: not %s",
- rd_kafka_error_name(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
-
- error = rd_kafka_commit_transaction(rk, 5000);
- TEST_ASSERT(error, "commit_transaction should have failed");
- rd_kafka_error_destroy(error);
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
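-
-/*
- * A minimal sketch (example_send_offsets_or_abort() is a hypothetical
- * helper) of the abortable-error handling verified above: once a
- * transactional API call fails with txn_requires_abort(), the transaction
- * must be aborted before a new one can be started.
- */
-static rd_bool_t
-example_send_offsets_or_abort(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_consumer_group_metadata_t *cgmd) {
- rd_kafka_error_t *error =
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmd, -1);
-
- if (!error)
- return rd_true; /* offsets are now part of the transaction */
-
- if (rd_kafka_error_txn_requires_abort(error)) {
- rd_kafka_error_t *abort_error;
- rd_kafka_error_destroy(error);
- abort_error = rd_kafka_abort_transaction(rk, -1);
- if (abort_error)
- rd_kafka_error_destroy(abort_error);
- } else {
- rd_kafka_error_destroy(error);
- }
-
- return rd_false;
-}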
-
-
-/**
- * @brief Test error handling and recovery when a broker goes down during
- * an ongoing transaction.
- */
-static void do_test_txn_broker_down_in_txn(rd_bool_t down_coord) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- int32_t coord_id, leader_id, down_id;
- const char *down_what;
- rd_kafka_resp_err_t err;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- int msgcnt = 1000;
- int remains = 0;
-
- /* Assign coordinator and leader to two different brokers */
- coord_id = 1;
- leader_id = 2;
- if (down_coord) {
- down_id = coord_id;
- down_what = "coordinator";
- } else {
- down_id = leader_id;
- down_what = "leader";
- }
-
- SUB_TEST_QUICK("Test %s down", down_what);
-
- rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);
-
- /* Broker down is not a test-failing error */
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
- test_curr->is_fatal_cb = error_is_fatal_cb;
-
- err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
- TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- coord_id);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
-
- /* Start transactioning */
- TEST_SAY("Starting transaction\n");
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
- msgcnt / 2, NULL, 0, &remains);
-
- TEST_SAY("Bringing down %s %" PRId32 "\n", down_what, down_id);
- rd_kafka_mock_broker_set_down(mcluster, down_id);
-
- rd_kafka_flush(rk, 3000);
-
- /* Produce remaining messages */
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
- msgcnt / 2, msgcnt / 2, NULL, 0, &remains);
-
- rd_sleep(2);
-
- TEST_SAY("Bringing up %s %" PRId32 "\n", down_what, down_id);
- rd_kafka_mock_broker_set_up(mcluster, down_id);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
-
- rd_kafka_destroy(rk);
-
- test_curr->is_fatal_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Advance the coord_id to the next broker.
- */
-static void set_next_coord(rd_kafka_mock_cluster_t *mcluster,
- const char *transactional_id,
- int broker_cnt,
- int32_t *coord_idp) {
- int32_t new_coord_id;
-
- new_coord_id = 1 + ((*coord_idp) % (broker_cnt));
- TEST_SAY("Changing transaction coordinator from %" PRId32 " to %" PRId32
- "\n",
- *coord_idp, new_coord_id);
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- new_coord_id);
-
- *coord_idp = new_coord_id;
-}
-
-/**
- * @brief Switch coordinator during a transaction.
- *
- */
-static void do_test_txn_switch_coordinator(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- int32_t coord_id;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- const int broker_cnt = 5;
- const int iterations = 20;
- int i;
-
- test_timeout_set(iterations * 10);
-
- SUB_TEST("Test switching coordinators");
-
- rk = create_txn_producer(&mcluster, transactional_id, broker_cnt, NULL);
-
- coord_id = 1;
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- coord_id);
-
- /* Start transactioning */
- TEST_SAY("Starting transaction\n");
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- for (i = 0; i < iterations; i++) {
- const int msgcnt = 100;
- int remains = 0;
-
- set_next_coord(mcluster, transactional_id, broker_cnt,
- &coord_id);
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
- msgcnt / 2, NULL, 0);
-
- if (!(i % 3))
- set_next_coord(mcluster, transactional_id, broker_cnt,
- &coord_id);
-
- /* Produce remaining messages */
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
- msgcnt / 2, msgcnt / 2, NULL, 0,
- &remains);
-
- if ((i & 1) || !(i % 8))
- set_next_coord(mcluster, transactional_id, broker_cnt,
- &coord_id);
-
-
- if (!(i % 5)) {
- test_curr->ignore_dr_err = rd_false;
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- } else {
- test_curr->ignore_dr_err = rd_true;
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
- }
- }
-
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Switch coordinator during a transaction when AddOffsetsToTxn
- * are sent. #3571.
- */
-static void do_test_txn_switch_coordinator_refresh(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- SUB_TEST("Test switching coordinators (refresh)");
-
- rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- 1);
-
- /* Start transactioning */
- TEST_SAY("Starting transaction\n");
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /* Switch the coordinator so that AddOffsetsToTxnRequest
- * will respond with NOT_COORDINATOR. */
- TEST_SAY("Switching to coordinator 2\n");
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- 2);
-
- /*
- * Send some arbitrary offsets.
- */
- offsets = rd_kafka_topic_partition_list_new(4);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64", 29)->offset =
- 99999;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
- rk, offsets, cgmetadata, 20 * 1000));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
-
- /* Produce some messages */
- test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, NULL, 0);
-
- /* And commit the transaction */
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test fatal error handling when transactions are not supported
- * by the broker.
- */
-static void do_test_txns_not_supported(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
-
- SUB_TEST_QUICK();
-
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "transactional.id", "myxnid");
- test_conf_set(conf, "bootstrap.servers", ",");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* Create mock cluster */
- mcluster = rd_kafka_mock_cluster_new(rk, 3);
-
- /* Disable InitProducerId */
- rd_kafka_mock_set_apiversion(mcluster, 22 /*InitProducerId*/, -1, -1);
-
-
- rd_kafka_brokers_add(rk, rd_kafka_mock_cluster_bootstraps(mcluster));
-
-
-
- error = rd_kafka_init_transactions(rk, 5 * 1000);
- TEST_SAY("init_transactions() returned %s: %s\n",
- error ? rd_kafka_error_name(error) : "success",
- error ? rd_kafka_error_string(error) : "success");
-
- TEST_ASSERT(error, "Expected init_transactions() to fail");
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
- "Expected init_transactions() to fail with %s, not %s: %s",
- rd_kafka_err2name(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE),
- rd_kafka_error_name(error), rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"),
- RD_KAFKA_V_KEY("test", 4), RD_KAFKA_V_END);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL,
- "Expected producev() to fail with %s, not %s",
- rd_kafka_err2name(RD_KAFKA_RESP_ERR__FATAL),
- rd_kafka_err2name(err));
-
- rd_kafka_mock_cluster_destroy(mcluster);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
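-
-/*
- * A minimal sketch (example_txns_supported() is a hypothetical helper) of
- * the detection path verified above: on brokers without transaction
- * support, init_transactions() fails with ERR__UNSUPPORTED_FEATURE, which
- * is fatal to the producer instance.
- */
-static rd_bool_t example_txns_supported(rd_kafka_t *rk, int timeout_ms) {
- rd_kafka_error_t *error = rd_kafka_init_transactions(rk, timeout_ms);
- rd_bool_t supported = rd_true;
-
- if (error) {
- supported = rd_kafka_error_code(error) !=
- RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
- rd_kafka_error_destroy(error);
- }
-
- return supported;
-}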
-
-
-/**
- * @brief CONCURRENT_TRANSACTION on AddOffsets.. should be retried.
- */
-static void do_test_txns_send_offsets_concurrent_is_retried(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- /* Wait for messages to be delivered */
- test_flush(rk, 5000);
-
-
- /*
- * Have AddOffsetsToTxn fail but eventually succeed due to
- * infinite retries.
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_AddOffsetsToTxn,
- 1 + 5, /* first request + some retries */
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Verify that send_offsets_to_transaction() with no eligible offsets
- * is handled properly - the call should succeed immediately and be
- * repeatable.
- */
-static void do_test_txns_send_offsets_non_eligible(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- /* Wait for messages to be delivered */
- test_flush(rk, 5000);
-
- /* Empty offsets list */
- offsets = rd_kafka_topic_partition_list_new(0);
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- /* Now call it again, should also succeed. */
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Verify that request timeouts don't cause crash (#2913).
- */
-static void do_test_txns_no_timeout_crash(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- SUB_TEST_QUICK();
-
- rk =
- create_txn_producer(&mcluster, "txnid", 3, "socket.timeout.ms",
- "1000", "transaction.timeout.ms", "5000", NULL);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- test_flush(rk, -1);
-
- /* Delay all broker connections */
- if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 2000)) ||
- (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 2000)) ||
- (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 2000)))
- TEST_FAIL("Failed to set broker RTT: %s",
- rd_kafka_err2str(err));
-
- /* send_offsets..() should now time out */
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- error =
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
- TEST_ASSERT(error, "Expected send_offsets..() to fail");
- TEST_SAY("send_offsets..() failed with %serror: %s\n",
- rd_kafka_error_is_retriable(error) ? "retriable " : "",
- rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "expected send_offsets_to_transaction() to fail with "
- "timeout, not %s",
- rd_kafka_error_name(error));
- TEST_ASSERT(rd_kafka_error_is_retriable(error),
- "expected send_offsets_to_transaction() to fail with "
- "a retriable error");
- rd_kafka_error_destroy(error);
-
- /* Reset delay and try again */
- if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 0)) ||
- (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 0)) ||
- (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 0)))
- TEST_FAIL("Failed to reset broker RTT: %s",
- rd_kafka_err2str(err));
-
- TEST_SAY("Retrying send_offsets..()\n");
- error =
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
- TEST_ASSERT(!error, "Expected send_offsets..() to succeed, got: %s",
- rd_kafka_error_string(error));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- /* All done */
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
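-
-/*
- * A minimal sketch (example_send_offsets_with_retries() is a hypothetical
- * helper) of the retry path verified above: a timed-out
- * send_offsets_to_transaction() returns a retriable error and the same
- * call may simply be issued again within the ongoing transaction.
- */
-static rd_kafka_error_t *
-example_send_offsets_with_retries(rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_consumer_group_metadata_t *cgmd,
- int max_retries) {
- rd_kafka_error_t *error;
- int attempts = 0;
-
- while (1) {
- error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmd, -1);
- if (!error || !rd_kafka_error_is_retriable(error) ||
- ++attempts > max_retries)
- break;
- rd_kafka_error_destroy(error);
- }
-
- return error; /* NULL on success */
-}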
-
-
-/**
- * @brief Test auth failure handling.
- */
-static void do_test_txn_auth_failure(int16_t ApiKey,
- rd_kafka_resp_err_t ErrorCode) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
-
- SUB_TEST_QUICK("ApiKey=%s ErrorCode=%s", rd_kafka_ApiKey2str(ApiKey),
- rd_kafka_err2name(ErrorCode));
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- rd_kafka_mock_push_request_errors(mcluster, ApiKey, 1, ErrorCode);
-
- error = rd_kafka_init_transactions(rk, 5000);
- TEST_ASSERT(error, "Expected init_transactions() to fail");
-
- TEST_SAY("init_transactions() failed: %s: %s\n",
- rd_kafka_err2name(rd_kafka_error_code(error)),
- rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_code(error) == ErrorCode,
- "Expected error %s, not %s", rd_kafka_err2name(ErrorCode),
- rd_kafka_err2name(rd_kafka_error_code(error)));
- TEST_ASSERT(rd_kafka_error_is_fatal(error),
- "Expected error to be fatal");
- TEST_ASSERT(!rd_kafka_error_is_retriable(error),
- "Expected error to not be retriable");
- rd_kafka_error_destroy(error);
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
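-
-/*
- * A minimal sketch (example_log_fatal() is a hypothetical helper) of
- * inspecting a producer after a fatal transactional error such as the
- * authorization failures provoked above: rd_kafka_fatal_error() returns
- * the underlying fatal error code, and the only recovery is to destroy
- * and re-create the producer.
- */
-static rd_kafka_resp_err_t example_log_fatal(rd_kafka_t *rk) {
- char errstr[512];
- rd_kafka_resp_err_t err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
-
- if (err)
- TEST_SAY("Fatal error on %s: %s: %s\n", rd_kafka_name(rk),
- rd_kafka_err2name(err), errstr);
-
- return err; /* RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error was raised */
-}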
-
-
-/**
- * @brief Issue #3041: Commit fails due to message flush() taking too long,
- * eventually resulting in an unabortable error and failure to
- * re-init the transactional producer.
- */
-static void do_test_txn_flush_timeout(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- rd_kafka_error_t *error;
- const char *txnid = "myTxnId";
- const char *topic = "myTopic";
- const int32_t coord_id = 2;
- int msgcounter = 0;
- rd_bool_t is_retry = rd_false;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, "message.timeout.ms",
- "10000", "transaction.timeout.ms", "10000",
- /* Speed up coordinator reconnect */
- "reconnect.backoff.max.ms", "1000", NULL);
-
-
- /* Broker down is not a test-failing error */
- test_curr->is_fatal_cb = error_is_fatal_cb;
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
-
- rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
-
- /* Set coordinator so we can disconnect it later */
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, coord_id);
-
- /*
- * Init transactions
- */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
-retry:
- if (!is_retry) {
- /* First attempt should fail. */
-
- test_curr->ignore_dr_err = rd_true;
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
- /* Assign invalid partition leaders for some partitions so
- * that messages will not be delivered. */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, -1);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 1, -1);
-
- } else {
- /* The retry should succeed */
- test_curr->ignore_dr_err = rd_false;
- test_curr->exp_dr_err = is_retry
- ? RD_KAFKA_RESP_ERR_NO_ERROR
- : RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
- }
-
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /*
- * Produce some messages to specific partitions and random.
- */
- test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 100, NULL, 10,
- &msgcounter);
- test_produce_msgs2_nowait(rk, topic, 1, 0, 0, 100, NULL, 10,
- &msgcounter);
- test_produce_msgs2_nowait(rk, topic, RD_KAFKA_PARTITION_UA, 0, 0, 100,
- NULL, 10, &msgcounter);
-
-
- /*
- * Send some arbitrary offsets.
- */
- offsets = rd_kafka_topic_partition_list_new(4);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64", 49)->offset =
- 999999111;
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
- 999;
- rd_kafka_topic_partition_list_add(offsets, "srctopic64", 34)->offset =
- 123456789;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- rd_sleep(2);
-
- if (!is_retry) {
- /* Now disconnect the coordinator. */
- TEST_SAY("Disconnecting transaction coordinator %" PRId32 "\n",
- coord_id);
- rd_kafka_mock_broker_set_down(mcluster, coord_id);
- }
-
- /*
- * Start committing.
- */
- error = rd_kafka_commit_transaction(rk, -1);
-
- if (!is_retry) {
- TEST_ASSERT(error != NULL, "Expected commit to fail");
- TEST_SAY("commit_transaction() failed (expectedly): %s\n",
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
- } else {
- TEST_ASSERT(!error, "Expected commit to succeed, not: %s",
- rd_kafka_error_string(error));
- }
-
- if (!is_retry) {
- /*
- * Bring the coordinator back up.
- */
- rd_kafka_mock_broker_set_up(mcluster, coord_id);
- rd_sleep(2);
-
- /*
- * Abort, and try again, this time without error.
- */
- TEST_SAY("Aborting and retrying\n");
- is_retry = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 60000));
- goto retry;
- }
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
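-
-/*
- * A minimal sketch of the recovery loop issue #3041 calls for
- * (example_run_txn_with_retry() and produce_cb are hypothetical; produce_cb
- * stands in for the application's produce logic): if the commit fails with
- * an abortable error, abort and re-run the whole transaction, re-producing
- * the messages.
- */
-static void example_run_txn_with_retry(rd_kafka_t *rk,
- void (*produce_cb)(rd_kafka_t *rk)) {
- while (1) {
- rd_kafka_error_t *error;
-
- error = rd_kafka_begin_transaction(rk);
- if (error) {
- rd_kafka_error_destroy(error);
- return;
- }
-
- produce_cb(rk);
-
- error = rd_kafka_commit_transaction(rk, -1);
- if (!error)
- return; /* committed */
-
- if (!rd_kafka_error_txn_requires_abort(error)) {
- /* Fatal or unexpected: give up. */
- rd_kafka_error_destroy(error);
- return;
- }
-
- /* Abort and retry from the top. */
- rd_kafka_error_destroy(error);
- error = rd_kafka_abort_transaction(rk, -1);
- if (error) {
- rd_kafka_error_destroy(error);
- return;
- }
- }
-}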
-
-
-/**
- * @brief ESC-4424: rko is reused in response handler after destroy in coord_req
- * sender due to bad state.
- *
- * This is somewhat of a race condition that usually takes a couple of
- * iterations (2 or 3) to hit, so we try at least 15 times.
- */
-static void do_test_txn_coord_req_destroy(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- int i;
- int errcnt = 0;
-
- SUB_TEST();
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- for (i = 0; i < 15; i++) {
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- test_timeout_set(10);
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /*
- * Inject errors to trigger retries
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_AddPartitionsToTxn,
- 2, /* first request + number of internal retries */
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_AddOffsetsToTxn,
- 1, /* first request only, no internal retries */
- RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 4,
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
- /* FIXME: When KIP-360 is supported, add this error:
- * RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER */
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2),
- RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
-
- /*
- * Send offsets to transaction
- */
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)
- ->offset = 12;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- error = rd_kafka_send_offsets_to_transaction(rk, offsets,
- cgmetadata, -1);
-
- TEST_SAY("send_offsets_to_transaction() #%d: %s\n", i,
- rd_kafka_error_string(error));
-
- /* As we can't control the exact timing and sequence
- * of requests this sometimes fails and sometimes succeeds,
- * but we run the test enough times to trigger at least
- * one failure. */
- if (error) {
- TEST_SAY(
- "send_offsets_to_transaction() #%d "
- "failed (expectedly): %s\n",
- i, rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "Expected abortable error for #%d", i);
- rd_kafka_error_destroy(error);
- errcnt++;
- }
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- /* Allow time for internal retries */
- rd_sleep(2);
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
- }
-
- TEST_ASSERT(errcnt > 0,
- "Expected at least one send_offets_to_transaction() "
- "failure");
-
- /* All done */
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-static rd_atomic32_t multi_find_req_cnt;
-
-static rd_kafka_resp_err_t
-multi_find_on_response_received_cb(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err,
- void *ic_opaque) {
- rd_kafka_mock_cluster_t *mcluster = rd_kafka_handle_mock_cluster(rk);
- rd_bool_t done = rd_atomic32_get(&multi_find_req_cnt) > 10000;
-
- if (ApiKey != RD_KAFKAP_AddOffsetsToTxn || done)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
- ", ApiKey %hd, CorrId %d, rtt %.2fms, %s: %s\n",
- rd_kafka_name(rk), brokername, brokerid, ApiKey, CorrId,
- rtt != -1 ? (float)rtt / 1000.0 : 0.0,
- done ? "already done" : "not done yet",
- rd_kafka_err2name(err));
-
-
- if (rd_atomic32_add(&multi_find_req_cnt, 1) == 1) {
- /* Trigger a broker down/up event, which in turn
- * triggers the coord_req_fsm(). */
- rd_kafka_mock_broker_set_down(mcluster, 2);
- rd_kafka_mock_broker_set_up(mcluster, 2);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- /* Trigger a broker down/up event, which in turn
- * triggers the coord_req_fsm(). */
- rd_kafka_mock_broker_set_down(mcluster, 3);
- rd_kafka_mock_broker_set_up(mcluster, 3);
-
- /* Clear the downed broker's latency so that it reconnects
- * quickly, otherwise the ApiVersionRequest will be delayed and
- * this will in turn delay the -> UP transition that we need to
- * trigger the coord_reqs. */
- rd_kafka_mock_broker_set_rtt(mcluster, 3, 0);
-
- /* Only do this down/up once */
- rd_atomic32_add(&multi_find_req_cnt, 10000);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief ESC-4444: multiple FindCoordinatorRequests are sent referencing
- * the same coord_req_t, but the first one received will destroy
- * the coord_req_t object and make the subsequent FindCoordinatorResponses
- * reference a freed object.
- *
- * What we want to achieve is this sequence:
- * 1. AddOffsetsToTxnRequest + Response which..
- * 2. Triggers TxnOffsetCommitRequest, but the coordinator is not known, so..
- * 3. Triggers a FindCoordinatorRequest
- * 4. FindCoordinatorResponse from 3 is received ..
- * 5. A TxnOffsetCommitRequest is sent from coord_req_fsm().
- * 6. Another broker changing state to Up triggers coord reqs again, which..
- * 7. Triggers a second TxnOffsetCommitRequest from coord_req_fsm().
- * 8. FindCoordinatorResponse from 5 is received, references the destroyed rko
- * and crashes.
- */
-static void do_test_txn_coord_req_multi_find(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- const char *txnid = "txnid", *groupid = "mygroupid", *topic = "mytopic";
- int i;
-
- SUB_TEST();
-
- rd_atomic32_init(&multi_find_req_cnt, 0);
-
- on_response_received_cb = multi_find_on_response_received_cb;
- rk = create_txn_producer(&mcluster, txnid, 3,
- /* Need connections to all brokers so we
- * can trigger coord_req_fsm events
- * by toggling connections. */
- "enable.sparse.connections", "false",
- /* Set up on_response_received interceptor */
- "on_response_received", "", NULL);
-
- /* Let broker 1 be both txn and group coordinator
- * so that the group coordinator connection is up when it is time to
- * send the TxnOffsetCommitRequest. */
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
-
- /* Set broker 1, 2, and 3 as leaders for a partition each and
- * later produce to both partitions so we know there's a connection
- * to all brokers. */
- rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 2);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 3);
-
- /* Broker down is not a test-failing error */
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
- test_curr->is_fatal_cb = error_is_fatal_cb;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- for (i = 0; i < 3; i++) {
- err = rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(i),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
- }
-
- test_flush(rk, 5000);
-
- /*
- * send_offsets_to_transaction() will query for the group coordinator,
- * we need to make those requests slow so that multiple requests are
- * sent.
- */
- for (i = 1; i <= 3; i++)
- rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 4000);
-
- /*
- * Send offsets to transaction
- */
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new(groupid);
-
- error =
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
-
- TEST_SAY("send_offsets_to_transaction() %s\n",
- rd_kafka_error_string(error));
- TEST_ASSERT(!error, "send_offsets_to_transaction() failed: %s",
- rd_kafka_error_string(error));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- /* Clear delay */
- for (i = 1; i <= 3; i++)
- rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 0);
-
- rd_sleep(5);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
-
- /* All done */
-
- TEST_ASSERT(rd_atomic32_get(&multi_find_req_cnt) > 10000,
- "on_request_sent interceptor did not trigger properly");
-
- rd_kafka_destroy(rk);
-
- on_response_received_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief ESC-4410: adding producer partitions gradually will trigger multiple
- * AddPartitionsToTxn requests. Due to a bug the third partition to be
- * registered would hang in PEND_TXN state.
- *
- * Trigger this behaviour by having two outstanding AddPartitionsToTxn requests
- * at the same time, followed by a need for a third:
- *
- * 1. Set coordinator broker rtt high (to give us time to produce).
- * 2. Produce to partition 0, will trigger first AddPartitionsToTxn.
- * 3. Produce to partition 1, will trigger second AddPartitionsToTxn.
- * 4. Wait for second AddPartitionsToTxn response.
- * 5. Produce to partition 2, should trigger AddPartitionsToTxn, but bug
- * causes it to stall in the pending state.
- */
-
-static rd_atomic32_t multi_addparts_resp_cnt;
-static rd_kafka_resp_err_t
-multi_addparts_response_received_cb(rd_kafka_t *rk,
- int sockfd,
- const char *brokername,
- int32_t brokerid,
- int16_t ApiKey,
- int16_t ApiVersion,
- int32_t CorrId,
- size_t size,
- int64_t rtt,
- rd_kafka_resp_err_t err,
- void *ic_opaque) {
-
- if (ApiKey == RD_KAFKAP_AddPartitionsToTxn) {
- TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
- ", ApiKey %hd, CorrId %d, rtt %.2fms, count %" PRId32
- ": %s\n",
- rd_kafka_name(rk), brokername, brokerid, ApiKey,
- CorrId, rtt != -1 ? (float)rtt / 1000.0 : 0.0,
- rd_atomic32_get(&multi_addparts_resp_cnt),
- rd_kafka_err2name(err));
-
- rd_atomic32_add(&multi_addparts_resp_cnt, 1);
- }
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static void do_test_txn_addparts_req_multi(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- const char *txnid = "txnid", *topic = "mytopic";
- int32_t txn_coord = 2;
-
- SUB_TEST();
-
- rd_atomic32_init(&multi_addparts_resp_cnt, 0);
-
- on_response_received_cb = multi_addparts_response_received_cb;
- rk = create_txn_producer(&mcluster, txnid, 3, "linger.ms", "0",
- "message.timeout.ms", "9000",
- /* Set up on_response_received interceptor */
- "on_response_received", "", NULL);
-
- /* Let broker 2 (txn_coord) be the txn coordinator. */
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
- txn_coord);
-
- rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
-
- /* Set partition leaders to a non-txn-coord broker so they won't
- * be affected by rtt delay */
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 1);
-
-
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- /*
- * Run one transaction first to let the client familiarize itself with
- * the topic, this avoids metadata lookups, etc, when the real
- * test is run.
- */
- TEST_SAY("Running seed transaction\n");
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
- TEST_CALL_ERR__(rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_VALUE("seed", 4),
- RD_KAFKA_V_END));
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
-
-
- /*
- * Now perform test transaction with rtt delays
- */
- TEST_SAY("Running test transaction\n");
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /* Reset counter */
- rd_atomic32_set(&multi_addparts_resp_cnt, 0);
-
- /* Add latency to txn coordinator so we can pace our produce() calls */
- rd_kafka_mock_broker_set_rtt(mcluster, txn_coord, 1000);
-
- /* Produce to partition 0 */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- rd_usleep(500 * 1000, NULL);
-
- /* Produce to partition 1 */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(1),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- TEST_SAY("Waiting for two AddPartitionsToTxnResponse\n");
- while (rd_atomic32_get(&multi_addparts_resp_cnt) < 2)
- rd_usleep(10 * 1000, NULL);
-
- TEST_SAY("%" PRId32 " AddPartitionsToTxnResponses seen\n",
- rd_atomic32_get(&multi_addparts_resp_cnt));
-
- /* Produce to partition 2, this message will hang in
- * queue if the bug is not fixed. */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(2),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- /* Allow some extra time for things to settle before committing
- * transaction. */
- rd_usleep(1000 * 1000, NULL);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 10 * 1000));
-
- /* All done */
- rd_kafka_destroy(rk);
-
- on_response_received_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Test handling of OffsetFetchRequest returning UNSTABLE_OFFSET_COMMIT.
- *
- * There are two things to test:
- * - OffsetFetch triggered by committed() (and similar code paths)
- * - OffsetFetch triggered by assign()
- */
-static void do_test_unstable_offset_commit(void) {
- rd_kafka_t *rk, *c;
- rd_kafka_conf_t *c_conf;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_topic_partition_list_t *offsets;
- const char *topic = "srctopic4";
- const int msgcnt = 100;
- const int64_t offset_to_commit = msgcnt / 2;
- int i;
- int remains = 0;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- test_conf_init(&c_conf, NULL, 0);
- test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
- test_conf_set(c_conf, "bootstrap.servers",
- rd_kafka_mock_cluster_bootstraps(mcluster));
- test_conf_set(c_conf, "enable.partition.eof", "true");
- test_conf_set(c_conf, "auto.offset.reset", "error");
- c = test_create_consumer("mygroup", NULL, c_conf, NULL);
-
- rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
-
- /* Produce some messages to the topic so that the consumer has
- * something to read. */
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
- test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt, NULL, 0,
- &remains);
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
-
- /* Commit offset */
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
- offset_to_commit;
- TEST_CALL_ERR__(rd_kafka_commit(c, offsets, 0 /*sync*/));
- rd_kafka_topic_partition_list_destroy(offsets);
-
- /* Retrieve offsets by calling committed().
- *
- * Have OffsetFetch fail and retry, on the first iteration
- * the API timeout is higher than the amount of time the retries will
- * take and thus succeed, and on the second iteration the timeout
- * will be lower and thus fail. */
- for (i = 0; i < 2; i++) {
- rd_kafka_resp_err_t err;
- rd_kafka_resp_err_t exp_err =
- i == 0 ? RD_KAFKA_RESP_ERR_NO_ERROR
- : RD_KAFKA_RESP_ERR__TIMED_OUT;
- int timeout_ms = exp_err ? 200 : 5 * 1000;
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_OffsetFetch,
- 1 + 5, /* first request + some retries */
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, topic, 0);
-
- err = rd_kafka_committed(c, offsets, timeout_ms);
-
- TEST_SAY("#%d: committed() returned %s (expected %s)\n", i,
- rd_kafka_err2name(err), rd_kafka_err2name(exp_err));
-
- TEST_ASSERT(err == exp_err,
- "#%d: Expected committed() to return %s, not %s", i,
- rd_kafka_err2name(exp_err), rd_kafka_err2name(err));
- TEST_ASSERT(offsets->cnt == 1,
- "Expected 1 committed offset, not %d",
- offsets->cnt);
- if (!exp_err)
- TEST_ASSERT(offsets->elems[0].offset ==
- offset_to_commit,
- "Expected committed offset %" PRId64
- ", "
- "not %" PRId64,
- offset_to_commit, offsets->elems[0].offset);
- else
- TEST_ASSERT(offsets->elems[0].offset < 0,
- "Expected no committed offset, "
- "not %" PRId64,
- offsets->elems[0].offset);
-
- rd_kafka_topic_partition_list_destroy(offsets);
- }
-
- TEST_SAY("Phase 2: OffsetFetch lookup through assignment\n");
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
- RD_KAFKA_OFFSET_STORED;
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_OffsetFetch,
- 1 + 5, /* first request + some retries */
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
- RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
-
- test_consumer_incremental_assign("assign", c, offsets);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- test_consumer_poll_exact("consume", c, 0, 1 /*eof*/, 0, msgcnt / 2,
- rd_true /*exact counts*/, NULL);
-
- /* All done */
- rd_kafka_destroy(c);
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
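-
-/*
- * A minimal sketch (example_fetch_committed() is a hypothetical helper,
- * assuming a consumer handle as above) of the caller-visible behaviour
- * verified above: UNSTABLE_OFFSET_COMMIT is retried internally, so
- * committed() only fails with ERR__TIMED_OUT if the offsets do not
- * stabilize within the supplied timeout.
- */
-static rd_kafka_resp_err_t
-example_fetch_committed(rd_kafka_t *c,
- rd_kafka_topic_partition_list_t *parts) {
- rd_kafka_resp_err_t err = rd_kafka_committed(c, parts, 5000);
-
- if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
- /* Offsets still unstable: retry with a longer deadline. */
- err = rd_kafka_committed(c, parts, 30 * 1000);
-
- return err;
-}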
-
-
-/**
- * @brief If a message times out locally before a send is attempted
- * and commit_transaction() is called, the transaction must not succeed.
- * https://github.com/confluentinc/confluent-kafka-dotnet/issues/1568
- */
-static void do_test_commit_after_msg_timeout(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- int32_t coord_id, leader_id;
- rd_kafka_resp_err_t err;
- rd_kafka_error_t *error;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- int remains = 0;
-
- SUB_TEST_QUICK();
-
- /* Assign coordinator and leader to two different brokers */
- coord_id = 1;
- leader_id = 2;
-
- rk = create_txn_producer(&mcluster, transactional_id, 3,
- "message.timeout.ms", "5000",
- "transaction.timeout.ms", "10000", NULL);
-
- /* Broker down is not a test-failing error */
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
- test_curr->is_fatal_cb = error_is_fatal_cb;
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
- err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
- TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- coord_id);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
-
- /* Start transactioning */
- TEST_SAY("Starting transaction\n");
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_SAY("Bringing down %" PRId32 "\n", leader_id);
- rd_kafka_mock_broker_set_down(mcluster, leader_id);
- rd_kafka_mock_broker_set_down(mcluster, coord_id);
-
- test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
-
- error = rd_kafka_commit_transaction(rk, -1);
- TEST_ASSERT(error != NULL, "expected commit_transaciton() to fail");
- TEST_SAY_ERROR(error, "commit_transaction() failed (as expected): ");
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "Expected txn_requires_abort error");
- rd_kafka_error_destroy(error);
-
- /* Bring the brokers up so the abort can complete */
- rd_kafka_mock_broker_set_up(mcluster, coord_id);
- rd_kafka_mock_broker_set_up(mcluster, leader_id);
-
- TEST_SAY("Aborting transaction\n");
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- TEST_ASSERT(remains == 0, "%d message(s) were not flushed\n", remains);
-
- TEST_SAY("Attempting second transaction, which should succeed\n");
- test_curr->is_fatal_cb = error_is_fatal_cb;
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
- test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
-
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->is_fatal_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief #3575: Verify that OUT_OF_ORDER_SEQ does not trigger an epoch bump
- * during an ongoing transaction.
- * The transaction should instead enter the abortable state.
- */
-static void do_test_out_of_order_seq(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- int32_t txn_coord = 1, leader = 2;
- const char *txnid = "myTxnId";
- test_timing_t timing;
- rd_kafka_resp_err_t err;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
- NULL);
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
- txn_coord);
-
- rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, leader);
-
- test_curr->ignore_dr_err = rd_true;
- test_curr->is_fatal_cb = NULL;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
-
- /* Produce one seeding message first to get the leader up and running */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
- test_flush(rk, -1);
-
- /* Let partition leader have a latency of 2 seconds
- * so that we can have multiple messages in-flight. */
- rd_kafka_mock_broker_set_rtt(mcluster, leader, 2 * 1000);
-
- /* Produce a message, let it fail with different errors,
- * ending with OUT_OF_ORDER which previously triggered an
- * Epoch bump. */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 3,
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
- RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
- RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER);
-
- /* Produce three messages that will be delayed
- * and have errors injected.*/
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- /* Now sleep a short while so that the messages are processed
- * by the broker and errors are returned. */
- TEST_SAY("Sleeping..\n");
- rd_sleep(5);
-
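-        /* Restore normal broker latency */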
- rd_kafka_mock_broker_set_rtt(mcluster, leader, 0);
-
- /* Produce a fifth message, should fail with ERR__STATE since
- * the transaction should have entered the abortable state. */
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__STATE,
- "Expected produce() to fail with ERR__STATE, not %s",
- rd_kafka_err2name(err));
- TEST_SAY("produce() failed as expected: %s\n", rd_kafka_err2str(err));
-
- /* Commit the transaction, should fail with abortable error. */
- TIMING_START(&timing, "commit_transaction(-1)");
- error = rd_kafka_commit_transaction(rk, -1);
- TIMING_STOP(&timing);
- TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
-
- TEST_SAY("commit_transaction() failed (expectedly): %s\n",
- rd_kafka_error_string(error));
-
- TEST_ASSERT(!rd_kafka_error_is_fatal(error),
- "Did not expect fatal error");
- TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
- "Expected abortable error");
- rd_kafka_error_destroy(error);
-
- /* Abort the transaction */
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- /* Run a new transaction without errors to verify that the
- * producer can recover. */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Verify lossless delivery if the topic disappears from Metadata
- *        for a while.
- *
- * If a topic is removed from metadata in between transactions, the producer
- * will remove its partition state for the topic's partitions.
- * If the same topic later comes back (same topic instance, not a new creation)
- * then the producer must restore the previously used msgid/BaseSequence
- * in case the same Epoch is still used, or messages will be silently lost
- * as they would appear to the broker to be legitimate duplicates.
- *
- * Reproduction:
- * 1. produce msgs to topic, commit transaction.
- * 2. remove topic from metadata
- * 3. make sure client updates its metadata, which removes the partition
- * objects.
- * 4. restore the topic in metadata
- * 5. produce new msgs to topic, commit transaction.
- * 6. consume topic. All messages should be accounted for.
- */
-static void do_test_topic_disappears_for_awhile(void) {
- rd_kafka_t *rk, *c;
- rd_kafka_conf_t *c_conf;
- rd_kafka_mock_cluster_t *mcluster;
- const char *topic = "mytopic";
- const char *txnid = "myTxnId";
- test_timing_t timing;
- int i;
- int msgcnt = 0;
- const int partition_cnt = 10;
-
- SUB_TEST_QUICK();
-
- rk = create_txn_producer(
- &mcluster, txnid, 1, "batch.num.messages", "3", "linger.ms", "100",
- "topic.metadata.refresh.interval.ms", "2000", NULL);
-
- rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- for (i = 0; i < 2; i++) {
- int cnt = 3 * 2 * partition_cnt;
- rd_bool_t remove_topic = (i % 2) == 0;
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
- while (cnt-- >= 0) {
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_PARTITION(cnt % partition_cnt),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
- msgcnt++;
- }
-
- /* Commit the transaction */
- TIMING_START(&timing, "commit_transaction(-1)");
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- TIMING_STOP(&timing);
-
-
-
- if (remove_topic) {
- /* Make it seem the topic is removed, refresh metadata,
- * and then make the topic available again. */
- const rd_kafka_metadata_t *md;
-
- TEST_SAY("Marking topic as non-existent\n");
-
- rd_kafka_mock_topic_set_error(
- mcluster, topic,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
-
- TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, NULL, &md,
- tmout_multip(5000)));
-
- rd_kafka_metadata_destroy(md);
-
- rd_sleep(2);
-
- TEST_SAY("Bringing topic back to life\n");
- rd_kafka_mock_topic_set_error(
- mcluster, topic, RD_KAFKA_RESP_ERR_NO_ERROR);
- }
- }
-
-        TEST_SAY("Verifying messages by consumption\n");
- test_conf_init(&c_conf, NULL, 0);
- test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
- test_conf_set(c_conf, "bootstrap.servers",
- rd_kafka_mock_cluster_bootstraps(mcluster));
- test_conf_set(c_conf, "enable.partition.eof", "true");
- test_conf_set(c_conf, "auto.offset.reset", "earliest");
- c = test_create_consumer("mygroup", NULL, c_conf, NULL);
-
- test_consumer_subscribe(c, topic);
- test_consumer_poll_exact("consume", c, 0, partition_cnt, 0, msgcnt,
- rd_true /*exact*/, NULL);
- rd_kafka_destroy(c);
-
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test that group coordinator requests can handle an
- * untimely disconnect.
- *
- * The transaction manager makes use of librdkafka coord_req to commit
- * transaction offsets to the group coordinator.
- * If the connection to the given group coordinator is not up, the
- * coord_req code will request a connection once, but if this connection
- * fails there will be no new attempts and the coord_req will idle until
- * either destroyed or the connection is retried for other reasons.
- * This in turn stalls the send_offsets_to_transaction() call until the
- * transaction times out.
- *
- * There are two variants to this test based on switch_coord:
- * - True - Switches the coordinator during the downtime.
- * The client should detect this and send the request to the
- * new coordinator.
- * - False - The coordinator remains on the down broker. Client will reconnect
- * when down broker comes up again.
- */
-struct some_state {
- rd_kafka_mock_cluster_t *mcluster;
- rd_bool_t switch_coord;
- int32_t broker_id;
- const char *grpid;
-};
-
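-/**
- * @brief Background thread: sleeps 3s, then either switches the group
- *        coordinator or brings the down broker back up, depending on
- *        \c switch_coord (see struct some_state).
- */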
-static int delayed_up_cb(void *arg) {
- struct some_state *state = arg;
- rd_sleep(3);
- if (state->switch_coord) {
- TEST_SAY("Switching group coordinator to %" PRId32 "\n",
- state->broker_id);
- rd_kafka_mock_coordinator_set(state->mcluster, "group",
- state->grpid, state->broker_id);
- } else {
- TEST_SAY("Bringing up group coordinator %" PRId32 "..\n",
- state->broker_id);
- rd_kafka_mock_broker_set_up(state->mcluster, state->broker_id);
- }
- return 0;
-}
-
-static void do_test_disconnected_group_coord(rd_bool_t switch_coord) {
- const char *topic = "mytopic";
- const char *txnid = "myTxnId";
- const char *grpid = "myGrpId";
- const int partition_cnt = 1;
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- struct some_state state = RD_ZERO_INIT;
- test_timing_t timing;
- thrd_t thrd;
- int ret;
-
- SUB_TEST_QUICK("switch_coord=%s", RD_STR_ToF(switch_coord));
-
- test_curr->is_fatal_cb = error_is_fatal_cb;
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
-
- rk = create_txn_producer(&mcluster, txnid, 3, NULL);
-
- rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
-
- /* Broker 1: txn coordinator
- * Broker 2: group coordinator
- * Broker 3: partition leader & backup coord if switch_coord=true */
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
- rd_kafka_mock_coordinator_set(mcluster, "group", grpid, 2);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 3);
-
- /* Bring down group coordinator so there are no undesired
- * connections to it. */
- rd_kafka_mock_broker_set_down(mcluster, 2);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
- test_flush(rk, -1);
-
- rd_sleep(1);
-
-        /* Run a background thread that, after 3s (long enough for the
-         * first connection attempt to fail), makes the group coordinator
-         * available again. */
- state.switch_coord = switch_coord;
- state.mcluster = mcluster;
- state.grpid = grpid;
- state.broker_id = switch_coord ? 3 : 2;
- if (thrd_create(&thrd, delayed_up_cb, &state) != thrd_success)
- TEST_FAIL("Failed to create thread");
-
- TEST_SAY("Calling send_offsets_to_transaction()\n");
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 1;
- cgmetadata = rd_kafka_consumer_group_metadata_new(grpid);
-
- TIMING_START(&timing, "send_offsets_to_transaction(-1)");
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
- TIMING_STOP(&timing);
- TIMING_ASSERT(&timing, 0, 10 * 1000 /*10s*/);
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
- thrd_join(thrd, &ret);
-
- /* Commit the transaction */
- TIMING_START(&timing, "commit_transaction(-1)");
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- TIMING_STOP(&timing);
-
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->is_fatal_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test that a NULL coordinator is not fatal when
- * the transactional producer reconnects to the txn coordinator
- * and the first thing it does is a FindCoordinatorRequest that
- * fails with COORDINATOR_NOT_AVAILABLE, setting coordinator to NULL.
- */
-static void do_test_txn_coordinator_null_not_fatal(void) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- rd_kafka_resp_err_t err;
- int32_t coord_id = 1;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- int msgcnt = 1;
- int remains = 0;
-
- SUB_TEST_QUICK();
-
- /* Broker down is not a test-failing error */
- allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
- test_curr->is_fatal_cb = error_is_fatal_cb;
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
-
- /* One second is the minimum transaction timeout */
- rk = create_txn_producer(&mcluster, transactional_id, 1,
- "transaction.timeout.ms", "1000", NULL);
-
- err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
- TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- coord_id);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
-
-        /* Start transacting */
- TEST_SAY("Starting transaction\n");
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- /* Makes the produce request timeout. */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_NO_ERROR, 3000);
-
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
- msgcnt, NULL, 0, &remains);
-
-        /* This value is linked to transaction.timeout.ms and needs to allow
-         * enough time for the message to time out and a DrainBump sequence
-         * to be started. */
- rd_kafka_flush(rk, 1000);
-
- /* To trigger the error the COORDINATOR_NOT_AVAILABLE response
- * must come AFTER idempotent state has changed to WaitTransport
- * but BEFORE it changes to WaitPID. To make it more likely
- * rd_kafka_txn_coord_timer_start timeout can be changed to 5 ms
- * in rd_kafka_txn_coord_query, when unable to query for
- * transaction coordinator.
- */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_FindCoordinator, 1,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, 10);
-
- /* Coordinator down starts the FindCoordinatorRequest loop. */
- TEST_SAY("Bringing down coordinator %" PRId32 "\n", coord_id);
- rd_kafka_mock_broker_set_down(mcluster, coord_id);
-
- /* Coordinator down for some time. */
- rd_usleep(100 * 1000, NULL);
-
- /* When it comes up, the error is triggered, if the preconditions
- * happen. */
- TEST_SAY("Bringing up coordinator %" PRId32 "\n", coord_id);
- rd_kafka_mock_broker_set_up(mcluster, coord_id);
-
- /* Make sure DRs are received */
- rd_kafka_flush(rk, 1000);
-
- error = rd_kafka_commit_transaction(rk, -1);
-
- TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
- TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
- TEST_SAY("commit_transaction() failed (expectedly): %s\n",
- rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
-
-        /* Wait some time before closing to make sure the client doesn't go
-         * into TERMINATING state before the error is triggered. */
- rd_usleep(1000 * 1000, NULL);
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->is_fatal_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Simple test to make sure the init_transactions() timeout is honoured
- * and also not infinite.
- */
-static void do_test_txn_resumable_init(void) {
- rd_kafka_t *rk;
- const char *transactional_id = "txnid";
- rd_kafka_error_t *error;
- test_timing_t duration;
-
- SUB_TEST();
-
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, NULL, 20);
- test_conf_set(conf, "bootstrap.servers", "");
- test_conf_set(conf, "transactional.id", transactional_id);
- test_conf_set(conf, "transaction.timeout.ms", "4000");
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- /* First make sure a lower timeout is honoured. */
- TIMING_START(&duration, "init_transactions(1000)");
- error = rd_kafka_init_transactions(rk, 1000);
- TIMING_STOP(&duration);
-
- if (error)
- TEST_SAY("First init_transactions failed (as expected): %s\n",
- rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected _TIMED_OUT, not %s",
- error ? rd_kafka_error_string(error) : "success");
- rd_kafka_error_destroy(error);
-
- TIMING_ASSERT(&duration, 900, 1500);
-
- TEST_SAY(
- "Performing second init_transactions() call now with an "
- "infinite timeout: "
- "should time out in 2 x transaction.timeout.ms\n");
-
- TIMING_START(&duration, "init_transactions(infinite)");
- error = rd_kafka_init_transactions(rk, -1);
- TIMING_STOP(&duration);
-
- if (error)
- TEST_SAY("Second init_transactions failed (as expected): %s\n",
- rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
- "Expected _TIMED_OUT, not %s",
- error ? rd_kafka_error_string(error) : "success");
- rd_kafka_error_destroy(error);
-
- TIMING_ASSERT(&duration, 2 * 4000 - 500, 2 * 4000 + 500);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Retries a transaction call until it succeeds or returns a
- * non-retriable error - which will cause the test to fail.
- *
- * @param intermed_calls A block of code that will be called after each
- *                       retriable failure of \p call.
- */
-#define RETRY_TXN_CALL__(call, intermed_calls) \
- do { \
- rd_kafka_error_t *_error = call; \
- if (!_error) \
- break; \
- TEST_SAY_ERROR(_error, "%s: ", "" #call); \
- TEST_ASSERT(rd_kafka_error_is_retriable(_error), \
- "Expected retriable error"); \
- TEST_SAY("%s failed, retrying in 1 second\n", "" #call); \
- rd_kafka_error_destroy(_error); \
- intermed_calls; \
- rd_sleep(1); \
- } while (1)
-
-/**
- * @brief Call \p call and expect it to fail with \p exp_err_code.
- */
-#define TXN_CALL_EXPECT_ERROR__(call, exp_err_code) \
- do { \
- rd_kafka_error_t *_error = call; \
- TEST_ASSERT(_error != NULL, \
- "%s: Expected %s error, got success", "" #call, \
- rd_kafka_err2name(exp_err_code)); \
- TEST_SAY_ERROR(_error, "%s: ", "" #call); \
- TEST_ASSERT(rd_kafka_error_code(_error) == exp_err_code, \
- "%s: Expected %s error, got %s", "" #call, \
- rd_kafka_err2name(exp_err_code), \
- rd_kafka_error_name(_error)); \
- rd_kafka_error_destroy(_error); \
- } while (0)
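-
-/*
- * Illustrative usage of the two macros above (mirroring the calls made
- * further down in this file): retry a short-timeout init_transactions()
- * and, between retries, verify that a conflicting abort_transaction()
- * fails with _CONFLICT:
- *
- *   RETRY_TXN_CALL__(
- *       rd_kafka_init_transactions(rk, 100),
- *       TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
- *                               RD_KAFKA_RESP_ERR__CONFLICT));
- */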
-
-
-/**
- * @brief Simple test to make sure short API timeouts can be safely resumed
- * by calling the same API again.
- *
- * @param do_commit Commit transaction if true, else abort transaction.
- */
-static void do_test_txn_resumable_calls_timeout(rd_bool_t do_commit) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- int32_t coord_id = 1;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- int msgcnt = 1;
- int remains = 0;
-
- SUB_TEST("%s_transaction", do_commit ? "commit" : "abort");
-
- rk = create_txn_producer(&mcluster, transactional_id, 1, NULL);
-
- err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
- TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- coord_id);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
-
- TEST_SAY("Starting transaction\n");
- TEST_SAY("Delaying first two InitProducerIdRequests by 500ms\n");
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_InitProducerId, 2,
- RD_KAFKA_RESP_ERR_NO_ERROR, 500, RD_KAFKA_RESP_ERR_NO_ERROR, 500);
-
- RETRY_TXN_CALL__(
- rd_kafka_init_transactions(rk, 100),
- TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
- RD_KAFKA_RESP_ERR__CONFLICT));
-
- RETRY_TXN_CALL__(rd_kafka_begin_transaction(rk), /*none*/);
-
-
- TEST_SAY("Delaying ProduceRequests by 3000ms\n");
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_NO_ERROR, 3000);
-
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
- msgcnt, NULL, 0, &remains);
-
-
- TEST_SAY("Delaying SendOffsetsToTransaction by 400ms\n");
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_AddOffsetsToTxn, 1,
- RD_KAFKA_RESP_ERR_NO_ERROR, 400);
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12;
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- /* This is not a resumable call on timeout */
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
-
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
-
- TEST_SAY("Delaying EndTxnRequests by 1200ms\n");
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_EndTxn, 1, RD_KAFKA_RESP_ERR_NO_ERROR,
- 1200);
-
- /* Committing/aborting the transaction will also be delayed by the
- * previous accumulated remaining delays. */
-
- if (do_commit) {
- TEST_SAY("Committing transaction\n");
-
- RETRY_TXN_CALL__(
- rd_kafka_commit_transaction(rk, 100),
- TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
- RD_KAFKA_RESP_ERR__CONFLICT));
- } else {
- TEST_SAY("Aborting transaction\n");
-
- RETRY_TXN_CALL__(
- rd_kafka_abort_transaction(rk, 100),
- TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, -1),
- RD_KAFKA_RESP_ERR__CONFLICT));
- }
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Verify that a resumed call errors out if the underlying operation
- *        failed after the original call timed out but before the resuming
- *        call was made.
- */
-static void do_test_txn_resumable_calls_timeout_error(rd_bool_t do_commit) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_resp_err_t err;
- int32_t coord_id = 1;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- int msgcnt = 1;
- int remains = 0;
- rd_kafka_error_t *error;
-
- SUB_TEST_QUICK("%s_transaction", do_commit ? "commit" : "abort");
-
- rk = create_txn_producer(&mcluster, transactional_id, 1, NULL);
-
- err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
- TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
- coord_id);
- rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
-
- TEST_SAY("Starting transaction\n");
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
- msgcnt, NULL, 0, &remains);
-
-
- TEST_SAY("Fail EndTxn fatally after 2000ms\n");
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, coord_id, RD_KAFKAP_EndTxn, 1,
- RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, 2000);
-
- if (do_commit) {
- TEST_SAY("Committing transaction\n");
-
- TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500),
- RD_KAFKA_RESP_ERR__TIMED_OUT);
-
- /* Sleep so that the background EndTxn fails locally and sets
- * an error result. */
- rd_sleep(3);
-
- error = rd_kafka_commit_transaction(rk, -1);
-
- } else {
- TEST_SAY("Aborting transaction\n");
-
-                TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, 500),
-                                        RD_KAFKA_RESP_ERR__TIMED_OUT);
-
- /* Sleep so that the background EndTxn fails locally and sets
- * an error result. */
- rd_sleep(3);
-
-                error = rd_kafka_abort_transaction(rk, -1);
- }
-
- TEST_ASSERT(error != NULL && rd_kafka_error_is_fatal(error),
- "Expected fatal error, not %s",
- rd_kafka_error_string(error));
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
- "Expected error INVALID_TXN_STATE, got %s",
- rd_kafka_error_name(error));
- rd_kafka_error_destroy(error);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Concurrent transaction API calls are not permitted.
- * This test makes sure they're properly enforced.
- *
- * For each transactional API, call it with a 5s timeout, and during that time
- * from another thread call transactional APIs, one by one, and verify that
- * we get an ERR__CONFLICT error back in the second thread.
- *
- * We use a mutex for synchronization, the main thread will hold the lock
- * when not calling an API but release it just prior to calling.
- * The other thread will acquire the lock, sleep, and hold the lock while
- * calling the concurrent API that should fail immediately, releasing the lock
- * when done.
- *
- */
-
-struct _txn_concurrent_state {
- const char *api;
- mtx_t lock;
- rd_kafka_t *rk;
- struct test *test;
-};
-
-static int txn_concurrent_thread_main(void *arg) {
- struct _txn_concurrent_state *state = arg;
- static const char *apis[] = {
- "init_transactions", "begin_transaction",
- "send_offsets_to_transaction", "commit_transaction",
- "abort_transaction", NULL};
- rd_kafka_t *rk = state->rk;
- const char *main_api = NULL;
- int i;
-
- /* Update TLS variable so TEST_..() macros work */
- test_curr = state->test;
-
- while (1) {
- const char *api = NULL;
- const int timeout_ms = 10000;
- rd_kafka_error_t *error = NULL;
- rd_kafka_resp_err_t exp_err;
- test_timing_t duration;
-
-                /* Wait for the other thread's txn call to start, then sleep
-                 * a bit to increase the chance that the call has really
-                 * begun. */
- mtx_lock(&state->lock);
-
- if (state->api && state->api == main_api) {
- /* Main thread is still blocking on the last API call */
- TEST_SAY("Waiting for main thread to finish %s()\n",
- main_api);
- mtx_unlock(&state->lock);
- rd_sleep(1);
- continue;
- } else if (!(main_api = state->api)) {
- mtx_unlock(&state->lock);
- break;
- }
-
- rd_sleep(1);
-
- for (i = 0; (api = apis[i]) != NULL; i++) {
- TEST_SAY(
- "Triggering concurrent %s() call while "
- "main is in %s() call\n",
- api, main_api);
- TIMING_START(&duration, "%s", api);
-
- if (!strcmp(api, "init_transactions"))
- error =
- rd_kafka_init_transactions(rk, timeout_ms);
- else if (!strcmp(api, "begin_transaction"))
- error = rd_kafka_begin_transaction(rk);
- else if (!strcmp(api, "send_offsets_to_transaction")) {
- rd_kafka_topic_partition_list_t *offsets =
- rd_kafka_topic_partition_list_new(1);
- rd_kafka_consumer_group_metadata_t *cgmetadata =
- rd_kafka_consumer_group_metadata_new(
- "mygroupid");
- rd_kafka_topic_partition_list_add(
- offsets, "srctopic4", 0)
- ->offset = 12;
-
- error = rd_kafka_send_offsets_to_transaction(
- rk, offsets, cgmetadata, -1);
- rd_kafka_consumer_group_metadata_destroy(
- cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
- } else if (!strcmp(api, "commit_transaction"))
- error =
- rd_kafka_commit_transaction(rk, timeout_ms);
- else if (!strcmp(api, "abort_transaction"))
- error =
- rd_kafka_abort_transaction(rk, timeout_ms);
- else
- TEST_FAIL("Unknown API: %s", api);
-
- TIMING_STOP(&duration);
-
- TEST_SAY_ERROR(error, "Conflicting %s() call: ", api);
- TEST_ASSERT(error,
- "Expected conflicting %s() call to fail",
- api);
-
- exp_err = !strcmp(api, main_api)
- ? RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS
- : RD_KAFKA_RESP_ERR__CONFLICT;
-
-                        TEST_ASSERT(rd_kafka_error_code(error) == exp_err,
-                                    "Conflicting %s(): Expected %s, not %s",
-                                    api, rd_kafka_err2str(exp_err),
-                                    rd_kafka_error_name(error));
- TEST_ASSERT(
- rd_kafka_error_is_retriable(error),
- "Conflicting %s(): Expected retriable error", api);
- rd_kafka_error_destroy(error);
- /* These calls should fail immediately */
- TIMING_ASSERT(&duration, 0, 100);
- }
-
- mtx_unlock(&state->lock);
- }
-
- return 0;
-}
-
-static void do_test_txn_concurrent_operations(rd_bool_t do_commit) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- int32_t coord_id = 1;
- rd_kafka_resp_err_t err;
- const char *topic = "test";
- const char *transactional_id = "txnid";
- int remains = 0;
- thrd_t thrd;
- struct _txn_concurrent_state state = RD_ZERO_INIT;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
-
- SUB_TEST("%s", do_commit ? "commit" : "abort");
-
- test_timeout_set(90);
-
- /* We need to override the value of socket.connection.setup.timeout.ms
- * to be at least 2*RTT of the mock broker. This is because the first
- * ApiVersion request will fail, since we make the request with v3, and
- * the mock broker's MaxVersion is 2, so the request is retried with v0.
- * We use the value 3*RTT to add some buffer.
- */
- rk = create_txn_producer(&mcluster, transactional_id, 1,
- "socket.connection.setup.timeout.ms", "15000",
- NULL);
-
- /* Set broker RTT to 3.5s so that the background thread has ample
- * time to call its conflicting APIs.
- * This value must be less than socket.connection.setup.timeout.ms/2. */
- rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 3500);
-
- err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
- TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
-
- /* Set up shared state between us and the concurrent thread */
- mtx_init(&state.lock, mtx_plain);
- state.test = test_curr;
- state.rk = rk;
-
- /* We release the lock only while calling the TXN API */
- mtx_lock(&state.lock);
-
- /* Spin up concurrent thread */
- if (thrd_create(&thrd, txn_concurrent_thread_main, (void *)&state) !=
- thrd_success)
- TEST_FAIL("Failed to create thread");
-
-#define _start_call(callname) \
- do { \
- state.api = callname; \
- mtx_unlock(&state.lock); \
- } while (0)
-#define _end_call() mtx_lock(&state.lock)
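-
-        /* Locking protocol (see the test description above): the main
-         * thread holds state.lock except while its own transactional API
-         * call is in flight. _start_call() publishes the API name and
-         * releases the lock so the background thread can issue its
-         * conflicting calls; _end_call() re-acquires the lock. */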
-
- _start_call("init_transactions");
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
- _end_call();
-
- /* This call doesn't block, so can't really be tested concurrently. */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10,
- NULL, 0, &remains);
-
- _start_call("send_offsets_to_transaction");
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12;
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
- TEST_CALL_ERROR__(
- rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
- _end_call();
-
- if (do_commit) {
- _start_call("commit_transaction");
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
- _end_call();
- } else {
- _start_call("abort_transaction");
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
- _end_call();
- }
-
- /* Signal completion to background thread */
- state.api = NULL;
-
- mtx_unlock(&state.lock);
-
- thrd_join(thrd, NULL);
-
- rd_kafka_destroy(rk);
-
- mtx_destroy(&state.lock);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief KIP-360: Test that fatal idempotence errors trigger abortable
- *        transaction errors, while letting the broker-side abort of the
- *        transaction fail with a fencing error.
- *        This should raise a fatal error.
- *
- * @param error_code Which error code EndTxn should fail with.
- * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
- * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
- */
-static void do_test_txn_fenced_abort(rd_kafka_resp_err_t error_code) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_error_t *error;
- int32_t txn_coord = 2;
- const char *txnid = "myTxnId";
- char errstr[512];
- rd_kafka_resp_err_t fatal_err;
- size_t errors_cnt;
-
- SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));
-
- rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
- NULL);
-
- rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
- txn_coord);
-
- test_curr->ignore_dr_err = rd_true;
- test_curr->is_fatal_cb = error_is_fatal_cb;
- allowed_error = RD_KAFKA_RESP_ERR__FENCED;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
-
- /*
- * Start a transaction
- */
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-
- /* Produce a message without error first */
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- test_flush(rk, -1);
-
- /* Fail abort transaction */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, txn_coord, RD_KAFKAP_EndTxn, 1, error_code, 0);
-
- /* Fail the PID reinit */
- rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);
-
- /* Produce a message, let it fail with a fatal idempo error. */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 1,
- RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
-
- TEST_CALL_ERR__(rd_kafka_producev(
- rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
-
- test_flush(rk, -1);
-
- /* Abort the transaction, should fail with a fatal error */
- error = rd_kafka_abort_transaction(rk, -1);
- TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
-
- TEST_SAY_ERROR(error, "abort_transaction() failed: ");
- TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error");
- rd_kafka_error_destroy(error);
-
- fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
- TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised");
- TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr);
-
- /* Verify that the producer sent the expected number of EndTxn requests
- * by inspecting the mock broker error stack,
- * which should now be empty. */
- if (rd_kafka_mock_broker_error_stack_cnt(
- mcluster, txn_coord, RD_KAFKAP_EndTxn, &errors_cnt)) {
-                TEST_FAIL(
-                    "Failed to fetch broker error stack count for API %s"
-                    " on broker %" PRId32,
-                    rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), txn_coord);
- }
-        /* Check that all the pushed RD_KAFKAP_EndTxn errors have been
-         * consumed. */
- TEST_ASSERT(errors_cnt == 0,
- "Expected error count 0 for API %s, found %zu",
- rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), errors_cnt);
-
- if (rd_kafka_mock_broker_error_stack_cnt(
- mcluster, txn_coord, RD_KAFKAP_InitProducerId, &errors_cnt)) {
-                TEST_FAIL(
-                    "Failed to fetch broker error stack count for API %s"
-                    " on broker %" PRId32,
-                    rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), txn_coord);
- }
-        /* Check that none of the pushed RD_KAFKAP_InitProducerId errors
-         * have been consumed. */
- TEST_ASSERT(errors_cnt == 1,
- "Expected error count 1 for API %s, found %zu",
- rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), errors_cnt);
-
- /* All done */
- rd_kafka_destroy(rk);
-
- allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Test that the TxnOffsetCommit op doesn't retry without waiting
- *        if the coordinator is found but not available, which would cause
- *        too frequent retries.
- */
-static void
-do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_bool_t times_out) {
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *offsets;
- rd_kafka_consumer_group_metadata_t *cgmetadata;
- rd_kafka_error_t *error;
- int timeout;
-
- SUB_TEST_QUICK("times_out=%s", RD_STR_ToF(times_out));
-
- rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
-
- test_curr->ignore_dr_err = rd_true;
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
-
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- /* Wait for messages to be delivered */
- test_flush(rk, 5000);
-
- /*
- * Fail TxnOffsetCommit with COORDINATOR_NOT_AVAILABLE
- * repeatedly.
- */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_TxnOffsetCommit, 4,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE);
-
- offsets = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 1;
-
- cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
-
-        /* The retry delay is 500ms; with 4 retries it should take at least
-         * 2000ms for this call to succeed. */
- timeout = times_out ? 500 : 4000;
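-        /* A 500ms timeout is therefore guaranteed to expire before the
-         * retries can complete, while 4000ms leaves room for all four. */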
- error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata,
- timeout);
- rd_kafka_consumer_group_metadata_destroy(cgmetadata);
- rd_kafka_topic_partition_list_destroy(offsets);
-
- if (times_out) {
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- "expected %s, got: %s",
- rd_kafka_err2name(
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE),
- rd_kafka_err2str(rd_kafka_error_code(error)));
- } else {
- TEST_ASSERT(rd_kafka_error_code(error) ==
- RD_KAFKA_RESP_ERR_NO_ERROR,
- "expected \"Success\", found: %s",
- rd_kafka_err2str(rd_kafka_error_code(error)));
- }
- rd_kafka_error_destroy(error);
-
- /* All done */
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0105_transactions_mock(int argc, char **argv) {
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- do_test_txn_recoverable_errors();
-
- do_test_txn_fatal_idempo_errors();
-
- do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);
- do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);
-
- do_test_txn_req_cnt();
-
- do_test_txn_requires_abort_errors();
-
- do_test_txn_slow_reinit(rd_false);
- do_test_txn_slow_reinit(rd_true);
-
- /* Just do a subset of tests in quick mode */
- if (test_quick)
- return 0;
-
- do_test_txn_endtxn_errors();
-
- do_test_txn_endtxn_infinite();
-
- do_test_txn_endtxn_timeout();
-
- do_test_txn_endtxn_timeout_inflight();
-
- /* Bring down the coordinator */
- do_test_txn_broker_down_in_txn(rd_true);
-
- /* Bring down partition leader */
- do_test_txn_broker_down_in_txn(rd_false);
-
- do_test_txns_not_supported();
-
- do_test_txns_send_offsets_concurrent_is_retried();
-
- do_test_txns_send_offsets_non_eligible();
-
- do_test_txn_coord_req_destroy();
-
- do_test_txn_coord_req_multi_find();
-
- do_test_txn_addparts_req_multi();
-
- do_test_txns_no_timeout_crash();
-
- do_test_txn_auth_failure(
- RD_KAFKAP_InitProducerId,
- RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED);
-
- do_test_txn_auth_failure(
- RD_KAFKAP_FindCoordinator,
- RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED);
-
- do_test_txn_flush_timeout();
-
- do_test_unstable_offset_commit();
-
- do_test_commit_after_msg_timeout();
-
- do_test_txn_switch_coordinator();
-
- do_test_txn_switch_coordinator_refresh();
-
- do_test_out_of_order_seq();
-
- do_test_topic_disappears_for_awhile();
-
- do_test_disconnected_group_coord(rd_false);
-
- do_test_disconnected_group_coord(rd_true);
-
- do_test_txn_coordinator_null_not_fatal();
-
- do_test_txn_resumable_calls_timeout(rd_true);
-
- do_test_txn_resumable_calls_timeout(rd_false);
-
- do_test_txn_resumable_calls_timeout_error(rd_true);
-
- do_test_txn_resumable_calls_timeout_error(rd_false);
- do_test_txn_resumable_init();
-
- do_test_txn_concurrent_operations(rd_true /*commit*/);
-
- do_test_txn_concurrent_operations(rd_false /*abort*/);
-
- do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);
-
- do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);
-
- do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_true);
-
- do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_false);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c
deleted file mode 100644
index 0451e4a00..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "../src/rdkafka_proto.h"
-
-
-/**
- * @name Verify that the high-level consumer times itself out if
- *       heartbeats are not successful (issue #2631).
- */
-
-static const char *commit_type;
-static int rebalance_cnt;
-static rd_kafka_resp_err_t rebalance_exp_event;
-static rd_kafka_resp_err_t commit_exp_err;
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
-
- rebalance_cnt++;
- TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt,
- rd_kafka_err2name(err), parts->cnt);
-
- TEST_ASSERT(
- err == rebalance_exp_event, "Expected rebalance event %s, not %s",
- rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err));
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- test_consumer_assign("assign", rk, parts);
- } else {
- rd_kafka_resp_err_t commit_err;
-
- if (strcmp(commit_type, "auto")) {
- rd_kafka_resp_err_t perr;
-
- TEST_SAY("Performing %s commit\n", commit_type);
-
- perr = rd_kafka_position(rk, parts);
- TEST_ASSERT(!perr, "Failed to acquire position: %s",
- rd_kafka_err2str(perr));
-
- /* Sleep a short while so the broker times out the
- * member too. */
- rd_sleep(1);
-
- commit_err = rd_kafka_commit(
- rk, parts, !strcmp(commit_type, "async"));
-
- if (!strcmp(commit_type, "async"))
- TEST_ASSERT(!commit_err,
- "Async commit should not fail, "
- "but it returned %s",
- rd_kafka_err2name(commit_err));
- else
- TEST_ASSERT(
- commit_err == commit_exp_err ||
- (!commit_exp_err &&
- commit_err ==
- RD_KAFKA_RESP_ERR__NO_OFFSET),
- "Expected %s commit to return %s, "
- "not %s",
- commit_type,
- rd_kafka_err2name(commit_exp_err),
- rd_kafka_err2name(commit_err));
- }
-
- test_consumer_unassign("unassign", rk);
- }
-
- /* Make sure only one rebalance callback is served per poll()
- * so that expect_rebalance() returns to the test logic on each
- * rebalance. */
- rd_kafka_yield(rk);
-}
-
-
-/**
- * @brief Wait for an expected rebalance event, or fail.
- */
-static void expect_rebalance(const char *what,
- rd_kafka_t *c,
- rd_kafka_resp_err_t exp_event,
- int timeout_s) {
- int64_t tmout = test_clock() + (timeout_s * 1000000);
- int start_cnt = rebalance_cnt;
-
- TEST_SAY("Waiting for %s (%s) for %ds\n", what,
- rd_kafka_err2name(exp_event), timeout_s);
-
- rebalance_exp_event = exp_event;
-
- while (tmout > test_clock() && rebalance_cnt == start_cnt) {
- if (test_consumer_poll_once(c, NULL, 1000))
- rd_sleep(1);
- }
-
- if (rebalance_cnt == start_cnt + 1) {
- rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR;
- return;
- }
-
- TEST_FAIL("Timed out waiting for %s (%s)\n", what,
- rd_kafka_err2name(exp_event));
-}
-
-
-/**
- * @brief Verify that session timeouts are handled by the consumer itself.
- *
- * @param use_commit_type "auto", "sync" (manual), "async" (manual)
- */
-static void do_test_session_timeout(const char *use_commit_type) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *groupid = "mygroup";
- const char *topic = "test";
-
- rebalance_cnt = 0;
- commit_type = use_commit_type;
-
- SUB_TEST0(!strcmp(use_commit_type, "sync") /*quick*/,
- "Test session timeout with %s commit", use_commit_type);
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10", NULL);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "security.protocol", "PLAINTEXT");
- test_conf_set(conf, "group.id", groupid);
- test_conf_set(conf, "session.timeout.ms", "5000");
- test_conf_set(conf, "heartbeat.interval.ms", "1000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit",
- !strcmp(commit_type, "auto") ? "true" : "false");
-
- c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c, topic);
-
- /* Let Heartbeats fail after a couple of successful ones */
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Heartbeat, 9, RD_KAFKA_RESP_ERR_NO_ERROR,
- RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
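-
-        /* With heartbeat.interval.ms=1000, the seven failing Heartbeats
-         * span past the 5000ms session.timeout.ms, so the consumer should
-         * detect the session timeout locally and revoke its assignment. */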
-
- expect_rebalance("initial assignment", c,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2);
-
- /* Consume a couple of messages so that we have something to commit */
- test_consumer_poll("consume", c, 0, -1, 0, 10, NULL);
-
- /* The commit in the rebalance callback should fail when the
- * member has timed out from the group. */
- commit_exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
-
- expect_rebalance("session timeout revoke", c,
- RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, 2 + 5 + 2);
-
- expect_rebalance("second assignment", c,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2);
-
- /* Final rebalance in close().
- * Its commit will work. */
- rebalance_exp_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
- commit_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Attempt manual commit when assignment has been lost (#3217)
- */
-static void do_test_commit_on_lost(void) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- const char *groupid = "mygroup";
- const char *topic = "test";
- rd_kafka_resp_err_t err;
-
- SUB_TEST();
-
- test_curr->is_fatal_cb = test_error_is_not_fatal_cb;
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10", NULL);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "security.protocol", "PLAINTEXT");
- test_conf_set(conf, "group.id", groupid);
- test_conf_set(conf, "session.timeout.ms", "5000");
- test_conf_set(conf, "heartbeat.interval.ms", "1000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit", "false");
-
- c = test_create_consumer(groupid, test_rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c, topic);
-
- /* Consume a couple of messages so that we have something to commit */
- test_consumer_poll("consume", c, 0, -1, 0, 10, NULL);
-
- /* Make the coordinator unreachable, this will cause a local session
- * timeout followed by a revoke and assignment lost. */
- rd_kafka_mock_broker_set_down(mcluster, 1);
-
- /* Wait until the assignment is lost */
- TEST_SAY("Waiting for assignment to be lost...\n");
- while (!rd_kafka_assignment_lost(c))
- rd_sleep(1);
-
- TEST_SAY("Assignment is lost, committing\n");
- /* Perform manual commit */
- err = rd_kafka_commit(c, NULL, 0 /*sync*/);
- TEST_SAY("commit() returned: %s\n", rd_kafka_err2name(err));
- TEST_ASSERT(err, "expected commit to fail");
-
- test_consumer_close(c);
-
- rd_kafka_destroy(c);
-
- test_mock_cluster_destroy(mcluster);
-
- test_curr->is_fatal_cb = NULL;
-
- SUB_TEST_PASS();
-}
-
-
-int main_0106_cgrp_sess_timeout(int argc, char **argv) {
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- do_test_session_timeout("sync");
- do_test_session_timeout("async");
- do_test_session_timeout("auto");
-
- do_test_commit_on_lost();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c
deleted file mode 100644
index 1f91e2a84..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "../src/rdkafka_proto.h"
-
-
-/**
- * @name Verify that the producer and consumer resume operation after
- *       a topic has been deleted and recreated.
- */
-
-/**
- * The message value to produce, one of:
- * "before" - before topic deletion
- * "during" - during topic deletion
- * "after" - after topic has been re-created
- * "end" - stop producing
- */
-static mtx_t value_mtx;
-static char *value;
-
-static const int msg_rate = 10; /**< Messages produced per second */
-
-static struct test *this_test; /**< Exposes current test struct (in TLS) to
- * producer thread. */
-
-
-/**
- * @brief Treat all error_cb as non-test-fatal.
- */
-static int
-is_error_fatal(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
- return rd_false;
-}
-
-/**
- * @brief Producing thread
- */
-static int run_producer(void *arg) {
- const char *topic = arg;
- rd_kafka_t *producer = test_create_producer();
- int ret = 0;
-
- test_curr = this_test;
-
- /* Don't check message status */
- test_curr->exp_dr_status = (rd_kafka_msg_status_t)-1;
-
- while (1) {
- rd_kafka_resp_err_t err;
-
- mtx_lock(&value_mtx);
- if (!strcmp(value, "end")) {
- mtx_unlock(&value_mtx);
- break;
- } else if (strcmp(value, "before")) {
- /* Ignore Delivery report errors after topic
- * has been deleted and eventually re-created,
- * we rely on the consumer to verify that
- * messages are produced. */
- test_curr->ignore_dr_err = rd_true;
- }
-
- err = rd_kafka_producev(
- producer, RD_KAFKA_V_TOPIC(topic),
- RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
- RD_KAFKA_V_VALUE(value, strlen(value)), RD_KAFKA_V_END);
-
- if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART ||
- err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- TEST_SAY("Produce failed (expectedly): %s\n",
- rd_kafka_err2name(err));
- else
- TEST_ASSERT(!err, "producev() failed: %s",
- rd_kafka_err2name(err));
-
- mtx_unlock(&value_mtx);
-
- rd_usleep(1000000 / msg_rate, NULL);
-
- rd_kafka_poll(producer, 0);
- }
-
- if (rd_kafka_flush(producer, 5000)) {
- TEST_WARN("Failed to flush all message(s), %d remain\n",
- rd_kafka_outq_len(producer));
- /* Purge the messages to see which partition they were for */
- rd_kafka_purge(producer, RD_KAFKA_PURGE_F_QUEUE |
- RD_KAFKA_PURGE_F_INFLIGHT);
- rd_kafka_flush(producer, 5000);
- TEST_SAY("%d message(s) in queue after purge\n",
- rd_kafka_outq_len(producer));
-
- ret = 1; /* Fail test from main thread */
- }
-
- rd_kafka_destroy(producer);
-
- return ret;
-}
-
-
-/**
- * @brief Expect at least \p cnt messages with value matching \p exp_value,
- * else fail the current test.
- */
-static void
-expect_messages(rd_kafka_t *consumer, int cnt, const char *exp_value) {
- int match_cnt = 0, other_cnt = 0, err_cnt = 0;
- size_t exp_len = strlen(exp_value);
-
- TEST_SAY("Expecting >= %d messages with value \"%s\"...\n", cnt,
- exp_value);
-
- while (match_cnt < cnt) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage = rd_kafka_consumer_poll(consumer, 1000);
- if (!rkmessage)
- continue;
-
- if (rkmessage->err) {
- TEST_SAY("Consume error: %s\n",
- rd_kafka_message_errstr(rkmessage));
- err_cnt++;
- } else if (rkmessage->len == exp_len &&
- !memcmp(rkmessage->payload, exp_value, exp_len)) {
- match_cnt++;
- } else {
- TEST_SAYL(3,
- "Received \"%.*s\", expected \"%s\": "
- "ignored\n",
- (int)rkmessage->len,
- (const char *)rkmessage->payload, exp_value);
- other_cnt++;
- }
-
- rd_kafka_message_destroy(rkmessage);
- }
-
- TEST_SAY(
- "Consumed %d messages matching \"%s\", "
- "ignored %d others, saw %d error(s)\n",
- match_cnt, exp_value, other_cnt, err_cnt);
-}
-
-
-/**
- * @brief Test topic create + delete + create with first topic having
- * \p part_cnt_1 partitions and second topic having \p part_cnt_2 .
- */
-static void do_test_create_delete_create(int part_cnt_1, int part_cnt_2) {
- rd_kafka_t *consumer;
- thrd_t producer_thread;
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- int ret = 0;
-
- TEST_SAY(_C_MAG
- "[ Test topic create(%d parts)+delete+create(%d parts) ]\n",
- part_cnt_1, part_cnt_2);
-
- consumer = test_create_consumer(topic, NULL, NULL, NULL);
-
- /* Create topic */
- test_create_topic(consumer, topic, part_cnt_1, 3);
-
- /* Start consumer */
- test_consumer_subscribe(consumer, topic);
- test_consumer_wait_assignment(consumer, rd_true);
-
- mtx_lock(&value_mtx);
- value = "before";
- mtx_unlock(&value_mtx);
-
- /* Create producer thread */
- if (thrd_create(&producer_thread, run_producer, (void *)topic) !=
- thrd_success)
- TEST_FAIL("thrd_create failed");
-
- /* Consume messages for 5s */
- expect_messages(consumer, msg_rate * 5, value);
-
- /* Delete topic */
- mtx_lock(&value_mtx);
- value = "during";
- mtx_unlock(&value_mtx);
-
- test_delete_topic(consumer, topic);
- rd_sleep(5);
-
- /* Re-create topic */
- test_create_topic(consumer, topic, part_cnt_2, 3);
-
- mtx_lock(&value_mtx);
- value = "after";
- mtx_unlock(&value_mtx);
-
- /* Consume for 5 more seconds, should see new messages */
- expect_messages(consumer, msg_rate * 5, value);
-
- rd_kafka_destroy(consumer);
-
- /* Wait for producer to exit */
- mtx_lock(&value_mtx);
- value = "end";
- mtx_unlock(&value_mtx);
-
- if (thrd_join(producer_thread, &ret) != thrd_success || ret != 0)
- TEST_FAIL("Producer failed: see previous errors");
-
- TEST_SAY(_C_GRN
- "[ Test topic create(%d parts)+delete+create(%d parts): "
- "PASS ]\n",
- part_cnt_1, part_cnt_2);
-}
-
-
-int main_0107_topic_recreate(int argc, char **argv) {
- this_test = test_curr; /* Need to expose current test struct (in TLS)
- * to producer thread. */
-
- this_test->is_fatal_cb = is_error_fatal;
-
- mtx_init(&value_mtx, mtx_plain);
-
- test_conf_init(NULL, NULL, 60);
-
- do_test_create_delete_create(10, 3);
- do_test_create_delete_create(3, 6);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp
deleted file mode 100644
index cabee6704..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <map>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-/**
- * Test consumer allow.auto.create.topics by subscribing to a mix
- * of available, unauthorized and non-existent topics.
- *
- * The same test is run with and without allow.auto.create.topics
- * and with and without wildcard subscribes.
- *
- */
-
-
-static void do_test_consumer(bool allow_auto_create_topics,
- bool with_wildcards) {
- Test::Say(tostr() << _C_MAG << "[ Test allow.auto.create.topics="
- << (allow_auto_create_topics ? "true" : "false")
- << " with_wildcards=" << (with_wildcards ? "true" : "false")
- << " ]\n");
-
- bool has_acl_cli = test_broker_version >= TEST_BRKVER(2, 1, 0, 0) &&
- !test_needs_auth(); /* We don't bother passing Java
- * security config to kafka-acls.sh */
-
- bool supports_allow = test_broker_version >= TEST_BRKVER(0, 11, 0, 0);
-
- std::string topic_exists = Test::mk_topic_name("0109-exists", 1);
- std::string topic_notexists = Test::mk_topic_name("0109-notexists", 1);
- std::string topic_unauth = Test::mk_topic_name("0109-unauthorized", 1);
-
- /* Create consumer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 20);
- Test::conf_set(conf, "group.id", topic_exists);
- Test::conf_set(conf, "enable.partition.eof", "true");
- /* Quickly refresh metadata on topic auto-creation since the first
- * metadata after auto-create hides the topic due to 0 partition count. */
- Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "1000");
- if (allow_auto_create_topics)
- Test::conf_set(conf, "allow.auto.create.topics", "true");
-
- std::string bootstraps;
- if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to retrieve bootstrap.servers");
-
- std::string errstr;
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Create topics */
- Test::create_topic(c, topic_exists.c_str(), 1, 1);
-
- if (has_acl_cli) {
- Test::create_topic(c, topic_unauth.c_str(), 1, 1);
-
- /* Add denying ACL for unauth topic */
- test_kafka_cmd(
- "kafka-acls.sh --bootstrap-server %s "
- "--add --deny-principal 'User:*' "
- "--operation All --deny-host '*' "
- "--topic '%s'",
- bootstraps.c_str(), topic_unauth.c_str());
- }
-
-
- /* Wait for topic to be fully created */
- test_wait_topic_exists(NULL, topic_exists.c_str(), 10 * 1000);
-
-
- /*
- * Subscribe
- */
- std::vector<std::string> topics;
- std::map<std::string, RdKafka::ErrorCode> exp_errors;
-
- topics.push_back(topic_notexists);
- if (has_acl_cli)
- topics.push_back(topic_unauth);
-
- if (with_wildcards) {
- topics.push_back("^" + topic_exists);
- topics.push_back("^" + topic_notexists);
- /* If the subscription contains at least one wildcard/regex
- * then no auto topic creation will take place (since the consumer
- * requests all topics in metadata, and not specific ones, thus
- * not triggering topic auto creation).
- * We need to handle the expected error cases accordingly. */
- exp_errors["^" + topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
- exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
-
- if (has_acl_cli) {
- /* Unauthorized topics are not included in list-all-topics Metadata,
- * which we use for wildcards, so in this case the error code for
- * unauthorized topics shows up as unknown topic. */
- exp_errors[topic_unauth] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
- }
- } else {
- topics.push_back(topic_exists);
-
- if (has_acl_cli)
- exp_errors[topic_unauth] = RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED;
- }
-
- if (supports_allow && !allow_auto_create_topics)
- exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
-
- RdKafka::ErrorCode err;
- if ((err = c->subscribe(topics)))
- Test::Fail("subscribe failed: " + RdKafka::err2str(err));
-
- /* Start consuming until EOF is reached, which indicates that we have an
- * assignment and any errors should have been reported. */
- bool run = true;
- while (run) {
- RdKafka::Message *msg = c->consume(tmout_multip(1000));
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- case RdKafka::ERR_NO_ERROR:
- break;
-
- case RdKafka::ERR__PARTITION_EOF:
- run = false;
- break;
-
- default:
- Test::Say("Consume error on " + msg->topic_name() + ": " + msg->errstr() +
- "\n");
-
- std::map<std::string, RdKafka::ErrorCode>::iterator it =
- exp_errors.find(msg->topic_name());
-
- /* Temporary unknown-topic errors are okay for auto-created topics. */
- bool unknown_is_ok = allow_auto_create_topics && !with_wildcards &&
- msg->err() == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART &&
- msg->topic_name() == topic_notexists;
-
- if (it == exp_errors.end()) {
- if (unknown_is_ok)
- Test::Say("Ignoring temporary auto-create error for topic " +
- msg->topic_name() + ": " + RdKafka::err2str(msg->err()) +
- "\n");
- else
- Test::Fail("Did not expect error for " + msg->topic_name() +
- ": got: " + RdKafka::err2str(msg->err()));
- } else if (msg->err() != it->second) {
- if (unknown_is_ok)
- Test::Say("Ignoring temporary auto-create error for topic " +
- msg->topic_name() + ": " + RdKafka::err2str(msg->err()) +
- "\n");
- else
- Test::Fail("Expected '" + RdKafka::err2str(it->second) + "' for " +
- msg->topic_name() + ", got " +
- RdKafka::err2str(msg->err()));
- } else {
- exp_errors.erase(msg->topic_name());
- }
-
- break;
- }
-
- delete msg;
- }
-
-
- /* Fail if not all expected errors were seen. */
- if (!exp_errors.empty())
- Test::Fail(tostr() << "Expecting " << exp_errors.size() << " more errors");
-
- c->close();
-
- delete c;
-}
-
-extern "C" {
-int main_0109_auto_create_topics(int argc, char **argv) {
- /* Parameters:
- * allow auto create, with wildcards */
- do_test_consumer(true, true);
- do_test_consumer(true, false);
- do_test_consumer(false, true);
- do_test_consumer(false, false);
-
- return 0;
-}
-}
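
Outside the harness, the consumer-side behaviour this test exercises hinges on a single property. A minimal sketch, assuming a broker at localhost:9092 with auto.create.topics.enable=true on the broker side (both assumptions, not part of the test):

// Subscribe to a not-yet-existing topic with allow.auto.create.topics
// enabled, using librdkafka's public C++ API (rdkafkacpp.h).
#include <librdkafka/rdkafkacpp.h>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  conf->set("group.id", "auto-create-demo", errstr);
  /* Opt in to broker-side topic auto-creation on subscribe. */
  conf->set("allow.auto.create.topics", "true", errstr);

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  delete conf;
  if (!c) {
    std::cerr << "Failed to create consumer: " << errstr << "\n";
    return 1;
  }

  /* Only literal (non-regex) subscriptions can trigger auto-creation:
   * wildcard subscribes fetch all-topics metadata instead, as the
   * test's with_wildcards case demonstrates. */
  std::vector<std::string> topics;
  topics.push_back("demo-not-yet-created");
  if (c->subscribe(topics))
    std::cerr << "subscribe() failed\n";

  RdKafka::Message *msg = c->consume(5000);
  std::cout << "consume(): " << msg->errstr() << "\n";
  delete msg;

  c->close();
  delete c;
  return 0;
}
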
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp
deleted file mode 100644
index 1f36b3a76..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Test batch.size producer property.
- *
- */
-
-#include <iostream>
-#include <fstream>
-#include <iterator>
-#include <string>
-#include "testcpp.h"
-
-#if WITH_RAPIDJSON
-#include <rapidjson/document.h>
-#include <rapidjson/pointer.h>
-#include <rapidjson/error/en.h>
-
-
-class myAvgStatsCb : public RdKafka::EventCb {
- public:
- myAvgStatsCb(std::string topic) :
- avg_batchsize(0), min_batchsize(0), max_batchsize(0), topic_(topic) {
- }
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_LOG:
- Test::Say(event.str() + "\n");
- break;
- case RdKafka::Event::EVENT_STATS:
- read_batch_stats(event.str());
- break;
- default:
- break;
- }
- }
-
- int avg_batchsize;
- int min_batchsize;
- int max_batchsize;
-
- private:
- void read_val(rapidjson::Document &d, const std::string &path, int &val) {
- rapidjson::Pointer jpath(path.c_str());
-
- if (!jpath.IsValid())
- Test::Fail(tostr() << "json pointer parse " << path << " failed at "
- << jpath.GetParseErrorOffset() << " with error code "
- << jpath.GetParseErrorCode());
-
- rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
- if (!pp) {
- Test::Say(tostr() << "Could not find " << path << " in stats\n");
- return;
- }
-
- val = pp->GetInt();
- }
-
- void read_batch_stats(const std::string &stats) {
- rapidjson::Document d;
-
- if (d.Parse(stats.c_str()).HasParseError())
- Test::Fail(tostr() << "Failed to parse stats JSON: "
- << rapidjson::GetParseError_En(d.GetParseError())
- << " at " << d.GetErrorOffset());
-
- read_val(d, "/topics/" + topic_ + "/batchsize/avg", avg_batchsize);
- read_val(d, "/topics/" + topic_ + "/batchsize/min", min_batchsize);
- read_val(d, "/topics/" + topic_ + "/batchsize/max", max_batchsize);
- }
-
- std::string topic_;
-};
-
-
-/**
- * @brief Specify batch.size and parse stats to verify it takes effect.
- *
- */
-static void do_test_batch_size() {
- std::string topic = Test::mk_topic_name(__FILE__, 0);
-
- myAvgStatsCb event_cb(topic);
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 0);
-
- const int msgcnt = 1000;
- const int msgsize = 1000;
- int batchsize = 5000;
- int exp_min_batchsize = batchsize - msgsize - 100 /*~framing overhead*/;
-
- Test::conf_set(conf, "batch.size", "5000");
-
- /* Make sure batch.size takes precedence by setting the following high */
- Test::conf_set(conf, "batch.num.messages", "100000");
- Test::conf_set(conf, "linger.ms", "2000");
-
- Test::conf_set(conf, "statistics.interval.ms", "7000");
- std::string errstr;
- if (conf->set("event_cb", &event_cb, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- delete conf;
-
- /* Produce messages */
- char val[msgsize];
- memset(val, 'a', msgsize);
-
- for (int i = 0; i < msgcnt; i++) {
- RdKafka::ErrorCode err =
- p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, val, msgsize, NULL,
- 0, -1, NULL);
- if (err)
- Test::Fail("Produce failed: " + RdKafka::err2str(err));
- }
-
- Test::Say(tostr() << "Produced " << msgcnt << " messages\n");
- p->flush(5 * 1000);
-
- Test::Say("Waiting for stats\n");
- while (event_cb.avg_batchsize == 0)
- p->poll(1000);
-
- Test::Say(tostr() << "Batchsize: "
- << "configured " << batchsize << ", min "
- << event_cb.min_batchsize << ", max "
- << event_cb.max_batchsize << ", avg "
- << event_cb.avg_batchsize << "\n");
-
- /* The average batchsize should be within a message size of batch.size. */
- if (event_cb.avg_batchsize < exp_min_batchsize ||
- event_cb.avg_batchsize > batchsize)
- Test::Fail(tostr() << "Expected avg batchsize to be within "
- << exp_min_batchsize << ".." << batchsize << " but got "
- << event_cb.avg_batchsize);
-
- delete p;
-}
-#endif
-
-extern "C" {
-int main_0110_batch_size(int argc, char **argv) {
-#if WITH_RAPIDJSON
- do_test_batch_size();
-#else
- Test::Skip("RapidJSON >=1.1.0 not available\n");
-#endif
- return 0;
-}
-}
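
The RapidJSON pattern used above (JSON Pointer lookups into librdkafka's statistics payload) works the same in isolation; a small sketch, assuming RapidJSON >= 1.1.0 and a made-up stats fragment:

// Standalone sketch of the GetValueByPointer() lookup used above.
#include <rapidjson/document.h>
#include <rapidjson/pointer.h>
#include <iostream>

int main() {
  /* Illustrative fragment in the shape of librdkafka's stats JSON. */
  const char *stats =
      "{\"topics\":{\"demo\":{\"batchsize\":"
      "{\"avg\":4900,\"min\":3000,\"max\":5000}}}}";

  rapidjson::Document d;
  d.Parse(stats);

  /* GetValueByPointer() returns NULL when the path does not exist,
   * which is how the event callback above detects missing metrics. */
  rapidjson::Value *v =
      rapidjson::GetValueByPointer(d, "/topics/demo/batchsize/avg");
  if (v && v->IsInt())
    std::cout << "avg batchsize: " << v->GetInt() << "\n";
  return 0;
}
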
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp
deleted file mode 100644
index 4b6683add..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <map>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-/**
- * Verify that the producer waits topic.metadata.propagation.max.ms
- * before flagging a topic as non-existent, allowing asynchronous
- * CreateTopics() to be used in non-auto-create scenarios.
- *
- * This tests the producer. The consumer behaviour is implicitly tested
- * in 0109.
- */
-
-
-namespace {
-class DrCb : public RdKafka::DeliveryReportCb {
- public:
- DrCb(RdKafka::ErrorCode exp_err) : ok(false), _exp_err(exp_err) {
- }
-
- void dr_cb(RdKafka::Message &msg) {
- Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n");
- if (msg.err() != _exp_err)
- Test::Fail("Delivery report: Expected " + RdKafka::err2str(_exp_err) +
- " but got " + RdKafka::err2str(msg.err()));
- else if (ok)
- Test::Fail("Too many delivery reports");
- else
- ok = true;
- }
-
- bool ok;
-
- private:
- RdKafka::ErrorCode _exp_err;
-};
-} // namespace
-
-static void do_test_producer(bool timeout_too_short) {
- Test::Say(tostr() << _C_MAG << "[ Test with timeout_too_short="
- << (timeout_too_short ? "true" : "false") << " ]\n");
-
- std::string topic = Test::mk_topic_name("0110-delay_create_topics", 1);
-
- /* Create Producer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 20);
-
- std::string errstr;
-
- if (timeout_too_short) {
- if (conf->set("topic.metadata.propagation.max.ms", "3", errstr))
- Test::Fail(errstr);
- }
-
- DrCb dr_cb(timeout_too_short ? RdKafka::ERR_UNKNOWN_TOPIC_OR_PART
- : RdKafka::ERR_NO_ERROR);
- conf->set("dr_cb", &dr_cb, errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- /* Produce a message to the yet non-existent topic. */
- RdKafka::ErrorCode err = p->produce(
- topic, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY,
- (void *)"hello", 5, "hi", 2, 0, NULL, NULL);
- if (err)
- Test::Fail(tostr() << "produce failed: " << RdKafka::err2str(err));
-
- int delay = 5;
- int64_t end_wait = test_clock() + (delay * 1000000);
-
- while (test_clock() < end_wait)
- p->poll(1000);
-
- Test::create_topic(NULL, topic.c_str(), 1, 3);
-
- p->flush(10 * 1000);
-
- if (!dr_cb.ok)
- Test::Fail("Did not get delivery report for message");
-
- delete p;
-
- Test::Say(tostr() << _C_GRN << "[ Test with timeout_too_short="
- << (timeout_too_short ? "true" : "false") << ": PASS ]\n");
-}
-
-extern "C" {
-int main_0111_delay_create_topics(int argc, char **argv) {
- do_test_producer(false);
- do_test_producer(true);
- return 0;
-}
-}
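
Reduced to its essence, the property under test is set like any other producer property. A sketch assuming a broker at localhost:9092 and a topic created out-of-band (both hypothetical):

// Give a not-yet-created topic up to 10s to appear in metadata before
// its queued messages are failed.
#include <librdkafka/rdkafkacpp.h>
#include <iostream>
#include <string>

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  /* Hold messages for unknown topics this long, waiting for the topic
   * to propagate, before failing the delivery. */
  conf->set("topic.metadata.propagation.max.ms", "10000", errstr);

  RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
  delete conf;
  if (!p) {
    std::cerr << "Failed to create producer: " << errstr << "\n";
    return 1;
  }

  RdKafka::ErrorCode err = p->produce(
      "demo-created-later", RdKafka::Topic::PARTITION_UA,
      RdKafka::Producer::RK_MSG_COPY, (void *)"hello", 5,
      NULL, 0, 0, NULL, NULL);
  std::cout << "produce(): " << RdKafka::err2str(err) << "\n";

  /* The topic can now be created out-of-band; flush() serves the
   * delivery report either way. */
  p->flush(15 * 1000);
  delete p;
  return 0;
}

If the topic never appears within the window, the delivery report carries ERR_UNKNOWN_TOPIC_OR_PART, which is exactly what the timeout_too_short variant above asserts.
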
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c
deleted file mode 100644
index d945a2c32..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdkafka.h"
-
-/**
- * Assign consumer to single partition topic and consume a message.
- * Then add a new partition to the topic (i.e., one that will not
- * be in the consumer's metadata) and assign the consumer to it.
- * Verify that partition 0 is not incorrectly reported as missing.
- * See #2915.
- */
-
-int main_0112_assign_unknown_part(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
- int64_t offset = RD_KAFKA_OFFSET_BEGINNING;
- uint64_t testid = test_id_generate();
- rd_kafka_t *c;
- rd_kafka_topic_partition_list_t *tpl;
- int r;
-
- test_conf_init(NULL, NULL, 60);
-
- TEST_SAY("Creating consumer\n");
- c = test_create_consumer(topic, NULL, NULL, NULL);
-
- TEST_SAY("Creating topic %s with 1 partition\n", topic);
- test_create_topic(c, topic, 1, 1);
- test_wait_topic_exists(c, topic, 10 * 1000);
-
- TEST_SAY("Producing message to partition 0\n");
- test_produce_msgs_easy(topic, testid, 0, 1);
-
- TEST_SAY("Assigning partition 0\n");
- tpl = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(tpl, topic, 0)->offset = offset;
- test_consumer_assign("ASSIGN", c, tpl);
-
- TEST_SAY("Waiting for message\n");
- test_consumer_poll("CONSUME 0", c, testid, -1, 0, 1, NULL);
-
- TEST_SAY("Changing partition count for topic %s\n", topic);
- test_create_partitions(NULL, topic, 2);
-
- /* FIXME: The new partition might not have propagated through the
- * cluster by the time the producer tries to produce to it
- * which causes the produce to fail.
- * Loop until the partition count is correct. */
- while ((r = test_get_partition_count(c, topic, 5000)) != 2) {
- TEST_SAY(
- "Waiting for %s partition count to reach 2, "
- "currently %d\n",
- topic, r);
- rd_sleep(1);
- }
-
- TEST_SAY("Producing message to partition 1\n");
- test_produce_msgs_easy(topic, testid, 1, 1);
-
- TEST_SAY("Assigning partitions 1\n");
- rd_kafka_topic_partition_list_add(tpl, topic, 1)->offset = offset;
- test_consumer_assign("ASSIGN", c, tpl);
-
- TEST_SAY("Waiting for messages\n");
- test_consumer_poll("CONSUME", c, testid, -1, 0, 2, NULL);
-
- rd_kafka_topic_partition_list_destroy(tpl);
- test_consumer_close(c);
- rd_kafka_destroy(c);
-
- return 0;
-}
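
For context, the manual-assignment flow this C test drives through the harness looks roughly like the following on the public C++ API (broker address and topic name are placeholders):

// Explicit partition assignment, bypassing group rebalancing entirely.
// Assumes localhost:9092 and an existing topic "demo" with two
// partitions, both hypothetical.
#include <librdkafka/rdkafkacpp.h>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  conf->set("group.id", "manual-assign-demo", errstr);

  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  delete conf;
  if (!c) {
    std::cerr << "Failed to create consumer: " << errstr << "\n";
    return 1;
  }

  /* Assign partitions 0 and 1 explicitly, from the beginning. */
  std::vector<RdKafka::TopicPartition *> toppars;
  toppars.push_back(RdKafka::TopicPartition::create(
      "demo", 0, RdKafka::Topic::OFFSET_BEGINNING));
  toppars.push_back(RdKafka::TopicPartition::create(
      "demo", 1, RdKafka::Topic::OFFSET_BEGINNING));
  RdKafka::ErrorCode err = c->assign(toppars);
  if (err)
    std::cerr << "assign() failed: " << RdKafka::err2str(err) << "\n";

  RdKafka::Message *msg = c->consume(5000);
  std::cout << "consume(): " << msg->errstr() << "\n";
  delete msg;

  RdKafka::TopicPartition::destroy(toppars);
  c->close();
  delete c;
  return 0;
}
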
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp
deleted file mode 100644
index 430798d7f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp
+++ /dev/null
@@ -1,3170 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-extern "C" {
-#include "../src/rdkafka_protocol.h"
-#include "test.h"
-}
-#include <iostream>
-#include <map>
-#include <set>
-#include <algorithm>
-#include <cstring>
-#include <cstdlib>
-#include <assert.h>
-#include "testcpp.h"
-#include <fstream>
-
-using namespace std;
-
-/** Topic+Partition helper class */
-class Toppar {
- public:
- Toppar(const string &topic, int32_t partition) :
- topic(topic), partition(partition) {
- }
-
- Toppar(const RdKafka::TopicPartition *tp) :
- topic(tp->topic()), partition(tp->partition()) {
- }
-
- friend bool operator==(const Toppar &a, const Toppar &b) {
- return a.partition == b.partition && a.topic == b.topic;
- }
-
- friend bool operator<(const Toppar &a, const Toppar &b) {
- if (a.topic < b.topic)
- return true;
- if (b.topic < a.topic)
- return false;
- return a.partition < b.partition;
- }
-
- string str() const {
- return tostr() << topic << "[" << partition << "]";
- }
-
- std::string topic;
- int32_t partition;
-};
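
Since Toppar is used as a std::map key below, operator< must be a strict weak ordering (compare topic first, then partition, as corrected above; the earlier version could declare both a < b and b < a true, which is undefined behaviour for std::map). With C++11, std::tie expresses the same ordering more compactly; a sketch with an illustrative stand-in type:

#include <cstdint>
#include <string>
#include <tuple>

struct TopparKey { /* illustrative stand-in for Toppar */
  std::string topic;
  int32_t partition;
};

/* Strict weak ordering: topic first, then partition. */
inline bool operator<(const TopparKey &a, const TopparKey &b) {
  return std::tie(a.topic, a.partition) < std::tie(b.topic, b.partition);
}
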
-
-
-
-static std::string get_bootstrap_servers() {
- RdKafka::Conf *conf;
- std::string bootstrap_servers;
- Test::conf_init(&conf, NULL, 0);
- conf->get("bootstrap.servers", bootstrap_servers);
- delete conf;
- return bootstrap_servers;
-}
-
-
-class DrCb : public RdKafka::DeliveryReportCb {
- public:
- void dr_cb(RdKafka::Message &msg) {
- if (msg.err())
- Test::Fail("Delivery failed: " + RdKafka::err2str(msg.err()));
- }
-};
-
-
-/**
- * @brief Produce messages to partitions.
- *
- * The pair is Toppar,msg_cnt_per_partition.
- * The Toppar is topic,partition_cnt.
- */
-static void produce_msgs(vector<pair<Toppar, int> > partitions) {
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 0);
-
- string errstr;
- DrCb dr;
- conf->set("dr_cb", &dr, errstr);
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create producer: " + errstr);
- delete conf;
-
- for (vector<pair<Toppar, int> >::iterator it = partitions.begin();
- it != partitions.end(); it++) {
- for (int part = 0; part < it->first.partition; part++) {
- for (int i = 0; i < it->second; i++) {
- RdKafka::ErrorCode err =
- p->produce(it->first.topic, part, RdKafka::Producer::RK_MSG_COPY,
- (void *)"Hello there", 11, NULL, 0, 0, NULL);
- TEST_ASSERT(!err, "produce(%s, %d) failed: %s", it->first.topic.c_str(),
- part, RdKafka::err2str(err).c_str());
-
- p->poll(0);
- }
- }
- }
-
- p->flush(10000);
-
- delete p;
-}
-
-
-
-static RdKafka::KafkaConsumer *make_consumer(
- string client_id,
- string group_id,
- string assignment_strategy,
- vector<pair<string, string> > *additional_conf,
- RdKafka::RebalanceCb *rebalance_cb,
- int timeout_s) {
- std::string bootstraps;
- std::string errstr;
- std::vector<std::pair<std::string, std::string> >::iterator itr;
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, timeout_s);
- Test::conf_set(conf, "client.id", client_id);
- Test::conf_set(conf, "group.id", group_id);
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- Test::conf_set(conf, "enable.auto.commit", "false");
- Test::conf_set(conf, "partition.assignment.strategy", assignment_strategy);
- if (additional_conf != NULL) {
- for (itr = (*additional_conf).begin(); itr != (*additional_conf).end();
- itr++)
- Test::conf_set(conf, itr->first, itr->second);
- }
-
- if (rebalance_cb) {
- if (conf->set("rebalance_cb", rebalance_cb, errstr))
- Test::Fail("Failed to set rebalance_cb: " + errstr);
- }
- RdKafka::KafkaConsumer *consumer =
- RdKafka::KafkaConsumer::create(conf, errstr);
- if (!consumer)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- return consumer;
-}
-
-/**
- * @returns a CSV string of the vector
- */
-static string string_vec_to_str(const vector<string> &v) {
- ostringstream ss;
- for (vector<string>::const_iterator it = v.begin(); it != v.end(); it++)
- ss << (it == v.begin() ? "" : ", ") << *it;
- return ss.str();
-}
-
-void expect_assignment(RdKafka::KafkaConsumer *consumer, size_t count) {
- std::vector<RdKafka::TopicPartition *> partitions;
- RdKafka::ErrorCode err;
- err = consumer->assignment(partitions);
- if (err)
- Test::Fail(consumer->name() +
- " assignment() failed: " + RdKafka::err2str(err));
- if (partitions.size() != count)
- Test::Fail(tostr() << "Expecting consumer " << consumer->name()
- << " to have " << count
- << " assigned partition(s), not: " << partitions.size());
- RdKafka::TopicPartition::destroy(partitions);
-}
-
-
-static bool TopicPartition_cmp(const RdKafka::TopicPartition *a,
- const RdKafka::TopicPartition *b) {
- if (a->topic() < b->topic())
- return true;
- else if (a->topic() > b->topic())
- return false;
- return a->partition() < b->partition();
-}
-
-
-void expect_assignment(RdKafka::KafkaConsumer *consumer,
- vector<RdKafka::TopicPartition *> &expected) {
- vector<RdKafka::TopicPartition *> partitions;
- RdKafka::ErrorCode err;
- err = consumer->assignment(partitions);
- if (err)
- Test::Fail(consumer->name() +
- " assignment() failed: " + RdKafka::err2str(err));
-
- if (partitions.size() != expected.size())
- Test::Fail(tostr() << "Expecting consumer " << consumer->name()
- << " to have " << expected.size()
- << " assigned partition(s), not " << partitions.size());
-
- sort(partitions.begin(), partitions.end(), TopicPartition_cmp);
- sort(expected.begin(), expected.end(), TopicPartition_cmp);
-
- int fails = 0;
- for (int i = 0; i < (int)partitions.size(); i++) {
- if (!TopicPartition_cmp(partitions[i], expected[i]) &&
- !TopicPartition_cmp(expected[i], partitions[i]))
- continue;
-
- Test::Say(tostr() << _C_RED << consumer->name() << ": expected assignment #"
- << i << " " << expected[i]->topic() << " ["
- << expected[i]->partition() << "], not "
- << partitions[i]->topic() << " ["
- << partitions[i]->partition() << "]\n");
- fails++;
- }
-
- if (fails)
- Test::Fail(consumer->name() + ": Expected assignment mismatch, see above");
-
- RdKafka::TopicPartition::destroy(partitions);
-}
-
-
-class DefaultRebalanceCb : public RdKafka::RebalanceCb {
- private:
- static string part_list_print(
- const vector<RdKafka::TopicPartition *> &partitions) {
- ostringstream ss;
- for (unsigned int i = 0; i < partitions.size(); i++)
- ss << (i == 0 ? "" : ", ") << partitions[i]->topic() << " ["
- << partitions[i]->partition() << "]";
- return ss.str();
- }
-
- public:
- int assign_call_cnt;
- int revoke_call_cnt;
- int nonempty_assign_call_cnt; /**< ASSIGN_PARTITIONS with partitions */
- int lost_call_cnt;
- int partitions_assigned_net;
- bool wait_rebalance;
- int64_t ts_last_assign; /**< Timestamp of last rebalance assignment */
- map<Toppar, int> msg_cnt; /**< Number of consumed messages per partition. */
-
- ~DefaultRebalanceCb() {
- reset_msg_cnt();
- }
-
- DefaultRebalanceCb() :
- assign_call_cnt(0),
- revoke_call_cnt(0),
- nonempty_assign_call_cnt(0),
- lost_call_cnt(0),
- partitions_assigned_net(0),
- wait_rebalance(false),
- ts_last_assign(0) {
- }
-
-
- void rebalance_cb(RdKafka::KafkaConsumer *consumer,
- RdKafka::ErrorCode err,
- std::vector<RdKafka::TopicPartition *> &partitions) {
- wait_rebalance = false;
-
- std::string protocol = consumer->rebalance_protocol();
-
- TEST_ASSERT(protocol == "COOPERATIVE",
- "%s: Expected rebalance_protocol \"COOPERATIVE\", not %s",
- consumer->name().c_str(), protocol.c_str());
-
- const char *lost_str = consumer->assignment_lost() ? " (LOST)" : "";
- Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << ": "
- << consumer->name() << " " << RdKafka::err2str(err)
- << lost_str << ": " << part_list_print(partitions)
- << "\n");
-
- if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
- if (consumer->assignment_lost())
- Test::Fail("unexpected lost assignment during ASSIGN rebalance");
- RdKafka::Error *error = consumer->incremental_assign(partitions);
- if (error)
- Test::Fail(tostr() << "consumer->incremental_assign() failed: "
- << error->str());
- if (partitions.size() > 0)
- nonempty_assign_call_cnt++;
- assign_call_cnt += 1;
- partitions_assigned_net += (int)partitions.size();
- ts_last_assign = test_clock();
-
- } else {
- if (consumer->assignment_lost())
- lost_call_cnt += 1;
- RdKafka::Error *error = consumer->incremental_unassign(partitions);
- if (error)
- Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
- << error->str());
- if (partitions.size() == 0)
- Test::Fail("revoked partitions size should never be 0");
- revoke_call_cnt += 1;
- partitions_assigned_net -= (int)partitions.size();
- }
-
- /* Reset message counters for the given partitions. */
- Test::Say(consumer->name() + ": resetting message counters:\n");
- reset_msg_cnt(partitions);
- }
-
- bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) {
- RdKafka::Message *msg = c->consume(timeout_ms);
- bool ret = msg->err() != RdKafka::ERR__TIMED_OUT;
- if (!msg->err())
- msg_cnt[Toppar(msg->topic_name(), msg->partition())]++;
- delete msg;
- return ret;
- }
-
- void reset_msg_cnt() {
- msg_cnt.clear();
- }
-
- void reset_msg_cnt(Toppar &tp) {
- int msgcnt = get_msg_cnt(tp);
- Test::Say(tostr() << " RESET " << tp.topic << " [" << tp.partition << "]"
- << " with " << msgcnt << " messages\n");
- if (!msg_cnt.erase(tp) && msgcnt)
- Test::Fail("erase failed!");
- }
-
- void reset_msg_cnt(const vector<RdKafka::TopicPartition *> &partitions) {
- for (unsigned int i = 0; i < partitions.size(); i++) {
- Toppar tp(partitions[i]->topic(), partitions[i]->partition());
- reset_msg_cnt(tp);
- }
- }
-
- int get_msg_cnt(const Toppar &tp) {
- map<Toppar, int>::iterator it = msg_cnt.find(tp);
- if (it == msg_cnt.end())
- return 0;
- return it->second;
- }
-};
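
Stripped of its counters and logging, the cooperative protocol handled by DefaultRebalanceCb reduces to two calls. A minimal sketch:

// Minimal cooperative rebalance callback: the partitions vector
// carries only the incremental delta, never the full assignment.
#include <librdkafka/rdkafkacpp.h>
#include <vector>

class MinimalCooperativeCb : public RdKafka::RebalanceCb {
 public:
  void rebalance_cb(RdKafka::KafkaConsumer *consumer,
                    RdKafka::ErrorCode err,
                    std::vector<RdKafka::TopicPartition *> &partitions) {
    RdKafka::Error *error;
    if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
      error = consumer->incremental_assign(partitions);
    else /* ERR__REVOKE_PARTITIONS */
      error = consumer->incremental_unassign(partitions);
    if (error)
      delete error; /* a real callback would log or propagate this */
  }
};
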
-
-
-
-/**
- * @brief Verify that the consumer's assignment is a subset of the
- * subscribed topics.
- *
- * @param allow_mismatch Allow assignment of not subscribed topics.
- * This can happen when the subscription is updated
- * but a rebalance callback hasn't been seen yet.
- * @param all_assignments Accumulated assignments for all consumers.
- * If an assigned partition already exists it means
- * the partition is assigned to multiple consumers and
- * the test will fail.
- * @param exp_msg_cnt Expected message count per assigned partition, or -1
- * if not to check.
- *
- * @returns the number of assigned partitions, or fails if the
- * assignment is empty or there is an assignment for
- * topic that is not subscribed.
- */
-static int verify_consumer_assignment(
- RdKafka::KafkaConsumer *consumer,
- DefaultRebalanceCb &rebalance_cb,
- const vector<string> &topics,
- bool allow_empty,
- bool allow_mismatch,
- map<Toppar, RdKafka::KafkaConsumer *> *all_assignments,
- int exp_msg_cnt) {
- vector<RdKafka::TopicPartition *> partitions;
- RdKafka::ErrorCode err;
- int fails = 0;
- int count;
- ostringstream ss;
-
- err = consumer->assignment(partitions);
- TEST_ASSERT(!err, "Failed to get assignment for consumer %s: %s",
- consumer->name().c_str(), RdKafka::err2str(err).c_str());
-
- count = (int)partitions.size();
-
- for (vector<RdKafka::TopicPartition *>::iterator it = partitions.begin();
- it != partitions.end(); it++) {
- RdKafka::TopicPartition *p = *it;
-
- if (find(topics.begin(), topics.end(), p->topic()) == topics.end()) {
- Test::Say(tostr() << (allow_mismatch ? _C_YEL "Warning (allowed)"
- : _C_RED "Error")
- << ": " << consumer->name() << " is assigned "
- << p->topic() << " [" << p->partition() << "] which is "
- << "not in the list of subscribed topics: "
- << string_vec_to_str(topics) << "\n");
- if (!allow_mismatch)
- fails++;
- }
-
- Toppar tp(p);
- pair<map<Toppar, RdKafka::KafkaConsumer *>::iterator, bool> ret;
- ret = all_assignments->insert(
- pair<Toppar, RdKafka::KafkaConsumer *>(tp, consumer));
- if (!ret.second) {
- Test::Say(tostr() << _C_RED << "Error: " << consumer->name()
- << " is assigned " << p->topic() << " ["
- << p->partition()
- << "] which is "
- "already assigned to consumer "
- << ret.first->second->name() << "\n");
- fails++;
- }
-
-
- int msg_cnt = rebalance_cb.get_msg_cnt(tp);
-
- if (exp_msg_cnt != -1 && msg_cnt != exp_msg_cnt) {
- Test::Say(tostr() << _C_RED << "Error: " << consumer->name()
- << " expected " << exp_msg_cnt << " messages on "
- << p->topic() << " [" << p->partition() << "], not "
- << msg_cnt << "\n");
- fails++;
- }
-
- ss << (it == partitions.begin() ? "" : ", ") << p->topic() << " ["
- << p->partition() << "] (" << msg_cnt << "msgs)";
- }
-
- RdKafka::TopicPartition::destroy(partitions);
-
- Test::Say(tostr() << "Consumer " << consumer->name() << " assignment ("
- << count << "): " << ss.str() << "\n");
-
- if (count == 0 && !allow_empty)
- Test::Fail("Consumer " + consumer->name() +
- " has unexpected empty assignment");
-
- if (fails)
- Test::Fail(
- tostr() << "Consumer " + consumer->name()
- << " assignment verification failed (see previous error)");
-
- return count;
-}
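
The double-assignment detection above relies on std::map::insert() reporting, via the bool in its return pair, whether the key was already present. In isolation:

// Detecting double-assignment with std::map::insert(), as done above.
// Keys here are plain strings purely for illustration.
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> owner; /* partition -> consumer */

  owner.insert(std::make_pair("demo[0]", "C_1"));
  std::pair<std::map<std::string, std::string>::iterator, bool> ret =
      owner.insert(std::make_pair("demo[0]", "C_2"));

  if (!ret.second) /* key existed: the partition is owned twice */
    std::cout << "demo[0] already assigned to " << ret.first->second << "\n";
  return 0;
}
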
-
-
-
-/* -------- a_assign_tests
- *
- * check the behavior of incremental assign / unassign outside the context of a
- * rebalance.
- */
-
-
-/** Incremental assign, then assign(NULL).
- */
-static void assign_test_1(RdKafka::KafkaConsumer *consumer,
- std::vector<RdKafka::TopicPartition *> toppars1,
- std::vector<RdKafka::TopicPartition *> toppars2) {
- RdKafka::ErrorCode err;
- RdKafka::Error *error;
-
- Test::Say("Incremental assign, then assign(NULL)\n");
-
- if ((error = consumer->incremental_assign(toppars1)))
- Test::Fail(tostr() << "Incremental assign failed: " << error->str());
- Test::check_assignment(consumer, 1, &toppars1[0]->topic());
-
- if ((err = consumer->unassign()))
- Test::Fail("Unassign failed: " + RdKafka::err2str(err));
- Test::check_assignment(consumer, 0, NULL);
-}
-
-
-/** Assign, then incremental unassign.
- */
-static void assign_test_2(RdKafka::KafkaConsumer *consumer,
- std::vector<RdKafka::TopicPartition *> toppars1,
- std::vector<RdKafka::TopicPartition *> toppars2) {
- RdKafka::ErrorCode err;
- RdKafka::Error *error;
-
- Test::Say("Assign, then incremental unassign\n");
-
- if ((err = consumer->assign(toppars1)))
- Test::Fail("Assign failed: " + RdKafka::err2str(err));
- Test::check_assignment(consumer, 1, &toppars1[0]->topic());
-
- if ((error = consumer->incremental_unassign(toppars1)))
- Test::Fail("Incremental unassign failed: " + error->str());
- Test::check_assignment(consumer, 0, NULL);
-}
-
-
-/** Incremental assign, then incremental unassign.
- */
-static void assign_test_3(RdKafka::KafkaConsumer *consumer,
- std::vector<RdKafka::TopicPartition *> toppars1,
- std::vector<RdKafka::TopicPartition *> toppars2) {
- RdKafka::Error *error;
-
- Test::Say("Incremental assign, then incremental unassign\n");
-
- if ((error = consumer->incremental_assign(toppars1)))
- Test::Fail("Incremental assign failed: " + error->str());
- Test::check_assignment(consumer, 1, &toppars1[0]->topic());
-
- if ((error = consumer->incremental_unassign(toppars1)))
- Test::Fail("Incremental unassign failed: " + error->str());
- Test::check_assignment(consumer, 0, NULL);
-}
-
-
-/** Multi-topic incremental assign and unassign + message consumption.
- */
-static void assign_test_4(RdKafka::KafkaConsumer *consumer,
- std::vector<RdKafka::TopicPartition *> toppars1,
- std::vector<RdKafka::TopicPartition *> toppars2) {
- RdKafka::Error *error;
-
- Test::Say(
- "Multi-topic incremental assign and unassign + message consumption\n");
-
- if ((error = consumer->incremental_assign(toppars1)))
- Test::Fail("Incremental assign failed: " + error->str());
- Test::check_assignment(consumer, 1, &toppars1[0]->topic());
-
- RdKafka::Message *m = consumer->consume(5000);
- if (m->err() != RdKafka::ERR_NO_ERROR)
- Test::Fail("Expecting a consumed message.");
- if (m->len() != 100)
- Test::Fail(tostr() << "Expecting msg len to be 100, not: "
- << m->len()); /* implies read from topic 1. */
- delete m;
-
- if ((error = consumer->incremental_unassign(toppars1)))
- Test::Fail("Incremental unassign failed: " + error->str());
- Test::check_assignment(consumer, 0, NULL);
-
- m = consumer->consume(100);
- if (m->err() != RdKafka::ERR__TIMED_OUT)
- Test::Fail("Not expecting a consumed message.");
- delete m;
-
- if ((error = consumer->incremental_assign(toppars2)))
- Test::Fail("Incremental assign failed: " + error->str());
- Test::check_assignment(consumer, 1, &toppars2[0]->topic());
-
- m = consumer->consume(5000);
- if (m->err() != RdKafka::ERR_NO_ERROR)
- Test::Fail("Expecting a consumed message.");
- if (m->len() != 200)
- Test::Fail(tostr() << "Expecting msg len to be 200, not: "
- << m->len()); /* implies read from topic 2. */
- delete m;
-
- if ((error = consumer->incremental_assign(toppars1)))
- Test::Fail("Incremental assign failed: " + error->str());
- if (Test::assignment_partition_count(consumer, NULL) != 2)
- Test::Fail(tostr() << "Expecting current assignment to have size 2, not: "
- << Test::assignment_partition_count(consumer, NULL));
-
- m = consumer->consume(5000);
- if (m->err() != RdKafka::ERR_NO_ERROR)
- Test::Fail("Expecting a consumed message.");
- delete m;
-
- if ((error = consumer->incremental_unassign(toppars2)))
- Test::Fail("Incremental unassign failed: " + error->str());
- if ((error = consumer->incremental_unassign(toppars1)))
- Test::Fail("Incremental unassign failed: " + error->str());
- Test::check_assignment(consumer, 0, NULL);
-}
-
-
-/** Incremental assign and unassign of empty collection.
- */
-static void assign_test_5(RdKafka::KafkaConsumer *consumer,
- std::vector<RdKafka::TopicPartition *> toppars1,
- std::vector<RdKafka::TopicPartition *> toppars2) {
- RdKafka::Error *error;
- std::vector<RdKafka::TopicPartition *> toppars3;
-
- Test::Say("Incremental assign and unassign of empty collection\n");
-
- if ((error = consumer->incremental_assign(toppars3)))
- Test::Fail("Incremental assign failed: " + error->str());
- Test::check_assignment(consumer, 0, NULL);
-
- if ((error = consumer->incremental_unassign(toppars3)))
- Test::Fail("Incremental unassign failed: " + error->str());
- Test::check_assignment(consumer, 0, NULL);
-}
-
-
-
-static void run_test(
- const std::string &t1,
- const std::string &t2,
- void (*test)(RdKafka::KafkaConsumer *consumer,
- std::vector<RdKafka::TopicPartition *> toppars1,
- std::vector<RdKafka::TopicPartition *> toppars2)) {
- std::vector<RdKafka::TopicPartition *> toppars1;
- toppars1.push_back(RdKafka::TopicPartition::create(t1, 0));
- std::vector<RdKafka::TopicPartition *> toppars2;
- toppars2.push_back(RdKafka::TopicPartition::create(t2, 0));
-
- RdKafka::KafkaConsumer *consumer =
- make_consumer("C_1", t1, "cooperative-sticky", NULL, NULL, 10);
-
- test(consumer, toppars1, toppars2);
-
- RdKafka::TopicPartition::destroy(toppars1);
- RdKafka::TopicPartition::destroy(toppars2);
-
- consumer->close();
- delete consumer;
-}
-
-
-static void a_assign_tests() {
- SUB_TEST_QUICK();
-
- int msgcnt = 1000;
- const int msgsize1 = 100;
- const int msgsize2 = 200;
-
- std::string topic1_str = Test::mk_topic_name("0113-a1", 1);
- test_create_topic(NULL, topic1_str.c_str(), 1, 1);
- std::string topic2_str = Test::mk_topic_name("0113-a2", 1);
- test_create_topic(NULL, topic2_str.c_str(), 1, 1);
-
- test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1);
- test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2);
-
- run_test(topic1_str, topic2_str, assign_test_1);
- run_test(topic1_str, topic2_str, assign_test_2);
- run_test(topic1_str, topic2_str, assign_test_3);
- run_test(topic1_str, topic2_str, assign_test_4);
- run_test(topic1_str, topic2_str, assign_test_5);
-
- SUB_TEST_PASS();
-}
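
The five variants above combine the same four operations; outside any rebalance, the bare incremental flow looks like this (broker address and topic name hypothetical):

// Bare incremental assign/unassign outside a rebalance, mirroring
// assign_test_3 above. Assumes localhost:9092 and a topic "demo".
#include <librdkafka/rdkafkacpp.h>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string errstr;
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  conf->set("bootstrap.servers", "localhost:9092", errstr);
  conf->set("group.id", "incr-demo", errstr);
  RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
  delete conf;
  if (!c)
    return 1;

  std::vector<RdKafka::TopicPartition *> tps;
  tps.push_back(RdKafka::TopicPartition::create("demo", 0));

  RdKafka::Error *error = c->incremental_assign(tps); /* grow */
  if (error) {
    std::cerr << "incremental_assign: " << error->str() << "\n";
    delete error;
  }

  error = c->incremental_unassign(tps); /* shrink again */
  if (error) {
    std::cerr << "incremental_unassign: " << error->str() << "\n";
    delete error;
  }

  RdKafka::TopicPartition::destroy(tps);
  c->close();
  delete c;
  return 0;
}
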
-
-
-
-/**
- * @brief Quick Assign 1,2, Assign 2,3, Assign 1,2,3 test to verify
- * that the correct OffsetFetch response is used.
- * See note in rdkafka_assignment.c for details.
- *
- * Makes use of the mock cluster to induce latency.
- */
-static void a_assign_rapid() {
- SUB_TEST_QUICK();
-
- std::string group_id = __FUNCTION__;
-
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
- int32_t coord_id = 1;
- rd_kafka_mock_coordinator_set(mcluster, "group", group_id.c_str(), coord_id);
-
- rd_kafka_mock_topic_create(mcluster, "topic1", 1, 1);
- rd_kafka_mock_topic_create(mcluster, "topic2", 1, 1);
- rd_kafka_mock_topic_create(mcluster, "topic3", 1, 1);
-
- /*
- * Produce messages to topics
- */
- const int msgs_per_partition = 1000;
-
- RdKafka::Conf *pconf;
- Test::conf_init(&pconf, NULL, 10);
- Test::conf_set(pconf, "bootstrap.servers", bootstraps);
- Test::conf_set(pconf, "security.protocol", "plaintext");
- std::string errstr;
- RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
- if (!p)
- Test::Fail(tostr() << __FUNCTION__
- << ": Failed to create producer: " << errstr);
- delete pconf;
-
- Test::produce_msgs(p, "topic1", 0, msgs_per_partition, 10,
- false /*no flush*/);
- Test::produce_msgs(p, "topic2", 0, msgs_per_partition, 10,
- false /*no flush*/);
- Test::produce_msgs(p, "topic3", 0, msgs_per_partition, 10,
- false /*no flush*/);
- p->flush(10 * 1000);
-
- delete p;
-
- vector<RdKafka::TopicPartition *> toppars1;
- toppars1.push_back(RdKafka::TopicPartition::create("topic1", 0));
- vector<RdKafka::TopicPartition *> toppars2;
- toppars2.push_back(RdKafka::TopicPartition::create("topic2", 0));
- vector<RdKafka::TopicPartition *> toppars3;
- toppars3.push_back(RdKafka::TopicPartition::create("topic3", 0));
-
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 20);
- Test::conf_set(conf, "bootstrap.servers", bootstraps);
- Test::conf_set(conf, "security.protocol", "plaintext");
- Test::conf_set(conf, "client.id", __FUNCTION__);
- Test::conf_set(conf, "group.id", group_id);
- Test::conf_set(conf, "auto.offset.reset", "earliest");
- Test::conf_set(conf, "enable.auto.commit", "false");
-
- RdKafka::KafkaConsumer *consumer;
- consumer = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!consumer)
- Test::Fail(tostr() << __FUNCTION__
- << ": Failed to create consumer: " << errstr);
- delete conf;
-
- vector<RdKafka::TopicPartition *> toppars;
- vector<RdKafka::TopicPartition *> expected;
-
- map<Toppar, int64_t> pos; /* Expected consume position per partition */
- pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 0;
- pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 0;
- pos[Toppar(toppars3[0]->topic(), toppars3[0]->partition())] = 0;
-
- /* To make sure offset commits are fetched in proper assign sequence
- * we commit an offset that should not be used in the final consume loop.
- * This commit will be overwritten below with another commit. */
- vector<RdKafka::TopicPartition *> offsets;
- offsets.push_back(RdKafka::TopicPartition::create(
- toppars1[0]->topic(), toppars1[0]->partition(), 11));
- /* This partition should start at this position even though
- * there will be a subsequent commit to overwrite it, which should not
- * be used since this partition is never unassigned. */
- offsets.push_back(RdKafka::TopicPartition::create(
- toppars2[0]->topic(), toppars2[0]->partition(), 22));
- pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 22;
-
- Test::print_TopicPartitions("pre-commit", offsets);
-
- RdKafka::ErrorCode err;
- err = consumer->commitSync(offsets);
- if (err)
- Test::Fail(tostr() << __FUNCTION__ << ": pre-commit failed: "
- << RdKafka::err2str(err) << "\n");
-
- /* Add coordinator delay so that the OffsetFetchRequest originating
- * from the coming incremental_assign() will not finish before
- * we call incremental_unassign() and incremental_assign() again, resulting
- * in a situation where the initial OffsetFetchResponse will contain
- * an older offset for a previous assignment of one partition. */
- rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 5000);
-
-
- /* Assign 1,2 == 1,2 */
- toppars.push_back(toppars1[0]);
- toppars.push_back(toppars2[0]);
- expected.push_back(toppars1[0]);
- expected.push_back(toppars2[0]);
- Test::incremental_assign(consumer, toppars);
- expect_assignment(consumer, expected);
-
- /* Unassign -1 == 2 */
- toppars.clear();
- toppars.push_back(toppars1[0]);
- vector<RdKafka::TopicPartition *>::iterator it =
- find(expected.begin(), expected.end(), toppars1[0]);
- expected.erase(it);
-
- Test::incremental_unassign(consumer, toppars);
- expect_assignment(consumer, expected);
-
-
- /* Commit offset for the removed partition and the partition that is
- * unchanged in the assignment. */
- RdKafka::TopicPartition::destroy(offsets);
- offsets.push_back(RdKafka::TopicPartition::create(
- toppars1[0]->topic(), toppars1[0]->partition(), 55));
- offsets.push_back(RdKafka::TopicPartition::create(
- toppars2[0]->topic(), toppars2[0]->partition(), 33)); /* should not be
- * used. */
- pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 55;
- Test::print_TopicPartitions("commit", offsets);
-
- err = consumer->commitAsync(offsets);
- if (err)
- Test::Fail(tostr() << __FUNCTION__
- << ": commit failed: " << RdKafka::err2str(err) << "\n");
-
- /* Assign +3 == 2,3 */
- toppars.clear();
- toppars.push_back(toppars3[0]);
- expected.push_back(toppars3[0]);
- Test::incremental_assign(consumer, toppars);
- expect_assignment(consumer, expected);
-
- /* Now remove the latency */
- Test::Say(_C_MAG "Clearing rtt\n");
- rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 0);
-
- /* Assign +1 == 1,2,3 */
- toppars.clear();
- toppars.push_back(toppars1[0]);
- expected.push_back(toppars1[0]);
- Test::incremental_assign(consumer, toppars);
- expect_assignment(consumer, expected);
-
- /*
- * Verify consumed messages
- */
- int wait_end = (int)expected.size();
- while (wait_end > 0) {
- RdKafka::Message *msg = consumer->consume(10 * 1000);
- if (msg->err() == RdKafka::ERR__TIMED_OUT)
- Test::Fail(tostr() << __FUNCTION__
- << ": Consume timed out waiting "
- "for "
- << wait_end << " more partitions");
-
- Toppar tp = Toppar(msg->topic_name(), msg->partition());
- int64_t *exp_pos = &pos[tp];
-
- Test::Say(3, tostr() << __FUNCTION__ << ": Received " << tp.topic << " ["
- << tp.partition << "] at offset " << msg->offset()
- << " (expected offset " << *exp_pos << ")\n");
-
- if (*exp_pos != msg->offset())
- Test::Fail(tostr() << __FUNCTION__ << ": expected message offset "
- << *exp_pos << " for " << msg->topic_name() << " ["
- << msg->partition() << "], not " << msg->offset()
- << "\n");
- (*exp_pos)++;
- if (*exp_pos == msgs_per_partition) {
- TEST_ASSERT(wait_end > 0, "");
- wait_end--;
- } else if (msg->offset() > msgs_per_partition)
- Test::Fail(tostr() << __FUNCTION__ << ": unexpected message with "
- << "offset " << msg->offset() << " on " << tp.topic
- << " [" << tp.partition << "]\n");
-
- delete msg;
- }
-
- RdKafka::TopicPartition::destroy(offsets);
- RdKafka::TopicPartition::destroy(toppars1);
- RdKafka::TopicPartition::destroy(toppars2);
- RdKafka::TopicPartition::destroy(toppars3);
-
- delete consumer;
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
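
The latency injection above comes from librdkafka's mock cluster, whose C API is callable from these C++ tests. The essential calls in isolation (error handling elided; the installed header path is assumed to be librdkafka/rdkafka_mock.h):

// Sketch of the mock-cluster RTT trick used by a_assign_rapid().
#include <librdkafka/rdkafka.h>
#include <librdkafka/rdkafka_mock.h>

int main() {
  char errstr[256];
  rd_kafka_conf_t *conf = rd_kafka_conf_new();
  rd_kafka_t *rk =
      rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));

  /* Spin up an in-process cluster with 3 mock brokers. */
  rd_kafka_mock_cluster_t *mcluster = rd_kafka_mock_cluster_new(rk, 3);
  const char *bootstraps = rd_kafka_mock_cluster_bootstraps(mcluster);
  (void)bootstraps; /* feed this to the clients under test */

  rd_kafka_mock_topic_create(mcluster, "topic1", 1, 1);

  /* Delay every response from broker 1 by 5 seconds... */
  rd_kafka_mock_broker_set_rtt(mcluster, 1, 5000);
  /* ...and later lift the delay again. */
  rd_kafka_mock_broker_set_rtt(mcluster, 1, 0);

  rd_kafka_mock_cluster_destroy(mcluster);
  rd_kafka_destroy(rk);
  return 0;
}
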
-
-
-/* Check behavior when:
- * 1. single topic with 2 partitions.
- * 2. consumer 1 (with rebalance_cb) subscribes to it.
- * 3. consumer 2 (with rebalance_cb) subscribes to it.
- * 4. close.
- */
-
-static void b_subscribe_with_cb_test(rd_bool_t close_consumer) {
- SUB_TEST();
-
- std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
- test_create_topic(NULL, topic_name.c_str(), 2, 1);
-
- DefaultRebalanceCb rebalance_cb1;
- RdKafka::KafkaConsumer *c1 = make_consumer(
- "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 25);
- DefaultRebalanceCb rebalance_cb2;
- RdKafka::KafkaConsumer *c2 = make_consumer(
- "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 25);
- test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000);
-
- Test::subscribe(c1, topic_name);
-
- bool c2_subscribed = false;
- while (true) {
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
-
- /* Start c2 after c1 has received initial assignment */
- if (!c2_subscribed && rebalance_cb1.assign_call_cnt > 0) {
- Test::subscribe(c2, topic_name);
- c2_subscribed = true;
- }
-
- /* Failure case: test will time out. */
- if (rebalance_cb1.assign_call_cnt == 3 &&
- rebalance_cb2.assign_call_cnt == 2) {
- break;
- }
- }
-
- /* Sequence of events:
- *
- * 1. c1 joins group.
- * 2. c1 gets assigned 2 partitions.
- * - there isn't a follow-on rebalance because there aren't any revoked
- * partitions.
- * 3. c2 joins group.
- * 4. This results in a rebalance with one partition being revoked from c1,
- * and no partitions assigned to either c1 or c2 (however the rebalance
- * callback will be called in each case with an empty set).
- * 5. c1 then re-joins the group since it had a partition revoked.
- * 6. c2 is now assigned a single partition, and c1's incremental assignment
- * is empty.
- * 7. Since there were no revoked partitions, no further rebalance is
- * triggered.
- */
-
- /* The rebalance cb is always called on assign, even if empty. */
- if (rebalance_cb1.assign_call_cnt != 3)
- Test::Fail(tostr() << "Expecting 3 assign calls on consumer 1, not "
- << rebalance_cb1.assign_call_cnt);
- if (rebalance_cb2.assign_call_cnt != 2)
- Test::Fail(tostr() << "Expecting 2 assign calls on consumer 2, not: "
- << rebalance_cb2.assign_call_cnt);
-
- /* The rebalance cb is not called on an empty revoke (unless partitions lost,
- * which is not the case here) */
- if (rebalance_cb1.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: "
- << rebalance_cb1.revoke_call_cnt);
- if (rebalance_cb2.revoke_call_cnt != 0)
- Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: "
- << rebalance_cb2.revoke_call_cnt);
-
- /* Final state */
-
- /* Expect both consumers to have 1 assigned partition (via net calculation in
- * rebalance_cb) */
- if (rebalance_cb1.partitions_assigned_net != 1)
- Test::Fail(tostr()
- << "Expecting consumer 1 to have net 1 assigned partition, not: "
- << rebalance_cb1.partitions_assigned_net);
- if (rebalance_cb2.partitions_assigned_net != 1)
- Test::Fail(tostr()
- << "Expecting consumer 2 to have net 1 assigned partition, not: "
- << rebalance_cb2.partitions_assigned_net);
-
- /* Expect both consumers to have 1 assigned partition (via ->assignment()
- * query) */
- expect_assignment(c1, 1);
- expect_assignment(c2, 1);
-
- /* Make sure the fetchers are running */
- int msgcnt = 100;
- const int msgsize1 = 100;
- test_produce_msgs_easy_size(topic_name.c_str(), 0, 0, msgcnt, msgsize1);
- test_produce_msgs_easy_size(topic_name.c_str(), 0, 1, msgcnt, msgsize1);
-
- bool consumed_from_c1 = false;
- bool consumed_from_c2 = false;
- while (true) {
- RdKafka::Message *msg1 = c1->consume(100);
- RdKafka::Message *msg2 = c2->consume(100);
-
- if (msg1->err() == RdKafka::ERR_NO_ERROR)
- consumed_from_c1 = true;
- if (msg2->err() == RdKafka::ERR_NO_ERROR)
- consumed_from_c2 = true;
-
- delete msg1;
- delete msg2;
-
- /* Failure case: test will time out. */
- if (consumed_from_c1 && consumed_from_c2)
- break;
- }
-
- if (!close_consumer) {
- delete c1;
- delete c2;
- return;
- }
-
- c1->close();
- c2->close();
-
- /* Closing the consumer should trigger rebalance_cb (revoke): */
- if (rebalance_cb1.revoke_call_cnt != 2)
- Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: "
- << rebalance_cb1.revoke_call_cnt);
- if (rebalance_cb2.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: "
- << rebalance_cb2.revoke_call_cnt);
-
- /* ..and net assigned partitions should drop to 0 in both cases: */
- if (rebalance_cb1.partitions_assigned_net != 0)
- Test::Fail(
- tostr()
- << "Expecting consumer 1 to have net 0 assigned partitions, not: "
- << rebalance_cb1.partitions_assigned_net);
- if (rebalance_cb2.partitions_assigned_net != 0)
- Test::Fail(
- tostr()
- << "Expecting consumer 2 to have net 0 assigned partitions, not: "
- << rebalance_cb2.partitions_assigned_net);
-
- /* Nothing in this test should result in lost partitions */
- if (rebalance_cb1.lost_call_cnt > 0)
- Test::Fail(
- tostr() << "Expecting consumer 1 to have 0 lost partition events, not: "
- << rebalance_cb1.lost_call_cnt);
- if (rebalance_cb2.lost_call_cnt > 0)
- Test::Fail(
- tostr() << "Expecting consumer 2 to have 0 lost partition events, not: "
- << rebalance_cb2.lost_call_cnt);
-
- delete c1;
- delete c2;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Single topic with 2 partitions.
- * 2. Consumer 1 (no rebalance_cb) subscribes to it.
- * 3. Consumer 2 (no rebalance_cb) subscribes to it.
- * 4. Close.
- */
-
-static void c_subscribe_no_cb_test(rd_bool_t close_consumer) {
- SUB_TEST();
-
- std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
- test_create_topic(NULL, topic_name.c_str(), 2, 1);
-
- RdKafka::KafkaConsumer *c1 =
- make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 20);
- RdKafka::KafkaConsumer *c2 =
- make_consumer("C_2", group_name, "cooperative-sticky", NULL, NULL, 20);
- test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000);
-
- Test::subscribe(c1, topic_name);
-
- bool c2_subscribed = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
-
- if (Test::assignment_partition_count(c1, NULL) == 2 && !c2_subscribed) {
- Test::subscribe(c2, topic_name);
- c2_subscribed = true;
- }
-
- if (Test::assignment_partition_count(c1, NULL) == 1 &&
- Test::assignment_partition_count(c2, NULL) == 1) {
- Test::Say("Consumer 1 and 2 are both assigned to single partition.\n");
- done = true;
- }
- }
-
- if (close_consumer) {
- Test::Say("Closing consumer 1\n");
- c1->close();
- Test::Say("Closing consumer 2\n");
- c2->close();
- } else {
- Test::Say("Skipping close() of consumer 1 and 2.\n");
- }
-
- delete c1;
- delete c2;
-
- SUB_TEST_PASS();
-}
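
Test::assignment_partition_count(), used in this and the following tests, is a private harness helper; on the public API it can be approximated as follows (sketch only):

// Counting the currently assigned partitions via assignment().
#include <librdkafka/rdkafkacpp.h>
#include <vector>

static int assignment_partition_count(RdKafka::KafkaConsumer *c) {
  std::vector<RdKafka::TopicPartition *> parts;
  if (c->assignment(parts))
    return -1; /* assignment() failed */
  int cnt = (int)parts.size();
  RdKafka::TopicPartition::destroy(parts); /* caller owns the elements */
  return cnt;
}
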
-
-
-
-/* Check behavior when:
- * 1. Single consumer (no rebalance_cb) subscribes to topic.
- * 2. Subscription is changed (topic added).
- * 3. Consumer is closed.
- */
-
-static void d_change_subscription_add_topic(rd_bool_t close_consumer) {
- SUB_TEST();
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
- std::string topic_name_2 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name_1);
-
- bool subscribed_to_one_topic = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (Test::assignment_partition_count(c, NULL) == 2 &&
- !subscribed_to_one_topic) {
- subscribed_to_one_topic = true;
- Test::subscribe(c, topic_name_1, topic_name_2);
- }
-
- if (Test::assignment_partition_count(c, NULL) == 4) {
- Test::Say("Consumer is assigned to two topics.\n");
- done = true;
- }
- }
-
- if (close_consumer) {
- Test::Say("Closing consumer\n");
- c->close();
- } else
- Test::Say("Skipping close() of consumer\n");
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Single consumer (no rebalance_cb) subscribes to topic.
- * 2. Subscription is changed (topic removed).
- * 3. Consumer is closed.
- */
-
-static void e_change_subscription_remove_topic(rd_bool_t close_consumer) {
- SUB_TEST();
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
- std::string topic_name_2 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name_1, topic_name_2);
-
- bool subscribed_to_two_topics = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (Test::assignment_partition_count(c, NULL) == 4 &&
- !subscribed_to_two_topics) {
- subscribed_to_two_topics = true;
- Test::subscribe(c, topic_name_1);
- }
-
- if (Test::assignment_partition_count(c, NULL) == 2) {
- Test::Say("Consumer is assigned to one topic\n");
- done = true;
- }
- }
-
- if (close_consumer) {
- Test::Say("Closing consumer\n");
- c->close();
- } else
- Test::Say("Skipping close() of consumer\n");
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check that use of consumer->assign() and consumer->unassign() is disallowed
- * when a COOPERATIVE assignor is in use.
- *
- * Except when the consumer is closing, where all forms of unassign are
- * allowed and treated as a full unassign.
- */
-
-class FTestRebalanceCb : public RdKafka::RebalanceCb {
- public:
- bool assigned;
- bool closing;
-
- FTestRebalanceCb() : assigned(false), closing(false) {
- }
-
- void rebalance_cb(RdKafka::KafkaConsumer *consumer,
- RdKafka::ErrorCode err,
- std::vector<RdKafka::TopicPartition *> &partitions) {
- Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " "
- << RdKafka::err2str(err) << (closing ? " (closing)" : "")
- << "\n");
-
- if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
- RdKafka::ErrorCode err_resp = consumer->assign(partitions);
- Test::Say(tostr() << "consumer->assign() response code: " << err_resp
- << "\n");
- if (err_resp != RdKafka::ERR__STATE)
- Test::Fail(tostr() << "Expected assign to fail with error code: "
- << RdKafka::ERR__STATE << "(ERR__STATE)");
-
- RdKafka::Error *error = consumer->incremental_assign(partitions);
- if (error)
- Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
- << error->str());
-
- assigned = true;
-
- } else {
- RdKafka::ErrorCode err_resp = consumer->unassign();
- Test::Say(tostr() << "consumer->unassign() response code: " << err_resp
- << "\n");
-
- if (!closing) {
- if (err_resp != RdKafka::ERR__STATE)
- Test::Fail(tostr() << "Expected assign to fail with error code: "
- << RdKafka::ERR__STATE << "(ERR__STATE)");
-
- RdKafka::Error *error = consumer->incremental_unassign(partitions);
- if (error)
- Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
- << error->str());
-
- } else {
- /* During termination (close()) any type of unassign*() is allowed. */
- if (err_resp)
- Test::Fail(tostr() << "Expected unassign to succeed during close, "
- "but got: "
- << RdKafka::ERR__STATE << "(ERR__STATE)");
- }
- }
- }
-};
-
-
-static void f_assign_call_cooperative() {
- SUB_TEST();
-
- std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name.c_str(), 1, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
- FTestRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name);
-
- while (!rebalance_cb.assigned)
- Test::poll_once(c, 500);
-
- rebalance_cb.closing = true;
- c->close();
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check that use of consumer->incremental_assign() and
- * consumer->incremental_unassign() is disallowed when an EAGER assignor is in
- * use.
- */
-class GTestRebalanceCb : public RdKafka::RebalanceCb {
- public:
- bool assigned;
- bool closing;
-
- GTestRebalanceCb() : assigned(false), closing(false) {
- }
-
- void rebalance_cb(RdKafka::KafkaConsumer *consumer,
- RdKafka::ErrorCode err,
- std::vector<RdKafka::TopicPartition *> &partitions) {
- Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " "
- << RdKafka::err2str(err) << "\n");
-
- if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
- RdKafka::Error *error = consumer->incremental_assign(partitions);
- Test::Say(tostr() << "consumer->incremental_assign() response: "
- << (!error ? "NULL" : error->str()) << "\n");
- if (!error)
- Test::Fail("Expected consumer->incremental_assign() to fail");
- if (error->code() != RdKafka::ERR__STATE)
- Test::Fail(tostr() << "Expected consumer->incremental_assign() to fail "
- "with error code "
- << RdKafka::ERR__STATE);
- delete error;
-
- RdKafka::ErrorCode err_resp = consumer->assign(partitions);
- if (err_resp)
- Test::Fail(tostr() << "consumer->assign() failed: " << err_resp);
-
- assigned = true;
-
- } else {
- RdKafka::Error *error = consumer->incremental_unassign(partitions);
- Test::Say(tostr() << "consumer->incremental_unassign() response: "
- << (!error ? "NULL" : error->str()) << "\n");
-
- if (!closing) {
- if (!error)
- Test::Fail("Expected consumer->incremental_unassign() to fail");
- if (error->code() != RdKafka::ERR__STATE)
- Test::Fail(tostr() << "Expected consumer->incremental_unassign() to "
- "fail with error code "
- << RdKafka::ERR__STATE);
- delete error;
-
- RdKafka::ErrorCode err_resp = consumer->unassign();
- if (err_resp)
- Test::Fail(tostr() << "consumer->unassign() failed: " << err_resp);
-
- } else {
- /* During termination (close()) any type of unassign*() is allowed. */
- if (error)
- Test::Fail(
- tostr()
- << "Expected incremental_unassign to succeed during close, "
- "but got: "
- << RdKafka::ERR__STATE << "(ERR__STATE)");
- }
- }
- }
-};
-
-static void g_incremental_assign_call_eager() {
- SUB_TEST();
-
- std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name.c_str(), 1, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
- GTestRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c = make_consumer(
- "C_1", group_name, "roundrobin", &additional_conf, &rebalance_cb, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name);
-
- while (!rebalance_cb.assigned)
- Test::poll_once(c, 500);
-
- rebalance_cb.closing = true;
- c->close();
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Single consumer (rebalance_cb) subscribes to two topics.
- * 2. One of the topics is deleted.
- * 3. Consumer is closed.
- */
-
-static void h_delete_topic() {
- SUB_TEST();
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
- std::string topic_name_2 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_2.c_str(), 1, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
- DefaultRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name_1, topic_name_2);
-
- bool deleted = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- std::vector<RdKafka::TopicPartition *> partitions;
- c->assignment(partitions);
-
- if (partitions.size() == 2 && !deleted) {
- if (rebalance_cb.assign_call_cnt != 1)
- Test::Fail(tostr() << "Expected 1 assign call, saw "
- << rebalance_cb.assign_call_cnt << "\n");
- Test::delete_topic(c, topic_name_2.c_str());
- deleted = true;
- }
-
- if (partitions.size() == 1 && deleted) {
- if (partitions[0]->topic() != topic_name_1)
- Test::Fail(tostr() << "Expecting subscribed topic to be '"
- << topic_name_1 << "' not '"
- << partitions[0]->topic() << "'");
- Test::Say(tostr() << "Assignment no longer includes deleted topic '"
- << topic_name_2 << "'\n");
- done = true;
- }
-
- RdKafka::TopicPartition::destroy(partitions);
- }
-
- Test::Say("Closing consumer\n");
- c->close();
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Single consumer (rebalance_cb) subscribes to a single topic.
- * 2. That topic is deleted leaving no topics.
- * 3. Consumer is closed.
- */
-
-static void i_delete_topic_2() {
- SUB_TEST();
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
- DefaultRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name_1);
-
- bool deleted = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) {
- if (rebalance_cb.assign_call_cnt != 1)
- Test::Fail(tostr() << "Expected one assign call, saw "
- << rebalance_cb.assign_call_cnt << "\n");
- Test::delete_topic(c, topic_name_1.c_str());
- deleted = true;
- }
-
- if (Test::assignment_partition_count(c, NULL) == 0 && deleted) {
- Test::Say(tostr() << "Assignment is empty following deletion of topic\n");
- done = true;
- }
- }
-
- Test::Say("Closing consumer\n");
- c->close();
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Single consumer (without rebalance_cb) subscribes to a single topic.
- * 2. That topic is deleted leaving no topics.
- * 3. Consumer is closed.
- */
-
-static void j_delete_topic_no_rb_callback() {
- SUB_TEST();
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
- RdKafka::KafkaConsumer *c = make_consumer(
- "C_1", group_name, "cooperative-sticky", &additional_conf, NULL, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name_1);
-
- bool deleted = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) {
- Test::delete_topic(c, topic_name_1.c_str());
- deleted = true;
- }
-
- if (Test::assignment_partition_count(c, NULL) == 0 && deleted) {
- Test::Say(tostr() << "Assignment is empty following deletion of topic\n");
- done = true;
- }
- }
-
- Test::Say("Closing consumer\n");
- c->close();
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Single consumer (rebalance_cb) subscribes to a 1 partition topic.
- * 2. Number of partitions is increased to 2.
- * 3. Consumer is closed.
- */
-
-static void k_add_partition() {
- SUB_TEST();
-
- std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
- test_create_topic(NULL, topic_name.c_str(), 1, 1);
-
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
- DefaultRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name);
-
- bool subscribed = false;
- bool done = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (Test::assignment_partition_count(c, NULL) == 1 && !subscribed) {
- if (rebalance_cb.assign_call_cnt != 1)
- Test::Fail(tostr() << "Expected 1 assign call, saw "
- << rebalance_cb.assign_call_cnt);
- if (rebalance_cb.revoke_call_cnt != 0)
- Test::Fail(tostr() << "Expected 0 revoke calls, saw "
- << rebalance_cb.revoke_call_cnt);
- Test::create_partitions(c, topic_name.c_str(), 2);
- subscribed = true;
- }
-
- if (Test::assignment_partition_count(c, NULL) == 2 && subscribed) {
- if (rebalance_cb.assign_call_cnt != 2)
- Test::Fail(tostr() << "Expected 2 assign calls, saw "
- << rebalance_cb.assign_call_cnt);
- if (rebalance_cb.revoke_call_cnt != 0)
- Test::Fail(tostr() << "Expected 0 revoke calls, saw "
- << rebalance_cb.revoke_call_cnt);
- done = true;
- }
- }
-
- Test::Say("Closing consumer\n");
- c->close();
- delete c;
-
- if (rebalance_cb.assign_call_cnt != 2)
- Test::Fail(tostr() << "Expected 2 assign calls, saw "
- << rebalance_cb.assign_call_cnt);
- if (rebalance_cb.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expected 1 revoke call, saw "
- << rebalance_cb.revoke_call_cnt);
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Two consumers (with rebalance_cb's) subscribe to two topics.
- * 2. One of the consumers calls unsubscribe.
- * 3. Consumers are closed.
- */
-
-static void l_unsubscribe() {
- SUB_TEST();
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string topic_name_2 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
- test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
- test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
-
- DefaultRebalanceCb rebalance_cb1;
- RdKafka::KafkaConsumer *c1 = make_consumer(
- "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 30);
- test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c1->c_ptr(), topic_name_2.c_str(), 10 * 1000);
-
- Test::subscribe(c1, topic_name_1, topic_name_2);
-
- DefaultRebalanceCb rebalance_cb2;
- RdKafka::KafkaConsumer *c2 = make_consumer(
- "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 30);
- Test::subscribe(c2, topic_name_1, topic_name_2);
-
- bool done = false;
- bool unsubscribed = false;
- while (!done) {
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
-
- if (Test::assignment_partition_count(c1, NULL) == 2 &&
- Test::assignment_partition_count(c2, NULL) == 2) {
- if (rebalance_cb1.assign_call_cnt != 1)
- Test::Fail(
- tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: "
- << rebalance_cb1.assign_call_cnt);
- if (rebalance_cb2.assign_call_cnt != 1)
- Test::Fail(
- tostr() << "Expecting consumer 2's assign_call_cnt to be 1 not: "
- << rebalance_cb2.assign_call_cnt);
- Test::Say("Unsubscribing consumer 1 from both topics\n");
- c1->unsubscribe();
- unsubscribed = true;
- }
-
- if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 &&
- Test::assignment_partition_count(c2, NULL) == 4) {
- /* C_1 is now unsubscribed, so its rebalance_cb will no longer be
- * called. */
- if (rebalance_cb1.assign_call_cnt != 1)
- Test::Fail(
- tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: "
- << rebalance_cb1.assign_call_cnt);
- if (rebalance_cb2.assign_call_cnt != 2)
- Test::Fail(
- tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: "
- << rebalance_cb2.assign_call_cnt);
- if (rebalance_cb1.revoke_call_cnt != 1)
- Test::Fail(
- tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: "
- << rebalance_cb1.revoke_call_cnt);
- /* The rebalance_cb should not be called if the revoked partition
- * list is empty. */
- if (rebalance_cb2.revoke_call_cnt != 0)
- Test::Fail(
- tostr() << "Expecting consumer 2's revoke_call_cnt to be 0 not: "
- << rebalance_cb2.revoke_call_cnt);
- Test::Say("Unsubscribe completed");
- done = true;
- }
- }
-
- Test::Say("Closing consumer 1\n");
- c1->close();
- Test::Say("Closing consumer 2\n");
- c2->close();
-
- /* there should be no assign rebalance_cb calls on close */
- if (rebalance_cb1.assign_call_cnt != 1)
- Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: "
- << rebalance_cb1.assign_call_cnt);
- if (rebalance_cb2.assign_call_cnt != 2)
- Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: "
- << rebalance_cb2.assign_call_cnt);
-
- /* There should be no second revoke rebalance_cb. */
- if (rebalance_cb1.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: "
- << rebalance_cb1.revoke_call_cnt);
- if (rebalance_cb2.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: "
- << rebalance_cb2.revoke_call_cnt);
-
- if (rebalance_cb1.lost_call_cnt != 0)
- Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: "
- << rebalance_cb1.lost_call_cnt);
- if (rebalance_cb2.lost_call_cnt != 0)
- Test::Fail(tostr() << "Expecting consumer 2's lost_call_cnt to be 0, not: "
- << rebalance_cb2.lost_call_cnt);
-
- delete c1;
- delete c2;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. A consumer (with no rebalance_cb) subscribes to a topic.
- * 2. The consumer calls unsubscribe.
- * 3. Consumer is closed.
- */
-
-static void m_unsubscribe_2() {
- SUB_TEST();
-
- std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
- test_create_topic(NULL, topic_name.c_str(), 2, 1);
-
- RdKafka::KafkaConsumer *c =
- make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
- test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name);
-
- bool done = false;
- bool unsubscribed = false;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (Test::assignment_partition_count(c, NULL) == 2) {
- Test::unsubscribe(c);
- unsubscribed = true;
- }
-
- if (unsubscribed && Test::assignment_partition_count(c, NULL) == 0) {
- Test::Say("Unsubscribe completed");
- done = true;
- }
- }
-
- Test::Say("Closing consumer\n");
- c->close();
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Two consumers (with rebalance_cb) subscribe to a regex (no matching
- * topics exist).
- * 2. Create two topics.
- * 3. Remove one of the topics.
- * 4. Consumers are closed.
- */
-
-static void n_wildcard() {
- SUB_TEST();
-
- const string topic_base_name = Test::mk_topic_name("0113-n_wildcard", 1);
- const string topic_name_1 = topic_base_name + "_1";
- const string topic_name_2 = topic_base_name + "_2";
- const string topic_regex = "^" + topic_base_name + "_.";
- const string group_name = Test::mk_unique_group_name("0113-n_wildcard");
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
-
- DefaultRebalanceCb rebalance_cb1;
- RdKafka::KafkaConsumer *c1 =
- make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb1, 30);
- Test::subscribe(c1, topic_regex);
-
- DefaultRebalanceCb rebalance_cb2;
- RdKafka::KafkaConsumer *c2 =
- make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb2, 30);
- Test::subscribe(c2, topic_regex);
-
- /* There are no matching topics, so the consumers should not join the group
- * initially */
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
-
- if (rebalance_cb1.assign_call_cnt != 0)
- Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: "
- << rebalance_cb1.assign_call_cnt);
- if (rebalance_cb2.assign_call_cnt != 0)
- Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: "
- << rebalance_cb2.assign_call_cnt);
-
- bool done = false;
- bool created_topics = false;
- bool deleted_topic = false;
- int last_cb1_assign_call_cnt = 0;
- int last_cb2_assign_call_cnt = 0;
- while (!done) {
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
-
- if (Test::assignment_partition_count(c1, NULL) == 0 &&
- Test::assignment_partition_count(c2, NULL) == 0 && !created_topics) {
- Test::Say(
- "Creating two topics with 2 partitions each that match regex\n");
- test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
- test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
- /* The consumers should autonomously discover these topics and start
- * consuming from them. This happens in the background - is not
- * influenced by whether we wait for the topics to be created before
- * continuing the main loop. It is possible that both topics are
- * discovered simultaneously, requiring a single rebalance OR that
- * topic 1 is discovered first (it was created first), a rebalance
- * initiated, then topic 2 discovered, then another rebalance
- * initiated to include it.
- */
- created_topics = true;
- }
-
- if (Test::assignment_partition_count(c1, NULL) == 2 &&
- Test::assignment_partition_count(c2, NULL) == 2 && !deleted_topic) {
- if (rebalance_cb1.nonempty_assign_call_cnt == 1) {
- /* just one rebalance was required */
- TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 1,
- "Expecting C_1's nonempty_assign_call_cnt to be 1 not %d ",
- rebalance_cb1.nonempty_assign_call_cnt);
- TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 1,
- "Expecting C_2's nonempty_assign_call_cnt to be 1 not %d ",
- rebalance_cb2.nonempty_assign_call_cnt);
- } else {
- /* two rebalances were required (occurs infrequently) */
- TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 2,
- "Expecting C_1's nonempty_assign_call_cnt to be 2 not %d ",
- rebalance_cb1.nonempty_assign_call_cnt);
- TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 2,
- "Expecting C_2's nonempty_assign_call_cnt to be 2 not %d ",
- rebalance_cb2.nonempty_assign_call_cnt);
- }
-
- TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 0,
- "Expecting C_1's revoke_call_cnt to be 0 not %d ",
- rebalance_cb1.revoke_call_cnt);
- TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 0,
- "Expecting C_2's revoke_call_cnt to be 0 not %d ",
- rebalance_cb2.revoke_call_cnt);
-
- last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt;
- last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt;
-
- Test::Say("Deleting topic 1\n");
- Test::delete_topic(c1, topic_name_1.c_str());
- deleted_topic = true;
- }
-
- if (Test::assignment_partition_count(c1, NULL) == 1 &&
- Test::assignment_partition_count(c2, NULL) == 1 && deleted_topic) {
- /* revoke_call_cnt also accumulates when the assignment was lost */
- TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 1,
- "Expecting C_1's revoke_call_cnt to be 1 not %d",
- rebalance_cb1.revoke_call_cnt);
- TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 1,
- "Expecting C_2's revoke_call_cnt to be 1 not %d",
- rebalance_cb2.revoke_call_cnt);
- TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1,
- "Expecting C_1's lost_call_cnt to be 1 not %d",
- rebalance_cb1.lost_call_cnt);
- TEST_ASSERT(rebalance_cb2.lost_call_cnt == 1,
- "Expecting C_2's lost_call_cnt to be 1 not %d",
- rebalance_cb2.lost_call_cnt);
-
- /* Consumers will rejoin the group after revoking the lost partitions.
- * This will result in a rebalance_cb assign (empty partitions).
- * It follows the revoke, which has already been confirmed to have
- * happened. */
- Test::Say("Waiting for rebalance_cb assigns\n");
- while (rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt ||
- rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt) {
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
- }
-
- Test::Say("Consumers are subscribed to one partition each\n");
- done = true;
- }
- }
-
- Test::Say("Closing consumer 1\n");
- last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt;
- c1->close();
-
- /* There should be no assign rebalance_cb calls on close */
- TEST_ASSERT(rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt,
- "Expecting C_1's assign_call_cnt to be %d not %d",
- last_cb1_assign_call_cnt, rebalance_cb1.assign_call_cnt);
-
- /* Let C_2 catch up on the rebalance and get assigned C_1's partitions. */
- last_cb2_assign_call_cnt = rebalance_cb2.nonempty_assign_call_cnt;
- while (rebalance_cb2.nonempty_assign_call_cnt == last_cb2_assign_call_cnt)
- Test::poll_once(c2, 500);
-
- Test::Say("Closing consumer 2\n");
- last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt;
- c2->close();
-
- /* There should be no assign rebalance_cb calls on close */
- TEST_ASSERT(rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt,
- "Expecting C_2's assign_call_cnt to be %d not %d",
- last_cb2_assign_call_cnt, rebalance_cb2.assign_call_cnt);
-
- TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 2,
- "Expecting C_1's revoke_call_cnt to be 2 not %d",
- rebalance_cb1.revoke_call_cnt);
- TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 2,
- "Expecting C_2's revoke_call_cnt to be 2 not %d",
- rebalance_cb2.revoke_call_cnt);
-
- TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1,
- "Expecting C_1's lost_call_cnt to be 1, not %d",
- rebalance_cb1.lost_call_cnt);
- TEST_ASSERT(rebalance_cb2.lost_call_cnt == 1,
- "Expecting C_2's lost_call_cnt to be 1, not %d",
- rebalance_cb2.lost_call_cnt);
-
- delete c1;
- delete c2;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * 1. Consumer (librdkafka) subscribes to two topics (2 and 6 partitions).
- * 2. Consumer (java) subscribes to the same two topics.
- * 3. Consumer (librdkafka) unsubscribes from the two partition topic.
- * 4. Consumer (java) process closes upon detecting the above unsubscribe.
- * 5. Consumer (librdkafka) will now be subscribed to 6 partitions.
- * 6. Close librdkafka consumer.
- */
-
-static void o_java_interop() {
- SUB_TEST();
-
- if (*test_conf_get(NULL, "sasl.mechanism") != '\0')
- SUB_TEST_SKIP(
- "Cluster is set up for SASL: we won't bother with that "
- "for the Java client\n");
-
- std::string topic_name_1 = Test::mk_topic_name("0113_o_2", 1);
- std::string topic_name_2 = Test::mk_topic_name("0113_o_6", 1);
- std::string group_name = Test::mk_unique_group_name("0113_o");
- test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
- test_create_topic(NULL, topic_name_2.c_str(), 6, 1);
-
- DefaultRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c = make_consumer(
- "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
-
- Test::subscribe(c, topic_name_1, topic_name_2);
-
- bool done = false;
- bool changed_subscription = false;
- bool changed_subscription_done = false;
- int java_pid = 0;
- while (!done) {
- Test::poll_once(c, 500);
-
- if (1) // FIXME: Remove after debugging
- Test::Say(tostr() << "Assignment partition count: "
- << Test::assignment_partition_count(c, NULL)
- << ", changed_sub " << changed_subscription
- << ", changed_sub_done " << changed_subscription_done
- << ", assign_call_cnt " << rebalance_cb.assign_call_cnt
- << "\n");
- if (Test::assignment_partition_count(c, NULL) == 8 && !java_pid) {
- Test::Say(_C_GRN "librdkafka consumer assigned to 8 partitions\n");
- string bootstrapServers = get_bootstrap_servers();
- const char *argv[1 + 1 + 1 + 1 + 1 + 1];
- size_t i = 0;
- argv[i++] = "test1";
- argv[i++] = bootstrapServers.c_str();
- argv[i++] = topic_name_1.c_str();
- argv[i++] = topic_name_2.c_str();
- argv[i++] = group_name.c_str();
- argv[i] = NULL;
- java_pid = test_run_java("IncrementalRebalanceCli", argv);
- if (java_pid <= 0)
- Test::Fail(tostr() << "Unexpected pid: " << java_pid);
- }
-
- if (Test::assignment_partition_count(c, NULL) == 4 && java_pid != 0 &&
- !changed_subscription) {
- if (rebalance_cb.assign_call_cnt != 2)
- Test::Fail(tostr() << "Expecting consumer's assign_call_cnt to be 2, "
- "not "
- << rebalance_cb.assign_call_cnt);
- Test::Say(_C_GRN "Java consumer is now part of the group\n");
- Test::subscribe(c, topic_name_1);
- changed_subscription = true;
- }
-
- /* Depending on the timing of resubscribe rebalancing and the
- * Java consumer terminating we might have one or two rebalances,
- * hence the fuzzy <=5 and >=5 checks. */
- if (Test::assignment_partition_count(c, NULL) == 2 &&
- changed_subscription && rebalance_cb.assign_call_cnt <= 5 &&
- !changed_subscription_done) {
- /* All topic 1 partitions will be allocated to this consumer whether or
- * not the Java consumer has unsubscribed yet because the sticky algorithm
- * attempts to ensure partition counts are even. */
- Test::Say(_C_GRN "Consumer 1 has unsubscribed from topic 2\n");
- changed_subscription_done = true;
- }
-
- if (Test::assignment_partition_count(c, NULL) == 2 &&
- changed_subscription && rebalance_cb.assign_call_cnt >= 5 &&
- changed_subscription_done) {
- /* When the java consumer closes, this will cause an empty assign
- * rebalance_cb event, allowing detection of when this has happened. */
- Test::Say(_C_GRN "Java consumer has left the group\n");
- done = true;
- }
- }
-
- Test::Say("Closing consumer\n");
- c->close();
-
- /* Expected behavior is IncrementalRebalanceCli will exit cleanly, timeout
- * otherwise. */
- test_waitpid(java_pid);
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * - Single consumer subscribes to topic.
- * - Soon after (timing such that rebalance is probably in progress) it
- * subscribes to a different topic.
- */
-
-static void s_subscribe_when_rebalancing(int variation) {
- SUB_TEST("variation %d", variation);
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string topic_name_2 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string topic_name_3 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
- test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
- test_create_topic(NULL, topic_name_2.c_str(), 1, 1);
- test_create_topic(NULL, topic_name_3.c_str(), 1, 1);
-
- DefaultRebalanceCb rebalance_cb;
- RdKafka::KafkaConsumer *c = make_consumer(
- "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25);
- test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
- test_wait_topic_exists(c->c_ptr(), topic_name_3.c_str(), 10 * 1000);
-
- if (variation == 2 || variation == 4 || variation == 6) {
- /* Pre-cache metadata for all topics. */
- class RdKafka::Metadata *metadata;
- c->metadata(true, NULL, &metadata, 5000);
- delete metadata;
- }
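- /* With metadata already cached the subsequent subscribe()s need not
- * wait for a metadata round-trip, exercising a different rebalance
- * timing than the uncached variations. */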
-
- Test::subscribe(c, topic_name_1);
- Test::wait_for_assignment(c, 1, &topic_name_1);
-
- Test::subscribe(c, topic_name_2);
-
- if (variation == 3 || variation == 5)
- Test::poll_once(c, 500);
-
- if (variation < 5) {
- // Very quickly after subscribing to topic 2, subscribe to topic 3.
- Test::subscribe(c, topic_name_3);
- Test::wait_for_assignment(c, 1, &topic_name_3);
- } else {
- // ..or unsubscribe.
- Test::unsubscribe(c);
- Test::wait_for_assignment(c, 0, NULL);
- }
-
- delete c;
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check behavior when:
- * - Two consumers subscribe to a topic.
- * - Max poll interval is exceeded on the first consumer.
- */
-
-static void t_max_poll_interval_exceeded(int variation) {
- SUB_TEST("variation %d", variation);
-
- std::string topic_name_1 =
- Test::mk_topic_name("0113-cooperative_rebalance", 1);
- std::string group_name =
- Test::mk_unique_group_name("0113-cooperative_rebalance");
- test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
-
- std::vector<std::pair<std::string, std::string> > additional_conf;
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("session.timeout.ms"), std::string("6000")));
- additional_conf.push_back(std::pair<std::string, std::string>(
- std::string("max.poll.interval.ms"), std::string("7000")));
-
- DefaultRebalanceCb rebalance_cb1;
- RdKafka::KafkaConsumer *c1 =
- make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb1, 30);
- DefaultRebalanceCb rebalance_cb2;
- RdKafka::KafkaConsumer *c2 =
- make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf,
- &rebalance_cb2, 30);
-
- test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000);
- test_wait_topic_exists(c2->c_ptr(), topic_name_1.c_str(), 10 * 1000);
-
- Test::subscribe(c1, topic_name_1);
- Test::subscribe(c2, topic_name_1);
-
- bool done = false;
- bool both_have_been_assigned = false;
- while (!done) {
- if (!both_have_been_assigned)
- Test::poll_once(c1, 500);
- Test::poll_once(c2, 500);
-
- if (Test::assignment_partition_count(c1, NULL) == 1 &&
- Test::assignment_partition_count(c2, NULL) == 1 &&
- !both_have_been_assigned) {
- Test::Say(
- tostr()
- << "Both consumers are assigned to topic " << topic_name_1
- << ". WAITING 7 seconds for max.poll.interval.ms to be exceeded\n");
- both_have_been_assigned = true;
- }
-
- if (Test::assignment_partition_count(c2, NULL) == 2 &&
- both_have_been_assigned) {
- Test::Say("Consumer 1 is no longer assigned any partitions, done\n");
- done = true;
- }
- }
-
- if (variation == 1) {
- if (rebalance_cb1.lost_call_cnt != 0)
- Test::Fail(
- tostr() << "Expected consumer 1 lost revoke count to be 0, not: "
- << rebalance_cb1.lost_call_cnt);
- Test::poll_once(c1,
- 500); /* Eat the max poll interval exceeded error message */
- Test::poll_once(c1,
- 500); /* Trigger the rebalance_cb with lost partitions */
- if (rebalance_cb1.lost_call_cnt != 1)
- Test::Fail(
- tostr() << "Expected consumer 1 lost revoke count to be 1, not: "
- << rebalance_cb1.lost_call_cnt);
- }
-
- c1->close();
- c2->close();
-
- if (rebalance_cb1.lost_call_cnt != 1)
- Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 1, not: "
- << rebalance_cb1.lost_call_cnt);
-
- if (rebalance_cb1.assign_call_cnt != 1)
- Test::Fail(tostr() << "Expected consumer 1 assign count to be 1, not: "
- << rebalance_cb1.assign_call_cnt);
- if (rebalance_cb2.assign_call_cnt != 2)
- Test::Fail(tostr() << "Expected consumer 1 assign count to be 2, not: "
- << rebalance_cb1.assign_call_cnt);
-
- if (rebalance_cb1.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expected consumer 1 revoke count to be 1, not: "
- << rebalance_cb1.revoke_call_cnt);
- if (rebalance_cb2.revoke_call_cnt != 1)
- Test::Fail(tostr() << "Expected consumer 2 revoke count to be 1, not: "
- << rebalance_cb2.revoke_call_cnt);
-
- delete c1;
- delete c2;
-
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief Poll all consumers until there are no more events or messages
- * and the timeout has expired.
- */
-static void poll_all_consumers(RdKafka::KafkaConsumer **consumers,
- DefaultRebalanceCb *rebalance_cbs,
- size_t num,
- int timeout_ms) {
- int64_t ts_end = test_clock() + (timeout_ms * 1000);
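- /* test_clock() returns microseconds, hence the ms-to-us conversion. */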
-
- /* Poll all consumers until no more events are seen,
- * this makes sure we exhaust the current state events before returning. */
- bool evented;
- do {
- evented = false;
- for (size_t i = 0; i < num; i++) {
- int block_ms = min(10, (int)((ts_end - test_clock()) / 1000));
- while (rebalance_cbs[i].poll_once(consumers[i], max(block_ms, 0)))
- evented = true;
- }
- } while (evented || test_clock() < ts_end);
-}
-
-
-/**
- * @brief Stress test with 8 consumers subscribing, fetching and committing.
- *
- * @param subscription_variation 0..2
- *
- * TODO: incorporate committing offsets.
- */
-
-static void u_multiple_subscription_changes(bool use_rebalance_cb,
- int subscription_variation) {
- const int N_CONSUMERS = 8;
- const int N_TOPICS = 2;
- const int N_PARTS_PER_TOPIC = N_CONSUMERS * N_TOPICS;
- const int N_PARTITIONS = N_PARTS_PER_TOPIC * N_TOPICS;
- const int N_MSGS_PER_PARTITION = 1000;
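- /* Note: with 8 consumers and 2 topics this gives 16 partitions per
- * topic (32 in total), each seeded with 1000 messages. */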
-
- SUB_TEST("use_rebalance_cb: %d, subscription_variation: %d",
- (int)use_rebalance_cb, subscription_variation);
-
- string topic_name_1 = Test::mk_topic_name("0113u_1", 1);
- string topic_name_2 = Test::mk_topic_name("0113u_2", 1);
- string group_name = Test::mk_unique_group_name("0113u");
-
- test_create_topic(NULL, topic_name_1.c_str(), N_PARTS_PER_TOPIC, 1);
- test_create_topic(NULL, topic_name_2.c_str(), N_PARTS_PER_TOPIC, 1);
-
- Test::Say("Creating consumers\n");
- DefaultRebalanceCb rebalance_cbs[N_CONSUMERS];
- RdKafka::KafkaConsumer *consumers[N_CONSUMERS];
-
- for (int i = 0; i < N_CONSUMERS; i++) {
- std::string name = tostr() << "C_" << i;
- consumers[i] =
- make_consumer(name.c_str(), group_name, "cooperative-sticky", NULL,
- use_rebalance_cb ? &rebalance_cbs[i] : NULL, 120);
- }
-
- test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_1.c_str(),
- 10 * 1000);
- test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_2.c_str(),
- 10 * 1000);
-
-
- /*
- * Seed all partitions with the same number of messages so we later can
- * verify that consumption is working.
- */
- vector<pair<Toppar, int> > ptopics;
- ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_1, N_PARTS_PER_TOPIC),
- N_MSGS_PER_PARTITION));
- ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_2, N_PARTS_PER_TOPIC),
- N_MSGS_PER_PARTITION));
- produce_msgs(ptopics);
-
-
- /*
- * Track what topics a consumer should be subscribed to and use this to
- * verify both its subscription and assignment throughout the test.
- */
-
- /* consumer -> currently subscribed topics */
- map<int, vector<string> > consumer_topics;
-
- /* topic -> consumers subscribed to topic */
- map<string, set<int> > topic_consumers;
-
- /* The subscription alternatives that consumers
- * alternate between in the playbook. */
- vector<string> SUBSCRIPTION_1;
- vector<string> SUBSCRIPTION_2;
-
- SUBSCRIPTION_1.push_back(topic_name_1);
-
- switch (subscription_variation) {
- case 0:
- SUBSCRIPTION_2.push_back(topic_name_1);
- SUBSCRIPTION_2.push_back(topic_name_2);
- break;
-
- case 1:
- SUBSCRIPTION_2.push_back(topic_name_2);
- break;
-
- case 2:
- /* No subscription */
- break;
- }
-
- sort(SUBSCRIPTION_1.begin(), SUBSCRIPTION_1.end());
- sort(SUBSCRIPTION_2.begin(), SUBSCRIPTION_2.end());
-
-
- /*
- * Define playbook
- */
- const struct {
- int timestamp_ms;
- int consumer;
- const vector<string> *topics;
- } playbook[] = {/* timestamp_ms, consumer_number, subscribe-to-topics */
- {0, 0, &SUBSCRIPTION_1}, /* Cmd 0 */
- {4000, 1, &SUBSCRIPTION_1}, {4000, 1, &SUBSCRIPTION_1},
- {4000, 1, &SUBSCRIPTION_1}, {4000, 2, &SUBSCRIPTION_1},
- {6000, 3, &SUBSCRIPTION_1}, /* Cmd 5 */
- {6000, 4, &SUBSCRIPTION_1}, {6000, 5, &SUBSCRIPTION_1},
- {6000, 6, &SUBSCRIPTION_1}, {6000, 7, &SUBSCRIPTION_2},
- {6000, 1, &SUBSCRIPTION_1}, /* Cmd 10 */
- {6000, 1, &SUBSCRIPTION_2}, {6000, 1, &SUBSCRIPTION_1},
- {6000, 2, &SUBSCRIPTION_2}, {7000, 2, &SUBSCRIPTION_1},
- {7000, 1, &SUBSCRIPTION_2}, /* Cmd 15 */
- {8000, 0, &SUBSCRIPTION_2}, {8000, 1, &SUBSCRIPTION_1},
- {8000, 0, &SUBSCRIPTION_1}, {13000, 2, &SUBSCRIPTION_1},
- {13000, 1, &SUBSCRIPTION_2}, /* Cmd 20 */
- {13000, 5, &SUBSCRIPTION_2}, {14000, 6, &SUBSCRIPTION_2},
- {15000, 7, &SUBSCRIPTION_1}, {15000, 1, &SUBSCRIPTION_1},
- {15000, 5, &SUBSCRIPTION_1}, /* Cmd 25 */
- {15000, 6, &SUBSCRIPTION_1}, {INT_MAX, 0, 0}};
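- /* Note: timestamp_ms values are absolute offsets from test start,
- * not deltas; the loop below waits until each command's timestamp
- * before issuing its subscription change. */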
-
- /*
- * Run the playbook
- */
- int cmd_number = 0;
- uint64_t ts_start = test_clock();
-
- while (playbook[cmd_number].timestamp_ms != INT_MAX) {
- TEST_ASSERT(playbook[cmd_number].consumer < N_CONSUMERS);
-
- Test::Say(tostr() << "Cmd #" << cmd_number << ": wait "
- << playbook[cmd_number].timestamp_ms << "ms\n");
-
- poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS,
- playbook[cmd_number].timestamp_ms -
- (int)((test_clock() - ts_start) / 1000));
-
- /* Verify consumer assignments match subscribed topics */
- map<Toppar, RdKafka::KafkaConsumer *> all_assignments;
- for (int i = 0; i < N_CONSUMERS; i++)
- verify_consumer_assignment(
- consumers[i], rebalance_cbs[i], consumer_topics[i],
- /* Allow empty assignment */
- true,
- /* Allow mismatch between subscribed topics
- * and actual assignment since we can't
- * synchronize the last subscription
- * to the current assignment due to
- * an unknown number of rebalances required
- * for the final assignment to settle.
- * This is instead checked at the end of
- * this test case. */
- true, &all_assignments, -1 /* no msgcnt check*/);
-
- int cid = playbook[cmd_number].consumer;
- RdKafka::KafkaConsumer *consumer = consumers[playbook[cmd_number].consumer];
- const vector<string> *topics = playbook[cmd_number].topics;
-
- /*
- * Update our view of the consumer's subscribed topics and vice versa.
- */
- for (vector<string>::const_iterator it = consumer_topics[cid].begin();
- it != consumer_topics[cid].end(); it++) {
- topic_consumers[*it].erase(cid);
- }
-
- consumer_topics[cid].clear();
-
- for (vector<string>::const_iterator it = topics->begin();
- it != topics->end(); it++) {
- consumer_topics[cid].push_back(*it);
- topic_consumers[*it].insert(cid);
- }
-
- RdKafka::ErrorCode err;
-
- /*
- * Change subscription
- */
- if (!topics->empty()) {
- Test::Say(tostr() << "Consumer: " << consumer->name()
- << " is subscribing to topics "
- << string_vec_to_str(*topics) << " after "
- << ((test_clock() - ts_start) / 1000) << "ms\n");
- err = consumer->subscribe(*topics);
- TEST_ASSERT(!err, "Expected subscribe() to succeed, got %s",
- RdKafka::err2str(err).c_str());
- } else {
- Test::Say(tostr() << "Consumer: " << consumer->name()
- << " is unsubscribing after "
- << ((test_clock() - ts_start) / 1000) << "ms\n");
- Test::unsubscribe(consumer);
- }
-
- /* Mark this consumer as waiting for rebalance so that
- * verify_consumer_assignment() allows assigned partitions that
- * (no longer) match the subscription. */
- rebalance_cbs[cid].wait_rebalance = true;
-
-
- /*
- * Verify subscription matches what we think it should be.
- */
- vector<string> subscription;
- err = consumer->subscription(subscription);
- TEST_ASSERT(!err, "consumer %s subscription() failed: %s",
- consumer->name().c_str(), RdKafka::err2str(err).c_str());
-
- sort(subscription.begin(), subscription.end());
-
- Test::Say(tostr() << "Consumer " << consumer->name()
- << " subscription is now "
- << string_vec_to_str(subscription) << "\n");
-
- if (subscription != *topics)
- Test::Fail(tostr() << "Expected consumer " << consumer->name()
- << " subscription: " << string_vec_to_str(*topics)
- << " but got: " << string_vec_to_str(subscription));
-
- cmd_number++;
- }
-
-
- /*
- * Wait for final rebalances and all consumers to settle,
- * then verify assignments and received message counts.
- */
- Test::Say(_C_YEL "Waiting for final assignment state\n");
- int done_count = 0;
- /* Allow at least 20 seconds for group to stabilize. */
- int64_t stabilize_until = test_clock() + (20 * 1000 * 1000); /* 20s */
-
- while (done_count < 2) {
- bool stabilized = test_clock() > stabilize_until;
-
- poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS, 5000);
-
- /* Verify consumer assignments */
- int counts[N_CONSUMERS];
- map<Toppar, RdKafka::KafkaConsumer *> all_assignments;
- Test::Say(tostr() << "Consumer assignments "
- << "(subscription_variation " << subscription_variation
- << ")" << (stabilized ? " (stabilized)" : "")
- << (use_rebalance_cb ? " (use_rebalance_cb)"
- : " (no rebalance cb)")
- << ":\n");
- for (int i = 0; i < N_CONSUMERS; i++) {
- bool last_rebalance_stabilized =
- stabilized &&
- (!use_rebalance_cb ||
- /* session.timeout.ms * 2 + 1 */
- test_clock() > rebalance_cbs[i].ts_last_assign + (13 * 1000 * 1000));
-
- counts[i] = verify_consumer_assignment(
- consumers[i], rebalance_cbs[i], consumer_topics[i],
- /* allow empty */
- true,
- /* if we're waiting for a
- * rebalance it is okay for the
- * current assignment to contain
- * topics that this consumer
- * (no longer) subscribes to. */
- !last_rebalance_stabilized || !use_rebalance_cb ||
- rebalance_cbs[i].wait_rebalance,
- /* do not allow assignments for
- * topics that are not subscribed*/
- &all_assignments,
- /* Verify received message counts
- * once the assignments have
- * stabilized.
- * Requires the rebalance cb.*/
- done_count > 0 && use_rebalance_cb ? N_MSGS_PER_PARTITION : -1);
- }
-
- Test::Say(tostr() << all_assignments.size() << "/" << N_PARTITIONS
- << " partitions assigned\n");
-
- bool done = true;
- for (int i = 0; i < N_CONSUMERS; i++) {
- /* For each topic the consumer subscribes to it should
- * be assigned its share of partitions. */
- int exp_parts = 0;
- for (vector<string>::const_iterator it = consumer_topics[i].begin();
- it != consumer_topics[i].end(); it++)
- exp_parts += N_PARTS_PER_TOPIC / (int)topic_consumers[*it].size();
-
- Test::Say(tostr() << (counts[i] == exp_parts ? "" : _C_YEL) << "Consumer "
- << consumers[i]->name() << " has " << counts[i]
- << " assigned partitions (" << consumer_topics[i].size()
- << " subscribed topic(s))"
- << ", expecting " << exp_parts
- << " assigned partitions\n");
-
- if (counts[i] != exp_parts)
- done = false;
- }
-
- if (done && stabilized) {
- done_count++;
- Test::Say(tostr() << "All assignments verified, done count is "
- << done_count << "\n");
- }
- }
-
- Test::Say("Disposing consumers\n");
- for (int i = 0; i < N_CONSUMERS; i++) {
- TEST_ASSERT(!use_rebalance_cb || !rebalance_cbs[i].wait_rebalance,
- "Consumer %d still waiting for rebalance", i);
- if (i & 1)
- consumers[i]->close();
- delete consumers[i];
- }
-
- SUB_TEST_PASS();
-}
-
-
-
-extern "C" {
-
-static int rebalance_cnt;
-static rd_kafka_resp_err_t rebalance_exp_event;
-static rd_bool_t rebalance_exp_lost;
-
-extern void test_print_partition_list(
- const rd_kafka_topic_partition_list_t *partitions);
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- rebalance_cnt++;
- TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt,
- rd_kafka_err2name(err), parts->cnt);
-
- test_print_partition_list(parts);
-
- TEST_ASSERT(err == rebalance_exp_event ||
- rebalance_exp_event == RD_KAFKA_RESP_ERR_NO_ERROR,
- "Expected rebalance event %s, not %s",
- rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err));
-
- if (rebalance_exp_lost) {
- TEST_ASSERT(rd_kafka_assignment_lost(rk), "Expected partitions lost");
- TEST_SAY("Partitions were lost\n");
- }
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- test_consumer_incremental_assign("assign", rk, parts);
- } else {
- test_consumer_incremental_unassign("unassign", rk, parts);
- }
-}
-
-/**
- * @brief Wait for an expected rebalance event, or fail.
- */
-static void expect_rebalance0(const char *func,
- int line,
- const char *what,
- rd_kafka_t *c,
- rd_kafka_resp_err_t exp_event,
- rd_bool_t exp_lost,
- int timeout_s) {
- int64_t tmout = test_clock() + (timeout_s * 1000000);
- int start_cnt = rebalance_cnt;
-
- TEST_SAY("%s:%d: Waiting for %s (%s) for %ds\n", func, line, what,
- rd_kafka_err2name(exp_event), timeout_s);
-
- rebalance_exp_lost = exp_lost;
- rebalance_exp_event = exp_event;
-
- while (tmout > test_clock() && rebalance_cnt == start_cnt) {
- test_consumer_poll_once(c, NULL, 1000);
- }
-
- if (rebalance_cnt == start_cnt + 1) {
- rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR;
- rebalance_exp_lost = exp_lost = rd_false;
- return;
- }
-
- TEST_FAIL("%s:%d: Timed out waiting for %s (%s)", func, line, what,
- rd_kafka_err2name(exp_event));
-}
-
-#define expect_rebalance(WHAT, C, EXP_EVENT, EXP_LOST, TIMEOUT_S) \
- expect_rebalance0(__FUNCTION__, __LINE__, WHAT, C, EXP_EVENT, EXP_LOST, \
- TIMEOUT_S)
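-
-/* The macro captures the caller's function and line so a failed wait
- * is reported at the call site rather than inside expect_rebalance0(). */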
-
-
-/* Check lost partitions revoke occurs on ILLEGAL_GENERATION heartbeat error.
- */
-
-static void p_lost_partitions_heartbeat_illegal_generation_test() {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- const char *groupid = "mygroup";
- const char *topic = "test";
- rd_kafka_t *c;
- rd_kafka_conf_t *conf;
-
- SUB_TEST_QUICK();
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10",
- "security.protocol", "plaintext", NULL);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "security.protocol", "PLAINTEXT");
- test_conf_set(conf, "group.id", groupid);
- test_conf_set(conf, "session.timeout.ms", "5000");
- test_conf_set(conf, "heartbeat.interval.ms", "1000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
-
- c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c, topic);
-
- expect_rebalance("initial assignment", c,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rd_false /*don't expect lost*/, 5 + 2);
-
- /* Fail heartbeats */
- rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Heartbeat, 5,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
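- /* Five consecutive errors (heartbeats are sent every 1000ms) should
- * keep the heartbeats failing long enough for the consumer to treat
- * its assignment as lost. */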
-
- expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- rd_true /*expect lost*/, 10 + 2);
-
- rd_kafka_mock_clear_request_errors(mcluster, RD_KAFKAP_Heartbeat);
-
- expect_rebalance("rejoin after lost", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rd_false /*don't expect lost*/, 10 + 2);
-
- TEST_SAY("Closing consumer\n");
- test_consumer_close(c);
-
- TEST_SAY("Destroying consumer\n");
- rd_kafka_destroy(c);
-
- TEST_SAY("Destroying mock cluster\n");
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check lost partitions revoke occurs on ILLEGAL_GENERATION JoinGroup
- * or SyncGroup error.
- */
-
-static void q_lost_partitions_illegal_generation_test(
- rd_bool_t test_joingroup_fail) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- const char *groupid = "mygroup";
- const char *topic1 = "test1";
- const char *topic2 = "test2";
- rd_kafka_t *c;
- rd_kafka_conf_t *conf;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *topics;
-
- SUB_TEST0(!test_joingroup_fail /*quick*/, "test_joingroup_fail=%d",
- test_joingroup_fail);
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
-
- /* Seed the topic1 with messages */
- test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10",
- "security.protocol", "plaintext", NULL);
-
- /* Seed the topic2 with messages */
- test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10",
- "security.protocol", "plaintext", NULL);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "security.protocol", "PLAINTEXT");
- test_conf_set(conf, "group.id", groupid);
- test_conf_set(conf, "session.timeout.ms", "5000");
- test_conf_set(conf, "heartbeat.interval.ms", "1000");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
-
- c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c, topic1);
-
- expect_rebalance("initial assignment", c,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rd_false /*don't expect lost*/, 5 + 2);
-
- /* Fail JoinGroups or SyncGroups */
- rd_kafka_mock_push_request_errors(
- mcluster, test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup,
- 5, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
-
- topics = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_add(topics, topic1, RD_KAFKA_PARTITION_UA);
- rd_kafka_topic_partition_list_add(topics, topic2, RD_KAFKA_PARTITION_UA);
- err = rd_kafka_subscribe(c, topics);
- if (err)
- TEST_FAIL("%s: Failed to subscribe to topics: %s\n", rd_kafka_name(c),
- rd_kafka_err2str(err));
- rd_kafka_topic_partition_list_destroy(topics);
-
- expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- rd_true /*expect lost*/, 10 + 2);
-
- rd_kafka_mock_clear_request_errors(mcluster, test_joingroup_fail
- ? RD_KAFKAP_JoinGroup
- : RD_KAFKAP_SyncGroup);
-
- expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rd_false /*don't expect lost*/, 10 + 2);
-
- TEST_SAY("Closing consumer\n");
- test_consumer_close(c);
-
- TEST_SAY("Destroying consumer\n");
- rd_kafka_destroy(c);
-
- TEST_SAY("Destroying mock cluster\n");
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-
-
-/* Check lost partitions revoke occurs on ILLEGAL_GENERATION Commit
- * error.
- */
-
-static void r_lost_partitions_commit_illegal_generation_test_local() {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
- const char *groupid = "mygroup";
- const char *topic = "test";
- const int msgcnt = 100;
- rd_kafka_t *c;
- rd_kafka_conf_t *conf;
-
- SUB_TEST();
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10",
- "security.protocol", "plaintext", NULL);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "security.protocol", "PLAINTEXT");
- test_conf_set(conf, "group.id", groupid);
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
-
- c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c, topic);
-
- expect_rebalance("initial assignment", c,
- RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rd_false /*don't expect lost*/, 5 + 2);
-
-
- /* Consume some messages so that the commit has something to commit. */
- test_consumer_poll("consume", c, -1, -1, -1, msgcnt / 2, NULL);
-
- /* Fail Commit */
- rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 5,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
- RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
-
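- /* Synchronous commit; the injected ILLEGAL_GENERATION responses
- * should make the consumer treat its assignment as lost. */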
- rd_kafka_commit(c, NULL, rd_false);
-
- expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
- rd_true /*expect lost*/, 10 + 2);
-
- expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
- rd_false /*don't expect lost*/, 20 + 2);
-
- TEST_SAY("Closing consumer\n");
- test_consumer_close(c);
-
- TEST_SAY("Destroying consumer\n");
- rd_kafka_destroy(c);
-
- TEST_SAY("Destroying mock cluster\n");
- test_mock_cluster_destroy(mcluster);
-}
-
-
-/**
- * @brief Rebalance callback for the v_.. test below.
- */
-static void v_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- bool *auto_commitp = (bool *)opaque;
-
- TEST_SAY("%s: %s: %d partition(s)%s\n", rd_kafka_name(rk),
- rd_kafka_err2name(err), parts->cnt,
- rd_kafka_assignment_lost(rk) ? " - assignment lost" : "");
-
- test_print_partition_list(parts);
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- test_consumer_incremental_assign("assign", rk, parts);
- } else {
- test_consumer_incremental_unassign("unassign", rk, parts);
-
- if (!*auto_commitp) {
- rd_kafka_resp_err_t commit_err;
-
- TEST_SAY("Attempting manual commit after unassign, in 2 seconds..\n");
- /* Sleep enough to have the generation-id bumped by rejoin. */
- rd_sleep(2);
- commit_err = rd_kafka_commit(rk, NULL, 0 /*sync*/);
- TEST_ASSERT(!commit_err || commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
- commit_err == RD_KAFKA_RESP_ERR__DESTROY,
- "%s: manual commit failed: %s", rd_kafka_name(rk),
- rd_kafka_err2str(commit_err));
- }
- }
-}
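Stripped of the test harness, the callback above reduces to the standard cooperative-rebalance pattern. A sketch of the application-level equivalent, where the logging is illustrative:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void on_rebalance(rd_kafka_t *rk,
                             rd_kafka_resp_err_t err,
                             rd_kafka_topic_partition_list_t *parts,
                             void *opaque) {
            rd_kafka_error_t *error;

            if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
                    error = rd_kafka_incremental_assign(rk, parts);
            } else {
                    /* Lost partitions may no longer be committable. */
                    if (rd_kafka_assignment_lost(rk))
                            fprintf(stderr, "%s: assignment lost\n",
                                    rd_kafka_name(rk));
                    error = rd_kafka_incremental_unassign(rk, parts);
            }

            if (error) {
                    fprintf(stderr, "incremental (un)assign failed: %s\n",
                            rd_kafka_error_string(error));
                    rd_kafka_error_destroy(error);
            }
    }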
-
-/**
- * @brief Commit callback for the v_.. test.
- */
-static void v_commit_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- TEST_SAY("%s offset commit for %d offsets: %s\n", rd_kafka_name(rk),
- offsets ? offsets->cnt : -1, rd_kafka_err2name(err));
- TEST_ASSERT(!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
- err == RD_KAFKA_RESP_ERR__DESTROY /* consumer was closed */,
- "%s offset commit failed: %s", rd_kafka_name(rk),
- rd_kafka_err2str(err));
-}
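Wiring such a commit callback into a client configuration is plain conf plumbing. A minimal C sketch, with an illustrative callback body that treats NO_OFFSET as benign:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    /* Invoked from poll calls whenever an offset commit completes. */
    static void on_commit(rd_kafka_t *rk, rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *offsets,
                          void *opaque) {
            /* NO_OFFSET just means there was nothing new to commit. */
            if (err && err != RD_KAFKA_RESP_ERR__NO_OFFSET)
                    fprintf(stderr, "commit failed: %s\n",
                            rd_kafka_err2str(err));
    }

    static rd_kafka_conf_t *make_conf(void) {
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_conf_set_offset_commit_cb(conf, on_commit);
            return conf;
    }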
-
-
-static void v_commit_during_rebalance(bool with_rebalance_cb,
- bool auto_commit) {
- rd_kafka_t *p, *c1, *c2;
- rd_kafka_conf_t *conf;
- const char *topic = test_mk_topic_name("0113_v", 1);
- const int partition_cnt = 6;
- const int msgcnt_per_partition = 100;
- const int msgcnt = partition_cnt * msgcnt_per_partition;
- uint64_t testid;
- int i;
-
-
- SUB_TEST("With%s rebalance callback and %s-commit",
- with_rebalance_cb ? "" : "out", auto_commit ? "auto" : "manual");
-
- test_conf_init(&conf, NULL, 30);
- testid = test_id_generate();
-
- /*
- * Produce messages to topic
- */
- p = test_create_producer();
-
- test_create_topic(p, topic, partition_cnt, 1);
-
- for (i = 0; i < partition_cnt; i++) {
- test_produce_msgs2(p, topic, testid, i, i * msgcnt_per_partition,
- msgcnt_per_partition, NULL, 0);
- }
-
- test_flush(p, -1);
-
- rd_kafka_destroy(p);
-
-
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit", auto_commit ? "true" : "false");
- test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
- rd_kafka_conf_set_offset_commit_cb(conf, v_commit_cb);
- rd_kafka_conf_set_opaque(conf, (void *)&auto_commit);
-
- TEST_SAY("Create and subscribe first consumer\n");
- c1 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL,
- rd_kafka_conf_dup(conf), NULL);
- TEST_ASSERT(rd_kafka_opaque(c1) == (void *)&auto_commit,
- "c1 opaque mismatch");
- test_consumer_subscribe(c1, topic);
-
- /* Consume some messages so that we know we have an assignment
- * and something to commit. */
- test_consumer_poll("C1.PRECONSUME", c1, testid, -1, 0,
- msgcnt / partition_cnt / 2, NULL);
-
- TEST_SAY("Create and subscribe second consumer\n");
- c2 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL,
- conf, NULL);
- TEST_ASSERT(rd_kafka_opaque(c2) == (void *)&auto_commit,
- "c2 opaque mismatch");
- test_consumer_subscribe(c2, topic);
-
- /* Poll both consumers */
- for (i = 0; i < 10; i++) {
- test_consumer_poll_once(c1, NULL, 1000);
- test_consumer_poll_once(c2, NULL, 1000);
- }
-
- TEST_SAY("Closing consumers\n");
- test_consumer_close(c1);
- test_consumer_close(c2);
-
- rd_kafka_destroy(c1);
- rd_kafka_destroy(c2);
-
- SUB_TEST_PASS();
-}
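The manual-commit path exercised here hinges on rd_kafka_commit()'s third argument selecting synchronous (0) versus asynchronous (1) mode. A short sketch, assuming an existing consumer handle:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_resp_err_t commit_current(rd_kafka_t *rk, int async) {
            /* Passing NULL commits the current positions of the whole
             * assignment. With async=0 the call blocks until the commit
             * completes or fails; with async=1 it returns immediately and
             * the result is delivered to the configured offset_commit_cb
             * from a later poll call. */
            return rd_kafka_commit(rk, NULL, async);
    }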
-
-
-/**
- * @brief Verify that incremental rebalances retain stickiness.
- */
-static void x_incremental_rebalances(void) {
-#define _NUM_CONS 3
- rd_kafka_t *c[_NUM_CONS];
- rd_kafka_conf_t *conf;
- const char *topic = test_mk_topic_name("0113_x", 1);
- int i;
-
- SUB_TEST();
- test_conf_init(&conf, NULL, 60);
-
- test_create_topic(NULL, topic, 6, 1);
-
- test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
- for (i = 0; i < _NUM_CONS; i++) {
- char clientid[32];
- rd_snprintf(clientid, sizeof(clientid), "consumer%d", i);
- test_conf_set(conf, "client.id", clientid);
-
- c[i] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
- }
- rd_kafka_conf_destroy(conf);
-
- /* First consumer joins group */
- TEST_SAY("%s: joining\n", rd_kafka_name(c[0]));
- test_consumer_subscribe(c[0], topic);
- test_consumer_wait_assignment(c[0], rd_true /*poll*/);
- test_consumer_verify_assignment(c[0], rd_true /*fail immediately*/, topic, 0,
- topic, 1, topic, 2, topic, 3, topic, 4, topic,
- 5, NULL);
-
-
- /* Second consumer joins group */
- TEST_SAY("%s: joining\n", rd_kafka_name(c[1]));
- test_consumer_subscribe(c[1], topic);
- test_consumer_wait_assignment(c[1], rd_true /*poll*/);
- rd_sleep(3);
- test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3,
- topic, 4, topic, 5, NULL);
- test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0,
- topic, 1, topic, 2, NULL);
-
- /* Third consumer joins group */
- TEST_SAY("%s: joining\n", rd_kafka_name(c[2]));
- test_consumer_subscribe(c[2], topic);
- test_consumer_wait_assignment(c[2], rd_true /*poll*/);
- rd_sleep(3);
- test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4,
- topic, 5, NULL);
- test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1,
- topic, 2, NULL);
- test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 3,
- topic, 0, NULL);
-
- /* Raise any previously failed verify_assignment calls and fail the test */
- TEST_LATER_CHECK();
-
- for (i = 0; i < _NUM_CONS; i++)
- rd_kafka_destroy(c[i]);
-
- SUB_TEST_PASS();
-
-#undef _NUM_CONS
-}
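To observe the stickiness outside the harness, an application can read back its current assignment with the public query API. A sketch; the printing is illustrative:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void print_assignment(rd_kafka_t *rk) {
            rd_kafka_topic_partition_list_t *assignment;
            int i;

            /* Returns the consumer's current partition assignment. */
            if (rd_kafka_assignment(rk, &assignment))
                    return; /* no assignment / error */

            for (i = 0; i < assignment->cnt; i++)
                    printf("%s [%d]\n", assignment->elems[i].topic,
                           (int)assignment->elems[i].partition);

            rd_kafka_topic_partition_list_destroy(assignment);
    }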
-
-/* Local tests not needing a cluster */
-int main_0113_cooperative_rebalance_local(int argc, char **argv) {
- a_assign_rapid();
- p_lost_partitions_heartbeat_illegal_generation_test();
- q_lost_partitions_illegal_generation_test(rd_false /*joingroup*/);
- q_lost_partitions_illegal_generation_test(rd_true /*syncgroup*/);
- r_lost_partitions_commit_illegal_generation_test_local();
- return 0;
-}
-
-int main_0113_cooperative_rebalance(int argc, char **argv) {
- int i;
-
- a_assign_tests();
- b_subscribe_with_cb_test(true /*close consumer*/);
- b_subscribe_with_cb_test(false /*don't close consumer*/);
- c_subscribe_no_cb_test(true /*close consumer*/);
-
- if (test_quick) {
- Test::Say("Skipping tests >= c_ .. due to quick mode\n");
- return 0;
- }
-
- c_subscribe_no_cb_test(false /*don't close consumer*/);
- d_change_subscription_add_topic(true /*close consumer*/);
- d_change_subscription_add_topic(false /*don't close consumer*/);
- e_change_subscription_remove_topic(true /*close consumer*/);
- e_change_subscription_remove_topic(false /*don't close consumer*/);
- f_assign_call_cooperative();
- g_incremental_assign_call_eager();
- h_delete_topic();
- i_delete_topic_2();
- j_delete_topic_no_rb_callback();
- k_add_partition();
- l_unsubscribe();
- m_unsubscribe_2();
- n_wildcard();
- o_java_interop();
- for (i = 1; i <= 6; i++) /* iterate over 6 different test variations */
- s_subscribe_when_rebalancing(i);
- for (i = 1; i <= 2; i++)
- t_max_poll_interval_exceeded(i);
- /* Run all 2*3 variations of the u_.. test */
- for (i = 0; i < 3; i++) {
- u_multiple_subscription_changes(true /*with rebalance_cb*/, i);
- u_multiple_subscription_changes(false /*without rebalance_cb*/, i);
- }
- v_commit_during_rebalance(true /*with rebalance callback*/,
- true /*auto commit*/);
- v_commit_during_rebalance(false /*without rebalance callback*/,
- true /*auto commit*/);
- v_commit_during_rebalance(true /*with rebalance callback*/,
- false /*manual commit*/);
- x_incremental_rebalances();
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp
deleted file mode 100644
index 8ef88e7df..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Test sticky.partitioning.linger.ms producer property.
- *
- */
-
-#include <iostream>
-#include <fstream>
-#include <iterator>
-#include <string>
-#include "testcpp.h"
-#include "test.h"
-
-/**
- * @brief Specify sticky.partitioning.linger.ms and check consumed
- * messages to verify it takes effect.
- */
-static void do_test_sticky_partitioning(int sticky_delay) {
- std::string topic = Test::mk_topic_name(__FILE__, 1);
- Test::create_topic(NULL, topic.c_str(), 3, 1);
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 0);
-
- Test::conf_set(conf, "sticky.partitioning.linger.ms",
- tostr() << sticky_delay);
-
- std::string errstr;
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
-
- RdKafka::Consumer *c = RdKafka::Consumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create Consumer: " + errstr);
- delete conf;
-
- RdKafka::Topic *t = RdKafka::Topic::create(c, topic, NULL, errstr);
- if (!t)
- Test::Fail("Failed to create Topic: " + errstr);
-
- c->start(t, 0, RdKafka::Topic::OFFSET_BEGINNING);
- c->start(t, 1, RdKafka::Topic::OFFSET_BEGINNING);
- c->start(t, 2, RdKafka::Topic::OFFSET_BEGINNING);
-
- const int msgrate = 100;
- const int msgsize = 10;
-
- /* Produce messages */
- char val[msgsize];
- memset(val, 'a', msgsize);
-
- /* Produce for four seconds at 100 msgs/sec */
- for (int s = 0; s < 4; s++) {
- int64_t end_wait = test_clock() + (1 * 1000000);
-
- for (int i = 0; i < msgrate; i++) {
- RdKafka::ErrorCode err = p->produce(topic, RdKafka::Topic::PARTITION_UA,
- RdKafka::Producer::RK_MSG_COPY, val,
- msgsize, NULL, 0, -1, NULL);
- if (err)
- Test::Fail("Produce failed: " + RdKafka::err2str(err));
- }
-
- while (test_clock() < end_wait)
- p->poll(100);
- }
-
- Test::Say(tostr() << "Produced " << 4 * msgrate << " messages\n");
- p->flush(5 * 1000);
-
- /* Consume messages */
- int partition_msgcnt[3] = {0, 0, 0};
- int num_partitions_active = 0;
- int i = 0;
-
- int64_t end_wait = test_clock() + (5 * 1000000);
- while (test_clock() < end_wait) {
- RdKafka::Message *msg = c->consume(t, i, 5);
-
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- i++;
- if (i > 2)
- i = 0;
- break;
-
- case RdKafka::ERR_NO_ERROR:
- partition_msgcnt[msg->partition()]++;
- break;
-
- default:
- Test::Fail("Consume error: " + msg->errstr());
- break;
- }
-
- delete msg;
- }
-
- c->stop(t, 0);
- c->stop(t, 1);
- c->stop(t, 2);
-
- for (int i = 0; i < 3; i++) {
- /* A partition must receive 100+ messages to be deemed 'active'. While
-  * topic metadata is still propagating, some messages may be partitioned
-  * to newly available partitions, causing some initial turnover in the
-  * sticky partition selection. That behavior is acceptable and not
-  * relevant to this part of the test. */
-
- if (partition_msgcnt[i] > (msgrate - 1))
- num_partitions_active++;
- }
-
- Test::Say("Partition Message Count: \n");
- for (int i = 0; i < 3; i++) {
- Test::Say(tostr() << " " << i << ": " << partition_msgcnt[i] << "\n");
- }
-
- /* When sticky.partitioning.linger.ms is long (greater than expected
- * length of run), one partition should be sticky and receive messages. */
- if (sticky_delay == 5000 && num_partitions_active > 1)
- Test::Fail(tostr() << "Expected only 1 partition to receive msgs"
- << " but " << num_partitions_active
- << " partitions received msgs.");
-
- /* When sticky.partitioning.linger.ms is short (sufficiently smaller than
- * length of run), it is extremely likely that all partitions are sticky
- * at least once and receive messages. */
- if (sticky_delay == 1000 && num_partitions_active <= 1)
- Test::Fail(tostr() << "Expected more than one partition to receive msgs"
- << " but only " << num_partitions_active
- << " partition received msgs.");
-
- delete t;
- delete p;
- delete c;
-}
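The behavior under test only engages for keyless messages produced to an unassigned partition. A C sketch of the producer side, with an illustrative linger value and error checking omitted for brevity:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_t *make_sticky_producer(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* Stick to one partition for 1s at a time (illustrative);
             * setting this to 0 disables sticky behavior. */
            rd_kafka_conf_set(conf, "sticky.partitioning.linger.ms", "1000",
                              errstr, sizeof(errstr));

            return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr));
    }

    /* Keyless + PARTITION_UA is what makes the sticky partitioner apply. */
    static void produce_one(rd_kafka_t *rk) {
            rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                              RD_KAFKA_V_PARTITION(RD_KAFKA_PARTITION_UA),
                              RD_KAFKA_V_VALUE("payload", 7),
                              RD_KAFKA_V_END);
    }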
-
-extern "C" {
-int main_0114_sticky_partitioning(int argc, char **argv) {
- /* long delay (5 secs) */
- do_test_sticky_partitioning(5000);
- /* short delay (0.001 secs) */
- do_test_sticky_partitioning(1);
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp
deleted file mode 100644
index c4d1a96aa..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <map>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-
-namespace {
-class DrCb : public RdKafka::DeliveryReportCb {
- public:
- DrCb(RdKafka::ErrorCode exp_err) : cnt(0), exp_err(exp_err) {
- }
-
- void dr_cb(RdKafka::Message &msg) {
- Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n");
- if (msg.err() != exp_err)
- Test::Fail("Delivery report: Expected " + RdKafka::err2str(exp_err) +
- " but got " + RdKafka::err2str(msg.err()));
- cnt++;
- }
-
- int cnt;
- RdKafka::ErrorCode exp_err;
-};
-} // namespace
-
-/**
- * @brief Test producer auth failures.
- *
- * @param topic_known If true, the producer learns about the topic before
- *                    access to it is restricted, so the subsequent
- *                    ProduceRequest should fail. If false, access is
- *                    restricted first, so the MetadataRequest should fail.
- */
-
-
-static void do_test_producer(bool topic_known) {
- Test::Say(tostr() << _C_MAG << "[ Test producer auth with topic "
- << (topic_known ? "" : "not ") << "known ]\n");
-
- /* Create producer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 20);
-
- std::string errstr;
- DrCb dr(RdKafka::ERR_NO_ERROR);
- conf->set("dr_cb", &dr, errstr);
-
- std::string bootstraps;
- if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to retrieve bootstrap.servers");
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- /* Create topic */
- std::string topic_unauth = Test::mk_topic_name("0115-unauthorized", 1);
- Test::create_topic(NULL, topic_unauth.c_str(), 3, 1);
-
- int exp_dr_cnt = 0;
-
- RdKafka::ErrorCode err;
-
- if (topic_known) {
- /* Produce a single message to make sure metadata is known. */
- Test::Say("Producing seeding message 0\n");
- err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA,
- RdKafka::Producer::RK_MSG_COPY, (void *)"0", 1, NULL, 0, 0,
- NULL);
- TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
-
- p->flush(-1);
- exp_dr_cnt++;
- }
-
- /* Add denying ACL for unauth topic */
- test_kafka_cmd(
- "kafka-acls.sh --bootstrap-server %s "
- "--add --deny-principal 'User:*' "
- "--operation All --deny-host '*' "
- "--topic '%s'",
- bootstraps.c_str(), topic_unauth.c_str());
-
- /* Produce message to any partition. */
- Test::Say("Producing message 1 to any partition\n");
- err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA,
- RdKafka::Producer::RK_MSG_COPY, (void *)"1", 1, NULL, 0, 0,
- NULL);
- TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
- exp_dr_cnt++;
-
- /* Produce message to specific partition. */
- Test::Say("Producing message 2 to partition 0\n");
- err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"3",
- 1, NULL, 0, 0, NULL);
- TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
- exp_dr_cnt++;
-
- /* Wait for DRs */
- dr.exp_err = RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED;
- p->flush(-1);
-
-
- /* Produce message to any and specific partition, should fail immediately. */
- Test::Say("Producing message 3 to any partition\n");
- err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA,
- RdKafka::Producer::RK_MSG_COPY, (void *)"3", 1, NULL, 0, 0,
- NULL);
- TEST_ASSERT(err == dr.exp_err,
- "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, "
- "not %s",
- RdKafka::err2str(err).c_str());
-
- /* Specific partition */
- Test::Say("Producing message 4 to partition 0\n");
- err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"4",
- 1, NULL, 0, 0, NULL);
- TEST_ASSERT(err == dr.exp_err,
- "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, "
- "not %s",
- RdKafka::err2str(err).c_str());
-
- /* Final flush just to make sure */
- p->flush(-1);
-
- TEST_ASSERT(exp_dr_cnt == dr.cnt, "Expected %d deliveries, not %d",
- exp_dr_cnt, dr.cnt);
-
- Test::Say(tostr() << _C_GRN << "[ Test producer auth with topic "
- << (topic_known ? "" : "not ") << "known: PASS ]\n");
-
- delete p;
-}
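The DrCb class above is the C++ wrapper over the C per-message delivery report callback. A C sketch of the same hook; the error handling is illustrative:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void dr_msg_cb(rd_kafka_t *rk,
                          const rd_kafka_message_t *rkmessage,
                          void *opaque) {
            /* rkmessage->err carries the final per-message outcome, e.g.
             * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for a denied
             * topic. */
            if (rkmessage->err)
                    fprintf(stderr, "delivery failed: %s\n",
                            rd_kafka_err2str(rkmessage->err));
    }

    static void wire_dr_cb(rd_kafka_conf_t *conf) {
            rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
    }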
-
-extern "C" {
-int main_0115_producer_auth(int argc, char **argv) {
- /* We can't bother passing Java security config to kafka-acls.sh */
- if (test_needs_auth()) {
- Test::Skip("Cluster authentication required\n");
- return 0;
- }
-
- do_test_producer(true);
- do_test_producer(false);
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp
deleted file mode 100644
index c674d4443..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <map>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-extern "C" {
-#include "test.h"
-#include "tinycthread.h"
-#include "rdatomic.h"
-}
-
-/**
- * Test KafkaConsumer close and destructor behaviour.
- */
-
-
-struct args {
- RdKafka::Queue *queue;
- RdKafka::KafkaConsumer *c;
-};
-
-static int run_polling_thread(void *p) {
- struct args *args = (struct args *)p;
-
- while (!args->c->closed()) {
- RdKafka::Message *msg;
-
- /* We use a long timeout to also verify that the
- * consume() call is yielded/woken by librdkafka
- * when consumer_close_queue() finishes. */
- msg = args->queue->consume(60 * 1000 /*60s*/);
- if (msg)
- delete msg;
- }
-
- return 0;
-}
-
-
-static void start_polling_thread(thrd_t *thrd, struct args *args) {
- if (thrd_create(thrd, run_polling_thread, (void *)args) != thrd_success)
- Test::Fail("Failed to create thread");
-}
-
-static void stop_polling_thread(thrd_t thrd, struct args *args) {
- int ret;
- if (thrd_join(thrd, &ret) != thrd_success)
- Test::Fail("Thread join failed");
-}
-
-
-static void do_test_consumer_close(bool do_subscribe,
- bool do_unsubscribe,
- bool do_close,
- bool with_queue) {
- std::string testname = tostr()
- << "Test C++ KafkaConsumer close "
- << "subscribe=" << do_subscribe
- << ", unsubscribe=" << do_unsubscribe
- << ", close=" << do_close << ", queue=" << with_queue;
- SUB_TEST("%s", testname.c_str());
-
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- std::string errstr;
-
- /*
- * Produce messages to topics
- */
- const int msgs_per_partition = 10;
- RdKafka::Conf *pconf;
- Test::conf_init(&pconf, NULL, 10);
- Test::conf_set(pconf, "bootstrap.servers", bootstraps);
- RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
- if (!p)
- Test::Fail(tostr() << __FUNCTION__
- << ": Failed to create producer: " << errstr);
- delete pconf;
- Test::produce_msgs(p, "some_topic", 0, msgs_per_partition, 10,
- true /*flush*/);
- delete p;
-
- /* Create consumer */
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 0);
- Test::conf_set(conf, "bootstrap.servers", bootstraps);
- Test::conf_set(conf, "group.id", "mygroup");
- Test::conf_set(conf, "auto.offset.reset", "beginning");
-
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- if (do_subscribe) {
- std::vector<std::string> topics;
- topics.push_back("some_topic");
- RdKafka::ErrorCode err;
- if ((err = c->subscribe(topics)))
- Test::Fail("subscribe failed: " + RdKafka::err2str(err));
- }
-
- int received = 0;
- while (received < msgs_per_partition) {
- RdKafka::Message *msg = c->consume(500);
- if (msg) {
- ++received;
- delete msg;
- }
- }
-
- RdKafka::ErrorCode err;
- if (do_unsubscribe)
- if ((err = c->unsubscribe()))
- Test::Fail("unsubscribe failed: " + RdKafka::err2str(err));
-
- if (do_close) {
- if (with_queue) {
- RdKafka::Queue *queue = RdKafka::Queue::create(c);
- struct args args = {queue, c};
- thrd_t thrd;
-
- /* Serve queue in background thread until close() is done */
- start_polling_thread(&thrd, &args);
-
- RdKafka::Error *error;
-
- Test::Say("Closing with queue\n");
- if ((error = c->close(queue)))
- Test::Fail("close(queue) failed: " + error->str());
-
- stop_polling_thread(thrd, &args);
-
- Test::Say("Attempting second close\n");
- /* A second call should fail */
- if (!(error = c->close(queue)))
- Test::Fail("Expected second close(queue) to fail");
- if (error->code() != RdKafka::ERR__DESTROY)
- Test::Fail("Expected second close(queue) to fail with DESTROY, not " +
- error->str());
- delete error;
-
- delete queue;
-
- } else {
- if ((err = c->close()))
- Test::Fail("close failed: " + RdKafka::err2str(err));
-
- /* A second call should fail */
- if ((err = c->close()) != RdKafka::ERR__DESTROY)
- Test::Fail("Expected second close to fail with DESTROY, not " +
- RdKafka::err2str(err));
- }
- }
-
- /* Call an async method that will do nothing but verify that we're not
- * crashing due to use-after-free. */
- if ((err = c->commitAsync()))
- Test::Fail("Expected commitAsync close to succeed, got " +
- RdKafka::err2str(err));
-
- delete c;
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
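The queue-based close shown above also exists in the C API of this librdkafka version. A sketch, assuming the application serves the queue from another thread exactly as the test's polling thread does:

    #include <librdkafka/rdkafka.h>

    /* Initiate a non-blocking close; rebalance and commit events are
     * routed to rkq, which must be served until the consumer reports
     * itself closed. */
    static rd_kafka_error_t *start_close(rd_kafka_t *c,
                                         rd_kafka_queue_t *rkq) {
            return rd_kafka_consumer_close_queue(c, rkq);
    }

    static void serve_until_closed(rd_kafka_t *c, rd_kafka_queue_t *rkq) {
            while (!rd_kafka_consumer_closed(c)) {
                    rd_kafka_message_t *msg =
                        rd_kafka_consume_queue(rkq, 100);
                    if (msg)
                            rd_kafka_message_destroy(msg);
            }
    }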
-
-extern "C" {
-int main_0116_kafkaconsumer_close(int argc, char **argv) {
- /* Parameters:
- * subscribe, unsubscribe, close, with_queue */
- for (int i = 0; i < 1 << 4; i++) {
- bool subscribe = i & (1 << 0);
- bool unsubscribe = i & (1 << 1);
- bool do_close = i & (1 << 2);
- bool with_queue = i & (1 << 3);
- do_test_consumer_close(subscribe, unsubscribe, do_close, with_queue);
- }
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c
deleted file mode 100644
index 7a82f713e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-#include "../src/rdkafka_proto.h"
-#include "../src/rdunittest.h"
-
-#include <stdarg.h>
-
-
-/**
- * @name Misc mock-injected errors.
- *
- */
-
-/**
- * @brief Test producer handling (retry) of ERR_KAFKA_STORAGE_ERROR.
- */
-static void do_test_producer_storage_error(rd_bool_t too_few_retries) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_resp_err_t err;
-
- SUB_TEST_QUICK("%s", too_few_retries ? "with too few retries" : "");
-
- test_conf_init(&conf, NULL, 10);
-
- test_conf_set(conf, "test.mock.num.brokers", "3");
- test_conf_set(conf, "retries", too_few_retries ? "1" : "10");
- test_conf_set(conf, "retry.backoff.ms", "500");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- test_curr->ignore_dr_err = rd_false;
- if (too_few_retries) {
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR;
- test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
- } else {
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED;
- }
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- mcluster = rd_kafka_handle_mock_cluster(rk);
- TEST_ASSERT(mcluster, "missing mock cluster");
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_Produce, 3,
- RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
- RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
- RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR);
-
- err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
- RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
- TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
-
- /* Wait for delivery report. */
- test_flush(rk, 5000);
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
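Note that the mock cluster here is created implicitly through the test.mock.num.brokers configuration property rather than test_mock_cluster_new(), and the handle is then fetched back from the client instance. A standalone sketch with error checking omitted:

    #include <librdkafka/rdkafka.h>
    #include <librdkafka/rdkafka_mock.h>

    static rd_kafka_mock_cluster_t *make_implicit_mock(rd_kafka_t **rkp) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* Spin up a 3-broker mock cluster inside the client. */
            rd_kafka_conf_set(conf, "test.mock.num.brokers", "3",
                              errstr, sizeof(errstr));

            *rkp = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr));

            /* Returns NULL if the instance has no mock cluster. */
            return rd_kafka_handle_mock_cluster(*rkp);
    }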
-
-
-/**
- * @brief Issue #2933. Offset commit being retried when failing due to
- * RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS and then causing fetchers
- * to not start.
- */
-static void do_test_offset_commit_error_during_rebalance(void) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *c1, *c2;
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
- const char *topic = "test";
- const int msgcnt = 100;
- rd_kafka_resp_err_t err;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
- rd_kafka_mock_topic_create(mcluster, topic, 4, 3);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "1", NULL);
-
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit", "false");
-
- /* Make sure we don't consume the entire partition in one Fetch */
- test_conf_set(conf, "fetch.message.max.bytes", "100");
-
- c1 = test_create_consumer("mygroup", test_rebalance_cb,
- rd_kafka_conf_dup(conf), NULL);
-
- c2 = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c1, topic);
- test_consumer_subscribe(c2, topic);
-
-
- /* Wait for assignment and one message */
- test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL);
- test_consumer_poll("C2.PRE", c2, 0, -1, -1, 1, NULL);
-
- /* Trigger rebalance */
- test_consumer_close(c2);
- rd_kafka_destroy(c2);
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_OffsetCommit, 6,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS);
-
- /* This commit should fail (async) */
- TEST_SAY("Committing (should fail)\n");
- err = rd_kafka_commit(c1, NULL, 0 /*sync*/);
- TEST_SAY("Commit returned %s\n", rd_kafka_err2name(err));
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- "Expected commit to fail with ERR_REBALANCE_IN_PROGRESS, "
- "not %s",
- rd_kafka_err2name(err));
-
- /* Wait for the new assignment and verify we can read all messages */
- test_consumer_poll("C1.PRE", c1, 0, -1, -1, msgcnt, NULL);
-
- rd_kafka_destroy(c1);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-
-
-/**
- * @brief Verify that an OffsetCommit failing with
- *        RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT is retried until the offsets
- *        are successfully committed, with both manual and auto commit.
- */
-static void do_test_offset_commit_request_timed_out(rd_bool_t auto_commit) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *c1, *c2;
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
- const char *topic = "test";
- const int msgcnt = 1;
- rd_kafka_topic_partition_list_t *partitions;
-
- SUB_TEST_QUICK("enable.auto.commit=%s", auto_commit ? "true" : "false");
-
- test_conf_init(&conf, NULL, 60);
-
- mcluster = test_mock_cluster_new(1, &bootstraps);
-
- rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
-
- /* Seed the topic with messages */
- test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "1", NULL);
-
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.commit",
- auto_commit ? "true" : "false");
- /* Set high enough that interval-based auto commit never fires here */
- test_conf_set(conf, "auto.commit.interval.ms", "90000");
-
- /* Make sure we don't consume the entire partition in one Fetch */
- test_conf_set(conf, "fetch.message.max.bytes", "100");
-
- c1 = test_create_consumer("mygroup", NULL, rd_kafka_conf_dup(conf),
- NULL);
-
-
- test_consumer_subscribe(c1, topic);
-
- /* Wait for assignment and one message */
- test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL);
-
- rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 2,
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
- RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT);
-
- if (!auto_commit)
- TEST_CALL_ERR__(rd_kafka_commit(c1, NULL, 0 /*sync*/));
-
- /* Rely on consumer_close() doing final commit
- * when auto commit is enabled */
-
- test_consumer_close(c1);
-
- rd_kafka_destroy(c1);
-
- /* Create a new consumer and retrieve the committed offsets to verify
- * they were properly committed */
- c2 = test_create_consumer("mygroup", NULL, conf, NULL);
-
- partitions = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(partitions, topic, 0)->offset =
- RD_KAFKA_OFFSET_INVALID;
-
- TEST_CALL_ERR__(rd_kafka_committed(c2, partitions, 10 * 1000));
- TEST_ASSERT(partitions->elems[0].offset == 1,
- "Expected committed offset to be 1, not %" PRId64,
- partitions->elems[0].offset);
-
- rd_kafka_topic_partition_list_destroy(partitions);
-
- rd_kafka_destroy(c2);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
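The verification step relies on rd_kafka_committed(), which fetches the group's committed offsets from the coordinator. A sketch where the partition and timeout are illustrative:

    #include <librdkafka/rdkafka.h>

    static int64_t fetch_committed(rd_kafka_t *c, const char *topic) {
            int64_t offset;
            rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);

            rd_kafka_topic_partition_list_add(parts, topic, 0);

            /* Blocks up to 10s while querying the group coordinator;
             * each list element's offset is filled in on success. */
            if (rd_kafka_committed(c, parts, 10 * 1000))
                    offset = RD_KAFKA_OFFSET_INVALID;
            else
                    offset = parts->elems[0].offset;

            rd_kafka_topic_partition_list_destroy(parts);
            return offset;
    }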
-
-/**
- * @brief Verify that a cluster roll does not cause consumer_poll() to return
- * the temporary and retriable COORDINATOR_LOAD_IN_PROGRESS error. The
- * client should back off and retry in that case.
- */
-static void do_test_joingroup_coordinator_load_in_progress() {
- rd_kafka_conf_t *conf;
- rd_kafka_t *consumer;
- rd_kafka_mock_cluster_t *mcluster;
- const char *bootstraps;
- const char *topic = "test";
- const int msgcnt = 1;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
-
- mcluster = test_mock_cluster_new(1, &bootstraps);
-
- rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
-
- test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
- "bootstrap.servers", bootstraps,
- "batch.num.messages", "1", NULL);
-
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_FindCoordinator, 1,
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
-
- consumer = test_create_consumer("mygroup", NULL, conf, NULL);
-
- test_consumer_subscribe(consumer, topic);
-
- /* Wait for assignment and one message */
- test_consumer_poll("consumer", consumer, 0, -1, -1, msgcnt, NULL);
-
- test_consumer_close(consumer);
-
- rd_kafka_destroy(consumer);
-
- test_mock_cluster_destroy(mcluster);
-
- SUB_TEST_PASS();
-}
-
-int main_0117_mock_errors(int argc, char **argv) {
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- do_test_producer_storage_error(rd_false);
- do_test_producer_storage_error(rd_true);
-
- do_test_offset_commit_error_during_rebalance();
-
- do_test_offset_commit_request_timed_out(rd_true);
- do_test_offset_commit_request_timed_out(rd_false);
-
- do_test_joingroup_coordinator_load_in_progress();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c
deleted file mode 100644
index 1cdcda462..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/**
- * Issue #2933: Offset commit on revoke would cause hang.
- */
-
-static rd_kafka_t *c1, *c2;
-
-
-static void rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
-
- TEST_SAY("Rebalance for %s: %s: %d partition(s)\n", rd_kafka_name(rk),
- rd_kafka_err2name(err), parts->cnt);
-
- if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
- TEST_CALL_ERR__(rd_kafka_assign(rk, parts));
-
- } else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
- rd_kafka_resp_err_t commit_err;
-
- TEST_CALL_ERR__(rd_kafka_position(rk, parts));
-
- TEST_CALL_ERR__(rd_kafka_assign(rk, NULL));
-
- if (rk == c1)
- return;
-
- /* Give the closing consumer some time to handle the
- * unassignment and leave so that the coming commit fails. */
- rd_sleep(5);
-
- /* Committing after unassign will trigger an
- * Illegal generation error from the broker, which would
- * previously cause the cgrp to not properly transition
- * the next assigned state to fetching.
- * The closing consumer's commit is denied by the consumer
- * since it will have started to shut down after the assign
- * call. */
- TEST_SAY("%s: Committing\n", rd_kafka_name(rk));
- commit_err = rd_kafka_commit(rk, parts, 0 /*sync*/);
- TEST_SAY("%s: Commit result: %s\n", rd_kafka_name(rk),
- rd_kafka_err2name(commit_err));
-
- TEST_ASSERT(commit_err,
- "Expected closing consumer %s's commit to "
- "fail, but got %s",
- rd_kafka_name(rk), rd_kafka_err2name(commit_err));
-
- } else {
- TEST_FAIL("Unhandled event: %s", rd_kafka_err2name(err));
- }
-}
-
-
-int main_0118_commit_rebalance(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- rd_kafka_conf_t *conf;
- const int msgcnt = 1000;
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
-
- test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
- NULL);
-
- c1 = test_create_consumer(topic, rebalance_cb, rd_kafka_conf_dup(conf),
- NULL);
- c2 = test_create_consumer(topic, rebalance_cb, conf, NULL);
-
- test_consumer_subscribe(c1, topic);
- test_consumer_subscribe(c2, topic);
-
-
- test_consumer_poll("C1.PRE", c1, 0, -1, -1, 10, NULL);
- test_consumer_poll("C2.PRE", c2, 0, -1, -1, 10, NULL);
-
- /* Trigger rebalance */
- test_consumer_close(c2);
- rd_kafka_destroy(c2);
-
- /* Since no offsets were successfully committed the remaining consumer
- * should be able to receive all messages. */
- test_consumer_poll("C1.POST", c1, 0, -1, -1, msgcnt, NULL);
-
- rd_kafka_destroy(c1);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp
deleted file mode 100644
index 507b67302..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <iostream>
-#include <map>
-#include <cstring>
-#include <cstdlib>
-#include "testcpp.h"
-
-
-/**
- * @brief Let FetchRequests fail with authorization failure.
- *
- */
-
-
-static void do_test_fetch_unauth() {
- Test::Say(tostr() << _C_MAG << "[ Test unauthorized Fetch ]\n");
-
- std::string topic = Test::mk_topic_name("0119-fetch_unauth", 1);
-
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 20);
-
- Test::conf_set(conf, "group.id", topic);
-
- std::string bootstraps;
- if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
- Test::Fail("Failed to retrieve bootstrap.servers");
-
- std::string errstr;
- RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
- if (!c)
- Test::Fail("Failed to create KafkaConsumer: " + errstr);
- delete conf;
-
- /* Create topic */
- const int partition_cnt = 3;
- Test::create_topic(NULL, topic.c_str(), partition_cnt, 1);
-
- /* Produce messages */
- test_produce_msgs_easy(topic.c_str(), 0, RdKafka::Topic::PARTITION_UA, 1000);
-
- /* Add ACLs:
- * Allow Describe (Metadata)
- * Deny Read (Fetch)
- */
-
- test_kafka_cmd(
- "kafka-acls.sh --bootstrap-server %s "
- "--add --allow-principal 'User:*' "
- "--operation Describe --allow-host '*' "
- "--topic '%s'",
- bootstraps.c_str(), topic.c_str());
-
- test_kafka_cmd(
- "kafka-acls.sh --bootstrap-server %s "
- "--add --deny-principal 'User:*' "
- "--operation Read --deny-host '*' "
- "--topic '%s'",
- bootstraps.c_str(), topic.c_str());
-
- Test::subscribe(c, topic);
-
- int auth_err_cnt = 0;
-
- /* Consume for 15s (30*0.5), counting the number of auth errors,
- * should only see one error per consumed partition, and no messages. */
- for (int i = 0; i < 30; i++) {
- RdKafka::Message *msg;
-
- msg = c->consume(500);
- TEST_ASSERT(msg, "Expected msg");
-
- switch (msg->err()) {
- case RdKafka::ERR__TIMED_OUT:
- break;
-
- case RdKafka::ERR_NO_ERROR:
- Test::Fail("Did not expect a valid message");
- break;
-
- case RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED:
- Test::Say(tostr() << "Consumer error on " << msg->topic_name() << " ["
- << msg->partition() << "]: " << msg->errstr() << "\n");
-
- if (auth_err_cnt++ > partition_cnt)
- Test::Fail(
- "Too many auth errors received, "
- "expected same as number of partitions");
- break;
-
- default:
- Test::Fail(tostr() << "Unexpected consumer error on " << msg->topic_name()
- << " [" << msg->partition() << "]: " << msg->errstr());
- break;
- }
-
- delete msg;
- }
-
- TEST_ASSERT(auth_err_cnt == partition_cnt,
- "Expected exactly %d auth errors, saw %d", partition_cnt,
- auth_err_cnt);
-
- delete c;
-
- Test::Say(tostr() << _C_GRN << "[ Test unauthorized Fetch PASS ]\n");
-}
-
-extern "C" {
-int main_0119_consumer_auth(int argc, char **argv) {
- /* We can't bother passing Java security config to kafka-acls.sh */
- if (test_needs_auth()) {
- Test::Skip("Cluster authentication required\n");
- return 0;
- }
-
- do_test_fetch_unauth();
-
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c
deleted file mode 100644
index 2031dcba1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-#define _PART_CNT 4
-
-
-/**
- * @brief Verify proper assignment for asymmetrical subscriptions.
- */
-static void do_test_asymmetric(const char *assignor, const char *bootstraps) {
- rd_kafka_conf_t *conf;
-#define _C_CNT 3
- rd_kafka_t *c[_C_CNT];
-#define _S_CNT 2 /* max subscription count per consumer */
- const char *topics[_C_CNT][_S_CNT] = {
- /* c0 */ {"t1", "t2"},
- /* c1 */ {"t2", "t3"},
- /* c2 */ {"t4"},
- };
- struct {
- const char *topic;
- const int cnt;
- int seen;
- } expect[_C_CNT][_S_CNT] = {
- /* c0 */
- {
- {"t1", _PART_CNT},
- {"t2", _PART_CNT / 2},
- },
- /* c1 */
- {
- {"t2", _PART_CNT / 2},
- {"t3", _PART_CNT},
- },
- /* c2 */
- {
- {"t4", _PART_CNT},
- },
- };
- const char *groupid = assignor;
- int i;
-
- SUB_TEST_QUICK("%s assignor", assignor);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- test_conf_set(conf, "partition.assignment.strategy", assignor);
-
- for (i = 0; i < _C_CNT; i++) {
- char name[16];
- rd_kafka_topic_partition_list_t *tlist =
- rd_kafka_topic_partition_list_new(2);
- int j;
-
- rd_snprintf(name, sizeof(name), "c%d", i);
- test_conf_set(conf, "client.id", name);
-
- for (j = 0; j < _S_CNT && topics[i][j]; j++)
- rd_kafka_topic_partition_list_add(
- tlist, topics[i][j], RD_KAFKA_PARTITION_UA);
-
- c[i] = test_create_consumer(groupid, NULL,
- rd_kafka_conf_dup(conf), NULL);
-
- TEST_CALL_ERR__(rd_kafka_subscribe(c[i], tlist));
-
- rd_kafka_topic_partition_list_destroy(tlist);
- }
-
- rd_kafka_conf_destroy(conf);
-
-
- /* Await assignments for all consumers */
- for (i = 0; i < _C_CNT; i++)
- test_consumer_wait_assignment(c[i], rd_true);
-
- /* All have assignments, grab them. */
- for (i = 0; i < _C_CNT; i++) {
- int j;
- int p;
- rd_kafka_topic_partition_list_t *assignment;
-
- TEST_CALL_ERR__(rd_kafka_assignment(c[i], &assignment));
-
- TEST_ASSERT(assignment, "No assignment for %s",
- rd_kafka_name(c[i]));
-
- for (p = 0; p < assignment->cnt; p++) {
- const rd_kafka_topic_partition_t *part =
- &assignment->elems[p];
- rd_bool_t found = rd_false;
-
- for (j = 0; j < _S_CNT && expect[i][j].topic; j++) {
- if (!strcmp(part->topic, expect[i][j].topic)) {
- expect[i][j].seen++;
- found = rd_true;
- break;
- }
- }
-
- TEST_ASSERT(found,
- "%s was assigned unexpected topic %s",
- rd_kafka_name(c[i]), part->topic);
- }
-
- for (j = 0; j < _S_CNT && expect[i][j].topic; j++) {
- TEST_ASSERT(expect[i][j].seen == expect[i][j].cnt,
- "%s expected %d assigned partitions "
- "for %s, not %d",
- rd_kafka_name(c[i]), expect[i][j].cnt,
- expect[i][j].topic, expect[i][j].seen);
- }
-
- rd_kafka_topic_partition_list_destroy(assignment);
- }
-
-
- for (i = 0; i < _C_CNT; i++) {
- if (strcmp(assignor, "range") && (i & 1) == 0)
- test_consumer_close(c[i]);
- rd_kafka_destroy(c[i]);
- }
-
-
- SUB_TEST_PASS();
-}
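The per-consumer subscription lists above are built with the standard topic-partition-list API. A minimal sketch of subscribing one consumer to two topics:

    #include <librdkafka/rdkafka.h>

    static rd_kafka_resp_err_t subscribe_two(rd_kafka_t *c,
                                             const char *t1,
                                             const char *t2) {
            rd_kafka_resp_err_t err;
            rd_kafka_topic_partition_list_t *tlist =
                rd_kafka_topic_partition_list_new(2);

            /* The partition argument is ignored for subscriptions. */
            rd_kafka_topic_partition_list_add(tlist, t1,
                                              RD_KAFKA_PARTITION_UA);
            rd_kafka_topic_partition_list_add(tlist, t2,
                                              RD_KAFKA_PARTITION_UA);

            err = rd_kafka_subscribe(c, tlist);
            rd_kafka_topic_partition_list_destroy(tlist);
            return err;
    }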
-
-
-int main_0120_asymmetric_subscription(int argc, char **argv) {
- const char *bootstraps;
- rd_kafka_mock_cluster_t *mcluster;
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- mcluster = test_mock_cluster_new(3, &bootstraps);
-
-
- /* Create topics */
- rd_kafka_mock_topic_create(mcluster, "t1", _PART_CNT, 1);
- rd_kafka_mock_topic_create(mcluster, "t2", _PART_CNT, 1);
- rd_kafka_mock_topic_create(mcluster, "t3", _PART_CNT, 1);
- rd_kafka_mock_topic_create(mcluster, "t4", _PART_CNT, 1);
-
-
- do_test_asymmetric("roundrobin", bootstraps);
- do_test_asymmetric("range", bootstraps);
- do_test_asymmetric("cooperative-sticky", bootstraps);
-
- test_mock_cluster_destroy(mcluster);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c
deleted file mode 100644
index 35f5d529e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-#include "../src/rdkafka_proto.h"
-#include "../src/rdunittest.h"
-
-#include <stdarg.h>
-
-
-/**
- * @name Connecting to two different clusters should emit a warning.
- *
- */
-
-static void
-log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
- rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
- rd_bool_t matched = !strcmp(fac, "CLUSTERID") &&
- strstr(buf, "reports different ClusterId");
-
- TEST_SAY("%sLog: %s level %d fac %s: %s\n", matched ? _C_GRN : "",
- rd_kafka_name(rk), level, fac, buf);
-
- if (matched)
- rd_atomic32_add(log_cntp, 1);
-}
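Hooking a log callback together with an opaque pointer, as this test does to count matching log lines, is plain conf plumbing. A sketch with an illustrative plain-int counter:

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    static void my_log_cb(const rd_kafka_t *rk, int level,
                          const char *fac, const char *buf) {
            int *cntp = rd_kafka_opaque(rk);
            (*cntp)++; /* illustrative; a real counter should be atomic,
                        * as the log callback may run on internal threads */
            fprintf(stderr, "LOG-%d %s %s: %s\n", level,
                    rd_kafka_name(rk), fac, buf);
    }

    static void wire_logging(rd_kafka_conf_t *conf, int *cntp) {
            rd_kafka_conf_set_opaque(conf, cntp);
            rd_kafka_conf_set_log_cb(conf, my_log_cb);
    }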
-
-
-int main_0121_clusterid(int argc, char **argv) {
- rd_kafka_mock_cluster_t *cluster_a, *cluster_b;
- const char *bootstraps_a, *bootstraps_b;
- size_t bs_size;
- char *bootstraps;
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_atomic32_t log_cnt;
- int cnt = 0;
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- /* Create two clusters */
- cluster_a = test_mock_cluster_new(1, &bootstraps_a);
- cluster_b = test_mock_cluster_new(1, &bootstraps_b);
- rd_kafka_mock_broker_set_down(cluster_b, 1);
-
- test_conf_init(&conf, NULL, 10);
-
- /* Combine bootstraps from both clusters */
- bs_size = strlen(bootstraps_a) + strlen(bootstraps_b) + 2;
- bootstraps = malloc(bs_size);
- rd_snprintf(bootstraps, bs_size, "%s,%s", bootstraps_a, bootstraps_b);
- test_conf_set(conf, "bootstrap.servers", bootstraps);
- free(bootstraps);
-
- rd_atomic32_init(&log_cnt, 0);
- rd_kafka_conf_set_log_cb(conf, log_cb);
- rd_kafka_conf_set_opaque(conf, &log_cnt);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
-
- while (rd_atomic32_get(&log_cnt) == 0) {
- const rd_kafka_metadata_t *md;
-
- /* After 3 seconds bring down cluster a and bring up
- * cluster b, forcing the client to connect to
- * the other cluster. */
- if (cnt == 3) {
- rd_kafka_mock_broker_set_down(cluster_a, 1);
- rd_kafka_mock_broker_set_up(cluster_b, 1);
- }
-
- if (!rd_kafka_metadata(rk, 1, NULL, &md, 1000))
- rd_kafka_metadata_destroy(md);
- rd_sleep(1);
-
- cnt++;
- }
-
-
- rd_kafka_destroy(rk);
- test_mock_cluster_destroy(cluster_a);
- test_mock_cluster_destroy(cluster_b);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c
deleted file mode 100644
index 4f8727017..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-typedef struct consumer_s {
- const char *what;
- rd_kafka_queue_t *rkq;
- int timeout_ms;
- int consume_msg_cnt;
- int expected_msg_cnt;
- rd_kafka_t *rk;
- uint64_t testid;
- test_msgver_t *mv;
- struct test *test;
-} consumer_t;
-
-static int consumer_batch_queue(void *arg) {
- consumer_t *arguments = arg;
- int msg_cnt = 0;
- int i;
- test_timing_t t_cons;
-
- rd_kafka_queue_t *rkq = arguments->rkq;
- int timeout_ms = arguments->timeout_ms;
- const int consume_msg_cnt = arguments->consume_msg_cnt;
- rd_kafka_t *rk = arguments->rk;
- uint64_t testid = arguments->testid;
- rd_kafka_message_t **rkmessage =
- malloc(consume_msg_cnt * sizeof(*rkmessage));
-
- if (arguments->test)
- test_curr = arguments->test;
-
- TEST_SAY(
- "%s calling consume_batch_queue(timeout=%d, msgs=%d) "
- "and expecting %d messages back\n",
- rd_kafka_name(rk), timeout_ms, consume_msg_cnt,
- arguments->expected_msg_cnt);
-
- TIMING_START(&t_cons, "CONSUME");
- msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage,
- consume_msg_cnt);
- TIMING_STOP(&t_cons);
-
- TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk),
- msg_cnt, arguments->consume_msg_cnt,
- arguments->expected_msg_cnt);
- TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt,
- "consumed %d messages, expected %d", msg_cnt,
- arguments->expected_msg_cnt);
-
- for (i = 0; i < msg_cnt; i++) {
- if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0)
-                        TEST_FAIL(
-                            "The message is not from testid "
-                            "%" PRIu64 "\n",
-                            testid);
- rd_kafka_message_destroy(rkmessage[i]);
- }
-
- free(rkmessage);
-
- return 0;
-}
-
-
-/**
- * @brief Produce 400 messages and consume 500 messages in total with 2
- *        consumers using the batch queue method, then verify that no
- *        messages were missed or received in duplicate by the two
- *        consumers.
- *        The reasons for setting the consume message count higher than
- *        or equal to the produce message count are:
- *        1) Make sure each consumer receives at most half of the
- *           produced messages even though the consumers ask for more.
- *        2) If the consume message count were smaller than the produce
- *           message count, it would be hard to verify whether the
- *           returned messages were added to the batch queue before or
- *           after the rebalance. But if the consume message count is
- *           larger than the produce message count and each consumer
- *           still receives half of the produced messages, we know the
- *           buffer cleaning happened during the batch queue processing,
- *           guaranteeing that only messages added to the batch queue
- *           after the rebalance were received.
- *
- *        1. Produce 100 messages to each of the 4 partitions
- *        2. The first consumer subscribes to the topic and waits for its
- *           assignment
- *        3. The first consumer consumes 500 messages using the batch
- *           queue method
- *        4. The second consumer subscribes to the topic and waits for
- *           its assignment
- *        5. A rebalance happens
- *        6. The second consumer consumes 500 messages using the batch
- *           queue method
- *        7. Each consumer ends up with 200 messages
- *        8. Combine all the messages received by the 2 consumers and
- *           verify that no messages were missed or duplicated
- *
- */
-static void do_test_consume_batch(const char *strategy) {
- const int partition_cnt = 4;
- rd_kafka_queue_t *rkq1, *rkq2;
- const char *topic;
- rd_kafka_t *c1;
- rd_kafka_t *c2;
- int p;
- const int timeout_ms = 12000; /* Must be > rebalance time */
- uint64_t testid;
- const int consume_msg_cnt = 500;
- const int produce_msg_cnt = 400;
- rd_kafka_conf_t *conf;
- consumer_t c1_args = RD_ZERO_INIT;
- consumer_t c2_args = RD_ZERO_INIT;
- test_msgver_t mv;
- thrd_t thread_id;
-
- SUB_TEST("partition.assignment.strategy = %s", strategy);
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "partition.assignment.strategy", strategy);
-
- testid = test_id_generate();
- test_msgver_init(&mv, testid);
-
- /* Produce messages */
- topic = test_mk_topic_name("0122-buffer_cleaning", 1);
-
- for (p = 0; p < partition_cnt; p++)
- test_produce_msgs_easy(topic, testid, p,
- produce_msg_cnt / partition_cnt);
-
- /* Create consumers */
- c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
- c2 = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(c1, topic);
- test_consumer_wait_assignment(c1, rd_false);
-
- /* Create generic consume queue */
- rkq1 = rd_kafka_queue_get_consumer(c1);
-
- c1_args.what = "C1.PRE";
- c1_args.rkq = rkq1;
- c1_args.timeout_ms = timeout_ms;
- c1_args.consume_msg_cnt = consume_msg_cnt;
- c1_args.expected_msg_cnt = produce_msg_cnt / 2;
- c1_args.rk = c1;
- c1_args.testid = testid;
- c1_args.mv = &mv;
- c1_args.test = test_curr;
- if (thrd_create(&thread_id, consumer_batch_queue, &c1_args) !=
- thrd_success)
- TEST_FAIL("Failed to create thread for %s", "C1.PRE");
-
- test_consumer_subscribe(c2, topic);
- test_consumer_wait_assignment(c2, rd_false);
-
- thrd_join(thread_id, NULL);
-
- /* Create generic consume queue */
- rkq2 = rd_kafka_queue_get_consumer(c2);
-
- c2_args.what = "C2.PRE";
- c2_args.rkq = rkq2;
- /* Second consumer should be able to consume all messages right away */
- c2_args.timeout_ms = 5000;
- c2_args.consume_msg_cnt = consume_msg_cnt;
- c2_args.expected_msg_cnt = produce_msg_cnt / 2;
- c2_args.rk = c2;
- c2_args.testid = testid;
- c2_args.mv = &mv;
-
- consumer_batch_queue(&c2_args);
-
- test_msgver_verify("C1.PRE + C2.PRE", &mv,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0,
- produce_msg_cnt);
- test_msgver_clear(&mv);
-
- rd_kafka_queue_destroy(rkq1);
- rd_kafka_queue_destroy(rkq2);
-
- test_consumer_close(c1);
- test_consumer_close(c2);
-
- rd_kafka_destroy(c1);
- rd_kafka_destroy(c2);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0122_buffer_cleaning_after_rebalance(int argc, char **argv) {
- do_test_consume_batch("range");
- do_test_consume_batch("cooperative-sticky");
- return 0;
-}
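
The consumer_batch_queue() helper above is a thin wrapper around the public rd_kafka_consume_batch_queue() API. A minimal sketch of its direct use; the batch size and timeout are illustrative:

    #include <librdkafka/rdkafka.h>

    /* Batch-consume up to 500 messages from the consumer's queue. */
    static void consume_batch_sketch(rd_kafka_t *consumer) {
            rd_kafka_message_t *msgs[500];
            rd_kafka_queue_t *rkq = rd_kafka_queue_get_consumer(consumer);
            ssize_t i, n;

            n = rd_kafka_consume_batch_queue(rkq, 12000 /* timeout (ms) */,
                                             msgs, 500);
            for (i = 0; i < n; i++) {
                    /* ... process msgs[i] ... */
                    rd_kafka_message_destroy(msgs[i]);
            }
            rd_kafka_queue_destroy(rkq);
    }
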
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c
deleted file mode 100644
index 734467017..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-#include "../src/rdkafka_proto.h"
-#include "../src/rdunittest.h"
-
-#include <stdarg.h>
-
-
-/**
- * @name Verify connections.max.idle.ms
- *
- */
-
-static void
-log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
- rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
-
- if (!strstr(buf, "Connection max idle time exceeded"))
- return;
-
- TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac,
- buf);
-
- rd_atomic32_add(log_cntp, 1);
-}
-
-static void do_test_idle(rd_bool_t set_idle) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_atomic32_t log_cnt;
-
- SUB_TEST_QUICK("set_idle = %s", set_idle ? "yes" : "no");
-
- test_conf_init(&conf, NULL, 10);
- test_conf_set(conf, "debug", "broker");
- test_conf_set(conf, "connections.max.idle.ms", set_idle ? "5000" : "0");
- rd_atomic32_init(&log_cnt, 0);
- rd_kafka_conf_set_log_cb(conf, log_cb);
- rd_kafka_conf_set_opaque(conf, &log_cnt);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rd_sleep(3);
- TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
- "Should not have seen an idle disconnect this soon");
-
- rd_sleep(5);
- if (set_idle)
- TEST_ASSERT(rd_atomic32_get(&log_cnt) > 0,
- "Should have seen at least one idle "
- "disconnect by now");
- else
- TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
- "Should not have seen an idle disconnect");
-
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0123_connections_max_idle(int argc, char **argv) {
-
- do_test_idle(rd_true);
- do_test_idle(rd_false);
-
- return 0;
-}
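
For reference, a minimal sketch of the configuration under test, outside the test framework; the 5-second value mirrors the test, and "0" (the default) disables idle connection closing:

    #include <librdkafka/rdkafka.h>

    /* Producer whose idle broker connections are closed after 5 s. */
    static rd_kafka_t *idle_closing_producer(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            if (rd_kafka_conf_set(conf, "connections.max.idle.ms", "5000",
                                  errstr, sizeof(errstr)) !=
                RD_KAFKA_CONF_OK) {
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }
            /* rd_kafka_new() takes ownership of conf on success. */
            return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr));
    }
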
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c
deleted file mode 100644
index 5c61e5318..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-int main_0124_openssl_invalid_engine(int argc, char **argv) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- char errstr[512];
- rd_kafka_conf_res_t res;
-
- test_conf_init(&conf, NULL, 30);
- res = rd_kafka_conf_set(conf, "ssl.engine.location", "invalid_path",
- errstr, sizeof(errstr));
-
- if (res == RD_KAFKA_CONF_INVALID) {
- rd_kafka_conf_destroy(conf);
- TEST_SKIP("%s\n", errstr);
- return 0;
- }
-
- if (res != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s", errstr);
-
- if (rd_kafka_conf_set(conf, "security.protocol", "ssl", errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s", errstr);
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
-        TEST_ASSERT(!rk,
-                    "rd_kafka_new() should not succeed with an invalid engine"
-                    " path, error: %s",
-                    errstr);
- TEST_SAY("rd_kafka_new() failed (as expected): %s\n", errstr);
-
- TEST_ASSERT(strstr(errstr, "engine initialization failed in"),
- "engine"
- " initialization failure expected because of invalid engine"
- " path, error: %s",
- errstr);
-
- rd_kafka_conf_destroy(conf);
- return 0;
-}
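
The RD_KAFKA_CONF_INVALID branch above doubles as a feature probe: builds without OpenSSL engine support reject the property outright. A sketch of the same probe; the engine path is a placeholder:

    #include <librdkafka/rdkafka.h>

    /* Returns 1 if this librdkafka build accepts ssl.engine.location. */
    static int have_ssl_engine_support(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_conf_res_t res = rd_kafka_conf_set(
                conf, "ssl.engine.location",
                "/usr/lib/engines/pkcs11.so" /* placeholder path */,
                errstr, sizeof(errstr));

            rd_kafka_conf_destroy(conf);
            return res == RD_KAFKA_CONF_OK;
    }
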
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c
deleted file mode 100644
index 12f36cf19..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * Verify that flush() overrides the linger.ms time.
- *
- */
-int main_0125_immediate_flush(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- const char *topic = test_mk_topic_name("0125_immediate_flush", 1);
- const int msgcnt = 100;
- int remains = 0;
- test_timing_t t_time;
-
- test_conf_init(&conf, NULL, 30);
-
- test_conf_set(conf, "linger.ms", "10000");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_create_topic(rk, topic, 1, 1);
-
-        /* Produce the first half of the messages without waiting
-         * for delivery. */
- test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50,
- &remains);
-
- TIMING_START(&t_time, "NO_FLUSH");
- do {
- rd_kafka_poll(rk, 1000);
- } while (remains > 0);
- TIMING_ASSERT(&t_time, 10000, 15000);
-
- /* Produce remaining messages without waiting for delivery. */
- test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50,
- &remains);
-
-        /* The linger time should be overridden when flushing */
- TIMING_START(&t_time, "FLUSH");
- TEST_CALL_ERR__(rd_kafka_flush(rk, 2000));
- TIMING_ASSERT(&t_time, 0, 2500);
-
- rd_kafka_destroy(rk);
-
-
- /* Verify messages were actually produced by consuming them back. */
- test_consume_msgs_easy(topic, topic, 0, 1, msgcnt, NULL);
-
- return 0;
-}
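
A minimal sketch of the behavior being verified: rd_kafka_flush() serves delivery reports and overrides linger.ms instead of waiting out the linger window. The 2-second timeout is illustrative:

    #include <librdkafka/rdkafka.h>

    /* Force out messages buffered by a long linger.ms setting. */
    static void flush_now(rd_kafka_t *producer) {
            rd_kafka_resp_err_t err = rd_kafka_flush(producer, 2000 /* ms */);

            if (err == RD_KAFKA_RESP_ERR__TIMED_OUT) {
                    /* Some messages were still in flight after 2 s. */
            }
    }
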
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c
deleted file mode 100644
index 8eb187068..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-static rd_bool_t error_seen;
-/**
- * @brief After configuring OIDC, make sure the producer and consumer
- *        work successfully.
- *
- */
-static void
-do_test_produce_consumer_with_OIDC(const rd_kafka_conf_t *base_conf) {
- const char *topic;
- uint64_t testid;
- rd_kafka_t *p1;
- rd_kafka_t *c1;
- rd_kafka_conf_t *conf;
-
- const char *url = test_getenv("VALID_OIDC_URL", NULL);
-
- SUB_TEST("Test producer and consumer with oidc configuration");
-
- if (!url) {
- SUB_TEST_SKIP(
- "VALID_OIDC_URL environment variable is not set\n");
- return;
- }
-
- conf = rd_kafka_conf_dup(base_conf);
- test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", url);
-
- testid = test_id_generate();
-
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
-
- topic = test_mk_topic_name("0126-oauthbearer_oidc", 1);
- test_create_topic(p1, topic, 1, 3);
- TEST_SAY("Topic: %s is created\n", topic);
-
- test_produce_msgs2(p1, topic, testid, 0, 0, 1, NULL, 0);
-
- test_conf_set(conf, "auto.offset.reset", "earliest");
- c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
- test_consumer_subscribe(c1, topic);
-
- /* Give it some time to trigger the token refresh. */
- rd_usleep(5 * 1000 * 1000, NULL);
- test_consumer_poll("OIDC.C1", c1, testid, 1, -1, 1, NULL);
-
- test_consumer_close(c1);
-
- rd_kafka_destroy(p1);
- rd_kafka_destroy(c1);
- SUB_TEST_PASS();
-}
-
-
-static void
-auth_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
- if (err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
- err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) {
- TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
- reason);
- error_seen = rd_true;
- } else
- TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
- reason);
- rd_kafka_yield(rk);
-}
-
-
-/**
- * @brief After configuring OIDC, make sure authentication fails as
- *        expected if the token has expired.
- *
- */
-static void do_test_produce_consumer_with_OIDC_expired_token_should_fail(
- const rd_kafka_conf_t *base_conf) {
- rd_kafka_t *c1;
- uint64_t testid;
- rd_kafka_conf_t *conf;
-
- const char *expired_url = test_getenv("EXPIRED_TOKEN_OIDC_URL", NULL);
-
- SUB_TEST("Test OAUTHBEARER/OIDC failing with expired JWT");
-
- if (!expired_url) {
- SUB_TEST_SKIP(
- "EXPIRED_TOKEN_OIDC_URL environment variable is not set\n");
- return;
- }
-
- conf = rd_kafka_conf_dup(base_conf);
-
- error_seen = rd_false;
- test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", expired_url);
-
- rd_kafka_conf_set_error_cb(conf, auth_error_cb);
-
- testid = test_id_generate();
-
- c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL);
-
- test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000);
- TEST_ASSERT(error_seen);
-
- test_consumer_close(c1);
- rd_kafka_destroy(c1);
- SUB_TEST_PASS();
-}
-
-
-/**
- * @brief After configuring OIDC, make sure authentication fails as
- *        expected if the token is not valid.
- *
- */
-static void do_test_produce_consumer_with_OIDC_should_fail(
- const rd_kafka_conf_t *base_conf) {
- rd_kafka_t *c1;
- uint64_t testid;
- rd_kafka_conf_t *conf;
-
- const char *invalid_url = test_getenv("INVALID_OIDC_URL", NULL);
-
- SUB_TEST("Test OAUTHBEARER/OIDC failing with invalid JWT");
-
- if (!invalid_url) {
- SUB_TEST_SKIP(
- "INVALID_OIDC_URL environment variable is not set\n");
- return;
- }
-
- conf = rd_kafka_conf_dup(base_conf);
-
- error_seen = rd_false;
-
- test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", invalid_url);
-
- rd_kafka_conf_set_error_cb(conf, auth_error_cb);
-
- testid = test_id_generate();
-
- c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL);
-
- test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000);
-
- TEST_ASSERT(error_seen);
-
- test_consumer_close(c1);
- rd_kafka_destroy(c1);
- SUB_TEST_PASS();
-}
-
-
-int main_0126_oauthbearer_oidc(int argc, char **argv) {
- rd_kafka_conf_t *conf;
- const char *sec;
- const char *oidc;
-
- test_conf_init(&conf, NULL, 60);
-
- sec = test_conf_get(conf, "security.protocol");
- if (!strstr(sec, "sasl")) {
- TEST_SKIP("Apache Kafka cluster not configured for SASL\n");
- return 0;
- }
-
- oidc = test_conf_get(conf, "sasl.oauthbearer.method");
- if (rd_strcasecmp(oidc, "OIDC")) {
- TEST_SKIP("`sasl.oauthbearer.method=OIDC` is required\n");
- return 0;
- }
-
- do_test_produce_consumer_with_OIDC(conf);
- do_test_produce_consumer_with_OIDC_should_fail(conf);
- do_test_produce_consumer_with_OIDC_expired_token_should_fail(conf);
-
- rd_kafka_conf_destroy(conf);
-
- return 0;
-}
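
The tests above only vary sasl.oauthbearer.token.endpoint.url and take the rest of the SASL setup from the test environment. Below is the full OIDC configuration shape, sketched from librdkafka's documented properties; the endpoint URL, client id and secret are placeholders:

    #include <librdkafka/rdkafka.h>

    /* Configure the OAUTHBEARER/OIDC method on a config object. */
    static void conf_oidc_sketch(rd_kafka_conf_t *conf) {
            char errstr[512];

            rd_kafka_conf_set(conf, "security.protocol", "sasl_ssl", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.mechanism", "OAUTHBEARER", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.method", "oidc", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.token.endpoint.url",
                              "https://example.com/oauth2/token", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.client.id", "my-client",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.client.secret",
                              "my-secret", errstr, sizeof(errstr));
    }
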
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp
deleted file mode 100644
index 784f09bf6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Verify that background SASL callback queues work by calling
- * a non-polling API after client creation.
- */
-#include "testcpp.h"
-#include "rdatomic.h"
-
-namespace {
-/* Provide our own token refresh callback */
-class MyCb : public RdKafka::OAuthBearerTokenRefreshCb {
- public:
- MyCb() {
- rd_atomic32_init(&called_, 0);
- }
-
- bool called() {
- return rd_atomic32_get(&called_) > 0;
- }
-
- void oauthbearer_token_refresh_cb(RdKafka::Handle *handle,
- const std::string &oauthbearer_config) {
- handle->oauthbearer_set_token_failure(
- "Not implemented by this test, "
- "but that's okay");
- rd_atomic32_add(&called_, 1);
- Test::Say("Callback called!\n");
- }
-
- rd_atomic32_t called_;
-};
-}; // namespace
-
-
-static void do_test(bool use_background_queue) {
- SUB_TEST("Use background queue = %s", use_background_queue ? "yes" : "no");
-
- bool expect_called = use_background_queue;
-
- RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
-
- Test::conf_set(conf, "security.protocol", "SASL_PLAINTEXT");
- Test::conf_set(conf, "sasl.mechanism", "OAUTHBEARER");
-
- std::string errstr;
-
- MyCb mycb;
- if (conf->set("oauthbearer_token_refresh_cb", &mycb, errstr))
- Test::Fail("Failed to set refresh callback: " + errstr);
-
- if (use_background_queue)
- if (conf->enable_sasl_queue(true, errstr))
- Test::Fail("Failed to enable SASL queue: " + errstr);
-
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- if (use_background_queue) {
- RdKafka::Error *error = p->sasl_background_callbacks_enable();
- if (error)
- Test::Fail("sasl_background_callbacks_enable() failed: " + error->str());
- }
-
- /* This call should fail since the refresh callback fails,
- * and there are no brokers configured anyway. */
- const std::string clusterid = p->clusterid(5 * 1000);
-
- TEST_ASSERT(clusterid.empty(),
- "Expected clusterid() to fail since the token was not set");
-
- if (expect_called)
- TEST_ASSERT(mycb.called(),
- "Expected refresh callback to have been called by now");
- else
- TEST_ASSERT(!mycb.called(),
- "Did not expect refresh callback to have been called");
-
- delete p;
-
- SUB_TEST_PASS();
-}
-
-extern "C" {
-int main_0128_sasl_callback_queue(int argc, char **argv) {
- if (!test_check_builtin("sasl_oauthbearer")) {
- Test::Skip("Test requires OAUTHBEARER support\n");
- return 0;
- }
-
- do_test(true);
- do_test(false);
-
- return 0;
-}
-}
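
The same background-queue mechanism is reachable from the C API. A sketch, assuming conf already carries the SASL/OAUTHBEARER settings shown above:

    #include <librdkafka/rdkafka.h>

    /* Create a producer whose SASL callbacks are served by librdkafka's
     * background thread, so no explicit poll loop is needed for them. */
    static rd_kafka_t *create_with_background_sasl(rd_kafka_conf_t *conf) {
            char errstr[512];
            rd_kafka_t *rk;
            rd_kafka_error_t *error;

            /* Route SASL events to a separate queue (before creation). */
            rd_kafka_conf_enable_sasl_queue(conf, 1);
            rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                              sizeof(errstr));
            if (!rk)
                    return NULL;
            /* Hand that queue to the background thread. */
            error = rd_kafka_sasl_background_callbacks_enable(rk);
            if (error)
                    rd_kafka_error_destroy(error); /* non-fatal in sketch */
            return rk;
    }
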
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c
deleted file mode 100644
index cc150fecc..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2021, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * @brief Verify that a FetchResponse containing only aborted messages does not
- *        raise an ERR_MSG_SIZE_TOO_LARGE error. #2993.
- *
- * 1. Create a topic with a small message.max.bytes to make sure that
- * there's at least one full fetch response without any control messages,
- * just aborted messages.
- * 2. Transactionally produce 10x the message.max.bytes.
- * 3. Abort the transaction.
- * 4. Consume from start, verify that no error is received, wait for EOF.
- *
- */
-int main_0129_fetch_aborted_msgs(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- const char *topic = test_mk_topic_name("0129_fetch_aborted_msgs", 1);
- const int msgcnt = 1000;
- const size_t msgsize = 1000;
-
- test_conf_init(&conf, NULL, 30);
-
- test_conf_set(conf, "linger.ms", "10000");
- test_conf_set(conf, "transactional.id", topic);
- test_conf_set(conf, "message.max.bytes", "10000");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_admin_create_topic(rk, topic, 1, 1,
- (const char *[]) {"max.message.bytes", "10000",
- "segment.bytes", "20000",
- NULL});
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
-
-        /* Transactionally produce the full message set without waiting
-         * for delivery. */
- test_produce_msgs2(rk, topic, 0, 0, 0, msgcnt, NULL, msgsize);
-
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
-
- rd_kafka_destroy(rk);
-
-        /* Consume from the start and verify that no messages are
-         * received (the transaction was aborted), waiting for EOF. */
- test_consume_msgs_easy(topic, topic, 0, 1, 0, NULL);
-
- return 0;
-}
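
A condensed sketch of the transactional sequence the test performs; producing the payload itself is elided, and a read_committed consumer (the default isolation.level) then skips the aborted records:

    #include <librdkafka/rdkafka.h>

    /* Produce inside a transaction, then abort it. */
    static void produce_and_abort(rd_kafka_t *txn_producer) {
            rd_kafka_error_t *error;

            error = rd_kafka_init_transactions(txn_producer, -1);
            if (!error)
                    error = rd_kafka_begin_transaction(txn_producer);
            /* ... rd_kafka_producev() the messages here ... */
            if (!error)
                    error = rd_kafka_abort_transaction(txn_producer, -1);
            if (error)
                    rd_kafka_error_destroy(error);
    }
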
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c
deleted file mode 100644
index 9fb8d2350..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-/**
- * Verify that offsets_store() is not allowed for unassigned partitions,
- * and that those offsets are not committed.
- */
-static void do_test_store_unassigned(void) {
- const char *topic = test_mk_topic_name("0130_store_unassigned", 1);
- rd_kafka_conf_t *conf;
- rd_kafka_t *c;
- rd_kafka_topic_partition_list_t *parts;
- rd_kafka_resp_err_t err;
- rd_kafka_message_t *rkmessage;
- const int64_t proper_offset = 900, bad_offset = 300;
-
- SUB_TEST_QUICK();
-
- test_produce_msgs_easy(topic, 0, 0, 1000);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "auto.offset.reset", "earliest");
- test_conf_set(conf, "enable.auto.offset.store", "false");
- test_conf_set(conf, "enable.partition.eof", "true");
-
- c = test_create_consumer(topic, NULL, conf, NULL);
-
- parts = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(parts, topic, 0);
- TEST_CALL_ERR__(rd_kafka_assign(c, parts));
-
- TEST_SAY("Consume one message\n");
- test_consumer_poll_once(c, NULL, tmout_multip(3000));
-
- parts->elems[0].offset = proper_offset;
- TEST_SAY("Storing offset %" PRId64 " while assigned: should succeed\n",
- parts->elems[0].offset);
- TEST_CALL_ERR__(rd_kafka_offsets_store(c, parts));
-
- TEST_SAY("Committing\n");
- TEST_CALL_ERR__(rd_kafka_commit(c, NULL, rd_false /*sync*/));
-
- TEST_SAY("Unassigning partitions and trying to store again\n");
- TEST_CALL_ERR__(rd_kafka_assign(c, NULL));
-
- parts->elems[0].offset = bad_offset;
- TEST_SAY("Storing offset %" PRId64 " while unassigned: should fail\n",
- parts->elems[0].offset);
- err = rd_kafka_offsets_store(c, parts);
- TEST_ASSERT_LATER(err != RD_KAFKA_RESP_ERR_NO_ERROR,
- "Expected offsets_store() to fail");
- TEST_ASSERT(parts->cnt == 1);
-
- TEST_ASSERT(parts->elems[0].err == RD_KAFKA_RESP_ERR__STATE,
- "Expected %s [%" PRId32
- "] to fail with "
- "_STATE, not %s",
- parts->elems[0].topic, parts->elems[0].partition,
- rd_kafka_err2name(parts->elems[0].err));
-
- TEST_SAY("Committing: should fail\n");
- err = rd_kafka_commit(c, NULL, rd_false /*sync*/);
- TEST_ASSERT(err == RD_KAFKA_RESP_ERR__NO_OFFSET,
- "Expected commit() to fail with NO_OFFSET, not %s",
- rd_kafka_err2name(err));
-
- TEST_SAY("Assigning partition again\n");
- parts->elems[0].offset = RD_KAFKA_OFFSET_INVALID; /* Use committed */
- TEST_CALL_ERR__(rd_kafka_assign(c, parts));
-
- TEST_SAY("Consuming message to verify committed offset\n");
- rkmessage = rd_kafka_consumer_poll(c, tmout_multip(3000));
- TEST_ASSERT(rkmessage != NULL, "Expected message");
- TEST_SAY("Consumed message with offset %" PRId64 "\n",
- rkmessage->offset);
- TEST_ASSERT(!rkmessage->err, "Expected proper message, not error %s",
- rd_kafka_message_errstr(rkmessage));
- TEST_ASSERT(rkmessage->offset == proper_offset,
- "Expected first message to be properly stored "
- "offset %" PRId64 ", not %" PRId64,
- proper_offset, rkmessage->offset);
-
- rd_kafka_message_destroy(rkmessage);
-
- rd_kafka_topic_partition_list_destroy(parts);
-
- rd_kafka_consumer_close(c);
- rd_kafka_destroy(c);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0130_store_offsets(int argc, char **argv) {
-
- do_test_store_unassigned();
-
- return 0;
-}
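
A minimal sketch of the manual offset-store flow being verified; the partition and offset values are illustrative, and the store fails with _STATE for partitions that are not currently assigned:

    #include <librdkafka/rdkafka.h>

    /* Requires enable.auto.offset.store=false on the consumer. */
    static void store_and_commit(rd_kafka_t *consumer, const char *topic) {
            rd_kafka_topic_partition_list_t *parts =
                rd_kafka_topic_partition_list_new(1);

            /* Offset to resume from; stored as given. */
            rd_kafka_topic_partition_list_add(parts, topic, 0)->offset = 900;
            if (!rd_kafka_offsets_store(consumer, parts))
                    rd_kafka_commit(consumer, NULL, 0 /* sync */);
            rd_kafka_topic_partition_list_destroy(parts);
    }
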
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c
deleted file mode 100644
index 8cac87ea0..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-
-/**
- * @name Verify socket.connection.setup.timeout.ms by using
- *       a mock cluster whose RTT exceeds the timeout.
- */
-
-static void
-log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
- rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
-
- if (!strstr(buf, "Connection setup timed out"))
- return;
-
- TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac,
- buf);
-
- rd_atomic32_add(log_cntp, 1);
-}
-
-int main_0131_connect_timeout(int argc, char **argv) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_atomic32_t log_cnt;
-
- test_conf_init(NULL, NULL, 20);
- conf = rd_kafka_conf_new();
- test_conf_set(conf, "test.mock.num.brokers", "2");
- test_conf_set(conf, "test.mock.broker.rtt", "10000");
- test_conf_set(conf, "socket.connection.setup.timeout.ms", "6000");
- test_conf_set(conf, "debug", "broker");
- rd_atomic32_init(&log_cnt, 0);
- rd_kafka_conf_set_log_cb(conf, log_cb);
- rd_kafka_conf_set_opaque(conf, &log_cnt);
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rd_sleep(3);
- TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
- "Should not have seen a disconnect this soon");
-
- rd_sleep(5);
- TEST_ASSERT(rd_atomic32_get(&log_cnt) > 0,
- "Should have seen at least one "
- "disconnect by now");
-
- rd_kafka_destroy(rk);
-
- return 0;
-}
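
A sketch of the same setup outside the test framework; test.mock.num.brokers and test.mock.broker.rtt are librdkafka's built-in test properties for spinning up an internal mock cluster with an artificial round-trip delay:

    #include <librdkafka/rdkafka.h>

    /* Producer that cannot finish connecting: 10 s RTT vs 6 s budget. */
    static rd_kafka_t *slow_connect_producer(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            rd_kafka_conf_set(conf, "test.mock.num.brokers", "2", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "test.mock.broker.rtt", "10000", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "socket.connection.setup.timeout.ms",
                              "6000", errstr, sizeof(errstr));
            return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
                                sizeof(errstr));
    }
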
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c
deleted file mode 100644
index 5199f4f81..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-#define _PART_CNT 4
-
-static void verify_roundrobin_assignment(rd_kafka_t *c[]) {
- rd_kafka_topic_partition_list_t *assignment1;
- rd_kafka_topic_partition_list_t *assignment2;
-
- TEST_CALL_ERR__(rd_kafka_assignment(c[0], &assignment1));
-
-        TEST_ASSERT(assignment1->cnt == _PART_CNT / 2,
-                    "Roundrobin: assignment partition count for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[0]), assignment1->cnt, _PART_CNT / 2);
-
-        TEST_ASSERT(assignment1->elems[0].partition == 0,
-                    "Roundrobin: first assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[0]), assignment1->elems[0].partition, 0);
-        TEST_ASSERT(assignment1->elems[1].partition == 2,
-                    "Roundrobin: second assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[0]), assignment1->elems[1].partition, 2);
-
-        TEST_CALL_ERR__(rd_kafka_assignment(c[1], &assignment2));
-        TEST_ASSERT(assignment2->cnt == _PART_CNT / 2,
-                    "Roundrobin: assignment partition count for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[1]), assignment2->cnt, _PART_CNT / 2);
-
-        TEST_ASSERT(assignment2->elems[0].partition == 1,
-                    "Roundrobin: first assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[1]), assignment2->elems[0].partition, 1);
-        TEST_ASSERT(assignment2->elems[1].partition == 3,
-                    "Roundrobin: second assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[1]), assignment2->elems[1].partition, 3);
-
- rd_kafka_topic_partition_list_destroy(assignment1);
- rd_kafka_topic_partition_list_destroy(assignment2);
-}
-
-static void verify_range_assignment(rd_kafka_t *c[]) {
- rd_kafka_topic_partition_list_t *assignment1;
- rd_kafka_topic_partition_list_t *assignment2;
-
- TEST_CALL_ERR__(rd_kafka_assignment(c[0], &assignment1));
-
-        TEST_ASSERT(assignment1->cnt == _PART_CNT / 2,
-                    "Range: assignment partition count for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[0]), assignment1->cnt, _PART_CNT / 2);
-
-        TEST_ASSERT(assignment1->elems[0].partition == 0,
-                    "Range: first assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[0]), assignment1->elems[0].partition, 0);
-        TEST_ASSERT(assignment1->elems[1].partition == 1,
-                    "Range: second assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[0]), assignment1->elems[1].partition, 1);
-
-        TEST_CALL_ERR__(rd_kafka_assignment(c[1], &assignment2));
-        TEST_ASSERT(assignment2->cnt == _PART_CNT / 2,
-                    "Range: assignment partition count for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[1]), assignment2->cnt, _PART_CNT / 2);
-
-        TEST_ASSERT(assignment2->elems[0].partition == 2,
-                    "Range: first assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[1]), assignment2->elems[0].partition, 2);
-        TEST_ASSERT(assignment2->elems[1].partition == 3,
-                    "Range: second assigned partition for %s "
-                    "is %d, but %d was expected\n",
-                    rd_kafka_name(c[1]), assignment2->elems[1].partition, 3);
-
- rd_kafka_topic_partition_list_destroy(assignment1);
- rd_kafka_topic_partition_list_destroy(assignment2);
-}
-
-static void do_test_stragety_ordering(const char *assignor,
- const char *expected_assignor) {
- rd_kafka_conf_t *conf;
-#define _C_CNT 2
- rd_kafka_t *c[_C_CNT];
-
- const char *topic;
- const int msgcnt = 100;
- int i;
- uint64_t testid;
-
- SUB_TEST("partition.assignment.strategy = %s", assignor);
-
- testid = test_id_generate();
-
- topic = test_mk_topic_name("0132-strategy_ordering", 1);
- test_create_topic(NULL, topic, _PART_CNT, 1);
- test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "partition.assignment.strategy", assignor);
-
- for (i = 0; i < _C_CNT; i++) {
- char name[16];
-
- rd_snprintf(name, sizeof(name), "c%d", i);
- test_conf_set(conf, "client.id", name);
-
- c[i] = test_create_consumer(assignor, NULL,
- rd_kafka_conf_dup(conf), NULL);
-
- test_consumer_subscribe(c[i], topic);
- }
-
- rd_kafka_conf_destroy(conf);
-
- /* Await assignments for all consumers */
- for (i = 0; i < _C_CNT; i++) {
- test_consumer_wait_assignment(c[i], rd_true);
- }
-
- if (!strcmp(expected_assignor, "range"))
- verify_range_assignment(c);
- else
- verify_roundrobin_assignment(c);
-
- for (i = 0; i < _C_CNT; i++) {
- test_consumer_close(c[i]);
- rd_kafka_destroy(c[i]);
- }
-
- SUB_TEST_PASS();
-}
-
-
-int main_0132_strategy_ordering(int argc, char **argv) {
- do_test_stragety_ordering("roundrobin,range", "roundrobin");
- do_test_stragety_ordering("range,roundrobin", "range");
- return 0;
-}
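
The property pinned down here is that the first assignor in the comma-separated list that all group members support is the one elected, regardless of which strategies follow it. Sketched as a configuration fragment:

    #include <librdkafka/rdkafka.h>

    /* All members list roundrobin first, so roundrobin is elected. */
    static void conf_assignors_sketch(rd_kafka_conf_t *conf) {
            char errstr[512];

            rd_kafka_conf_set(conf, "partition.assignment.strategy",
                              "roundrobin,range", errstr, sizeof(errstr));
    }
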
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c
deleted file mode 100644
index 850fa2761..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-#include "rdstring.h"
-
-/**
- * @brief Tests reading an SSL PKCS#12 keystore or a PEM certificate and key
- *        from file, decoding them with either the correct or an incorrect
- *        password.
- *
- * Ensures it's read correctly on Windows too.
- * See https://github.com/edenhill/librdkafka/issues/3992
- */
-static void do_test_ssl_keys(const char *type, rd_bool_t correct_password) {
-#define TEST_FIXTURES_FOLDER "./fixtures"
-#define TEST_FIXTURES_SSL_FOLDER TEST_FIXTURES_FOLDER "/ssl/"
-#define TEST_FIXTURES_KEYSTORE_PASSWORD "use_strong_password_keystore_client"
-#define TEST_FIXTURES_KEY_PASSWORD "use_strong_password_keystore_client2"
-#define TEST_KEYSTORE_LOCATION TEST_FIXTURES_SSL_FOLDER "client.keystore.p12"
-#define TEST_CERTIFICATE_LOCATION \
- TEST_FIXTURES_SSL_FOLDER "client2.certificate.pem"
-#define TEST_KEY_LOCATION TEST_FIXTURES_SSL_FOLDER "client2.key"
-
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- char errstr[256];
-
- SUB_TEST_QUICK("keystore type = %s, correct password = %s", type,
- RD_STR_ToF(correct_password));
-
- test_conf_init(&conf, NULL, 30);
- test_conf_set(conf, "security.protocol", "SSL");
-
- if (!strcmp(type, "PKCS12")) {
- test_conf_set(conf, "ssl.keystore.location",
- TEST_KEYSTORE_LOCATION);
- if (correct_password)
- test_conf_set(conf, "ssl.keystore.password",
- TEST_FIXTURES_KEYSTORE_PASSWORD);
- else
- test_conf_set(conf, "ssl.keystore.password",
- TEST_FIXTURES_KEYSTORE_PASSWORD
- " and more");
- } else if (!strcmp(type, "PEM")) {
- test_conf_set(conf, "ssl.certificate.location",
- TEST_CERTIFICATE_LOCATION);
- test_conf_set(conf, "ssl.key.location", TEST_KEY_LOCATION);
- if (correct_password)
- test_conf_set(conf, "ssl.key.password",
- TEST_FIXTURES_KEY_PASSWORD);
-                else
-                        test_conf_set(conf, "ssl.key.password",
-                                      TEST_FIXTURES_KEY_PASSWORD
-                                      " and more");
- } else {
- TEST_FAIL("Unexpected key type\n");
- }
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- if ((rk != NULL) != correct_password) {
- TEST_FAIL("Expected rd_kafka creation to %s\n",
- correct_password ? "succeed" : "fail");
- }
-
- if (rk)
- rd_kafka_destroy(rk);
- else
- rd_kafka_conf_destroy(conf);
-
- SUB_TEST_PASS();
-
-#undef TEST_FIXTURES_KEYSTORE_PASSWORD
-#undef TEST_FIXTURES_KEY_PASSWORD
-#undef TEST_KEYSTORE_LOCATION
-#undef TEST_CERTIFICATE_LOCATION
-#undef TEST_KEY_LOCATION
-#undef TEST_FIXTURES_FOLDER
-#undef TEST_FIXTURES_SSL_FOLDER
-}
-
-
-int main_0133_ssl_keys(int argc, char **argv) {
- do_test_ssl_keys("PKCS12", rd_true);
- do_test_ssl_keys("PKCS12", rd_false);
- do_test_ssl_keys("PEM", rd_true);
- do_test_ssl_keys("PEM", rd_false);
- return 0;
-}
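
A side-by-side sketch of the two client-certificate configuration shapes exercised above; paths and passwords are placeholders:

    #include <librdkafka/rdkafka.h>

    /* Configure either a PKCS#12 keystore or a PEM cert/key pair. */
    static void conf_client_cert(rd_kafka_conf_t *conf, int use_pkcs12) {
            char errstr[512];

            rd_kafka_conf_set(conf, "security.protocol", "ssl", errstr,
                              sizeof(errstr));
            if (use_pkcs12) {
                    rd_kafka_conf_set(conf, "ssl.keystore.location",
                                      "client.keystore.p12", errstr,
                                      sizeof(errstr));
                    rd_kafka_conf_set(conf, "ssl.keystore.password",
                                      "secret", errstr, sizeof(errstr));
            } else {
                    rd_kafka_conf_set(conf, "ssl.certificate.location",
                                      "client.pem", errstr, sizeof(errstr));
                    rd_kafka_conf_set(conf, "ssl.key.location", "client.key",
                                      errstr, sizeof(errstr));
                    rd_kafka_conf_set(conf, "ssl.key.password", "secret",
                                      errstr, sizeof(errstr));
            }
    }
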
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c
deleted file mode 100644
index d24d52c64..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-
-static void test_providers(const char *providers,
- rd_bool_t must_pass,
- rd_bool_t must_fail) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- char errstr[512];
-
- SUB_TEST_QUICK("providers=%s, %s pass, %s fail", providers,
- must_pass ? "must" : "may", must_fail ? "must" : "may");
-
- test_conf_init(&conf, NULL, 10);
-
- /* Enable debugging so we get some extra information on
- * OpenSSL version and provider versions in the test log. */
- test_conf_set(conf, "debug", "security");
- test_conf_set(conf, "ssl.providers", providers);
- test_conf_set(conf, "security.protocol", "ssl");
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
-
- TEST_SAY("rd_kafka_new(ssl.providers=%s): %s\n", providers,
- rk ? "success" : errstr);
-
- if (must_pass && !rk)
- TEST_FAIL("Expected ssl.providers=%s to work, got %s",
- providers, errstr);
- else if (must_fail && rk)
- TEST_FAIL("Expected ssl.providers=%s to fail", providers);
-
- if (!rk)
- rd_kafka_conf_destroy(conf);
- else
- rd_kafka_destroy(rk);
-
- SUB_TEST_PASS();
-}
-
-int main_0134_ssl_provider(int argc, char **argv) {
- rd_kafka_conf_t *conf;
- char errstr[512];
- rd_kafka_conf_res_t res;
-
- test_conf_init(&conf, NULL, 10);
-
- /* Check that we're linked/built with OpenSSL 3.x */
- res = rd_kafka_conf_set(conf, "ssl.providers", "a,b", errstr,
- sizeof(errstr));
- rd_kafka_conf_destroy(conf);
- if (res == RD_KAFKA_CONF_INVALID) {
- TEST_SKIP("%s\n", errstr);
- return 0;
- }
-
-        /* Must pass since the 'default' provider is always built in. */
-        test_providers("default", rd_true, rd_false);
-        /* May fail if the legacy provider is not available. */
-        test_providers("default,legacy", rd_false, rd_false);
-        /* Must fail since the provider does not exist. */
-        test_providers("default,thisProviderDoesNotExist", rd_false, rd_true);
- return 0;
-}
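
A sketch of the OpenSSL 3.x feature probe used in main above; on builds linked against older OpenSSL the ssl.providers property itself is rejected:

    #include <librdkafka/rdkafka.h>

    /* Returns 1 if this build accepts an ssl.providers list. */
    static int set_providers(rd_kafka_conf_t *conf, const char *providers) {
            char errstr[512];

            return rd_kafka_conf_set(conf, "ssl.providers", providers,
                                     errstr, sizeof(errstr)) ==
                   RD_KAFKA_CONF_OK;
    }
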
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp
deleted file mode 100644
index 20e2e4f65..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Verify that SASL credentials can be updated.
- */
-#include "testcpp.h"
-
-
-
-class authErrorEventCb : public RdKafka::EventCb {
- public:
- authErrorEventCb() : error_seen(false) {
- }
-
- void event_cb(RdKafka::Event &event) {
- switch (event.type()) {
- case RdKafka::Event::EVENT_ERROR:
- Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": "
- << event.str() << "\n");
- if (event.err() == RdKafka::ERR__AUTHENTICATION)
- error_seen = true;
- break;
-
- case RdKafka::Event::EVENT_LOG:
- Test::Say(tostr() << "Log: " << event.str() << "\n");
- break;
-
- default:
- break;
- }
- }
-
- bool error_seen;
-};
-
-
-/**
- * @brief Test setting SASL credentials.
- *
- * 1. Switch out the proper username/password for invalid ones.
- * 2. Verify that we get an auth failure.
- * 3. Set the proper username/passwords.
- * 4. Verify that we can now connect.
- */
-static void do_test(bool set_after_auth_failure) {
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 30);
-
- SUB_TEST_QUICK("set_after_auth_failure=%s",
- set_after_auth_failure ? "yes" : "no");
-
- /* Get the correct sasl.username and sasl.password */
- std::string username, password;
- if (conf->get("sasl.username", username) ||
- conf->get("sasl.password", password)) {
- delete conf;
- SUB_TEST_SKIP("sasl.username and/or sasl.password not configured\n");
- return;
- }
-
- /* Replace with incorrect ones */
- Test::conf_set(conf, "sasl.username", "ThisIsNotRight");
- Test::conf_set(conf, "sasl.password", "Neither Is This");
-
- /* Set up an event callback to track authentication errors */
- authErrorEventCb pEvent = authErrorEventCb();
- std::string errstr;
- if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail(errstr);
-
- /* Create client */
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- if (set_after_auth_failure) {
- Test::Say("Awaiting auth failure\n");
-
- while (!pEvent.error_seen)
- p->poll(1000);
-
- Test::Say("Authentication error seen\n");
- }
-
- Test::Say("Setting proper credentials\n");
- RdKafka::Error *error = p->sasl_set_credentials(username, password);
- if (error)
- Test::Fail("Failed to set credentials: " + error->str());
-
- Test::Say("Expecting successful cluster authentication\n");
- const std::string clusterid = p->clusterid(5 * 1000);
-
- if (clusterid.empty())
- Test::Fail("Expected clusterid() to succeed");
-
- delete p;
-
- SUB_TEST_PASS();
-}
-
-extern "C" {
-int main_0135_sasl_credentials(int argc, char **argv) {
- const char *mech = test_conf_get(NULL, "sasl.mechanism");
-
- if (strcmp(mech, "PLAIN") && strncmp(mech, "SCRAM", 5)) {
- Test::Skip("Test requires SASL PLAIN or SASL SCRAM\n");
- return 0;
- }
-
- do_test(false);
- do_test(true);
-
- return 0;
-}
-}
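
The C counterpart of the sasl_set_credentials() method used above is rd_kafka_sasl_set_credentials(); a sketch, noting that the new credentials take effect on the next connection attempt:

    #include <librdkafka/rdkafka.h>

    /* Swap in new SASL credentials on a live client instance. */
    static int update_credentials(rd_kafka_t *rk, const char *user,
                                  const char *pass) {
            rd_kafka_error_t *error =
                rd_kafka_sasl_set_credentials(rk, user, pass);

            if (error) {
                    rd_kafka_error_destroy(error);
                    return -1;
            }
            return 0;
    }
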
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c
deleted file mode 100644
index 2c29bd14a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "rdkafka.h"
-
-#ifndef _WIN32
-#include <netdb.h>
-#else
-#define WIN32_LEAN_AND_MEAN
-#include <winsock2.h>
-#include <ws2ipdef.h>
-#include <ws2tcpip.h>
-#endif
-
-/**
- * @name Test a custom address resolution callback.
- *
- * The test sets bogus bootstrap.servers, uses the resolution callback to
- * resolve to a bogus address, and then verifies that the address is passed
- * to the connect callback. If the resolution callback is not invoked, or if the
- * connect callback is not invoked with the output of the resolution callback,
- * the test will fail.
- */
-
-/**
- * Stage of the test:
- * 0: expecting resolve_cb to be invoked with TESTING_RESOLVE_CB:1234
- * 1: expecting resolve_cb to be invoked with NULL, NULL
- * 2: expecting connect_cb to be invoked with socket address 127.1.2.3:4321
- * 3: done
- */
-static rd_atomic32_t stage;
-
-/** Exposes current test struct (in TLS) to callbacks. */
-static struct test *this_test;
-
-static int resolve_cb(const char *node,
- const char *service,
- const struct addrinfo *hints,
- struct addrinfo **res,
- void *opaque) {
-
- int32_t cnt;
-
- test_curr = this_test;
-
- cnt = rd_atomic32_get(&stage);
-
- TEST_SAY("resolve_cb invoked: node=%s service=%s stage=%d\n", node,
- service, cnt);
-
- if (cnt == 0) {
- /* Stage 0: return a bogus address. */
-
- struct sockaddr_in *addr;
-
- TEST_ASSERT(node != NULL);
- TEST_ASSERT(strcmp(node, "TESTING_RESOLVE_CB") == 0,
- "unexpected node: %s", node);
- TEST_ASSERT(service != NULL);
- TEST_ASSERT(strcmp(service, "1234") == 0,
- "unexpected service: %s", service);
-
- addr = calloc(1, sizeof(struct sockaddr_in));
- addr->sin_family = AF_INET;
- addr->sin_port = htons(4321);
- addr->sin_addr.s_addr = htonl(0x7f010203) /* 127.1.2.3 */;
-
- *res = calloc(1, sizeof(struct addrinfo));
- (*res)->ai_family = AF_INET;
- (*res)->ai_socktype = SOCK_STREAM;
- (*res)->ai_protocol = IPPROTO_TCP;
- (*res)->ai_addrlen = sizeof(struct sockaddr_in);
- (*res)->ai_addr = (struct sockaddr *)addr;
- } else if (cnt == 1) {
- /* Stage 1: free the bogus address returned in stage 0. */
-
- TEST_ASSERT(node == NULL);
- TEST_ASSERT(service == NULL);
- TEST_ASSERT(hints == NULL);
- free((*res)->ai_addr);
- free(*res);
- } else {
- /* Stage 2+: irrelevant, simply fail to resolve. */
-
- return -1;
- }
-
- rd_atomic32_add(&stage, 1);
- return 0;
-}
-
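-/**
- * @brief Connect callback: verifies that the address passed in is the
- *        bogus 127.1.2.3:4321 constructed by resolve_cb in stage 0.
- */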
-static int connect_cb(int s,
- const struct sockaddr *addr,
- int addrlen,
- const char *id,
- void *opaque) {
- /* Stage 2: assert that the address is the expected bogus one. */
-
- int32_t cnt;
- struct sockaddr_in *addr_in;
-
- test_curr = this_test;
-
- cnt = rd_atomic32_get(&stage);
-
- TEST_SAY("connect_cb invoked: stage=%d\n", cnt);
-
- TEST_ASSERT(cnt == 2, "connect_cb invoked in unexpected stage: %d",
- cnt);
-
- TEST_ASSERT(addr->sa_family == AF_INET,
- "address has unexpected type: %d", addr->sa_family);
-
- addr_in = (struct sockaddr_in *)(void *)addr;
-
- TEST_ASSERT(addr_in->sin_port == htons(4321),
- "address has unexpected port: %d",
- ntohs(addr_in->sin_port));
- TEST_ASSERT(addr_in->sin_addr.s_addr == htonl(0x7f010203),
- "address has unexpected host: 0x%x",
- ntohl(addr_in->sin_addr.s_addr));
-
- rd_atomic32_add(&stage, 1);
-
- /* The test has succeeded. Just report the connection as failed
- * for simplicity. */
- return -1;
-}
-
-int main_0136_resolve_cb(int argc, char **argv) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
-
- this_test = test_curr;
-
- rd_atomic32_init(&stage, 0);
-
- test_conf_init(&conf, NULL, 0);
- rd_kafka_conf_set_resolve_cb(conf, resolve_cb);
- rd_kafka_conf_set_connect_cb(conf, connect_cb);
-
- TEST_SAY("Setting bogus broker list\n");
- test_conf_set(conf, "bootstrap.servers", "TESTING_RESOLVE_CB:1234");
-
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- while (rd_atomic32_get(&stage) != 3)
- rd_sleep(1);
-
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c
deleted file mode 100644
index 4e3c855d2..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2022, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-typedef struct consumer_s {
- const char *what;
- rd_kafka_queue_t *rkq;
- int timeout_ms;
- int consume_msg_cnt;
- int expected_msg_cnt;
- rd_kafka_t *rk;
- uint64_t testid;
- test_msgver_t *mv;
- struct test *test;
-} consumer_t;
-
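-/**
- * @brief Thread worker: consumes one batch from the given queue using
- *        rd_kafka_consume_batch_queue() and asserts that exactly the
- *        expected number of messages (from the expected testid) is returned.
- */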
-static int consumer_batch_queue(void *arg) {
- consumer_t *arguments = arg;
- int msg_cnt = 0;
- int i;
- test_timing_t t_cons;
-
- rd_kafka_queue_t *rkq = arguments->rkq;
- int timeout_ms = arguments->timeout_ms;
- const int consume_msg_cnt = arguments->consume_msg_cnt;
- rd_kafka_t *rk = arguments->rk;
- uint64_t testid = arguments->testid;
- rd_kafka_message_t **rkmessage =
- malloc(consume_msg_cnt * sizeof(*rkmessage));
-
- if (arguments->test)
- test_curr = arguments->test;
-
- TEST_SAY(
- "%s calling consume_batch_queue(timeout=%d, msgs=%d) "
- "and expecting %d messages back\n",
- rd_kafka_name(rk), timeout_ms, consume_msg_cnt,
- arguments->expected_msg_cnt);
-
- TIMING_START(&t_cons, "CONSUME");
- msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage,
- consume_msg_cnt);
- TIMING_STOP(&t_cons);
-
- TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk),
- msg_cnt, arguments->consume_msg_cnt,
- arguments->expected_msg_cnt);
- TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt,
- "consumed %d messages, expected %d", msg_cnt,
- arguments->expected_msg_cnt);
-
- for (i = 0; i < msg_cnt; i++) {
- if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0)
- TEST_FAIL(
- "The message is not from testid "
- "%" PRId64,
- testid);
- rd_kafka_message_destroy(rkmessage[i]);
- }
-
- rd_free(rkmessage);
-
- return 0;
-}
-
-
-static void do_test_consume_batch_with_seek(void) {
- rd_kafka_queue_t *rkq;
- const char *topic;
- rd_kafka_t *consumer;
- int p;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- consumer_t consumer_args = RD_ZERO_INIT;
- test_msgver_t mv;
- thrd_t thread_id;
- rd_kafka_error_t *err;
- rd_kafka_topic_partition_list_t *seek_toppars;
- const int partition_cnt = 2;
- const int timeout_ms = 10000;
- const int consume_msg_cnt = 10;
- const int produce_msg_cnt = 8;
- const int32_t seek_partition = 0;
- const int64_t seek_offset = 1;
- const int expected_msg_cnt = produce_msg_cnt - seek_offset;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- testid = test_id_generate();
- test_msgver_init(&mv, testid);
-
- /* Produce messages */
- topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
-
- test_create_topic(NULL, topic, partition_cnt, 1);
-
- for (p = 0; p < partition_cnt; p++)
- test_produce_msgs_easy(topic, testid, p,
- produce_msg_cnt / partition_cnt);
-
- /* Create consumers */
- consumer = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(consumer, topic);
- test_consumer_wait_assignment(consumer, rd_false);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_get_consumer(consumer);
-
- consumer_args.what = "CONSUMER";
- consumer_args.rkq = rkq;
- consumer_args.timeout_ms = timeout_ms;
- consumer_args.consume_msg_cnt = consume_msg_cnt;
- consumer_args.expected_msg_cnt = expected_msg_cnt;
- consumer_args.rk = consumer;
- consumer_args.testid = testid;
- consumer_args.mv = &mv;
- consumer_args.test = test_curr;
- if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
- thrd_success)
- TEST_FAIL("Failed to create thread for %s", "CONSUMER");
-
- seek_toppars = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(seek_toppars, topic, seek_partition);
- rd_kafka_topic_partition_list_set_offset(seek_toppars, topic,
- seek_partition, seek_offset);
- err = rd_kafka_seek_partitions(consumer, seek_toppars, 2000);
-
- TEST_ASSERT(
- !err, "Failed to seek partition %d for topic %s to offset %" PRId64,
- seek_partition, topic, seek_offset);
-
- thrd_join(thread_id, NULL);
-
- test_msgver_verify("CONSUME", &mv,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
- TEST_MSGVER_BY_OFFSET,
- 0, expected_msg_cnt);
- test_msgver_clear(&mv);
-
- rd_kafka_topic_partition_list_destroy(seek_toppars);
-
- rd_kafka_queue_destroy(rkq);
-
- test_consumer_close(consumer);
-
- rd_kafka_destroy(consumer);
-
- SUB_TEST_PASS();
-}
-
-
-static void do_test_consume_batch_with_pause_and_resume_different_batch(void) {
- rd_kafka_queue_t *rkq;
- const char *topic;
- rd_kafka_t *consumer;
- int p;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- consumer_t consumer_args = RD_ZERO_INIT;
- test_msgver_t mv;
- thrd_t thread_id;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *pause_partition_list;
- const int timeout_ms = 2000;
- const int consume_msg_cnt = 10;
- const int produce_msg_cnt = 8;
- const int partition_cnt = 2;
- const int expected_msg_cnt = 4;
- int32_t pause_partition = 0;
- int32_t running_partition = 1;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- testid = test_id_generate();
- test_msgver_init(&mv, testid);
-
- /* Produce messages */
- topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
-
- test_create_topic(NULL, topic, partition_cnt, 1);
-
- for (p = 0; p < partition_cnt; p++)
- test_produce_msgs_easy(topic, testid, p,
- produce_msg_cnt / partition_cnt);
-
- /* Create consumers */
- consumer = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(consumer, topic);
- test_consumer_wait_assignment(consumer, rd_false);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_get_consumer(consumer);
-
- consumer_args.what = "CONSUMER";
- consumer_args.rkq = rkq;
- consumer_args.timeout_ms = timeout_ms;
- consumer_args.consume_msg_cnt = consume_msg_cnt;
- consumer_args.expected_msg_cnt = expected_msg_cnt;
- consumer_args.rk = consumer;
- consumer_args.testid = testid;
- consumer_args.mv = &mv;
- consumer_args.test = test_curr;
- if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
- thrd_success)
- TEST_FAIL("Failed to create thread for %s", "CONSUMER");
-
- pause_partition_list = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(pause_partition_list, topic,
- pause_partition);
-
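- /* Let the batch-consume thread start fetching before the partition
- * is paused. */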
- rd_sleep(1);
- err = rd_kafka_pause_partitions(consumer, pause_partition_list);
-
- TEST_ASSERT(!err, "Failed to pause partition %d for topic %s",
- pause_partition, topic);
-
- thrd_join(thread_id, NULL);
-
- test_msgver_verify_part("CONSUME", &mv,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
- TEST_MSGVER_BY_OFFSET,
- topic, running_partition, 0, expected_msg_cnt);
-
- test_msgver_clear(&mv);
- test_msgver_init(&mv, testid);
- consumer_args.mv = &mv;
-
- err = rd_kafka_resume_partitions(consumer, pause_partition_list);
-
- TEST_ASSERT(!err, "Failed to resume partition %d for topic %s",
- pause_partition, topic);
-
- consumer_batch_queue(&consumer_args);
-
- test_msgver_verify_part("CONSUME", &mv,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
- TEST_MSGVER_BY_OFFSET,
- topic, pause_partition, 0, expected_msg_cnt);
-
- rd_kafka_topic_partition_list_destroy(pause_partition_list);
-
- test_msgver_clear(&mv);
-
- rd_kafka_queue_destroy(rkq);
-
- test_consumer_close(consumer);
-
- rd_kafka_destroy(consumer);
-
- SUB_TEST_PASS();
-}
-
-
-static void do_test_consume_batch_with_pause_and_resume_same_batch(void) {
- rd_kafka_queue_t *rkq;
- const char *topic;
- rd_kafka_t *consumer;
- int p;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- consumer_t consumer_args = RD_ZERO_INIT;
- test_msgver_t mv;
- thrd_t thread_id;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_partition_list_t *pause_partition_list;
- const int timeout_ms = 10000;
- const int consume_msg_cnt = 10;
- const int produce_msg_cnt = 8;
- const int partition_cnt = 2;
- int32_t pause_partition = 0;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- testid = test_id_generate();
- test_msgver_init(&mv, testid);
-
- /* Produce messages */
- topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
-
- test_create_topic(NULL, topic, partition_cnt, 1);
-
- for (p = 0; p < partition_cnt; p++)
- test_produce_msgs_easy(topic, testid, p,
- produce_msg_cnt / partition_cnt);
-
- /* Create consumers */
- consumer = test_create_consumer(topic, NULL, conf, NULL);
-
- test_consumer_subscribe(consumer, topic);
- test_consumer_wait_assignment(consumer, rd_false);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_get_consumer(consumer);
-
- consumer_args.what = "CONSUMER";
- consumer_args.rkq = rkq;
- consumer_args.timeout_ms = timeout_ms;
- consumer_args.consume_msg_cnt = consume_msg_cnt;
- consumer_args.expected_msg_cnt = produce_msg_cnt;
- consumer_args.rk = consumer;
- consumer_args.testid = testid;
- consumer_args.mv = &mv;
- consumer_args.test = test_curr;
- if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
- thrd_success)
- TEST_FAIL("Failed to create thread for %s", "CONSUMER");
-
- pause_partition_list = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(pause_partition_list, topic,
- pause_partition);
-
- rd_sleep(1);
- err = rd_kafka_pause_partitions(consumer, pause_partition_list);
-
- TEST_ASSERT(!err, "Failed to pause partition %d for topic %s",
- pause_partition, topic);
-
- rd_sleep(1);
-
- err = rd_kafka_resume_partitions(consumer, pause_partition_list);
-
- TEST_ASSERT(!err, "Failed to resume partition %d for topic %s",
- pause_partition, topic);
-
- thrd_join(thread_id, NULL);
-
- test_msgver_verify("CONSUME", &mv,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
- TEST_MSGVER_BY_OFFSET,
- 0, produce_msg_cnt);
-
- rd_kafka_topic_partition_list_destroy(pause_partition_list);
-
- test_msgver_clear(&mv);
-
- rd_kafka_queue_destroy(rkq);
-
- test_consumer_close(consumer);
-
- rd_kafka_destroy(consumer);
-
- SUB_TEST_PASS();
-}
-
-
-static void do_test_consume_batch_store_offset(void) {
- rd_kafka_queue_t *rkq;
- const char *topic;
- rd_kafka_t *consumer;
- int p;
- int i;
- uint64_t testid;
- rd_kafka_conf_t *conf;
- consumer_t consumer_args = RD_ZERO_INIT;
- test_msgver_t mv;
- const int partition_cnt = 1;
- const int timeout_ms = 10000;
- const int consume_msg_cnt = 4;
- const int no_of_consume = 2;
- const int produce_msg_cnt = 8;
- const int expected_msg_cnt = produce_msg_cnt;
-
- SUB_TEST();
-
- test_conf_init(&conf, NULL, 60);
- test_conf_set(conf, "enable.auto.commit", "false");
- test_conf_set(conf, "enable.auto.offset.store", "true");
- test_conf_set(conf, "auto.offset.reset", "earliest");
-
- testid = test_id_generate();
- test_msgver_init(&mv, testid);
-
- /* Produce messages */
- topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
-
- test_create_topic(NULL, topic, partition_cnt, 1);
-
- for (p = 0; p < partition_cnt; p++)
- test_produce_msgs_easy(topic, testid, p,
- produce_msg_cnt / partition_cnt);
-
- for (i = 0; i < no_of_consume; i++) {
-
- /* Create consumers */
- consumer = test_create_consumer(topic, NULL,
- rd_kafka_conf_dup(conf), NULL);
- test_consumer_subscribe(consumer, topic);
- test_consumer_wait_assignment(consumer, rd_false);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_get_consumer(consumer);
-
- consumer_args.what = "CONSUMER";
- consumer_args.rkq = rkq;
- consumer_args.timeout_ms = timeout_ms;
- consumer_args.consume_msg_cnt = consume_msg_cnt;
- consumer_args.expected_msg_cnt =
- produce_msg_cnt / no_of_consume;
- consumer_args.rk = consumer;
- consumer_args.testid = testid;
- consumer_args.mv = &mv;
- consumer_args.test = test_curr;
-
- consumer_batch_queue(&consumer_args);
- rd_kafka_commit(consumer, NULL, rd_false);
-
- rd_kafka_queue_destroy(rkq);
- test_consumer_close(consumer);
- rd_kafka_destroy(consumer);
- }
-
- test_msgver_verify("CONSUME", &mv,
- TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
- TEST_MSGVER_BY_OFFSET,
- 0, expected_msg_cnt);
-
- test_msgver_clear(&mv);
-
- rd_kafka_conf_destroy(conf);
-
- SUB_TEST_PASS();
-}
-
-
-static void do_test_consume_batch_control_msgs(void) {
- const char *topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
- const int32_t partition = 0;
- rd_kafka_conf_t *conf, *c_conf;
- rd_kafka_t *producer, *consumer;
- uint64_t testid;
- const int msgcnt[2] = {2, 3};
- test_msgver_t mv;
- rd_kafka_queue_t *rkq;
- consumer_t consumer_args = RD_ZERO_INIT;
- const int partition_cnt = 1;
- const int timeout_ms = 5000;
- const int consume_msg_cnt = 10;
- const int expected_msg_cnt = 2;
- int32_t pause_partition = 0;
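- /* Each transaction is terminated by a control record (commit or abort
- * marker) occupying one offset, hence the +2 past the data messages. */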
- int64_t expected_offset = msgcnt[0] + msgcnt[1] + 2;
- rd_kafka_topic_partition_list_t *pause_partition_list;
- rd_kafka_resp_err_t err;
- thrd_t thread_id;
-
- SUB_TEST("Testing control msgs flow");
-
- testid = test_id_generate();
-
- test_conf_init(&conf, NULL, 30);
-
- test_conf_set(conf, "transactional.id", topic);
- test_conf_set(conf, "batch.num.messages", "1");
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- producer = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- test_create_topic(producer, topic, partition_cnt, 1);
-
- TEST_CALL_ERROR__(rd_kafka_init_transactions(producer, 30 * 1000));
-
- /*
- * Transaction 1
- */
- TEST_SAY("Transaction 1: %d msgs\n", msgcnt[0]);
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(producer));
- test_produce_msgs2(producer, topic, testid, partition, 0, msgcnt[0],
- NULL, 0);
- TEST_CALL_ERROR__(rd_kafka_commit_transaction(producer, -1));
-
- /*
- * Transaction 2
- */
- TEST_SAY("Transaction 2: %d msgs\n", msgcnt[1]);
- TEST_CALL_ERROR__(rd_kafka_begin_transaction(producer));
- test_produce_msgs2(producer, topic, testid, partition, 0, msgcnt[1],
- NULL, 0);
- TEST_CALL_ERROR__(rd_kafka_abort_transaction(producer, -1));
-
- rd_kafka_destroy(producer);
-
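- /* Give the brokers a moment to finalize the transaction control
- * records before the consumer starts. */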
- rd_sleep(2);
-
- /*
- * Consumer
- */
- test_conf_init(&c_conf, NULL, 0);
- test_conf_set(c_conf, "enable.auto.commit", "false");
- test_conf_set(c_conf, "enable.auto.offset.store", "true");
- test_conf_set(c_conf, "auto.offset.reset", "earliest");
- consumer = test_create_consumer(topic, NULL, c_conf, NULL);
-
- test_consumer_subscribe(consumer, topic);
- test_consumer_wait_assignment(consumer, rd_false);
-
- /* Create generic consume queue */
- rkq = rd_kafka_queue_get_consumer(consumer);
-
- test_msgver_init(&mv, testid);
- test_msgver_ignore_eof(&mv);
-
- consumer_args.what = "CONSUMER";
- consumer_args.rkq = rkq;
- consumer_args.timeout_ms = timeout_ms;
- consumer_args.consume_msg_cnt = consume_msg_cnt;
- consumer_args.expected_msg_cnt = expected_msg_cnt;
- consumer_args.rk = consumer;
- consumer_args.testid = testid;
- consumer_args.mv = &mv;
- consumer_args.test = test_curr;
-
-
- if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
- thrd_success)
- TEST_FAIL("Failed to create thread for %s", "CONSUMER");
-
- pause_partition_list = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(pause_partition_list, topic,
- pause_partition);
-
- rd_sleep(1);
- err = rd_kafka_pause_partitions(consumer, pause_partition_list);
-
- TEST_ASSERT(!err, "Failed to pause partition %d for topic %s",
- pause_partition, topic);
-
- rd_sleep(1);
-
- err = rd_kafka_resume_partitions(consumer, pause_partition_list);
-
- TEST_ASSERT(!err, "Failed to resume partition %d for topic %s",
- pause_partition, topic);
-
- thrd_join(thread_id, NULL);
-
- rd_kafka_commit(consumer, NULL, rd_false);
-
- rd_kafka_committed(consumer, pause_partition_list, timeout_ms);
-
- TEST_ASSERT(pause_partition_list->elems[0].offset == expected_offset,
- "Expected offset should be %" PRId64 ", but it is %" PRId64,
- expected_offset, pause_partition_list->elems[0].offset);
-
- rd_kafka_topic_partition_list_destroy(pause_partition_list);
-
- rd_kafka_queue_destroy(rkq);
-
- test_msgver_clear(&mv);
-
- test_consumer_close(consumer);
-
- rd_kafka_destroy(consumer);
-
- SUB_TEST_PASS();
-}
-
-
-int main_0137_barrier_batch_consume(int argc, char **argv) {
- do_test_consume_batch_with_seek();
- do_test_consume_batch_store_offset();
- do_test_consume_batch_with_pause_and_resume_different_batch();
- do_test_consume_batch_with_pause_and_resume_same_batch();
- do_test_consume_batch_control_msgs();
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c
deleted file mode 100644
index 0f9021de9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2023, Confluent Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-#include "../src/rdkafka_proto.h"
-
-#include <stdarg.h>
-
-/**
- * @brief Verify that error codes returned by the OffsetCommit call
- *        underlying AlterConsumerGroupOffsets are propagated to the
- *        corresponding passed partitions.
- */
-static void do_test_AlterConsumerGroupOffsets_errors(int req_timeout_ms) {
-#define TEST_ERR_SIZE 10
- int i, j;
- rd_kafka_conf_t *conf;
- rd_kafka_t *rk;
- rd_kafka_queue_t *q;
- rd_kafka_mock_cluster_t *mcluster;
- rd_kafka_topic_partition_list_t *to_alter;
- const rd_kafka_topic_partition_list_t *partitions;
- rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets;
- const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
- const rd_kafka_group_result_t **gres;
- size_t gres_cnt;
- char errstr[512];
- const char *bootstraps;
- const char *topic = "test";
- const char *group_id = topic;
- rd_kafka_AdminOptions_t *options = NULL;
- rd_kafka_event_t *rkev = NULL;
- rd_kafka_resp_err_t errs[TEST_ERR_SIZE] = {
- RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
- RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
- RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
- RD_KAFKA_RESP_ERR_INVALID_GROUP_ID,
- RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
- RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
- RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
- RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
- RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED};
-
- SUB_TEST_QUICK("request timeout %d", req_timeout_ms);
-
- test_conf_init(&conf, NULL, 60);
-
- mcluster = test_mock_cluster_new(1, &bootstraps);
-
- rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
-
- test_conf_set(conf, "bootstrap.servers", bootstraps);
-
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- q = rd_kafka_queue_get_main(rk);
-
- if (req_timeout_ms > 0) {
- /* Admin options */
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
- TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
- options, req_timeout_ms, errstr, sizeof(errstr)));
- }
-
-
- for (i = 0; i < TEST_ERR_SIZE; i++) {
- /* Offsets to alter */
- to_alter = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(to_alter, topic, 0)->offset =
- 3;
- cgoffsets =
- rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter);
-
- TEST_SAY("Call AlterConsumerGroupOffsets, err %s\n",
- rd_kafka_err2name(errs[i]));
- rd_kafka_mock_push_request_errors(
- mcluster, RD_KAFKAP_OffsetCommit, 1, errs[i]);
- rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options,
- q);
-
- rd_kafka_topic_partition_list_destroy(to_alter);
- rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets);
-
- TEST_SAY("AlterConsumerGroupOffsets.queue_poll, err %s\n",
- rd_kafka_err2name(errs[i]));
- /* Poll result queue for AlterConsumerGroupOffsets result.
- * Print but otherwise ignore other event types
- * (typically generic Error events). */
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
- TEST_SAY("AlterConsumerGroupOffsets: got %s\n",
- rd_kafka_event_name(rkev));
- if (rkev == NULL)
- continue;
- if (rd_kafka_event_error(rkev))
- TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
-
- if (rd_kafka_event_type(rkev) ==
- RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
- break;
-
- rd_kafka_event_destroy(rkev);
- }
-
- /* Convert event to proper result */
- res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
- TEST_ASSERT(res,
- "expected AlterConsumerGroupOffsets_result, not %s",
- rd_kafka_event_name(rkev));
-
- gres = rd_kafka_AlterConsumerGroupOffsets_result_groups(
- res, &gres_cnt);
- TEST_ASSERT(gres && gres_cnt == 1,
- "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
-
- partitions = rd_kafka_group_result_partitions(gres[0]);
-
- /* Verify expected errors */
- for (j = 0; j < partitions->cnt; j++) {
- rd_kafka_topic_partition_t *rktpar =
- &partitions->elems[j];
- TEST_ASSERT_LATER(rktpar->err == errs[i],
- "Result %s [%" PRId32
- "] has error %s, "
- "expected %s",
- topic, 0,
- rd_kafka_err2name(rktpar->err),
- rd_kafka_err2name(errs[i]));
- }
-
- rd_kafka_event_destroy(rkev);
- }
- if (options)
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_queue_destroy(q);
-
- rd_kafka_destroy(rk);
-
- test_mock_cluster_destroy(mcluster);
-
- TEST_LATER_CHECK();
-
- SUB_TEST_PASS();
-
-#undef TEST_ERR_SIZE
-}
-
-int main_0138_admin_mock(int argc, char **argv) {
-
- if (test_needs_auth()) {
- TEST_SKIP("Mock cluster does not support SSL/SASL\n");
- return 0;
- }
-
- do_test_AlterConsumerGroupOffsets_errors(-1);
- do_test_AlterConsumerGroupOffsets_errors(1000);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c b/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c
deleted file mode 100644
index ad2b7e870..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * Tests that producing to unknown topic fails.
- * Issue #39
- *
- * NOTE! This test requires auto.create.topics.enable=false to be
- * configured on the broker!
- */
-
-#define _GNU_SOURCE
-#include <sys/time.h>
-#include <time.h>
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-static int msgs_wait = 0; /* bitmask */
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-static void dr_cb(rd_kafka_t *rk,
- void *payload,
- size_t len,
- rd_kafka_resp_err_t err,
- void *opaque,
- void *msg_opaque) {
- int msgid = *(int *)msg_opaque;
-
- free(msg_opaque);
-
- if (!(msgs_wait & (1 << msgid)))
- TEST_FAIL(
- "Unwanted delivery report for message #%i "
- "(waiting for 0x%x)\n",
- msgid, msgs_wait);
-
- TEST_SAY("Delivery report for message #%i: %s\n", msgid,
- rd_kafka_err2str(err));
-
- msgs_wait &= ~(1 << msgid);
-
- if (err != RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
- TEST_FAIL("Message #%i failed with unexpected error %s\n",
- msgid, rd_kafka_err2str(err));
-}
-
-
-int main(int argc, char **argv) {
- char topic[64];
- int partition = 0;
- int r;
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- rd_kafka_conf_t *conf;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
- char msg[128];
- int msgcnt = 10;
- int i;
-
- /* Generate unique topic name */
- test_conf_init(&conf, &topic_conf, 10);
-
- rd_snprintf(topic, sizeof(topic), "rdkafkatest1_unk_%x%x", rand(),
- rand());
-
- TEST_SAY(
- "\033[33mNOTE! This test requires "
- "auto.create.topics.enable=false to be configured on "
- "the broker!\033[0m\n");
-
- /* Set delivery report callback */
- rd_kafka_conf_set_dr_cb(conf, dr_cb);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n", strerror(errno));
-
- /* Produce messages */
- for (i = 0; i < msgcnt; i++) {
- int *msgidp = malloc(sizeof(*msgidp));
- *msgidp = i;
- rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
- i);
- r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
- strlen(msg), NULL, 0, msgidp);
- if (r == -1) {
- if (errno == ENOENT)
- TEST_SAY(
- "Failed to produce message #%i: "
- "unknown topic: good!\n",
- i);
- else
- TEST_FAIL("Failed to produce message #%i: %s\n",
- i, strerror(errno));
- } else {
- if (i > 5)
- TEST_FAIL(
- "Message #%i produced: "
- "should've failed\n",
- i);
- msgs_wait |= (1 << i);
- }
-
- /* After half the messages: sleep to allow the metadata
- * to be fetched from broker and update the actual partition
- * count: this will make subsequent produce() calls fail
- * immediately. */
- if (i == 5)
- sleep(2);
- }
-
- /* Wait for messages to time out */
- while (rd_kafka_outq_len(rk) > 0)
- rd_kafka_poll(rk, 50);
-
- if (msgs_wait != 0)
- TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
-
- /* Destroy topic */
- rd_kafka_topic_destroy(rkt);
-
- /* Destroy rdkafka instance */
- TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
- rd_kafka_destroy(rk);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp
deleted file mode 100644
index 9659ade97..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2016, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "testcpp.h"
-#include <cstring>
-
-/**
- * Manual test: idle producer
- */
-
-
-static void do_test_idle_producer() {
- RdKafka::Conf *conf;
- Test::conf_init(&conf, NULL, 0);
-
- std::string errstr;
- RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
- if (!p)
- Test::Fail("Failed to create Producer: " + errstr);
- delete conf;
-
- while (true)
- p->poll(1000);
-
- delete p;
-}
-
-
-extern "C" {
-int main_8000_idle(int argc, char **argv) {
- do_test_idle_producer();
- return 0;
-}
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt
deleted file mode 100644
index a9dccfa5e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-set(
- sources
- 0000-unittests.c
- 0001-multiobj.c
- 0002-unkpart.c
- 0003-msgmaxsize.c
- 0004-conf.c
- 0005-order.c
- 0006-symbols.c
- 0007-autotopic.c
- 0008-reqacks.c
- 0009-mock_cluster.c
- 0011-produce_batch.c
- 0012-produce_consume.c
- 0013-null-msgs.c
- 0014-reconsume-191.c
- 0015-offset_seeks.c
- 0016-client_swname.c
- 0017-compression.c
- 0018-cgrp_term.c
- 0019-list_groups.c
- 0020-destroy_hang.c
- 0021-rkt_destroy.c
- 0022-consume_batch.c
- 0025-timers.c
- 0026-consume_pause.c
- 0028-long_topicnames.c
- 0029-assign_offset.c
- 0030-offset_commit.c
- 0031-get_offsets.c
- 0033-regex_subscribe.c
- 0034-offset_reset.c
- 0035-api_version.c
- 0036-partial_fetch.c
- 0037-destroy_hang_local.c
- 0038-performance.c
- 0039-event.c
- 0040-io_event.c
- 0041-fetch_max_bytes.c
- 0042-many_topics.c
- 0043-no_connection.c
- 0044-partition_cnt.c
- 0045-subscribe_update.c
- 0046-rkt_cache.c
- 0047-partial_buf_tmout.c
- 0048-partitioner.c
- 0049-consume_conn_close.c
- 0050-subscribe_adds.c
- 0051-assign_adds.c
- 0052-msg_timestamps.c
- 0053-stats_cb.cpp
- 0054-offset_time.cpp
- 0055-producer_latency.c
- 0056-balanced_group_mt.c
- 0057-invalid_topic.cpp
- 0058-log.cpp
- 0059-bsearch.cpp
- 0060-op_prio.cpp
- 0061-consumer_lag.cpp
- 0062-stats_event.c
- 0063-clusterid.cpp
- 0064-interceptors.c
- 0065-yield.cpp
- 0066-plugins.cpp
- 0067-empty_topic.cpp
- 0068-produce_timeout.c
- 0069-consumer_add_parts.c
- 0070-null_empty.cpp
- 0072-headers_ut.c
- 0073-headers.c
- 0074-producev.c
- 0075-retry.c
- 0076-produce_retry.c
- 0077-compaction.c
- 0078-c_from_cpp.cpp
- 0079-fork.c
- 0080-admin_ut.c
- 0081-admin.c
- 0082-fetch_max_bytes.cpp
- 0083-cb_event.c
- 0084-destroy_flags.c
- 0085-headers.cpp
- 0086-purge.c
- 0088-produce_metadata_timeout.c
- 0089-max_poll_interval.c
- 0090-idempotence.c
- 0091-max_poll_interval_timeout.c
- 0092-mixed_msgver.c
- 0093-holb.c
- 0094-idempotence_msg_timeout.c
- 0095-all_brokers_down.cpp
- 0097-ssl_verify.cpp
- 0098-consumer-txn.cpp
- 0099-commit_metadata.c
- 0100-thread_interceptors.cpp
- 0101-fetch-from-follower.cpp
- 0102-static_group_rebalance.c
- 0103-transactions.c
- 0104-fetch_from_follower_mock.c
- 0105-transactions_mock.c
- 0106-cgrp_sess_timeout.c
- 0107-topic_recreate.c
- 0109-auto_create_topics.cpp
- 0110-batch_size.cpp
- 0111-delay_create_topics.cpp
- 0112-assign_unknown_part.c
- 0113-cooperative_rebalance.cpp
- 0114-sticky_partitioning.cpp
- 0115-producer_auth.cpp
- 0116-kafkaconsumer_close.cpp
- 0117-mock_errors.c
- 0118-commit_rebalance.c
- 0119-consumer_auth.cpp
- 0120-asymmetric_subscription.c
- 0121-clusterid.c
- 0122-buffer_cleaning_after_rebalance.c
- 0123-connections_max_idle.c
- 0124-openssl_invalid_engine.c
- 0125-immediate_flush.c
- 0126-oauthbearer_oidc.c
- 0128-sasl_callback_queue.cpp
- 0129-fetch_aborted_msgs.c
- 0130-store_offsets.c
- 0131-connect_timeout.c
- 0132-strategy_ordering.c
- 0133-ssl_keys.c
- 0134-ssl_provider.c
- 0135-sasl_credentials.cpp
- 0136-resolve_cb.c
- 0137-barrier_batch_consume.c
- 0138-admin_mock.c
- 8000-idle.cpp
- test.c
- testcpp.cpp
- rusage.c
-)
-
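-# sockem (the socket emulation layer used by some tests) is POSIX-only;
-# Windows builds instead pull in the tinycthread shims from the source tree.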
-if(NOT WIN32)
- list(APPEND sources sockem.c sockem_ctrl.c)
-else()
- list(APPEND sources ../src/tinycthread.c ../src/tinycthread_extra.c)
-endif()
-
-add_executable(test-runner ${sources})
-target_link_libraries(test-runner PUBLIC rdkafka++)
-
-add_test(NAME RdKafkaTestInParallel COMMAND test-runner -p5)
-add_test(NAME RdKafkaTestSequentially COMMAND test-runner -p1)
-add_test(NAME RdKafkaTestBrokerLess COMMAND test-runner -p5 -l)
-
-if(NOT WIN32 AND NOT APPLE)
- set(tests_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
- add_subdirectory(interceptor_test)
-endif()
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py b/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py
deleted file mode 100644
index 696fa88cc..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/env python3
-#
-# librdkafka test trivup app module
-#
-# Requires:
-# trivup python module
-# gradle in your PATH
-
-from trivup.trivup import App, UuidAllocator
-from trivup.apps.ZookeeperApp import ZookeeperApp
-from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
-from trivup.apps.KerberosKdcApp import KerberosKdcApp
-from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
-
-import json
-
-
-class LibrdkafkaTestApp(App):
- """ Sets up and executes the librdkafka regression tests.
- Assumes tests are in the current directory.
- Must be instantiated after ZookeeperApp and KafkaBrokerApp """
-
- def __init__(self, cluster, version, conf=None,
- tests=None, scenario="default"):
- super(LibrdkafkaTestApp, self).__init__(cluster, conf=conf)
-
- self.appid = UuidAllocator(self.cluster).next(self, trunc=8)
- self.autostart = False
- self.local_tests = True
- self.test_mode = conf.get('test_mode', 'bare')
- self.version = version
-
- # Generate test config file
- conf_blob = list()
- self.security_protocol = 'PLAINTEXT'
-
- f, self.test_conf_file = self.open_file('test.conf', 'perm')
- f.write('broker.address.family=v4\n'.encode('ascii'))
- f.write(('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
- f.write('test.timeout.multiplier=2\n'.encode('ascii'))
-
- sparse = conf.get('sparse_connections', None)
- if sparse is not None:
- f.write('enable.sparse.connections={}\n'.format(
- sparse).encode('ascii'))
-
- if version.startswith('0.9') or version.startswith('0.8'):
- conf_blob.append('api.version.request=false')
- conf_blob.append('broker.version.fallback=%s' % version)
- else:
- # any broker version with ApiVersion support
- conf_blob.append('broker.version.fallback=0.10.0.0')
- conf_blob.append('api.version.fallback.ms=0')
-
- # SASL (only one mechanism supported at a time)
- mech = self.conf.get('sasl_mechanisms', '').split(',')[0]
- if mech != '':
- conf_blob.append('sasl.mechanisms=%s' % mech)
- if mech == 'PLAIN' or mech.find('SCRAM-') != -1:
- self.security_protocol = 'SASL_PLAINTEXT'
- # Use first user as SASL user/pass
- for up in self.conf.get('sasl_users', '').split(','):
- u, p = up.split('=')
- conf_blob.append('sasl.username=%s' % u)
- conf_blob.append('sasl.password=%s' % p)
- break
-
- elif mech == 'OAUTHBEARER':
- self.security_protocol = 'SASL_PLAINTEXT'
- oidc = cluster.find_app(OauthbearerOIDCApp)
- if oidc is not None:
- conf_blob.append('sasl.oauthbearer.method=%s\n' %
- oidc.conf.get('sasl_oauthbearer_method'))
- conf_blob.append('sasl.oauthbearer.client.id=%s\n' %
- oidc.conf.get(
- 'sasl_oauthbearer_client_id'))
- conf_blob.append('sasl.oauthbearer.client.secret=%s\n' %
- oidc.conf.get(
- 'sasl_oauthbearer_client_secret'))
- conf_blob.append('sasl.oauthbearer.extensions=%s\n' %
- oidc.conf.get(
- 'sasl_oauthbearer_extensions'))
- conf_blob.append('sasl.oauthbearer.scope=%s\n' %
- oidc.conf.get('sasl_oauthbearer_scope'))
- conf_blob.append('sasl.oauthbearer.token.endpoint.url=%s\n'
- % oidc.conf.get('valid_url'))
- self.env_add('VALID_OIDC_URL', oidc.conf.get('valid_url'))
- self.env_add(
- 'INVALID_OIDC_URL',
- oidc.conf.get('badformat_url'))
- self.env_add(
- 'EXPIRED_TOKEN_OIDC_URL',
- oidc.conf.get('expired_url'))
- else:
- conf_blob.append(
- 'enable.sasl.oauthbearer.unsecure.jwt=true\n')
- conf_blob.append(
- 'sasl.oauthbearer.config=%s\n' %
- self.conf.get('sasl_oauthbearer_config'))
-
- elif mech == 'GSSAPI':
- self.security_protocol = 'SASL_PLAINTEXT'
- kdc = cluster.find_app(KerberosKdcApp)
- if kdc is None:
- self.log(
- 'WARNING: sasl_mechanisms is set to GSSAPI but no '
- 'KerberosKdcApp available: client SASL config will '
- 'be invalid (which might be intentional)')
- else:
- self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf'])
- self.env_add('KRB5_KDC_PROFILE', kdc.conf['kdc_conf'])
- principal, keytab = kdc.add_principal(
- self.name,
- conf.get('advertised_hostname', self.node.name))
- conf_blob.append('sasl.kerberos.service.name=%s' %
- self.conf.get('sasl_servicename',
- 'kafka'))
- conf_blob.append('sasl.kerberos.keytab=%s' % keytab)
- conf_blob.append(
- 'sasl.kerberos.principal=%s' %
- principal.split('@')[0])
-
- else:
- self.log(
- 'WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % # noqa: E501
- (mech, self.test_conf_file))
-
- # SSL config
- if getattr(cluster, 'ssl', None) is not None:
- ssl = cluster.ssl
-
- key = ssl.create_cert('librdkafka%s' % self.appid)
-
- conf_blob.append('ssl.ca.location=%s' % ssl.ca['pem'])
- conf_blob.append('ssl.certificate.location=%s' % key['pub']['pem'])
- conf_blob.append('ssl.key.location=%s' % key['priv']['pem'])
- conf_blob.append('ssl.key.password=%s' % key['password'])
-
- # Some tests need fine-grained access to various cert files,
- # set up the env vars accordingly.
- for k, v in ssl.ca.items():
- self.env_add('SSL_ca_{}'.format(k), v)
-
- # Set envs for all generated keys so tests can find them.
- for k, v in key.items():
- if isinstance(v, dict):
- for k2, v2 in v.items():
- # E.g. "SSL_priv_der=path/to/librdkafka-priv.der"
- self.env_add('SSL_{}_{}'.format(k, k2), v2)
- else:
- self.env_add('SSL_{}'.format(k), v)
-
- if 'SASL' in self.security_protocol:
- self.security_protocol = 'SASL_SSL'
- else:
- self.security_protocol = 'SSL'
-
- # Define bootstrap brokers based on selected security protocol
- self.dbg('Using client security.protocol=%s' % self.security_protocol)
- all_listeners = (
- ','.join(
- cluster.get_all(
- 'advertised.listeners',
- '',
- KafkaBrokerApp))).split(',')
- bootstrap_servers = ','.join(
- [x for x in all_listeners if x.startswith(self.security_protocol)])
- if len(bootstrap_servers) == 0:
- bootstrap_servers = all_listeners[0]
- self.log(
- 'WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % # noqa: E501
- (self.security_protocol, all_listeners, bootstrap_servers))
-
- self.bootstrap_servers = bootstrap_servers
-
- conf_blob.append('bootstrap.servers=%s' % bootstrap_servers)
- conf_blob.append('security.protocol=%s' % self.security_protocol)
-
- f.write(('\n'.join(conf_blob)).encode('ascii'))
- f.close()
-
- self.env_add('TEST_SCENARIO', scenario)
- self.env_add('RDKAFKA_TEST_CONF', self.test_conf_file)
- self.env_add('TEST_KAFKA_VERSION', version)
- self.env_add('TRIVUP_ROOT', cluster.instance_path())
-
- if self.test_mode != 'bash':
- self.test_report_file = self.mkpath('test_report', pathtype='perm')
- self.env_add('TEST_REPORT', self.test_report_file)
-
- if tests is not None:
- self.env_add('TESTS', ','.join(tests))
-
- def start_cmd(self):
- self.env_add(
- 'KAFKA_PATH',
- self.cluster.get_all(
- 'destdir',
- '',
- KafkaBrokerApp)[0],
- False)
- self.env_add(
- 'ZK_ADDRESS',
- self.cluster.get_all(
- 'address',
- '',
- ZookeeperApp)[0],
- False)
- self.env_add('BROKERS', self.cluster.bootstrap_servers(), False)
-
- # Provide an HTTPS REST endpoint for the HTTP client tests.
- self.env_add(
- 'RD_UT_HTTP_URL',
- 'https://jsonplaceholder.typicode.com/users')
-
- # Per broker env vars
- for b in [x for x in self.cluster.apps if isinstance(
- x, KafkaBrokerApp)]:
- self.env_add('BROKER_ADDRESS_%d' % b.appid,
- ','.join([x for x in
- b.conf['listeners'].split(',')
- if x.startswith(self.security_protocol)]))
- # Add each broker pid as an env so they can be killed
- # indivdidually.
- self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid))
- # JMX port, if available
- jmx_port = b.conf.get('jmx_port', None)
- if jmx_port is not None:
- self.env_add('BROKER_JMX_PORT_%d' % b.appid, str(jmx_port))
-
- extra_args = list()
- if not self.local_tests:
- extra_args.append('-L')
- if self.conf.get('args', None) is not None:
- extra_args.append(self.conf.get('args'))
- extra_args.append('-E')
- return './run-test.sh -p%d -K %s %s' % (
- int(self.conf.get('parallel', 5)), ' '.join(extra_args),
- self.test_mode)
-
- def report(self):
- if self.test_mode == 'bash':
- return None
-
- try:
- with open(self.test_report_file, 'r') as f:
- res = json.load(f)
- except Exception as e:
- self.log(
- 'Failed to read report %s: %s' %
- (self.test_report_file, str(e)))
- return {'root_path': self.root_path(), 'error': str(e)}
- return res
-
- def deploy(self):
- pass
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile
deleted file mode 100644
index 73eab2140..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile
+++ /dev/null
@@ -1,182 +0,0 @@
-TESTSRCS_C = $(wildcard [08]*-*.c)
-TESTSRCS_CXX= $(wildcard [08]*-*.cpp)
-OBJS = $(TESTSRCS_C:%.c=%.o) $(TESTSRCS_CXX:%.cpp=%.o)
-
-BIN = test-runner
-LIBS += -lrdkafka++ -lrdkafka
-OBJS += test.o rusage.o testcpp.o \
- tinycthread.o tinycthread_extra.o rdlist.o sockem.o \
- sockem_ctrl.o
-CFLAGS += -I../src
-CXXFLAGS += -I../src -I../src-cpp
-LDFLAGS += -rdynamic -L../src -L../src-cpp
-
-# Latest Kafka version
-KAFKA_VERSION?=3.1.0
-# Kafka versions for compatibility tests
-COMPAT_KAFKA_VERSIONS?=0.8.2.2 0.9.0.1 0.11.0.3 1.0.2 2.4.1 2.8.1 $(KAFKA_VERSION)
-
-# Non-default scenarios (FIXME: read from scenarios/*)
-SCENARIOS?=noautocreate ak23
-
-# A subset of rudimentary (and quick) tests suitable for quick smoke testing.
-# The smoke test should preferably finish in under a minute.
-SMOKE_TESTS?=0000,0001,0004,0012,0017,0022,0030,0039,0049,0087,0103
-
--include ../Makefile.config
-
-# Use C++ compiler as linker
-CC_LD=$(CXX)
-
-all: $(BIN) run_par
-
-#
-# These targets spin up a cluster and runs the test suite
-# with different parameters.
-#
-
-broker: $(BIN)
- ./broker_version_tests.py --conf '{"parallel":1, "args":"-Q"}' $(KAFKA_VERSION)
-
-broker_idempotent: $(BIN)
- ./broker_version_tests.py --conf '{"parallel":1, "args":"-P -L -Q"}' $(KAFKA_VERSION)
-
-sasl: $(BIN)
- ./sasl_test.py --conf '{"parallel":1, "args":"-L -Q"}' $(KAFKA_VERSION)
-
-# Run the full test suite(s)
-full: broker broker_idempotent sasl
-
-
-#
-# The following targets require an existing cluster running (test.conf)
-#
-quick:
- @echo "Running quick(er) test suite (without sockem)"
- ./run-test.sh -Q -E
-
-smoke:
- @echo "Running smoke tests: $(SMOKE_TESTS)"
- TESTS="$(SMOKE_TESTS)" $(MAKE) quick
-
-run_par: $(BIN)
- @echo "Running tests in parallel"
- ./run-test.sh
-
-run_seq: $(BIN)
- @echo "Running tests sequentially"
- ./run-test.sh -p1
-
-run_local: $(BIN)
- @echo "Running local broker-less tests with idempotent producer"
- ./run-test.sh -l -P
-
-run_local_quick: $(BIN)
- @echo "Running quick local broker-less tests with idempotent producer"
- ./run-test.sh -l -Q -P
-
-idempotent_par: $(BIN)
- ./run-test.sh -P
-
-idempotent_seq: $(BIN)
- ./run-test.sh -p1 -P
-
-idempotent: idempotent_par
-
-transactions: $(BIN)
- for _test in 0098 0101; do TESTS=$$_test ./run-test.sh ./$(BIN) ; done
-
-# Run unit tests
-unit: $(BIN)
- TESTS=0000 ./run-test.sh -p1
-
-
-# Delete all test topics (based on prefix)
-delete_topics:
- TESTS=none ./run-test.sh -D bare
-
-.PHONY:
-
-build: $(BIN) interceptor_test
-
-test.o: ../src/librdkafka.a ../src-cpp/librdkafka++.a interceptor_test
-
-
-
-include ../mklove/Makefile.base
-
-ifeq ($(_UNAME_S),Darwin)
-interceptor_test: .PHONY
-else
-interceptor_test: .PHONY
- $(MAKE) -C $@
-endif
-
-
-tinycthread.o: ../src/tinycthread.c
- $(CC) $(CPPFLAGS) $(CFLAGS) -c $<
-
-tinycthread_extra.o: ../src/tinycthread_extra.c
- $(CC) $(CPPFLAGS) $(CFLAGS) -c $<
-
-rdlist.o: ../src/rdlist.c
- $(CC) $(CPPFLAGS) $(CFLAGS) -c $<
-
-
-clean:
- rm -f *.test $(OBJS) $(BIN)
- $(MAKE) -C interceptor_test clean
-
-# Remove test reports, temporary test files, crash dumps, etc.
-clean-output:
- rm -f *.offset stats_*.json core vgcore.* _until_fail_*.log gdbrun??????
-
-realclean: clean clean-output
- rm -f test_report_*.json
-
-java: .PHONY
- make -C java
-
-# Run test-suite with ASAN
-asan:
- @(echo "### Running tests with AddressSanitizer")
- (cd .. ; ./dev-conf.sh asan)
- CI=true ./broker_version_tests.py --conf '{"args":"-Q"}' $(KAFKA_VERSION)
-
-# Run test-suite with TSAN
-tsan:
- @(echo "### Running tests with ThreadSanitizer")
- (cd .. ; ./dev-conf.sh tsan)
- CI=true ./broker_version_tests.py --conf '{"args":"-Q"}' $(KAFKA_VERSION)
-
-# Run full test-suite with a clean release build
-pristine-full:
- @(echo "### Running full test-suite with clean build")
- (cd .. ; ./dev-conf.sh clean)
- make full
-
-# Run backward compatibility tests
-compat:
- @(echo "### Running compatibility tests with Apache Kafka versions $(COMPAT_KAFKA_VERSIONS)")
- ./broker_version_tests.py --rdkconf '{"args": "-Q"}' \
- $(COMPAT_KAFKA_VERSIONS)
-
-# Run non-default scenarios
-scenarios: .PHONY
- @echo "### Running test scenarios: $(SCENARIOS)"
- @(for _SCENARIO in $(SCENARIOS) ; do \
- ./broker_version_tests.py --scenario "$$_SCENARIO" $(KAFKA_VERSION) ; \
- done)
-
-
-# Run a full release / PR test.
-# (| is for not running suites in parallel)
-release-test: | asan tsan pristine-full scenarios compat
-
-# Check resource usage (requires a running cluster environment)
-rusage:
- ./run-test.sh -R bare
-
-
-
--include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/README.md
deleted file mode 100644
index b0d99b0bb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/README.md
+++ /dev/null
@@ -1,505 +0,0 @@
-# Automated regression tests for librdkafka
-
-
-## Supported test environments
-
-While the standard test suite works well on OSX and Windows,
-the full test suite (which must be run for PRs and releases) will
-only run on recent Linux distros due to its use of ASAN, Kerberos, etc.
-
-
-## Automated broker cluster setup using trivup
-
-A local broker cluster can be set up using
-[trivup](https://github.com/edenhill/trivup), which is a Python package
-available on PyPi.
-These self-contained clusters are used to run the librdkafka test suite
-on a number of different broker versions or with specific broker configs.
-
-trivup will download the specified Kafka version into its root directory;
-the root directory is also used for cluster instances, where Kafka will
-write messages, logs, etc.
-The trivup root directory is by default `tmp` in the current directory, but
-may be specified by setting the `TRIVUP_ROOT` environment variable
-to an alternate directory, e.g., `TRIVUP_ROOT=$HOME/trivup make full`.
-
-First install required Python packages (trivup with friends):
-
- $ python3 -m pip install -U -r requirements.txt
-
-Bring up a Kafka cluster (with the specified version) and start an interactive
-shell; when the shell is exited, the cluster is brought down and deleted.
-
- $ python3 -m trivup.clusters.KafkaCluster 2.3.0 # Broker version
- # You can also try adding:
- # --ssl To enable SSL listeners
- # --sasl <mechanism> To enable SASL authentication
- # --sr To provide a Schema-Registry instance
- # .. and so on, see --help for more.
-
-In the trivup shell, run the test suite:
-
- $ make
-
-
-If you'd rather use an existing cluster, you may omit trivup and
-provide a `test.conf` file that specifies the brokers and possibly other
-librdkafka configuration properties:
-
- $ cp test.conf.example test.conf
- $ $EDITOR test.conf
-
-
-
-## Run specific tests
-
-To run tests:
-
- # Run tests in parallel (quicker, but harder to troubleshoot)
- $ make
-
- # Run a condensed test suite (quickest)
- # This is what is run on CI builds.
- $ make quick
-
- # Run tests in sequence
- $ make run_seq
-
- # Run specific test
- $ TESTS=0004 make
-
- # Run test(s) with helgrind, valgrind, gdb
- $ TESTS=0009 ./run-test.sh valgrind|helgrind|gdb
-
-
-All tests in the 0000-0999 series are run automatically with `make`.
-
-Tests 1000-1999 are subject to specific non-standard setups or broker
-configuration; these tests are run with `TESTS=1nnn make`.
-See comments in the test's source file for specific requirements.
-
-To insert test results into SQLite database make sure the `sqlite3` utility
-is installed, then add this to `test.conf`:
-
- test.sql.command=sqlite3 rdktests
-
-
-
-## Adding a new test
-
-The simplest way to add a new test is to copy one of the recent
-(higher `0nnn-..` number) tests to the next free
-`0nnn-<what-is-tested>` file.
-
-If possible and practical, try to use the C++ API in your test as that will
-cover both the C and C++ APIs and thus provide better test coverage.
-Do note that the C++ test framework is not as feature rich as the C one,
-so if you need message verification, etc, you're better off with a C test.
-
-After creating your test file it needs to be added in a couple of places:
-
- * Add to [tests/CMakeLists.txt](tests/CMakeLists.txt)
- * Add to [win32/tests/tests.vcxproj](win32/tests/tests.vcxproj)
- * Add to both locations in [tests/test.c](tests/test.c) - search for an
-   existing test number to see what needs to be done (see the sketch below).
-
-You don't need to add the test to the Makefile; it is picked up automatically.
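-
-As a rough sketch, the two additions to [tests/test.c](tests/test.c) for a
-hypothetical test `0150-example.c` would look like this (the number and
-name are illustrative):
-
-    /* 1) In test.c's declaration section: */
-    _TEST_DECL(0150_example);
-
-    /* 2) As an entry in test.c's list of tests: */
-    _TEST(0150_example, 0),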
-
-Some additional guidelines:
- * If your test depends on a minimum broker version, make sure to specify it
-   in test.c using `TEST_BRKVER()` (see 0091 as an example).
- * If your test can run without an active cluster, flag the test
-   with `TEST_F_LOCAL`.
- * If your test runs for a long time or produces/consumes a lot of messages
-   it might not be suitable for running on CI (which should run quickly
-   and is bound by both time and resources). In this case it is preferred
-   if you modify your test to run quicker and/or with fewer messages
-   when the `test_quick` variable is true.
- * There are plenty of helper wrappers in test.c for common librdkafka
-   functions that make tests easier to write by not having to deal with
-   errors, etc.
- * Fail fast using `TEST_ASSERT()` et al.; the sooner an error is detected,
-   the better, since it makes troubleshooting easier.
- * Use `TEST_SAY()` et al. to inform the developer what your test is doing,
-   making it easier to troubleshoot upon failure. But try to keep output
-   down to reasonable levels. There is a `TEST_LEVEL` environment variable
-   that can be used with `TEST_SAYL()` to only emit certain printouts
-   if the test level is increased. The default test level is 2.
- * The test runner will automatically adjust timeouts (that it knows about)
-   when running under valgrind, on CI, or in similar environments where the
-   execution speed may be slower.
-   To make sure your test remains sturdy in these types of environments,
-   use the `tmout_multip(milliseconds)` macro when passing timeout values
-   to non-test functions, e.g., `rd_kafka_poll(rk, tmout_multip(3000))`.
- * If your test file contains multiple separate sub-tests, use the
-   `SUB_TEST()`, `SUB_TEST_QUICK()` and `SUB_TEST_PASS()` macros from inside
-   the test functions to help differentiate test failures
-   (see the sketch below).
-
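-As a rough illustration tying the pieces above together, a minimal test
-could look like the following sketch (the test number, name and exact
-helper calls are illustrative; consult test.c and existing tests for the
-helpers that are actually available):
-
-    /* 0150-example.c: a minimal test sketch (hypothetical test number). */
-    #include "test.h"
-
-    int main_0150_example(int argc, char **argv) {
-            rd_kafka_t *rk;
-            int cnt;
-
-            SUB_TEST_QUICK();
-
-            /* Helper wrapper from test.c: creates a producer client and
-             * fails the test on error, so no error handling is needed here. */
-            rk = test_create_producer();
-
-            TEST_SAY("created client instance %s\n", rd_kafka_name(rk));
-
-            /* Scale the timeout so the test stays sturdy under valgrind,
-             * CI, and other slow environments. */
-            cnt = rd_kafka_poll(rk, tmout_multip(3000));
-            TEST_ASSERT(cnt >= 0, "expected poll to serve events, got %d", cnt);
-
-            rd_kafka_destroy(rk);
-
-            SUB_TEST_PASS();
-            return 0;
-    }
-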
-
-## Test scenarios
-
-A test scenario defines the cluster configuration used by tests.
-The majority of tests use the "default" scenario which matches the
-Apache Kafka default broker configuration (topic auto creation enabled, etc).
-
-If a test relies on cluster configuration that is mutually exclusive with
-the default configuration, an alternate scenario must be defined in
-`scenarios/<scenario>.json`, a configuration object that is passed
-to [trivup](https://github.com/edenhill/trivup) (see the example below).
-
-Try to reuse an existing test scenario as far as possible to speed up
-test times, since each new scenario will require a new cluster incarnation.
-
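-For illustration, a scenario that disables topic auto creation could look
-like this (the property name is an assumption about the trivup broker
-configuration keys):
-
-    {
-        "auto_create_topics": "false"
-    }
-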
-
-## A guide to testing, verifying, and troubleshooting, librdkafka
-
-
-### Creating a development build
-
-The [dev-conf.sh](../dev-conf.sh) script configures and builds librdkafka and
-the test suite for development use, enabling extra runtime
-checks (`ENABLE_DEVEL`, `rd_dassert()`, etc), disabling optimization
-(to get accurate stack traces and line numbers), enabling ASAN, etc.
-
- # Reconfigure librdkafka for development use and rebuild.
- $ ./dev-conf.sh
-
-**NOTE**: Performance tests and benchmarks should not use a development build.
-
-
-### Controlling the test framework
-
-A test run may be dynamically set up using a number of environment variables.
-These environment variables work for all the different ways of invoking the
-be it `make`, `run-test.sh`, `until-fail.sh`, etc.
-
- * `TESTS=0nnn` - only run a single test identified by its full number, e.g.
- `TESTS=0102 make`. (Yes, the var should have been called TEST)
- * `SUBTESTS=...` - only run sub-tests (tests that are using `SUB_TEST()`)
- that contains this string.
- * `TESTS_SKIP=...` - skip these tests.
- * `TEST_DEBUG=...` - this will automatically set the `debug` config property
- of all instantiated clients to the value.
-   E.g., `TEST_DEBUG=broker,protocol TESTS=0001 make`
- * `TEST_LEVEL=n` - controls the `TEST_SAY()` output level, a higher number
- yields more test output. Default level is 2.
- * `RD_UT_TEST=name` - only run unittest containing `name`, should be used
- with `TESTS=0000`.
- See [../src/rdunittest.c](../src/rdunittest.c) for
- unit test names.
-
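-For example, to run only the sub-tests whose names contain "consume", with
-more verbose output and one test skipped (the values are illustrative):
-
-    $ SUBTESTS=consume TESTS_SKIP=0045 TEST_LEVEL=3 make
-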
-
-Let's say that you run the full test suite and get a failure in test 0061,
-which is a consumer test. You want to quickly reproduce the issue
-and figure out what is wrong, so limit the tests to just 0061, and provide
-the relevant debug options (which are typically `cgrp,fetch` for consumers):
-
- $ TESTS=0061 TEST_DEBUG=cgrp,fetch make
-
-If the test did not fail, you've found an intermittent issue; this is where
-[until-fail.sh](until-fail.sh) comes into play: run the test until it fails:
-
- # bare means to run the test without valgrind
- $ TESTS=0061 TEST_DEBUG=cgrp,fetch ./until-fail.sh bare
-
-
-### How to run tests
-
-The standard way to run the test suite is firing up a trivup cluster
-in an interactive shell:
-
- $ ./interactive_broker_version.py 2.3.0 # Broker version
-
-
-And then running the test suite in parallel:
-
- $ make
-
-
-Run one test at a time:
-
- $ make run_seq
-
-
-Run a single test:
-
- $ TESTS=0034 make
-
-
-Run test suite with valgrind (see instructions below):
-
- $ ./run-test.sh valgrind # memory checking
-
-or with helgrind (the valgrind thread checker):
-
- $ ./run-test.sh helgrind # thread checking
-
-
-To run the tests in gdb:
-
-**NOTE**: gdb support is flaky on OSX due to signing issues.
-
- $ ./run-test.sh gdb
- (gdb) run
-
- # wait for test to crash, or interrupt with Ctrl-C
-
- # backtrace of current thread
- (gdb) bt
- # move up or down a stack frame
- (gdb) up
- (gdb) down
- # select specific stack frame
- (gdb) frame 3
- # show code at location
- (gdb) list
-
- # print variable content
- (gdb) p rk.rk_conf.group_id
- (gdb) p *rkb
-
- # continue execution (if interrupted)
- (gdb) cont
-
- # single-step one instruction
- (gdb) step
-
- # restart
- (gdb) run
-
- # see all threads
- (gdb) info threads
-
- # see backtraces of all threads
- (gdb) thread apply all bt
-
- # exit gdb
- (gdb) exit
-
-
-If a test crashes and produces a core file (make sure your shell has
-`ulimit -c unlimited` set!), do:
-
-    # On Linux
- $ LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner <core-file>
- (gdb) bt
-
- # On OSX
- $ DYLD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner /cores/core.<pid>
- (gdb) bt
-
-
-To run all tests repeatedly until one fails (a good way of finding
-intermittent failures, race conditions, etc.):
-
- $ ./until-fail.sh bare # bare is to run the test without valgrind,
- # may also be one or more of the modes supported
- # by run-test.sh:
- # bare valgrind helgrind gdb, etc..
-
-To run a single test repeatedly with valgrind until failure:
-
- $ TESTS=0103 ./until-fail.sh valgrind
-
-
-
-### Finding memory leaks, memory corruption, etc.
-
-There are two ways of verifying that there are no memory leaks, out-of-bounds
-memory accesses, use-after-free, etc.: ASAN or valgrind.
-
-#### ASAN - AddressSanitizer
-
-The first option is AddressSanitizer, build-time instrumentation
-provided by clang and gcc that inserts memory checks into the built library.
-
-To enable AddressSanitizer (ASAN), run `./dev-conf.sh asan` from the
-librdkafka root directory.
-This script will rebuild librdkafka and the test suite with ASAN enabled.
-
-Then run tests as usual. Memory access issues will be reported on stderr
-in real time as they happen (and the test will fail eventually), while
-memory leaks will be reported on stderr when the test run exits successfully,
-i.e., no tests failed.
-
-Test failures will typically cause the current test to exit hard without
-cleaning up, in which case there will be a large number of reported memory
-leaks; these should be ignored. The memory leak report is only relevant
-when the test suite passes.
-
-**NOTE**: The OSX version of ASAN does not provide memory leak detection,
- you will need to run the test suite on Linux (native or in Docker).
-
-**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
-
-
-#### Valgrind - memory checker
-
-Valgrind is a powerful virtual machine that intercepts all memory accesses
-of an unmodified program, reporting memory access violations, use after free,
-memory leaks, etc.
-
-Valgrind provides additional checks over ASAN and is mostly useful
-for troubleshooting crashes, memory issues and leaks when ASAN falls short.
-
-To use valgrind, make sure librdkafka and the test suite are built without
-ASAN or TSAN; it must be a clean build without any other instrumentation.
-Then simply run:
-
- $ ./run-test.sh valgrind
-
-Valgrind will report to stderr, just like ASAN.
-
-
-**NOTE**: Valgrind only runs on Linux.
-
-**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
-
-
-### TSAN - Thread and locking issues
-
-librdkafka uses a number of internal threads which communicate and share state
-through op queues, condition variables, mutexes and atomics.
-
-While the docstrings in the librdkafka source code specify what locking is
-required, it is very hard to manually verify that the correct locks
-are acquired, and in the correct order (to avoid deadlocks).
-
-TSAN, ThreadSanitizer, is of great help here. As with ASAN, TSAN is a
-build-time option: run `./dev-conf.sh tsan` to rebuild with TSAN.
-
-Run the test suite as usual, preferably in parallel. TSAN will output
-thread errors to stderr and eventually fail the test run.
-
-If you're having threading issues and TSAN does not provide enough information
-to sort them out, you can also try running the test with helgrind, which
-is valgrind's thread checker (`./run-test.sh helgrind`).
-
-
-**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
-
-
-### Resource usage thresholds (experimental)
-
-**NOTE**: This is an experimental feature, some form of system-specific
- calibration will be needed.
-
-If the `-R` option is passed to the `test-runner`, or the `make rusage`
-target is used, the test framework will monitor each test's resource usage
-and fail the test if the default or test-specific thresholds are exceeded.
-
-Per-test thresholds are specified in test.c using the `_THRES()` macro
-(see the sketch below).
-
-Currently monitored resources are:
- * `utime` - User CPU time in seconds (default 1.0s).
- * `stime` - System/Kernel CPU time in seconds (default 0.5s).
- * `rss` - RSS (memory) usage (default 10.0 MB).
- * `ctxsw` - Number of voluntary context switches, e.g. syscalls (default 10000).
-
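-As an illustration, a per-test threshold override in test.c could look like
-this (the test name, flags and field values are hypothetical, and the field
-names are assumed to correspond to the resources listed above):
-
-    /* Entry in test.c's list of tests: allow this test to use more
-     * user CPU time and memory than the defaults. */
-    _TEST(0150_example, 0, _THRES(.ucpu = 5.0, .rss = 20.0)),
-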
-Upon successful test completion a log line will be emitted with a resource
-usage summary, e.g.:
-
- Test resource usage summary: 20.161s (32.3%) User CPU time, 12.976s (20.8%) Sys CPU time, 0.000MB RSS memory increase, 4980 Voluntary context switches
-
-The User and Sys CPU thresholds are based on observations running the
-test suite on an Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz (8 cores),
-which defines the baseline system.
-
-Since no two development environments are identical, a manual CPU calibration
-value can be passed as `-R<C>`, where `C` is the CPU calibration for
-the local system compared to the baseline system.
-The CPU threshold will be multiplied by the CPU calibration value (default 1.0);
-thus a value less than 1.0 means the local system is faster than the
-baseline system, and a value larger than 1.0 means the local system is
-slower than the baseline system.
-E.g., if you are on a slower i5 system, pass `-R2.0` to allow higher CPU usage,
-or `-R0.8` if your system is faster than the baseline system.
-The CPU calibration value may also be set with the
-`TEST_CPU_CALIBRATION=1.5` environment variable.
-
-In an ideal future, the test suite would be able to auto-calibrate.
-
-
-**NOTE**: The resource usage threshold checks will run tests in sequence,
-          not in parallel, to be able to effectively measure per-test usage.
-
-
-# PR and release verification
-
-Prior to pushing your PR you must verify that your code change has not
-introduced any regressions or new issues; this requires running the test
-suite in multiple different modes:
-
- * PLAINTEXT, SSL transports
- * All SASL mechanisms (PLAIN, GSSAPI, SCRAM, OAUTHBEARER)
- * Idempotence enabled for all tests
- * With memory checking
- * With thread checking
- * Compatibility with older broker versions
-
-These tests must also be run for each release candidate that is created.
-
- $ make release-test
-
-This will take approximately 30 minutes.
-
-**NOTE**: Run this on Linux (for ASAN and Kerberos tests to work properly), not OSX.
-
-
-# Test mode specifics
-
-The following sections rely on trivup being installed.
-
-
-### Compatibility tests with multiple broker versions
-
-To ensure compatibility across all supported broker versions, the entire
-test suite is run in a trivup-based cluster, one test run for each
-relevant broker version.
-
- $ ./broker_version_tests.py
-
-
-### SASL tests
-
-Testing SASL requires a bit of configuration on the brokers; to automate
-this, the entire test suite is run on trivup-based clusters.
-
- $ ./sasl_tests.py
-
-
-
-### Full test suite(s) run
-
-To run all tests, including the broker version and SASL tests, etc., use
-
- $ make full
-
-**NOTE**: `make full` is a subset of the more complete `make release-test` target.
-
-
-### Idempotent Producer tests
-
-To run the entire test suite with `enable.idempotence=true`, use
-`make idempotent_seq` or `make idempotent_par` for sequential or
-parallel testing.
-Some tests are skipped or slightly modified when idempotence is enabled.
-
-
-## Manual testing notes
-
-The following tests are currently performed manually; they should be
-implemented as automated tests.
-
-### LZ4 interop
-
- $ ./interactive_broker_version.py -c ./lz4_manual_test.py 0.8.2.2 0.9.0.1 2.3.0
-
-Check the output and follow the instructions.
-
-
-
-
-## Test numbers
-
-Automated tests: 0000-0999
-Manual tests: 8000-8999
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh
deleted file mode 100755
index 9d17706f3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-#
-# autotest.sh runs the integration tests using a temporary Kafka cluster.
-# This is intended to be used on CI.
-#
-
-set -e
-
-KAFKA_VERSION=$1
-
-if [[ -z $KAFKA_VERSION ]]; then
- echo "Usage: $0 <broker-version>"
- exit 1
-fi
-
-set -x
-
-pushd tests
-
-[[ -d _venv ]] || virtualenv _venv
-source _venv/bin/activate
-
-# Install the requirements
-pip3 install -U -r requirements.txt
-
-# Run tests that automatically spin up their clusters
-export KAFKA_VERSION
-
-echo "## Running full test suite for broker version $KAFKA_VERSION ##"
-time make full
-
-
-popd # tests
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb b/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb
deleted file mode 100644
index f98d9b462..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb
+++ /dev/null
@@ -1,30 +0,0 @@
-p *test
-bt full
-list
-
-p *rk
-p *rkb
-p *rkb.rkb_rk
-
-up
-p *rk
-p *rkb
-p *rkb.rkb_rk
-
-up
-p *rk
-p *rkb
-p *rkb.rkb_rk
-
-up
-p *rk
-p *rkb
-p *rkb.rkb_rk
-
-up
-p *rk
-p *rkb
-p *rkb.rkb_rk
-
-thread apply all bt
-quit
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py b/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py
deleted file mode 100755
index 717da28d5..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py
+++ /dev/null
@@ -1,297 +0,0 @@
-#!/usr/bin/env python3
-#
-#
-# Run librdkafka regression tests with different SASL parameters
-# and broker versions.
-#
-# Requires:
-# trivup python module
-# gradle in your PATH
-
-from cluster_testing import (
- LibrdkafkaTestCluster,
- print_report_summary,
- read_scenario_conf)
-from LibrdkafkaTestApp import LibrdkafkaTestApp
-
-import subprocess
-import tempfile
-import os
-import sys
-import argparse
-import json
-
-
-def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None,
- interact=False, debug=False, scenario="default"):
- """
-    @brief Create, deploy and start a Kafka cluster using Kafka \\p version.
- Then run librdkafka's regression tests.
- """
-
- cluster = LibrdkafkaTestCluster(version, conf,
- num_brokers=int(conf.get('broker_cnt', 3)),
- debug=debug, scenario=scenario)
-
- # librdkafka's regression tests, as an App.
- _rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf
- _rdkconf.update(rdkconf)
- rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests,
- scenario=scenario)
- rdkafka.do_cleanup = False
-
- if deploy:
- cluster.deploy()
-
- cluster.start(timeout=30)
-
- if conf.get('test_mode', '') == 'bash':
- cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % ( # noqa: E501
- cluster.name, version)
- subprocess.call(
- cmd,
- env=rdkafka.env,
- shell=True,
- executable='/bin/bash')
- report = None
-
- else:
- rdkafka.start()
- print(
- '# librdkafka regression tests started, logs in %s' %
- rdkafka.root_path())
- rdkafka.wait_stopped(timeout=60 * 30)
-
- report = rdkafka.report()
- report['root_path'] = rdkafka.root_path()
-
- if report.get('tests_failed', 0) > 0 and interact:
- print(
- '# Connect to cluster with bootstrap.servers %s' %
- cluster.bootstrap_servers())
- print('# Exiting the shell will bring down the cluster. '
- 'Good luck.')
- subprocess.call(
- 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % # noqa: E501
- (cluster.name, version), env=rdkafka.env, shell=True,
- executable='/bin/bash')
-
- cluster.stop(force=True)
-
- cluster.cleanup()
- return report
-
-
-def handle_report(report, version, suite):
- """ Parse test report and return tuple (Passed(bool), Reason(str)) """
- test_cnt = report.get('tests_run', 0)
-
- if test_cnt == 0:
- return (False, 'No tests run')
-
- passed = report.get('tests_passed', 0)
- failed = report.get('tests_failed', 0)
- if 'all' in suite.get('expect_fail', []) or version in suite.get(
- 'expect_fail', []):
- expect_fail = True
- else:
- expect_fail = False
-
- if expect_fail:
- if failed == test_cnt:
- return (True, 'All %d/%d tests failed as expected' %
- (failed, test_cnt))
- else:
- return (False, '%d/%d tests failed: expected all to fail' %
- (failed, test_cnt))
- else:
- if failed > 0:
- return (False, '%d/%d tests passed: expected all to pass' %
- (passed, test_cnt))
- else:
- return (True, 'All %d/%d tests passed as expected' %
- (passed, test_cnt))
-
-
-if __name__ == '__main__':
-
- parser = argparse.ArgumentParser(
- description='Run librdkafka tests on a range of broker versions')
-
- parser.add_argument('--debug', action='store_true', default=False,
- help='Enable trivup debugging')
- parser.add_argument('--conf', type=str, dest='conf', default=None,
- help='trivup JSON config object (not file)')
- parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None,
- help='trivup JSON config object (not file) '
- 'for LibrdkafkaTestApp')
- parser.add_argument('--scenario', type=str, dest='scenario',
- default='default',
- help='Test scenario (see scenarios/ directory)')
- parser.add_argument('--tests', type=str, dest='tests', default=None,
- help='Test to run (e.g., "0002")')
- parser.add_argument('--report', type=str, dest='report', default=None,
- help='Write test suites report to this filename')
- parser.add_argument('--interact', action='store_true', dest='interact',
- default=False,
- help='On test failure start a shell before bringing '
- 'the cluster down.')
- parser.add_argument('versions', type=str, nargs='*',
- default=['0.8.1.1', '0.8.2.2', '0.9.0.1', '2.3.0'],
- help='Broker versions to test')
- parser.add_argument('--interactive', action='store_true',
- dest='interactive',
- default=False,
- help='Start a shell instead of running tests')
- parser.add_argument(
- '--root',
- type=str,
- default=os.environ.get(
- 'TRIVUP_ROOT',
- 'tmp'),
- help='Root working directory')
- parser.add_argument(
- '--port',
- default=None,
- help='Base TCP port to start allocating from')
- parser.add_argument(
- '--kafka-src',
- dest='kafka_path',
- type=str,
- default=None,
- help='Path to Kafka git repo checkout (used for version=trunk)')
- parser.add_argument(
- '--brokers',
- dest='broker_cnt',
- type=int,
- default=3,
- help='Number of Kafka brokers')
- parser.add_argument('--ssl', dest='ssl', action='store_true',
- default=False,
- help='Enable SSL endpoints')
- parser.add_argument(
- '--sasl',
- dest='sasl',
- type=str,
- default=None,
- help='SASL mechanism (PLAIN, GSSAPI)')
-
- args = parser.parse_args()
-
- conf = dict()
- rdkconf = dict()
-
- if args.conf is not None:
- args.conf = json.loads(args.conf)
- else:
- args.conf = {}
-
- if args.port is not None:
- args.conf['port_base'] = int(args.port)
- if args.kafka_path is not None:
- args.conf['kafka_path'] = args.kafka_path
- if args.ssl:
- args.conf['security.protocol'] = 'SSL'
- if args.sasl:
- if args.sasl == 'PLAIN' and 'sasl_users' not in args.conf:
- args.conf['sasl_users'] = 'testuser=testpass'
- args.conf['sasl_mechanisms'] = args.sasl
- args.conf['sasl_servicename'] = 'kafka'
- if args.interactive:
- args.conf['test_mode'] = 'bash'
- args.conf['broker_cnt'] = args.broker_cnt
-
- conf.update(args.conf)
- if args.rdkconf is not None:
- rdkconf.update(json.loads(args.rdkconf))
-
- conf.update(read_scenario_conf(args.scenario))
-
- if args.tests is not None:
- tests = args.tests.split(',')
- elif 'tests' in conf:
- tests = conf.get('tests', '').split(',')
- else:
- tests = None
-
- # Test version + suite matrix
- if 'versions' in conf:
- versions = conf.get('versions')
- else:
- versions = args.versions
- suites = [{'name': 'standard'}]
-
- pass_cnt = 0
- fail_cnt = 0
- for version in versions:
- for suite in suites:
- _conf = conf.copy()
- _conf.update(suite.get('conf', {}))
- _rdkconf = rdkconf.copy()
- _rdkconf.update(suite.get('rdkconf', {}))
-
- if 'version' not in suite:
- suite['version'] = dict()
-
- # Run tests
- print('#### Version %s, suite %s, scenario %s: STARTING' %
- (version, suite['name'], args.scenario))
- report = test_it(version, tests=tests, conf=_conf,
- rdkconf=_rdkconf,
- interact=args.interact, debug=args.debug,
- scenario=args.scenario)
-
- if not report:
- continue
-
- # Handle test report
- report['version'] = version
- passed, reason = handle_report(report, version, suite)
- report['PASSED'] = passed
- report['REASON'] = reason
-
- if passed:
- print('\033[42m#### Version %s, suite %s: PASSED: %s\033[0m' %
- (version, suite['name'], reason))
- pass_cnt += 1
- else:
- print('\033[41m#### Version %s, suite %s: FAILED: %s\033[0m' %
- (version, suite['name'], reason))
- fail_cnt += 1
-
- # Emit hopefully relevant parts of the log on failure
- subprocess.call(
- "grep --color=always -B100 -A10 FAIL %s" %
- (os.path.join(
- report['root_path'],
- 'stderr.log')),
- shell=True)
-
- print('#### Test output: %s/stderr.log' % (report['root_path']))
-
- suite['version'][version] = report
-
- # Write test suite report JSON file
- if args.report is not None:
- test_suite_report_file = args.report
- f = open(test_suite_report_file, 'w')
- else:
- fd, test_suite_report_file = tempfile.mkstemp(prefix='test_suite_',
- suffix='.json',
- dir='.')
- f = os.fdopen(fd, 'w')
-
- full_report = {'suites': suites, 'pass_cnt': pass_cnt,
- 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt}
-
- f.write(json.dumps(full_report))
- f.close()
-
- print('\n\n\n')
- print_report_summary(full_report)
- print('#### Full test suites report in: %s' % test_suite_report_file)
-
- if pass_cnt == 0 or fail_cnt > 0:
- sys.exit(1)
- else:
- sys.exit(0)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh
deleted file mode 100755
index bce137109..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Build script for buildbox.io
-# Must be run from the top-level directory.
-
-PFX=tmp_install
-
-[ -d $PFX ] && rm -rf "$PFX"
-
-make clean || true
-./configure --clean
-./configure "--prefix=$PFX" || exit 1
-make || exit 1
-make install || exit 1
-
-
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh
deleted file mode 100755
index f396d8bed..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-#
-# This script runs all tests with valgrind, one by one, forever, to
-# make sure there aren't any memory leaks.
-
-ALL=$(seq 0 15)
-CNT=0
-while true ; do
- for T in $ALL; do
- echo "#################### Test $T run #$CNT #################"
- TESTS=$(printf %04d $T) ./run-test.sh -p valgrind || exit 1
- CNT=$(expr $CNT + 1)
- done
- echo "################## Cleaning up"
- rm -f *.offset
- ./delete-test-topics.sh 0
-done
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py b/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py
deleted file mode 100755
index cfdc08db6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-#
-#
-# Cluster testing helper
-#
-# Requires:
-# trivup python module
-# gradle in your PATH
-
-from trivup.trivup import Cluster
-from trivup.apps.ZookeeperApp import ZookeeperApp
-from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
-from trivup.apps.KerberosKdcApp import KerberosKdcApp
-from trivup.apps.SslApp import SslApp
-from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
-
-import os
-import sys
-import json
-import argparse
-import re
-from jsoncomment import JsonComment
-
-
-def version_as_list(version):
- if version == 'trunk':
- return [sys.maxsize]
- return [int(a) for a in re.findall('\\d+', version)][0:3]
-
-
-def read_scenario_conf(scenario):
- """ Read scenario configuration from scenarios/<scenario>.json """
- parser = JsonComment(json)
- with open(os.path.join('scenarios', scenario + '.json'), 'r') as f:
- return parser.load(f)
-
-
-class LibrdkafkaTestCluster(Cluster):
- def __init__(self, version, conf={}, num_brokers=3, debug=False,
- scenario="default"):
- """
- @brief Create, deploy and start a Kafka cluster using Kafka \\p version
-
- Supported \\p conf keys:
- * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL
-
- \\p conf dict is passed to KafkaBrokerApp classes, etc.
- """
-
- super(LibrdkafkaTestCluster, self).__init__(
- self.__class__.__name__,
- os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug)
-
- # Read trivup config from scenario definition.
- defconf = read_scenario_conf(scenario)
- defconf.update(conf)
-
- # Enable SSL if desired
- if 'SSL' in conf.get('security.protocol', ''):
- self.ssl = SslApp(self, defconf)
-
- self.brokers = list()
-
- # One ZK (from Kafka repo)
- ZookeeperApp(self)
-
- # Start Kerberos KDC if GSSAPI (Kerberos) is configured
- if 'GSSAPI' in defconf.get('sasl_mechanisms', []):
- kdc = KerberosKdcApp(self, 'MYREALM')
- # Kerberos needs to be started prior to Kafka so that principals
- # and keytabs are available at the time of Kafka config generation.
- kdc.start()
-
- if 'OAUTHBEARER'.casefold() == \
- defconf.get('sasl_mechanisms', "").casefold() and \
- 'OIDC'.casefold() == \
- defconf.get('sasl_oauthbearer_method', "").casefold():
- self.oidc = OauthbearerOIDCApp(self)
-
- # Brokers
- defconf.update({'replication_factor': min(num_brokers, 3),
- 'version': version,
- 'security.protocol': 'PLAINTEXT'})
- self.conf = defconf
-
- for n in range(0, num_brokers):
- # Configure rack & replica selector if broker supports
- # fetch-from-follower
- if version_as_list(version) >= [2, 4, 0]:
- defconf.update(
- {
- 'conf': [
- 'broker.rack=RACK${appid}',
- 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501
- self.brokers.append(KafkaBrokerApp(self, defconf))
-
- def bootstrap_servers(self):
- """ @return Kafka bootstrap servers based on security.protocol """
- all_listeners = (
- ','.join(
- self.get_all(
- 'advertised_listeners',
- '',
- KafkaBrokerApp))).split(',')
- return ','.join([x for x in all_listeners if x.startswith(
- self.conf.get('security.protocol'))])
-
-
-def result2color(res):
- if res == 'PASSED':
- return '\033[42m'
- elif res == 'FAILED':
- return '\033[41m'
- else:
- return ''
-
-
-def print_test_report_summary(name, report):
- """ Print summary for a test run. """
- passed = report.get('PASSED', False)
- if passed:
- resstr = '\033[42mPASSED\033[0m'
- else:
- resstr = '\033[41mFAILED\033[0m'
-
- print('%6s %-50s: %s' % (resstr, name, report.get('REASON', 'n/a')))
- if not passed:
- # Print test details
- for name, test in report.get('tests', {}).items():
- testres = test.get('state', '')
- if testres == 'SKIPPED':
- continue
- print('%s --> %-20s \033[0m' %
- ('%s%s\033[0m' %
- (result2color(test.get('state', 'n/a')),
- test.get('state', 'n/a')),
- test.get('name', 'n/a')))
- print('%8s --> %s/%s' %
- ('', report.get('root_path', '.'), 'stderr.log'))
-
-
-def print_report_summary(fullreport):
- """ Print summary from a full report suite """
- suites = fullreport.get('suites', list())
- print('#### Full test suite report (%d suite(s))' % len(suites))
- for suite in suites:
- for version, report in suite.get('version', {}).items():
- print_test_report_summary('%s @ %s' %
- (suite.get('name', 'n/a'), version),
- report)
-
- pass_cnt = fullreport.get('pass_cnt', -1)
- if pass_cnt == 0:
- pass_clr = ''
- else:
- pass_clr = '\033[42m'
-
- fail_cnt = fullreport.get('fail_cnt', -1)
- if fail_cnt == 0:
- fail_clr = ''
- else:
- fail_clr = '\033[41m'
-
-    print('#### %d suites %sPASSED\033[0m, %d suites %sFAILED\033[0m' %
-          (pass_cnt, pass_clr, fail_cnt, fail_clr))
-
-    # Return the overall result so __main__ can exit with a proper status.
-    return fail_cnt == 0
-
-
-if __name__ == '__main__':
-
- parser = argparse.ArgumentParser(description='Show test suite report')
- parser.add_argument('report', type=str, nargs=1,
- help='Show summary from test suites report file')
-
- args = parser.parse_args()
-
- passed = False
- with open(args.report[0], 'r') as f:
- passed = print_report_summary(json.load(f))
-
- if passed:
- sys.exit(0)
- else:
- sys.exit(1)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh
deleted file mode 100755
index bc40bf65d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-#
-
-set -e
-
-if [[ "$1" == "-n" ]]; then
- DO_DELETE=0
- shift
-else
- DO_DELETE=1
-fi
-
-ZK=$1
-KATOPS=$2
-RE=$3
-
-if [[ -z "$ZK" ]]; then
- ZK="$ZK_ADDRESS"
-fi
-
-if [[ -z "$KATOPS" ]]; then
- if [[ -d "$KAFKA_PATH" ]]; then
- KATOPS="$KAFKA_PATH/bin/kafka-topics.sh"
- fi
-fi
-
-if [[ -z "$RE" ]]; then
- RE="^rdkafkatest_"
-fi
-
-if [[ -z "$KATOPS" ]]; then
- echo "Usage: $0 [-n] <zookeeper-address> <kafka-topics.sh> [<topic-name-regex>]"
- echo ""
- echo "Deletes all topics matching regex $RE"
- echo ""
- echo " -n - Just collect, dont actually delete anything"
- exit 1
-fi
-
-set -u
-echo -n "Collecting list of matching topics... "
-TOPICS=$($KATOPS --zookeeper $ZK --list 2>/dev/null | grep "$RE") || true
-N_TOPICS=$(echo "$TOPICS" | wc -w)
-echo "$N_TOPICS topics found"
-
-
-for t in $TOPICS; do
- if [[ $DO_DELETE == 1 ]]; then
- echo -n "Deleting topic $t... "
- ($KATOPS --zookeeper $ZK --delete --topic "$t" 2>/dev/null && echo "deleted") || echo "failed"
- else
- echo "Topic $t"
- fi
-done
-
-echo "Done"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore
deleted file mode 100644
index e58fd014d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-*.key
-*.crt
-*.jks
-*.csr
-*.pem
-*.p12
-*.srl
-extfile
-!client.keystore.p12
-!client2.certificate.pem
-!client2.key
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile
deleted file mode 100644
index d12bbda9f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-ssl_keys: clear_keys
- @./create_keys.sh client client2
-
-clear_keys:
- @rm -f *.key *.crt *.jks \
- *.csr *.pem *.p12 *.srl extfile
-
-.PHONY: ssl_keys
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md
deleted file mode 100644
index 43204036c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# SSL keys generation for tests
-
-The Makefile in this directory generates a PKCS#12 keystore
-and corresponding PEM certificate and key for testing
-SSL keys and keystore usage in librdkafka.
-
-To update those files with a newer OpenSSL version, just run `make`.
-
-# Requirements
-
-* OpenSSL >= 1.1.1
-* Java keytool >= Java 11
-* GNU Make >= 4.2 \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12 b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12
deleted file mode 100644
index e8c8347ee..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12
+++ /dev/null
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem
deleted file mode 100644
index 34a1da408..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem
+++ /dev/null
@@ -1,109 +0,0 @@
-Bag Attributes
- friendlyName: client2
- localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32
-Key Attributes: <No Attributes>
------BEGIN PRIVATE KEY-----
-MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDMrI+QK7Q6L9TU
-cVjEbl4sMu3KhXgs71JNgQl8joFPVjb3PZF6YHegZo0FAOU1F6lysD3NNnI21HIz
-LbCe6BJRogNFKtcFvWS6uQok1HperDO/DVQkH9ARAcvlxE/I6dPbb1YCi7EMHrjM
-Dle+NXWV3nKCe7BcMkETkki5Bj5fNA5oa/pmS0gSS/HXnB8rxyFv4mB/R+oGC1wO
-WOvgn6ip5bKdjMEEnyqYsDCH8w3xYkKlZ6Ag5w1yxnr6D41J64Go2R62MuLrScVr
-+4CM+XJl3Y08+emlCz5m5wuh6A31bp7MFY+f3Gs9AI5qiN3tyjZ//EzoIrfb68tQ
-td+UvT4fAgMBAAECggEALoLkWQHlgfeOqPxdDL57/hVQvl4YUjXMgTpamoiT0CCq
-ewLtxV6YsMW9NC7g53DKG/r7AGBoEhezH/g5E9NvHkfv8E7s8Cv68QfNy1LRwCPn
-2nm/7jmggczjtgInk2O3tj0V0ZxHDpcIra5wuBPT9cvIP+i1yi3NZhIvHoTRtbZp
-lWelovML6SGcbmYDZHWwL8C/quX2/Vp72dJa7ySatlJCe8lcdolazUAhe6W3FGf2
-DojupWddAbwcogQsjQ0WNgtIov5JDF1vHjLkw0uCvh24P+DYBA0JjHybLTR70Ypp
-POwCV5O96JntWfcXYivi4LQrSDFCIDyDwwrbkIkdoQKBgQDuNesfC7C0LJikB+I1
-UgrDJiu4lFVoXwbaWRRuZD58j0mDGeTY9gZzBJ7pJgv3qJbfk1iwpUU25R2Np946
-h63EqpSSoP/TnMBePUBjnu+C5iXxk2KPjNb9Xu8m4Q8tgYvYf5IJ7iLllY2uiT6B
-e+0EGAEPvP1HLbPP22IUMsG6jwKBgQDb9X6fHMeHtP6Du+qhqiMmLK6R2lB7cQ1j
-2FSDySekabucaFhDpK3n2klw2MfF2oZHMrxAfYFySV1kGMil4dvFox8mGBJHc/d5
-lNXGNOfQbVV8P1NRjaPwjyAAgAPZfZgFr+6s+pawMRGnGw5Y6p03sLnD5FWU9Wfa
-vM6RLE5LcQJ/FHiNvB1FEjbC51XGGs7yHdMp7rLQpCeGbz04hEQZGps1tg6DnCGI
-bFn5Tg/291GFpbED7ipFyHHoGERU1LLUPBJssi0jzwupfG/HGMiPzK/6ksgXsD5q
-O1vtMWol48M+QVy1MCVG2nP/uQASXw5HUBLABJo5KeTDjxlLVHEINQKBgAe54c64
-9hFAPEhoS1+OWFm47BDXeEg9ulitepp+cFQIGrzttVv65tjkA/xgwPOkL19E2vPw
-9KENDqi7biDVhCC3EBsIcWvtGN4+ahviM9pQXNZWaxjMPtvuSxN5a6kyDir0+Q8+
-ZhieQJ58Bs78vrT8EipdVNw8mn9GboMO6VkhAoGBAJ+NUvcO3nIVJOCEG3qnweHA
-zqa4JyxFonljwsUFKCIHoiKYlp0KW4wTJJIkTKvLYcRY6kMzP/H1Ja9GqdVnf8ou
-tJOe793M+HkYUMTxscYGoCXXtsWKN2ZOv8aVBA7RvpJS8gE6ApScUrjeM76h20CS
-xxqrrSc37NSjuiaTyOTG
------END PRIVATE KEY-----
-Bag Attributes
- friendlyName: client2
- localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32
-subject=C = , ST = , L = , O = , OU = , CN = client2
-
-issuer=CN = caroot
-
------BEGIN CERTIFICATE-----
-MIIDCzCCAfOgAwIBAgIUIRg5w7eGA6xivHxzAmzh2PLUJq8wDQYJKoZIhvcNAQEL
-BQAwETEPMA0GA1UEAwwGY2Fyb290MCAXDTIyMTAwNzE1MTI0NFoYDzIwNTAwMjIx
-MTUxMjQ0WjBJMQkwBwYDVQQGEwAxCTAHBgNVBAgTADEJMAcGA1UEBxMAMQkwBwYD
-VQQKEwAxCTAHBgNVBAsTADEQMA4GA1UEAxMHY2xpZW50MjCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAMysj5ArtDov1NRxWMRuXiwy7cqFeCzvUk2BCXyO
-gU9WNvc9kXpgd6BmjQUA5TUXqXKwPc02cjbUcjMtsJ7oElGiA0Uq1wW9ZLq5CiTU
-el6sM78NVCQf0BEBy+XET8jp09tvVgKLsQweuMwOV741dZXecoJ7sFwyQROSSLkG
-Pl80Dmhr+mZLSBJL8decHyvHIW/iYH9H6gYLXA5Y6+CfqKnlsp2MwQSfKpiwMIfz
-DfFiQqVnoCDnDXLGevoPjUnrgajZHrYy4utJxWv7gIz5cmXdjTz56aULPmbnC6Ho
-DfVunswVj5/caz0AjmqI3e3KNn/8TOgit9vry1C135S9Ph8CAwEAAaMhMB8wHQYD
-VR0RBBYwFIIHY2xpZW50MoIJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBd
-d5Sl51/aLcCnc5vo2h2fyNQIVbZGbgEyWRbYdHv5a4X7JxUalipvRhXTpYLQ+0R5
-Fzgl5Mwo6dUpJjtzwXZUOAt59WhqVV5+TMe8eDHBl+lKM/YUgZ+kOlGMExEaygrh
-cG+/rVZLAgcC+HnHNaIo2guyn6RqFtBMzkRmjhH96AcygbsN5OFHY0NOzGV9WTDJ
-+A9dlJIy2bEU/yYpXerdXp9lM8fKaPc0JDYwwESMS7ND70dcpGmrRa9pSTSDPUaK
-KSzzOyK+8E5mzcqEbUCrlpz0sklNYDNMIn48Qjkz52Kv8XHvcYS1gv0XvQZtIH3M
-x6X3/J+ivx6L72BOm+ar
------END CERTIFICATE-----
-Bag Attributes
- friendlyName: CN=caroot
-subject=CN = caroot
-
-issuer=CN = caroot
-
------BEGIN CERTIFICATE-----
-MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL
-BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1
-MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj
-FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk
-daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA
-xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4
-B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m
-bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH
-18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N
-L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF
-UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn
-KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc
-MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK
-0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7
-MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e
-QHIFE8+PTQ==
------END CERTIFICATE-----
-Bag Attributes
- friendlyName: caroot
- 2.16.840.1.113894.746875.1.1: <Unsupported tag 6>
-subject=CN = caroot
-
-issuer=CN = caroot
-
------BEGIN CERTIFICATE-----
-MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL
-BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1
-MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj
-FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk
-daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA
-xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4
-B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m
-bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH
-18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N
-L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF
-UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn
-KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc
-MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK
-0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7
-MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e
-QHIFE8+PTQ==
------END CERTIFICATE-----
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key
deleted file mode 100644
index 6b0b0f87d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key
+++ /dev/null
@@ -1,34 +0,0 @@
-Bag Attributes
- friendlyName: client2
- localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32
-Key Attributes: <No Attributes>
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIFFDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQILalIN2MbG7QCAggA
-MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECD+gqk7gSkEFBIIEwETSFzC1yYTM
-/O6lA8BMkl5Wzt4e7Jw7WnfWSmOFTtpXZqOgxvN9dNPsMIpxvU7nF3Iwhqw0WXMF
-lpKqCy2FLM+XWqaQYV+2++s23lH0Eqfofc0IZoYk7FB92MAO1dUI7iDJeT0kwrmU
-mgAKAqa6e4REZgDEUXYVAOiAHqszs0JjXlsxlPSws2EZQyU8kEALggy+60Jozviq
-a9fUZ9JnbtCPkuSOipC8N+erNIEkruzbXRbookTQF+qAyTyXMciL0fTqdAJB/xfO
-h66TQvr1XZorqqVPYI+yXwRBF7oVfJyk0kVfhcpo6SoedNJ3onUlyktcF2RPj1xh
-612L4ytNp/TN8jvSs5EKHTuwS2+dnYp2jTS4rcbSRe53RylhFudAn9/aZad0/C72
-JXeiax3i0071sWbvKX3YsW/2QCaeMALhiqbzx+8PcgVV9BVfjO8qxJSNjaOwmVRy
-I/22pufTDkoNL/aQSiw1NAL22IPdD0uvLCHj27nBct4KancvgSdTxMK9lfwJZet1
-D0S9ChUa2tCY0pDH7F9XUfcS7VAij+VWtlGIyEw7rPOWx6fGT15fj/QnepuJ5xON
-qiAH7IhJesWWhG7xp7c3QsdeGNowkMtoLBlz5fEKDRaauPlbLI5IoXy+ZyOO1tIo
-kH5wHDE1bn5cWn7qRy5X5HtPga1OjF11R+XquJ88+6gqmxPlsrK45/FiGdP4iLN/
-dp10cnFgAVA2kEaTXCH1LctGlR+3XQgfrwWDfvk7uMtvybqFcEEBv8vBih1UsF6v
-RFfoUYq8Zle2x9kX/cfad52FxtDWnhZAgNtT53tWRUb/oAt7fXQxJMlRXKjSV05q
-S/uwevnj49eVFdyiroPofipB8LAK4I+gzZ8AYJob5GoRTlPonC1pj/n3vKRsDMOA
-Lwy3gXoyQ+/MBUPcDG/ewdusrJncnkAlFNt0w97CmOJU0czuJJw5rRozfvZF1Hs9
-2BVcwVPmZH9Nr3+6Yb+GTCRvsM7DBuLZIEN4WzjoLYAcrjZ2XYLsC6XmnDzIp1HF
-nZwrXUROp4MhKuy+SIdFqZLoU/+AIB28WI3euIDDuERSZLff11hphRG5S9wZ8EJH
-Jyl2WgP4r8wQtHs71iT06KDFuBcNqGYPwCjnvE86WFXE3wOJ91+l9u8MYvOSVOHq
-4iUIpRFD4hlCWOIc1V9QYKf2s8Vkeoop/pUutK5NpLtMFgJpFPNYxyfBL13fo9lM
-0iVuoG3W+iDjqZyUPoDxG4rI6Q9WvkswLxVwpMgzDUbUl2aKHcm4Z215dBMm40zh
-ft+QzZEnMVzln2eTCcH91IXcsyPPACmKwraAik5ULEn4m++KtdwDZ6R1zzgRJrn9
-FI6L7C0nfKKemBdzGMCzQuciuPLIjfzXHdKr5bb0C1WS88IB0lYIs+pzpvms2P0F
-AQ2nDgFKA9xlzX2f1O/YQNKA1ctc8RH5tpZUUVfheIqd0U4udp9Rqecd+/r23ENU
-7kjeuxXfUbH83P0hrsQQFkkOeRWWz8+UYvqIEwWaSObdZCvTdIjRpNmmamWsAmsJ
-D5Q2AMMMmNwIi5fUKYJgwTfsgY0XIekk6wmugKs3gCj1RKX930b9fniiol/Gv2VS
-fJRrqds7F0s=
------END ENCRYPTED PRIVATE KEY-----
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh
deleted file mode 100755
index 36e92bd30..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/sh
-set -e
-CA_PASSWORD="${CA_PASSWORD:-use_strong_password_ca}"
-KEYSTORE_PASSWORD="${KEYSTORE_PASSWORD:-use_strong_password_keystore}"
-TRUSTSTORE_PASSWORD="${TRUSTSTORE_PASSWORD:-use_strong_password_truststore}"
-OUTPUT_FOLDER=${OUTPUT_FOLDER:-$( dirname "$0" )}
-CNS=${@:-client}
-
-cd ${OUTPUT_FOLDER}
-CA_ROOT_KEY=caroot.key
-CA_ROOT_CRT=caroot.crt
-
-echo "# Generate CA"
-openssl req -new -x509 -keyout $CA_ROOT_KEY \
- -out $CA_ROOT_CRT -days 3650 -subj \
- '/CN=caroot/OU=/O=/L=/ST=/C=' -passin "pass:${CA_PASSWORD}" \
- -passout "pass:${CA_PASSWORD}"
-
-for CN in $CNS; do
- KEYSTORE=$CN.keystore.p12
- TRUSTSTORE=$CN.truststore.p12
- SIGNED_CRT=$CN-ca-signed.crt
- CERTIFICATE=$CN.certificate.pem
- KEY=$CN.key
- # Get specific password for this CN
- CN_KEYSTORE_PASSWORD="$(eval echo \$${CN}_KEYSTORE_PASSWORD)"
- if [ -z "$CN_KEYSTORE_PASSWORD" ]; then
- CN_KEYSTORE_PASSWORD=${KEYSTORE_PASSWORD}_$CN
- fi
-
- echo ${CN_KEYSTORE_PASSWORD}
-
- echo "# $CN: Generate Keystore"
- keytool -genkey -noprompt \
- -alias $CN \
- -dname "CN=$CN,OU=,O=,L=,S=,C=" \
- -ext "SAN=dns:$CN,dns:localhost" \
- -keystore $KEYSTORE \
- -keyalg RSA \
- -storepass "${CN_KEYSTORE_PASSWORD}" \
- -storetype pkcs12
-
- echo "# $CN: Generate Truststore"
- keytool -noprompt -keystore \
- $TRUSTSTORE -alias caroot -import \
- -file $CA_ROOT_CRT -storepass "${TRUSTSTORE_PASSWORD}"
-
- echo "# $CN: Generate CSR"
- keytool -keystore $KEYSTORE -alias $CN \
- -certreq -file $CN.csr -storepass "${CN_KEYSTORE_PASSWORD}" \
- -keypass "${CN_KEYSTORE_PASSWORD}" \
- -ext "SAN=dns:$CN,dns:localhost"
-
- echo "# $CN: Generate extfile"
- cat << EOF > extfile
-[req]
-distinguished_name = req_distinguished_name
-x509_extensions = v3_req
-prompt = no
-[req_distinguished_name]
-CN = $CN
-[v3_req]
-subjectAltName = @alt_names
-[alt_names]
-DNS.1 = $CN
-DNS.2 = localhost
-EOF
-
- echo "# $CN: Sign the certificate with the CA"
- openssl x509 -req -CA $CA_ROOT_CRT -CAkey $CA_ROOT_KEY \
- -in $CN.csr \
- -out $CN-ca-signed.crt -days 9999 \
- -CAcreateserial -passin "pass:${CA_PASSWORD}" \
- -extensions v3_req -extfile extfile
-
- echo "# $CN: Import root certificate"
- keytool -noprompt -keystore $KEYSTORE \
- -alias caroot -import -file $CA_ROOT_CRT -storepass "${CN_KEYSTORE_PASSWORD}"
-
- echo "# $CN: Import signed certificate"
- keytool -noprompt -keystore $KEYSTORE -alias $CN \
- -import -file $SIGNED_CRT -storepass "${CN_KEYSTORE_PASSWORD}" \
- -ext "SAN=dns:$CN,dns:localhost"
-
- echo "# $CN: Export PEM certificate"
- openssl pkcs12 -in "$KEYSTORE" -out "$CERTIFICATE" \
- -nodes -passin "pass:${CN_KEYSTORE_PASSWORD}"
-
- echo "# $CN: Export PEM key"
- openssl pkcs12 -in "$KEYSTORE" -out "$KEY" \
- -nocerts -passin "pass:${CN_KEYSTORE_PASSWORD}" \
- -passout "pass:${CN_KEYSTORE_PASSWORD}"
-done
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore
deleted file mode 100644
index ee48ae07b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-fuzz_regex
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile
deleted file mode 100644
index dc3e78bf3..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-PROGRAMS?=fuzz_regex
-
-all: $(PROGRAMS)
-
-
-fuzz_%:
- $(CC) -fsanitize=address -D WITH_MAIN -g -Wall \
- -I../../src $@.c -o $@ ../../src/librdkafka.a
-
-
-clean:
- rm -f $(PROGRAMS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md
deleted file mode 100644
index b5a0333b1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Fuzzing
-librdkafka supports fuzzing by way of libFuzzer and OSS-Fuzz. This is ongoing work.
-
-## Launching the fuzzers
-The easiest way to launch the fuzzers is to go through OSS-Fuzz. The only prerequisite to this is having Docker installed.
-
-With Docker installed, the following commands will build and run the fuzzers in this directory:
-
-```
-git clone https://github.com/google/oss-fuzz
-cd oss-fuzz
-python3 infra/helper.py build_image librdkafka
-python3 infra/helper.py build_fuzzers librdkafka
-python3 infra/helper.py run_fuzzer librdkafka FUZZ_NAME
-```
-where FUZZ_NAME references the name of the fuzzer. Currently the only fuzzer we have is `fuzz_regex`.
-
-Notice that the OSS-Fuzz `helper.py` script above will create a Docker image in which the code of librdkafka will be built. As such, depending on how you installed Docker, you may be asked to have root access (i.e. run with `sudo`).
-
-
-## Running a single reproducer
-
-Download the reproducer file from the OSS-Fuzz issue tracker, then build
-the failed test case by running `make` in this directory, and then
-run the test case and pass it the reproducer files, e.g.:
-
- $ make
- $ ./fuzz_regex ~/Downloads/clusterfuzz-testcase-...
-
-**Note:** Some test cases, such as fuzz_regex, require a specific librdkafka
-          build configuration. See the test case source for details.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c
deleted file mode 100644
index 2facc19f0..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * Fuzzer test case for the builtin regexp engine in src/regexp.c
- *
- * librdkafka must be built with --disable-regex-ext
- */
-
-#include "rd.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-
-#include "regexp.h"
-
-int LLVMFuzzerTestOneInput(uint8_t *data, size_t size) {
- /* wrap random data in a null-terminated string */
- char *null_terminated = malloc(size + 1);
- memcpy(null_terminated, data, size);
- null_terminated[size] = '\0';
-
- const char *error;
- Reprog *p = re_regcomp(null_terminated, 0, &error);
- if (p != NULL) {
- re_regfree(p);
- }
-
- /* cleanup */
- free(null_terminated);
-
- return 0;
-}
-
-#if WITH_MAIN
-#include "helpers.h"
-
-int main(int argc, char **argv) {
- int i;
- for (i = 1; i < argc; i++) {
- size_t size;
- uint8_t *buf = read_file(argv[i], &size);
- LLVMFuzzerTestOneInput(buf, size);
- free(buf);
- }
-}
-#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h
deleted file mode 100644
index cfab03777..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _HELPERS_H_
-#define _HELPERS_H_
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/stat.h>
-
-
-/**
- * Fuzz program helpers
- */
-
-static __attribute__((unused)) uint8_t *read_file(const char *path,
- size_t *sizep) {
- int fd;
- uint8_t *buf;
- struct stat st;
-
- if ((fd = open(path, O_RDONLY)) == -1) {
- fprintf(stderr, "Failed to open %s: %s\n", path,
- strerror(errno));
- exit(2);
- return NULL; /* NOTREACHED */
- }
-
- if (fstat(fd, &st) == -1) {
- fprintf(stderr, "Failed to stat %s: %s\n", path,
- strerror(errno));
- close(fd);
- exit(2);
- return NULL; /* NOTREACHED */
- }
-
-
- buf = malloc(st.st_size + 1);
- if (!buf) {
- fprintf(stderr, "Failed to malloc %d bytes for %s\n",
- (int)st.st_size, path);
- close(fd);
- exit(2);
- return NULL; /* NOTREACHED */
- }
-
- buf[st.st_size] = '\0';
-
- *sizep = read(fd, buf, st.st_size);
- if (*sizep != st.st_size) {
- fprintf(stderr, "Could only read %d/%d bytes from %s\n",
- (int)*sizep, (int)st.st_size, path);
- free(buf);
- close(fd);
- exit(2);
- return NULL; /* NOTREACHED */
- }
-
- return buf;
-}
-
-
-#endif /* _HELPERS_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh
deleted file mode 100755
index 0e04c149d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/bin/bash
-#
-#
-# This script generates:
-# - root CA certificate
-# - server certificate and keystore
-# - client keys
-#
-# https://cwiki.apache.org/confluence/display/KAFKA/Deploying+SSL+for+Kafka
-#
-
-
-if [[ "$1" == "-k" ]]; then
- USE_KEYTOOL=1
- shift
-else
- USE_KEYTOOL=0
-fi
-
-OP="$1"
-CA_CERT="$2"
-PFX="$3"
-HOST="$4"
-
-C=NN
-ST=NN
-L=NN
-O=NN
-OU=NN
-CN="$HOST"
-
-
-# Password
-PASS="abcdefgh"
-
-# Cert validity, in days
-VALIDITY=10000
-
-set -e
-
-export LC_ALL=C
-
-if [[ $OP == "ca" && ! -z "$CA_CERT" && ! -z "$3" ]]; then
- CN="$3"
- openssl req -new -x509 -keyout ${CA_CERT}.key -out $CA_CERT -days $VALIDITY -passin "pass:$PASS" -passout "pass:$PASS" <<EOF
-${C}
-${ST}
-${L}
-${O}
-${OU}
-${CN}
-$USER@${CN}
-.
-.
-EOF
-
-
-
-elif [[ $OP == "server" && ! -z "$CA_CERT" && ! -z "$PFX" && ! -z "$CN" ]]; then
-
- #Step 1
- echo "############ Generating key"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias localhost -validity $VALIDITY -genkey -keyalg RSA <<EOF
-$CN
-$OU
-$O
-$L
-$ST
-$C
-yes
-yes
-EOF
-
- #Step 2
- echo "############ Adding CA"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.truststore.jks -alias CARoot -import -file $CA_CERT <<EOF
-yes
-EOF
-
- #Step 3
- echo "############ Export certificate"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias localhost -certreq -file ${PFX}cert-file
-
- echo "############ Sign certificate"
- openssl x509 -req -CA $CA_CERT -CAkey ${CA_CERT}.key -in ${PFX}cert-file -out ${PFX}cert-signed -days $VALIDITY -CAcreateserial -passin "pass:$PASS"
-
-
- echo "############ Import CA"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias CARoot -import -file $CA_CERT <<EOF
-yes
-EOF
-
- echo "############ Import signed CA"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias localhost -import -file ${PFX}cert-signed
-
-
-elif [[ $OP == "client" && ! -z "$CA_CERT" && ! -z "$PFX" && ! -z "$CN" ]]; then
-
- if [[ $USE_KEYTOOL == 1 ]]; then
- echo "############ Creating client truststore"
-
- [[ -f ${PFX}client.truststore.jks ]] || keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.truststore.jks -alias CARoot -import -file $CA_CERT <<EOF
-yes
-EOF
-
- echo "############ Generating key"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.keystore.jks -alias localhost -validity $VALIDITY -genkey -keyalg RSA <<EOF
-$CN
-$OU
-$O
-$L
-$ST
-$C
-yes
-yes
-EOF
- echo "########### Export certificate"
- keytool -storepass "$PASS" -keystore ${PFX}client.keystore.jks -alias localhost -certreq -file ${PFX}cert-file
-
- echo "########### Sign certificate"
- openssl x509 -req -CA ${CA_CERT} -CAkey ${CA_CERT}.key -in ${PFX}cert-file -out ${PFX}cert-signed -days $VALIDITY -CAcreateserial -passin pass:$PASS
-
- echo "########### Import CA"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.keystore.jks -alias CARoot -import -file ${CA_CERT} <<EOF
-yes
-EOF
-
- echo "########### Import signed CA"
- keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.keystore.jks -alias localhost -import -file ${PFX}cert-signed
-
- else
- # Standard OpenSSL keys
- echo "############ Generating key"
- openssl genrsa -des3 -passout "pass:$PASS" -out ${PFX}client.key 2048
-
- echo "############ Generating request"
- openssl req -passin "pass:$PASS" -passout "pass:$PASS" -key ${PFX}client.key -new -out ${PFX}client.req \
- <<EOF
-$C
-$ST
-$L
-$O
-$OU
-$CN
-.
-$PASS
-.
-EOF
-
- echo "########### Signing key"
- openssl x509 -req -passin "pass:$PASS" -in ${PFX}client.req -CA $CA_CERT -CAkey ${CA_CERT}.key -CAcreateserial -out ${PFX}client.pem -days $VALIDITY
-
- fi
-
-
-
-
-else
- echo "Usage: $0 ca <ca-cert-file> <CN>"
- echo " $0 [-k] server|client <ca-cert-file> <file_prefix> <hostname>"
- echo ""
- echo " -k = Use keytool/Java Keystore, else standard SSL keys"
- exit 1
-fi
-
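As a usage sketch for the script above (invoked here as gen-ssl-certs.sh; file
names and hostname are illustrative): first create the CA, then generate the
broker keystores and a client key signed by that CA, matching the usage text
printed by the script:

    ./gen-ssl-certs.sh ca ca-cert broker1.example.com
    ./gen-ssl-certs.sh server ca-cert broker1_ broker1.example.com
    ./gen-ssl-certs.sh client ca-cert client1_ client1.example.com
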
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py b/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py
deleted file mode 100755
index bcd4931f9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/env python3
-#
-#
-# Run librdkafka regression tests on different supported broker versions.
-#
-# Requires:
-# trivup python module
-# gradle in your PATH
-
-from trivup.trivup import Cluster
-from trivup.apps.ZookeeperApp import ZookeeperApp
-from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
-from trivup.apps.KerberosKdcApp import KerberosKdcApp
-from trivup.apps.SslApp import SslApp
-from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
-
-from cluster_testing import read_scenario_conf
-
-import subprocess
-import tempfile
-import os
-import sys
-import argparse
-import json
-
-
-def version_as_number(version):
- if version == 'trunk':
- return sys.maxsize
- tokens = version.split('.')
- return float('%s.%s' % (tokens[0], tokens[1]))
-
-
-def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
- exec_cnt=1,
- root_path='tmp', broker_cnt=3, scenario='default'):
- """
- @brief Create, deploy and start a Kafka cluster using Kafka \\p version
- Then run librdkafka's regression tests.
- """
-
- print('## Test version %s' % version)
-
- cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)
-
- if conf.get('sasl_oauthbearer_method') == 'OIDC':
- oidc = OauthbearerOIDCApp(cluster)
-
- # Enable SSL if desired
- if 'SSL' in conf.get('security.protocol', ''):
- cluster.ssl = SslApp(cluster, conf)
-
- # One ZK (from Kafka repo)
- zk1 = ZookeeperApp(cluster)
- zk_address = zk1.get('address')
-
- # Start Kerberos KDC if GSSAPI is configured
- if 'GSSAPI' in conf.get('sasl_mechanisms', ''):
- KerberosKdcApp(cluster, 'MYREALM').start()
-
- defconf = {'version': version}
- defconf.update(conf)
-
- print('conf: ', defconf)
-
- brokers = []
- for n in range(0, broker_cnt):
- # Configure rack & replica selector if broker supports
- # fetch-from-follower
- if version_as_number(version) >= 2.4:
- defconf.update(
- {
- 'conf': [
- 'broker.rack=RACK${appid}',
- 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501
- brokers.append(KafkaBrokerApp(cluster, defconf))
-
- cmd_env = os.environ.copy()
-
- # Generate test config file
- security_protocol = 'PLAINTEXT'
- fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
- os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
- os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
- if version.startswith('0.9') or version.startswith('0.8'):
- os.write(fd, 'api.version.request=false\n'.encode('ascii'))
- os.write(
- fd, ('broker.version.fallback=%s\n' %
- version).encode('ascii'))
- # SASL (only one mechanism supported)
- mech = defconf.get('sasl_mechanisms', '').split(',')[0]
- if mech != '':
- os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
- if mech == 'PLAIN' or mech.find('SCRAM') != -1:
- print(
- '# Writing SASL %s client config to %s' %
- (mech, test_conf_file))
- security_protocol = 'SASL_PLAINTEXT'
- # Use first user as SASL user/pass
- for up in defconf.get('sasl_users', '').split(','):
- u, p = up.split('=')
- os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
- os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
- break
- elif mech == 'OAUTHBEARER':
- security_protocol = 'SASL_PLAINTEXT'
- if defconf.get('sasl_oauthbearer_method') == 'OIDC':
- os.write(
- fd, ('sasl.oauthbearer.method=OIDC\n'.encode(
- 'ascii')))
- os.write(
- fd, ('sasl.oauthbearer.client.id=123\n'.encode(
- 'ascii')))
- os.write(
- fd, ('sasl.oauthbearer.client.secret=abc\n'.encode(
- 'ascii')))
- os.write(
- fd, ('sasl.oauthbearer.extensions=\
- ExtensionworkloadIdentity=develC348S,\
- Extensioncluster=lkc123\n'.encode(
- 'ascii')))
- os.write(
- fd, ('sasl.oauthbearer.scope=test\n'.encode(
- 'ascii')))
- cmd_env['VALID_OIDC_URL'] = oidc.conf.get('valid_url')
- cmd_env['INVALID_OIDC_URL'] = oidc.conf.get('badformat_url')
- cmd_env['EXPIRED_TOKEN_OIDC_URL'] = oidc.conf.get(
- 'expired_url')
-
- else:
- os.write(
- fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode(
- 'ascii')))
- os.write(fd, ('sasl.oauthbearer.config=%s\n' %
- 'scope=requiredScope principal=admin').encode(
- 'ascii'))
- else:
- print(
- '# FIXME: SASL %s client config not written to %s' %
- (mech, test_conf_file))
-
- # SSL support
- ssl = getattr(cluster, 'ssl', None)
- if ssl is not None:
- if 'SASL' in security_protocol:
- security_protocol = 'SASL_SSL'
- else:
- security_protocol = 'SSL'
-
- key = ssl.create_cert('librdkafka')
-
- os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii'))
- os.write(fd, ('ssl.certificate.location=%s\n' %
- key['pub']['pem']).encode('ascii'))
- os.write(
- fd, ('ssl.key.location=%s\n' %
- key['priv']['pem']).encode('ascii'))
- os.write(
- fd, ('ssl.key.password=%s\n' %
- key['password']).encode('ascii'))
-
- for k, v in ssl.ca.items():
- cmd_env['SSL_ca_{}'.format(k)] = v
-
- # Set envs for all generated keys so tests can find them.
- for k, v in key.items():
- if isinstance(v, dict):
- for k2, v2 in v.items():
- # E.g. "SSL_priv_der=path/to/librdkafka-priv.der"
- cmd_env['SSL_{}_{}'.format(k, k2)] = v2
- else:
- cmd_env['SSL_{}'.format(k)] = v
-
- # Define bootstrap brokers based on selected security protocol
- print('# Using client security.protocol=%s' % security_protocol)
- all_listeners = (
- ','.join(
- cluster.get_all(
- 'listeners',
- '',
- KafkaBrokerApp))).split(',')
- bootstrap_servers = ','.join(
- [x for x in all_listeners if x.startswith(security_protocol)])
- os.write(fd, ('bootstrap.servers=%s\n' %
- bootstrap_servers).encode('ascii'))
- os.write(fd, ('security.protocol=%s\n' %
- security_protocol).encode('ascii'))
- os.close(fd)
-
- if deploy:
- print('# Deploying cluster')
- cluster.deploy()
- else:
- print('# Not deploying')
-
- print('# Starting cluster, instance path %s' % cluster.instance_path())
- cluster.start()
-
- print('# Waiting for brokers to come up')
-
- if not cluster.wait_operational(30):
- cluster.stop(force=True)
- raise Exception('Cluster %s did not go operational, see logs in %s/%s' % # noqa: E501
- (cluster.name, cluster.root_path, cluster.instance))
-
- print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)
-
- cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir')
- cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file
- cmd_env['ZK_ADDRESS'] = zk_address
- cmd_env['BROKERS'] = bootstrap_servers
- cmd_env['TEST_KAFKA_VERSION'] = version
- cmd_env['TRIVUP_ROOT'] = cluster.instance_path()
- cmd_env['TEST_SCENARIO'] = scenario
-
- # Provide an HTTPS REST endpoint for the HTTP client tests.
- cmd_env['RD_UT_HTTP_URL'] = 'https://jsonplaceholder.typicode.com/users'
-
- # Per broker env vars
- for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]:
- cmd_env['BROKER_ADDRESS_%d' % b.appid] = \
- ','.join([x for x in b.conf['listeners'].split(
- ',') if x.startswith(security_protocol)])
- # Add each broker pid as an env so they can be killed individually.
- cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)
- # JMX port, if available
- jmx_port = b.conf.get('jmx_port', None)
- if jmx_port is not None:
- cmd_env['BROKER_JMX_PORT_%d' % b.appid] = str(jmx_port)
-
- if not cmd:
- cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\\w$ ' % (
- cluster.name, version)
- cmd = 'bash --rcfile <(cat ~/.bashrc)'
-
- ret = True
-
- for i in range(0, exec_cnt):
- retcode = subprocess.call(
- cmd,
- env=cmd_env,
- shell=True,
- executable='/bin/bash')
- if retcode != 0:
- print('# Command failed with returncode %d: %s' % (retcode, cmd))
- ret = False
-
- try:
- os.remove(test_conf_file)
- except BaseException:
- pass
-
- cluster.stop(force=True)
-
- cluster.cleanup(keeptypes=['log'])
- return ret
-
-
-if __name__ == '__main__':
-
- parser = argparse.ArgumentParser(
- description='Start a Kafka cluster and provide an interactive shell')
-
- parser.add_argument('versions', type=str, default=None, nargs='+',
- help='Kafka version(s) to deploy')
- parser.add_argument('--no-deploy', action='store_false', dest='deploy',
- default=True,
- help="Don't deploy applications; "
- 'assume they are already deployed.')
- parser.add_argument('--conf', type=str, dest='conf', default=None,
- help='JSON config object (not file)')
- parser.add_argument('--scenario', type=str, dest='scenario',
- default='default',
- help='Test scenario (see scenarios/ directory)')
- parser.add_argument('-c', type=str, dest='cmd', default=None,
- help='Command to execute instead of shell')
- parser.add_argument('-n', type=int, dest='exec_cnt', default=1,
- help='Number of times to execute -c ..')
- parser.add_argument('--debug', action='store_true', dest='debug',
- default=False,
- help='Enable trivup debugging')
- parser.add_argument(
- '--root',
- type=str,
- default=os.environ.get(
- 'TRIVUP_ROOT',
- 'tmp'),
- help='Root working directory')
- parser.add_argument(
- '--port',
- default=None,
- help='Base TCP port to start allocating from')
- parser.add_argument(
- '--kafka-src',
- dest='kafka_path',
- type=str,
- default=None,
- help='Path to Kafka git repo checkout (used for version=trunk)')
- parser.add_argument(
- '--brokers',
- dest='broker_cnt',
- type=int,
- default=3,
- help='Number of Kafka brokers')
- parser.add_argument('--ssl', dest='ssl', action='store_true',
- default=False,
- help='Enable SSL endpoints')
- parser.add_argument(
- '--sasl',
- dest='sasl',
- type=str,
- default=None,
- help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)')
- parser.add_argument(
- '--oauthbearer-method',
- dest='sasl_oauthbearer_method',
- type=str,
- default=None,
- help='OAUTHBEARER/OIDC method (DEFAULT, OIDC); '
- 'requires --sasl to be set to OAUTHBEARER')
-
- args = parser.parse_args()
- if args.conf is not None:
- args.conf = json.loads(args.conf)
- else:
- args.conf = {}
-
- args.conf.update(read_scenario_conf(args.scenario))
-
- if args.port is not None:
- args.conf['port_base'] = int(args.port)
- if args.kafka_path is not None:
- args.conf['kafka_path'] = args.kafka_path
- if args.ssl:
- args.conf['security.protocol'] = 'SSL'
- if args.sasl:
- if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM')
- != -1) and 'sasl_users' not in args.conf:
- args.conf['sasl_users'] = 'testuser=testpass'
- args.conf['sasl_mechanisms'] = args.sasl
- retcode = 0
- if args.sasl_oauthbearer_method:
- if args.sasl_oauthbearer_method == "OIDC" and \
- args.conf['sasl_mechanisms'] != 'OAUTHBEARER':
- print('When `--oauthbearer-method=OIDC` is used, '
- '`--sasl` must be set to `OAUTHBEARER`')
- retcode = 3
- sys.exit(retcode)
- args.conf['sasl_oauthbearer_method'] = \
- args.sasl_oauthbearer_method
-
- args.conf.setdefault('conf', list()).append("log.retention.bytes=1000000000")
-
- for version in args.versions:
- r = test_version(version, cmd=args.cmd, deploy=args.deploy,
- conf=args.conf, debug=args.debug,
- exec_cnt=args.exec_cnt,
- root_path=args.root, broker_cnt=args.broker_cnt,
- scenario=args.scenario)
- if not r:
- retcode = 2
-
- sys.exit(retcode)
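Two invocation sketches for the script above (version string illustrative;
"make run_seq" is the same command used by multi-broker-version-test.sh):

    # Interactive shell against a deployed three-broker cluster:
    ./interactive_broker_version.py 0.11.0.0

    # Non-interactive: enable SASL/PLAIN and run the test suite once:
    ./interactive_broker_version.py --sasl PLAIN -c "make run_seq" 0.11.0.0
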
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore
deleted file mode 100644
index 6fd0ef029..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.pc
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt
deleted file mode 100644
index c606bc426..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-set(
- sources
- interceptor_test.c
-)
-
-
-add_library(interceptor_test SHARED ${sources})
-
-target_include_directories(interceptor_test PUBLIC ${PROJECT_SOURCE_DIR}/src)
-
-target_link_libraries(interceptor_test PUBLIC rdkafka)
-
-# Remove "lib" prefix
-set_target_properties(interceptor_test PROPERTIES PREFIX "")
-set_target_properties(interceptor_test PROPERTIES
- LIBRARY_OUTPUT_DIRECTORY ${tests_OUTPUT_DIRECTORY}/interceptor_test/)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile
deleted file mode 100644
index 125e36032..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-PKGNAME= interceptor_test
-LIBNAME= interceptor_test
-LIBVER= 1
-
--include ../../Makefile.config
-
-SRCS= interceptor_test.c
-
-OBJS= $(SRCS:.c=.o)
-
-# For rdkafka.h
-CPPFLAGS+=-I../../src
-LDFLAGS+=-L../../src
-LIBS+=-lrdkafka
-
-all: lib
-
-include ../../mklove/Makefile.base
-
-clean: lib-clean
-
--include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c
deleted file mode 100644
index ee8a63ba9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @brief Interceptor plugin test library
- *
- * Interceptors can be implemented in the app itself and use
- * the direct API to set the interceptors methods, or be implemented
- * as an external plugin library that uses the direct APIs.
- *
- * This file implements the latter, an interceptor plugin library.
- */
-
-#define _CRT_SECURE_NO_WARNINGS /* Silence MSVC nonsense */
-
-#include "../test.h"
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-
-/* typical include path outside tests is <librdkafka/rdkafka.h> */
-#include "rdkafka.h"
-
-#include "interceptor_test.h"
-
-#ifdef _WIN32
-#define DLL_EXPORT __declspec(dllexport)
-#else
-#define DLL_EXPORT
-#endif
-
-/**
- * @brief Interceptor instance.
- *
- * An interceptor instance is created for each intercepted configuration
- * object (triggered through conf_init() which is the plugin loader,
- * or by conf_dup() which is a copying of a conf previously seen by conf_init())
- */
-struct ici {
- rd_kafka_conf_t *conf; /**< Interceptor config */
- char *config1; /**< Interceptor-specific config */
- char *config2;
-
- int on_new_cnt;
- int on_conf_destroy_cnt;
-};
-
-static char *my_interceptor_plug_opaque = "my_interceptor_plug_opaque";
-
-
-
-/* Producer methods */
-rd_kafka_resp_err_t
-on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- struct ici *ici = ic_opaque;
- printf("on_send: %p\n", ici);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-rd_kafka_resp_err_t on_acknowledgement(rd_kafka_t *rk,
- rd_kafka_message_t *rkmessage,
- void *ic_opaque) {
- struct ici *ici = ic_opaque;
- printf("on_acknowledgement: %p: err %d, partition %" PRId32 "\n", ici,
- rkmessage->err, rkmessage->partition);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/* Consumer methods */
-rd_kafka_resp_err_t
-on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
- struct ici *ici = ic_opaque;
- printf("on_consume: %p: partition %" PRId32 " @ %" PRId64 "\n", ici,
- rkmessage->partition, rkmessage->offset);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-rd_kafka_resp_err_t on_commit(rd_kafka_t *rk,
- const rd_kafka_topic_partition_list_t *offsets,
- rd_kafka_resp_err_t err,
- void *ic_opaque) {
- struct ici *ici = ic_opaque;
- printf("on_commit: %p: err %d\n", ici, err);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static void ici_destroy(struct ici *ici) {
- if (ici->conf)
- rd_kafka_conf_destroy(ici->conf);
- if (ici->config1)
- free(ici->config1);
- if (ici->config2)
- free(ici->config2);
- free(ici);
-}
-
-rd_kafka_resp_err_t on_destroy(rd_kafka_t *rk, void *ic_opaque) {
- struct ici *ici = ic_opaque;
- printf("on_destroy: %p\n", ici);
- /* the ici is freed from on_conf_destroy() */
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Called from rd_kafka_new(). We use it to set up interceptors.
- */
-static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
- const rd_kafka_conf_t *conf,
- void *ic_opaque,
- char *errstr,
- size_t errstr_size) {
- struct ici *ici = ic_opaque;
-
- ictest.on_new.cnt++;
- ici->on_new_cnt++;
-
- TEST_SAY("on_new(rk %p, conf %p, ici->conf %p): %p: #%d\n", rk, conf,
- ici->conf, ici, ictest.on_new.cnt);
-
- ICTEST_CNT_CHECK(on_new);
- TEST_ASSERT(ici->on_new_cnt == 1);
-
- TEST_ASSERT(!ictest.session_timeout_ms);
- TEST_ASSERT(!ictest.socket_timeout_ms);
- /* Extract some well known config properties from the interceptor's
- * configuration. */
- ictest.session_timeout_ms =
- rd_strdup(test_conf_get(ici->conf, "session.timeout.ms"));
- ictest.socket_timeout_ms =
- rd_strdup(test_conf_get(ici->conf, "socket.timeout.ms"));
- ictest.config1 = rd_strdup(ici->config1);
- ictest.config2 = rd_strdup(ici->config2);
-
- rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send, ici);
- rd_kafka_interceptor_add_on_acknowledgement(rk, __FILE__,
- on_acknowledgement, ici);
- rd_kafka_interceptor_add_on_consume(rk, __FILE__, on_consume, ici);
- rd_kafka_interceptor_add_on_commit(rk, __FILE__, on_commit, ici);
- rd_kafka_interceptor_add_on_destroy(rk, __FILE__, on_destroy, ici);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-/**
- * @brief Configuration set handler
- */
-static rd_kafka_conf_res_t on_conf_set(rd_kafka_conf_t *conf,
- const char *name,
- const char *val,
- char *errstr,
- size_t errstr_size,
- void *ic_opaque) {
- struct ici *ici = ic_opaque;
- int level = 3;
-
- if (!strcmp(name, "session.timeout.ms") ||
- !strcmp(name, "socket.timeout.ms") ||
- !strncmp(name, "interceptor_test", strlen("interceptor_test")))
- level = 2;
-
- TEST_SAYL(level, "on_conf_set(conf %p, \"%s\", \"%s\"): %p\n", conf,
- name, val, ici);
-
- if (!strcmp(name, "interceptor_test.good"))
- return RD_KAFKA_CONF_OK;
- else if (!strcmp(name, "interceptor_test.bad")) {
- strncpy(errstr, "on_conf_set failed deliberately",
- errstr_size - 1);
- errstr[errstr_size - 1] = '\0';
- return RD_KAFKA_CONF_INVALID;
- } else if (!strcmp(name, "interceptor_test.config1")) {
- if (ici->config1) {
- free(ici->config1);
- ici->config1 = NULL;
- }
- if (val)
- ici->config1 = rd_strdup(val);
- TEST_SAY("on_conf_set(conf %p, %s, %s): %p\n", conf, name, val,
- ici);
- return RD_KAFKA_CONF_OK;
- } else if (!strcmp(name, "interceptor_test.config2")) {
- if (ici->config2) {
- free(ici->config2);
- ici->config2 = NULL;
- }
- if (val)
- ici->config2 = rd_strdup(val);
- return RD_KAFKA_CONF_OK;
- } else {
- /* Apply intercepted client's config properties on
- * interceptor config. */
- rd_kafka_conf_set(ici->conf, name, val, errstr, errstr_size);
- /* UNKNOWN makes the conf_set() call continue with
- * other interceptors and finally the librdkafka properties. */
- return RD_KAFKA_CONF_UNKNOWN;
- }
-
- return RD_KAFKA_CONF_UNKNOWN;
-}
-
-static void conf_init0(rd_kafka_conf_t *conf);
-
-
-/**
- * @brief Set up new configuration on copy.
- */
-static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf,
- const rd_kafka_conf_t *old_conf,
- size_t filter_cnt,
- const char **filter,
- void *ic_opaque) {
- struct ici *ici = ic_opaque;
- TEST_SAY("on_conf_dup(new_conf %p, old_conf %p, filter_cnt %" PRIusz
- ", ici %p)\n",
- new_conf, old_conf, filter_cnt, ici);
- conf_init0(new_conf);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-static rd_kafka_resp_err_t on_conf_destroy(void *ic_opaque) {
- struct ici *ici = ic_opaque;
- ici->on_conf_destroy_cnt++;
- printf("conf_destroy called (opaque %p vs %p) ici %p\n", ic_opaque,
- my_interceptor_plug_opaque, ici);
- TEST_ASSERT(ici->on_conf_destroy_cnt == 1);
- ici_destroy(ici);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-
-
-/**
- * @brief Configuration init is intercepted both from plugin.library.paths
- * as well as rd_kafka_conf_dup().
- * This internal method serves both cases.
- */
-static void conf_init0(rd_kafka_conf_t *conf) {
- struct ici *ici;
- const char *filter[] = {"plugin.library.paths", "interceptor_test."};
- size_t filter_cnt = sizeof(filter) / sizeof(*filter);
-
- /* Create new interceptor instance */
- ici = calloc(1, sizeof(*ici));
-
- ictest.conf_init.cnt++;
- ICTEST_CNT_CHECK(conf_init);
-
- /* Create own copy of configuration, after filtering out what
- * brought us here (plugins and our own interceptor config). */
- ici->conf = rd_kafka_conf_dup_filter(conf, filter_cnt, filter);
- TEST_SAY("conf_init0(conf %p) for ici %p with ici->conf %p\n", conf,
- ici, ici->conf);
-
-
- /* Add interceptor methods */
- rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, ici);
-
- rd_kafka_conf_interceptor_add_on_conf_set(conf, __FILE__, on_conf_set,
- ici);
- rd_kafka_conf_interceptor_add_on_conf_dup(conf, __FILE__, on_conf_dup,
- ici);
- rd_kafka_conf_interceptor_add_on_conf_destroy(conf, __FILE__,
- on_conf_destroy, ici);
-}
-
-/**
- * @brief Plugin conf initializer called when plugin.library.paths is set.
- */
-DLL_EXPORT
-rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
- void **plug_opaquep,
- char *errstr,
- size_t errstr_size) {
- *plug_opaquep = (void *)my_interceptor_plug_opaque;
-
- TEST_SAY("conf_init(conf %p) called (setting opaque to %p)\n", conf,
- *plug_opaquep);
-
- conf_init0(conf);
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
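For context, an application (or the test suite) loads this library through
librdkafka's plugin mechanism rather than linking it in. A minimal
configuration sketch follows; the .so path is illustrative, while the
interceptor_test.* property names are the ones handled by on_conf_set()
above:

    plugin.library.paths=interceptor_test/interceptor_test.so
    interceptor_test.config1=value1
    interceptor_test.config2=value2
    interceptor_test.good=anything
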
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h
deleted file mode 100644
index 646b4b4d6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _INTERCEPTOR_TEST_H_
-#define _INTERCEPTOR_TEST_H_
-
-
-struct ictcnt {
- int cnt;
- int min;
- int max;
-};
-
-struct ictest {
- struct ictcnt conf_init;
- struct ictcnt on_new;
-
- /* intercepted interceptor_test.config1 and .config2 properties */
- char *config1;
- char *config2;
-
- /* intercepted session.timeout.ms and socket.timeout.ms */
- char *session_timeout_ms;
- char *socket_timeout_ms;
-};
-
-#define ictest_init(ICT) memset((ICT), 0, sizeof(ictest))
-#define ictest_cnt_init(CNT, MIN, MAX) \
- do { \
- (CNT)->cnt = 0; \
- (CNT)->min = MIN; \
- (CNT)->max = MAX; \
- } while (0)
-
-#define ictest_free(ICT) \
- do { \
- if ((ICT)->config1) \
- free((ICT)->config1); \
- if ((ICT)->config2) \
- free((ICT)->config2); \
- if ((ICT)->session_timeout_ms) \
- free((ICT)->session_timeout_ms); \
- if ((ICT)->socket_timeout_ms) \
- free((ICT)->socket_timeout_ms); \
- } while (0)
-
-#define ICTEST_CNT_CHECK(F) \
- do { \
- if (ictest.F.cnt > ictest.F.max) \
- TEST_FAIL("interceptor %s count %d > max %d", #F, \
- ictest.F.cnt, ictest.F.max); \
- } while (0)
-
-/* The ictest struct is defined and set up by the calling test. */
-extern struct ictest ictest;
-
-#endif /* _INTERCEPTOR_TEST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore
deleted file mode 100644
index 5241a7220..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.class
\ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java b/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java
deleted file mode 100644
index de044ae58..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
-import org.apache.kafka.common.KafkaException;
-
-import java.lang.Integer;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Properties;
-import java.time.Duration;
-
-
-public class IncrementalRebalanceCli {
- public static void main (String[] args) throws Exception {
- String testName = args[0];
- String brokerList = args[1];
- String topic1 = args[2];
- String topic2 = args[3];
- String group = args[4];
-
- if (!testName.equals("test1")) {
- throw new Exception("Unknown command: " + testName);
- }
-
- Properties consumerConfig = new Properties();
- consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
- consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, group);
- consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, "java_incrreb_consumer");
- consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
- consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
- consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, CooperativeStickyAssignor.class.getName());
- Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig);
-
- List<String> topics = new ArrayList<>();
- topics.add(topic1);
- topics.add(topic2);
- consumer.subscribe(topics);
-
- long startTime = System.currentTimeMillis();
- long timeout_s = 300;
-
- try {
- boolean running = true;
- while (running) {
- ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
- if (System.currentTimeMillis() - startTime > 1000 * timeout_s) {
- // Ensure process exits eventually no matter what happens.
- System.out.println("IncrementalRebalanceCli timed out");
- running = false;
- }
- if (consumer.assignment().size() == 6) {
- // librdkafka has unsubscribed from topic #2, exit cleanly.
- running = false;
- }
- }
- } finally {
- consumer.close();
- }
-
- System.out.println("Java consumer process exiting");
- }
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile
deleted file mode 100644
index 68847075a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-
-KAFKA_JARS?=$(KAFKA_PATH)/libs
-
-CLASSES=Murmur2Cli.class TransactionProducerCli.class IncrementalRebalanceCli.class
-
-all: $(CLASSES)
-
-%.class: %.java
- javac -classpath $(KAFKA_JARS)/kafka-clients-*.jar $^
-
-clean:
- rm -f *.class
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java b/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java
deleted file mode 100644
index 22444532d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java
+++ /dev/null
@@ -1,46 +0,0 @@
-
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-import org.apache.kafka.common.utils.Utils;
-
-public class Murmur2Cli {
- public static int toPositive(int number) {
- return number & 0x7fffffff;
- }
- public static void main (String[] args) throws Exception {
- for (String key : args) {
- System.out.println(String.format("%s\t0x%08x", key,
- toPositive(Utils.murmur2(key.getBytes()))));
- }
- /* If no args, print hash for empty string */
- if (args.length == 0)
- System.out.println(String.format("%s\t0x%08x", "",
- toPositive(Utils.murmur2("".getBytes()))));
- }
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md
deleted file mode 100644
index a2754c258..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Misc Java tools
-
-## Murmur2 CLI
-
-Build:
-
- $ KAFKA_JARS=/your/kafka/libs make
-
-Run:
-
- $ KAFKA_JARS=/your/kafka/libs ./run-class.sh Murmur2Cli "a sentence" and a word
-
-If KAFKA_JARS is not set, it defaults to $KAFKA_PATH/libs.
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java b/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java
deleted file mode 100644
index f880c1422..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2020, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.KafkaException;
-
-import java.lang.Integer;
-import java.util.HashMap;
-import java.util.Properties;
-
-
-public class TransactionProducerCli {
-
- enum TransactionType {
- None,
- BeginAbort,
- BeginCommit,
- BeginOpen,
- ContinueAbort,
- ContinueCommit,
- ContinueOpen
- }
-
- enum FlushType {
- DoFlush,
- DontFlush
- }
-
- static Producer<byte[], byte[]> createProducer(String testid, String id, String brokerList, boolean transactional) {
- Properties producerConfig = new Properties();
- producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
- producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, transactional ? "transactional-producer-" + id : "producer-" + id);
- producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
- if (transactional) {
- producerConfig.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-transactional-id-" + testid + "-" + id);
- }
- producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
- producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
- producerConfig.put(ProducerConfig.LINGER_MS_CONFIG, "5"); // ensure batching.
- Producer<byte[], byte[]> producer = new KafkaProducer<>(producerConfig);
- if (transactional) {
- producer.initTransactions();
- }
- return producer;
- }
-
- static void makeTestMessages(
- Producer<byte[], byte[]> producer,
- String topic, int partition,
- int idStart, int count,
- TransactionType tt,
- FlushType flush) throws InterruptedException {
- byte[] payload = { 0x10, 0x20, 0x30, 0x40 };
- if (tt != TransactionType.None &&
- tt != TransactionType.ContinueOpen &&
- tt != TransactionType.ContinueCommit &&
- tt != TransactionType.ContinueAbort) {
- producer.beginTransaction();
- }
- for (int i = 0; i < count; ++i) {
- ProducerRecord<byte[], byte[]> r = partition != -1
- ? new ProducerRecord<byte[],byte[]>(topic, partition, new byte[] { (byte)(i + idStart) }, payload)
- : new ProducerRecord<byte[], byte[]>(topic, new byte[] { (byte)(i + idStart) }, payload);
- producer.send(r);
- }
- if (flush == FlushType.DoFlush) {
- producer.flush();
- }
- if (tt == TransactionType.BeginAbort || tt == TransactionType.ContinueAbort) {
- producer.abortTransaction();
- } else if (tt == TransactionType.BeginCommit || tt == TransactionType.ContinueCommit) {
- producer.commitTransaction();
- }
- }
-
- static String[] csvSplit(String input) {
- return input.split("\\s*,\\s*");
- }
-
- public static void main (String[] args) throws Exception {
-
- String bootstrapServers = args[0];
-
- HashMap<String, Producer<byte[], byte[]>> producers = new HashMap<String, Producer<byte[], byte[]>>();
-
- String topic = null;
- String testid = null;
-
- /* Parse commands */
- for (int i = 1 ; i < args.length ; i++) {
- String[] cmd = csvSplit(args[i]);
-
- System.out.println("TransactionProducerCli.java: command: '" + args[i] + "'");
-
- if (cmd[0].equals("sleep")) {
- Thread.sleep(Integer.decode(cmd[1]));
-
- } else if (cmd[0].equals("exit")) {
- System.exit(Integer.decode(cmd[1]));
-
- } else if (cmd[0].equals("topic")) {
- topic = cmd[1];
-
- } else if (cmd[0].equals("testid")) {
- testid = cmd[1];
-
- } else if (cmd[0].startsWith("producer")) {
- Producer<byte[], byte[]> producer = producers.get(cmd[0]);
-
- if (producer == null) {
- producer = createProducer(testid, cmd[0], bootstrapServers,
- TransactionType.valueOf(cmd[4]) != TransactionType.None);
- producers.put(cmd[0], producer);
- }
-
- makeTestMessages(producer, /* producer */
- topic, /* topic */
- Integer.decode(cmd[1]), /* partition, or -1 for any */
- Integer.decode(cmd[2]), /* idStart */
- Integer.decode(cmd[3]), /* msg count */
- TransactionType.valueOf(cmd[4]), /* TransactionType */
- FlushType.valueOf(cmd[5])); /* Flush */
-
- } else {
- throw new Exception("Unknown command: " + args[i]);
- }
- }
-
- producers.forEach((k,p) -> p.close());
- }
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh
deleted file mode 100755
index e3e52b1cc..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-#
-
-if [[ -z $KAFKA_PATH ]]; then
- echo "$0: requires \$KAFKA_PATH to point to the kafka release top directory"
- exit 1
-fi
-
-JAVA_TESTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-
-CLASSPATH=$JAVA_TESTS_DIR $KAFKA_PATH/bin/kafka-run-class.sh "$@"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions b/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions
deleted file mode 100644
index 6259dadb1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions
+++ /dev/null
@@ -1,483 +0,0 @@
-# Valgrind suppression file for librdkafka
-{
- allocate_tls_despite_detached_1
- Memcheck:Leak
- fun:calloc
- fun:_dl_allocate_tls
- fun:pthread_create@@GLIBC_2.2.5
-}
-
-{
- helgrind---_dl_allocate_tls
- Helgrind:Race
- fun:mempcpy
- fun:_dl_allocate_tls_init
- ...
- fun:pthread_create@@GLIBC_2.2*
- fun:pthread_create_WRK
- fun:pthread_create@*
-}
-{
- drd_nss1
- drd:ConflictingAccess
- fun:pthread_mutex_lock
- fun:_nss_files_gethostbyname4_r
- fun:gaih_inet
- fun:getaddrinfo
- fun:rd_getaddrinfo
- fun:rd_kafka_broker_resolve
- fun:rd_kafka_broker_connect
- fun:rd_kafka_broker_thread_main
- fun:_thrd_wrapper_function
- obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
- fun:start_thread
- fun:clone
-}
-
-{
- drd_nss2
- drd:ConflictingAccess
- fun:strlen
- fun:nss_load_library
- fun:__nss_lookup_function
- fun:gaih_inet
- fun:getaddrinfo
- fun:rd_getaddrinfo
- fun:rd_kafka_broker_resolve
- fun:rd_kafka_broker_connect
- fun:rd_kafka_broker_thread_main
- fun:_thrd_wrapper_function
- obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
- fun:start_thread
- fun:clone
-}
-{
- drd_nss3
- drd:ConflictingAccess
- fun:__GI_stpcpy
- fun:nss_load_library
- fun:__nss_lookup_function
- fun:gaih_inet
- fun:getaddrinfo
- fun:rd_getaddrinfo
- fun:rd_kafka_broker_resolve
- fun:rd_kafka_broker_connect
- fun:rd_kafka_broker_thread_main
- fun:_thrd_wrapper_function
- obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
- fun:start_thread
- fun:clone
-}
-{
- drd_nss4
- drd:ConflictingAccess
- fun:strlen
- fun:__nss_lookup_function
- fun:gaih_inet
- fun:getaddrinfo
- fun:rd_getaddrinfo
- fun:rd_kafka_broker_resolve
- fun:rd_kafka_broker_connect
- fun:rd_kafka_broker_thread_main
- fun:_thrd_wrapper_function
- obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
- fun:start_thread
- fun:clone
-}
-{
- drd_nss5
- drd:ConflictingAccess
- fun:strlen
- fun:__nss_lookup_function
- fun:gaih_inet
- fun:getaddrinfo
- fun:rd_getaddrinfo
- fun:rd_kafka_broker_resolve
- fun:rd_kafka_broker_connect
- fun:rd_kafka_broker_thread_main
- fun:_thrd_wrapper_function
- obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
- fun:start_thread
- fun:clone
-}
-{
- drd_nss6
- drd:ConflictingAccess
- fun:internal_setent
- fun:_nss_files_gethostbyname4_r
- fun:gaih_inet
- fun:getaddrinfo
- fun:rd_getaddrinfo
- fun:rd_kafka_broker_resolve
- fun:rd_kafka_broker_connect
- fun:rd_kafka_broker_thread_main
- fun:_thrd_wrapper_function
- obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
- fun:start_thread
- fun:clone
-}
-{
- ssl_read
- Memcheck:Cond
- fun:ssl3_read_bytes
- fun:ssl3_read_internal
-}
-
-
-
-{
- ssl_noterm_leak1
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:SSL_library_init
-}
-{
- ssl_noterm_leak2
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:OPENSSL_add_all_algorithms_noconf
-}
-{
- ssl_noterm_leak3
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:OpenSSL_add_all_digests
-}
-{
- ssl_noterm_leak3b
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:realloc
- ...
- fun:OpenSSL_add_all_digests
-}
-{
- ssl_noterm_leak4
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:EVP_add_digest
-}
-{
- ssl_noterm_leak5
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:SSL_load_error_strings
-}
-{
- ssl_noterm_leak6
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:realloc
- ...
- fun:OPENSSL_add_all_algorithms_noconf
-}
-{
- ssl_noterm_leak7
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:ERR_load_SSL_strings
-}
-{
- ssl_noterm_leak8
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:err_load_strings
-}
-{
- ssl_noterm_leak8b
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:ERR_load_strings
-}
-{
- ssl_noterm_leak8c
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:realloc
- ...
- fun:ERR_load_strings
-}
-{
- ssl_noterm_leak9
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:realloc
- ...
- fun:ERR_load_SSL_strings
-}
-{
- ssl_noterm_leak10
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:OPENSSL_init_library
-}
-{
- ssl_noterm_leak10b
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:calloc
- ...
- fun:OPENSSL_init_library
-}
-{
- ssl_noterm_leak11
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:EVP_SignFinal
-}
-{
- ssl_noterm_leak12
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:FIPS_mode_set
-}
-{
- thrd_tls_alloc_stack
- Memcheck:Leak
- match-leak-kinds: possible
- fun:calloc
- fun:allocate_dtv
- fun:_dl_allocate_tls
- fun:allocate_stack
- fun:pthread_create@@GLIBC_2.2.5
- fun:thrd_create
-}
-{
- more_tls1
- Memcheck:Leak
- match-leak-kinds: possible
- fun:calloc
- fun:allocate_dtv
- fun:_dl_allocate_tls
- fun:allocate_stack
-}
-
-{
- ssl_uninit1
- Memcheck:Cond
- fun:rd_kafka_metadata_handle
- fun:rd_kafka_broker_metadata_reply
-}
-{
- ssl_uninit2
- Memcheck:Value8
- fun:rd_kafka_metadata_handle
- fun:rd_kafka_broker_metadata_reply
-}
-{
- ssl_uninit3
- Memcheck:Cond
- fun:memcpy@@GLIBC_2.14
- fun:rd_kafka_metadata_handle
- fun:rd_kafka_broker_metadata_reply
-}
-
-{
- log_races0
- Helgrind:Race
- fun:rd_kafka_log0
-}
-{
- glibc_tls
- Helgrind:Race
- fun:mempcpy
- fun:_dl_allocate_tls_init
- fun:get_cached_stack
- fun:allocate_stack
- fun:pthread_create@@GLIBC_2.2.5
-}
-{
- false_tls
- Helgrind:Race
- fun:thrd_detach
-}
-
-
-# cyrus libsasl2 global/once memory "leaks"
-{
- leak_sasl_global_init1
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:sasl_client_init
-}
-{
- leak_sasl_global_init6
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:calloc
- ...
- fun:sasl_client_init
-}
-
-{
- leak_sasl_dlopen
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:?alloc
- ...
- fun:_dl_catch_error
-}
-{
- leak_sasl_add_plugin
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- ...
- fun:sasl_client_add_plugin
-}
-{
- leak_sasl_add_plugin2
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:calloc
- ...
- fun:sasl_client_add_plugin
-}
-{
- debian_testing_ld_uninitialized
- Memcheck:Cond
- fun:index
- fun:expand_dynamic_string_token
- ...
- fun:_dl_start
- ...
-}
-{
- glibc_internals_nss_race1
- Helgrind:Race
- ...
- fun:getaddrinfo
- ...
-}
-{
- nss_files
- Helgrind:Race
- ...
- fun:_dl_runtime_resolve_avx
- ...
-}
-{
- cpp_glibc_globals
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- fun:pool
- fun:__static_initialization_and_destruction_0
- fun:_GLOBAL__sub_I_eh_alloc.cc
-}
-{
- mtx_unlock_plus_destroy
- Helgrind:Race
- obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
- obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
- fun:rd_kafka_q_destroy_final
-}
-{
- mtx_unlock_plus_destroy2
- Helgrind:Race
- obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
- obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
- fun:rd_refcnt_destroy
-}
-{
- nss_dl_lookup
- Helgrind:Race
- ...
- fun:do_lookup_x
- fun:_dl_lookup_symbol_x
- ...
-}
-{
- dlopen1
- Memcheck:Leak
- match-leak-kinds: reachable
- ...
- fun:_dl_open
-}
-
-{
- atomics32_set
- Helgrind:Race
- fun:rd_atomic32_set
-}
-
-{
- atomics32_get
- Helgrind:Race
- fun:rd_atomic32_get
-}
-
-{
- atomics64_set
- Helgrind:Race
- fun:rd_atomic64_set
-}
-
-{
- atomics64_get
- Helgrind:Race
- fun:rd_atomic64_get
-}
-
-{
- osx_dyld_img
- Memcheck:Leak
- match-leak-kinds: reachable
- fun:malloc
- fun:strdup
- fun:__si_module_static_ds_block_invoke
- fun:_dispatch_client_callout
- fun:_dispatch_once_callout
- fun:si_module_static_ds
- fun:si_module_with_name
- fun:si_module_config_modules_for_category
- fun:__si_module_static_search_block_invoke
- fun:_dispatch_client_callout
- fun:_dispatch_once_callout
- fun:si_module_static_search
- fun:si_module_with_name
- fun:si_search
- fun:getpwuid_r
- fun:_CFRuntimeBridgeClasses
- fun:__CFInitialize
- fun:_ZN16ImageLoaderMachO11doImageInitERKN11ImageLoader11LinkContextE
- fun:_ZN16ImageLoaderMachO16doInitializationERKN11ImageLoader11LinkContextE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader19processInitializersERKNS_11LinkContextEjRNS_21InitializerTimingListERNS_15UninitedUpwardsE
- fun:_ZN11ImageLoader15runInitializersERKNS_11LinkContextERNS_21InitializerTimingListE
- fun:_ZN4dyld24initializeMainExecutableEv
- fun:_ZN4dyld5_mainEPK12macho_headermiPPKcS5_S5_Pm
- fun:_ZN13dyldbootstrap5startEPKN5dyld311MachOLoadedEiPPKcS3_Pm
- fun:_dyld_start
-}
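run-test.sh is expected to pass this file to valgrind/helgrind in its
corresponding modes; a manual invocation would look roughly like this
(test binary name illustrative):

    valgrind --leak-check=full \
             --suppressions=librdkafka.suppressions ./test-runner
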
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh
deleted file mode 100755
index 7c604df73..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-#
-
-#
-# Manual test (verification) of LZ4
-# See README for details
-#
-
-set -e
-# Debug what commands are being executed:
-#set -x
-
-TOPIC=lz4
-
-if [[ $TEST_KAFKA_VERSION == "trunk" ]]; then
- RDK_ARGS="$RDK_ARGS -X api.version.request=true"
-else
- if [[ $TEST_KAFKA_VERSION == 0.8.* ]]; then
- BROKERS=$(echo $BROKERS | sed -e 's/PLAINTEXT:\/\///g')
- fi
- RDK_ARGS="$RDK_ARGS -X broker.version.fallback=$TEST_KAFKA_VERSION"
-fi
-
-# Create topic
-${KAFKA_PATH}/bin/kafka-topics.sh --zookeeper $ZK_ADDRESS --create \
- --topic $TOPIC --partitions 1 --replication-factor 1
-
-# Produce messages with rdkafka
-echo "### Producing with librdkafka: ids 1000-1010"
-seq 1000 1010 | ../examples/rdkafka_example -P -b $BROKERS -t $TOPIC \
- -z lz4 $RDK_ARGS
-
-# Produce with Kafka
-echo "### Producing with Kafka: ids 2000-2010"
-seq 2000 2010 | ${KAFKA_PATH}/bin/kafka-console-producer.sh \
- --broker-list $BROKERS --compression-codec lz4 \
- --topic $TOPIC
-
-# Consume with rdkafka
-echo "### Consuming with librdkafka: expect 1000-1010 and 2000-2010"
-../examples/rdkafka_example -C -b $BROKERS -t $TOPIC -p 0 -o beginning -e -q -A \
- $RDK_ARGS
-
-# Consume with Kafka
-echo "### Consuming with Kafka: expect 1000-1010 and 2000-2010"
-if [[ $TEST_KAFKA_VERSION == "trunk" ]]; then
- ${KAFKA_PATH}/bin/kafka-console-consumer.sh --new-consumer \
- --bootstrap-server $BROKERS --from-beginning --topic $TOPIC \
- --timeout-ms 1000
-else
- ${KAFKA_PATH}/bin/kafka-console-consumer.sh \
- --zookeeper $ZK_ADDRESS --from-beginning --topic $TOPIC \
- --max-messages 22
-fi
-
-
-echo ""
-echo "### $TEST_KAFKA_VERSION: Did you see messages 1000-1010 and 2000-2010 from both consumers?"
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh
deleted file mode 100755
index 3a0a9d104..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-#
-
-set -e
-
-# Test current librdkafka with multiple broker versions.
-
-if [[ ! -z $TEST_KAFKA_VERSION ]]; then
- echo "Must not be run from within a trivup session"
- exit 1
-fi
-
-
-VERSIONS="$*"
-if [[ -z $VERSIONS ]]; then
- VERSIONS="0.8.2.1 0.9.0.1 0.10.0.1 0.10.1.1 0.10.2.1 0.11.0.0"
-fi
-
-FAILED_VERSIONS=""
-PASSED_VERSIONS=""
-for VERSION in $VERSIONS ; do
- echo "Testing broker version $VERSION"
- if [[ $VERSION == "trunk" ]]; then
- extra_args="--kafka-src ~/src/kafka --no-deploy"
- else
- extra_args=""
- fi
- # Run in an if-condition so a failure does not abort the loop under set -e.
- if ./interactive_broker_version.py \
- --root ~/old/kafka -c "make run_seq" $extra_args "$VERSION" ; then
- echo "#### broker $VERSION passed ####"
- PASSED_VERSIONS="${PASSED_VERSIONS}${VERSION} "
- else
- echo "#### broker $VERSION FAILED ####"
- FAILED_VERSIONS="${FAILED_VERSIONS}${VERSION} "
- fi
-done
-
-
-echo "broker versions PASSED: ${PASSED_VERSIONS}"
-echo "broker versions FAILED: ${FAILED_VERSIONS}"
-
-if [[ ! -z $FAILED_VERSIONS ]]; then
- exit 1
-else
- exit 0
-fi
-
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh
deleted file mode 100755
index f77b2a127..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-#
-#
-
-set -e
-
-# Parse a log with --enable-refcnt output enabled.
-
-log="$1"
-
-if [[ ! -f $log ]]; then
- echo "Usage: $0 <log-file>"
- exit 1
-fi
-
-
-# Create a file with all refcnt creations
-cfile=$(mktemp)
-grep 'REFCNT.* 0 +1:' $log | awk '{print $6}' | sort > $cfile
-
-# .. and one file with all refcnt destructions
-dfile=$(mktemp)
-grep 'REFCNT.* 1 -1:' $log | awk '{print $6}' | sort > $dfile
-
-# For each refcnt that was never destructed (never reached 0), find it
-# in the input log.
-
-seen=
-for p in $(grep -v -f $dfile $cfile) ; do
- echo "=== REFCNT $p never reached 0 ==="
- grep -nH "$p" $log
- echo ""
- seen=yes
-done
-
-rm -f "$cfile" "$dfile"
-
-if [[ -z $seen ]]; then
- echo "No refcount leaks found"
- exit 0
-fi
-
-exit 2
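A usage sketch, assuming librdkafka was configured with --enable-refcnt so
that REFCNT lines appear in the test output:

    ./run-test.sh bare 2>&1 | tee test.log
    ./parse-refcnt.sh test.log
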
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py b/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py
deleted file mode 100755
index b699377f1..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python3
-#
-
-import sys
-import json
-import numpy as np
-import matplotlib.pyplot as plt
-
-from collections import defaultdict
-
-
-def semver2int(semver):
- if semver == 'trunk':
- semver = '0.10.0.0'
- vi = 0
- i = 0
- for v in reversed(semver.split('.')):
- vi += int(v) * (100 ** i)  # positional weight for each component
- i += 1
- return vi
-
-
-def get_perf_data(perfname, stats):
- """ Return [labels,x,y,errs] for perfname 'mb_per_sec' as a numpy arrays
- labels: broker versions
- x: list with identical value (to plot on same x point)
- y: perfname counter (average)
- errs: errors
- """
- ver = defaultdict(list)
-
- # Per version:
- # * accumulate values
- # * calculate average
- # * calculate error
-
- # Accumulate values per version
- for x in stats:
- v = str(x[0])
- ver[v].append(x[1][perfname])
- print('%s is %s' % (perfname, ver))
-
- labels0 = sorted(ver.keys(), key=semver2int)
- y0 = list()
- errs0 = list()
-
- # Maintain order by using labels0
- for v in labels0:
- # Calculate average
- avg = sum(ver[v]) / float(len(ver[v]))
- y0.append(avg)
- # Calculate error
- errs0.append(max(ver[v]) - avg)
-
- labels = np.array(labels0)
- y1 = np.array(y0)
- x1 = np.array(range(0, len(labels)))
- errs = np.array(errs0)
- return [labels, x1, y1, errs]
-
-
-def plot(description, name, stats, perfname, outfile=None):
- labels, x, y, errs = get_perf_data(perfname, stats)
- plt.title('%s: %s %s' % (description, name, perfname))
- plt.xlabel('Kafka version')
- plt.ylabel(perfname)
- plt.errorbar(x, y, yerr=errs, alpha=0.5)
- plt.xticks(x, labels, rotation='vertical')
- plt.margins(0.2)
- plt.subplots_adjust(bottom=0.2)
- if outfile is None:
- plt.show()
- else:
- plt.savefig(outfile, bbox_inches='tight')
- return
-
-
-if __name__ == '__main__':
-
- outfile = sys.argv[1]
-
- reports = []
- for rf in sys.argv[2:]:
- with open(rf) as f:
- reports.append(json.load(f))
-
- stats = defaultdict(list)
-
- # Extract performance test data
- for rep in reports:
- perfs = rep.get(
- 'tests',
- dict()).get(
- '0038_performance',
- dict()).get(
- 'report',
- None)
- if perfs is None:
- continue
-
- for perf in perfs:
- for n in ['producer', 'consumer']:
- o = perf.get(n, None)
- if o is None:
- print('no %s in %s' % (n, perf))
- continue
-
- stats[n].append((rep.get('broker_version', 'unknown'), o))
-
- for t in ['producer', 'consumer']:
- for perfname in ['mb_per_sec', 'records_per_sec']:
- plot('librdkafka 0038_performance test: %s (%d samples)' %
- (outfile, len(reports)),
- t, stats[t], perfname, outfile='%s_%s_%s.png' % (
- outfile, t, perfname))
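Invocation sketch (report file names illustrative): the first argument is an
output name prefix and the remaining arguments are JSON test reports that
contain a 0038_performance section; one PNG is written per client type and
metric, e.g. perf_producer_mb_per_sec.png:

    ./performance_plot.py perf report-0.10.2.1.json report-0.11.0.0.json
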
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile
deleted file mode 100644
index a39f18270..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-LIBNAME= plugin_test
-LIBVER= 1
-
--include ../../Makefile.config
-
-SRCS= plugin_test.c
-
-OBJS= $(SRCS:.c=.o)
-
-# For rdkafka.h
-CPPFLAGS+=-I../../src
-
-all: lib
-
-include ../../mklove/Makefile.base
-
-clean: lib-clean
-
--include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c b/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c
deleted file mode 100644
index 54639a5a8..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2017 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @brief Plugin test library
- */
-
-#include <stdio.h>
-#include <assert.h>
-
-/* typical include path outside tests is <librdkafka/rdkafka.h> */
-#include "rdkafka.h"
-
-
-
-static void *my_opaque = (void *)0x5678;
-/*
- * Common methods
- */
-rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
- void **plug_opaquep,
- char *errstr,
- size_t errstr_size) {
- printf("plugin conf_init called!\n");
- *plug_opaquep = my_opaque;
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-void conf_destroy(const rd_kafka_conf_t *conf, void *plug_opaque) {
- assert(plug_opaque == my_opaque);
- printf("plugin destroy called\n");
-}
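
librdkafka picks up a plugin like the one above through its
"plugin.library.paths" configuration property: each listed library is
dlopen()ed and its exported conf_init() is called with the application's
configuration object. A minimal application-side sketch (assuming the
plugin_test library built by the Makefile above is resolvable by the
dynamic loader; error handling kept to the essentials):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    /* Create a configuration object with the test plugin attached. */
    static rd_kafka_conf_t *conf_with_plugin(void) {
            char errstr[512];
            rd_kafka_conf_t *conf = rd_kafka_conf_new();

            /* librdkafka resolves "plugin_test" to the platform's
             * library name, loads it and invokes conf_init(). */
            if (rd_kafka_conf_set(conf, "plugin.library.paths",
                                  "plugin_test", errstr,
                                  sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "plugin load failed: %s\n", errstr);
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }
            return conf;
    }
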
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt
deleted file mode 100644
index c15a66f47..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-trivup >= 0.12.1
-jsoncomment
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh
deleted file mode 100755
index 32165c2d4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-#
-#
-# Run all tests that employ a consumer.
-#
-
-set -e
-
-TESTS=$(for t in $(grep -l '[Cc]onsume' 0*.{c,cpp}); do \
- echo $t | sed -e 's/^\([0-9][0-9][0-9][0-9]\)-.*/\1/g' ; \
- done)
-
-export TESTS
-echo "# Running consumer tests: $TESTS"
-
-./run-test.sh $*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh
deleted file mode 100755
index 7f1035cbb..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-#
-#
-# Run all tests that employ a producer.
-#
-
-set -e
-
-TESTS=$(for t in $(grep -l '[Pp]roduce' 0*.{c,cpp}); do \
- echo $t | sed -e 's/^\([0-9][0-9][0-9][0-9]\)-.*/\1/g' ; \
- done)
-
-export TESTS
-echo "# Running producer tests: $TESTS"
-
-./run-test.sh $*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh
deleted file mode 100755
index 2f531c61f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env bash
-#
-
-RED='\033[31m'
-GREEN='\033[32m'
-CYAN='\033[36m'
-CCLR='\033[0m'
-
-if [[ $1 == -h ]]; then
- echo "Usage: $0 [-..] [modes..]"
- echo ""
- echo " Modes: bare valgrind helgrind cachegrind drd gdb lldb bash"
- echo " Options:"
- echo " -.. - test-runner command arguments (pass thru)"
- exit 0
-fi
-
-ARGS=
-
-while [[ $1 == -* ]]; do
- ARGS="$ARGS $1"
- shift
-done
-
-TEST=./test-runner
-
-if [ ! -z "$1" ]; then
- MODES=$1
-else
- MODES="bare"
- # Enable valgrind:
- #MODES="bare valgrind"
-fi
-
-FAILED=0
-
-export RDKAFKA_GITVER="$(git rev-parse --short HEAD)@$(git symbolic-ref -q --short HEAD)"
-
-# Enable valgrind suppressions for false positives
-SUPP="--suppressions=librdkafka.suppressions"
-
-# Uncomment to generate valgrind suppressions
-#GEN_SUPP="--gen-suppressions=yes"
-
-# Common valgrind arguments
-VALGRIND_ARGS="--error-exitcode=3"
-
-# Enable vgdb on valgrind errors.
-#VALGRIND_ARGS="$VALGRIND_ARGS --vgdb-error=1"
-
-# Exit valgrind on first error
-VALGRIND_ARGS="$VALGRIND_ARGS --exit-on-first-error=yes"
-
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:../src:../src-cpp
-export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:../src:../src-cpp
-
-echo -e "${CYAN}############## $TEST ################${CCLR}"
-
-for mode in $MODES; do
- echo -e "${CYAN}### Running test $TEST in $mode mode ###${CCLR}"
- export TEST_MODE=$mode
- case "$mode" in
- valgrind)
- valgrind $VALGRIND_ARGS --leak-check=full --show-leak-kinds=all \
- --errors-for-leak-kinds=all \
- --track-origins=yes \
- --track-fds=yes \
- $SUPP $GEN_SUPP \
- $TEST $ARGS
- RET=$?
- ;;
- helgrind)
- valgrind $VALGRIND_ARGS --tool=helgrind \
- --sim-hints=no-nptl-pthread-stackcache \
- $SUPP $GEN_SUPP \
- $TEST $ARGS
- RET=$?
- ;;
- cachegrind)
- valgrind $VALGRIND_ARGS --tool=$mode \
- $SUPP $GEN_SUPP \
- $TEST $ARGS
- RET=$?
- ;;
- drd)
- valgrind $VALGRIND_ARGS --tool=drd $SUPP $GEN_SUPP \
- $TEST $ARGS
- RET=$?
- ;;
- callgrind)
- valgrind $VALGRIND_ARGS --tool=callgrind $SUPP $GEN_SUPP \
- $TEST $ARGS
- RET=$?
- ;;
- gdb)
- grun=$(mktemp gdbrunXXXXXX)
- cat >$grun <<EOF
-set \$_exitcode = -999
-run $ARGS
-if \$_exitcode != -999
- quit
-end
-EOF
- export ASAN_OPTIONS="$ASAN_OPTIONS:abort_on_error=1"
- gdb -x $grun $TEST
- RET=$?
- rm $grun
- ;;
- bare)
- $TEST $ARGS
- RET=$?
- ;;
- lldb)
- lldb -b -o "process launch --environment DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" -- $TEST $ARGS
- RET=$?
- ;;
- bash)
- PS1="[run-test.sh] $PS1" bash
- RET=$?
- ;;
- *)
- echo -e "${RED}### Unknown mode $mode for $TEST ###${CCLR}"
- RET=1
- ;;
- esac
-
- if [ $RET -gt 0 ]; then
- echo -e "${RED}###"
- echo -e "### Test $TEST in $mode mode FAILED! (return code $RET) ###"
- echo -e "###${CCLR}"
- FAILED=1
- else
- echo -e "${GREEN}###"
- echo -e "### $Test $TEST in $mode mode PASSED! ###"
- echo -e "###${CCLR}"
- fi
-done
-
-exit $FAILED
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c b/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c
deleted file mode 100644
index 48e702f3f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * librdkafka - The Apache Kafka C/C++ library
- *
- * Copyright (c) 2019 Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name Track test resource usage.
- */
-
-#ifdef __APPLE__
-#define _DARWIN_C_SOURCE /* required for rusage.ru_maxrss, etc. */
-#endif
-
-#include "test.h"
-
-#if HAVE_GETRUSAGE
-
-#include <sys/time.h>
-#include <sys/resource.h>
-#include "rdfloat.h"
-
-
-/**
- * @brief Call getrusage(2)
- */
-static int test_getrusage(struct rusage *ru) {
- if (getrusage(RUSAGE_SELF, ru) == -1) {
- TEST_WARN("getrusage() failed: %s\n", rd_strerror(errno));
- return -1;
- }
-
- return 0;
-}
-
-/* Convert timeval to seconds */
-#define _tv2s(TV) \
- (double)((double)(TV).tv_sec + ((double)(TV).tv_usec / 1000000.0))
-
-/* Convert timeval to CPU usage percentage (5 = 5%, 130.3 = 130.3%) */
-#define _tv2cpu(TV, DURATION) ((_tv2s(TV) / (DURATION)) * 100.0)
-
-
-/**
- * @brief Calculate difference between \p end and \p start rusage.
- *
- * @returns the delta
- */
-static struct rusage test_rusage_calc(const struct rusage *start,
- const struct rusage *end,
- double duration) {
- struct rusage delta = RD_ZERO_INIT;
-
- timersub(&end->ru_utime, &start->ru_utime, &delta.ru_utime);
- timersub(&end->ru_stime, &start->ru_stime, &delta.ru_stime);
- /* FIXME: maxrss doesn't really work when multiple tests are
- * run in the same process since it only registers the
- * maximum RSS, not the current one.
- * Read this from /proc/<pid>/.. instead */
- delta.ru_maxrss = end->ru_maxrss - start->ru_maxrss;
- delta.ru_nvcsw = end->ru_nvcsw - start->ru_nvcsw;
- /* skip fields we're not interested in */
-
- TEST_SAY(_C_MAG
- "Test resource usage summary: "
- "%.3fs (%.1f%%) User CPU time, "
- "%.3fs (%.1f%%) Sys CPU time, "
- "%.3fMB RSS memory increase, "
- "%ld Voluntary context switches\n",
- _tv2s(delta.ru_utime), _tv2cpu(delta.ru_utime, duration),
- _tv2s(delta.ru_stime), _tv2cpu(delta.ru_stime, duration),
- (double)delta.ru_maxrss / (1024.0 * 1024.0), delta.ru_nvcsw);
-
- return delta;
-}
-
-
-/**
- * @brief Check that test ran within threshold levels
- */
-static int test_rusage_check_thresholds(struct test *test,
- const struct rusage *ru,
- double duration) {
- static const struct rusage_thres defaults = {
- .ucpu = 5.0, /* min value, see below */
- .scpu = 2.5, /* min value, see below */
- .rss = 10.0, /* 10 megs */
- .ctxsw = 100, /* this is the default number of context switches
- * per test second.
- * note: when ctxsw is specified on a test
- * it should be specified as the total
- * number of context switches. */
- };
- /* CPU usage thresholds are too blunt for very quick tests.
- * Use a forgiving default CPU threshold for any test that
- * runs below a certain duration. */
- const double min_duration = 2.0; /* minimum test duration for
- * CPU thresholds to have effect. */
- const double lax_cpu = 1000.0; /* 1000% CPU usage (e.g. 10 cores
- * at full speed) allowed for any
- * test that finishes in under 2s */
- const struct rusage_thres *thres = &test->rusage_thres;
- double cpu, mb, uthres, uthres_orig, sthres, rssthres;
- int csthres;
- char reasons[3][128];
- int fails = 0;
-
- if (duration < min_duration)
- uthres = lax_cpu;
- else if (rd_dbl_zero((uthres = thres->ucpu)))
- uthres = defaults.ucpu;
-
- uthres_orig = uthres;
- uthres *= test_rusage_cpu_calibration;
-
- cpu = _tv2cpu(ru->ru_utime, duration);
- if (cpu > uthres) {
- rd_snprintf(reasons[fails], sizeof(reasons[fails]),
- "User CPU time (%.3fs) exceeded: %.1f%% > %.1f%%",
- _tv2s(ru->ru_utime), cpu, uthres);
- TEST_WARN("%s\n", reasons[fails]);
- fails++;
- }
-
- /* Let the default Sys CPU be the maximum of the defaults.cpu
- * and 20% of the User CPU. */
- if (rd_dbl_zero((sthres = thres->scpu)))
- sthres = duration < min_duration
- ? lax_cpu
- : RD_MAX(uthres_orig * 0.20, defaults.scpu);
-
- sthres *= test_rusage_cpu_calibration;
-
- cpu = _tv2cpu(ru->ru_stime, duration);
- if (cpu > sthres) {
- rd_snprintf(reasons[fails], sizeof(reasons[fails]),
- "Sys CPU time (%.3fs) exceeded: %.1f%% > %.1f%%",
- _tv2s(ru->ru_stime), cpu, sthres);
- TEST_WARN("%s\n", reasons[fails]);
- fails++;
- }
-
- rssthres = thres->rss > 0.0 ? thres->rss : defaults.rss;
- if ((mb = (double)ru->ru_maxrss / (1024.0 * 1024.0)) > rssthres) {
- rd_snprintf(reasons[fails], sizeof(reasons[fails]),
- "RSS memory exceeded: %.2fMB > %.2fMB", mb,
- rssthres);
- TEST_WARN("%s\n", reasons[fails]);
- fails++;
- }
-
-
- if (!(csthres = thres->ctxsw))
- csthres = duration < min_duration
- ? defaults.ctxsw * 100
- : (int)(duration * (double)defaults.ctxsw);
-
- /* FIXME: not sure how to use this */
- if (0 && ru->ru_nvcsw > csthres) {
- TEST_WARN(
- "Voluntary context switches exceeded: "
- "%ld > %d\n",
- ru->ru_nvcsw, csthres);
- fails++;
- }
-
- TEST_ASSERT(fails <= (int)RD_ARRAYSIZE(reasons),
- "reasons[] array not big enough (needs %d slots)", fails);
-
- if (!fails || !test_rusage)
- return 0;
-
- TEST_FAIL("Test resource usage exceeds %d threshold(s): %s%s%s%s%s",
- fails, reasons[0], fails > 1 ? ", " : "",
- fails > 1 ? reasons[1] : "", fails > 2 ? ", " : "",
- fails > 2 ? reasons[2] : "");
-
-
- return -1;
-}
-#endif
-
-
-
-void test_rusage_start(struct test *test) {
-#if HAVE_GETRUSAGE
- /* Can't do per-test rusage checks when tests run in parallel. */
- if (test_concurrent_max > 1)
- return;
-
- if (test_getrusage(&test->rusage) == -1)
- return;
-#endif
-}
-
-
-/**
- * @brief Stop test rusage and check if thresholds were exceeded.
- * Call when test has finished.
- *
- * @returns -1 if thresholds were exceeded, else 0.
- */
-int test_rusage_stop(struct test *test, double duration) {
-#if HAVE_GETRUSAGE
- struct rusage start, end;
-
- /* Can't do per-test rusage checks when tests run in parallel. */
- if (test_concurrent_max > 1)
- return 0;
-
- if (test_getrusage(&end) == -1)
- return 0;
-
- /* Let duration be at least 1ms to avoid
- * too-close-to-zero comparisons */
- if (duration < 0.001)
- duration = 0.001;
-
- start = test->rusage;
- test->rusage = test_rusage_calc(&start, &end, duration);
-
- return test_rusage_check_thresholds(test, &test->rusage, duration);
-#else
- return 0;
-#endif
-}
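
The thresholds enforced above come from each test's rusage_thres struct,
with zero-valued fields falling back to the defaults in
test_rusage_check_thresholds(). A hedged sketch of an explicit override
using those same fields (the values are illustrative; wiring a threshold
into a test declaration is handled by test.c and is not shown here):

    #include "test.h" /* declares struct rusage_thres */

    /* Illustrative limits for a deliberately CPU-heavy test: at most
     * 50% user CPU and 10% sys CPU of the test's wall-clock duration,
     * 100MB of RSS growth, and 5000 voluntary context switches. */
    static const struct rusage_thres heavy_test_thres = {
            .ucpu  = 50.0,  /* user CPU, % of test duration */
            .scpu  = 10.0,  /* sys CPU, % of test duration */
            .rss   = 100.0, /* max RSS increase, MB */
            .ctxsw = 5000,  /* total voluntary context switches */
    };
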
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py b/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py
deleted file mode 100755
index 9cb7d194a..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/env python3
-#
-#
-# Run librdkafka regression tests with different SASL parameters
-# and broker versions.
-#
-# Requires:
-# trivup python module
-# gradle in your PATH
-
-from cluster_testing import (
- LibrdkafkaTestCluster,
- print_report_summary,
- print_test_report_summary,
- read_scenario_conf)
-from LibrdkafkaTestApp import LibrdkafkaTestApp
-
-import os
-import sys
-import argparse
-import json
-import tempfile
-
-
-def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False,
- scenario="default"):
- """
- @brief Create, deploy and start a Kafka cluster using Kafka \\p version,
- then run librdkafka's regression tests.
- """
-
- cluster = LibrdkafkaTestCluster(
- version, conf, debug=debug, scenario=scenario)
-
- # librdkafka's regression tests, as an App.
- rdkafka = LibrdkafkaTestApp(cluster, version, rdkconf, tests=tests,
- scenario=scenario)
- rdkafka.do_cleanup = False
- rdkafka.local_tests = False
-
- if deploy:
- cluster.deploy()
-
- cluster.start(timeout=30)
-
- print(
- '# Connect to cluster with bootstrap.servers %s' %
- cluster.bootstrap_servers())
- rdkafka.start()
- print(
- '# librdkafka regression tests started, logs in %s' %
- rdkafka.root_path())
- try:
- rdkafka.wait_stopped(timeout=60 * 30)
- rdkafka.dbg(
- 'wait stopped: %s, runtime %ds' %
- (rdkafka.state, rdkafka.runtime()))
- except KeyboardInterrupt:
- print('# Aborted by user')
-
- report = rdkafka.report()
- if report is not None:
- report['root_path'] = rdkafka.root_path()
-
- cluster.stop(force=True)
-
- cluster.cleanup()
- return report
-
-
-def handle_report(report, version, suite):
- """ Parse test report and return tuple (Passed(bool), Reason(str)) """
- test_cnt = report.get('tests_run', 0)
-
- if test_cnt == 0:
- return (False, 'No tests run')
-
- passed = report.get('tests_passed', 0)
- failed = report.get('tests_failed', 0)
- if 'all' in suite.get('expect_fail', []) or version in suite.get(
- 'expect_fail', []):
- expect_fail = True
- else:
- expect_fail = False
-
- if expect_fail:
- if failed == test_cnt:
- return (True, 'All %d/%d tests failed as expected' %
- (failed, test_cnt))
- else:
- return (False, '%d/%d tests failed: expected all to fail' %
- (failed, test_cnt))
- else:
- if failed > 0:
- return (False, '%d/%d tests passed: expected all to pass' %
- (passed, test_cnt))
- else:
- return (True, 'All %d/%d tests passed as expected' %
- (passed, test_cnt))
-
-
-if __name__ == '__main__':
-
- parser = argparse.ArgumentParser(
- description='Run librdkafka test suite using SASL on a '
- 'trivupped cluster')
-
- parser.add_argument('--conf', type=str, dest='conf', default=None,
- help='trivup JSON config object (not file)')
- parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None,
- help='trivup JSON config object (not file) '
- 'for LibrdkafkaTestApp')
- parser.add_argument('--scenario', type=str, dest='scenario',
- default='default',
- help='Test scenario (see scenarios/ directory)')
- parser.add_argument('--tests', type=str, dest='tests', default=None,
- help='Test to run (e.g., "0002")')
- parser.add_argument('--no-ssl', action='store_false', dest='ssl',
- default=True,
- help='Don\'t run SSL tests')
- parser.add_argument('--no-sasl', action='store_false', dest='sasl',
- default=True,
- help='Don\'t run SASL tests')
- parser.add_argument('--no-oidc', action='store_false', dest='oidc',
- default=True,
- help='Don\'t run OAuth/OIDC tests')
- parser.add_argument('--no-plaintext', action='store_false',
- dest='plaintext', default=True,
- help='Don\'t run PLAINTEXT tests')
-
- parser.add_argument('--report', type=str, dest='report', default=None,
- help='Write test suites report to this filename')
- parser.add_argument('--debug', action='store_true', dest='debug',
- default=False,
- help='Enable trivup debugging')
- parser.add_argument('--suite', type=str, default=None,
- help='Only run matching suite(s) (substring match)')
- parser.add_argument('versions', type=str, default=None,
- nargs='*', help='Limit broker versions to these')
- args = parser.parse_args()
-
- conf = dict()
- rdkconf = dict()
-
- if args.conf is not None:
- conf.update(json.loads(args.conf))
- if args.rdkconf is not None:
- rdkconf.update(json.loads(args.rdkconf))
- if args.tests is not None:
- tests = args.tests.split(',')
- else:
- tests = None
-
- conf.update(read_scenario_conf(args.scenario))
-
- # Matrix of (broker version, supported SASL mechanisms) x test suites
- versions = list()
- if len(args.versions):
- for v in args.versions:
- versions.append(
- (v, ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']))
- else:
- versions = [('3.1.0',
- ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']),
- ('2.1.0',
- ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']),
- ('0.10.2.0', ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI']),
- ('0.9.0.1', ['GSSAPI']),
- ('0.8.2.2', [])]
- sasl_plain_conf = {'sasl_mechanisms': 'PLAIN',
- 'sasl_users': 'myuser=mypassword'}
- sasl_scram_conf = {'sasl_mechanisms': 'SCRAM-SHA-512',
- 'sasl_users': 'myuser=mypassword'}
- ssl_sasl_plain_conf = {'sasl_mechanisms': 'PLAIN',
- 'sasl_users': 'myuser=mypassword',
- 'security.protocol': 'SSL'}
- sasl_oauthbearer_conf = {'sasl_mechanisms': 'OAUTHBEARER',
- 'sasl_oauthbearer_config':
- 'scope=requiredScope principal=admin'}
- sasl_oauth_oidc_conf = {'sasl_mechanisms': 'OAUTHBEARER',
- 'sasl_oauthbearer_method': 'OIDC'}
- sasl_kerberos_conf = {'sasl_mechanisms': 'GSSAPI',
- 'sasl_servicename': 'kafka'}
- suites = [{'name': 'SASL PLAIN',
- 'run': (args.sasl and args.plaintext),
- 'conf': sasl_plain_conf,
- 'tests': ['0001'],
- 'expect_fail': ['0.9.0.1', '0.8.2.2']},
- {'name': 'SASL SCRAM',
- 'run': (args.sasl and args.plaintext),
- 'conf': sasl_scram_conf,
- 'expect_fail': ['0.9.0.1', '0.8.2.2']},
- {'name': 'PLAINTEXT (no SASL)',
- 'run': args.plaintext,
- 'tests': ['0001']},
- {'name': 'SSL (no SASL)',
- 'run': args.ssl,
- 'conf': {'security.protocol': 'SSL'},
- 'expect_fail': ['0.8.2.2']},
- {'name': 'SASL_SSL PLAIN',
- 'run': (args.sasl and args.ssl and args.plaintext),
- 'conf': ssl_sasl_plain_conf,
- 'expect_fail': ['0.9.0.1', '0.8.2.2']},
- {'name': 'SASL PLAIN with wrong username',
- 'run': (args.sasl and args.plaintext),
- 'conf': sasl_plain_conf,
- 'rdkconf': {'sasl_users': 'wrongjoe=mypassword'},
- 'tests': ['0001'],
- 'expect_fail': ['all']},
- {'name': 'SASL OAUTHBEARER',
- 'run': args.sasl,
- 'conf': sasl_oauthbearer_conf,
- 'tests': ['0001'],
- 'expect_fail': ['0.10.2.0', '0.9.0.1', '0.8.2.2']},
- {'name': 'SASL OAUTHBEARER with wrong scope',
- 'run': args.sasl,
- 'conf': sasl_oauthbearer_conf,
- 'rdkconf': {'sasl_oauthbearer_config': 'scope=wrongScope'},
- 'tests': ['0001'],
- 'expect_fail': ['all']},
- {'name': 'OAuth/OIDC',
- 'run': args.oidc,
- 'tests': ['0001', '0126'],
- 'conf': sasl_oauth_oidc_conf,
- 'minver': '3.1.0',
- 'expect_fail': ['2.8.1', '2.1.0', '0.10.2.0',
- '0.9.0.1', '0.8.2.2']},
- {'name': 'SASL Kerberos',
- 'run': args.sasl,
- 'conf': sasl_kerberos_conf,
- 'expect_fail': ['0.8.2.2']}]
-
- pass_cnt = 0
- fail_cnt = 0
- for version, supported in versions:
- if len(args.versions) > 0 and version not in args.versions:
- print('### Skipping version %s' % version)
- continue
-
- for suite in suites:
- if not suite.get('run', True):
- continue
-
- if args.suite is not None and suite['name'].find(args.suite) == -1:
- print(
- f'# Skipping {suite["name"]} due to --suite {args.suite}')
- continue
-
- if 'minver' in suite:
- minver = [int(x) for x in suite['minver'].split('.')][:3]
- this_version = [int(x) for x in version.split('.')][:3]
- if this_version < minver:
- print(
- f'# Skipping {suite["name"]} due to version {version} < minimum required version {suite["minver"]}') # noqa: E501
- continue
-
- _conf = conf.copy()
- _conf.update(suite.get('conf', {}))
- _rdkconf = _conf.copy()
- _rdkconf.update(rdkconf)
- _rdkconf.update(suite.get('rdkconf', {}))
-
- if 'version' not in suite:
- suite['version'] = dict()
-
- # Disable SASL broker config if broker version does
- # not support the selected mechanism
- mech = suite.get('conf', dict()).get('sasl_mechanisms', None)
- if mech is not None and mech not in supported:
- print('# Disabled SASL for broker version %s' % version)
- _conf.pop('sasl_mechanisms', None)
-
- # Run tests
- print(
- '#### Version %s, suite %s: STARTING' %
- (version, suite['name']))
- if tests is None:
- tests_to_run = suite.get('tests', None)
- else:
- tests_to_run = tests
- report = test_it(version, tests=tests_to_run, conf=_conf,
- rdkconf=_rdkconf,
- debug=args.debug, scenario=args.scenario)
-
- # Handle test report (test_it() may return None if the test app
- # produced no report at all)
- if report is None:
- report = dict()
- report['version'] = version
- passed, reason = handle_report(report, version, suite)
- report['PASSED'] = passed
- report['REASON'] = reason
-
- if passed:
- print('\033[42m#### Version %s, suite %s: PASSED: %s\033[0m' %
- (version, suite['name'], reason))
- pass_cnt += 1
- else:
- print('\033[41m#### Version %s, suite %s: FAILED: %s\033[0m' %
- (version, suite['name'], reason))
- print_test_report_summary('%s @ %s' %
- (suite['name'], version), report)
- fail_cnt += 1
- print('#### Test output: %s/stderr.log' %
- report.get('root_path', '<unknown>'))
-
- suite['version'][version] = report
-
- # Write test suite report JSON file
- if args.report is not None:
- test_suite_report_file = args.report
- f = open(test_suite_report_file, 'w')
- else:
- fd, test_suite_report_file = tempfile.mkstemp(prefix='test_suite_',
- suffix='.json',
- dir='.')
- f = os.fdopen(fd, 'w')
-
- full_report = {'suites': suites, 'pass_cnt': pass_cnt,
- 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt}
-
- f.write(json.dumps(full_report))
- f.close()
-
- print('\n\n\n')
- print_report_summary(full_report)
- print('#### Full test suites report in: %s' % test_suite_report_file)
-
- if pass_cnt == 0 or fail_cnt > 0:
- sys.exit(1)
- else:
- sys.exit(0)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md
deleted file mode 100644
index 97027f386..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Test scenarios
-
-A test scenario defines the trivup Kafka cluster setup.
-
-The scenario name is the name of the file (without the .json extension)
-and the file contents are the trivup configuration dict.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json
deleted file mode 100644
index 80a587589..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "versions": ["2.3.0"],
- "auto_create_topics": "true",
- "num_partitions": 4,
- "replication_factor": 3,
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json
deleted file mode 100644
index 92287a763..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "auto_create_topics": "true",
- "num_partitions": 4,
- "replication_factor": 3,
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json
deleted file mode 100644
index 8727995bd..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "auto_create_topics": "false",
- "num_partitions": 4,
- "replication_factor": 3,
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c
deleted file mode 100644
index 2de01627d..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c
+++ /dev/null
@@ -1,801 +0,0 @@
-/*
- * sockem - socket-level network emulation
- *
- * Copyright (c) 2016, Magnus Edenhill, Andreas Smas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define _GNU_SOURCE /* for strdupa() and RTLD_NEXT */
-#include <errno.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <poll.h>
-#include <assert.h>
-#include <netinet/in.h>
-#include <dlfcn.h>
-
-#include "sockem.h"
-
-#include <sys/queue.h>
-
-#ifdef __APPLE__
-#include <sys/time.h> /* for gettimeofday() */
-#endif
-
-#ifdef _WIN32
-#define socket_errno() WSAGetLastError()
-#else
-#define socket_errno() errno
-#define SOCKET_ERROR -1
-#endif
-
-#ifndef strdupa
-#define strdupa(s) \
- ({ \
- const char *_s = (s); \
- size_t _len = strlen(_s) + 1; \
- char *_d = (char *)alloca(_len); \
- (char *)memcpy(_d, _s, _len); \
- })
-#endif
-
-#include <pthread.h>
-typedef pthread_mutex_t mtx_t;
-#define mtx_init(M) pthread_mutex_init(M, NULL)
-#define mtx_destroy(M) pthread_mutex_destroy(M)
-#define mtx_lock(M) pthread_mutex_lock(M)
-#define mtx_unlock(M) pthread_mutex_unlock(M)
-
-typedef pthread_t thrd_t;
-#define thrd_create(THRD, START_ROUTINE, ARG) \
- pthread_create(THRD, NULL, START_ROUTINE, ARG)
-#define thrd_join0(THRD) pthread_join(THRD, NULL)
-
-
-static mtx_t sockem_lock;
-static LIST_HEAD(, sockem_s) sockems;
-
-static pthread_once_t sockem_once = PTHREAD_ONCE_INIT;
-static char *sockem_conf_str = "";
-
-typedef int64_t sockem_ts_t;
-
-
-#ifdef LIBSOCKEM_PRELOAD
-static int (*sockem_orig_connect)(int, const struct sockaddr *, socklen_t);
-static int (*sockem_orig_close)(int);
-
-#define sockem_close0(S) (sockem_orig_close(S))
-#define sockem_connect0(S, A, AL) (sockem_orig_connect(S, A, AL))
-#else
-#define sockem_close0(S) close(S)
-#define sockem_connect0(S, A, AL) connect(S, A, AL)
-#endif
-
-
-struct sockem_conf {
- /* FIXME: these needs to be implemented */
- int tx_thruput; /* app->peer bytes/second */
- int rx_thruput; /* peer->app bytes/second */
- int delay; /* latency in ms */
- int jitter; /* latency variation in ms */
- int debug; /* enable sockem printf debugging */
- size_t recv_bufsz; /* recv chunk/buffer size */
- int direct; /* direct forward, no delay or rate-limiting */
-};
-
-
-typedef struct sockem_buf_s {
- TAILQ_ENTRY(sockem_buf_s) sb_link;
- size_t sb_size;
- size_t sb_of;
- char *sb_data;
- int64_t sb_at; /* Transmit at this absolute time. */
-} sockem_buf_t;
-
-
-struct sockem_s {
- LIST_ENTRY(sockem_s) link;
-
- enum {
- /* Forwarder thread run states */
- SOCKEM_INIT,
- SOCKEM_START,
- SOCKEM_RUN,
- SOCKEM_TERM
- } run;
-
- int as; /* application's socket. */
- int ls; /* internal application listen socket */
- int ps; /* internal peer socket connecting sockem to the peer.*/
-
- void *recv_buf; /* Receive buffer */
- size_t recv_bufsz; /* .. size */
-
- int linked; /* On sockems list */
-
- thrd_t thrd; /* Forwarder thread */
-
- mtx_t lock;
-
- struct sockem_conf conf; /* application-set config.
- * protected by .lock */
-
- struct sockem_conf use; /* last copy of .conf
- * local to skm thread */
-
- TAILQ_HEAD(, sockem_buf_s)
- bufs; /* Buffers in queue waiting for
- * transmission (delayed) */
-
- size_t bufs_size; /* Total number of bytes currently enqueued
- * for transmission */
- size_t bufs_size_max; /* Soft max threshold for bufs_size,
- * when this value is exceeded the app fd
- * is removed from the poll set until
- * bufs_size falls below the threshold again. */
- int poll_fd_cnt;
- int64_t ts_last_fwd; /* For rate-limiter: timestamp of last forward */
-};
-
-
-static int sockem_vset(sockem_t *skm, va_list ap);
-
-
-/**
- * A microsecond monotonic clock
- */
-static __attribute__((unused)) __inline int64_t sockem_clock(void) {
-#ifdef __APPLE__
- /* No monotonic clock on Darwin */
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return ((int64_t)tv.tv_sec * 1000000LLU) + (int64_t)tv.tv_usec;
-#elif defined(_WIN32)
- return (int64_t)GetTickCount64() * 1000LLU;
-#else
- struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return ((int64_t)ts.tv_sec * 1000000LLU) +
- ((int64_t)ts.tv_nsec / 1000LLU);
-#endif
-}
-
-/**
- * @brief Initialize libsockem once.
- */
-static void sockem_init(void) {
- mtx_init(&sockem_lock);
- sockem_conf_str = getenv("SOCKEM_CONF");
- if (!sockem_conf_str)
- sockem_conf_str = "";
- if (strstr(sockem_conf_str, "debug"))
- fprintf(stderr, "%% libsockem pre-loaded (%s)\n",
- sockem_conf_str);
-#ifdef LIBSOCKEM_PRELOAD
- sockem_orig_connect = dlsym(RTLD_NEXT, "connect");
- sockem_orig_close = dlsym(RTLD_NEXT, "close");
-#endif
-}
-
-
-/**
- * @returns the maximum waittime in ms for poll(), at most 1000 ms.
- * @remark lock must be held
- */
-static int sockem_calc_waittime(sockem_t *skm, int64_t now) {
- const sockem_buf_t *sb;
- int64_t r;
-
- if (!(sb = TAILQ_FIRST(&skm->bufs)))
- return 1000;
- else if (now >= sb->sb_at || skm->use.direct)
- return 0;
- else if ((r = (sb->sb_at - now)) < 1000000) {
- if (r < 1000)
- return 1; /* Ceil to 1 to avoid busy-loop during
- * last millisecond. */
- else
- return (int)(r / 1000);
- } else
- return 1000;
-}
-
-
-/**
- * @brief Unlink and destroy a buffer
- */
-static void sockem_buf_destroy(sockem_t *skm, sockem_buf_t *sb) {
- skm->bufs_size -= sb->sb_size - sb->sb_of;
- TAILQ_REMOVE(&skm->bufs, sb, sb_link);
- free(sb);
-}
-
-/**
- * @brief Add delayed buffer to transmit.
- */
-static sockem_buf_t *
-sockem_buf_add(sockem_t *skm, size_t size, const void *data) {
- sockem_buf_t *sb;
-
- skm->bufs_size += size;
- if (skm->bufs_size > skm->bufs_size_max) {
- /* No more buffer space, halt recv fd until
- * queued buffers drop below threshold. */
- skm->poll_fd_cnt = 1;
- }
-
- sb = malloc(sizeof(*sb) + size);
-
- sb->sb_of = 0;
- sb->sb_size = size;
- sb->sb_data = (char *)(sb + 1);
- sb->sb_at = sockem_clock() +
- ((skm->use.delay + (skm->use.jitter / 2) /*FIXME*/) * 1000);
- memcpy(sb->sb_data, data, size);
-
- TAILQ_INSERT_TAIL(&skm->bufs, sb, sb_link);
-
- return sb;
-}
-
-
-/**
- * @brief Forward any delayed buffers that have passed their deadline
- * @remark lock must be held but will be released momentarily while
- * performing send syscall.
- */
-static int sockem_fwd_bufs(sockem_t *skm, int ofd) {
- sockem_buf_t *sb;
- int64_t now = sockem_clock();
- size_t to_write;
- int64_t elapsed;
-
-
- if (skm->use.direct)
- to_write = 1024 * 1024 * 100;
- else if ((elapsed = now - skm->ts_last_fwd)) {
- /* Calculate how many bytes to send to adhere to rate-limit */
- to_write = (size_t)((double)skm->use.tx_thruput *
- ((double)elapsed / 1000000.0));
- } else
- return 0;
-
- while (to_write > 0 && (sb = TAILQ_FIRST(&skm->bufs)) &&
- (skm->use.direct || sb->sb_at <= now)) {
- ssize_t r;
- size_t remain = sb->sb_size - sb->sb_of;
- size_t wr = to_write < remain ? to_write : remain;
-
- if (wr == 0)
- break;
-
- mtx_unlock(&skm->lock);
-
- r = send(ofd, sb->sb_data + sb->sb_of, wr, 0);
-
- mtx_lock(&skm->lock);
-
- if (r == -1) {
- if (errno == ENOBUFS || errno == EAGAIN ||
- errno == EWOULDBLOCK)
- return 0;
- return -1;
- }
-
- skm->ts_last_fwd = now;
-
- sb->sb_of += r;
- to_write -= r;
-
- if (sb->sb_of < sb->sb_size)
- break;
-
- sockem_buf_destroy(skm, sb);
-
- now = sockem_clock();
- }
-
- /* Re-enable app fd poll if queued buffers are below threshold */
- if (skm->bufs_size < skm->bufs_size_max)
- skm->poll_fd_cnt = 2;
-
- return 0;
-}
-
-
-/**
- * @brief read from \p ifd, write to \p ofd in a blocking fashion.
- *
- * @returns the number of bytes forwarded, or -1 on error.
- */
-static int sockem_recv_fwd(sockem_t *skm, int ifd, int ofd, int direct) {
- ssize_t r, wr;
-
- r = recv(ifd, skm->recv_buf, skm->recv_bufsz, MSG_DONTWAIT);
- if (r == -1) {
- int serr = socket_errno();
- if (serr == EAGAIN || serr == EWOULDBLOCK)
- return 0;
- return -1;
-
- } else if (r == 0) {
- /* Socket closed */
- return -1;
- }
-
- if (direct) {
- /* No delay, rate limit, or buffered data: send right away */
- wr = send(ofd, skm->recv_buf, r, 0);
- if (wr < r)
- return -1;
-
- return wr;
- } else {
- sockem_buf_add(skm, r, skm->recv_buf);
- return r;
- }
-}
-
-
-/**
- * @brief Close all sockets and unsets ->run.
- * @remark Preserves caller's errno.
- * @remark lock must be held.
- */
-static void sockem_close_all(sockem_t *skm) {
- int serr = socket_errno();
-
- if (skm->ls != -1) {
- sockem_close0(skm->ls);
- skm->ls = -1;
- }
-
- if (skm->ps != -1) {
- sockem_close0(skm->ps);
- skm->ps = -1;
- }
-
- skm->run = SOCKEM_TERM;
-
- errno = serr;
-}
-
-
-/**
- * @brief Copy desired (app) config to internally use(d) configuration.
- * @remark lock must be held
- */
-static __inline void sockem_conf_use(sockem_t *skm) {
- skm->use = skm->conf;
- /* Figure out if direct forward is to be used */
- skm->use.direct = !(skm->use.delay || skm->use.jitter ||
- (skm->use.tx_thruput < (1 << 30)));
-}
-
-/**
- * @brief sockem internal per-socket forwarder thread
- */
-static void *sockem_run(void *arg) {
- sockem_t *skm = arg;
- int cs = -1;
- int ls;
- struct pollfd pfd[2];
-
- mtx_lock(&skm->lock);
- if (skm->run == SOCKEM_START)
- skm->run = SOCKEM_RUN;
- sockem_conf_use(skm);
- ls = skm->ls;
- mtx_unlock(&skm->lock);
-
- skm->recv_bufsz = skm->use.recv_bufsz;
- skm->recv_buf = malloc(skm->recv_bufsz);
-
- /* Accept connection from sockfd in sockem_connect() */
- cs = accept(ls, NULL, 0);
- if (cs == -1) {
- mtx_lock(&skm->lock);
- if (skm->run == SOCKEM_TERM) {
- /* App socket was closed. */
- goto done;
- }
- fprintf(stderr, "%% sockem: accept(%d) failed: %s\n", ls,
- strerror(socket_errno()));
- mtx_unlock(&skm->lock);
- assert(cs != -1);
- }
-
- /* Set up poll (blocking IO) */
- memset(pfd, 0, sizeof(pfd));
- pfd[1].fd = cs;
- pfd[1].events = POLLIN;
-
- mtx_lock(&skm->lock);
- pfd[0].fd = skm->ps;
- mtx_unlock(&skm->lock);
- pfd[0].events = POLLIN;
-
- skm->poll_fd_cnt = 2;
-
- mtx_lock(&skm->lock);
- while (skm->run == SOCKEM_RUN) {
- int r;
- int i;
- int waittime = sockem_calc_waittime(skm, sockem_clock());
-
- mtx_unlock(&skm->lock);
- r = poll(pfd, skm->poll_fd_cnt, waittime);
- if (r == -1) {
- mtx_lock(&skm->lock); /* done: expects the lock held */
- break;
- }
-
- /* Send/forward delayed buffers */
- mtx_lock(&skm->lock);
- sockem_conf_use(skm);
-
- if (sockem_fwd_bufs(skm, skm->ps) == -1) {
- /* Set the termination state under the lock and
- * leave the loop with it held for done: */
- skm->run = SOCKEM_TERM;
- break;
- }
- mtx_unlock(&skm->lock);
-
- for (i = 0; r > 0 && i < 2; i++) {
- if (pfd[i].revents & (POLLHUP | POLLERR)) {
- skm->run = SOCKEM_TERM;
-
- } else if (pfd[i].revents & POLLIN) {
- if (sockem_recv_fwd(
- skm, pfd[i].fd, pfd[i ^ 1].fd,
- /* direct mode for app socket
- * without delay, and always for
- * peer socket (receive channel) */
- i == 0 || (skm->use.direct &&
- skm->bufs_size == 0)) ==
- -1) {
- skm->run = SOCKEM_TERM;
- break;
- }
- }
- }
-
- mtx_lock(&skm->lock);
- }
-done:
- if (cs != -1)
- sockem_close0(cs);
- sockem_close_all(skm);
-
- mtx_unlock(&skm->lock);
- free(skm->recv_buf);
-
-
- return NULL;
-}
-
-
-
-/**
- * @brief Connect socket \p s to \p addr
- */
-static int
-sockem_do_connect(int s, const struct sockaddr *addr, socklen_t addrlen) {
- int r;
-
- r = sockem_connect0(s, addr, addrlen);
- if (r == SOCKET_ERROR) {
- int serr = socket_errno();
- if (serr != EINPROGRESS
-#ifdef _WIN32
- && serr != WSAEWOULDBLOCK
-#endif
- ) {
-#ifndef _WIN32
- errno = serr;
-#endif
- return -1;
- }
- }
-
- return 0;
-}
-
-
-sockem_t *sockem_connect(int sockfd,
- const struct sockaddr *addr,
- socklen_t addrlen,
- ...) {
- sockem_t *skm;
- int ls, ps;
- struct sockaddr_in6 sin6 = {.sin6_family = addr->sa_family};
- socklen_t addrlen2 = addrlen;
- va_list ap;
-
- pthread_once(&sockem_once, sockem_init);
-
- /* Create internal app listener socket */
- ls = socket(addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
- if (ls == -1)
- return NULL;
-
- if (bind(ls, (struct sockaddr *)&sin6, addrlen) == -1) {
- sockem_close0(ls);
- return NULL;
- }
-
- /* Get bound address */
- if (getsockname(ls, (struct sockaddr *)&sin6, &addrlen2) == -1) {
- sockem_close0(ls);
- return NULL;
- }
-
- if (listen(ls, 1) == -1) {
- sockem_close0(ls);
- return NULL;
- }
-
- /* Create internal peer socket */
- ps = socket(addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
- if (ps == -1) {
- sockem_close0(ls);
- return NULL;
- }
-
- /* Connect to peer */
- if (sockem_do_connect(ps, addr, addrlen) == -1) {
- sockem_close0(ls);
- sockem_close0(ps);
- return NULL;
- }
-
- /* Create sockem handle */
- skm = calloc(1, sizeof(*skm));
- skm->as = sockfd;
- skm->ls = ls;
- skm->ps = ps;
- skm->bufs_size_max = 16 * 1024 * 1024; /* 16MB of queue buffer */
- TAILQ_INIT(&skm->bufs);
- mtx_init(&skm->lock);
-
- /* Default config */
- skm->conf.rx_thruput = 1 << 30;
- skm->conf.tx_thruput = 1 << 30;
- skm->conf.delay = 0;
- skm->conf.jitter = 0;
- skm->conf.recv_bufsz = 1024 * 1024;
- skm->conf.direct = 1;
-
- /* Apply passed configuration */
- va_start(ap, addrlen);
- if (sockem_vset(skm, ap) == -1) {
- va_end(ap);
- sockem_close(skm);
- return NULL;
- }
- va_end(ap);
-
- mtx_lock(&skm->lock);
- skm->run = SOCKEM_START;
-
- /* Create pipe thread */
- if (thrd_create(&skm->thrd, sockem_run, skm) != 0) {
- mtx_unlock(&skm->lock);
- sockem_close(skm);
- return NULL;
- }
- mtx_unlock(&skm->lock);
-
- /* Connect application socket to listen socket */
- if (sockem_do_connect(sockfd, (struct sockaddr *)&sin6, addrlen2) ==
- -1) {
- sockem_close(skm);
- return NULL;
- }
-
- mtx_lock(&sockem_lock);
- LIST_INSERT_HEAD(&sockems, skm, link);
- mtx_lock(&skm->lock);
- skm->linked = 1;
- mtx_unlock(&skm->lock);
- mtx_unlock(&sockem_lock);
-
- return skm;
-}
-
-
-/**
- * @brief Purge/drop all queued buffers
- */
-static void sockem_bufs_purge(sockem_t *skm) {
- sockem_buf_t *sb;
-
- while ((sb = TAILQ_FIRST(&skm->bufs)))
- sockem_buf_destroy(skm, sb);
-}
-
-
-void sockem_close(sockem_t *skm) {
- mtx_lock(&sockem_lock);
- mtx_lock(&skm->lock);
- if (skm->linked)
- LIST_REMOVE(skm, link);
- mtx_unlock(&sockem_lock);
-
- /* If thread is running let it close the sockets
- * to avoid race condition. */
- if (skm->run == SOCKEM_START || skm->run == SOCKEM_RUN)
- skm->run = SOCKEM_TERM;
- else
- sockem_close_all(skm);
-
- mtx_unlock(&skm->lock);
-
- thrd_join0(skm->thrd);
-
- sockem_bufs_purge(skm);
-
- mtx_destroy(&skm->lock);
-
-
- free(skm);
-}
-
-
-/**
- * @brief Set single conf key.
- * @remark lock must be held.
- * @returns 0 on success or -1 if key is unknown
- */
-static int sockem_set0(sockem_t *skm, const char *key, int val) {
- if (!strcmp(key, "rx.thruput") || !strcmp(key, "rx.throughput"))
- skm->conf.rx_thruput = val;
- else if (!strcmp(key, "tx.thruput") || !strcmp(key, "tx.throughput"))
- skm->conf.tx_thruput = val;
- else if (!strcmp(key, "delay"))
- skm->conf.delay = val;
- else if (!strcmp(key, "jitter"))
- skm->conf.jitter = val;
- else if (!strcmp(key, "rx.bufsz"))
- skm->conf.recv_bufsz = val;
- else if (!strcmp(key, "debug"))
- skm->conf.debug = val;
- else if (!strcmp(key, "true"))
- ; /* dummy key for allowing non-empty but default config */
- else if (strchr(key, '=')) {
- /* Parse as a "key=val,key2=val2" CSV list */
- char *s = strdupa(key);
- while (*s) {
- char *t = strchr(s, ',');
- char *d = strchr(s, '=');
- if (t)
- *t = '\0';
- if (!d)
- return -1;
- *(d++) = '\0';
-
- if (sockem_set0(skm, s, atoi(d)) == -1)
- return -1;
-
- if (!t)
- break;
- s = t + 1; /* advance past the comma to the next pair */
- }
- } else
- return -1;
-
- return 0;
-}
-
-
-/**
- * @brief Set sockem config parameters
- */
-static int sockem_vset(sockem_t *skm, va_list ap) {
- const char *key;
- int val;
-
- mtx_lock(&skm->lock);
- while ((key = va_arg(ap, const char *))) {
- val = va_arg(ap, int);
- if (sockem_set0(skm, key, val) == -1) {
- mtx_unlock(&skm->lock);
- return -1;
- }
- }
- mtx_unlock(&skm->lock);
-
- return 0;
-}
-
-int sockem_set(sockem_t *skm, ...) {
- va_list ap;
- int r;
-
- va_start(ap, skm);
- r = sockem_vset(skm, ap);
- va_end(ap);
-
- return r;
-}
-
-
-sockem_t *sockem_find(int sockfd) {
- sockem_t *skm;
-
- pthread_once(&sockem_once, sockem_init);
-
- mtx_lock(&sockem_lock);
- LIST_FOREACH(skm, &sockems, link)
- if (skm->as == sockfd)
- break;
- mtx_unlock(&sockem_lock);
-
- return skm;
-}
-
-
-#ifdef LIBSOCKEM_PRELOAD
-/**
- * Provide overloading socket APIs and conf bootstrapping from env vars.
- *
- */
-
-
-
-/**
- * @brief connect(2) overload
- */
-int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) {
- sockem_t *skm;
-
- pthread_once(&sockem_once, sockem_init);
-
- skm = sockem_connect(sockfd, addr, addrlen, sockem_conf_str, 0, NULL);
- if (!skm)
- return -1;
-
- return 0;
-}
-
-/**
- * @brief close(2) overload
- */
-int close(int fd) {
- sockem_t *skm;
-
- pthread_once(&sockem_once, sockem_init);
-
- mtx_lock(&sockem_lock);
- skm = sockem_find(fd);
-
- if (skm)
- sockem_close(skm);
- mtx_unlock(&sockem_lock);
-
- return sockem_close0(fd);
-}
-
-#endif
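
For reference, the rate limiting in sockem_fwd_bufs() above is plain
elapsed-time budgeting: the byte budget per wakeup is tx_thruput
(bytes/second) scaled by the microseconds since the last forward. A
standalone sketch of that arithmetic (the function name is illustrative
and not part of the sockem API):

    #include <stdint.h>
    #include <stddef.h>

    /* Bytes permitted since the last forward. E.g. a 1 MB/s limit with
     * 2500 usec elapsed allows (size_t)(1048576 * 0.0025) = 2621 bytes;
     * anything still queued waits for the next poll() wakeup. */
    static size_t sockem_rate_budget(int tx_thruput, int64_t elapsed_us) {
            return (size_t)((double)tx_thruput *
                            ((double)elapsed_us / 1000000.0));
    }
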
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h
deleted file mode 100644
index 8a2ddcd87..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * sockem - socket-level network emulation
- *
- * Copyright (c) 2016, Magnus Edenhill, Andreas Smas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RD_SOCKEM_H_
-#define _RD_SOCKEM_H_
-
-#include <sys/types.h>
-#include <sys/socket.h>
-
-
-typedef struct sockem_s sockem_t;
-
-
-
-/**
- * @brief Connect to \p addr
- *
- * See sockem_set for the va-arg list definition.
- *
- * @returns a sockem handle on success or NULL on failure.
- */
-sockem_t *
-sockem_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen, ...);
-
-/**
- * @brief Close the connection and destroy the sockem.
- */
-void sockem_close(sockem_t *skm);
-
-
-
-/**
- * @brief Set sockem parameters by `char *key, int val` tuples.
- *
- * Keys:
- * rx.thruput
- * tx.thruput
- * delay
- * jitter
- * rx.bufsz
- * true (dummy, ignored)
- *
- * The key may also be a CSV-list of "key=val,key2=val2" pairs in which case
- * val must be 0 and the sentinel NULL.
- *
- * The va-arg list must be terminated with a NULL sentinel
- *
- * @returns 0 on success or -1 if a key was unknown.
- */
-int sockem_set(sockem_t *skm, ...);
-
-
-
-/**
- * @brief Find sockem by (application) socket.
- * @remark Application is responsible for locking.
- */
-sockem_t *sockem_find(int sockfd);
-
-#endif /* _RD_SOCKEM_H_ */
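
Putting sockem.c and this header together, typical usage is to
interpose sockem on an already-created TCP socket and reconfigure it at
runtime. A minimal sketch, assuming fd is an as-yet-unconnected TCP
socket and addr holds a resolved broker address (error handling
elided):

    #include "sockem.h"

    static void slow_down(int fd, const struct sockaddr *addr,
                          socklen_t addrlen) {
            /* Route fd's traffic through sockem, adding 500ms latency. */
            sockem_t *skm = sockem_connect(fd, addr, addrlen,
                                           "delay", 500, NULL);

            /* Reconfigure with key/val tuples... */
            sockem_set(skm, "tx.thruput", 100 * 1024, "jitter", 100, NULL);

            /* ...or as a CSV list, in which case val must be 0. */
            sockem_set(skm, "delay=1000,jitter=200", 0, NULL);

            sockem_close(skm);
    }
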
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c
deleted file mode 100644
index c3e8ce92e..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/**
- * @name Thin abstraction on top of sockem to provide scheduled delays,
- * e.g.: set the delay to 500ms, 2000ms from now.
- */
-
-#include "test.h"
-#include "sockem.h"
-#include "sockem_ctrl.h"
-
-static int sockem_ctrl_thrd_main(void *arg) {
- sockem_ctrl_t *ctrl = (sockem_ctrl_t *)arg;
- int64_t next_wakeup = 0;
- mtx_lock(&ctrl->lock);
-
- test_curr = ctrl->test;
-
- while (!ctrl->term) {
- int64_t now;
- struct sockem_cmd *cmd;
- int wait_time = 1000;
-
- if (next_wakeup)
- wait_time = (int)(next_wakeup - test_clock()) / 1000;
-
- if (wait_time > 0)
- cnd_timedwait_ms(&ctrl->cnd, &ctrl->lock, wait_time);
-
- /* Ack last command */
- if (ctrl->cmd_ack != ctrl->cmd_seq) {
- ctrl->cmd_ack = ctrl->cmd_seq;
- cnd_signal(&ctrl->cnd); /* signal back to caller */
- }
-
- /* Serve expired commands */
- next_wakeup = 0;
- now = test_clock();
- while ((cmd = TAILQ_FIRST(&ctrl->cmds))) {
- if (!ctrl->term) {
- if (cmd->ts_at > now) {
- next_wakeup = cmd->ts_at;
- break;
- }
-
- printf(_C_CYA
- "## %s: "
- "sockem: setting socket delay to "
- "%d\n" _C_CLR,
- __FILE__, cmd->delay);
- test_socket_sockem_set_all("delay", cmd->delay);
- }
- TAILQ_REMOVE(&ctrl->cmds, cmd, link);
- free(cmd);
- }
- }
- mtx_unlock(&ctrl->lock);
-
- return 0;
-}
-
-
-
-/**
- * @brief Set socket delay to kick in after \p after ms
- */
-void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay) {
- struct sockem_cmd *cmd;
- int wait_seq;
-
- TEST_SAY("Set delay to %dms (after %dms)\n", delay, after);
-
- cmd = calloc(1, sizeof(*cmd));
- cmd->ts_at = test_clock() + (after * 1000);
- cmd->delay = delay;
-
- mtx_lock(&ctrl->lock);
- wait_seq = ++ctrl->cmd_seq;
- TAILQ_INSERT_TAIL(&ctrl->cmds, cmd, link);
- cnd_broadcast(&ctrl->cnd);
-
- /* Wait for ack from sockem thread */
- while (ctrl->cmd_ack < wait_seq) {
- TEST_SAY("Waiting for sockem control ack\n");
- cnd_timedwait_ms(&ctrl->cnd, &ctrl->lock, 1000);
- }
- mtx_unlock(&ctrl->lock);
-}
-
-
-void sockem_ctrl_init(sockem_ctrl_t *ctrl) {
- memset(ctrl, 0, sizeof(*ctrl));
- mtx_init(&ctrl->lock, mtx_plain);
- cnd_init(&ctrl->cnd);
- TAILQ_INIT(&ctrl->cmds);
- ctrl->test = test_curr;
-
- mtx_lock(&ctrl->lock);
- if (thrd_create(&ctrl->thrd, sockem_ctrl_thrd_main, ctrl) !=
- thrd_success)
- TEST_FAIL("Failed to create sockem ctrl thread");
- mtx_unlock(&ctrl->lock);
-}
-
-void sockem_ctrl_term(sockem_ctrl_t *ctrl) {
- int res;
-
- /* Join controller thread */
- mtx_lock(&ctrl->lock);
- ctrl->term = 1;
- cnd_broadcast(&ctrl->cnd);
- mtx_unlock(&ctrl->lock);
-
- thrd_join(ctrl->thrd, &res);
-
- cnd_destroy(&ctrl->cnd);
- mtx_destroy(&ctrl->lock);
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h
deleted file mode 100644
index d33c87fca..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2018, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SOCKEM_CTRL_H_
-#define _SOCKEM_CTRL_H_
-
-#include <sys/queue.h>
-
-struct sockem_cmd {
- TAILQ_ENTRY(sockem_cmd) link;
- int64_t ts_at; /**< to ctrl thread: at this time, set delay*/
- int delay;
-};
-
-
-typedef struct sockem_ctrl_s {
- mtx_t lock;
- cnd_t cnd;
- thrd_t thrd;
-
- int cmd_seq; /**< Command sequence id */
- int cmd_ack; /**< Last acked (seen) command sequence id */
-
- TAILQ_HEAD(, sockem_cmd) cmds; /**< Queue of commands. */
-
- int term; /**< Terminate */
-
- struct test *test;
-} sockem_ctrl_t;
-
-
-void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay);
-void sockem_ctrl_init(sockem_ctrl_t *ctrl);
-void sockem_ctrl_term(sockem_ctrl_t *ctrl);
-
-#endif /* _SOCKEM_CTRL_H_ */
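
A hedged sketch of the controller's lifecycle inside a test body
(assumes the framework's sockem hooks are active so that
test_socket_sockem_set_all(), used by the controller thread, has
sockets to act on):

    #include "test.h"
    #include "sockem_ctrl.h"

    static void run_with_scheduled_delays(void) {
            sockem_ctrl_t ctrl;

            sockem_ctrl_init(&ctrl);

            /* After 2000ms, degrade all sockem sockets to 500ms of
             * latency; after 8000ms, restore them. Each call returns
             * once the controller thread has acked the command. */
            sockem_ctrl_set_delay(&ctrl, 2000, 500);
            sockem_ctrl_set_delay(&ctrl, 8000, 0);

            /* ... produce/consume under the scheduled conditions ... */

            sockem_ctrl_term(&ctrl);
    }
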
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/test.c b/fluent-bit/lib/librdkafka-2.1.0/tests/test.c
deleted file mode 100644
index 71180c8f4..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/test.c
+++ /dev/null
@@ -1,6960 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2013, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#define _CRT_RAND_S // rand_s() on MSVC
-#include <stdarg.h>
-#include "test.h"
-#include <signal.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-#ifdef _WIN32
-#include <direct.h> /* _getcwd */
-#else
-#include <sys/wait.h> /* waitpid */
-#endif
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h"
-
-int test_level = 2;
-int test_seed = 0;
-
-char test_mode[64] = "bare";
-char test_scenario[64] = "default";
-static volatile sig_atomic_t test_exit = 0;
-static char test_topic_prefix[128] = "rdkafkatest";
-static int test_topic_random = 0;
-int tests_running_cnt = 0;
-int test_concurrent_max = 5;
-int test_assert_on_fail = 0;
-double test_timeout_multiplier = 1.0;
-static char *test_sql_cmd = NULL;
-int test_session_timeout_ms = 6000;
-int test_broker_version;
-static const char *test_broker_version_str = "2.4.0.0";
-int test_flags = 0;
-int test_neg_flags = TEST_F_KNOWN_ISSUE;
-/* run delete-test-topics.sh between each test (when concurrent_max = 1) */
-static int test_delete_topics_between = 0;
-static const char *test_git_version = "HEAD";
-static const char *test_sockem_conf = "";
-int test_on_ci = 0; /* Tests are being run on CI, be more forgiving
- * with regards to timeouts, etc. */
-int test_quick = 0; /**< Run tests quickly */
-int test_idempotent_producer = 0;
-int test_rusage = 0; /**< Check resource usage */
-/**< CPU speed calibration for rusage threshold checks.
- * >1.0: CPU is slower than base line system,
- * <1.0: CPU is faster than base line system. */
-double test_rusage_cpu_calibration = 1.0;
-static const char *tests_to_run = NULL; /* all */
-static const char *subtests_to_run = NULL; /* all */
-static const char *tests_to_skip = NULL; /* none */
-int test_write_report = 0; /**< Write test report file */
-
-static int show_summary = 1;
-static int test_summary(int do_lock);
-
-/**
- * Protects shared state, such as tests[]
- */
-mtx_t test_mtx;
-cnd_t test_cnd;
-
-static const char *test_states[] = {
- "DNS", "SKIPPED", "RUNNING", "PASSED", "FAILED",
-};
-
-
-
-#define _TEST_DECL(NAME) extern int main_##NAME(int, char **)
-#define _TEST(NAME, FLAGS, ...) \
- { .name = #NAME, .mainfunc = main_##NAME, .flags = FLAGS, __VA_ARGS__ }
-
-
-/**
- * Declare all tests here
- */
-_TEST_DECL(0000_unittests);
-_TEST_DECL(0001_multiobj);
-_TEST_DECL(0002_unkpart);
-_TEST_DECL(0003_msgmaxsize);
-_TEST_DECL(0004_conf);
-_TEST_DECL(0005_order);
-_TEST_DECL(0006_symbols);
-_TEST_DECL(0007_autotopic);
-_TEST_DECL(0008_reqacks);
-_TEST_DECL(0009_mock_cluster);
-_TEST_DECL(0011_produce_batch);
-_TEST_DECL(0012_produce_consume);
-_TEST_DECL(0013_null_msgs);
-_TEST_DECL(0014_reconsume_191);
-_TEST_DECL(0015_offsets_seek);
-_TEST_DECL(0016_client_swname);
-_TEST_DECL(0017_compression);
-_TEST_DECL(0018_cgrp_term);
-_TEST_DECL(0019_list_groups);
-_TEST_DECL(0020_destroy_hang);
-_TEST_DECL(0021_rkt_destroy);
-_TEST_DECL(0022_consume_batch);
-_TEST_DECL(0022_consume_batch_local);
-_TEST_DECL(0025_timers);
-_TEST_DECL(0026_consume_pause);
-_TEST_DECL(0028_long_topicnames);
-_TEST_DECL(0029_assign_offset);
-_TEST_DECL(0030_offset_commit);
-_TEST_DECL(0031_get_offsets);
-_TEST_DECL(0033_regex_subscribe);
-_TEST_DECL(0033_regex_subscribe_local);
-_TEST_DECL(0034_offset_reset);
-_TEST_DECL(0034_offset_reset_mock);
-_TEST_DECL(0035_api_version);
-_TEST_DECL(0036_partial_fetch);
-_TEST_DECL(0037_destroy_hang_local);
-_TEST_DECL(0038_performance);
-_TEST_DECL(0039_event_dr);
-_TEST_DECL(0039_event_log);
-_TEST_DECL(0039_event);
-_TEST_DECL(0040_io_event);
-_TEST_DECL(0041_fetch_max_bytes);
-_TEST_DECL(0042_many_topics);
-_TEST_DECL(0043_no_connection);
-_TEST_DECL(0044_partition_cnt);
-_TEST_DECL(0045_subscribe_update);
-_TEST_DECL(0045_subscribe_update_topic_remove);
-_TEST_DECL(0045_subscribe_update_non_exist_and_partchange);
-_TEST_DECL(0045_subscribe_update_mock);
-_TEST_DECL(0046_rkt_cache);
-_TEST_DECL(0047_partial_buf_tmout);
-_TEST_DECL(0048_partitioner);
-_TEST_DECL(0049_consume_conn_close);
-_TEST_DECL(0050_subscribe_adds);
-_TEST_DECL(0051_assign_adds);
-_TEST_DECL(0052_msg_timestamps);
-_TEST_DECL(0053_stats_timing);
-_TEST_DECL(0053_stats);
-_TEST_DECL(0054_offset_time);
-_TEST_DECL(0055_producer_latency);
-_TEST_DECL(0056_balanced_group_mt);
-_TEST_DECL(0057_invalid_topic);
-_TEST_DECL(0058_log);
-_TEST_DECL(0059_bsearch);
-_TEST_DECL(0060_op_prio);
-_TEST_DECL(0061_consumer_lag);
-_TEST_DECL(0062_stats_event);
-_TEST_DECL(0063_clusterid);
-_TEST_DECL(0064_interceptors);
-_TEST_DECL(0065_yield);
-_TEST_DECL(0066_plugins);
-_TEST_DECL(0067_empty_topic);
-_TEST_DECL(0068_produce_timeout);
-_TEST_DECL(0069_consumer_add_parts);
-_TEST_DECL(0070_null_empty);
-_TEST_DECL(0072_headers_ut);
-_TEST_DECL(0073_headers);
-_TEST_DECL(0074_producev);
-_TEST_DECL(0075_retry);
-_TEST_DECL(0076_produce_retry);
-_TEST_DECL(0077_compaction);
-_TEST_DECL(0078_c_from_cpp);
-_TEST_DECL(0079_fork);
-_TEST_DECL(0080_admin_ut);
-_TEST_DECL(0081_admin);
-_TEST_DECL(0082_fetch_max_bytes);
-_TEST_DECL(0083_cb_event);
-_TEST_DECL(0084_destroy_flags_local);
-_TEST_DECL(0084_destroy_flags);
-_TEST_DECL(0085_headers);
-_TEST_DECL(0086_purge_local);
-_TEST_DECL(0086_purge_remote);
-_TEST_DECL(0088_produce_metadata_timeout);
-_TEST_DECL(0089_max_poll_interval);
-_TEST_DECL(0090_idempotence);
-_TEST_DECL(0091_max_poll_interval_timeout);
-_TEST_DECL(0092_mixed_msgver);
-_TEST_DECL(0093_holb_consumer);
-_TEST_DECL(0094_idempotence_msg_timeout);
-_TEST_DECL(0095_all_brokers_down);
-_TEST_DECL(0097_ssl_verify);
-_TEST_DECL(0097_ssl_verify_local);
-_TEST_DECL(0098_consumer_txn);
-_TEST_DECL(0099_commit_metadata);
-_TEST_DECL(0100_thread_interceptors);
-_TEST_DECL(0101_fetch_from_follower);
-_TEST_DECL(0102_static_group_rebalance);
-_TEST_DECL(0103_transactions_local);
-_TEST_DECL(0103_transactions);
-_TEST_DECL(0104_fetch_from_follower_mock);
-_TEST_DECL(0105_transactions_mock);
-_TEST_DECL(0106_cgrp_sess_timeout);
-_TEST_DECL(0107_topic_recreate);
-_TEST_DECL(0109_auto_create_topics);
-_TEST_DECL(0110_batch_size);
-_TEST_DECL(0111_delay_create_topics);
-_TEST_DECL(0112_assign_unknown_part);
-_TEST_DECL(0113_cooperative_rebalance_local);
-_TEST_DECL(0113_cooperative_rebalance);
-_TEST_DECL(0114_sticky_partitioning);
-_TEST_DECL(0115_producer_auth);
-_TEST_DECL(0116_kafkaconsumer_close);
-_TEST_DECL(0117_mock_errors);
-_TEST_DECL(0118_commit_rebalance);
-_TEST_DECL(0119_consumer_auth);
-_TEST_DECL(0120_asymmetric_subscription);
-_TEST_DECL(0121_clusterid);
-_TEST_DECL(0122_buffer_cleaning_after_rebalance);
-_TEST_DECL(0123_connections_max_idle);
-_TEST_DECL(0124_openssl_invalid_engine);
-_TEST_DECL(0125_immediate_flush);
-_TEST_DECL(0126_oauthbearer_oidc);
-_TEST_DECL(0128_sasl_callback_queue);
-_TEST_DECL(0129_fetch_aborted_msgs);
-_TEST_DECL(0130_store_offsets);
-_TEST_DECL(0131_connect_timeout);
-_TEST_DECL(0132_strategy_ordering);
-_TEST_DECL(0133_ssl_keys);
-_TEST_DECL(0134_ssl_provider);
-_TEST_DECL(0135_sasl_credentials);
-_TEST_DECL(0136_resolve_cb);
-_TEST_DECL(0137_barrier_batch_consume);
-_TEST_DECL(0138_admin_mock);
-
-/* Manual tests */
-_TEST_DECL(8000_idle);
-
-
-/* Define test resource usage thresholds if the default limits
- * are not tolerable.
- *
- * Fields:
- * .ucpu - Max User CPU percentage (double)
- * .scpu - Max System/Kernel CPU percentage (double)
- * .rss - Max RSS (memory) in megabytes (double)
- * .ctxsw - Max number of voluntary context switches (int)
- *
- * Also see test_rusage_check_thresholds() in rusage.c
- *
- * Add a comment in the _THRES() below explaining why the extra thresholds
- * are required.
- *
- * Usage:
- * _TEST(00...., ...,
- * _THRES(.ucpu = 15.0)), <-- Max 15% User CPU usage
- */
-#define _THRES(...) .rusage_thres = {__VA_ARGS__}
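-
-/* For example (sketch), _THRES(.ucpu = 15.0, .rss = 100.0) expands to
- * .rusage_thres = {.ucpu = 15.0, .rss = 100.0}, a designated initializer
- * for the test's resource-usage threshold struct. */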
-
-/**
- * Define all tests here
- */
-struct test tests[] = {
- /* Special MAIN test to hold overall timings, etc. */
- {.name = "<MAIN>", .flags = TEST_F_LOCAL},
- _TEST(0000_unittests,
- TEST_F_LOCAL,
- /* The msgq insert order tests are heavy on
- * user CPU (memory scan), RSS, and
- * system CPU (lots of allocations -> madvise(2)). */
- _THRES(.ucpu = 100.0, .scpu = 20.0, .rss = 900.0)),
- _TEST(0001_multiobj, 0),
- _TEST(0002_unkpart, 0),
- _TEST(0003_msgmaxsize, 0),
- _TEST(0004_conf, TEST_F_LOCAL),
- _TEST(0005_order, 0),
- _TEST(0006_symbols, TEST_F_LOCAL),
- _TEST(0007_autotopic, 0),
- _TEST(0008_reqacks, 0),
- _TEST(0009_mock_cluster,
- TEST_F_LOCAL,
- /* Mock cluster requires MsgVersion 2 */
- TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0011_produce_batch,
- 0,
- /* Produces a lot of messages */
- _THRES(.ucpu = 40.0, .scpu = 8.0)),
- _TEST(0012_produce_consume, 0),
- _TEST(0013_null_msgs, 0),
- _TEST(0014_reconsume_191, 0),
- _TEST(0015_offsets_seek, 0),
- _TEST(0016_client_swname, 0),
- _TEST(0017_compression, 0),
- _TEST(0018_cgrp_term, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0019_list_groups, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0020_destroy_hang, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0021_rkt_destroy, 0),
- _TEST(0022_consume_batch, 0),
- _TEST(0022_consume_batch_local, TEST_F_LOCAL),
- _TEST(0025_timers, TEST_F_LOCAL),
- _TEST(0026_consume_pause, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0028_long_topicnames,
- TEST_F_KNOWN_ISSUE,
- TEST_BRKVER(0, 9, 0, 0),
- .extra = "https://github.com/edenhill/librdkafka/issues/529"),
- _TEST(0029_assign_offset, 0),
- _TEST(0030_offset_commit,
- 0,
- TEST_BRKVER(0, 9, 0, 0),
- /* Loops over committed() until timeout */
- _THRES(.ucpu = 10.0, .scpu = 5.0)),
- _TEST(0031_get_offsets, 0),
- _TEST(0033_regex_subscribe, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0033_regex_subscribe_local, TEST_F_LOCAL),
- _TEST(0034_offset_reset, 0),
- _TEST(0034_offset_reset_mock, TEST_F_LOCAL),
- _TEST(0035_api_version, 0),
- _TEST(0036_partial_fetch, 0),
- _TEST(0037_destroy_hang_local, TEST_F_LOCAL),
- _TEST(0038_performance,
- 0,
- /* Produces and consumes a lot of messages */
- _THRES(.ucpu = 150.0, .scpu = 10)),
- _TEST(0039_event_dr, 0),
- _TEST(0039_event_log, TEST_F_LOCAL),
- _TEST(0039_event, TEST_F_LOCAL),
- _TEST(0040_io_event, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0041_fetch_max_bytes,
- 0,
- /* Re-fetches large messages multiple times */
- _THRES(.ucpu = 20.0, .scpu = 10.0)),
- _TEST(0042_many_topics, 0),
- _TEST(0043_no_connection, TEST_F_LOCAL),
- _TEST(0044_partition_cnt,
- 0,
- TEST_BRKVER(1, 0, 0, 0),
- /* Produces a lot of messages */
- _THRES(.ucpu = 30.0)),
- _TEST(0045_subscribe_update, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0045_subscribe_update_topic_remove,
- 0,
- TEST_BRKVER(0, 9, 0, 0),
- .scenario = "noautocreate"),
- _TEST(0045_subscribe_update_non_exist_and_partchange,
- 0,
- TEST_BRKVER(0, 9, 0, 0),
- .scenario = "noautocreate"),
- _TEST(0045_subscribe_update_mock, TEST_F_LOCAL),
- _TEST(0046_rkt_cache, TEST_F_LOCAL),
- _TEST(0047_partial_buf_tmout, TEST_F_KNOWN_ISSUE),
- _TEST(0048_partitioner,
- 0,
- /* Produces many small messages */
- _THRES(.ucpu = 10.0, .scpu = 5.0)),
-#if WITH_SOCKEM
- _TEST(0049_consume_conn_close, TEST_F_SOCKEM, TEST_BRKVER(0, 9, 0, 0)),
-#endif
- _TEST(0050_subscribe_adds, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0051_assign_adds, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0052_msg_timestamps, 0, TEST_BRKVER(0, 10, 0, 0)),
- _TEST(0053_stats_timing, TEST_F_LOCAL),
- _TEST(0053_stats, 0),
- _TEST(0054_offset_time, 0, TEST_BRKVER(0, 10, 1, 0)),
- _TEST(0055_producer_latency, TEST_F_KNOWN_ISSUE_WIN32),
- _TEST(0056_balanced_group_mt, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0057_invalid_topic, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0058_log, TEST_F_LOCAL),
- _TEST(0059_bsearch, 0, TEST_BRKVER(0, 10, 0, 0)),
- _TEST(0060_op_prio, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0061_consumer_lag, 0),
- _TEST(0062_stats_event, TEST_F_LOCAL),
- _TEST(0063_clusterid, 0, TEST_BRKVER(0, 10, 1, 0)),
- _TEST(0064_interceptors, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0065_yield, 0),
- _TEST(0066_plugins,
- TEST_F_LOCAL | TEST_F_KNOWN_ISSUE_WIN32 | TEST_F_KNOWN_ISSUE_OSX,
- .extra =
- "dynamic loading of tests might not be fixed for this platform"),
- _TEST(0067_empty_topic, 0),
-#if WITH_SOCKEM
- _TEST(0068_produce_timeout, TEST_F_SOCKEM),
-#endif
- _TEST(0069_consumer_add_parts,
- TEST_F_KNOWN_ISSUE_WIN32,
- TEST_BRKVER(1, 0, 0, 0)),
- _TEST(0070_null_empty, 0),
- _TEST(0072_headers_ut, TEST_F_LOCAL),
- _TEST(0073_headers, 0, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0074_producev, TEST_F_LOCAL),
-#if WITH_SOCKEM
- _TEST(0075_retry, TEST_F_SOCKEM),
-#endif
- _TEST(0076_produce_retry, TEST_F_SOCKEM),
- _TEST(0077_compaction,
- 0,
- /* The test itself requires message headers */
- TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0078_c_from_cpp, TEST_F_LOCAL),
- _TEST(0079_fork,
- TEST_F_LOCAL | TEST_F_KNOWN_ISSUE,
- .extra = "using a fork():ed rd_kafka_t is not supported and will "
- "most likely hang"),
- _TEST(0080_admin_ut, TEST_F_LOCAL),
- _TEST(0081_admin, 0, TEST_BRKVER(0, 10, 2, 0)),
- _TEST(0082_fetch_max_bytes, 0, TEST_BRKVER(0, 10, 1, 0)),
- _TEST(0083_cb_event, 0, TEST_BRKVER(0, 9, 0, 0)),
- _TEST(0084_destroy_flags_local, TEST_F_LOCAL),
- _TEST(0084_destroy_flags, 0),
- _TEST(0085_headers, 0, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0086_purge_local, TEST_F_LOCAL),
- _TEST(0086_purge_remote, 0),
-#if WITH_SOCKEM
- _TEST(0088_produce_metadata_timeout, TEST_F_SOCKEM),
-#endif
- _TEST(0089_max_poll_interval, 0, TEST_BRKVER(0, 10, 1, 0)),
- _TEST(0090_idempotence, 0, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0091_max_poll_interval_timeout, 0, TEST_BRKVER(0, 10, 1, 0)),
- _TEST(0092_mixed_msgver, 0, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0093_holb_consumer, 0, TEST_BRKVER(0, 10, 1, 0)),
-#if WITH_SOCKEM
- _TEST(0094_idempotence_msg_timeout,
- TEST_F_SOCKEM,
- TEST_BRKVER(0, 11, 0, 0)),
-#endif
- _TEST(0095_all_brokers_down, TEST_F_LOCAL),
- _TEST(0097_ssl_verify, 0),
- _TEST(0097_ssl_verify_local, TEST_F_LOCAL),
- _TEST(0098_consumer_txn, 0, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0099_commit_metadata, 0),
- _TEST(0100_thread_interceptors, TEST_F_LOCAL),
- _TEST(0101_fetch_from_follower, 0, TEST_BRKVER(2, 4, 0, 0)),
- _TEST(0102_static_group_rebalance, 0, TEST_BRKVER(2, 3, 0, 0)),
- _TEST(0103_transactions_local, TEST_F_LOCAL),
- _TEST(0103_transactions,
- 0,
- TEST_BRKVER(0, 11, 0, 0),
- .scenario = "default,ak23"),
- _TEST(0104_fetch_from_follower_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)),
- _TEST(0105_transactions_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0106_cgrp_sess_timeout, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0107_topic_recreate,
- 0,
- TEST_BRKVER_TOPIC_ADMINAPI,
- .scenario = "noautocreate"),
- _TEST(0109_auto_create_topics, 0),
- _TEST(0110_batch_size, 0),
- _TEST(0111_delay_create_topics,
- 0,
- TEST_BRKVER_TOPIC_ADMINAPI,
- .scenario = "noautocreate"),
- _TEST(0112_assign_unknown_part, 0),
- _TEST(0113_cooperative_rebalance_local,
- TEST_F_LOCAL,
- TEST_BRKVER(2, 4, 0, 0)),
- _TEST(0113_cooperative_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)),
- _TEST(0114_sticky_partitioning, 0),
- _TEST(0115_producer_auth, 0, TEST_BRKVER(2, 1, 0, 0)),
- _TEST(0116_kafkaconsumer_close, TEST_F_LOCAL),
- _TEST(0117_mock_errors, TEST_F_LOCAL),
- _TEST(0118_commit_rebalance, 0),
- _TEST(0119_consumer_auth, 0, TEST_BRKVER(2, 1, 0, 0)),
- _TEST(0120_asymmetric_subscription, TEST_F_LOCAL),
- _TEST(0121_clusterid, TEST_F_LOCAL),
- _TEST(0122_buffer_cleaning_after_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)),
- _TEST(0123_connections_max_idle, 0),
- _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL),
- _TEST(0125_immediate_flush, 0),
- _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3, 1, 0, 0)),
- _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2, 0, 0, 0)),
- _TEST(0129_fetch_aborted_msgs, 0, TEST_BRKVER(0, 11, 0, 0)),
- _TEST(0130_store_offsets, 0),
- _TEST(0131_connect_timeout, TEST_F_LOCAL),
- _TEST(0132_strategy_ordering, 0, TEST_BRKVER(2, 4, 0, 0)),
- _TEST(0133_ssl_keys, TEST_F_LOCAL),
- _TEST(0134_ssl_provider, TEST_F_LOCAL),
- _TEST(0135_sasl_credentials, 0),
- _TEST(0136_resolve_cb, TEST_F_LOCAL),
- _TEST(0137_barrier_batch_consume, 0),
- _TEST(0138_admin_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)),
-
- /* Manual tests */
- _TEST(8000_idle, TEST_F_MANUAL),
-
- {NULL}};
-
-
-RD_TLS struct test *test_curr = &tests[0];
-
-
-
-#if WITH_SOCKEM
-/**
- * Socket network emulation with sockem
- */
-
-static void test_socket_add(struct test *test, sockem_t *skm) {
- TEST_LOCK();
- rd_list_add(&test->sockets, skm);
- TEST_UNLOCK();
-}
-
-static void test_socket_del(struct test *test, sockem_t *skm, int do_lock) {
- if (do_lock)
- TEST_LOCK();
- /* Best effort, skm might not have been added if connect_cb failed */
- rd_list_remove(&test->sockets, skm);
- if (do_lock)
- TEST_UNLOCK();
-}
-
-int test_socket_sockem_set_all(const char *key, int val) {
- int i;
- sockem_t *skm;
- int cnt = 0;
-
- TEST_LOCK();
-
- cnt = rd_list_cnt(&test_curr->sockets);
- TEST_SAY("Setting sockem %s=%d on %s%d socket(s)\n", key, val,
- cnt > 0 ? "" : _C_RED, cnt);
-
- RD_LIST_FOREACH(skm, &test_curr->sockets, i) {
- if (sockem_set(skm, key, val, NULL) == -1)
- TEST_FAIL("sockem_set(%s, %d) failed", key, val);
- }
-
- TEST_UNLOCK();
-
- return cnt;
-}
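-
-/* Example usage (illustrative sketch, not from the original file): a
- * sockem-enabled test can add artificial latency to every connection
- * tracked for the current test:
- *
- *   test_socket_sockem_set_all("delay", 3000);  // ~3000ms delay
- */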
-
-void test_socket_sockem_set(int s, const char *key, int value) {
- sockem_t *skm;
-
- TEST_LOCK();
- skm = sockem_find(s);
- if (skm)
- sockem_set(skm, key, value, NULL);
- TEST_UNLOCK();
-}
-
-void test_socket_close_all(struct test *test, int reinit) {
- TEST_LOCK();
- rd_list_destroy(&test->sockets);
- if (reinit)
- rd_list_init(&test->sockets, 16, (void *)sockem_close);
- TEST_UNLOCK();
-}
-
-
-static int test_connect_cb(int s,
- const struct sockaddr *addr,
- int addrlen,
- const char *id,
- void *opaque) {
- struct test *test = opaque;
- sockem_t *skm;
- int r;
-
- skm = sockem_connect(s, addr, addrlen, test_sockem_conf, 0, NULL);
- if (!skm)
- return errno;
-
- if (test->connect_cb) {
- r = test->connect_cb(test, skm, id);
- if (r)
- return r;
- }
-
- test_socket_add(test, skm);
-
- return 0;
-}
-
-static int test_closesocket_cb(int s, void *opaque) {
- struct test *test = opaque;
- sockem_t *skm;
-
- TEST_LOCK();
- skm = sockem_find(s);
- if (skm) {
- /* Close sockem's sockets */
- sockem_close(skm);
- test_socket_del(test, skm, 0 /*nolock*/);
- }
- TEST_UNLOCK();
-
- /* Close librdkafka's socket */
-#ifdef _WIN32
- closesocket(s);
-#else
- close(s);
-#endif
-
- return 0;
-}
-
-
-void test_socket_enable(rd_kafka_conf_t *conf) {
- rd_kafka_conf_set_connect_cb(conf, test_connect_cb);
- rd_kafka_conf_set_closesocket_cb(conf, test_closesocket_cb);
- rd_kafka_conf_set_opaque(conf, test_curr);
-}
-#endif /* WITH_SOCKEM */
-
-/**
- * @brief For use as the is_fatal_cb(), treating no errors as test-fatal.
- */
-int test_error_is_not_fatal_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason) {
- return 0;
-}
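-
-/* Typical usage (sketch, not part of the original file): a test that
- * expects transient errors installs this callback so test_error_cb()
- * below logs them instead of failing the test:
- *
- *   test_curr->is_fatal_cb = test_error_is_not_fatal_cb;
- */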
-
-static void
-test_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
- if (test_curr->is_fatal_cb &&
- !test_curr->is_fatal_cb(rk, err, reason)) {
- TEST_SAY(_C_YEL "%s rdkafka error (non-testfatal): %s: %s\n",
- rd_kafka_name(rk), rd_kafka_err2str(err), reason);
- } else {
- if (err == RD_KAFKA_RESP_ERR__FATAL) {
- char errstr[512];
- TEST_SAY(_C_RED "%s Fatal error: %s\n",
- rd_kafka_name(rk), reason);
-
- err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
-
- if (test_curr->is_fatal_cb &&
- !test_curr->is_fatal_cb(rk, err, reason))
- TEST_SAY(_C_YEL
- "%s rdkafka ignored FATAL error: "
- "%s: %s\n",
- rd_kafka_name(rk),
- rd_kafka_err2str(err), errstr);
- else
- TEST_FAIL("%s rdkafka FATAL error: %s: %s",
- rd_kafka_name(rk),
- rd_kafka_err2str(err), errstr);
-
- } else {
- TEST_FAIL("%s rdkafka error: %s: %s", rd_kafka_name(rk),
- rd_kafka_err2str(err), reason);
- }
- }
-}
-
-static int
-test_stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
- struct test *test = test_curr;
- if (test->stats_fp)
- fprintf(test->stats_fp,
- "{\"test\": \"%s\", \"instance\":\"%s\", "
- "\"stats\": %s}\n",
- test->name, rd_kafka_name(rk), json);
- return 0;
-}
-
-
-/**
- * @brief Limit the test run time (in seconds)
- */
-void test_timeout_set(int timeout) {
- TEST_LOCK();
- TEST_SAY("Setting test timeout to %ds * %.1f\n", timeout,
- test_timeout_multiplier);
- timeout = (int)((double)timeout * test_timeout_multiplier);
- test_curr->timeout = test_clock() + ((int64_t)timeout * 1000000);
- TEST_UNLOCK();
-}
-
-int tmout_multip(int msecs) {
- int r;
- TEST_LOCK();
- r = (int)(((double)(msecs)) * test_timeout_multiplier);
- TEST_UNLOCK();
- return r;
-}
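-
-/* Example (sketch): scale a nominal timeout so the same test tolerates
- * slower environments (valgrind, loaded CI) via the configured multiplier:
- *
- *   rd_kafka_poll(rk, tmout_multip(5000));  // nominally 5s, scaled
- */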
-
-
-
-#ifdef _WIN32
-static void test_init_win32(void) {
- /* Enable VT emulation to support colored output. */
- HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE);
- DWORD dwMode = 0;
-
- if (hOut == INVALID_HANDLE_VALUE || !GetConsoleMode(hOut, &dwMode))
- return;
-
-#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
-#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x4
-#endif
- dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
- SetConsoleMode(hOut, dwMode);
-}
-#endif
-
-
-static void test_init(void) {
- int seed;
- const char *tmp;
-
-
- if (test_seed)
- return;
-
- if ((tmp = test_getenv("TEST_LEVEL", NULL)))
- test_level = atoi(tmp);
- if ((tmp = test_getenv("TEST_MODE", NULL)))
- strncpy(test_mode, tmp, sizeof(test_mode) - 1);
- if ((tmp = test_getenv("TEST_SCENARIO", NULL)))
- strncpy(test_scenario, tmp, sizeof(test_scenario) - 1);
- if ((tmp = test_getenv("TEST_SOCKEM", NULL)))
- test_sockem_conf = tmp;
- if ((tmp = test_getenv("TEST_SEED", NULL)))
- seed = atoi(tmp);
- else
- seed = test_clock() & 0xffffffff;
- if ((tmp = test_getenv("TEST_CPU_CALIBRATION", NULL))) {
- test_rusage_cpu_calibration = strtod(tmp, NULL);
- if (test_rusage_cpu_calibration < 0.00001) {
- fprintf(stderr,
- "%% Invalid CPU calibration "
- "value (from TEST_CPU_CALIBRATION env): %s\n",
- tmp);
- exit(1);
- }
- }
-
-#ifdef _WIN32
- test_init_win32();
- {
- LARGE_INTEGER cycl;
- QueryPerformanceCounter(&cycl);
- seed = (int)cycl.QuadPart;
- }
-#endif
- srand(seed);
- test_seed = seed;
-}
-
-
-const char *test_mk_topic_name(const char *suffix, int randomized) {
- static RD_TLS char ret[512];
-
- /* Strip main_ prefix (caller is using __FUNCTION__) */
- if (!strncmp(suffix, "main_", 5))
- suffix += 5;
-
- if (test_topic_random || randomized)
- rd_snprintf(ret, sizeof(ret), "%s_rnd%" PRIx64 "_%s",
- test_topic_prefix, test_id_generate(), suffix);
- else
- rd_snprintf(ret, sizeof(ret), "%s_%s", test_topic_prefix,
- suffix);
-
- TEST_SAY("Using topic \"%s\"\n", ret);
-
- return ret;
-}
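-
-/* Example (sketch): tests commonly derive the topic name from their own
- * function name; with randomization each run gets a fresh topic:
- *
- *   const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- *   // e.g. "rdkafkatest_rnd3f2b..._0001_multiobj"
- */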
-
-
-/**
- * @brief Set special test config property
- * @returns 1 if property was known, else 0.
- */
-int test_set_special_conf(const char *name, const char *val, int *timeoutp) {
- if (!strcmp(name, "test.timeout.multiplier")) {
- TEST_LOCK();
- test_timeout_multiplier = strtod(val, NULL);
- TEST_UNLOCK();
- *timeoutp = tmout_multip((*timeoutp) * 1000) / 1000;
- } else if (!strcmp(name, "test.topic.prefix")) {
- rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix), "%s",
- val);
- } else if (!strcmp(name, "test.topic.random")) {
- if (!strcmp(val, "true") || !strcmp(val, "1"))
- test_topic_random = 1;
- else
- test_topic_random = 0;
- } else if (!strcmp(name, "test.concurrent.max")) {
- TEST_LOCK();
- test_concurrent_max = (int)strtod(val, NULL);
- TEST_UNLOCK();
- } else if (!strcmp(name, "test.sql.command")) {
- TEST_LOCK();
- if (test_sql_cmd)
- rd_free(test_sql_cmd);
- test_sql_cmd = rd_strdup(val);
- TEST_UNLOCK();
- } else
- return 0;
-
- return 1;
-}
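-
-/* Example (sketch): test.conf lines recognized by test_set_special_conf()
- * rather than passed on to librdkafka:
- *
- *   test.timeout.multiplier=3.0
- *   test.topic.prefix=mytest
- *   test.topic.random=true
- *   test.concurrent.max=2
- */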
-
-static void test_read_conf_file(const char *conf_path,
- rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *topic_conf,
- int *timeoutp) {
- FILE *fp;
- char buf[1024];
- int line = 0;
-
-#ifndef _WIN32
- fp = fopen(conf_path, "r");
-#else
- fp = NULL;
- errno = fopen_s(&fp, conf_path, "r");
-#endif
- if (!fp) {
- if (errno == ENOENT) {
- TEST_SAY("Test config file %s not found\n", conf_path);
- return;
- } else
- TEST_FAIL("Failed to read %s: %s", conf_path,
- strerror(errno));
- }
-
- while (fgets(buf, sizeof(buf) - 1, fp)) {
- char *t;
- char *b = buf;
- rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;
- char *name, *val;
- char errstr[512];
-
- line++;
- if ((t = strchr(b, '\n')))
- *t = '\0';
-
- if (*b == '#' || !*b)
- continue;
-
- if (!(t = strchr(b, '=')))
- TEST_FAIL("%s:%i: expected name=value format\n",
- conf_path, line);
-
- name = b;
- *t = '\0';
- val = t + 1;
-
- if (test_set_special_conf(name, val, timeoutp))
- continue;
-
- if (!strncmp(name, "topic.", strlen("topic."))) {
- name += strlen("topic.");
- if (topic_conf)
- res = rd_kafka_topic_conf_set(topic_conf, name,
- val, errstr,
- sizeof(errstr));
- else
- res = RD_KAFKA_CONF_OK;
- name -= strlen("topic.");
- }
-
- if (res == RD_KAFKA_CONF_UNKNOWN) {
- if (conf)
- res = rd_kafka_conf_set(conf, name, val, errstr,
- sizeof(errstr));
- else
- res = RD_KAFKA_CONF_OK;
- }
-
- if (res != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s:%i: %s\n", conf_path, line, errstr);
- }
-
- fclose(fp);
-}
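-
-/* Example (sketch): a test.conf file as parsed above. Empty lines and
- * lines starting with '#' are skipped, "topic."-prefixed properties go to
- * the topic conf, test.* properties are intercepted by
- * test_set_special_conf(), and the rest go to the global conf:
- *
- *   # local test configuration
- *   bootstrap.servers=localhost:9092
- *   topic.message.timeout.ms=30000
- *   test.timeout.multiplier=2.0
- */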
-
-/**
- * @brief Get path to test config file
- */
-const char *test_conf_get_path(void) {
- return test_getenv("RDKAFKA_TEST_CONF", "test.conf");
-}
-
-const char *test_getenv(const char *env, const char *def) {
- return rd_getenv(env, def);
-}
-
-void test_conf_common_init(rd_kafka_conf_t *conf, int timeout) {
- if (conf) {
- const char *tmp = test_getenv("TEST_DEBUG", NULL);
- if (tmp)
- test_conf_set(conf, "debug", tmp);
- }
-
- if (timeout)
- test_timeout_set(timeout);
-}
-
-
-/**
- * Creates and sets up kafka configuration objects.
- * Will read "test.conf" file if it exists.
- */
-void test_conf_init(rd_kafka_conf_t **conf,
- rd_kafka_topic_conf_t **topic_conf,
- int timeout) {
- const char *test_conf = test_conf_get_path();
-
- if (conf) {
- *conf = rd_kafka_conf_new();
- rd_kafka_conf_set(*conf, "client.id", test_curr->name, NULL, 0);
- if (test_idempotent_producer)
- test_conf_set(*conf, "enable.idempotence", "true");
- rd_kafka_conf_set_error_cb(*conf, test_error_cb);
- rd_kafka_conf_set_stats_cb(*conf, test_stats_cb);
-
- /* Allow higher request timeouts on CI */
- if (test_on_ci)
- test_conf_set(*conf, "request.timeout.ms", "10000");
-
-#ifdef SIGIO
- {
- char buf[64];
-
- /* Quick termination */
- rd_snprintf(buf, sizeof(buf), "%i", SIGIO);
- rd_kafka_conf_set(*conf, "internal.termination.signal",
- buf, NULL, 0);
- signal(SIGIO, SIG_IGN);
- }
-#endif
- }
-
-#if WITH_SOCKEM
- if (*test_sockem_conf && conf)
- test_socket_enable(*conf);
-#endif
-
- if (topic_conf)
- *topic_conf = rd_kafka_topic_conf_new();
-
- /* Open and read optional local test configuration file, if any. */
- test_read_conf_file(test_conf, conf ? *conf : NULL,
- topic_conf ? *topic_conf : NULL, &timeout);
-
- test_conf_common_init(conf ? *conf : NULL, timeout);
-}
-
-
-static RD_INLINE unsigned int test_rand(void) {
- unsigned int r;
-#ifdef _WIN32
- rand_s(&r);
-#else
- r = rand();
-#endif
- return r;
-}
-/**
- * Generate a "unique" test id.
- */
-uint64_t test_id_generate(void) {
- return (((uint64_t)test_rand()) << 32) | (uint64_t)test_rand();
-}
-
-
-/**
- * Generate a "unique" string id
- */
-char *test_str_id_generate(char *dest, size_t dest_size) {
- rd_snprintf(dest, dest_size, "%" PRId64, test_id_generate());
- return dest;
-}
-
-/**
- * Same as test_str_id_generate but returns a temporary string.
- */
-const char *test_str_id_generate_tmp(void) {
- static RD_TLS char ret[64];
- return test_str_id_generate(ret, sizeof(ret));
-}
-
-/**
- * Format a message token.
- * Pads to dest_size.
- */
-void test_msg_fmt(char *dest,
- size_t dest_size,
- uint64_t testid,
- int32_t partition,
- int msgid) {
- size_t of;
-
- of = rd_snprintf(dest, dest_size,
- "testid=%" PRIu64 ", partition=%" PRId32 ", msg=%i\n",
- testid, partition, msgid);
- if (of < dest_size - 1) {
- memset(dest + of, '!', dest_size - of);
- dest[dest_size - 1] = '\0';
- }
-}
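-
-/* Example (sketch): a formatted token looks like
- * "testid=1234567890, partition=0, msg=7\n" and, when shorter than
- * dest_size, is padded with '!' up to a terminating NUL at
- * dest[dest_size - 1]. */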
-
-/**
- * @brief Prepare message value and key for test produce.
- */
-void test_prepare_msg(uint64_t testid,
- int32_t partition,
- int msg_id,
- char *val,
- size_t val_size,
- char *key,
- size_t key_size) {
- size_t of = 0;
-
- test_msg_fmt(key, key_size, testid, partition, msg_id);
-
- while (of < val_size) {
- /* Copy-repeat key into val until val_size */
- size_t len = RD_MIN(val_size - of, key_size);
- memcpy(val + of, key, len);
- of += len;
- }
-}
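-
-/* Example (sketch): with val_size=300 and key_size=128 the value buffer
- * is filled by repeating (and finally truncating) the 128-byte key token
- * until all 300 bytes are written. */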
-
-
-
-/**
- * Parse a message token
- */
-void test_msg_parse00(const char *func,
- int line,
- uint64_t testid,
- int32_t exp_partition,
- int *msgidp,
- const char *topic,
- int32_t partition,
- int64_t offset,
- const char *key,
- size_t key_size) {
- char buf[128];
- uint64_t in_testid;
- int in_part;
-
- if (!key)
- TEST_FAIL("%s:%i: Message (%s [%" PRId32 "] @ %" PRId64
- ") "
- "has empty key\n",
- func, line, topic, partition, offset);
-
- rd_snprintf(buf, sizeof(buf), "%.*s", (int)key_size, key);
-
- if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n",
- &in_testid, &in_part, msgidp) != 3)
- TEST_FAIL("%s:%i: Incorrect key format: %s", func, line, buf);
-
-
- if (testid != in_testid ||
- (exp_partition != -1 && exp_partition != in_part))
- TEST_FAIL("%s:%i: Our testid %" PRIu64
- ", part %i did "
- "not match message: \"%s\"\n",
- func, line, testid, (int)exp_partition, buf);
-}
-
-void test_msg_parse0(const char *func,
- int line,
- uint64_t testid,
- rd_kafka_message_t *rkmessage,
- int32_t exp_partition,
- int *msgidp) {
- test_msg_parse00(func, line, testid, exp_partition, msgidp,
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset,
- (const char *)rkmessage->key, rkmessage->key_len);
-}
-
-
-struct run_args {
- struct test *test;
- int argc;
- char **argv;
-};
-
-static int run_test0(struct run_args *run_args) {
- struct test *test = run_args->test;
- test_timing_t t_run;
- int r;
- char stats_file[256];
-
- rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%" PRIu64 ".json",
- test->name, test_id_generate());
- if (!(test->stats_fp = fopen(stats_file, "w+")))
- TEST_SAY("=== Failed to create stats file %s: %s ===\n",
- stats_file, strerror(errno));
-
- test_curr = test;
-
-#if WITH_SOCKEM
- rd_list_init(&test->sockets, 16, (void *)sockem_close);
-#endif
- /* Don't check message status by default */
- test->exp_dr_status = (rd_kafka_msg_status_t)-1;
-
- TEST_SAY("================= Running test %s =================\n",
- test->name);
- if (test->stats_fp)
- TEST_SAY("==== Stats written to file %s ====\n", stats_file);
-
- test_rusage_start(test_curr);
- TIMING_START(&t_run, "%s", test->name);
- test->start = t_run.ts_start;
-
- /* Run test main function */
- r = test->mainfunc(run_args->argc, run_args->argv);
-
- TIMING_STOP(&t_run);
- test_rusage_stop(test_curr,
- (double)TIMING_DURATION(&t_run) / 1000000.0);
-
- TEST_LOCK();
- test->duration = TIMING_DURATION(&t_run);
-
- if (test->state == TEST_SKIPPED) {
- TEST_SAY(
- "================= Test %s SKIPPED "
- "=================\n",
- run_args->test->name);
- } else if (r) {
- test->state = TEST_FAILED;
- TEST_SAY(
- "\033[31m"
- "================= Test %s FAILED ================="
- "\033[0m\n",
- run_args->test->name);
- } else {
- test->state = TEST_PASSED;
- TEST_SAY(
- "\033[32m"
- "================= Test %s PASSED ================="
- "\033[0m\n",
- run_args->test->name);
- }
- TEST_UNLOCK();
-
- cnd_broadcast(&test_cnd);
-
-#if WITH_SOCKEM
- test_socket_close_all(test, 0);
-#endif
-
- if (test->stats_fp) {
- long pos = ftell(test->stats_fp);
- fclose(test->stats_fp);
- test->stats_fp = NULL;
- /* Delete file if nothing was written */
- if (pos == 0) {
-#ifndef _WIN32
- unlink(stats_file);
-#else
- _unlink(stats_file);
-#endif
- }
- }
-
- if (test_delete_topics_between && test_concurrent_max == 1)
- test_delete_all_test_topics(60 * 1000);
-
- return r;
-}
-
-
-
-static int run_test_from_thread(void *arg) {
- struct run_args *run_args = arg;
-
- thrd_detach(thrd_current());
-
- run_test0(run_args);
-
- TEST_LOCK();
- tests_running_cnt--;
- TEST_UNLOCK();
-
- free(run_args);
-
- return 0;
-}
-
-
-/**
- * @brief Check running tests for timeouts.
- * @locks TEST_LOCK MUST be held
- */
-static void check_test_timeouts(void) {
- int64_t now = test_clock();
- struct test *test;
-
- for (test = tests; test->name; test++) {
- if (test->state != TEST_RUNNING)
- continue;
-
- /* Timeout check */
- if (now > test->timeout) {
- struct test *save_test = test_curr;
- test_curr = test;
- test->state = TEST_FAILED;
- test_summary(0 /*no-locks*/);
- TEST_FAIL0(
- __FILE__, __LINE__, 0 /*nolock*/, 0 /*fail-later*/,
- "Test %s%s%s%s timed out "
- "(timeout set to %d seconds)\n",
- test->name, *test->subtest ? " (" : "",
- test->subtest, *test->subtest ? ")" : "",
- (int)(test->timeout - test->start) / 1000000);
- test_curr = save_test;
- tests_running_cnt--; /* fail-later misses this */
-#ifdef _WIN32
- TerminateThread(test->thrd, -1);
-#else
- pthread_kill(test->thrd, SIGKILL);
-#endif
- }
- }
-}
-
-
-static int run_test(struct test *test, int argc, char **argv) {
- struct run_args *run_args = calloc(1, sizeof(*run_args));
- int wait_cnt = 0;
-
- run_args->test = test;
- run_args->argc = argc;
- run_args->argv = argv;
-
- TEST_LOCK();
- while (tests_running_cnt >= test_concurrent_max) {
- if (!(wait_cnt++ % 100))
- TEST_SAY(
- "Too many tests running (%d >= %d): "
- "postponing %s start...\n",
- tests_running_cnt, test_concurrent_max, test->name);
- cnd_timedwait_ms(&test_cnd, &test_mtx, 100);
-
- check_test_timeouts();
- }
- tests_running_cnt++;
- test->timeout = test_clock() +
- (int64_t)(30.0 * 1000000.0 * test_timeout_multiplier);
- test->state = TEST_RUNNING;
- TEST_UNLOCK();
-
- if (thrd_create(&test->thrd, run_test_from_thread, run_args) !=
- thrd_success) {
- TEST_LOCK();
- tests_running_cnt--;
- test->state = TEST_FAILED;
- TEST_UNLOCK();
-
- TEST_FAIL("Failed to start thread for test %s\n", test->name);
- }
-
- return 0;
-}
-
-static void run_tests(int argc, char **argv) {
- struct test *test;
-
- for (test = tests; test->name; test++) {
- char testnum[128];
- char *t;
- const char *skip_reason = NULL;
- rd_bool_t skip_silent = rd_false;
- char tmp[128];
- const char *scenario =
- test->scenario ? test->scenario : "default";
-
- if (!test->mainfunc)
- continue;
-
- /* Extract test number, as string */
- strncpy(testnum, test->name, sizeof(testnum) - 1);
- testnum[sizeof(testnum) - 1] = '\0';
- if ((t = strchr(testnum, '_')))
- *t = '\0';
-
- if ((test_flags && (test_flags & test->flags) != test_flags)) {
- skip_reason = "filtered due to test flags";
- skip_silent = rd_true;
- }
- if ((test_neg_flags & ~test_flags) & test->flags)
- skip_reason = "Filtered due to negative test flags";
- if (test_broker_version &&
- (test->minver > test_broker_version ||
- (test->maxver && test->maxver < test_broker_version))) {
- rd_snprintf(tmp, sizeof(tmp),
- "not applicable for broker "
- "version %d.%d.%d.%d",
- TEST_BRKVER_X(test_broker_version, 0),
- TEST_BRKVER_X(test_broker_version, 1),
- TEST_BRKVER_X(test_broker_version, 2),
- TEST_BRKVER_X(test_broker_version, 3));
- skip_reason = tmp;
- }
-
- if (!strstr(scenario, test_scenario)) {
- rd_snprintf(tmp, sizeof(tmp),
- "requires test scenario %s", scenario);
- skip_silent = rd_true;
- skip_reason = tmp;
- }
-
- if (tests_to_run && !strstr(tests_to_run, testnum)) {
- skip_reason = "not included in TESTS list";
- skip_silent = rd_true;
- } else if (!tests_to_run && (test->flags & TEST_F_MANUAL)) {
- skip_reason = "manual test";
- skip_silent = rd_true;
- } else if (tests_to_skip && strstr(tests_to_skip, testnum))
- skip_reason = "included in TESTS_SKIP list";
-
- if (!skip_reason) {
- run_test(test, argc, argv);
- } else {
- if (skip_silent) {
- TEST_SAYL(3,
- "================= Skipping test %s "
- "(%s) ================\n",
- test->name, skip_reason);
- TEST_LOCK();
- test->state = TEST_SKIPPED;
- TEST_UNLOCK();
- } else {
- test_curr = test;
- TEST_SKIP("%s\n", skip_reason);
- test_curr = &tests[0];
- }
- }
- }
-}
-
-/**
- * @brief Print summary for all tests.
- *
- * @returns the number of failed tests.
- */
-static int test_summary(int do_lock) {
- struct test *test;
- FILE *report_fp = NULL;
- char report_path[128];
- time_t t;
- struct tm *tm;
- char datestr[64];
- int64_t total_duration = 0;
- int tests_run = 0;
- int tests_failed = 0;
- int tests_failed_known = 0;
- int tests_passed = 0;
- FILE *sql_fp = NULL;
- const char *tmp;
-
- t = time(NULL);
- tm = localtime(&t);
- strftime(datestr, sizeof(datestr), "%Y%m%d%H%M%S", tm);
-
- if ((tmp = test_getenv("TEST_REPORT", NULL)))
- rd_snprintf(report_path, sizeof(report_path), "%s", tmp);
- else if (test_write_report)
- rd_snprintf(report_path, sizeof(report_path),
- "test_report_%s.json", datestr);
- else
- report_path[0] = '\0';
-
- if (*report_path) {
- report_fp = fopen(report_path, "w+");
- if (!report_fp)
- TEST_WARN("Failed to create report file %s: %s\n",
- report_path, strerror(errno));
- else
- fprintf(report_fp,
- "{ \"id\": \"%s_%s\", \"mode\": \"%s\", "
- "\"scenario\": \"%s\", "
- "\"date\": \"%s\", "
- "\"git_version\": \"%s\", "
- "\"broker_version\": \"%s\", "
- "\"tests\": {",
- datestr, test_mode, test_mode, test_scenario,
- datestr, test_git_version,
- test_broker_version_str);
- }
-
- if (do_lock)
- TEST_LOCK();
-
- if (test_sql_cmd) {
-#ifdef _WIN32
- sql_fp = _popen(test_sql_cmd, "w");
-#else
- sql_fp = popen(test_sql_cmd, "w");
-#endif
- if (!sql_fp)
- TEST_WARN("Failed to execute test.sql.command: %s",
- test_sql_cmd);
- else
- fprintf(sql_fp,
- "CREATE TABLE IF NOT EXISTS "
- "runs(runid text PRIMARY KEY, mode text, "
- "date datetime, cnt int, passed int, "
- "failed int, duration numeric);\n"
- "CREATE TABLE IF NOT EXISTS "
- "tests(runid text, mode text, name text, "
- "state text, extra text, duration numeric);\n");
- }
-
- if (show_summary)
- printf(
- "TEST %s (%s, scenario %s) SUMMARY\n"
- "#========================================================="
- "=========#\n",
- datestr, test_mode, test_scenario);
-
- for (test = tests; test->name; test++) {
- const char *color;
- int64_t duration;
- char extra[128] = "";
- int do_count = 1;
-
- if (!(duration = test->duration) && test->start > 0)
- duration = test_clock() - test->start;
-
- if (test == tests) {
- /* The <MAIN> test accounts for total runtime;
- * don't include it in the passed/run/failed counts. */
- total_duration = duration;
- do_count = 0;
- }
-
- switch (test->state) {
- case TEST_PASSED:
- color = _C_GRN;
- if (do_count) {
- tests_passed++;
- tests_run++;
- }
- break;
- case TEST_FAILED:
- if (test->flags & TEST_F_KNOWN_ISSUE) {
- rd_snprintf(extra, sizeof(extra),
- " <-- known issue%s%s",
- test->extra ? ": " : "",
- test->extra ? test->extra : "");
- if (do_count)
- tests_failed_known++;
- }
- color = _C_RED;
- if (do_count) {
- tests_failed++;
- tests_run++;
- }
- break;
- case TEST_RUNNING:
- color = _C_MAG;
- if (do_count) {
- tests_failed++; /* All tests should have finished */
- tests_run++;
- }
- break;
- case TEST_NOT_STARTED:
- color = _C_YEL;
- if (test->extra)
- rd_snprintf(extra, sizeof(extra), " %s",
- test->extra);
- break;
- default:
- color = _C_CYA;
- break;
- }
-
- if (show_summary &&
- (test->state != TEST_SKIPPED || *test->failstr ||
- (tests_to_run && !strncmp(tests_to_run, test->name,
- strlen(tests_to_run))))) {
- printf("|%s %-40s | %10s | %7.3fs %s|", color,
- test->name, test_states[test->state],
- (double)duration / 1000000.0, _C_CLR);
- if (test->state == TEST_FAILED)
- printf(_C_RED " %s" _C_CLR, test->failstr);
- else if (test->state == TEST_SKIPPED)
- printf(_C_CYA " %s" _C_CLR, test->failstr);
- printf("%s\n", extra);
- }
-
- if (report_fp) {
- int i;
- fprintf(report_fp,
- "%s\"%s\": {"
- "\"name\": \"%s\", "
- "\"state\": \"%s\", "
- "\"known_issue\": %s, "
- "\"extra\": \"%s\", "
- "\"duration\": %.3f, "
- "\"report\": [ ",
- test == tests ? "" : ", ", test->name,
- test->name, test_states[test->state],
- test->flags & TEST_F_KNOWN_ISSUE ? "true"
- : "false",
- test->extra ? test->extra : "",
- (double)duration / 1000000.0);
-
- for (i = 0; i < test->report_cnt; i++) {
- fprintf(report_fp, "%s%s ", i == 0 ? "" : ",",
- test->report_arr[i]);
- }
-
- fprintf(report_fp, "] }");
- }
-
- if (sql_fp)
- fprintf(sql_fp,
- "INSERT INTO tests VALUES("
- "'%s_%s', '%s', '%s', '%s', '%s', %f);\n",
- datestr, test_mode, test_mode, test->name,
- test_states[test->state],
- test->extra ? test->extra : "",
- (double)duration / 1000000.0);
- }
- if (do_lock)
- TEST_UNLOCK();
-
- if (show_summary)
- printf(
- "#========================================================="
- "=========#\n");
-
- if (report_fp) {
- fprintf(report_fp,
- "}, "
- "\"tests_run\": %d, "
- "\"tests_passed\": %d, "
- "\"tests_failed\": %d, "
- "\"duration\": %.3f"
- "}\n",
- tests_run, tests_passed, tests_failed,
- (double)total_duration / 1000000.0);
-
- fclose(report_fp);
- TEST_SAY("# Test report written to %s\n", report_path);
- }
-
- if (sql_fp) {
- fprintf(sql_fp,
- "INSERT INTO runs VALUES('%s_%s', '%s', datetime(), "
- "%d, %d, %d, %f);\n",
- datestr, test_mode, test_mode, tests_run, tests_passed,
- tests_failed, (double)total_duration / 1000000.0);
- fclose(sql_fp);
- }
-
- return tests_failed - tests_failed_known;
-}
-
-#ifndef _WIN32
-static void test_sig_term(int sig) {
- if (test_exit)
- exit(1);
- fprintf(stderr,
- "Exiting tests, waiting for running tests to finish.\n");
- test_exit = 1;
-}
-#endif
-
-/**
- * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
- */
-static void test_wait_exit(int timeout) {
- int r;
- time_t start = time(NULL);
-
- while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
- TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
- rd_sleep(1);
- }
-
- TEST_SAY("%i thread(s) in use by librdkafka\n", r);
-
- if (r > 0)
- TEST_FAIL("%i thread(s) still active in librdkafka", r);
-
- timeout -= (int)(time(NULL) - start);
- if (timeout > 0) {
- TEST_SAY(
- "Waiting %d seconds for all librdkafka memory "
- "to be released\n",
- timeout);
- if (rd_kafka_wait_destroyed(timeout * 1000) == -1)
- TEST_FAIL(
- "Not all internal librdkafka "
- "objects destroyed\n");
- }
-}
-
-
-
-/**
- * @brief Test framework cleanup before termination.
- */
-static void test_cleanup(void) {
- struct test *test;
-
- /* Free report arrays */
- for (test = tests; test->name; test++) {
- int i;
- if (!test->report_arr)
- continue;
- for (i = 0; i < test->report_cnt; i++)
- rd_free(test->report_arr[i]);
- rd_free(test->report_arr);
- test->report_arr = NULL;
- }
-
- if (test_sql_cmd)
- rd_free(test_sql_cmd);
-}
-
-
-int main(int argc, char **argv) {
- int i, r;
- test_timing_t t_all;
- int a, b, c, d;
- const char *tmpver;
-
- mtx_init(&test_mtx, mtx_plain);
- cnd_init(&test_cnd);
-
- test_init();
-
-#ifndef _WIN32
- signal(SIGINT, test_sig_term);
-#endif
- tests_to_run = test_getenv("TESTS", NULL);
- subtests_to_run = test_getenv("SUBTESTS", NULL);
- tests_to_skip = test_getenv("TESTS_SKIP", NULL);
- tmpver = test_getenv("TEST_KAFKA_VERSION", NULL);
- if (!tmpver)
- tmpver = test_getenv("KAFKA_VERSION", test_broker_version_str);
- test_broker_version_str = tmpver;
-
- test_git_version = test_getenv("RDKAFKA_GITVER", "HEAD");
-
- /* Are we running on CI? */
- if (test_getenv("CI", NULL)) {
- test_on_ci = 1;
- test_concurrent_max = 3;
- }
-
- test_conf_init(NULL, NULL, 10);
-
- for (i = 1; i < argc; i++) {
- if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2) {
- if (test_rusage) {
- fprintf(stderr,
- "%% %s ignored: -R takes preceedence\n",
- argv[i]);
- continue;
- }
- test_concurrent_max = (int)strtod(argv[i] + 2, NULL);
- } else if (!strcmp(argv[i], "-l"))
- test_flags |= TEST_F_LOCAL;
- else if (!strcmp(argv[i], "-L"))
- test_neg_flags |= TEST_F_LOCAL;
- else if (!strcmp(argv[i], "-a"))
- test_assert_on_fail = 1;
- else if (!strcmp(argv[i], "-k"))
- test_flags |= TEST_F_KNOWN_ISSUE;
- else if (!strcmp(argv[i], "-K"))
- test_neg_flags |= TEST_F_KNOWN_ISSUE;
- else if (!strcmp(argv[i], "-E"))
- test_neg_flags |= TEST_F_SOCKEM;
- else if (!strcmp(argv[i], "-V") && i + 1 < argc)
- test_broker_version_str = argv[++i];
- else if (!strcmp(argv[i], "-s") && i + 1 < argc)
- strncpy(test_scenario, argv[++i],
- sizeof(test_scenario) - 1);
- else if (!strcmp(argv[i], "-S"))
- show_summary = 0;
- else if (!strcmp(argv[i], "-D"))
- test_delete_topics_between = 1;
- else if (!strcmp(argv[i], "-P"))
- test_idempotent_producer = 1;
- else if (!strcmp(argv[i], "-Q"))
- test_quick = 1;
- else if (!strcmp(argv[i], "-r"))
- test_write_report = 1;
- else if (!strncmp(argv[i], "-R", 2)) {
- test_rusage = 1;
- test_concurrent_max = 1;
- if (strlen(argv[i]) > strlen("-R")) {
- test_rusage_cpu_calibration =
- strtod(argv[i] + 2, NULL);
- if (test_rusage_cpu_calibration < 0.00001) {
- fprintf(stderr,
- "%% Invalid CPU calibration "
- "value: %s\n",
- argv[i] + 2);
- exit(1);
- }
- }
- } else if (*argv[i] != '-')
- tests_to_run = argv[i];
- else {
- printf(
- "Unknown option: %s\n"
- "\n"
- "Usage: %s [options] [<test-match-substr>]\n"
- "Options:\n"
- " -p<N> Run N tests in parallel\n"
- " -l/-L Only/dont run local tests (no broker "
- "needed)\n"
- " -k/-K Only/dont run tests with known issues\n"
- " -E Don't run sockem tests\n"
- " -a Assert on failures\n"
- " -r Write test_report_...json file.\n"
- " -S Dont show test summary\n"
- " -s <scenario> Test scenario.\n"
- " -V <N.N.N.N> Broker version.\n"
- " -D Delete all test topics between each test "
- "(-p1) or after all tests\n"
- " -P Run all tests with "
- "`enable.idempotency=true`\n"
- " -Q Run tests in quick mode: faster tests, "
- "fewer iterations, less data.\n"
- " -R Check resource usage thresholds.\n"
- " -R<C> Check resource usage thresholds but "
- "adjust CPU thresholds by C (float):\n"
- " C < 1.0: CPU is faster than base line "
- "system.\n"
- " C > 1.0: CPU is slower than base line "
- "system.\n"
- " E.g. -R2.5 = CPU is 2.5x slower than "
- "base line system.\n"
- "\n"
- "Environment variables:\n"
- " TESTS - substring matched test to run (e.g., "
- "0033)\n"
- " SUBTESTS - substring matched subtest to run "
- "(e.g., n_wildcard)\n"
- " TEST_KAFKA_VERSION - broker version (e.g., "
- "0.9.0.1)\n"
- " TEST_SCENARIO - Test scenario\n"
- " TEST_LEVEL - Test verbosity level\n"
- " TEST_MODE - bare, helgrind, valgrind\n"
- " TEST_SEED - random seed\n"
- " RDKAFKA_TEST_CONF - test config file "
- "(test.conf)\n"
- " KAFKA_PATH - Path to kafka source dir\n"
- " ZK_ADDRESS - Zookeeper address\n"
- "\n",
- argv[i], argv[0]);
- exit(1);
- }
- }
-
- TEST_SAY("Git version: %s\n", test_git_version);
-
- if (!strcmp(test_broker_version_str, "trunk"))
- test_broker_version_str = "9.9.9.9"; /* for now */
-
- d = 0;
- if (sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d) <
- 3) {
- printf(
- "%% Expected broker version to be in format "
- "N.N.N (N=int), not %s\n",
- test_broker_version_str);
- exit(1);
- }
- test_broker_version = TEST_BRKVER(a, b, c, d);
- TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", test_broker_version_str,
- TEST_BRKVER_X(test_broker_version, 0),
- TEST_BRKVER_X(test_broker_version, 1),
- TEST_BRKVER_X(test_broker_version, 2),
- TEST_BRKVER_X(test_broker_version, 3));
-
- /* Set up fake "<MAIN>" test for all operations performed in
- * the main thread rather than the per-test threads.
- * A nice side effect is that we get timing and status for main as well. */
- test_curr = &tests[0];
- test_curr->state = TEST_PASSED;
- test_curr->start = test_clock();
-
- if (test_on_ci) {
- TEST_LOCK();
- test_timeout_multiplier += 2;
- TEST_UNLOCK();
- }
-
- if (!strcmp(test_mode, "helgrind") || !strcmp(test_mode, "drd")) {
- TEST_LOCK();
- test_timeout_multiplier += 5;
- TEST_UNLOCK();
- } else if (!strcmp(test_mode, "valgrind")) {
- TEST_LOCK();
- test_timeout_multiplier += 3;
- TEST_UNLOCK();
- }
-
- /* Broker version 0.9 and api.version.request=true (which is default)
- * will cause a 10s stall per connection. Instead of fixing
- * that for each affected API in every test we increase the timeout
- * multiplier accordingly instead. The typical consume timeout is 5
- * seconds, so a multiplier of 3 should be good. */
- if ((test_broker_version & 0xffff0000) == 0x00090000)
- test_timeout_multiplier += 3;
-
- if (test_concurrent_max > 1)
- test_timeout_multiplier += (double)test_concurrent_max / 3;
-
- TEST_SAY("Tests to run : %s\n", tests_to_run ? tests_to_run : "all");
- if (subtests_to_run)
- TEST_SAY("Sub tests : %s\n", subtests_to_run);
- if (tests_to_skip)
- TEST_SAY("Skip tests : %s\n", tests_to_skip);
- TEST_SAY("Test mode : %s%s%s\n", test_quick ? "quick, " : "",
- test_mode, test_on_ci ? ", CI" : "");
- TEST_SAY("Test scenario: %s\n", test_scenario);
- TEST_SAY("Test filter : %s\n", (test_flags & TEST_F_LOCAL)
- ? "local tests only"
- : "no filter");
- TEST_SAY("Test timeout multiplier: %.1f\n", test_timeout_multiplier);
- TEST_SAY("Action on test failure: %s\n",
- test_assert_on_fail ? "assert crash" : "continue other tests");
- if (test_rusage)
- TEST_SAY("Test rusage : yes (%.2fx CPU calibration)\n",
- test_rusage_cpu_calibration);
- if (test_idempotent_producer)
- TEST_SAY("Test Idempotent Producer: enabled\n");
-
- {
- char cwd[512], *pcwd;
-#ifdef _WIN32
- pcwd = _getcwd(cwd, sizeof(cwd) - 1);
-#else
- pcwd = getcwd(cwd, sizeof(cwd) - 1);
-#endif
- if (pcwd)
- TEST_SAY("Current directory: %s\n", cwd);
- }
-
- test_timeout_set(30);
-
- TIMING_START(&t_all, "ALL-TESTS");
-
- /* Run tests */
- run_tests(argc, argv);
-
- TEST_LOCK();
- while (tests_running_cnt > 0 && !test_exit) {
- struct test *test;
-
- if (!test_quick && test_level >= 2) {
- TEST_SAY("%d test(s) running:", tests_running_cnt);
-
- for (test = tests; test->name; test++) {
- if (test->state != TEST_RUNNING)
- continue;
-
- TEST_SAY0(" %s", test->name);
- }
-
- TEST_SAY0("\n");
- }
-
- check_test_timeouts();
-
- TEST_UNLOCK();
-
- if (test_quick)
- rd_usleep(200 * 1000, NULL);
- else
- rd_sleep(1);
- TEST_LOCK();
- }
-
- TIMING_STOP(&t_all);
-
- test_curr = &tests[0];
- test_curr->duration = test_clock() - test_curr->start;
-
- TEST_UNLOCK();
-
- if (test_delete_topics_between)
- test_delete_all_test_topics(60 * 1000);
-
- r = test_summary(1 /*lock*/) ? 1 : 0;
-
- /* Wait for everything to be cleaned up since broker destroys are
- * handled in its own thread. */
- test_wait_exit(0);
-
- /* If we haven't failed at this point then
- * no threads were leaked */
- if (r == 0)
- TEST_SAY("\n============== ALL TESTS PASSED ==============\n");
-
- test_cleanup();
-
- if (r > 0)
- TEST_FAIL("%d test(s) failed, see previous errors", r);
-
- return r;
-}
-
-
-
-/******************************************************************************
- *
- * Helpers
- *
- ******************************************************************************/
-
-void test_dr_msg_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque) {
- int *remainsp = rkmessage->_private;
- static const char *status_names[] = {
- [RD_KAFKA_MSG_STATUS_NOT_PERSISTED] = "NotPersisted",
- [RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED] = "PossiblyPersisted",
- [RD_KAFKA_MSG_STATUS_PERSISTED] = "Persisted"};
-
- TEST_SAYL(4,
- "Delivery report: %s (%s) to %s [%" PRId32
- "] "
- "at offset %" PRId64 " latency %.2fms\n",
- rd_kafka_err2str(rkmessage->err),
- status_names[rd_kafka_message_status(rkmessage)],
- rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition,
- rkmessage->offset,
- (float)rd_kafka_message_latency(rkmessage) / 1000.0);
-
- if (!test_curr->produce_sync) {
- if (!test_curr->ignore_dr_err &&
- rkmessage->err != test_curr->exp_dr_err)
- TEST_FAIL("Message delivery (to %s [%" PRId32
- "]) "
- "failed: expected %s, got %s",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition,
- rd_kafka_err2str(test_curr->exp_dr_err),
- rd_kafka_err2str(rkmessage->err));
-
- if ((int)test_curr->exp_dr_status != -1) {
- rd_kafka_msg_status_t status =
- rd_kafka_message_status(rkmessage);
-
- TEST_ASSERT(status == test_curr->exp_dr_status,
- "Expected message status %s, not %s",
- status_names[test_curr->exp_dr_status],
- status_names[status]);
- }
-
- /* Add message to msgver */
- if (!rkmessage->err && test_curr->dr_mv)
- test_msgver_add_msg(rk, test_curr->dr_mv, rkmessage);
- }
-
- if (remainsp) {
- TEST_ASSERT(*remainsp > 0,
- "Too many messages delivered (remains %i)",
- *remainsp);
-
- (*remainsp)--;
- }
-
- if (test_curr->produce_sync)
- test_curr->produce_sync_err = rkmessage->err;
-}
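-
-/* Typical wiring (sketch, mirroring test_produce_msgs_nowait() below):
- * register the callback and pass a remaining-message counter as the
- * per-message opaque; each delivery report decrements it:
- *
- *   int remains = 0;
- *   rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- *   ...
- *   rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY,
- *                    buf, size, key, key_len, &remains);
- *   remains++;
- */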
-
-
-rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf) {
- rd_kafka_t *rk;
- char errstr[512];
-
- if (!conf) {
- test_conf_init(&conf, NULL, 0);
-#if WITH_SOCKEM
- if (*test_sockem_conf)
- test_socket_enable(conf);
-#endif
- } else {
- if (!strcmp(test_conf_get(conf, "client.id"), "rdkafka"))
- test_conf_set(conf, "client.id", test_curr->name);
- }
-
-
-
- /* Create kafka instance */
- rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr));
- if (!rk)
- TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);
-
- TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));
-
- return rk;
-}
-
-
-rd_kafka_t *test_create_producer(void) {
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, NULL, 0);
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- return test_create_handle(RD_KAFKA_PRODUCER, conf);
-}
-
-
-/**
- * Create topic_t object with va-arg list as key-value config pairs
- * terminated by NULL.
- */
-rd_kafka_topic_t *
-test_create_topic_object(rd_kafka_t *rk, const char *topic, ...) {
- rd_kafka_topic_t *rkt;
- rd_kafka_topic_conf_t *topic_conf;
- va_list ap;
- const char *name, *val;
-
- test_conf_init(NULL, &topic_conf, 0);
-
- va_start(ap, topic);
- while ((name = va_arg(ap, const char *)) &&
- (val = va_arg(ap, const char *))) {
- test_topic_conf_set(topic_conf, name, val);
- }
- va_end(ap);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- return rkt;
-}
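-
-/* Example (sketch): create a topic object with per-topic config
- * overrides, terminating the key/value vararg list with NULL:
- *
- *   rd_kafka_topic_t *rkt = test_create_topic_object(
- *       rk, topic, "message.timeout.ms", "5000", NULL);
- */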
-
-
-rd_kafka_topic_t *
-test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...) {
- rd_kafka_topic_t *rkt;
- rd_kafka_topic_conf_t *topic_conf;
- char errstr[512];
- va_list ap;
- const char *name, *val;
-
- test_conf_init(NULL, &topic_conf, 0);
-
- va_start(ap, topic);
- while ((name = va_arg(ap, const char *)) &&
- (val = va_arg(ap, const char *))) {
- if (rd_kafka_topic_conf_set(topic_conf, name, val, errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("Conf failed: %s\n", errstr);
- }
- va_end(ap);
-
- /* Make sure all replicas are in-sync after producing
- * so that the consume test won't fail. */
- rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
- errstr, sizeof(errstr));
-
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- return rkt;
-}
-
-
-
-/**
- * Produces \p cnt messages and returns immediately.
- * Does not wait for delivery.
- * \p msgcounterp is incremented for each produced message and passed
- * as \p msg_opaque, which is later used in test_dr_msg_cb to decrement
- * the counter on delivery.
- *
- * If \p payload is NULL the message key and payload will be formatted
- * according to the standard test format, otherwise the key will be NULL
- * and \p payload sent as the message payload.
- *
- * The default message size is 128 bytes; if \p size is non-zero and
- * \p payload is NULL, a message size of \p size will be used.
- */
-void test_produce_msgs_nowait(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size,
- int msgrate,
- int *msgcounterp) {
- int msg_id;
- test_timing_t t_all, t_poll;
- char key[128];
- void *buf;
- int64_t tot_bytes = 0;
- int64_t tot_time_poll = 0;
- int64_t per_msg_wait = 0;
-
- if (msgrate > 0)
- per_msg_wait = 1000000 / (int64_t)msgrate;
-
-
- if (payload)
- buf = (void *)payload;
- else {
- if (size == 0)
- size = 128;
- buf = calloc(1, size);
- }
-
- TEST_SAY("Produce to %s [%" PRId32 "]: messages #%d..%d\n",
- rd_kafka_topic_name(rkt), partition, msg_base, msg_base + cnt);
-
- TIMING_START(&t_all, "PRODUCE");
- TIMING_START(&t_poll, "SUM(POLL)");
-
- for (msg_id = msg_base; msg_id < msg_base + cnt; msg_id++) {
- int wait_time = 0;
-
- if (!payload)
- test_prepare_msg(testid, partition, msg_id, buf, size,
- key, sizeof(key));
-
-
- if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, buf,
- size, !payload ? key : NULL,
- !payload ? strlen(key) : 0,
- msgcounterp) == -1)
- TEST_FAIL(
- "Failed to produce message %i "
- "to partition %i: %s",
- msg_id, (int)partition,
- rd_kafka_err2str(rd_kafka_last_error()));
-
- (*msgcounterp)++;
- tot_bytes += size;
-
- TIMING_RESTART(&t_poll);
- do {
- if (per_msg_wait) {
- wait_time = (int)(per_msg_wait -
- TIMING_DURATION(&t_poll)) /
- 1000;
- if (wait_time < 0)
- wait_time = 0;
- }
- rd_kafka_poll(rk, wait_time);
- } while (wait_time > 0);
-
- tot_time_poll = TIMING_DURATION(&t_poll);
-
- if (TIMING_EVERY(&t_all, 3 * 1000000))
- TEST_SAY(
- "produced %3d%%: %d/%d messages "
- "(%d msgs/s, %d bytes/s)\n",
- ((msg_id - msg_base) * 100) / cnt,
- msg_id - msg_base, cnt,
- (int)((msg_id - msg_base) /
- (TIMING_DURATION(&t_all) / 1000000)),
- (int)((tot_bytes) /
- (TIMING_DURATION(&t_all) / 1000000)));
- }
-
- if (!payload)
- free(buf);
-
- t_poll.duration = tot_time_poll;
- TIMING_STOP(&t_poll);
- TIMING_STOP(&t_all);
-}
-
-/**
- * Waits for the messages tracked by counter \p msgcounterp to be delivered.
- */
-void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp) {
- test_timing_t t_all;
- int start_cnt = *msgcounterp;
-
- TIMING_START(&t_all, "PRODUCE.DELIVERY.WAIT");
-
- /* Wait for messages to be delivered */
- while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0) {
- rd_kafka_poll(rk, 10);
- if (TIMING_EVERY(&t_all, 3 * 1000000)) {
- int delivered = start_cnt - *msgcounterp;
- TEST_SAY(
- "wait_delivery: "
- "%d/%d messages delivered: %d msgs/s\n",
- delivered, start_cnt,
- (int)(delivered /
- (TIMING_DURATION(&t_all) / 1000000)));
- }
- }
-
- TIMING_STOP(&t_all);
-
- TEST_ASSERT(*msgcounterp == 0,
- "Not all messages delivered: msgcounter still at %d, "
- "outq_len %d",
- *msgcounterp, rd_kafka_outq_len(rk));
-}
-
-/**
- * Produces \p cnt messages and waits for successful delivery
- */
-void test_produce_msgs(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size) {
- int remains = 0;
-
- test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
- payload, size, 0, &remains);
-
- test_wait_delivery(rk, &remains);
-}
-
-
-/**
- * @brief Produces \p cnt messages and waits for successful delivery
- */
-void test_produce_msgs2(rd_kafka_t *rk,
- const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size) {
- int remains = 0;
- rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL);
-
- test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
- payload, size, 0, &remains);
-
- test_wait_delivery(rk, &remains);
-
- rd_kafka_topic_destroy(rkt);
-}
-
-/**
- * @brief Produces \p cnt messages without waiting for delivery.
- */
-void test_produce_msgs2_nowait(rd_kafka_t *rk,
- const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size,
- int *remainsp) {
- rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL);
-
- test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
- payload, size, 0, remainsp);
-
- rd_kafka_topic_destroy(rkt);
-}
-
-
-/**
- * Produces \p cnt messages at a rate of \p msgrate msgs/s and waits for
- * successful delivery.
- */
-void test_produce_msgs_rate(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size,
- int msgrate) {
- int remains = 0;
-
- test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
- payload, size, msgrate, &remains);
-
- test_wait_delivery(rk, &remains);
-}
-
-
-
-/**
- * Create a producer, produce \p msgcnt messages to \p topic \p partition,
- * destroy the producer, and return the testid used.
- */
-uint64_t test_produce_msgs_easy_size(const char *topic,
- uint64_t testid,
- int32_t partition,
- int msgcnt,
- size_t size) {
- rd_kafka_t *rk;
- rd_kafka_topic_t *rkt;
- test_timing_t t_produce;
-
- if (!testid)
- testid = test_id_generate();
- rk = test_create_producer();
- rkt = test_create_producer_topic(rk, topic, NULL);
-
- TIMING_START(&t_produce, "PRODUCE");
- test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, size);
- TIMING_STOP(&t_produce);
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(rk);
-
- return testid;
-}
-
-rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition) {
- test_curr->produce_sync = 1;
- test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, 0);
- test_curr->produce_sync = 0;
- return test_curr->produce_sync_err;
-}
-
-
-/**
- * @brief Easy produce function.
- *
- * @param ... is a NULL-terminated list of key, value config property pairs.
- */
-void test_produce_msgs_easy_v(const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- size_t size,
- ...) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *p;
- rd_kafka_topic_t *rkt;
- va_list ap;
- const char *key, *val;
-
- test_conf_init(&conf, NULL, 0);
-
- va_start(ap, size);
- while ((key = va_arg(ap, const char *)) &&
- (val = va_arg(ap, const char *)))
- test_conf_set(conf, key, val);
- va_end(ap);
-
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- rkt = test_create_producer_topic(p, topic, NULL);
-
- test_produce_msgs(p, rkt, testid, partition, msg_base, cnt, NULL, size);
-
- rd_kafka_topic_destroy(rkt);
- rd_kafka_destroy(p);
-}
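-
-/*
- * Usage sketch for the varargs helper above (the topic name and testid
- * are assumed to come from the surrounding test):
- *
- * @code
- *   // Produce 100 messages to partition 0 with an idempotent producer.
- *   // The key,value config pairs must be terminated with a NULL key.
- *   test_produce_msgs_easy_v(topic, testid, 0, 0, 100, 0,
- *                            "enable.idempotence", "true",
- *                            "linger.ms", "5",
- *                            NULL);
- * @endcode
- */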
-
-
-/**
- * @brief Produce messages to multiple topic-partitions.
- *
- * @param ... is a va-arg list of tuples:
- *        const char *topic
- *        int32_t partition (or UA)
- *        int msg_base
- *        int msg_cnt
- *
- * Terminate the list with a NULL topic.
- */
-void test_produce_msgs_easy_multi(uint64_t testid, ...) {
- rd_kafka_conf_t *conf;
- rd_kafka_t *p;
- va_list ap;
- const char *topic;
- int msgcounter = 0;
-
- test_conf_init(&conf, NULL, 0);
-
- rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
-
- p = test_create_handle(RD_KAFKA_PRODUCER, conf);
-
- va_start(ap, testid);
- while ((topic = va_arg(ap, const char *))) {
- int32_t partition = va_arg(ap, int32_t);
- int msg_base = va_arg(ap, int);
- int msg_cnt = va_arg(ap, int);
- rd_kafka_topic_t *rkt;
-
- rkt = test_create_producer_topic(p, topic, NULL);
-
- test_produce_msgs_nowait(p, rkt, testid, partition, msg_base,
- msg_cnt, NULL, 0, 0, &msgcounter);
-
- rd_kafka_topic_destroy(rkt);
- }
- va_end(ap);
-
- test_flush(p, tmout_multip(10 * 1000));
-
- rd_kafka_destroy(p);
-}
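-
-/*
- * Usage sketch for the multi-topic helper above (the topic names are
- * assumptions for illustration):
- *
- * @code
- *   // Produce 50 messages each to two topic-partitions in a single call.
- *   // Each tuple is (topic, partition, msg_base, msg_cnt); the list is
- *   // terminated with a NULL topic.
- *   test_produce_msgs_easy_multi(testid,
- *                                "topic_a", 0, 0, 50,
- *                                "topic_b", RD_KAFKA_PARTITION_UA, 50, 50,
- *                                NULL);
- * @endcode
- */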
-
-
-
-/**
- * @brief A standard incremental rebalance callback.
- */
-void test_incremental_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
- TEST_SAY("%s: incremental rebalance: %s: %d partition(s)%s\n",
- rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt,
- rd_kafka_assignment_lost(rk) ? ", assignment lost" : "");
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- test_consumer_incremental_assign("rebalance_cb", rk, parts);
- break;
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- test_consumer_incremental_unassign("rebalance_cb", rk, parts);
- break;
- default:
- TEST_FAIL("Unknown rebalance event: %s",
- rd_kafka_err2name(err));
- break;
- }
-}
-
-/**
- * @brief A standard rebalance callback.
- */
-void test_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque) {
-
- if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
- test_incremental_rebalance_cb(rk, err, parts, opaque);
- return;
- }
-
- TEST_SAY("%s: Rebalance: %s: %d partition(s)\n", rd_kafka_name(rk),
- rd_kafka_err2name(err), parts->cnt);
-
- switch (err) {
- case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
- test_consumer_assign("assign", rk, parts);
- break;
- case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
- test_consumer_unassign("unassign", rk);
- break;
- default:
- TEST_FAIL("Unknown rebalance event: %s",
- rd_kafka_err2name(err));
- break;
- }
-}
-
-
-
-rd_kafka_t *test_create_consumer(
- const char *group_id,
- void (*rebalance_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque),
- rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *default_topic_conf) {
- rd_kafka_t *rk;
- char tmp[64];
-
- if (!conf)
- test_conf_init(&conf, NULL, 0);
-
- if (group_id) {
- test_conf_set(conf, "group.id", group_id);
-
- rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms);
- test_conf_set(conf, "session.timeout.ms", tmp);
-
- if (rebalance_cb)
- rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
- } else {
- TEST_ASSERT(!rebalance_cb);
- }
-
- if (default_topic_conf)
- rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf);
-
- /* Create kafka instance */
- rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
-
- if (group_id)
- rd_kafka_poll_set_consumer(rk);
-
- return rk;
-}
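-
-/*
- * A minimal sketch of wiring the standard rebalance callback into a new
- * consumer (the group id and topic name are assumptions for illustration):
- *
- * @code
- *   rd_kafka_t *c = test_create_consumer("mygroup", test_rebalance_cb,
- *                                        NULL, NULL);
- *
- *   test_consumer_subscribe(c, "mytopic");
- *   // ... poll for messages, then clean up:
- *   test_consumer_close(c);
- *   rd_kafka_destroy(c);
- * @endcode
- */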
-
-rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk,
- const char *topic) {
- rd_kafka_topic_t *rkt;
- rd_kafka_topic_conf_t *topic_conf;
-
- test_conf_init(NULL, &topic_conf, 0);
-
- rkt = rd_kafka_topic_new(rk, topic, topic_conf);
- if (!rkt)
- TEST_FAIL("Failed to create topic: %s\n",
- rd_kafka_err2str(rd_kafka_last_error()));
-
- return rkt;
-}
-
-
-void test_consumer_start(const char *what,
- rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t start_offset) {
-
- TEST_SAY("%s: consumer_start: %s [%" PRId32 "] at offset %" PRId64 "\n",
- what, rd_kafka_topic_name(rkt), partition, start_offset);
-
- if (rd_kafka_consume_start(rkt, partition, start_offset) == -1)
- TEST_FAIL("%s: consume_start failed: %s\n", what,
- rd_kafka_err2str(rd_kafka_last_error()));
-}
-
-void test_consumer_stop(const char *what,
- rd_kafka_topic_t *rkt,
- int32_t partition) {
-
- TEST_SAY("%s: consumer_stop: %s [%" PRId32 "]\n", what,
- rd_kafka_topic_name(rkt), partition);
-
- if (rd_kafka_consume_stop(rkt, partition) == -1)
- TEST_FAIL("%s: consume_stop failed: %s\n", what,
- rd_kafka_err2str(rd_kafka_last_error()));
-}
-
-void test_consumer_seek(const char *what,
- rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t offset) {
- int err;
-
- TEST_SAY("%s: consumer_seek: %s [%" PRId32 "] to offset %" PRId64 "\n",
- what, rd_kafka_topic_name(rkt), partition, offset);
-
- if ((err = rd_kafka_seek(rkt, partition, offset, 2000)))
- TEST_FAIL("%s: consume_seek(%s, %" PRId32 ", %" PRId64
- ") "
- "failed: %s\n",
- what, rd_kafka_topic_name(rkt), partition, offset,
- rd_kafka_err2str(err));
-}
-
-
-
-/**
- * Returns offset of the last message consumed
- */
-int64_t test_consume_msgs(const char *what,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int64_t offset,
- int exp_msg_base,
- int exp_cnt,
- int parse_fmt) {
- int cnt = 0;
- int msg_next = exp_msg_base;
- int fails = 0;
- int64_t offset_last = -1;
- int64_t tot_bytes = 0;
- test_timing_t t_first, t_all;
-
- TEST_SAY("%s: consume_msgs: %s [%" PRId32
- "]: expect msg #%d..%d "
- "at offset %" PRId64 "\n",
- what, rd_kafka_topic_name(rkt), partition, exp_msg_base,
- exp_msg_base + exp_cnt, offset);
-
- if (offset != TEST_NO_SEEK) {
- rd_kafka_resp_err_t err;
- test_timing_t t_seek;
-
- TIMING_START(&t_seek, "SEEK");
- if ((err = rd_kafka_seek(rkt, partition, offset, 5000)))
- TEST_FAIL("%s: consume_msgs: %s [%" PRId32
- "]: "
- "seek to %" PRId64 " failed: %s\n",
- what, rd_kafka_topic_name(rkt), partition,
- offset, rd_kafka_err2str(err));
- TIMING_STOP(&t_seek);
- TEST_SAY("%s: seeked to offset %" PRId64 "\n", what, offset);
- }
-
- TIMING_START(&t_first, "FIRST MSG");
- TIMING_START(&t_all, "ALL MSGS");
-
- while (cnt < exp_cnt) {
- rd_kafka_message_t *rkmessage;
- int msg_id;
-
- rkmessage =
- rd_kafka_consume(rkt, partition, tmout_multip(5000));
-
- if (TIMING_EVERY(&t_all, 3 * 1000000))
- TEST_SAY(
- "%s: "
- "consumed %3d%%: %d/%d messages "
- "(%d msgs/s, %d bytes/s)\n",
- what, cnt * 100 / exp_cnt, cnt, exp_cnt,
- (int)(cnt / (TIMING_DURATION(&t_all) / 1000000)),
- (int)(tot_bytes /
- (TIMING_DURATION(&t_all) / 1000000)));
-
- if (!rkmessage)
- TEST_FAIL("%s: consume_msgs: %s [%" PRId32
- "]: "
- "expected msg #%d (%d/%d): timed out\n",
- what, rd_kafka_topic_name(rkt), partition,
- msg_next, cnt, exp_cnt);
-
- if (rkmessage->err)
- TEST_FAIL("%s: consume_msgs: %s [%" PRId32
- "]: "
- "expected msg #%d (%d/%d): got error: %s\n",
- what, rd_kafka_topic_name(rkt), partition,
- msg_next, cnt, exp_cnt,
- rd_kafka_err2str(rkmessage->err));
-
- if (cnt == 0)
- TIMING_STOP(&t_first);
-
- if (parse_fmt)
- test_msg_parse(testid, rkmessage, partition, &msg_id);
- else
- msg_id = 0;
-
- if (test_level >= 3)
- TEST_SAY("%s: consume_msgs: %s [%" PRId32
- "]: "
- "got msg #%d at offset %" PRId64
- " (expect #%d at offset %" PRId64 ")\n",
- what, rd_kafka_topic_name(rkt), partition,
- msg_id, rkmessage->offset, msg_next,
- offset >= 0 ? offset + cnt : -1);
-
- if (parse_fmt && msg_id != msg_next) {
- TEST_SAY("%s: consume_msgs: %s [%" PRId32
- "]: "
- "expected msg #%d (%d/%d): got msg #%d\n",
- what, rd_kafka_topic_name(rkt), partition,
- msg_next, cnt, exp_cnt, msg_id);
- fails++;
- }
-
- cnt++;
- tot_bytes += rkmessage->len;
- msg_next++;
- offset_last = rkmessage->offset;
-
- rd_kafka_message_destroy(rkmessage);
- }
-
- TIMING_STOP(&t_all);
-
- if (fails)
- TEST_FAIL("%s: consume_msgs: %s [%" PRId32 "]: %d failures\n",
- what, rd_kafka_topic_name(rkt), partition, fails);
-
- TEST_SAY("%s: consume_msgs: %s [%" PRId32
- "]: "
-		 "%d/%d messages consumed successfully\n",
- what, rd_kafka_topic_name(rkt), partition, cnt, exp_cnt);
- return offset_last;
-}
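-
-/*
- * Sketch of the legacy (simple consumer) verification flow using the
- * helpers above (assuming \c testid identifies 100 previously produced
- * messages in partition 0 of a hypothetical "mytopic"):
- *
- * @code
- *   rd_kafka_t *c       = test_create_consumer(NULL, NULL, NULL, NULL);
- *   rd_kafka_topic_t *t = test_create_consumer_topic(c, "mytopic");
- *
- *   test_consumer_start("verify", t, 0, RD_KAFKA_OFFSET_BEGINNING);
- *   // partition 0, no seek, msg_base 0, expect 100 msgs, parse format.
- *   test_consume_msgs("verify", t, testid, 0, TEST_NO_SEEK, 0, 100, 1);
- *   test_consumer_stop("verify", t, 0);
- *
- *   rd_kafka_topic_destroy(t);
- *   rd_kafka_destroy(c);
- * @endcode
- */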
-
-
-/**
- * Create a high-level consumer subscribing to \p topic from BEGINNING,
- * expecting \p exp_msgcnt messages with a matching \p testid.
- * Destroys the consumer when done.
- *
- * @param txn If true, isolation.level is set to read_committed.
- * @param partition If -1 the topic will be subscribed to, otherwise the
- * single partition will be assigned immediately.
- *
- * If \p group_id is NULL a new unique group is generated
- */
-void test_consume_msgs_easy_mv0(const char *group_id,
- const char *topic,
- rd_bool_t txn,
- int32_t partition,
- uint64_t testid,
- int exp_eofcnt,
- int exp_msgcnt,
- rd_kafka_topic_conf_t *tconf,
- test_msgver_t *mv) {
- rd_kafka_t *rk;
- char grpid0[64];
- rd_kafka_conf_t *conf;
-
- test_conf_init(&conf, tconf ? NULL : &tconf, 0);
-
- if (!group_id)
- group_id = test_str_id_generate(grpid0, sizeof(grpid0));
-
- if (txn)
- test_conf_set(conf, "isolation.level", "read_committed");
-
- test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
- if (exp_eofcnt != -1)
- test_conf_set(conf, "enable.partition.eof", "true");
- rk = test_create_consumer(group_id, NULL, conf, tconf);
-
- rd_kafka_poll_set_consumer(rk);
-
- if (partition == -1) {
- TEST_SAY(
- "Subscribing to topic %s in group %s "
- "(expecting %d msgs with testid %" PRIu64 ")\n",
- topic, group_id, exp_msgcnt, testid);
-
- test_consumer_subscribe(rk, topic);
- } else {
- rd_kafka_topic_partition_list_t *plist;
-
- TEST_SAY("Assign topic %s [%" PRId32
- "] in group %s "
- "(expecting %d msgs with testid %" PRIu64 ")\n",
- topic, partition, group_id, exp_msgcnt, testid);
-
- plist = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(plist, topic, partition);
- test_consumer_assign("consume_easy_mv", rk, plist);
- rd_kafka_topic_partition_list_destroy(plist);
- }
-
- /* Consume messages */
- test_consumer_poll("consume.easy", rk, testid, exp_eofcnt, -1,
- exp_msgcnt, mv);
-
- test_consumer_close(rk);
-
- rd_kafka_destroy(rk);
-}
-
-void test_consume_msgs_easy(const char *group_id,
- const char *topic,
- uint64_t testid,
- int exp_eofcnt,
- int exp_msgcnt,
- rd_kafka_topic_conf_t *tconf) {
- test_msgver_t mv;
-
- test_msgver_init(&mv, testid);
-
- test_consume_msgs_easy_mv(group_id, topic, -1, testid, exp_eofcnt,
- exp_msgcnt, tconf, &mv);
-
- test_msgver_clear(&mv);
-}
-
-
-void test_consume_txn_msgs_easy(const char *group_id,
- const char *topic,
- uint64_t testid,
- int exp_eofcnt,
- int exp_msgcnt,
- rd_kafka_topic_conf_t *tconf) {
- test_msgver_t mv;
-
- test_msgver_init(&mv, testid);
-
- test_consume_msgs_easy_mv0(group_id, topic, rd_true /*txn*/, -1, testid,
- exp_eofcnt, exp_msgcnt, tconf, &mv);
-
- test_msgver_clear(&mv);
-}
-
-
-/**
- * @brief Waits for the consumer to receive a partition assignment,
- *        polling (or sleeping) until one arrives.
- *
- * @warning This method will poll the consumer and might thus read messages.
- * Set \p do_poll to false to use a sleep rather than poll.
- */
-void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll) {
- rd_kafka_topic_partition_list_t *assignment = NULL;
- int i;
-
- while (1) {
- rd_kafka_resp_err_t err;
-
- err = rd_kafka_assignment(rk, &assignment);
- TEST_ASSERT(!err, "rd_kafka_assignment() failed: %s",
- rd_kafka_err2str(err));
-
- if (assignment->cnt > 0)
- break;
-
- rd_kafka_topic_partition_list_destroy(assignment);
-
- if (do_poll)
- test_consumer_poll_once(rk, NULL, 1000);
- else
- rd_usleep(1000 * 1000, NULL);
- }
-
- TEST_SAY("%s: Assignment (%d partition(s)): ", rd_kafka_name(rk),
- assignment->cnt);
- for (i = 0; i < assignment->cnt; i++)
- TEST_SAY0("%s%s[%" PRId32 "]", i == 0 ? "" : ", ",
- assignment->elems[i].topic,
- assignment->elems[i].partition);
- TEST_SAY0("\n");
-
- rd_kafka_topic_partition_list_destroy(assignment);
-}
-
-
-/**
- * @brief Verify that the consumer's assignment matches the expected assignment.
- *
- * The va-list is a NULL-terminated list of (const char *topic, int partition)
- * tuples.
- *
- * Fails the test immediately on mismatch if \p fail_immediately is true,
- * otherwise the failure is deferred.
- */
-void test_consumer_verify_assignment0(const char *func,
- int line,
- rd_kafka_t *rk,
- int fail_immediately,
- ...) {
- va_list ap;
- int cnt = 0;
- const char *topic;
- rd_kafka_topic_partition_list_t *assignment;
- rd_kafka_resp_err_t err;
- int i;
-
- if ((err = rd_kafka_assignment(rk, &assignment)))
- TEST_FAIL("%s:%d: Failed to get assignment for %s: %s", func,
- line, rd_kafka_name(rk), rd_kafka_err2str(err));
-
- TEST_SAY("%s assignment (%d partition(s)):\n", rd_kafka_name(rk),
- assignment->cnt);
- for (i = 0; i < assignment->cnt; i++)
- TEST_SAY(" %s [%" PRId32 "]\n", assignment->elems[i].topic,
- assignment->elems[i].partition);
-
- va_start(ap, fail_immediately);
- while ((topic = va_arg(ap, const char *))) {
- int partition = va_arg(ap, int);
- cnt++;
-
- if (!rd_kafka_topic_partition_list_find(assignment, topic,
- partition))
- TEST_FAIL_LATER(
- "%s:%d: Expected %s [%d] not found in %s's "
- "assignment (%d partition(s))",
- func, line, topic, partition, rd_kafka_name(rk),
- assignment->cnt);
- }
- va_end(ap);
-
- if (cnt != assignment->cnt)
- TEST_FAIL_LATER(
- "%s:%d: "
- "Expected %d assigned partition(s) for %s, not %d",
- func, line, cnt, rd_kafka_name(rk), assignment->cnt);
-
- if (fail_immediately)
- TEST_LATER_CHECK();
-
- rd_kafka_topic_partition_list_destroy(assignment);
-}
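-
-/*
- * Usage sketch, via the test_consumer_verify_assignment() convenience
- * wrapper assumed to be provided by test.h (topic names are illustrative):
- *
- * @code
- *   test_consumer_wait_assignment(c, rd_true);
- *   // Expect exactly these two partitions, failing immediately on
- *   // mismatch; the (topic, partition) tuples end with a NULL topic.
- *   test_consumer_verify_assignment(c, 1,
- *                                   "topic_a", 0,
- *                                   "topic_a", 1,
- *                                   NULL);
- * @endcode
- */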
-
-
-
-/**
- * @brief Start subscribing to \p topic.
- */
-void test_consumer_subscribe(rd_kafka_t *rk, const char *topic) {
- rd_kafka_topic_partition_list_t *topics;
- rd_kafka_resp_err_t err;
-
- topics = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);
-
- err = rd_kafka_subscribe(rk, topics);
- if (err)
- TEST_FAIL("%s: Failed to subscribe to %s: %s\n",
- rd_kafka_name(rk), topic, rd_kafka_err2str(err));
-
- rd_kafka_topic_partition_list_destroy(topics);
-}
-
-
-void test_consumer_assign(const char *what,
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_resp_err_t err;
- test_timing_t timing;
-
- TIMING_START(&timing, "ASSIGN.PARTITIONS");
- err = rd_kafka_assign(rk, partitions);
- TIMING_STOP(&timing);
- if (err)
- TEST_FAIL("%s: failed to assign %d partition(s): %s\n", what,
- partitions->cnt, rd_kafka_err2str(err));
- else
- TEST_SAY("%s: assigned %d partition(s)\n", what,
- partitions->cnt);
-}
-
-
-void test_consumer_incremental_assign(
- const char *what,
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_error_t *error;
- test_timing_t timing;
-
- TIMING_START(&timing, "INCREMENTAL.ASSIGN.PARTITIONS");
- error = rd_kafka_incremental_assign(rk, partitions);
- TIMING_STOP(&timing);
- if (error) {
- TEST_FAIL(
- "%s: incremental assign of %d partition(s) failed: "
- "%s",
- what, partitions->cnt, rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
- } else
- TEST_SAY("%s: incremental assign of %d partition(s) done\n",
- what, partitions->cnt);
-}
-
-
-void test_consumer_unassign(const char *what, rd_kafka_t *rk) {
- rd_kafka_resp_err_t err;
- test_timing_t timing;
-
- TIMING_START(&timing, "UNASSIGN.PARTITIONS");
- err = rd_kafka_assign(rk, NULL);
- TIMING_STOP(&timing);
- if (err)
- TEST_FAIL("%s: failed to unassign current partitions: %s\n",
- what, rd_kafka_err2str(err));
- else
- TEST_SAY("%s: unassigned current partitions\n", what);
-}
-
-
-void test_consumer_incremental_unassign(
- const char *what,
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *partitions) {
- rd_kafka_error_t *error;
- test_timing_t timing;
-
- TIMING_START(&timing, "INCREMENTAL.UNASSIGN.PARTITIONS");
- error = rd_kafka_incremental_unassign(rk, partitions);
- TIMING_STOP(&timing);
- if (error) {
- TEST_FAIL(
- "%s: incremental unassign of %d partition(s) "
- "failed: %s",
- what, partitions->cnt, rd_kafka_error_string(error));
- rd_kafka_error_destroy(error);
- } else
- TEST_SAY("%s: incremental unassign of %d partition(s) done\n",
- what, partitions->cnt);
-}
-
-
-/**
- * @brief Assign a single partition with an optional starting offset
- */
-void test_consumer_assign_partition(const char *what,
- rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t offset) {
- rd_kafka_topic_partition_list_t *part;
-
- part = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(part, topic, partition)->offset =
- offset;
-
- test_consumer_assign(what, rk, part);
-
- rd_kafka_topic_partition_list_destroy(part);
-}
-
-
-void test_consumer_pause_resume_partition(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- rd_bool_t pause) {
- rd_kafka_topic_partition_list_t *part;
- rd_kafka_resp_err_t err;
-
- part = rd_kafka_topic_partition_list_new(1);
- rd_kafka_topic_partition_list_add(part, topic, partition);
-
- if (pause)
- err = rd_kafka_pause_partitions(rk, part);
- else
- err = rd_kafka_resume_partitions(rk, part);
-
- TEST_ASSERT(!err, "Failed to %s %s [%" PRId32 "]: %s",
- pause ? "pause" : "resume", topic, partition,
- rd_kafka_err2str(err));
-
- rd_kafka_topic_partition_list_destroy(part);
-}
-
-
-/**
- * Message verification services
- *
- */
-
-void test_msgver_init(test_msgver_t *mv, uint64_t testid) {
- memset(mv, 0, sizeof(*mv));
- mv->testid = testid;
- /* Max warning logs before suppressing. */
- mv->log_max = (test_level + 1) * 100;
-}
-
-void test_msgver_ignore_eof(test_msgver_t *mv) {
- mv->ignore_eof = rd_true;
-}
-
-#define TEST_MV_WARN(mv, ...) \
- do { \
- if ((mv)->log_cnt++ > (mv)->log_max) \
- (mv)->log_suppr_cnt++; \
- else \
- TEST_WARN(__VA_ARGS__); \
- } while (0)
-
-
-
-static void test_mv_mvec_grow(struct test_mv_mvec *mvec, int tot_size) {
- if (tot_size <= mvec->size)
- return;
- mvec->size = tot_size;
- mvec->m = realloc(mvec->m, sizeof(*mvec->m) * mvec->size);
-}
-
-/**
- * Make sure there is room for at least \p cnt additional messages,
- * else grow mvec.
- */
-static void test_mv_mvec_reserve(struct test_mv_mvec *mvec, int cnt) {
- test_mv_mvec_grow(mvec, mvec->cnt + cnt);
-}
-
-void test_mv_mvec_init(struct test_mv_mvec *mvec, int exp_cnt) {
- TEST_ASSERT(mvec->m == NULL, "mvec not cleared");
-
- if (!exp_cnt)
- return;
-
- test_mv_mvec_grow(mvec, exp_cnt);
-}
-
-
-void test_mv_mvec_clear(struct test_mv_mvec *mvec) {
- if (mvec->m)
- free(mvec->m);
-}
-
-void test_msgver_clear(test_msgver_t *mv) {
- int i;
- for (i = 0; i < mv->p_cnt; i++) {
- struct test_mv_p *p = mv->p[i];
- free(p->topic);
- test_mv_mvec_clear(&p->mvec);
- free(p);
- }
-
- free(mv->p);
-
- test_msgver_init(mv, mv->testid);
-}
-
-struct test_mv_p *test_msgver_p_get(test_msgver_t *mv,
- const char *topic,
- int32_t partition,
- int do_create) {
- int i;
- struct test_mv_p *p;
-
- for (i = 0; i < mv->p_cnt; i++) {
- p = mv->p[i];
- if (p->partition == partition && !strcmp(p->topic, topic))
- return p;
- }
-
- if (!do_create)
- TEST_FAIL("Topic %s [%d] not found in msgver", topic,
- partition);
-
- if (mv->p_cnt == mv->p_size) {
- mv->p_size = (mv->p_size + 4) * 2;
- mv->p = realloc(mv->p, sizeof(*mv->p) * mv->p_size);
- }
-
- mv->p[mv->p_cnt++] = p = calloc(1, sizeof(*p));
-
- p->topic = rd_strdup(topic);
- p->partition = partition;
- p->eof_offset = RD_KAFKA_OFFSET_INVALID;
-
- return p;
-}
-
-
-/**
- * Add (room for a) message to the message vector.
- * Resizes the vector as needed.
- */
-static struct test_mv_m *test_mv_mvec_add(struct test_mv_mvec *mvec) {
- if (mvec->cnt == mvec->size) {
- test_mv_mvec_grow(mvec, (mvec->size ? mvec->size * 2 : 10000));
- }
-
- mvec->cnt++;
-
- return &mvec->m[mvec->cnt - 1];
-}
-
-/**
- * Returns message at index \p mi
- */
-static RD_INLINE struct test_mv_m *test_mv_mvec_get(struct test_mv_mvec *mvec,
- int mi) {
- if (mi >= mvec->cnt)
- return NULL;
- return &mvec->m[mi];
-}
-
-/**
- * @returns the message with msgid \p msgid, or NULL.
- */
-static struct test_mv_m *test_mv_mvec_find_by_msgid(struct test_mv_mvec *mvec,
- int msgid) {
- int mi;
-
- for (mi = 0; mi < mvec->cnt; mi++)
- if (mvec->m[mi].msgid == msgid)
- return &mvec->m[mi];
-
- return NULL;
-}
-
-
-/**
- * Print message list to \p fp
- */
-static RD_UNUSED void test_mv_mvec_dump(FILE *fp,
- const struct test_mv_mvec *mvec) {
- int mi;
-
- fprintf(fp, "*** Dump mvec with %d messages (capacity %d): ***\n",
- mvec->cnt, mvec->size);
- for (mi = 0; mi < mvec->cnt; mi++)
- fprintf(fp, " msgid %d, offset %" PRId64 "\n",
- mvec->m[mi].msgid, mvec->m[mi].offset);
- fprintf(fp, "*** Done ***\n");
-}
-
-static void test_mv_mvec_sort(struct test_mv_mvec *mvec,
- int (*cmp)(const void *, const void *)) {
- qsort(mvec->m, mvec->cnt, sizeof(*mvec->m), cmp);
-}
-
-
-/**
- * @brief Adds a message to the msgver service.
- *
- * @returns 1 if message is from the expected testid, else 0 (not added)
- */
-int test_msgver_add_msg00(const char *func,
- int line,
- const char *clientname,
- test_msgver_t *mv,
- uint64_t testid,
- const char *topic,
- int32_t partition,
- int64_t offset,
- int64_t timestamp,
- int32_t broker_id,
- rd_kafka_resp_err_t err,
- int msgnum) {
- struct test_mv_p *p;
- struct test_mv_m *m;
-
- if (testid != mv->testid) {
- TEST_SAYL(3,
- "%s:%d: %s: mismatching testid %" PRIu64
- " != %" PRIu64 "\n",
- func, line, clientname, testid, mv->testid);
- return 0; /* Ignore message */
- }
-
- if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF && mv->ignore_eof) {
- TEST_SAYL(3, "%s:%d: %s: ignoring EOF for %s [%" PRId32 "]\n",
- func, line, clientname, topic, partition);
- return 0; /* Ignore message */
- }
-
- p = test_msgver_p_get(mv, topic, partition, 1);
-
- if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- p->eof_offset = offset;
- return 1;
- }
-
- m = test_mv_mvec_add(&p->mvec);
-
- m->offset = offset;
- m->msgid = msgnum;
- m->timestamp = timestamp;
- m->broker_id = broker_id;
-
- if (test_level > 2) {
- TEST_SAY(
- "%s:%d: %s: "
- "Recv msg %s [%" PRId32 "] offset %" PRId64
- " msgid %d "
- "timestamp %" PRId64 " broker %" PRId32 "\n",
- func, line, clientname, p->topic, p->partition, m->offset,
- m->msgid, m->timestamp, m->broker_id);
- }
-
- mv->msgcnt++;
-
- return 1;
-}
-
-/**
- * Adds a message to the msgver service.
- *
- * Message must be a proper message or PARTITION_EOF.
- *
- * @param override_topic if non-NULL, overrides the rkmessage's topic
- * with this one.
- *
- * @returns 1 if message is from the expected testid, else 0 (not added).
- */
-int test_msgver_add_msg0(const char *func,
- int line,
- const char *clientname,
- test_msgver_t *mv,
- const rd_kafka_message_t *rkmessage,
- const char *override_topic) {
- uint64_t in_testid;
- int in_part;
- int in_msgnum = -1;
- char buf[128];
- const void *val;
- size_t valsize;
-
- if (mv->fwd)
- test_msgver_add_msg0(func, line, clientname, mv->fwd, rkmessage,
- override_topic);
-
- if (rd_kafka_message_status(rkmessage) ==
- RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
- rkmessage->err) {
- if (rkmessage->err != RD_KAFKA_RESP_ERR__PARTITION_EOF)
- return 0; /* Ignore error */
-
- in_testid = mv->testid;
-
- } else {
-
- if (!mv->msgid_hdr) {
- rd_snprintf(buf, sizeof(buf), "%.*s",
- (int)rkmessage->len,
- (char *)rkmessage->payload);
- val = buf;
- } else {
- /* msgid is in message header */
- rd_kafka_headers_t *hdrs;
-
- if (rd_kafka_message_headers(rkmessage, &hdrs) ||
- rd_kafka_header_get_last(hdrs, mv->msgid_hdr, &val,
- &valsize)) {
-				TEST_SAYL(3,
-					  "%s:%d: msgid expected in header %s "
-					  "but message at offset %" PRId64
-					  " has %s\n",
-					  func, line, mv->msgid_hdr,
-					  rkmessage->offset,
-					  hdrs ? "no such header"
-					       : "no headers");
-
- return 0;
- }
- }
-
- if (sscanf(val, "testid=%" SCNu64 ", partition=%i, msg=%i\n",
- &in_testid, &in_part, &in_msgnum) != 3)
- TEST_FAIL(
- "%s:%d: Incorrect format at offset %" PRId64 ": %s",
- func, line, rkmessage->offset, (const char *)val);
- }
-
- return test_msgver_add_msg00(
- func, line, clientname, mv, in_testid,
- override_topic ? override_topic
- : rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset,
- rd_kafka_message_timestamp(rkmessage, NULL),
- rd_kafka_message_broker_id(rkmessage), rkmessage->err, in_msgnum);
-}
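-
-/*
- * Sketch of feeding consumed messages into a msgver, assuming
- * test_msgver_add_msg() is the test.h wrapper around
- * test_msgver_add_msg0():
- *
- * @code
- *   test_msgver_t mv;
- *   test_msgver_init(&mv, testid);
- *
- *   rd_kafka_message_t *m = rd_kafka_consumer_poll(c, 1000);
- *   if (m) {
- *           if (!m->err)
- *                   test_msgver_add_msg(c, &mv, m);
- *           rd_kafka_message_destroy(m);
- *   }
- * @endcode
- */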
-
-
-
-/**
- * Verify that all messages were received in order.
- *
- * - Offsets need to occur without gaps
- * - msgids need to be increasing, but may have gaps
- *   (e.g., when using a partitioner)
- */
-static int test_mv_mvec_verify_order(test_msgver_t *mv,
- int flags,
- struct test_mv_p *p,
- struct test_mv_mvec *mvec,
- struct test_mv_vs *vs) {
- int mi;
- int fails = 0;
-
- for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) {
- struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1);
- struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
-
- if (((flags & TEST_MSGVER_BY_OFFSET) &&
- prev->offset + 1 != this->offset) ||
- ((flags & TEST_MSGVER_BY_MSGID) &&
- prev->msgid > this->msgid)) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] msg rcvidx #%d/%d: "
- "out of order (prev vs this): "
- "offset %" PRId64 " vs %" PRId64
- ", "
- "msgid %d vs %d\n",
- p ? p->topic : "*", p ? p->partition : -1,
- mi, mvec->cnt, prev->offset, this->offset,
- prev->msgid, this->msgid);
- fails++;
- } else if ((flags & TEST_MSGVER_BY_BROKER_ID) &&
- this->broker_id != vs->broker_id) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] msg rcvidx #%d/%d: "
- "broker id mismatch: expected %" PRId32
- ", not %" PRId32 "\n",
- p ? p->topic : "*", p ? p->partition : -1,
- mi, mvec->cnt, vs->broker_id,
- this->broker_id);
- fails++;
- }
- }
-
- return fails;
-}
-
-
-/**
- * @brief Verify that messages correspond to 'correct' msgver.
- */
-static int test_mv_mvec_verify_corr(test_msgver_t *mv,
- int flags,
- struct test_mv_p *p,
- struct test_mv_mvec *mvec,
- struct test_mv_vs *vs) {
- int mi;
- int fails = 0;
- struct test_mv_p *corr_p = NULL;
- struct test_mv_mvec *corr_mvec;
- int verifycnt = 0;
-
- TEST_ASSERT(vs->corr);
-
- /* Get correct mvec for comparison. */
- if (p)
- corr_p = test_msgver_p_get(vs->corr, p->topic, p->partition, 0);
- if (!corr_p) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "]: "
- "no corresponding correct partition found\n",
- p ? p->topic : "*", p ? p->partition : -1);
- return 1;
- }
-
- corr_mvec = &corr_p->mvec;
-
- for (mi = 0; mi < mvec->cnt; mi++) {
- struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
- const struct test_mv_m *corr;
-
-
- if (flags & TEST_MSGVER_SUBSET)
- corr =
- test_mv_mvec_find_by_msgid(corr_mvec, this->msgid);
- else
- corr = test_mv_mvec_get(corr_mvec, mi);
-
- if (0)
- TEST_MV_WARN(mv,
- "msg #%d: msgid %d, offset %" PRId64 "\n",
- mi, this->msgid, this->offset);
- if (!corr) {
- if (!(flags & TEST_MSGVER_SUBSET)) {
- TEST_MV_WARN(
- mv,
- " %s [%" PRId32
- "] msg rcvidx #%d/%d: "
- "out of range: correct mvec has "
- "%d messages: "
- "message offset %" PRId64 ", msgid %d\n",
- p ? p->topic : "*", p ? p->partition : -1,
- mi, mvec->cnt, corr_mvec->cnt, this->offset,
- this->msgid);
- fails++;
- }
- continue;
- }
-
- if (((flags & TEST_MSGVER_BY_OFFSET) &&
- this->offset != corr->offset) ||
- ((flags & TEST_MSGVER_BY_MSGID) &&
- this->msgid != corr->msgid) ||
- ((flags & TEST_MSGVER_BY_TIMESTAMP) &&
- this->timestamp != corr->timestamp) ||
- ((flags & TEST_MSGVER_BY_BROKER_ID) &&
- this->broker_id != corr->broker_id)) {
- TEST_MV_WARN(
- mv,
- " %s [%" PRId32
- "] msg rcvidx #%d/%d: "
- "did not match correct msg: "
- "offset %" PRId64 " vs %" PRId64
- ", "
- "msgid %d vs %d, "
- "timestamp %" PRId64 " vs %" PRId64
- ", "
- "broker %" PRId32 " vs %" PRId32 " (fl 0x%x)\n",
- p ? p->topic : "*", p ? p->partition : -1, mi,
- mvec->cnt, this->offset, corr->offset, this->msgid,
- corr->msgid, this->timestamp, corr->timestamp,
- this->broker_id, corr->broker_id, flags);
- fails++;
- } else {
- verifycnt++;
- }
- }
-
- if (verifycnt != corr_mvec->cnt && !(flags & TEST_MSGVER_SUBSET)) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "]: of %d input messages, "
- "only %d/%d matched correct messages\n",
- p ? p->topic : "*", p ? p->partition : -1,
- mvec->cnt, verifycnt, corr_mvec->cnt);
- fails++;
- }
-
- return fails;
-}
-
-
-
-static int test_mv_m_cmp_offset(const void *_a, const void *_b) {
- const struct test_mv_m *a = _a, *b = _b;
-
- return RD_CMP(a->offset, b->offset);
-}
-
-static int test_mv_m_cmp_msgid(const void *_a, const void *_b) {
- const struct test_mv_m *a = _a, *b = _b;
-
- return RD_CMP(a->msgid, b->msgid);
-}
-
-
-/**
- * Verify that there are no duplicate messages.
- *
- * - Offsets are checked
- * - msgids are checked
- *
- * * NOTE: This sorts the message (.m) array, first by offset, then by msgid
- * and leaves the message array sorted (by msgid)
- */
-static int test_mv_mvec_verify_dup(test_msgver_t *mv,
- int flags,
- struct test_mv_p *p,
- struct test_mv_mvec *mvec,
- struct test_mv_vs *vs) {
- int mi;
- int fails = 0;
- enum { _P_OFFSET, _P_MSGID } pass;
-
- for (pass = _P_OFFSET; pass <= _P_MSGID; pass++) {
-
- if (pass == _P_OFFSET) {
- if (!(flags & TEST_MSGVER_BY_OFFSET))
- continue;
- test_mv_mvec_sort(mvec, test_mv_m_cmp_offset);
- } else if (pass == _P_MSGID) {
- if (!(flags & TEST_MSGVER_BY_MSGID))
- continue;
- test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid);
- }
-
- for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) {
- struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1);
- struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
- int is_dup = 0;
-
- if (pass == _P_OFFSET)
- is_dup = prev->offset == this->offset;
- else if (pass == _P_MSGID)
- is_dup = prev->msgid == this->msgid;
-
- if (!is_dup)
- continue;
-
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] "
- "duplicate msg (prev vs this): "
- "offset %" PRId64 " vs %" PRId64
- ", "
- "msgid %d vs %d\n",
- p ? p->topic : "*", p ? p->partition : -1,
- prev->offset, this->offset, prev->msgid,
- this->msgid);
- fails++;
- }
- }
-
- return fails;
-}
-
-/**
- * @brief Verify that all messages are from the correct broker.
- */
-static int test_mv_mvec_verify_broker(test_msgver_t *mv,
- int flags,
- struct test_mv_p *p,
- struct test_mv_mvec *mvec,
- struct test_mv_vs *vs) {
- int mi;
- int fails = 0;
-
- /* Assume that the correct flag has been checked already. */
-
-
- rd_assert(flags & TEST_MSGVER_BY_BROKER_ID);
- for (mi = 0; mi < mvec->cnt; mi++) {
- struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
- if (this->broker_id != vs->broker_id) {
- TEST_MV_WARN(
- mv,
- " %s [%" PRId32
- "] broker_id check: "
- "msgid #%d (at mi %d): "
- "broker_id %" PRId32
- " is not the expected broker_id %" PRId32 "\n",
- p ? p->topic : "*", p ? p->partition : -1,
- this->msgid, mi, this->broker_id, vs->broker_id);
- fails++;
- }
- }
- return fails;
-}
-
-
-/**
- * Verify that \p mvec contains the expected range:
- * - TEST_MSGVER_BY_MSGID: msgid within \p vs->msgid_min .. \p vs->msgid_max
- * - TEST_MSGVER_BY_TIMESTAMP: timestamp within \p vs->timestamp_min .. _max
- *
- * * NOTE: TEST_MSGVER_BY_MSGID is required
- *
- * * NOTE: This sorts the message (.m) array by msgid
- * and leaves the message array sorted (by msgid)
- */
-static int test_mv_mvec_verify_range(test_msgver_t *mv,
- int flags,
- struct test_mv_p *p,
- struct test_mv_mvec *mvec,
- struct test_mv_vs *vs) {
- int mi;
- int fails = 0;
- int cnt = 0;
- int exp_cnt = vs->msgid_max - vs->msgid_min + 1;
- int skip_cnt = 0;
-
- if (!(flags & TEST_MSGVER_BY_MSGID))
- return 0;
-
- test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid);
-
- // test_mv_mvec_dump(stdout, mvec);
-
- for (mi = 0; mi < mvec->cnt; mi++) {
- struct test_mv_m *prev =
- mi ? test_mv_mvec_get(mvec, mi - 1) : NULL;
- struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
-
- if (this->msgid < vs->msgid_min) {
- skip_cnt++;
- continue;
- } else if (this->msgid > vs->msgid_max)
- break;
-
- if (flags & TEST_MSGVER_BY_TIMESTAMP) {
- if (this->timestamp < vs->timestamp_min ||
- this->timestamp > vs->timestamp_max) {
- TEST_MV_WARN(
- mv,
- " %s [%" PRId32
- "] range check: "
- "msgid #%d (at mi %d): "
- "timestamp %" PRId64
- " outside "
- "expected range %" PRId64 "..%" PRId64 "\n",
- p ? p->topic : "*", p ? p->partition : -1,
- this->msgid, mi, this->timestamp,
- vs->timestamp_min, vs->timestamp_max);
- fails++;
- }
- }
-
- if ((flags & TEST_MSGVER_BY_BROKER_ID) &&
- this->broker_id != vs->broker_id) {
- TEST_MV_WARN(
- mv,
- " %s [%" PRId32
- "] range check: "
- "msgid #%d (at mi %d): "
- "expected broker id %" PRId32 ", not %" PRId32 "\n",
- p ? p->topic : "*", p ? p->partition : -1,
- this->msgid, mi, vs->broker_id, this->broker_id);
- fails++;
- }
-
- if (cnt++ == 0) {
- if (this->msgid != vs->msgid_min) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] range check: "
- "first message #%d (at mi %d) "
- "is not first in "
- "expected range %d..%d\n",
- p ? p->topic : "*",
- p ? p->partition : -1, this->msgid,
- mi, vs->msgid_min, vs->msgid_max);
- fails++;
- }
- } else if (cnt > exp_cnt) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] range check: "
- "too many messages received (%d/%d) at "
- "msgid %d for expected range %d..%d\n",
- p ? p->topic : "*", p ? p->partition : -1,
- cnt, exp_cnt, this->msgid, vs->msgid_min,
- vs->msgid_max);
- fails++;
- }
-
- if (!prev) {
- skip_cnt++;
- continue;
- }
-
- if (prev->msgid + 1 != this->msgid) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] range check: "
- " %d message(s) missing between "
- "msgid %d..%d in expected range %d..%d\n",
- p ? p->topic : "*", p ? p->partition : -1,
- this->msgid - prev->msgid - 1,
- prev->msgid + 1, this->msgid - 1,
- vs->msgid_min, vs->msgid_max);
- fails++;
- }
- }
-
- if (cnt != exp_cnt) {
- TEST_MV_WARN(mv,
- " %s [%" PRId32
- "] range check: "
- " wrong number of messages seen, wanted %d got %d "
- "in expected range %d..%d (%d messages skipped)\n",
- p ? p->topic : "*", p ? p->partition : -1, exp_cnt,
- cnt, vs->msgid_min, vs->msgid_max, skip_cnt);
- fails++;
- }
-
- return fails;
-}
-
-
-
-/**
- * Run verifier \p f for all partitions.
- */
-#define test_mv_p_verify_f(mv, flags, f, vs) \
- test_mv_p_verify_f0(mv, flags, f, #f, vs)
-static int test_mv_p_verify_f0(test_msgver_t *mv,
- int flags,
- int (*f)(test_msgver_t *mv,
- int flags,
- struct test_mv_p *p,
- struct test_mv_mvec *mvec,
- struct test_mv_vs *vs),
- const char *f_name,
- struct test_mv_vs *vs) {
- int i;
- int fails = 0;
-
- for (i = 0; i < mv->p_cnt; i++) {
- TEST_SAY("Verifying %s [%" PRId32 "] %d msgs with %s\n",
- mv->p[i]->topic, mv->p[i]->partition,
- mv->p[i]->mvec.cnt, f_name);
- fails += f(mv, flags, mv->p[i], &mv->p[i]->mvec, vs);
- }
-
- return fails;
-}
-
-
-/**
- * Collect all messages from all topics and partitions into vs->mvec
- */
-static void test_mv_collect_all_msgs(test_msgver_t *mv, struct test_mv_vs *vs) {
- int i;
-
- for (i = 0; i < mv->p_cnt; i++) {
- struct test_mv_p *p = mv->p[i];
- int mi;
-
- test_mv_mvec_reserve(&vs->mvec, p->mvec.cnt);
- for (mi = 0; mi < p->mvec.cnt; mi++) {
- struct test_mv_m *m = test_mv_mvec_get(&p->mvec, mi);
- struct test_mv_m *m_new = test_mv_mvec_add(&vs->mvec);
- *m_new = *m;
- }
- }
-}
-
-
-/**
- * Verify that all messages (by msgid) in the range
- * msg_base..msg_base+exp_cnt were received, and received only once.
- * This works across all partitions.
- */
-static int
-test_msgver_verify_range(test_msgver_t *mv, int flags, struct test_mv_vs *vs) {
- int fails = 0;
-
- /**
- * Create temporary array to hold expected message set,
- * then traverse all topics and partitions and move matching messages
- * to that set. Then verify the message set.
- */
-
- test_mv_mvec_init(&vs->mvec, vs->exp_cnt);
-
- /* Collect all msgs into vs mvec */
- test_mv_collect_all_msgs(mv, vs);
-
- fails += test_mv_mvec_verify_range(mv, TEST_MSGVER_BY_MSGID | flags,
- NULL, &vs->mvec, vs);
- fails += test_mv_mvec_verify_dup(mv, TEST_MSGVER_BY_MSGID | flags, NULL,
- &vs->mvec, vs);
-
- test_mv_mvec_clear(&vs->mvec);
-
- return fails;
-}
-
-
-/**
- * Verify that \p exp_cnt messages were received for \p topic and \p partition
- * starting at msgid base \p msg_base.
- */
-int test_msgver_verify_part0(const char *func,
- int line,
- const char *what,
- test_msgver_t *mv,
- int flags,
- const char *topic,
- int partition,
- int msg_base,
- int exp_cnt) {
- int fails = 0;
- struct test_mv_vs vs = {.msg_base = msg_base, .exp_cnt = exp_cnt};
- struct test_mv_p *p;
-
- TEST_SAY(
- "%s:%d: %s: Verifying %d received messages (flags 0x%x) "
- "in %s [%d]: expecting msgids %d..%d (%d)\n",
- func, line, what, mv->msgcnt, flags, topic, partition, msg_base,
- msg_base + exp_cnt, exp_cnt);
-
- p = test_msgver_p_get(mv, topic, partition, 0);
-
- /* Per-partition checks */
- if (flags & TEST_MSGVER_ORDER)
- fails += test_mv_mvec_verify_order(mv, flags, p, &p->mvec, &vs);
- if (flags & TEST_MSGVER_DUP)
- fails += test_mv_mvec_verify_dup(mv, flags, p, &p->mvec, &vs);
-
- if (mv->msgcnt < vs.exp_cnt) {
- TEST_MV_WARN(mv,
- "%s:%d: "
- "%s [%" PRId32
- "] expected %d messages but only "
- "%d received\n",
- func, line, p ? p->topic : "*",
- p ? p->partition : -1, vs.exp_cnt, mv->msgcnt);
- fails++;
- }
-
-
- if (mv->log_suppr_cnt > 0)
- TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n",
- func, line, what, mv->log_suppr_cnt);
-
- if (fails)
- TEST_FAIL(
- "%s:%d: %s: Verification of %d received messages "
- "failed: "
- "expected msgids %d..%d (%d): see previous errors\n",
- func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt,
- exp_cnt);
- else
- TEST_SAY(
- "%s:%d: %s: Verification of %d received messages "
- "succeeded: "
- "expected msgids %d..%d (%d)\n",
- func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt,
- exp_cnt);
-
- return fails;
-}
-
-/**
- * Verify that \p exp_cnt messages were received starting at
- * msgid base \p msg_base.
- */
-int test_msgver_verify0(const char *func,
- int line,
- const char *what,
- test_msgver_t *mv,
- int flags,
- struct test_mv_vs vs) {
- int fails = 0;
-
- TEST_SAY(
- "%s:%d: %s: Verifying %d received messages (flags 0x%x): "
- "expecting msgids %d..%d (%d)\n",
- func, line, what, mv->msgcnt, flags, vs.msg_base,
- vs.msg_base + vs.exp_cnt, vs.exp_cnt);
- if (flags & TEST_MSGVER_BY_TIMESTAMP) {
- assert((flags & TEST_MSGVER_BY_MSGID)); /* Required */
- TEST_SAY(
- "%s:%d: %s: "
- " and expecting timestamps %" PRId64 "..%" PRId64 "\n",
- func, line, what, vs.timestamp_min, vs.timestamp_max);
- }
-
- /* Per-partition checks */
- if (flags & TEST_MSGVER_ORDER)
- fails += test_mv_p_verify_f(mv, flags,
- test_mv_mvec_verify_order, &vs);
- if (flags & TEST_MSGVER_DUP)
- fails +=
- test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_dup, &vs);
-
- if (flags & TEST_MSGVER_BY_BROKER_ID)
- fails += test_mv_p_verify_f(mv, flags,
- test_mv_mvec_verify_broker, &vs);
-
- /* Checks across all partitions */
- if ((flags & TEST_MSGVER_RANGE) && vs.exp_cnt > 0) {
- vs.msgid_min = vs.msg_base;
- vs.msgid_max = vs.msgid_min + vs.exp_cnt - 1;
- fails += test_msgver_verify_range(mv, flags, &vs);
- }
-
- if (mv->log_suppr_cnt > 0)
- TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n",
- func, line, what, mv->log_suppr_cnt);
-
- if (vs.exp_cnt != mv->msgcnt) {
- if (!(flags & TEST_MSGVER_SUBSET)) {
- TEST_WARN("%s:%d: %s: expected %d messages, got %d\n",
- func, line, what, vs.exp_cnt, mv->msgcnt);
- fails++;
- }
- }
-
- if (fails)
- TEST_FAIL(
- "%s:%d: %s: Verification of %d received messages "
- "failed: "
- "expected msgids %d..%d (%d): see previous errors\n",
- func, line, what, mv->msgcnt, vs.msg_base,
- vs.msg_base + vs.exp_cnt, vs.exp_cnt);
- else
- TEST_SAY(
- "%s:%d: %s: Verification of %d received messages "
- "succeeded: "
- "expected msgids %d..%d (%d)\n",
- func, line, what, mv->msgcnt, vs.msg_base,
- vs.msg_base + vs.exp_cnt, vs.exp_cnt);
-
- return fails;
-}
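-
-/*
- * Sketch of a complete msgver verification pass, assuming
- * test_msgver_verify() is the test.h wrapper that fills in func/line and
- * the verification state:
- *
- * @code
- *   test_msgver_t mv;
- *   test_msgver_init(&mv, testid);
- *
- *   test_consumer_poll("consume", c, testid, -1, 0, exp_cnt, &mv);
- *
- *   // Check ordering, duplicates and the full msgid range in one call.
- *   test_msgver_verify("consume", &mv, TEST_MSGVER_ALL, 0, exp_cnt);
- *   test_msgver_clear(&mv);
- * @endcode
- */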
-
-
-
-void test_verify_rkmessage0(const char *func,
- int line,
- rd_kafka_message_t *rkmessage,
- uint64_t testid,
- int32_t partition,
- int msgnum) {
- uint64_t in_testid;
- int in_part;
- int in_msgnum;
- char buf[128];
-
- rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len,
- (char *)rkmessage->payload);
-
- if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n",
- &in_testid, &in_part, &in_msgnum) != 3)
- TEST_FAIL("Incorrect format: %s", buf);
-
- if (testid != in_testid || (partition != -1 && partition != in_part) ||
- (msgnum != -1 && msgnum != in_msgnum) || in_msgnum < 0)
- goto fail_match;
-
- if (test_level > 2) {
- TEST_SAY("%s:%i: Our testid %" PRIu64
- ", part %i (%i), msg %i\n",
- func, line, testid, (int)partition,
- (int)rkmessage->partition, msgnum);
- }
-
-
- return;
-
-fail_match:
- TEST_FAIL("%s:%i: Our testid %" PRIu64
- ", part %i, msg %i did "
- "not match message: \"%s\"\n",
- func, line, testid, (int)partition, msgnum, buf);
-}
-
-
-/**
- * @brief Verify that \p mv is identical to \p corr according to flags.
- */
-void test_msgver_verify_compare0(const char *func,
- int line,
- const char *what,
- test_msgver_t *mv,
- test_msgver_t *corr,
- int flags) {
- struct test_mv_vs vs;
- int fails = 0;
-
- memset(&vs, 0, sizeof(vs));
-
- TEST_SAY(
- "%s:%d: %s: Verifying %d received messages (flags 0x%x) by "
- "comparison to correct msgver (%d messages)\n",
- func, line, what, mv->msgcnt, flags, corr->msgcnt);
-
- vs.corr = corr;
-
- /* Per-partition checks */
- fails += test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_corr, &vs);
-
- if (mv->log_suppr_cnt > 0)
- TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n",
- func, line, what, mv->log_suppr_cnt);
-
- if (corr->msgcnt != mv->msgcnt) {
- if (!(flags & TEST_MSGVER_SUBSET)) {
- TEST_WARN("%s:%d: %s: expected %d messages, got %d\n",
- func, line, what, corr->msgcnt, mv->msgcnt);
- fails++;
- }
- }
-
- if (fails)
- TEST_FAIL(
- "%s:%d: %s: Verification of %d received messages "
- "failed: expected %d messages: see previous errors\n",
- func, line, what, mv->msgcnt, corr->msgcnt);
- else
- TEST_SAY(
- "%s:%d: %s: Verification of %d received messages "
- "succeeded: matching %d messages from correct msgver\n",
- func, line, what, mv->msgcnt, corr->msgcnt);
-}
-
-
-/**
- * Consumer poll, not expecting any proper messages for \p timeout_ms.
- */
-void test_consumer_poll_no_msgs(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int timeout_ms) {
- int64_t tmout = test_clock() + ((int64_t)timeout_ms * 1000);
- int cnt = 0;
- test_timing_t t_cons;
- test_msgver_t mv;
-
- test_msgver_init(&mv, testid);
-
- if (what)
- TEST_SAY("%s: not expecting any messages for %dms\n", what,
- timeout_ms);
-
- TIMING_START(&t_cons, "CONSUME");
-
- do {
- rd_kafka_message_t *rkmessage;
-
- rkmessage = rd_kafka_consumer_poll(rk, timeout_ms);
- if (!rkmessage)
- continue;
-
- if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("%s [%" PRId32
- "] reached EOF at "
- "offset %" PRId64 "\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset);
- test_msgver_add_msg(rk, &mv, rkmessage);
-
- } else if (rkmessage->err) {
- TEST_FAIL(
- "%s [%" PRId32 "] error (offset %" PRId64 "): %s",
- rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
- : "(no-topic)",
- rkmessage->partition, rkmessage->offset,
- rd_kafka_message_errstr(rkmessage));
-
- } else {
- if (test_msgver_add_msg(rk, &mv, rkmessage)) {
- TEST_MV_WARN(
- &mv,
- "Received unexpected message on "
- "%s [%" PRId32
- "] at offset "
- "%" PRId64 "\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset);
- cnt++;
- }
- }
-
- rd_kafka_message_destroy(rkmessage);
- } while (test_clock() <= tmout);
-
- if (what)
- TIMING_STOP(&t_cons);
-
- test_msgver_verify(what, &mv, TEST_MSGVER_ALL, 0, 0);
- test_msgver_clear(&mv);
-
- TEST_ASSERT(cnt == 0, "Expected 0 messages, got %d", cnt);
-}
-
-/**
- * @brief Consumer poll with the expectation that \p err will be seen
- *        within \p timeout_ms.
- */
-void test_consumer_poll_expect_err(rd_kafka_t *rk,
- uint64_t testid,
- int timeout_ms,
- rd_kafka_resp_err_t err) {
- int64_t tmout = test_clock() + ((int64_t)timeout_ms * 1000);
-
- TEST_SAY("%s: expecting error %s within %dms\n", rd_kafka_name(rk),
- rd_kafka_err2name(err), timeout_ms);
-
- do {
- rd_kafka_message_t *rkmessage;
- rkmessage = rd_kafka_consumer_poll(rk, timeout_ms);
- if (!rkmessage)
- continue;
-
- if (rkmessage->err == err) {
- TEST_SAY("Got expected error: %s: %s\n",
- rd_kafka_err2name(rkmessage->err),
- rd_kafka_message_errstr(rkmessage));
- rd_kafka_message_destroy(rkmessage);
-
- return;
- } else if (rkmessage->err) {
- TEST_FAIL("%s [%" PRId32
- "] unexpected error "
- "(offset %" PRId64 "): %s",
- rkmessage->rkt
- ? rd_kafka_topic_name(rkmessage->rkt)
- : "(no-topic)",
- rkmessage->partition, rkmessage->offset,
- rd_kafka_err2name(rkmessage->err));
- }
-
- rd_kafka_message_destroy(rkmessage);
- } while (test_clock() <= tmout);
- TEST_FAIL("Expected error %s not seen in %dms", rd_kafka_err2name(err),
- timeout_ms);
-}
-
-/**
- * Call consumer poll once and then return.
- * Messages are handled.
- *
- * \p mv is optional
- *
- * @returns 0 on timeout, 1 if a message was received or .._PARTITION_EOF
- * if EOF was reached.
- * TEST_FAIL()s on all errors.
- */
-int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage = rd_kafka_consumer_poll(rk, timeout_ms);
- if (!rkmessage)
- return 0;
-
- if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("%s [%" PRId32
- "] reached EOF at "
- "offset %" PRId64 "\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset);
- if (mv)
- test_msgver_add_msg(rk, mv, rkmessage);
- rd_kafka_message_destroy(rkmessage);
- return RD_KAFKA_RESP_ERR__PARTITION_EOF;
-
- } else if (rkmessage->err) {
- TEST_FAIL("%s [%" PRId32 "] error (offset %" PRId64 "): %s",
- rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
- : "(no-topic)",
- rkmessage->partition, rkmessage->offset,
- rd_kafka_message_errstr(rkmessage));
-
- } else {
- if (mv)
- test_msgver_add_msg(rk, mv, rkmessage);
- }
-
- rd_kafka_message_destroy(rkmessage);
- return 1;
-}
-
-/**
- * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1).
- * If false: poll until either one is reached.
- * @param timeout_ms Each call to poll has a timeout set by this argument. The
- * test fails if any poll times out.
- */
-int test_consumer_poll_exact_timeout(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- rd_bool_t exact,
- test_msgver_t *mv,
- int timeout_ms) {
- int eof_cnt = 0;
- int cnt = 0;
- test_timing_t t_cons;
-
- TEST_SAY("%s: consume %s%d messages\n", what, exact ? "exactly " : "",
- exp_cnt);
-
- TIMING_START(&t_cons, "CONSUME");
-
- while ((!exact && ((exp_eof_cnt <= 0 || eof_cnt < exp_eof_cnt) &&
- (exp_cnt <= 0 || cnt < exp_cnt))) ||
- (exact && (eof_cnt < exp_eof_cnt || cnt < exp_cnt))) {
- rd_kafka_message_t *rkmessage;
-
- rkmessage =
- rd_kafka_consumer_poll(rk, tmout_multip(timeout_ms));
- if (!rkmessage) /* Shouldn't take this long to get a msg */
- TEST_FAIL(
- "%s: consumer_poll() timeout "
- "(%d/%d eof, %d/%d msgs)\n",
- what, eof_cnt, exp_eof_cnt, cnt, exp_cnt);
-
-
- if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
- TEST_SAY("%s [%" PRId32
- "] reached EOF at "
- "offset %" PRId64 "\n",
- rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset);
- TEST_ASSERT(exp_eof_cnt != 0, "expected no EOFs");
- if (mv)
- test_msgver_add_msg(rk, mv, rkmessage);
- eof_cnt++;
-
- } else if (rkmessage->err) {
- TEST_FAIL(
- "%s [%" PRId32 "] error (offset %" PRId64 "): %s",
- rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
- : "(no-topic)",
- rkmessage->partition, rkmessage->offset,
- rd_kafka_message_errstr(rkmessage));
-
- } else {
- TEST_SAYL(4,
- "%s: consumed message on %s [%" PRId32
- "] "
- "at offset %" PRId64 " (leader epoch %" PRId32
- ")\n",
- what, rd_kafka_topic_name(rkmessage->rkt),
- rkmessage->partition, rkmessage->offset,
- rd_kafka_message_leader_epoch(rkmessage));
-
- if (!mv || test_msgver_add_msg(rk, mv, rkmessage))
- cnt++;
- }
-
- rd_kafka_message_destroy(rkmessage);
- }
-
- TIMING_STOP(&t_cons);
-
- TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", what, cnt,
- exp_cnt, eof_cnt, exp_eof_cnt);
-
- TEST_ASSERT(!exact || ((exp_cnt == -1 || exp_cnt == cnt) &&
- (exp_eof_cnt == -1 || exp_eof_cnt == eof_cnt)),
- "%s: mismatch between exact expected counts and actual: "
- "%d/%d EOFs, %d/%d msgs",
- what, eof_cnt, exp_eof_cnt, cnt, exp_cnt);
-
- if (exp_cnt == 0)
- TEST_ASSERT(cnt == 0 && eof_cnt == exp_eof_cnt,
- "%s: expected no messages and %d EOFs: "
- "got %d messages and %d EOFs",
- what, exp_eof_cnt, cnt, eof_cnt);
- return cnt;
-}
-
-
-/**
- * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1).
- * If false: poll until either one is reached.
- */
-int test_consumer_poll_exact(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- rd_bool_t exact,
- test_msgver_t *mv) {
- return test_consumer_poll_exact_timeout(what, rk, testid, exp_eof_cnt,
- exp_msg_base, exp_cnt, exact,
- mv, 10 * 1000);
-}
-
-int test_consumer_poll(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- test_msgver_t *mv) {
- return test_consumer_poll_exact(what, rk, testid, exp_eof_cnt,
- exp_msg_base, exp_cnt,
- rd_false /*not exact */, mv);
-}
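-
-/*
- * End-to-end sketch combining the produce and poll helpers above (the
- * topic and group names are assumptions for illustration):
- *
- * @code
- *   uint64_t testid = test_produce_msgs_easy_size("mytopic", 0, 0, 100, 0);
- *
- *   rd_kafka_t *c = test_create_consumer("mygroup", NULL, NULL, NULL);
- *   test_consumer_subscribe(c, "mytopic");
- *
- *   // exp_eof_cnt -1: don't require EOFs, stop after 100 messages.
- *   test_consumer_poll("roundtrip", c, testid, -1, 0, 100, NULL);
- *
- *   test_consumer_close(c);
- *   rd_kafka_destroy(c);
- * @endcode
- */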
-
-int test_consumer_poll_timeout(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- test_msgver_t *mv,
- int timeout_ms) {
- return test_consumer_poll_exact_timeout(
- what, rk, testid, exp_eof_cnt, exp_msg_base, exp_cnt,
- rd_false /*not exact */, mv, timeout_ms);
-}
-
-void test_consumer_close(rd_kafka_t *rk) {
- rd_kafka_resp_err_t err;
- test_timing_t timing;
-
- TEST_SAY("Closing consumer %s\n", rd_kafka_name(rk));
-
- TIMING_START(&timing, "CONSUMER.CLOSE");
- err = rd_kafka_consumer_close(rk);
- TIMING_STOP(&timing);
- if (err)
- TEST_FAIL("Failed to close consumer: %s\n",
- rd_kafka_err2str(err));
-}
-
-
-void test_flush(rd_kafka_t *rk, int timeout_ms) {
- test_timing_t timing;
- rd_kafka_resp_err_t err;
-
- TEST_SAY("%s: Flushing %d messages\n", rd_kafka_name(rk),
- rd_kafka_outq_len(rk));
- TIMING_START(&timing, "FLUSH");
- err = rd_kafka_flush(rk, timeout_ms);
- TIMING_STOP(&timing);
- if (err)
- TEST_FAIL("Failed to flush(%s, %d): %s: len() = %d\n",
- rd_kafka_name(rk), timeout_ms, rd_kafka_err2str(err),
- rd_kafka_outq_len(rk));
-}
-
-
-void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
- char errstr[512];
- if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
- RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n", name, val,
- errstr);
-}
-
-/**
- * @brief Get configuration value for property \p name.
- *
- * @param conf Configuration to get value from. If NULL the test.conf (if any)
- * configuration will be used.
- */
-char *test_conf_get(const rd_kafka_conf_t *conf, const char *name) {
- static RD_TLS char ret[256];
- size_t ret_sz = sizeof(ret);
- rd_kafka_conf_t *def_conf = NULL;
-
- if (!conf) /* Use the current test.conf */
- test_conf_init(&def_conf, NULL, 0);
-
- if (rd_kafka_conf_get(conf ? conf : def_conf, name, ret, &ret_sz) !=
- RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to get config \"%s\": %s\n", name,
- "unknown property");
-
- if (def_conf)
- rd_kafka_conf_destroy(def_conf);
-
- return ret;
-}
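-
-/*
- * Note that test_conf_get() returns a pointer to thread-local static
- * storage, so copy the value before the next call:
- *
- * @code
- *   char bootstrap[256];
- *   rd_snprintf(bootstrap, sizeof(bootstrap), "%s",
- *               test_conf_get(NULL, "bootstrap.servers"));
- * @endcode
- */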
-
-
-char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf,
- const char *name) {
- static RD_TLS char ret[256];
- size_t ret_sz = sizeof(ret);
- if (rd_kafka_topic_conf_get(tconf, name, ret, &ret_sz) !=
- RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to get topic config \"%s\": %s\n", name,
- "unknown property");
- return ret;
-}
-
-
-/**
- * @brief Check if property \p name matches \p val in \p conf.
- * If \p conf is NULL the test config will be used. */
-int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val) {
- char *real;
- int free_conf = 0;
-
- if (!conf) {
- test_conf_init(&conf, NULL, 0);
- free_conf = 1;
- }
-
- real = test_conf_get(conf, name);
-
- if (free_conf)
- rd_kafka_conf_destroy(conf);
-
- return !strcmp(real, val);
-}
-
-
-void test_topic_conf_set(rd_kafka_topic_conf_t *tconf,
- const char *name,
- const char *val) {
- char errstr[512];
- if (rd_kafka_topic_conf_set(tconf, name, val, errstr, sizeof(errstr)) !=
- RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to set topic config \"%s\"=\"%s\": %s\n",
- name, val, errstr);
-}
-
-/**
- * @brief First attempt to set topic level property, then global.
- */
-void test_any_conf_set(rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *tconf,
- const char *name,
- const char *val) {
- rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;
- char errstr[512] = {"Missing conf_t"};
-
- if (tconf)
- res = rd_kafka_topic_conf_set(tconf, name, val, errstr,
- sizeof(errstr));
- if (res == RD_KAFKA_CONF_UNKNOWN && conf)
- res =
- rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr));
-
- if (res != RD_KAFKA_CONF_OK)
- TEST_FAIL("Failed to set any config \"%s\"=\"%s\": %s\n", name,
- val, errstr);
-}
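-
-/*
- * Usage sketch: set a property without knowing whether it is topic-level
- * or global (here "acks" resolves at the topic level):
- *
- * @code
- *   test_any_conf_set(conf, tconf, "acks", "all");
- * @endcode
- */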
-
-
-/**
- * @returns true if test clients need to be configured for authentication
- * or other security measures (SSL), else false for unauthed plaintext.
- */
-int test_needs_auth(void) {
- rd_kafka_conf_t *conf;
- const char *sec;
-
- test_conf_init(&conf, NULL, 0);
-
- sec = test_conf_get(conf, "security.protocol");
-
- rd_kafka_conf_destroy(conf);
-
- return strcmp(sec, "plaintext");
-}
-
-
-void test_print_partition_list(
- const rd_kafka_topic_partition_list_t *partitions) {
- int i;
- for (i = 0; i < partitions->cnt; i++) {
- TEST_SAY(" %s [%" PRId32 "] offset %" PRId64 " (epoch %" PRId32
- ") %s%s\n",
- partitions->elems[i].topic,
- partitions->elems[i].partition,
- partitions->elems[i].offset,
- rd_kafka_topic_partition_get_leader_epoch(
- &partitions->elems[i]),
- partitions->elems[i].err ? ": " : "",
- partitions->elems[i].err
- ? rd_kafka_err2str(partitions->elems[i].err)
- : "");
- }
-}
-
-/**
- * @brief Compare two lists, returning 0 if equal.
- *
- * @remark The lists may be sorted by this function.
- */
-int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al,
- rd_kafka_topic_partition_list_t *bl) {
- int i;
-
- if (al->cnt < bl->cnt)
- return -1;
- else if (al->cnt > bl->cnt)
- return 1;
- else if (al->cnt == 0)
- return 0;
-
- rd_kafka_topic_partition_list_sort(al, NULL, NULL);
- rd_kafka_topic_partition_list_sort(bl, NULL, NULL);
-
- for (i = 0; i < al->cnt; i++) {
- const rd_kafka_topic_partition_t *a = &al->elems[i];
- const rd_kafka_topic_partition_t *b = &bl->elems[i];
- if (a->partition != b->partition || strcmp(a->topic, b->topic))
- return -1;
- }
-
- return 0;
-}
-
-/**
- * @brief Compare two lists and their offsets, returning 0 if equal.
- *
- * @remark The lists may be sorted by this function.
- */
-int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al,
- rd_kafka_topic_partition_list_t *bl) {
- int i;
-
- if (al->cnt < bl->cnt)
- return -1;
- else if (al->cnt > bl->cnt)
- return 1;
- else if (al->cnt == 0)
- return 0;
-
- rd_kafka_topic_partition_list_sort(al, NULL, NULL);
- rd_kafka_topic_partition_list_sort(bl, NULL, NULL);
-
- for (i = 0; i < al->cnt; i++) {
- const rd_kafka_topic_partition_t *a = &al->elems[i];
- const rd_kafka_topic_partition_t *b = &bl->elems[i];
- if (a->partition != b->partition ||
- strcmp(a->topic, b->topic) || a->offset != b->offset ||
- rd_kafka_topic_partition_get_leader_epoch(a) !=
- rd_kafka_topic_partition_get_leader_epoch(b))
- return -1;
- }
-
- return 0;
-}
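-
-/*
- * Illustrative sketch (not part of the original file): the compare helpers
- * sort their arguments, so insertion order does not matter. The topic name
- * is hypothetical.
- */
-static void example_partition_list_cmp(void) {
- rd_kafka_topic_partition_list_t *a = rd_kafka_topic_partition_list_new(2);
- rd_kafka_topic_partition_list_t *b = rd_kafka_topic_partition_list_new(2);
-
- rd_kafka_topic_partition_list_add(a, "mytopic", 0);
- rd_kafka_topic_partition_list_add(a, "mytopic", 1);
- /* Same entries in reverse order */
- rd_kafka_topic_partition_list_add(b, "mytopic", 1);
- rd_kafka_topic_partition_list_add(b, "mytopic", 0);
-
- TEST_ASSERT(!test_partition_list_cmp(a, b), "lists expected equal");
-
- rd_kafka_topic_partition_list_destroy(a);
- rd_kafka_topic_partition_list_destroy(b);
-}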
-
-/**
- * @brief Execute script from the Kafka distribution bin/ path.
- */
-void test_kafka_cmd(const char *fmt, ...) {
-#ifdef _WIN32
- TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__);
-#else
- char cmd[1024];
- int r;
- va_list ap;
- test_timing_t t_cmd;
- const char *kpath;
-
- kpath = test_getenv("KAFKA_PATH", NULL);
-
- if (!kpath)
- TEST_FAIL("%s: KAFKA_PATH must be set", __FUNCTION__);
-
- r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/", kpath);
- TEST_ASSERT(r < (int)sizeof(cmd));
-
- va_start(ap, fmt);
- rd_vsnprintf(cmd + r, sizeof(cmd) - r, fmt, ap);
- va_end(ap);
-
- TEST_SAY("Executing: %s\n", cmd);
- TIMING_START(&t_cmd, "exec");
- r = system(cmd);
- TIMING_STOP(&t_cmd);
-
- if (r == -1)
- TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno));
- else if (WIFSIGNALED(r))
- TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd,
- WTERMSIG(r));
- else if (WEXITSTATUS(r))
- TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd,
- WEXITSTATUS(r));
-#endif
-}
-
-/**
- * @brief Execute kafka-topics.sh from the Kafka distribution.
- */
-void test_kafka_topics(const char *fmt, ...) {
-#ifdef _WIN32
- TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__);
-#else
- char cmd[1024];
- int r, bytes_left;
- va_list ap;
- test_timing_t t_cmd;
- const char *kpath, *bootstrap_env, *flag, *bootstrap_srvs;
-
- if (test_broker_version >= TEST_BRKVER(3, 0, 0, 0)) {
- bootstrap_env = "BROKERS";
- flag = "--bootstrap-server";
- } else {
- bootstrap_env = "ZK_ADDRESS";
- flag = "--zookeeper";
- }
-
- kpath = test_getenv("KAFKA_PATH", NULL);
- bootstrap_srvs = test_getenv(bootstrap_env, NULL);
-
- if (!kpath || !bootstrap_srvs)
- TEST_FAIL("%s: KAFKA_PATH and %s must be set", __FUNCTION__,
- bootstrap_env);
-
- r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/kafka-topics.sh %s %s ",
- kpath, flag, bootstrap_srvs);
- TEST_ASSERT(r > 0 && r < (int)sizeof(cmd));
-
- bytes_left = sizeof(cmd) - r;
-
- va_start(ap, fmt);
- r = rd_vsnprintf(cmd + r, bytes_left, fmt, ap);
- va_end(ap);
- TEST_ASSERT(r > 0 && r < bytes_left);
-
- TEST_SAY("Executing: %s\n", cmd);
- TIMING_START(&t_cmd, "exec");
- r = system(cmd);
- TIMING_STOP(&t_cmd);
-
- if (r == -1)
- TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno));
- else if (WIFSIGNALED(r))
- TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd,
- WTERMSIG(r));
- else if (WEXITSTATUS(r))
- TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd,
- WEXITSTATUS(r));
-#endif
-}
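-
-/*
- * Illustrative sketch (not part of the original file): a typical
- * test_kafka_topics() invocation; requires KAFKA_PATH and BROKERS (or
- * ZK_ADDRESS) as described above. The topic name is made up.
- */
-static void example_kafka_topics_describe(void) {
- test_kafka_topics("--describe --topic \"%s\"", "rdkafkatest_example");
-}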
-
-
-
-/**
- * @brief Create topic using Topic Admin API
- *
- * @param configs is an optional key-value tuple array of
- * topic configs (or NULL).
- */
-void test_admin_create_topic(rd_kafka_t *use_rk,
- const char *topicname,
- int partition_cnt,
- int replication_factor,
- const char **configs) {
- rd_kafka_t *rk;
- rd_kafka_NewTopic_t *newt[1];
- const size_t newt_cnt = 1;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *rkqu;
- rd_kafka_event_t *rkev;
- const rd_kafka_CreateTopics_result_t *res;
- const rd_kafka_topic_result_t **terr;
- int timeout_ms = tmout_multip(10000);
- size_t res_cnt;
- rd_kafka_resp_err_t err;
- char errstr[512];
- test_timing_t t_create;
-
- if (!(rk = use_rk))
- rk = test_create_producer();
-
- rkqu = rd_kafka_queue_new(rk);
-
- newt[0] =
- rd_kafka_NewTopic_new(topicname, partition_cnt, replication_factor,
- errstr, sizeof(errstr));
- TEST_ASSERT(newt[0] != NULL, "%s", errstr);
-
- if (configs) {
- int i;
-
- for (i = 0; configs[i] && configs[i + 1]; i += 2)
- TEST_CALL_ERR__(rd_kafka_NewTopic_set_config(
- newt[0], configs[i], configs[i + 1]));
- }
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, timeout_ms, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
- TEST_SAY(
- "Creating topic \"%s\" "
- "(partitions=%d, replication_factor=%d, timeout=%d)\n",
- topicname, partition_cnt, replication_factor, timeout_ms);
-
- TIMING_START(&t_create, "CreateTopics");
- rd_kafka_CreateTopics(rk, newt, newt_cnt, options, rkqu);
-
- /* Wait for result */
- rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
- TEST_ASSERT(rkev, "Timed out waiting for CreateTopics result");
-
- TIMING_STOP(&t_create);
-
- TEST_ASSERT(!rd_kafka_event_error(rkev), "CreateTopics failed: %s",
- rd_kafka_event_error_string(rkev));
-
- res = rd_kafka_event_CreateTopics_result(rkev);
- TEST_ASSERT(res, "Expected CreateTopics_result, not %s",
- rd_kafka_event_name(rkev));
-
- terr = rd_kafka_CreateTopics_result_topics(res, &res_cnt);
- TEST_ASSERT(terr, "CreateTopics_result_topics returned NULL");
- TEST_ASSERT(res_cnt == newt_cnt,
- "CreateTopics_result_topics returned %" PRIusz
- " topics, "
- "not the expected %" PRIusz,
- res_cnt, newt_cnt);
-
- TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]) ||
- rd_kafka_topic_result_error(terr[0]) ==
- RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS,
- "Topic %s result error: %s",
- rd_kafka_topic_result_name(terr[0]),
- rd_kafka_topic_result_error_string(terr[0]));
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_queue_destroy(rkqu);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_NewTopic_destroy(newt[0]);
-
- if (!use_rk)
- rd_kafka_destroy(rk);
-}
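-
-/*
- * Illustrative sketch (not part of the original file): creating a compacted
- * topic through test_admin_create_topic(). The configs array is a flat,
- * NULL-terminated list of key/value pairs, matching the loop above; names
- * and counts are arbitrary.
- */
-static void example_create_compacted_topic(rd_kafka_t *rk) {
- const char *configs[] = {"cleanup.policy", "compact", NULL};
-
- test_admin_create_topic(rk, "rdkafkatest_compacted", 3, 1, configs);
-}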
-
-
-
-/**
- * @brief Create topic using kafka-topics.sh --create
- */
-static void test_create_topic_sh(const char *topicname,
- int partition_cnt,
- int replication_factor) {
- test_kafka_topics(
- "--create --topic \"%s\" "
- "--replication-factor %d --partitions %d",
- topicname, replication_factor, partition_cnt);
-}
-
-
-/**
- * @brief Create topic
- */
-void test_create_topic(rd_kafka_t *use_rk,
- const char *topicname,
- int partition_cnt,
- int replication_factor) {
- if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
- test_create_topic_sh(topicname, partition_cnt,
- replication_factor);
- else
- test_admin_create_topic(use_rk, topicname, partition_cnt,
- replication_factor, NULL);
-}
-
-
-/**
- * @brief Delete topic using kafka-topics.sh --delete
- */
-static void test_delete_topic_sh(const char *topicname) {
- test_kafka_topics("--delete --topic \"%s\" ", topicname);
-}
-
-
-/**
- * @brief Delete topic using Topic Admin API
- */
-static void test_admin_delete_topic(rd_kafka_t *use_rk, const char *topicname) {
- rd_kafka_t *rk;
- rd_kafka_DeleteTopic_t *delt[1];
- const size_t delt_cnt = 1;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *rkqu;
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteTopics_result_t *res;
- const rd_kafka_topic_result_t **terr;
- int timeout_ms = tmout_multip(10000);
- size_t res_cnt;
- rd_kafka_resp_err_t err;
- char errstr[512];
- test_timing_t t_create;
-
- if (!(rk = use_rk))
- rk = test_create_producer();
-
- rkqu = rd_kafka_queue_new(rk);
-
- delt[0] = rd_kafka_DeleteTopic_new(topicname);
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, timeout_ms, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
- TEST_SAY(
- "Deleting topic \"%s\" "
- "(timeout=%d)\n",
- topicname, timeout_ms);
-
- TIMING_START(&t_create, "DeleteTopics");
- rd_kafka_DeleteTopics(rk, delt, delt_cnt, options, rkqu);
-
- /* Wait for result */
- rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
- TEST_ASSERT(rkev, "Timed out waiting for DeleteTopics result");
-
- TIMING_STOP(&t_create);
-
- res = rd_kafka_event_DeleteTopics_result(rkev);
- TEST_ASSERT(res, "Expected DeleteTopics_result, not %s",
- rd_kafka_event_name(rkev));
-
- terr = rd_kafka_DeleteTopics_result_topics(res, &res_cnt);
- TEST_ASSERT(terr, "DeleteTopics_result_topics returned NULL");
- TEST_ASSERT(res_cnt == delt_cnt,
- "DeleteTopics_result_topics returned %" PRIusz
- " topics, "
- "not the expected %" PRIusz,
- res_cnt, delt_cnt);
-
- TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]),
- "Topic %s result error: %s",
- rd_kafka_topic_result_name(terr[0]),
- rd_kafka_topic_result_error_string(terr[0]));
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_queue_destroy(rkqu);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_DeleteTopic_destroy(delt[0]);
-
- if (!use_rk)
- rd_kafka_destroy(rk);
-}
-
-
-/**
- * @brief Delete a topic
- */
-void test_delete_topic(rd_kafka_t *use_rk, const char *topicname) {
- if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
- test_delete_topic_sh(topicname);
- else
- test_admin_delete_topic(use_rk, topicname);
-}
-
-
-/**
- * @brief Create additional partitions for a topic using Admin API
- */
-static void test_admin_create_partitions(rd_kafka_t *use_rk,
- const char *topicname,
- int new_partition_cnt) {
- rd_kafka_t *rk;
- rd_kafka_NewPartitions_t *newp[1];
- const size_t newp_cnt = 1;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *rkqu;
- rd_kafka_event_t *rkev;
- const rd_kafka_CreatePartitions_result_t *res;
- const rd_kafka_topic_result_t **terr;
- int timeout_ms = tmout_multip(10000);
- size_t res_cnt;
- rd_kafka_resp_err_t err;
- char errstr[512];
- test_timing_t t_create;
-
- if (!(rk = use_rk))
- rk = test_create_producer();
-
- rkqu = rd_kafka_queue_new(rk);
-
- newp[0] = rd_kafka_NewPartitions_new(topicname, new_partition_cnt,
- errstr, sizeof(errstr));
- TEST_ASSERT(newp[0] != NULL, "%s", errstr);
-
- options =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, timeout_ms, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "%s", errstr);
-
- TEST_SAY("Creating %d (total) partitions for topic \"%s\"\n",
- new_partition_cnt, topicname);
-
- TIMING_START(&t_create, "CreatePartitions");
- rd_kafka_CreatePartitions(rk, newp, newp_cnt, options, rkqu);
-
- /* Wait for result */
- rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
- TEST_ASSERT(rkev, "Timed out waiting for CreatePartitions result");
-
- TIMING_STOP(&t_create);
-
- res = rd_kafka_event_CreatePartitions_result(rkev);
- TEST_ASSERT(res, "Expected CreatePartitions_result, not %s",
- rd_kafka_event_name(rkev));
-
- terr = rd_kafka_CreatePartitions_result_topics(res, &res_cnt);
- TEST_ASSERT(terr, "CreatePartitions_result_topics returned NULL");
- TEST_ASSERT(res_cnt == newp_cnt,
- "CreatePartitions_result_topics returned %" PRIusz
- " topics, not the expected %" PRIusz,
- res_cnt, newp_cnt);
-
- TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]),
- "Topic %s result error: %s",
- rd_kafka_topic_result_name(terr[0]),
- rd_kafka_topic_result_error_string(terr[0]));
-
- rd_kafka_event_destroy(rkev);
-
- rd_kafka_queue_destroy(rkqu);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_NewPartitions_destroy(newp[0]);
-
- if (!use_rk)
- rd_kafka_destroy(rk);
-}
-
-
-/**
- * @brief Create partitions for topic
- */
-void test_create_partitions(rd_kafka_t *use_rk,
- const char *topicname,
- int new_partition_cnt) {
- if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
- test_kafka_topics("--alter --topic %s --partitions %d",
- topicname, new_partition_cnt);
- else
- test_admin_create_partitions(use_rk, topicname,
- new_partition_cnt);
-}
-
-
-int test_get_partition_count(rd_kafka_t *rk,
- const char *topicname,
- int timeout_ms) {
- rd_kafka_t *use_rk;
- rd_kafka_resp_err_t err;
- rd_kafka_topic_t *rkt;
- int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
- int ret = -1;
-
- if (!rk)
- use_rk = test_create_producer();
- else
- use_rk = rk;
-
- rkt = rd_kafka_topic_new(use_rk, topicname, NULL);
-
- do {
- const struct rd_kafka_metadata *metadata;
-
- err = rd_kafka_metadata(use_rk, 0, rkt, &metadata,
- tmout_multip(15000));
- if (err)
- TEST_WARN("metadata() for %s failed: %s\n",
- rkt ? rd_kafka_topic_name(rkt)
- : "(all-local)",
- rd_kafka_err2str(err));
- else {
- if (metadata->topic_cnt == 1) {
- if (metadata->topics[0].err == 0 ||
- metadata->topics[0].partition_cnt > 0) {
- int32_t cnt;
- cnt = metadata->topics[0].partition_cnt;
- rd_kafka_metadata_destroy(metadata);
- ret = (int)cnt;
- break;
- }
- TEST_SAY(
- "metadata(%s) returned %s: retrying\n",
- rd_kafka_topic_name(rkt),
- rd_kafka_err2str(metadata->topics[0].err));
- }
- rd_kafka_metadata_destroy(metadata);
- rd_sleep(1);
- }
- } while (test_clock() < abs_timeout);
-
- rd_kafka_topic_destroy(rkt);
-
- if (!rk)
- rd_kafka_destroy(use_rk);
-
- return ret;
-}
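-
-/*
- * Illustrative sketch (not part of the original file): grow a topic and
- * poll until metadata reflects the new partition count. The counts are
- * arbitrary.
- */
-static void example_grow_topic(rd_kafka_t *rk, const char *topic) {
- int cnt;
-
- test_create_partitions(rk, topic, 8 /* new total */);
- cnt = test_get_partition_count(rk, topic, tmout_multip(10000));
- TEST_ASSERT(cnt == 8, "expected 8 partitions, got %d", cnt);
-}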
-
-/**
- * @brief Let the broker auto-create the topic for us.
- */
-rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- int timeout_ms) {
- const struct rd_kafka_metadata *metadata;
- rd_kafka_resp_err_t err;
- test_timing_t t;
- int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
-
- do {
- TIMING_START(&t, "auto_create_topic");
- err = rd_kafka_metadata(rk, 0, rkt, &metadata,
- tmout_multip(15000));
- TIMING_STOP(&t);
- if (err)
- TEST_WARN("metadata() for %s failed: %s\n",
- rkt ? rd_kafka_topic_name(rkt)
- : "(all-local)",
- rd_kafka_err2str(err));
- else {
- if (metadata->topic_cnt == 1) {
- if (metadata->topics[0].err == 0 ||
- metadata->topics[0].partition_cnt > 0) {
- rd_kafka_metadata_destroy(metadata);
- return 0;
- }
- TEST_SAY(
- "metadata(%s) returned %s: retrying\n",
- rd_kafka_topic_name(rkt),
- rd_kafka_err2str(metadata->topics[0].err));
- }
- rd_kafka_metadata_destroy(metadata);
- rd_sleep(1);
- }
- } while (test_clock() < abs_timeout);
-
- return err;
-}
-
-rd_kafka_resp_err_t
-test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms) {
- rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, name, NULL);
- rd_kafka_resp_err_t err;
- if (!rkt)
- return rd_kafka_last_error();
- err = test_auto_create_topic_rkt(rk, rkt, timeout_ms);
- rd_kafka_topic_destroy(rkt);
- return err;
-}
-
-
-/**
- * @brief Check if topic auto creation works.
- * @returns 1 if it does, else 0.
- */
-int test_check_auto_create_topic(void) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf;
- rd_kafka_resp_err_t err;
- const char *topic = test_mk_topic_name("autocreatetest", 1);
-
- test_conf_init(&conf, NULL, 0);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
- err = test_auto_create_topic(rk, topic, tmout_multip(5000));
- if (err)
- TEST_SAY("Auto topic creation of \"%s\" failed: %s\n", topic,
- rd_kafka_err2str(err));
- rd_kafka_destroy(rk);
-
- return err ? 0 : 1;
-}
-
-
-/**
- * @brief Builds and runs a Java application from the java/ directory.
- *
- * The application is started in the background; use
- * test_waitpid() to await its demise.
- *
- * @param cls The app class to run using java/run-class.sh
- *
- * @returns -1 if the application could not be started, else the pid.
- */
-int test_run_java(const char *cls, const char **argv) {
-#ifdef _WIN32
- TEST_WARN("%s(%s) not supported Windows, yet", __FUNCTION__, cls);
- return -1;
-#else
- int r;
- const char *kpath;
- pid_t pid;
- const char **full_argv, **p;
- int cnt;
- extern char **environ;
-
- kpath = test_getenv("KAFKA_PATH", NULL);
-
- if (!kpath) {
- TEST_WARN("%s(%s): KAFKA_PATH must be set\n", __FUNCTION__,
- cls);
- return -1;
- }
-
- /* Build */
- r = system("make -s java");
-
- if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r)) {
- TEST_WARN("%s(%s): failed to build java class (code %d)\n",
- __FUNCTION__, cls, r);
- return -1;
- }
-
- /* Fork child process and run cls */
- pid = fork();
- if (pid == -1) {
- TEST_WARN("%s(%s): failed to fork: %s\n", __FUNCTION__, cls,
- strerror(errno));
- return -1;
- }
-
- if (pid > 0)
- return (int)pid; /* In parent process */
-
- /* In child process */
-
- /* Reconstruct argv to contain run-class.sh and the cls */
- for (cnt = 0; argv[cnt]; cnt++)
- ;
-
- cnt += 3; /* run-class.sh, cls, .., NULL */
- full_argv = malloc(sizeof(*full_argv) * cnt);
- full_argv[0] = "java/run-class.sh";
- full_argv[1] = (const char *)cls;
-
- /* Copy arguments */
- for (p = &full_argv[2]; *argv; p++, argv++)
- *p = *argv;
- *p = NULL;
-
- /* Run */
- r = execve(full_argv[0], (char *const *)full_argv, environ);
-
- TEST_WARN("%s(%s): failed to execute run-class.sh: %s\n", __FUNCTION__,
- cls, strerror(errno));
- exit(2);
-
- return -1; /* NOTREACHED */
-#endif
-}
-
-
-/**
- * @brief Wait for child-process \p pid to exit.
- *
- * @returns 0 if the child process exited successfully, else -1.
- */
-int test_waitpid(int pid) {
-#ifdef _WIN32
- TEST_WARN("%s() not supported Windows, yet", __FUNCTION__);
- return -1;
-#else
- pid_t r;
- int status = 0;
-
- r = waitpid((pid_t)pid, &status, 0);
-
- if (r == -1) {
- TEST_WARN("waitpid(%d) failed: %s\n", pid, strerror(errno));
- return -1;
- }
-
- if (WIFSIGNALED(status)) {
- TEST_WARN("Process %d terminated by signal %d\n", pid,
- WTERMSIG(status));
- return -1;
- } else if (WEXITSTATUS(status)) {
- TEST_WARN("Process %d exited with status %d\n", pid,
- WEXITSTATUS(status));
- return -1;
- }
-
- return 0;
-#endif
-}
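-
-/*
- * Illustrative sketch (not part of the original file): start a Java helper
- * class and await its exit. "Murmur2Cli" is one of the classes under
- * java/; the argument is made up.
- */
-static void example_run_java(void) {
- const char *args[] = {"some-arg", NULL};
- int pid = test_run_java("Murmur2Cli", args);
-
- TEST_ASSERT(pid > 0, "failed to start java app");
- TEST_ASSERT(test_waitpid(pid) == 0, "java app failed");
-}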
-
-
-/**
- * @brief Check if \p feature is builtin to librdkafka.
- * @returns 1 if the feature is built in, else 0.
- */
-int test_check_builtin(const char *feature) {
- rd_kafka_conf_t *conf;
- char errstr[128];
- int r;
-
- conf = rd_kafka_conf_new();
- if (rd_kafka_conf_set(conf, "builtin.features", feature, errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK) {
- TEST_SAY("Feature \"%s\" not built-in: %s\n", feature, errstr);
- r = 0;
- } else {
- TEST_SAY("Feature \"%s\" is built-in\n", feature);
- r = 1;
- }
-
- rd_kafka_conf_destroy(conf);
- return r;
-}
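-
-/*
- * Illustrative sketch (not part of the original file): gating a test on a
- * built-in feature.
- */
-static int example_require_ssl(void) {
- if (!test_check_builtin("ssl")) {
- TEST_SKIP("ssl feature not built in\n");
- return 0;
- }
- return 1;
-}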
-
-
-char *tsprintf(const char *fmt, ...) {
- static RD_TLS char ret[8][512];
- static RD_TLS int i;
- va_list ap;
-
-
- i = (i + 1) % 8;
-
- va_start(ap, fmt);
- rd_vsnprintf(ret[i], sizeof(ret[i]), fmt, ap);
- va_end(ap);
-
- return ret[i];
-}
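-
-/*
- * Illustrative sketch (not part of the original file): tsprintf() rotates
- * through eight thread-local buffers, so several results may be used in a
- * single expression.
- */
-static void example_tsprintf(void) {
- TEST_SAY("%s and %s\n", tsprintf("topic_%d", 1), tsprintf("topic_%d", 2));
-}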
-
-
-/**
- * @brief Add a test report JSON object.
- * These will be written as a JSON array to the test report file.
- */
-void test_report_add(struct test *test, const char *fmt, ...) {
- va_list ap;
- char buf[512];
-
- va_start(ap, fmt);
- vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- if (test->report_cnt == test->report_size) {
- if (test->report_size == 0)
- test->report_size = 8;
- else
- test->report_size *= 2;
-
- test->report_arr =
- realloc(test->report_arr,
- sizeof(*test->report_arr) * test->report_size);
- }
-
- test->report_arr[test->report_cnt++] = rd_strdup(buf);
-
- TEST_SAYL(1, "Report #%d: %s\n", test->report_cnt - 1, buf);
-}
-
-/**
- * Returns 1 if KAFKA_PATH and BROKERS (or ZK_ADDRESS) are set so we can use
- * the kafka-topics.sh script to manually create topics.
- *
- * If \p skip is set TEST_SKIP() will be called with a helpful message.
- */
-int test_can_create_topics(int skip) {
-#ifndef _WIN32
- const char *bootstrap;
-#endif
-
- /* Has AdminAPI */
- if (test_broker_version >= TEST_BRKVER(0, 10, 2, 0))
- return 1;
-
-#ifdef _WIN32
- if (skip)
- TEST_SKIP("Cannot create topics on Win32\n");
- return 0;
-#else
-
- bootstrap = test_broker_version >= TEST_BRKVER(3, 0, 0, 0)
- ? "BROKERS"
- : "ZK_ADDRESS";
-
- if (!test_getenv("KAFKA_PATH", NULL) || !test_getenv(bootstrap, NULL)) {
- if (skip)
- TEST_SKIP(
- "Cannot create topics "
- "(set KAFKA_PATH and %s)\n",
- bootstrap);
- return 0;
- }
-
-
- return 1;
-#endif
-}
-
-
-/**
- * Wait for \p event_type, discarding all other events prior to it.
- */
-rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq,
- rd_kafka_event_type_t event_type,
- int timeout_ms) {
- test_timing_t t_w;
- int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
-
- TIMING_START(&t_w, "wait_event");
- while (test_clock() < abs_timeout) {
- rd_kafka_event_t *rkev;
-
- rkev = rd_kafka_queue_poll(
- eventq, (int)(abs_timeout - test_clock()) / 1000);
-
- if (rd_kafka_event_type(rkev) == event_type) {
- TIMING_STOP(&t_w);
- return rkev;
- }
-
- if (!rkev)
- continue;
-
- if (rd_kafka_event_error(rkev))
- TEST_SAY("discarding ignored event %s: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
- else
- TEST_SAY("discarding ignored event %s\n",
- rd_kafka_event_name(rkev));
- rd_kafka_event_destroy(rkev);
- }
- TIMING_STOP(&t_w);
-
- return NULL;
-}
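-
-/*
- * Illustrative sketch (not part of the original file): waiting for a stats
- * event on the main queue; assumes "statistics.interval.ms" was configured
- * on \p rk.
- */
-static void example_wait_stats(rd_kafka_t *rk) {
- rd_kafka_queue_t *q = rd_kafka_queue_get_main(rk);
- rd_kafka_event_t *rkev =
- test_wait_event(q, RD_KAFKA_EVENT_STATS, tmout_multip(10000));
-
- TEST_ASSERT(rkev, "no stats event within timeout");
- TEST_SAY("stats: %s\n", rd_kafka_event_stats(rkev));
- rd_kafka_event_destroy(rkev);
- rd_kafka_queue_destroy(q);
-}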
-
-
-void test_SAY(const char *file, int line, int level, const char *str) {
- TEST_SAYL(level, "%s", str);
-}
-
-void test_SKIP(const char *file, int line, const char *str) {
- TEST_WARN("SKIPPING TEST: %s", str);
- TEST_LOCK();
- test_curr->state = TEST_SKIPPED;
- if (!*test_curr->failstr) {
- rd_snprintf(test_curr->failstr, sizeof(test_curr->failstr),
- "%s", str);
- rtrim(test_curr->failstr);
- }
- TEST_UNLOCK();
-}
-
-const char *test_curr_name(void) {
- return test_curr->name;
-}
-
-
-/**
- * @brief Dump/print message headers
- */
-void test_headers_dump(const char *what,
- int lvl,
- const rd_kafka_headers_t *hdrs) {
- size_t idx = 0;
- const char *name, *value;
- size_t size;
-
- while (!rd_kafka_header_get_all(hdrs, idx++, &name,
- (const void **)&value, &size))
- TEST_SAYL(lvl, "%s: Header #%" PRIusz ": %s='%s'\n", what,
- idx - 1, name, value ? value : "(NULL)");
-}
-
-
-/**
- * @brief Retrieve and return the list of broker ids in the cluster.
- *
- * @param use_rk Optional instance to use.
- * @param cntp Will be updated to the number of brokers returned.
- *
- * @returns a malloc:ed list of int32_t broker ids.
- */
-int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp) {
- int32_t *ids;
- rd_kafka_t *rk;
- const rd_kafka_metadata_t *md;
- rd_kafka_resp_err_t err;
- size_t i;
-
- if (!(rk = use_rk))
- rk = test_create_producer();
-
- err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000));
- TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
- TEST_ASSERT(md->broker_cnt > 0, "%d brokers, expected > 0",
- md->broker_cnt);
-
- ids = malloc(sizeof(*ids) * md->broker_cnt);
-
- for (i = 0; i < (size_t)md->broker_cnt; i++)
- ids[i] = md->brokers[i].id;
-
- *cntp = md->broker_cnt;
-
- rd_kafka_metadata_destroy(md);
-
- if (!use_rk)
- rd_kafka_destroy(rk);
-
- return ids;
-}
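-
-/*
- * Illustrative sketch (not part of the original file): listing broker ids;
- * the returned array must be free():d by the caller.
- */
-static void example_list_brokers(rd_kafka_t *rk) {
- size_t cnt, i;
- int32_t *ids = test_get_broker_ids(rk, &cnt);
-
- for (i = 0; i < cnt; i++)
- TEST_SAY("broker #%" PRIusz ": %" PRId32 "\n", i, ids[i]);
- free(ids);
-}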
-
-
-
-/**
- * @brief Verify that all topics in \p topics are reported in metadata,
- * and that none of the topics in \p not_topics are reported.
- *
- * @returns the number of failures (but does not FAIL).
- */
-static int verify_topics_in_metadata(rd_kafka_t *rk,
- rd_kafka_metadata_topic_t *topics,
- size_t topic_cnt,
- rd_kafka_metadata_topic_t *not_topics,
- size_t not_topic_cnt) {
- const rd_kafka_metadata_t *md;
- rd_kafka_resp_err_t err;
- int ti;
- size_t i;
- int fails = 0;
-
- /* Mark topics with dummy error which is overwritten
- * when topic is found in metadata, allowing us to check
- * for missed topics. */
- for (i = 0; i < topic_cnt; i++)
- topics[i].err = 12345;
-
- err = rd_kafka_metadata(rk, 1 /*all_topics*/, NULL, &md,
- tmout_multip(5000));
- TEST_ASSERT(!err, "metadata failed: %s", rd_kafka_err2str(err));
-
- for (ti = 0; ti < md->topic_cnt; ti++) {
- const rd_kafka_metadata_topic_t *mdt = &md->topics[ti];
-
- for (i = 0; i < topic_cnt; i++) {
- int pi;
- rd_kafka_metadata_topic_t *exp_mdt;
-
- if (strcmp(topics[i].topic, mdt->topic))
- continue;
-
- exp_mdt = &topics[i];
-
- exp_mdt->err = mdt->err; /* indicate found */
- if (mdt->err) {
- TEST_SAY(
- "metadata: "
- "Topic %s has error %s\n",
- mdt->topic, rd_kafka_err2str(mdt->err));
- fails++;
- }
-
- if (exp_mdt->partition_cnt > 0 &&
- mdt->partition_cnt != exp_mdt->partition_cnt) {
- TEST_SAY(
- "metadata: "
- "Topic %s, expected %d partitions"
- ", not %d\n",
- mdt->topic, exp_mdt->partition_cnt,
- mdt->partition_cnt);
- fails++;
- continue;
- }
-
- /* Verify per-partition values */
- for (pi = 0;
- exp_mdt->partitions && pi < exp_mdt->partition_cnt;
- pi++) {
- const rd_kafka_metadata_partition_t *mdp =
- &mdt->partitions[pi];
- const rd_kafka_metadata_partition_t *exp_mdp =
- &exp_mdt->partitions[pi];
-
- if (mdp->id != exp_mdp->id) {
- TEST_SAY(
- "metadata: "
- "Topic %s, "
- "partition %d, "
- "partition list out of order,"
- " expected %d, not %d\n",
- mdt->topic, pi, exp_mdp->id,
- mdp->id);
- fails++;
- continue;
- }
-
- if (exp_mdp->replicas) {
- if (mdp->replica_cnt !=
- exp_mdp->replica_cnt) {
- TEST_SAY(
- "metadata: "
- "Topic %s, "
- "partition %d, "
- "expected %d replicas,"
- " not %d\n",
- mdt->topic, pi,
- exp_mdp->replica_cnt,
- mdp->replica_cnt);
- fails++;
- } else if (
- memcmp(
- mdp->replicas,
- exp_mdp->replicas,
- mdp->replica_cnt *
- sizeof(*mdp->replicas))) {
- int ri;
-
- TEST_SAY(
- "metadata: "
- "Topic %s, "
- "partition %d, "
- "replica mismatch:\n",
- mdt->topic, pi);
-
- for (ri = 0;
- ri < mdp->replica_cnt;
- ri++) {
- TEST_SAY(
- " #%d: "
- "expected "
- "replica %d, "
- "not %d\n",
- ri,
- exp_mdp
- ->replicas[ri],
- mdp->replicas[ri]);
- }
-
- fails++;
- }
- }
- }
- }
-
- for (i = 0; i < not_topic_cnt; i++) {
- if (strcmp(not_topics[i].topic, mdt->topic))
- continue;
-
- TEST_SAY(
- "metadata: "
- "Topic %s found in metadata, unexpected\n",
- mdt->topic);
- fails++;
- }
- }
-
- for (i = 0; i < topic_cnt; i++) {
- if ((int)topics[i].err == 12345) {
- TEST_SAY(
- "metadata: "
- "Topic %s not seen in metadata\n",
- topics[i].topic);
- fails++;
- }
- }
-
- if (fails > 0)
- TEST_SAY("Metadata verification for %" PRIusz
- " topics failed "
- "with %d errors (see above)\n",
- topic_cnt, fails);
- else
- TEST_SAY(
- "Metadata verification succeeded: "
- "%" PRIusz
- " desired topics seen, "
- "%" PRIusz " undesired topics not seen\n",
- topic_cnt, not_topic_cnt);
-
- rd_kafka_metadata_destroy(md);
-
- return fails;
-}
-
-
-
-/**
- * @brief Wait for metadata to reflect expected and not expected topics
- */
-void test_wait_metadata_update(rd_kafka_t *rk,
- rd_kafka_metadata_topic_t *topics,
- size_t topic_cnt,
- rd_kafka_metadata_topic_t *not_topics,
- size_t not_topic_cnt,
- int tmout) {
- int64_t abs_timeout;
- test_timing_t t_md;
- rd_kafka_t *our_rk = NULL;
-
- if (!rk)
- rk = our_rk = test_create_handle(RD_KAFKA_PRODUCER, NULL);
-
- abs_timeout = test_clock() + ((int64_t)tmout * 1000);
-
- TEST_SAY("Waiting for up to %dms for metadata update\n", tmout);
-
- TIMING_START(&t_md, "METADATA.WAIT");
- do {
- int md_fails;
-
- md_fails = verify_topics_in_metadata(rk, topics, topic_cnt,
- not_topics, not_topic_cnt);
-
- if (!md_fails) {
- TEST_SAY(
- "All expected topics (not?) "
- "seen in metadata\n");
- abs_timeout = 0;
- break;
- }
-
- rd_sleep(1);
- } while (test_clock() < abs_timeout);
- TIMING_STOP(&t_md);
-
- if (our_rk)
- rd_kafka_destroy(our_rk);
-
- if (abs_timeout)
- TEST_FAIL("Expected topics not seen in given time.");
-}
-
-/**
- * @brief Wait for topic to be available in metadata
- */
-void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout) {
- rd_kafka_metadata_topic_t topics = {.topic = (char *)topic};
-
- test_wait_metadata_update(rk, &topics, 1, NULL, 0, tmout);
-
- /* Wait an additional second for the topic to propagate in
- * the cluster. This is not perfect but a cheap workaround for
- * the asynchronous nature of topic creations in Kafka. */
- rd_sleep(1);
-}
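-
-/*
- * Illustrative sketch (not part of the original file): the common
- * create-then-wait pattern used throughout the tests.
- */
-static void example_create_and_wait(rd_kafka_t *rk, const char *topic) {
- test_create_topic(rk, topic, 4, 1);
- test_wait_topic_exists(rk, topic, tmout_multip(10000));
-}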
-
-
-
-/**
- * @brief Wait for up to \p tmout for any type of admin result.
- * @returns the event
- */
-rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
- rd_kafka_event_type_t evtype,
- int tmout) {
- rd_kafka_event_t *rkev;
-
- while (1) {
- rkev = rd_kafka_queue_poll(q, tmout);
- if (!rkev)
- TEST_FAIL("Timed out waiting for admin result (%d)\n",
- evtype);
-
- if (rd_kafka_event_type(rkev) == evtype)
- return rkev;
-
-
- if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) {
- TEST_WARN(
- "Received error event while waiting for %d: "
- "%s: ignoring",
- evtype, rd_kafka_event_error_string(rkev));
- continue;
- }
-
-
- TEST_ASSERT(rd_kafka_event_type(rkev) == evtype,
- "Expected event type %d, got %d (%s)", evtype,
- rd_kafka_event_type(rkev),
- rd_kafka_event_name(rkev));
- }
-
- return NULL;
-}
-
-/**
- * @brief Wait for up to \p tmout for an admin API result and return the
- * distilled error code.
- *
- * Supported APIs:
- * - AlterConfigs
- * - CreatePartitions
- * - CreateTopics
- * - DeleteGroups
- * - DeleteRecords
- * - DeleteTopics
- * - DeleteConsumerGroupOffsets
- * - DescribeConfigs
- * - CreateAcls
- */
-rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
- rd_kafka_event_type_t evtype,
- rd_kafka_event_t **retevent,
- int tmout) {
- rd_kafka_event_t *rkev;
- size_t i;
- const rd_kafka_topic_result_t **terr = NULL;
- size_t terr_cnt = 0;
- const rd_kafka_ConfigResource_t **cres = NULL;
- size_t cres_cnt = 0;
- const rd_kafka_acl_result_t **aclres = NULL;
- size_t aclres_cnt = 0;
- int errcnt = 0;
- rd_kafka_resp_err_t err;
- const rd_kafka_group_result_t **gres = NULL;
- size_t gres_cnt = 0;
- const rd_kafka_ConsumerGroupDescription_t **gdescs = NULL;
- size_t gdescs_cnt = 0;
- const rd_kafka_error_t **glists_errors = NULL;
- size_t glists_error_cnt = 0;
- const rd_kafka_topic_partition_list_t *offsets = NULL;
-
- rkev = test_wait_admin_result(q, evtype, tmout);
-
- if ((err = rd_kafka_event_error(rkev))) {
- TEST_WARN("%s failed: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
- rd_kafka_event_destroy(rkev);
- return err;
- }
-
- if (evtype == RD_KAFKA_EVENT_CREATETOPICS_RESULT) {
- const rd_kafka_CreateTopics_result_t *res;
- if (!(res = rd_kafka_event_CreateTopics_result(rkev)))
- TEST_FAIL("Expected a CreateTopics result, not %s",
- rd_kafka_event_name(rkev));
-
- terr = rd_kafka_CreateTopics_result_topics(res, &terr_cnt);
-
- } else if (evtype == RD_KAFKA_EVENT_DELETETOPICS_RESULT) {
- const rd_kafka_DeleteTopics_result_t *res;
- if (!(res = rd_kafka_event_DeleteTopics_result(rkev)))
- TEST_FAIL("Expected a DeleteTopics result, not %s",
- rd_kafka_event_name(rkev));
-
- terr = rd_kafka_DeleteTopics_result_topics(res, &terr_cnt);
-
- } else if (evtype == RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) {
- const rd_kafka_CreatePartitions_result_t *res;
- if (!(res = rd_kafka_event_CreatePartitions_result(rkev)))
- TEST_FAIL("Expected a CreatePartitions result, not %s",
- rd_kafka_event_name(rkev));
-
- terr = rd_kafka_CreatePartitions_result_topics(res, &terr_cnt);
-
- } else if (evtype == RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT) {
- const rd_kafka_DescribeConfigs_result_t *res;
-
- if (!(res = rd_kafka_event_DescribeConfigs_result(rkev)))
- TEST_FAIL("Expected a DescribeConfigs result, not %s",
- rd_kafka_event_name(rkev));
-
- cres =
- rd_kafka_DescribeConfigs_result_resources(res, &cres_cnt);
-
- } else if (evtype == RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) {
- const rd_kafka_AlterConfigs_result_t *res;
-
- if (!(res = rd_kafka_event_AlterConfigs_result(rkev)))
- TEST_FAIL("Expected a AlterConfigs result, not %s",
- rd_kafka_event_name(rkev));
-
- cres = rd_kafka_AlterConfigs_result_resources(res, &cres_cnt);
-
- } else if (evtype == RD_KAFKA_EVENT_CREATEACLS_RESULT) {
- const rd_kafka_CreateAcls_result_t *res;
-
- if (!(res = rd_kafka_event_CreateAcls_result(rkev)))
- TEST_FAIL("Expected a CreateAcls result, not %s",
- rd_kafka_event_name(rkev));
-
- aclres = rd_kafka_CreateAcls_result_acls(res, &aclres_cnt);
- } else if (evtype == RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
- const rd_kafka_ListConsumerGroups_result_t *res;
- if (!(res = rd_kafka_event_ListConsumerGroups_result(rkev)))
- TEST_FAIL(
- "Expected a ListConsumerGroups result, not %s",
- rd_kafka_event_name(rkev));
-
- glists_errors = rd_kafka_ListConsumerGroups_result_errors(
- res, &glists_error_cnt);
- } else if (evtype == RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) {
- const rd_kafka_DescribeConsumerGroups_result_t *res;
- if (!(res = rd_kafka_event_DescribeConsumerGroups_result(rkev)))
- TEST_FAIL(
- "Expected a DescribeConsumerGroups result, not %s",
- rd_kafka_event_name(rkev));
-
- gdescs = rd_kafka_DescribeConsumerGroups_result_groups(
- res, &gdescs_cnt);
- } else if (evtype == RD_KAFKA_EVENT_DELETEGROUPS_RESULT) {
- const rd_kafka_DeleteGroups_result_t *res;
- if (!(res = rd_kafka_event_DeleteGroups_result(rkev)))
- TEST_FAIL("Expected a DeleteGroups result, not %s",
- rd_kafka_event_name(rkev));
-
- gres = rd_kafka_DeleteGroups_result_groups(res, &gres_cnt);
-
- } else if (evtype == RD_KAFKA_EVENT_DELETERECORDS_RESULT) {
- const rd_kafka_DeleteRecords_result_t *res;
- if (!(res = rd_kafka_event_DeleteRecords_result(rkev)))
- TEST_FAIL("Expected a DeleteRecords result, not %s",
- rd_kafka_event_name(rkev));
-
- offsets = rd_kafka_DeleteRecords_result_offsets(res);
-
- } else if (evtype == RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) {
- const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
- if (!(res = rd_kafka_event_DeleteConsumerGroupOffsets_result(
- rkev)))
- TEST_FAIL(
- "Expected a DeleteConsumerGroupOffsets "
- "result, not %s",
- rd_kafka_event_name(rkev));
-
- gres = rd_kafka_DeleteConsumerGroupOffsets_result_groups(
- rkev, &gres_cnt);
-
- } else {
- TEST_FAIL("Bad evtype: %d", evtype);
- RD_NOTREACHED();
- }
-
- /* Check topic errors */
- for (i = 0; i < terr_cnt; i++) {
- if (rd_kafka_topic_result_error(terr[i])) {
- TEST_WARN("..Topics result: %s: error: %s\n",
- rd_kafka_topic_result_name(terr[i]),
- rd_kafka_topic_result_error_string(terr[i]));
- if (!(errcnt++))
- err = rd_kafka_topic_result_error(terr[i]);
- }
- }
-
- /* Check resource errors */
- for (i = 0; i < cres_cnt; i++) {
- if (rd_kafka_ConfigResource_error(cres[i])) {
- TEST_WARN(
- "ConfigResource result: %d,%s: error: %s\n",
- rd_kafka_ConfigResource_type(cres[i]),
- rd_kafka_ConfigResource_name(cres[i]),
- rd_kafka_ConfigResource_error_string(cres[i]));
- if (!(errcnt++))
- err = rd_kafka_ConfigResource_error(cres[i]);
- }
- }
-
- /* Check ACL errors */
- for (i = 0; i < aclres_cnt; i++) {
- const rd_kafka_error_t *error =
- rd_kafka_acl_result_error(aclres[i]);
- if (error) {
- TEST_WARN("AclResult error: %s: %s\n",
- rd_kafka_error_name(error),
- rd_kafka_error_string(error));
- if (!(errcnt++))
- err = rd_kafka_error_code(error);
- }
- }
-
- /* Check list groups errors */
- for (i = 0; i < glists_error_cnt; i++) {
- const rd_kafka_error_t *error = glists_errors[i];
- TEST_WARN("%s error: %s\n", rd_kafka_event_name(rkev),
- rd_kafka_error_string(error));
- if (!(errcnt++))
- err = rd_kafka_error_code(error);
- }
-
- /* Check describe groups errors */
- for (i = 0; i < gdescs_cnt; i++) {
- const rd_kafka_error_t *error;
- if ((error =
- rd_kafka_ConsumerGroupDescription_error(gdescs[i]))) {
- TEST_WARN("%s result: %s: error: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_ConsumerGroupDescription_group_id(
- gdescs[i]),
- rd_kafka_error_string(error));
- if (!(errcnt++))
- err = rd_kafka_error_code(error);
- }
- }
-
- /* Check group errors */
- for (i = 0; i < gres_cnt; i++) {
- const rd_kafka_topic_partition_list_t *parts;
-
- if (rd_kafka_group_result_error(gres[i])) {
-
- TEST_WARN("%s result: %s: error: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_group_result_name(gres[i]),
- rd_kafka_error_string(
- rd_kafka_group_result_error(gres[i])));
- if (!(errcnt++))
- err = rd_kafka_error_code(
- rd_kafka_group_result_error(gres[i]));
- }
-
- parts = rd_kafka_group_result_partitions(gres[i]);
- if (parts) {
- int j;
- for (j = 0; j < parts->cnt; i++) {
- if (!parts->elems[j].err)
- continue;
-
- TEST_WARN(
- "%s result: %s: "
- "%s [%" PRId32 "] error: %s\n",
- rd_kafka_event_name(rkev),
- rd_kafka_group_result_name(gres[i]),
- parts->elems[j].topic,
- parts->elems[j].partition,
- rd_kafka_err2str(parts->elems[j].err));
- errcnt++;
- }
- }
- }
-
- /* Check offset errors */
- for (i = 0; (offsets && i < (size_t)offsets->cnt); i++) {
- if (offsets->elems[i].err) {
- TEST_WARN("DeleteRecords result: %s [%d]: error: %s\n",
- offsets->elems[i].topic,
- offsets->elems[i].partition,
- rd_kafka_err2str(offsets->elems[i].err));
- if (!(errcnt++))
- err = offsets->elems[i].err;
- }
- }
-
- if (!err && retevent)
- *retevent = rkev;
- else
- rd_kafka_event_destroy(rkev);
-
- return err;
-}
-
-
-
-/**
- * @brief Topic Admin API helpers
- *
- * @param useq Makes the call async and posts the response in this queue.
- * If NULL this call will be synchronous and return the error
- * result.
- *
- * @remark Fails the current test on failure.
- */
-
-rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- char **topics,
- size_t topic_cnt,
- int num_partitions,
- void *opaque) {
- rd_kafka_NewTopic_t **new_topics;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *q;
- size_t i;
- const int tmout = 30 * 1000;
- rd_kafka_resp_err_t err;
-
- new_topics = malloc(sizeof(*new_topics) * topic_cnt);
-
- for (i = 0; i < topic_cnt; i++) {
- char errstr[512];
- new_topics[i] = rd_kafka_NewTopic_new(
- topics[i], num_partitions, 1, errstr, sizeof(errstr));
- TEST_ASSERT(new_topics[i],
- "Failed to NewTopic(\"%s\", %d) #%" PRIusz ": %s",
- topics[i], num_partitions, i, errstr);
- }
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- char errstr[512];
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, tmout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, tmout - 5000, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
-
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- TEST_SAY("Creating %" PRIusz " topics\n", topic_cnt);
-
- rd_kafka_CreateTopics(rk, new_topics, topic_cnt, options, q);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_NewTopic_destroy_array(new_topics, topic_cnt);
- free(new_topics);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to create %d topic(s): %s", (int)topic_cnt,
- rd_kafka_err2str(err));
-
- return err;
-}
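-
-/*
- * Illustrative sketch (not part of the original file): synchronous use of
- * the _simple helpers; passing useq = NULL blocks until the admin result
- * arrives. Topic names are made up.
- */
-static void example_create_delete_simple(rd_kafka_t *rk) {
- char *topics[] = {"rdkafkatest_simple_a", "rdkafkatest_simple_b"};
-
- test_CreateTopics_simple(rk, NULL, topics, 2, 1 /* partition */, NULL);
- test_DeleteTopics_simple(rk, NULL, topics, 2, NULL);
-}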
-
-
-rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- const char *topic,
- size_t total_part_cnt,
- void *opaque) {
- rd_kafka_NewPartitions_t *newp[1];
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *q;
- const int tmout = 30 * 1000;
- rd_kafka_resp_err_t err;
- char errstr[512];
-
- newp[0] = rd_kafka_NewPartitions_new(topic, total_part_cnt, errstr,
- sizeof(errstr));
- TEST_ASSERT(newp[0], "Failed to NewPartitions(\"%s\", %" PRIusz "): %s",
- topic, total_part_cnt, errstr);
-
- options =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, tmout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, tmout - 5000, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
-
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- TEST_SAY("Creating (up to) %" PRIusz " partitions for topic \"%s\"\n",
- total_part_cnt, topic);
-
- rd_kafka_CreatePartitions(rk, newp, 1, options, q);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_NewPartitions_destroy(newp[0]);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to create partitions: %s",
- rd_kafka_err2str(err));
-
- return err;
-}
-
-
-rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- char **topics,
- size_t topic_cnt,
- void *opaque) {
- rd_kafka_queue_t *q;
- rd_kafka_DeleteTopic_t **del_topics;
- rd_kafka_AdminOptions_t *options;
- size_t i;
- rd_kafka_resp_err_t err;
- const int tmout = 30 * 1000;
-
- del_topics = malloc(sizeof(*del_topics) * topic_cnt);
-
- for (i = 0; i < topic_cnt; i++) {
- del_topics[i] = rd_kafka_DeleteTopic_new(topics[i]);
- TEST_ASSERT(del_topics[i]);
- }
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- char errstr[512];
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, tmout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, tmout - 5000, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
-
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- TEST_SAY("Deleting %" PRIusz " topics\n", topic_cnt);
-
- rd_kafka_DeleteTopics(rk, del_topics, topic_cnt, options, q);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_DeleteTopic_destroy_array(del_topics, topic_cnt);
-
- free(del_topics);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_DELETETOPICS_RESULT, NULL, tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to delete topics: %s", rd_kafka_err2str(err));
-
- return err;
-}
-
-rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- char **groups,
- size_t group_cnt,
- void *opaque) {
- rd_kafka_queue_t *q;
- rd_kafka_DeleteGroup_t **del_groups;
- rd_kafka_AdminOptions_t *options;
- size_t i;
- rd_kafka_resp_err_t err;
- const int tmout = 30 * 1000;
-
- del_groups = malloc(sizeof(*del_groups) * group_cnt);
-
- for (i = 0; i < group_cnt; i++) {
- del_groups[i] = rd_kafka_DeleteGroup_new(groups[i]);
- TEST_ASSERT(del_groups[i]);
- }
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- char errstr[512];
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, tmout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
-
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- TEST_SAY("Deleting %" PRIusz " groups\n", group_cnt);
-
- rd_kafka_DeleteGroups(rk, del_groups, group_cnt, options, q);
-
- rd_kafka_AdminOptions_destroy(options);
-
- rd_kafka_DeleteGroup_destroy_array(del_groups, group_cnt);
- free(del_groups);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, NULL, tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to delete groups: %s", rd_kafka_err2str(err));
-
- return err;
-}
-
-rd_kafka_resp_err_t
-test_DeleteRecords_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- const rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_resp_err_t err;
- rd_kafka_DeleteRecords_t *del_records =
- rd_kafka_DeleteRecords_new(offsets);
- const int tmout = 30 * 1000;
-
- options =
- rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- char errstr[512];
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, tmout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, tmout - 5000, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
-
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- TEST_SAY("Deleting offsets from %d partitions\n", offsets->cnt);
-
- rd_kafka_DeleteRecords(rk, &del_records, 1, options, q);
-
- rd_kafka_DeleteRecords_destroy(del_records);
-
- rd_kafka_AdminOptions_destroy(options);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_DELETERECORDS_RESULT, NULL, tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to delete records: %s",
- rd_kafka_err2str(err));
-
- return err;
-}
-
-rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple(
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- const char *group_id,
- const rd_kafka_topic_partition_list_t *offsets,
- void *opaque) {
- rd_kafka_queue_t *q;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_resp_err_t err;
- const int tmout = 30 * 1000;
- rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets;
-
- options = rd_kafka_AdminOptions_new(
- rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- char errstr[512];
-
- err = rd_kafka_AdminOptions_set_request_timeout(
- options, tmout, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
- err = rd_kafka_AdminOptions_set_operation_timeout(
- options, tmout - 5000, errstr, sizeof(errstr));
- TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
-
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- if (offsets) {
- TEST_SAY(
- "Deleting committed offsets for group %s and "
- "%d partitions\n",
- group_id, offsets->cnt);
-
- cgoffsets =
- rd_kafka_DeleteConsumerGroupOffsets_new(group_id, offsets);
- } else {
- TEST_SAY("Provoking invalid DeleteConsumerGroupOffsets call\n");
- cgoffsets = NULL;
- }
-
- rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, cgoffsets ? 1 : 0,
- options, useq);
-
- if (cgoffsets)
- rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets);
-
- rd_kafka_AdminOptions_destroy(options);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, NULL,
- tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to delete committed offsets: %s",
- rd_kafka_err2str(err));
-
- return err;
-}
-
-/**
- * @brief Delta Alter configuration for the given resource,
- * overwriting/setting the configs provided in \p configs.
- * Existing configuration remains intact.
- *
- * @param configs 'const char *name, const char *value' tuples
- * @param config_cnt is the number of strings in \p configs (two per tuple)
- */
-rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk,
- rd_kafka_ResourceType_t restype,
- const char *resname,
- const char **configs,
- size_t config_cnt) {
- rd_kafka_queue_t *q;
- rd_kafka_ConfigResource_t *confres;
- rd_kafka_event_t *rkev;
- size_t i;
- rd_kafka_resp_err_t err;
- const rd_kafka_ConfigResource_t **results;
- size_t result_cnt;
- const rd_kafka_ConfigEntry_t **configents;
- size_t configent_cnt;
-
-
- q = rd_kafka_queue_new(rk);
-
- TEST_SAY("Getting configuration for %d %s\n", restype, resname);
-
- confres = rd_kafka_ConfigResource_new(restype, resname);
- rd_kafka_DescribeConfigs(rk, &confres, 1, NULL, q);
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &rkev, 15 * 1000);
- if (err) {
- rd_kafka_queue_destroy(q);
- rd_kafka_ConfigResource_destroy(confres);
- return err;
- }
-
- results = rd_kafka_DescribeConfigs_result_resources(
- rd_kafka_event_DescribeConfigs_result(rkev), &result_cnt);
- TEST_ASSERT(result_cnt == 1,
- "expected 1 DescribeConfigs result, not %" PRIusz,
- result_cnt);
-
- configents =
- rd_kafka_ConfigResource_configs(results[0], &configent_cnt);
- TEST_ASSERT(configent_cnt > 0,
- "expected > 0 ConfigEntry:s, not %" PRIusz, configent_cnt);
-
- TEST_SAY("Altering configuration for %d %s\n", restype, resname);
-
- /* Apply all existing configuration entries to resource object that
- * will later be passed to AlterConfigs. */
- for (i = 0; i < configent_cnt; i++) {
- const char *entry_name =
- rd_kafka_ConfigEntry_name(configents[i]);
-
- if (test_broker_version >= TEST_BRKVER(3, 2, 0, 0)) {
- /* Skip entries that are overwritten to
- * avoid duplicates, that cause an error since
- * this broker version. */
- size_t j;
- for (j = 0; j < config_cnt; j += 2) {
- if (!strcmp(configs[j], entry_name)) {
- break;
- }
- }
-
- if (j < config_cnt)
- continue;
- }
-
- err = rd_kafka_ConfigResource_set_config(
- confres, entry_name,
- rd_kafka_ConfigEntry_value(configents[i]));
- TEST_ASSERT(!err,
- "Failed to set read-back config %s=%s "
- "on local resource object",
- entry_name,
- rd_kafka_ConfigEntry_value(configents[i]));
- }
-
- rd_kafka_event_destroy(rkev);
-
- /* Then apply the configuration to change. */
- for (i = 0; i < config_cnt; i += 2) {
- err = rd_kafka_ConfigResource_set_config(confres, configs[i],
- configs[i + 1]);
- TEST_ASSERT(!err,
- "Failed to set config %s=%s on "
- "local resource object",
- configs[i], configs[i + 1]);
- }
-
- rd_kafka_AlterConfigs(rk, &confres, 1, NULL, q);
-
- rd_kafka_ConfigResource_destroy(confres);
-
- err = test_wait_topic_admin_result(
- q, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, NULL, 15 * 1000);
-
- rd_kafka_queue_destroy(q);
-
- return err;
-}
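-
-/*
- * Illustrative sketch (not part of the original file): delta-altering a
- * topic's retention with the helper above; note that config_cnt counts
- * strings, not tuples. The value is arbitrary.
- */
-static void example_set_retention(rd_kafka_t *rk, const char *topic) {
- const char *confs[] = {"retention.ms", "3600000"};
-
- test_AlterConfigs_simple(rk, RD_KAFKA_RESOURCE_TOPIC, topic, confs, 2);
-}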
-
-/**
- * @brief Topic Admin API helpers
- *
- * @param useq Makes the call async and posts the response in this queue.
- * If NULL this call will be synchronous and return the error
- * result.
- *
- * @remark Fails the current test on failure.
- */
-
-rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- rd_kafka_AclBinding_t **acls,
- size_t acl_cnt,
- void *opaque) {
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *q;
- rd_kafka_resp_err_t err;
- const int tmout = 30 * 1000;
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS);
- rd_kafka_AdminOptions_set_opaque(options, opaque);
-
- if (!useq) {
- q = rd_kafka_queue_new(rk);
- } else {
- q = useq;
- }
-
- TEST_SAY("Creating %" PRIusz " acls\n", acl_cnt);
-
- rd_kafka_CreateAcls(rk, acls, acl_cnt, options, q);
-
- rd_kafka_AdminOptions_destroy(options);
-
- if (useq)
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-
- err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_CREATEACLS_RESULT,
- NULL, tmout + 5000);
-
- rd_kafka_queue_destroy(q);
-
- if (err)
- TEST_FAIL("Failed to create %d acl(s): %s", (int)acl_cnt,
- rd_kafka_err2str(err));
-
- return err;
-}
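-
-/*
- * Illustrative sketch (not part of the original file): building a single
- * ACL binding and creating it synchronously. Principal and host values are
- * made up.
- */
-static void example_create_acl(rd_kafka_t *rk, const char *topic) {
- char errstr[256];
- rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
- RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
- "User:test", "*", RD_KAFKA_ACL_OPERATION_READ,
- RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
-
- TEST_ASSERT(acl, "%s", errstr);
- test_CreateAcls_simple(rk, NULL, &acl, 1, NULL);
- rd_kafka_AclBinding_destroy(acl);
-}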
-
-static void test_free_string_array(char **strs, size_t cnt) {
- size_t i;
- for (i = 0; i < cnt; i++)
- free(strs[i]);
- free(strs);
-}
-
-
-/**
- * @return an array of all topics in the cluster matching the
- * rdkafka test prefix.
- */
-static rd_kafka_resp_err_t
-test_get_all_test_topics(rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) {
- size_t test_topic_prefix_len = strlen(test_topic_prefix);
- const rd_kafka_metadata_t *md;
- char **topics = NULL;
- size_t topic_cnt = 0;
- int i;
- rd_kafka_resp_err_t err;
-
- *topic_cntp = 0;
- if (topicsp)
- *topicsp = NULL;
-
- /* Retrieve list of topics */
- err = rd_kafka_metadata(rk, 1 /*all topics*/, NULL, &md,
- tmout_multip(10000));
- if (err) {
- TEST_WARN(
- "%s: Failed to acquire metadata: %s: "
- "not deleting any topics\n",
- __FUNCTION__, rd_kafka_err2str(err));
- return err;
- }
-
- if (md->topic_cnt == 0) {
- TEST_WARN("%s: No topics in cluster\n", __FUNCTION__);
- rd_kafka_metadata_destroy(md);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- if (topicsp)
- topics = malloc(sizeof(*topics) * md->topic_cnt);
-
- for (i = 0; i < md->topic_cnt; i++) {
- if (strlen(md->topics[i].topic) >= test_topic_prefix_len &&
- !strncmp(md->topics[i].topic, test_topic_prefix,
- test_topic_prefix_len)) {
- if (topicsp)
- topics[topic_cnt++] =
- rd_strdup(md->topics[i].topic);
- else
- topic_cnt++;
- }
- }
-
- if (topic_cnt == 0) {
- TEST_SAY(
- "%s: No topics (out of %d) matching our "
- "test prefix (%s)\n",
- __FUNCTION__, md->topic_cnt, test_topic_prefix);
- rd_kafka_metadata_destroy(md);
- if (topics)
- test_free_string_array(topics, topic_cnt);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_kafka_metadata_destroy(md);
-
- if (topicsp)
- *topicsp = topics;
- *topic_cntp = topic_cnt;
-
- return RD_KAFKA_RESP_ERR_NO_ERROR;
-}
-
-/**
- * @brief Delete all test topics using the Kafka Admin API.
- */
-rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms) {
- rd_kafka_t *rk;
- char **topics;
- size_t topic_cnt = 0;
- rd_kafka_resp_err_t err;
- int i;
- rd_kafka_AdminOptions_t *options;
- rd_kafka_queue_t *q;
- char errstr[256];
- int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
-
- rk = test_create_producer();
-
- err = test_get_all_test_topics(rk, &topics, &topic_cnt);
- if (err) {
- /* Error already reported by test_get_all_test_topics() */
- rd_kafka_destroy(rk);
- return err;
- }
-
- if (topic_cnt == 0) {
- rd_kafka_destroy(rk);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- q = rd_kafka_queue_get_main(rk);
-
- options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
- if (rd_kafka_AdminOptions_set_operation_timeout(options, 2 * 60 * 1000,
- errstr, sizeof(errstr)))
- TEST_SAY(_C_YEL
- "Failed to set DeleteTopics timeout: %s: "
- "ignoring\n",
- errstr);
-
- TEST_SAY(_C_MAG
- "====> Deleting all test topics "
- "with a timeout of 2 minutes <====\n");
-
- test_DeleteTopics_simple(rk, q, topics, topic_cnt, options);
-
- rd_kafka_AdminOptions_destroy(options);
-
- while (1) {
- rd_kafka_event_t *rkev;
- const rd_kafka_DeleteTopics_result_t *res;
-
- rkev = rd_kafka_queue_poll(q, -1);
-
- res = rd_kafka_event_DeleteTopics_result(rkev);
- if (!res) {
- TEST_SAY("%s: Ignoring event: %s: %s\n", __FUNCTION__,
- rd_kafka_event_name(rkev),
- rd_kafka_event_error_string(rkev));
- rd_kafka_event_destroy(rkev);
- continue;
- }
-
- if (rd_kafka_event_error(rkev)) {
- TEST_WARN("%s: DeleteTopics for %" PRIusz
- " topics "
- "failed: %s\n",
- __FUNCTION__, topic_cnt,
- rd_kafka_event_error_string(rkev));
- err = rd_kafka_event_error(rkev);
- } else {
- const rd_kafka_topic_result_t **terr;
- size_t tcnt;
- int okcnt = 0;
-
- terr = rd_kafka_DeleteTopics_result_topics(res, &tcnt);
-
- for (i = 0; i < (int)tcnt; i++) {
- if (!rd_kafka_topic_result_error(terr[i])) {
- okcnt++;
- continue;
- }
-
- TEST_WARN("%s: Failed to delete topic %s: %s\n",
- __FUNCTION__,
- rd_kafka_topic_result_name(terr[i]),
- rd_kafka_topic_result_error_string(
- terr[i]));
- }
-
- TEST_SAY(
- "%s: DeleteTopics "
- "succeeded for %d/%" PRIusz " topics\n",
- __FUNCTION__, okcnt, topic_cnt);
- err = RD_KAFKA_RESP_ERR_NO_ERROR;
- }
-
- rd_kafka_event_destroy(rkev);
- break;
- }
-
- rd_kafka_queue_destroy(q);
-
- test_free_string_array(topics, topic_cnt);
-
- /* Wait for topics to be fully deleted */
- while (1) {
- err = test_get_all_test_topics(rk, NULL, &topic_cnt);
-
- if (!err && topic_cnt == 0)
- break;
-
- if (abs_timeout < test_clock()) {
- TEST_WARN(
- "%s: Timed out waiting for "
- "remaining %" PRIusz
- " deleted topics "
- "to disappear from cluster metadata\n",
- __FUNCTION__, topic_cnt);
- break;
- }
-
- TEST_SAY("Waiting for remaining %" PRIusz
- " delete topics "
- "to disappear from cluster metadata\n",
- topic_cnt);
-
- rd_sleep(1);
- }
-
- rd_kafka_destroy(rk);
-
- return err;
-}
-
-
-
-void test_fail0(const char *file,
- int line,
- const char *function,
- int do_lock,
- int fail_now,
- const char *fmt,
- ...) {
- char buf[512];
- int is_thrd = 0;
- size_t of;
- va_list ap;
- char *t;
- char timestr[32];
- time_t tnow = time(NULL);
-
-#ifdef __MINGW32__
- strftime(timestr, sizeof(timestr), "%a %b %d %H:%M:%S %Y",
- localtime(&tnow));
-#elif defined(_WIN32)
- ctime_s(timestr, sizeof(timestr), &tnow);
-#else
- ctime_r(&tnow, timestr);
-#endif
- t = strchr(timestr, '\n');
- if (t)
- *t = '\0';
-
- of = rd_snprintf(buf, sizeof(buf), "%s%s%s():%i: ", test_curr->subtest,
- *test_curr->subtest ? ": " : "", function, line);
- rd_assert(of < sizeof(buf));
-
- va_start(ap, fmt);
- rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap);
- va_end(ap);
-
- /* Remove trailing newline */
- if ((t = strchr(buf, '\n')) && !*(t + 1))
- *t = '\0';
-
- TEST_SAYL(0, "TEST FAILURE\n");
- fprintf(stderr,
- "\033[31m### Test \"%s%s%s%s\" failed at %s:%i:%s() at %s: "
- "###\n"
- "%s\n",
- test_curr->name, *test_curr->subtest ? " (" : "",
- test_curr->subtest, *test_curr->subtest ? ")" : "", file, line,
- function, timestr, buf + of);
- if (do_lock)
- TEST_LOCK();
- test_curr->state = TEST_FAILED;
- test_curr->failcnt += 1;
- test_curr->is_fatal_cb = NULL;
-
- if (!*test_curr->failstr) {
- strncpy(test_curr->failstr, buf, sizeof(test_curr->failstr));
- test_curr->failstr[sizeof(test_curr->failstr) - 1] = '\0';
- }
- if (fail_now && test_curr->mainfunc) {
- tests_running_cnt--;
- is_thrd = 1;
- }
- if (do_lock)
- TEST_UNLOCK();
- if (!fail_now)
- return;
- if (test_assert_on_fail || !is_thrd)
- assert(0);
- else
- thrd_exit(0);
-}
-
-
-/**
- * @brief Destroy a mock cluster and its underlying rd_kafka_t handle
- */
-void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) {
- rd_kafka_t *rk = rd_kafka_mock_cluster_handle(mcluster);
- rd_kafka_mock_cluster_destroy(mcluster);
- rd_kafka_destroy(rk);
-}
-
-
-
-/**
- * @brief Create a standalone mock cluster that can be used by multiple
- * rd_kafka_t instances.
- */
-rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt,
- const char **bootstraps) {
- rd_kafka_t *rk;
- rd_kafka_conf_t *conf = rd_kafka_conf_new();
- rd_kafka_mock_cluster_t *mcluster;
- char errstr[256];
-
- test_conf_common_init(conf, 0);
-
- test_conf_set(conf, "client.id", "MOCK");
-
- rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
- TEST_ASSERT(rk, "Failed to create mock cluster rd_kafka_t: %s", errstr);
-
- mcluster = rd_kafka_mock_cluster_new(rk, broker_cnt);
- TEST_ASSERT(mcluster, "Failed to acquire mock cluster");
-
- if (bootstraps)
- *bootstraps = rd_kafka_mock_cluster_bootstraps(mcluster);
-
- return mcluster;
-}
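A minimal usage sketch for the mock-cluster pair above (illustrative; the producer setup is an assumption, not taken from this file). Clients are pointed at the cluster via the returned bootstrap address and must be destroyed before the cluster itself:

    const char *bootstraps;
    rd_kafka_mock_cluster_t *mcluster = test_mock_cluster_new(3, &bootstraps);
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    char errstr[256];

    /* Point an ordinary client at the mock brokers. */
    rd_kafka_conf_set(conf, "bootstrap.servers", bootstraps,
                      errstr, sizeof(errstr));
    rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                  errstr, sizeof(errstr));

    /* ... produce/consume against the mock cluster ... */

    rd_kafka_destroy(rk);                /* clients first */
    test_mock_cluster_destroy(mcluster); /* then the cluster */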
-
-
-
-/**
- * @name Sub-tests
- */
-
-
-/**
- * @brief Start a sub-test. \p fmt is optional and allows additional
- * sub-test info to be displayed, e.g., test parameters.
- *
- * @returns 0 if sub-test should not be run, else 1.
- */
-int test_sub_start(const char *func,
- int line,
- int is_quick,
- const char *fmt,
- ...) {
-
- if (!is_quick && test_quick)
- return 0;
-
- if (fmt && *fmt) {
- va_list ap;
- char buf[256];
-
- va_start(ap, fmt);
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- rd_snprintf(test_curr->subtest, sizeof(test_curr->subtest),
- "%s:%d: %s", func, line, buf);
- } else {
- rd_snprintf(test_curr->subtest, sizeof(test_curr->subtest),
- "%s:%d", func, line);
- }
-
- if (subtests_to_run && !strstr(test_curr->subtest, subtests_to_run)) {
- *test_curr->subtest = '\0';
- return 0;
- }
-
- test_curr->subtest_quick = is_quick;
-
- TIMING_START(&test_curr->subtest_duration, "SUBTEST");
-
- TEST_SAY(_C_MAG "[ %s ]\n", test_curr->subtest);
-
- return 1;
-}
-
-
-/**
- * @brief Reset the current subtest state.
- */
-static void test_sub_reset(void) {
- *test_curr->subtest = '\0';
- test_curr->is_fatal_cb = NULL;
- test_curr->ignore_dr_err = rd_false;
- test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
- /* Don't check msg status by default */
- test_curr->exp_dr_status = (rd_kafka_msg_status_t)-1;
- test_curr->dr_mv = NULL;
-}
-
-/**
- * @brief Sub-test has passed.
- */
-void test_sub_pass(void) {
-
- TEST_ASSERT(*test_curr->subtest);
-
- TEST_SAYL(1, _C_GRN "[ %s: PASS (%.02fs) ]\n", test_curr->subtest,
- (float)(TIMING_DURATION(&test_curr->subtest_duration) /
- 1000000.0f));
-
- if (test_curr->subtest_quick && test_quick && !test_on_ci &&
- TIMING_DURATION(&test_curr->subtest_duration) > 45 * 1000 * 1000)
- TEST_WARN(
- "Subtest %s marked as QUICK but took %.02fs to "
- "finish: either fix the test or "
- "remove the _QUICK identifier (limit is 45s)\n",
- test_curr->subtest,
- (float)(TIMING_DURATION(&test_curr->subtest_duration) /
- 1000000.0f));
-
- test_sub_reset();
-}
-
-
-/**
- * @brief Skip sub-test (must have been started with SUB_TEST*()).
- */
-void test_sub_skip(const char *fmt, ...) {
- va_list ap;
- char buf[256];
-
- TEST_ASSERT(*test_curr->subtest);
-
- va_start(ap, fmt);
- rd_vsnprintf(buf, sizeof(buf), fmt, ap);
- va_end(ap);
-
- TEST_SAYL(1, _C_YEL "[ %s: SKIP: %s ]\n", test_curr->subtest, buf);
-
- test_sub_reset();
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example b/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example
deleted file mode 100644
index dea4a09f6..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copy this file to test.conf and set up according to your configuration.
-
-#
-# Test configuration
-#
-# For slow connections: multiply test timeouts by this much (float)
-#test.timeout.multiplier=3.5
-
-# Test topic names are constructed as:
-# <prefix>_<suffix>, where the default topic prefix is "rdkafkatest".
-# The suffix is specified by each test.
-#test.topic.prefix=bib
-
-# Make topic names random:
-# <prefix>_<randomnumber>_<suffix>
-#test.topic.random=true
-
-# Write test results to sqlite3 database
-#test.sql.command=sqlite3 rdktests
-
-# Bootstrap broker(s)
-metadata.broker.list=localhost:9092
-
-# Debugging
-#debug=metadata,topic,msg,broker
-
-# Any other librdkafka configuration property.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/test.h b/fluent-bit/lib/librdkafka-2.1.0/tests/test.h
deleted file mode 100644
index a431f9a25..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/test.h
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _TEST_H_
-#define _TEST_H_
-
-#include "../src/rd.h"
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#ifndef _WIN32
-#include <unistd.h>
-#endif
-#include <errno.h>
-#include <assert.h>
-#include <time.h>
-#include <ctype.h>
-
-#if HAVE_GETRUSAGE
-#include <sys/time.h>
-#include <sys/resource.h>
-#endif
-
-#include "rdkafka.h"
-#include "rdkafka_mock.h"
-#include "tinycthread.h"
-#include "rdlist.h"
-
-#if WITH_SOCKEM
-#include "sockem.h"
-#endif
-
-#include "testshared.h"
-#ifdef _WIN32
-#define sscanf(...) sscanf_s(__VA_ARGS__)
-#endif
-
-/**
- * Test output is controlled through the "TEST_LEVEL=N" environment variable.
- * N < 2: TEST_SAY() is quiet.
- */
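For example, assuming the suite is driven through the tests directory's run-test.sh wrapper and its TESTS filter (both assumptions about the harness, not part of this header):

    $ TEST_LEVEL=3 TESTS=0009 ./run-test.sh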
-
-extern int test_seed;
-extern char test_mode[64];
-extern RD_TLS struct test *test_curr;
-extern int test_assert_on_fail;
-extern int tests_running_cnt;
-extern int test_concurrent_max;
-extern int test_rusage;
-extern double test_rusage_cpu_calibration;
-extern double test_timeout_multiplier;
-extern int test_session_timeout_ms; /* Group session timeout */
-extern int test_flags;
-extern int test_neg_flags;
-extern int test_idempotent_producer;
-
-extern mtx_t test_mtx;
-
-#define TEST_LOCK() mtx_lock(&test_mtx)
-#define TEST_UNLOCK() mtx_unlock(&test_mtx)
-
-
-/* Forward decl */
-typedef struct test_msgver_s test_msgver_t;
-
-
-/** @struct Resource usage thresholds */
-struct rusage_thres {
- double ucpu; /**< Max User CPU in percentage */
- double scpu; /**< Max Sys CPU in percentage */
- double rss; /**< Max RSS (memory) increase in MB */
- int ctxsw; /**< Max number of voluntary context switches, i.e.
- * syscalls. */
-};
-
-typedef enum {
- TEST_NOT_STARTED,
- TEST_SKIPPED,
- TEST_RUNNING,
- TEST_PASSED,
- TEST_FAILED,
-} test_state_t;
-
-struct test {
- /**
- * Setup
- */
- const char *name; /**< Test name, e.g. the filename minus its extension */
- int (*mainfunc)(int argc, char **argv); /**< test's main func */
- const int flags; /**< Test flags */
-#define TEST_F_LOCAL 0x1 /**< Test is local, no broker requirement */
-#define TEST_F_KNOWN_ISSUE \
- 0x2 /**< Known issue, can fail without affecting \
- * total test run status. */
-#define TEST_F_MANUAL \
- 0x4 /**< Manual test, only started when specifically \
- * stated */
-#define TEST_F_SOCKEM 0x8 /**< Test requires socket emulation. */
- int minver; /**< Limit tests to broker version range. */
- int maxver;
-
- const char *extra; /**< Extra information to print in test_summary. */
-
- const char *scenario; /**< Test scenario */
-
- char *
- *report_arr; /**< Test-specific reporting, JSON array of objects. */
- int report_cnt;
- int report_size;
-
- rd_bool_t ignore_dr_err; /**< Ignore delivery report errors */
- rd_kafka_resp_err_t exp_dr_err; /* Expected error in test_dr_cb */
- rd_kafka_msg_status_t exp_dr_status; /**< Expected delivery status,
- * or -1 for not checking. */
- int produce_sync; /**< test_produce_sync() call in action */
- rd_kafka_resp_err_t produce_sync_err; /**< DR error */
- test_msgver_t *dr_mv; /**< MsgVer that delivered messages will be
- * added to (if not NULL).
- * Must be set and freed by test. */
-
- /**
- * Runtime
- */
- thrd_t thrd;
- int64_t start;
- int64_t duration;
- FILE *stats_fp;
- int64_t timeout;
- test_state_t state;
- int failcnt; /**< Number of failures, useful with FAIL_LATER */
- char failstr[512 + 1]; /**< First test failure reason */
- char subtest[400]; /**< Current subtest, if any */
- test_timing_t subtest_duration; /**< Subtest duration timing */
- rd_bool_t subtest_quick; /**< Subtest is marked as QUICK */
-
-#if WITH_SOCKEM
- rd_list_t sockets;
- int (*connect_cb)(struct test *test, sockem_t *skm, const char *id);
-#endif
- int (*is_fatal_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason);
-
- /**< Resource usage thresholds */
- struct rusage_thres rusage_thres; /**< Usage thresholds */
-#if HAVE_GETRUSAGE
- struct rusage rusage; /**< Monitored process CPU/mem usage */
-#endif
-};
-
-
-#ifdef _WIN32
-#define TEST_F_KNOWN_ISSUE_WIN32 TEST_F_KNOWN_ISSUE
-#else
-#define TEST_F_KNOWN_ISSUE_WIN32 0
-#endif
-
-#ifdef __APPLE__
-#define TEST_F_KNOWN_ISSUE_OSX TEST_F_KNOWN_ISSUE
-#else
-#define TEST_F_KNOWN_ISSUE_OSX 0
-#endif
-
-
-#define TEST_SAY0(...) fprintf(stderr, __VA_ARGS__)
-#define TEST_SAYL(LVL, ...) \
- do { \
- if (test_level >= LVL) { \
- fprintf( \
- stderr, "\033[36m[%-28s/%7.3fs] ", \
- test_curr->name, \
- test_curr->start \
- ? ((float)(test_clock() - test_curr->start) / \
- 1000000.0f) \
- : 0); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\033[0m"); \
- } \
- } while (0)
-#define TEST_SAY(...) TEST_SAYL(2, __VA_ARGS__)
-
-/**
- * Append JSON object (as string) to this tests' report array.
- */
-#define TEST_REPORT(...) test_report_add(test_curr, __VA_ARGS__)
-
-
-
-static RD_INLINE RD_UNUSED void rtrim(char *str) {
- size_t len = strlen(str);
- char *s;
-
- if (len == 0)
- return;
-
- s = str + len - 1;
- while (s >= str && isspace((int)*s)) {
- *s = '\0';
- s--;
- }
-}
-
-/* Skip the current test. Argument is textual reason (printf format) */
-#define TEST_SKIP(...) \
- do { \
- TEST_WARN("SKIPPING TEST: " __VA_ARGS__); \
- TEST_LOCK(); \
- test_curr->state = TEST_SKIPPED; \
- if (!*test_curr->failstr) { \
- rd_snprintf(test_curr->failstr, \
- sizeof(test_curr->failstr), __VA_ARGS__); \
- rtrim(test_curr->failstr); \
- } \
- TEST_UNLOCK(); \
- } while (0)
-
-
-void test_conf_init(rd_kafka_conf_t **conf,
- rd_kafka_topic_conf_t **topic_conf,
- int timeout);
-
-
-
-void test_msg_fmt(char *dest,
- size_t dest_size,
- uint64_t testid,
- int32_t partition,
- int msgid);
-void test_msg_parse0(const char *func,
- int line,
- uint64_t testid,
- rd_kafka_message_t *rkmessage,
- int32_t exp_partition,
- int *msgidp);
-#define test_msg_parse(testid, rkmessage, exp_partition, msgidp) \
- test_msg_parse0(__FUNCTION__, __LINE__, testid, rkmessage, \
- exp_partition, msgidp)
-
-
-static RD_INLINE int jitter(int low, int high) RD_UNUSED;
-static RD_INLINE int jitter(int low, int high) {
- return (low + (rand() % ((high - low) + 1)));
-}
-
-
-
-/******************************************************************************
- *
- * Helpers
- *
- ******************************************************************************/
-
-
-
-/****************************************************************
- * Message verification services *
- * *
- * *
- * *
- ****************************************************************/
-
-
-/**
- * A test_msgver_t is first fed with messages from any number of
- * topics and partitions; it is then checked for expected messages, such as:
- * - all messages received, based on message payload information.
- * - messages received in order
- * - EOF
- */
-struct test_msgver_s {
- struct test_mv_p **p; /* Partitions array */
- int p_cnt; /* Partition count */
- int p_size; /* p size */
- int msgcnt; /* Total message count */
- uint64_t testid; /* Only accept messages for this testid */
- rd_bool_t ignore_eof; /* Ignore PARTITION_EOF messages */
-
- struct test_msgver_s *fwd; /* Also forward add_msg() to this mv */
-
- int log_cnt; /* Current number of warning logs */
- int log_max; /* Max warning logs before suppressing. */
- int log_suppr_cnt; /* Number of suppressed log messages. */
-
- const char *msgid_hdr; /**< msgid string is in header by this name,
- * rather than in the payload (default). */
-}; /* test_msgver_t; */
-
-/* Message */
-struct test_mv_m {
- int64_t offset; /* Message offset */
- int msgid; /* Message id */
- int64_t timestamp; /* Message timestamp */
- int32_t broker_id; /* Message broker id */
-};
-
-
-/* Message vector */
-struct test_mv_mvec {
- struct test_mv_m *m;
- int cnt;
- int size; /* m[] size */
-};
-
-/* Partition */
-struct test_mv_p {
- char *topic;
- int32_t partition;
- struct test_mv_mvec mvec;
- int64_t eof_offset;
-};
-
-/* Verification state */
-struct test_mv_vs {
- int msg_base;
- int exp_cnt;
-
- /* used by verify_range */
- int msgid_min;
- int msgid_max;
- int64_t timestamp_min;
- int64_t timestamp_max;
-
- /* used by verify_broker_id */
- int32_t broker_id;
-
- struct test_mv_mvec mvec;
-
- /* Correct msgver for comparison */
- test_msgver_t *corr;
-};
-
-
-void test_msgver_init(test_msgver_t *mv, uint64_t testid);
-void test_msgver_clear(test_msgver_t *mv);
-void test_msgver_ignore_eof(test_msgver_t *mv);
-int test_msgver_add_msg00(const char *func,
- int line,
- const char *clientname,
- test_msgver_t *mv,
- uint64_t testid,
- const char *topic,
- int32_t partition,
- int64_t offset,
- int64_t timestamp,
- int32_t broker_id,
- rd_kafka_resp_err_t err,
- int msgnum);
-int test_msgver_add_msg0(const char *func,
- int line,
- const char *clientname,
- test_msgver_t *mv,
- const rd_kafka_message_t *rkmessage,
- const char *override_topic);
-#define test_msgver_add_msg(rk, mv, rkm) \
- test_msgver_add_msg0(__FUNCTION__, __LINE__, rd_kafka_name(rk), mv, \
- rkm, NULL)
-
-/**
- * Flags to indicate what to verify.
- */
-#define TEST_MSGVER_ORDER 0x1 /* Order */
-#define TEST_MSGVER_DUP 0x2 /* Duplicates */
-#define TEST_MSGVER_RANGE 0x4 /* Range of messages */
-
-#define TEST_MSGVER_ALL 0xf /* All verifiers */
-
-#define TEST_MSGVER_BY_MSGID 0x10000 /* Verify by msgid (unique in testid) */
-#define TEST_MSGVER_BY_OFFSET \
- 0x20000 /* Verify by offset (unique in partition)*/
-#define TEST_MSGVER_BY_TIMESTAMP 0x40000 /* Verify by timestamp range */
-#define TEST_MSGVER_BY_BROKER_ID 0x80000 /* Verify by broker id */
-
-#define TEST_MSGVER_SUBSET \
- 0x100000 /* verify_compare: allow correct mv to be \
- * a subset of mv. */
-
-/* Only test per partition, not across all messages received on all partitions.
- * This is useful when doing incremental verifications with multiple partitions
- * and the total number of messages has not been received yet.
- * Can't do range check here since messages may be spread out on multiple
- * partitions and we might just have read a few partitions. */
-#define TEST_MSGVER_PER_PART \
- ((TEST_MSGVER_ALL & ~TEST_MSGVER_RANGE) | TEST_MSGVER_BY_MSGID | \
- TEST_MSGVER_BY_OFFSET)
-
-/* Test on all messages across all partitions.
- * This can only be used to check with msgid, not offset since that
- * is partition local. */
-#define TEST_MSGVER_ALL_PART (TEST_MSGVER_ALL | TEST_MSGVER_BY_MSGID)
-
-
-int test_msgver_verify_part0(const char *func,
- int line,
- const char *what,
- test_msgver_t *mv,
- int flags,
- const char *topic,
- int partition,
- int msg_base,
- int exp_cnt);
-#define test_msgver_verify_part(what, mv, flags, topic, partition, msg_base, \
- exp_cnt) \
- test_msgver_verify_part0(__FUNCTION__, __LINE__, what, mv, flags, \
- topic, partition, msg_base, exp_cnt)
-
-int test_msgver_verify0(const char *func,
- int line,
- const char *what,
- test_msgver_t *mv,
- int flags,
- struct test_mv_vs vs);
-#define test_msgver_verify(what, mv, flags, msgbase, expcnt) \
- test_msgver_verify0( \
- __FUNCTION__, __LINE__, what, mv, flags, \
- (struct test_mv_vs) {.msg_base = msgbase, .exp_cnt = expcnt})
-
-
-void test_msgver_verify_compare0(const char *func,
- int line,
- const char *what,
- test_msgver_t *mv,
- test_msgver_t *corr,
- int flags);
-#define test_msgver_verify_compare(what, mv, corr, flags) \
- test_msgver_verify_compare0(__FUNCTION__, __LINE__, what, mv, corr, \
- flags)
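Putting the msgver pieces together, a typical feed-then-verify flow looks roughly like this (a sketch: rk, testid and msgcnt are placeholders, the consumer is assumed to be subscribed, and the -1 "EOF count not checked" convention is an assumption):

    test_msgver_t mv;
    test_msgver_init(&mv, testid);

    /* Feed: every message seen by the consumer is recorded in the msgver. */
    test_consumer_poll("consume", rk, testid, -1 /* EOF count not checked */,
                       0 /* exp_msg_base */, msgcnt, &mv);

    /* Check order, duplicates and msgid range across all partitions. */
    test_msgver_verify("verify", &mv, TEST_MSGVER_ALL_PART, 0, msgcnt);

    test_msgver_clear(&mv);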
-
-rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf);
-
-/**
- * Delivery report callback.
- * Called for each message once to signal its delivery status.
- */
-void test_dr_msg_cb(rd_kafka_t *rk,
- const rd_kafka_message_t *rkmessage,
- void *opaque);
-
-rd_kafka_t *test_create_producer(void);
-rd_kafka_topic_t *
-test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...);
-void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp);
-void test_produce_msgs_nowait(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size,
- int msgrate,
- int *msgcounterp);
-void test_produce_msgs(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size);
-void test_produce_msgs2(rd_kafka_t *rk,
- const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size);
-void test_produce_msgs2_nowait(rd_kafka_t *rk,
- const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size,
- int *remainsp);
-void test_produce_msgs_rate(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- const char *payload,
- size_t size,
- int msgrate);
-rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition);
-
-void test_produce_msgs_easy_v(const char *topic,
- uint64_t testid,
- int32_t partition,
- int msg_base,
- int cnt,
- size_t size,
- ...);
-void test_produce_msgs_easy_multi(uint64_t testid, ...);
-
-void test_incremental_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque);
-void test_rebalance_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *parts,
- void *opaque);
-
-rd_kafka_t *test_create_consumer(
- const char *group_id,
- void (*rebalance_cb)(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- rd_kafka_topic_partition_list_t *partitions,
- void *opaque),
- rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *default_topic_conf);
-rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk, const char *topic);
-rd_kafka_topic_t *
-test_create_topic_object(rd_kafka_t *rk, const char *topic, ...);
-void test_consumer_start(const char *what,
- rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t start_offset);
-void test_consumer_stop(const char *what,
- rd_kafka_topic_t *rkt,
- int32_t partition);
-void test_consumer_seek(const char *what,
- rd_kafka_topic_t *rkt,
- int32_t partition,
- int64_t offset);
-
-#define TEST_NO_SEEK -1
-int64_t test_consume_msgs(const char *what,
- rd_kafka_topic_t *rkt,
- uint64_t testid,
- int32_t partition,
- int64_t offset,
- int exp_msg_base,
- int exp_cnt,
- int parse_fmt);
-
-
-void test_verify_rkmessage0(const char *func,
- int line,
- rd_kafka_message_t *rkmessage,
- uint64_t testid,
- int32_t partition,
- int msgnum);
-#define test_verify_rkmessage(rkmessage, testid, partition, msgnum) \
- test_verify_rkmessage0(__FUNCTION__, __LINE__, rkmessage, testid, \
- partition, msgnum)
-
-void test_consumer_subscribe(rd_kafka_t *rk, const char *topic);
-
-void test_consume_msgs_easy_mv0(const char *group_id,
- const char *topic,
- rd_bool_t txn,
- int32_t partition,
- uint64_t testid,
- int exp_eofcnt,
- int exp_msgcnt,
- rd_kafka_topic_conf_t *tconf,
- test_msgver_t *mv);
-
-#define test_consume_msgs_easy_mv(group_id, topic, partition, testid, \
- exp_eofcnt, exp_msgcnt, tconf, mv) \
- test_consume_msgs_easy_mv0(group_id, topic, rd_false /*not-txn*/, \
- partition, testid, exp_eofcnt, exp_msgcnt, \
- tconf, mv)
-
-void test_consume_msgs_easy(const char *group_id,
- const char *topic,
- uint64_t testid,
- int exp_eofcnt,
- int exp_msgcnt,
- rd_kafka_topic_conf_t *tconf);
-
-void test_consume_txn_msgs_easy(const char *group_id,
- const char *topic,
- uint64_t testid,
- int exp_eofcnt,
- int exp_msgcnt,
- rd_kafka_topic_conf_t *tconf);
-
-void test_consumer_poll_no_msgs(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int timeout_ms);
-void test_consumer_poll_expect_err(rd_kafka_t *rk,
- uint64_t testid,
- int timeout_ms,
- rd_kafka_resp_err_t err);
-int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms);
-int test_consumer_poll_exact_timeout(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- rd_bool_t exact,
- test_msgver_t *mv,
- int timeout_ms);
-int test_consumer_poll_exact(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- rd_bool_t exact,
- test_msgver_t *mv);
-int test_consumer_poll(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- test_msgver_t *mv);
-int test_consumer_poll_timeout(const char *what,
- rd_kafka_t *rk,
- uint64_t testid,
- int exp_eof_cnt,
- int exp_msg_base,
- int exp_cnt,
- test_msgver_t *mv,
- int timeout_ms);
-
-void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll);
-void test_consumer_verify_assignment0(const char *func,
- int line,
- rd_kafka_t *rk,
- int fail_immediately,
- ...);
-#define test_consumer_verify_assignment(rk, fail_immediately, ...) \
- test_consumer_verify_assignment0(__FUNCTION__, __LINE__, rk, \
- fail_immediately, __VA_ARGS__)
-
-void test_consumer_assign(const char *what,
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *parts);
-void test_consumer_incremental_assign(const char *what,
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *parts);
-void test_consumer_unassign(const char *what, rd_kafka_t *rk);
-void test_consumer_incremental_unassign(const char *what,
- rd_kafka_t *rk,
- rd_kafka_topic_partition_list_t *parts);
-void test_consumer_assign_partition(const char *what,
- rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- int64_t offset);
-void test_consumer_pause_resume_partition(rd_kafka_t *rk,
- const char *topic,
- int32_t partition,
- rd_bool_t pause);
-
-void test_consumer_close(rd_kafka_t *rk);
-
-void test_flush(rd_kafka_t *rk, int timeout_ms);
-
-void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val);
-char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf, const char *name);
-int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val);
-void test_topic_conf_set(rd_kafka_topic_conf_t *tconf,
- const char *name,
- const char *val);
-void test_any_conf_set(rd_kafka_conf_t *conf,
- rd_kafka_topic_conf_t *tconf,
- const char *name,
- const char *val);
-
-void test_print_partition_list(
- const rd_kafka_topic_partition_list_t *partitions);
-int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al,
- rd_kafka_topic_partition_list_t *bl);
-int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al,
- rd_kafka_topic_partition_list_t *bl);
-
-void test_kafka_topics(const char *fmt, ...);
-void test_admin_create_topic(rd_kafka_t *use_rk,
- const char *topicname,
- int partition_cnt,
- int replication_factor,
- const char **configs);
-void test_create_topic(rd_kafka_t *use_rk,
- const char *topicname,
- int partition_cnt,
- int replication_factor);
-rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk,
- rd_kafka_topic_t *rkt,
- int timeout_ms);
-rd_kafka_resp_err_t
-test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms);
-int test_check_auto_create_topic(void);
-
-void test_create_partitions(rd_kafka_t *use_rk,
- const char *topicname,
- int new_partition_cnt);
-
-int test_get_partition_count(rd_kafka_t *rk,
- const char *topicname,
- int timeout_ms);
-
-char *tsprintf(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
-
-void test_report_add(struct test *test, const char *fmt, ...);
-int test_can_create_topics(int skip);
-
-rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq,
- rd_kafka_event_type_t event_type,
- int timeout_ms);
-
-void test_prepare_msg(uint64_t testid,
- int32_t partition,
- int msg_id,
- char *val,
- size_t val_size,
- char *key,
- size_t key_size);
-
-#if WITH_SOCKEM
-void test_socket_enable(rd_kafka_conf_t *conf);
-void test_socket_close_all(struct test *test, int reinit);
-int test_socket_sockem_set_all(const char *key, int val);
-void test_socket_sockem_set(int s, const char *key, int value);
-#endif
-
-void test_headers_dump(const char *what,
- int lvl,
- const rd_kafka_headers_t *hdrs);
-
-int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp);
-
-void test_wait_metadata_update(rd_kafka_t *rk,
- rd_kafka_metadata_topic_t *topics,
- size_t topic_cnt,
- rd_kafka_metadata_topic_t *not_topics,
- size_t not_topic_cnt,
- int tmout);
-
-rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
- rd_kafka_event_type_t evtype,
- int tmout);
-
-rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
- rd_kafka_event_type_t evtype,
- rd_kafka_event_t **retevent,
- int tmout);
-
-rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- char **topics,
- size_t topic_cnt,
- int num_partitions,
- void *opaque);
-rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- const char *topic,
- size_t total_part_cnt,
- void *opaque);
-
-rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- char **topics,
- size_t topic_cnt,
- void *opaque);
-
-rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk,
- rd_kafka_ResourceType_t restype,
- const char *resname,
- const char **configs,
- size_t config_cnt);
-
-rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- char **groups,
- size_t group_cnt,
- void *opaque);
-
-rd_kafka_resp_err_t
-test_DeleteRecords_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- const rd_kafka_topic_partition_list_t *offsets,
- void *opaque);
-
-rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple(
- rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- const char *group_id,
- const rd_kafka_topic_partition_list_t *offsets,
- void *opaque);
-
-rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk,
- rd_kafka_queue_t *useq,
- rd_kafka_AclBinding_t **acls,
- size_t acl_cnt,
- void *opaque);
-
-rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms);
-
-void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
-rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt,
- const char **bootstraps);
-
-
-
-int test_error_is_not_fatal_cb(rd_kafka_t *rk,
- rd_kafka_resp_err_t err,
- const char *reason);
-
-
-/**
- * @brief Calls rdkafka function (with arguments)
- * and checks its return value (must be rd_kafka_resp_err_t) for
- * error, in which case the test fails.
- * Also times the call.
- *
- * @remark The trailing __ makes calling code easier to read.
- */
-#define TEST_CALL__(FUNC_W_ARGS) \
- do { \
- test_timing_t _timing; \
- const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
- rd_kafka_resp_err_t _err; \
- TIMING_START(&_timing, "%s", _desc); \
- TEST_SAYL(3, "Begin call %s\n", _desc); \
- _err = FUNC_W_ARGS; \
- TIMING_STOP(&_timing); \
- if (!_err) \
- break; \
- if (strstr(_desc, "errstr")) \
- TEST_FAIL("%s failed: %s: %s\n", _desc, \
- rd_kafka_err2name(_err), errstr); \
- else \
- TEST_FAIL("%s failed: %s\n", _desc, \
- rd_kafka_err2str(_err)); \
- } while (0)
-
-
-/**
- * @brief Same as TEST_CALL__() but expects an rd_kafka_error_t * return type.
- */
-#define TEST_CALL_ERROR__(FUNC_W_ARGS) \
- do { \
- test_timing_t _timing; \
- const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
- const rd_kafka_error_t *_error; \
- TIMING_START(&_timing, "%s", _desc); \
- TEST_SAYL(3, "Begin call %s\n", _desc); \
- _error = FUNC_W_ARGS; \
- TIMING_STOP(&_timing); \
- if (!_error) \
- break; \
- TEST_FAIL("%s failed: %s\n", _desc, \
- rd_kafka_error_string(_error)); \
- } while (0)
-
-/**
- * @brief Same as TEST_CALL__() but expects an rd_kafka_resp_err_t return type
- * without errstr.
- */
-#define TEST_CALL_ERR__(FUNC_W_ARGS) \
- do { \
- test_timing_t _timing; \
- const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
- rd_kafka_resp_err_t _err; \
- TIMING_START(&_timing, "%s", _desc); \
- TEST_SAYL(3, "Begin call %s\n", _desc); \
- _err = FUNC_W_ARGS; \
- TIMING_STOP(&_timing); \
- if (!_err) \
- break; \
- TEST_FAIL("%s failed: %s\n", _desc, rd_kafka_err2str(_err)); \
- } while (0)
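Usage sketches for the three wrappers (rk and options are placeholders; the calls are merely examples of functions with the matching return types):

    char errstr[256];

    /* rd_kafka_resp_err_t return; the stringified call contains "errstr",
     * so the macro appends it to the failure message: */
    TEST_CALL__(rd_kafka_AdminOptions_set_request_timeout(
        options, 5000, errstr, sizeof(errstr)));

    /* rd_kafka_error_t * return: */
    TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 30 * 1000));

    /* rd_kafka_resp_err_t return, no errstr involved: */
    TEST_CALL_ERR__(rd_kafka_flush(rk, 10 * 1000));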
-
-
-/**
- * @brief Print a rich error_t object in all its glory. NULL is ok.
- *
- * @param ... Is a prefix format-string+args that is printed with TEST_SAY()
- * prior to the error details. E.g., "commit() returned: ".
- * A newline is automatically appended.
- */
-#define TEST_SAY_ERROR(ERROR, ...) \
- do { \
- rd_kafka_error_t *_e = (ERROR); \
- TEST_SAY(__VA_ARGS__); \
- if (!_e) { \
- TEST_SAY0("No error" _C_CLR "\n"); \
- break; \
- } \
- if (rd_kafka_error_is_fatal(_e)) \
- TEST_SAY0(_C_RED "FATAL "); \
- if (rd_kafka_error_is_retriable(_e)) \
- TEST_SAY0("Retriable "); \
- if (rd_kafka_error_txn_requires_abort(_e)) \
- TEST_SAY0("TxnRequiresAbort "); \
- TEST_SAY0("Error: %s: %s" _C_CLR "\n", \
- rd_kafka_error_name(_e), rd_kafka_error_string(_e)); \
- } while (0)
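For instance (a sketch assuming a transactional producer rk):

    rd_kafka_error_t *error = rd_kafka_begin_transaction(rk);
    TEST_SAY_ERROR(error, "begin_transaction() returned: ");
    if (error)
            rd_kafka_error_destroy(error);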
-
-/**
- * @name rusage.c
- * @{
- */
-void test_rusage_start(struct test *test);
-int test_rusage_stop(struct test *test, double duration);
-
-/**@}*/
-
-#endif /* _TEST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp
deleted file mode 100644
index e965e249f..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "testcpp.h"
-
-#include <fstream>
-#include <cstring>
-
-
-namespace Test {
-
-/**
- * @brief Read config file and populate config objects.
- * @returns 0 on success or -1 on error
- */
-static int read_config_file(std::string path,
- RdKafka::Conf *conf,
- RdKafka::Conf *topic_conf,
- int *timeoutp) {
- std::ifstream input(path.c_str(), std::ifstream::in);
-
- if (!input) /* Missing test.conf is fine: use defaults. */
-  return 0;
-
- std::string line;
- while (std::getline(input, line)) {
- /* Trim string */
- line.erase(0, line.find_first_not_of("\t "));
- line.erase(line.find_last_not_of("\t ") + 1);
-
- if (line.length() == 0 || line.substr(0, 1) == "#")
- continue;
-
- size_t f = line.find("=");
- if (f == std::string::npos) {
- Test::Fail(tostr() << "Conf file: malformed line: " << line);
- return -1;
- }
-
- std::string n = line.substr(0, f);
- std::string v = line.substr(f + 1);
- std::string errstr;
-
- if (test_set_special_conf(n.c_str(), v.c_str(), timeoutp))
- continue;
-
- RdKafka::Conf::ConfResult r = RdKafka::Conf::CONF_UNKNOWN;
-
- if (n.substr(0, 6) == "topic.")
- r = topic_conf->set(n.substr(6), v, errstr);
- if (r == RdKafka::Conf::CONF_UNKNOWN)
- r = conf->set(n, v, errstr);
-
- if (r != RdKafka::Conf::CONF_OK) {
- Test::Fail(errstr);
- return -1;
- }
- }
-
- return 0;
-}
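The parser accepts `#` comments and plain `key=value` lines: special `test.*` properties are consumed first, a `topic.` prefix is stripped and applied to the topic configuration, and everything else falls through to the global configuration. An illustrative test.conf:

    # Global property
    metadata.broker.list=localhost:9092
    # "topic." prefix is stripped and set on the topic configuration
    topic.request.required.acks=all
    # Consumed by test_set_special_conf(), never passed to librdkafka
    test.timeout.multiplier=2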
-
-void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout) {
- const char *tmp;
-
- if (conf)
- *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
- if (topic_conf)
- *topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-
- read_config_file(test_conf_get_path(), conf ? *conf : NULL,
- topic_conf ? *topic_conf : NULL, &timeout);
-
- std::string errstr;
- if (conf && (*conf)->set("client.id", test_curr_name(), errstr) !=
-                 RdKafka::Conf::CONF_OK)
-  Test::Fail("set client.id failed: " + errstr);
-
- if (*conf && (tmp = test_getenv("TEST_DEBUG", NULL))) {
- if ((*conf)->set("debug", tmp, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail("TEST_DEBUG failed: " + errstr);
- }
-
-
- if (timeout)
- test_timeout_set(timeout);
-}
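A typical caller sketch (the property set afterwards is only an example):

    RdKafka::Conf *conf, *tconf;
    Test::conf_init(&conf, &tconf, 60 /* test timeout, seconds */);
    Test::conf_set(conf, "enable.auto.commit", "false");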
-
-
-void DeliveryReportCb::dr_cb(RdKafka::Message &msg) {
- if (msg.err() != RdKafka::ERR_NO_ERROR)
- Test::Fail(tostr() << "Delivery failed to " << msg.topic_name() << " ["
- << msg.partition() << "]: " << msg.errstr());
- else
- Test::Say(3, tostr() << "Delivered to " << msg.topic_name() << " ["
- << msg.partition() << "] @ " << msg.offset()
- << " (timestamp " << msg.timestamp().timestamp
- << ")\n");
-}
-}; // namespace Test
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h b/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h
deleted file mode 100644
index 2ecaed394..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _TESTCPP_H_
-#define _TESTCPP_H_
-
-#include <sstream>
-
-#include "rdkafkacpp.h"
-
-extern "C" {
-#ifdef _WIN32
-/* Win32/Visual Studio */
-#include "../src/win32_config.h"
-#include "../src/rdwin32.h"
-#else
-#include "../config.h"
-/* POSIX / UNIX based systems */
-#include "../src/rdposix.h"
-#endif
-#include "testshared.h"
-}
-
-// courtesy of
-// http://stackoverview.blogspot.se/2011/04/create-string-on-fly-just-in-one-line.html
-struct tostr {
- std::stringstream ss;
- template <typename T>
- tostr &operator<<(const T &data) {
- ss << data;
- return *this;
- }
- operator std::string() {
- return ss.str();
- }
-};
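This allows one-line message construction anywhere a std::string is expected, e.g.:

    Test::Say(tostr() << "consumed " << 42 << " messages from partition "
                      << 3 << "\n");
    std::string what = tostr() << "offset " << 1234;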
-
-
-
-#define TestMessageVerify(testid, exp_partition, msgidp, msg) \
- test_msg_parse00(__FUNCTION__, __LINE__, testid, exp_partition, msgidp, \
- (msg)->topic_name().c_str(), (msg)->partition(), \
- (msg)->offset(), (const char *)(msg)->key_pointer(), \
- (msg)->key_len())
-
-namespace Test {
-
-/**
- * @brief Get test config object
- */
-
-static RD_UNUSED void Fail(std::string str) {
- test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 1 /*now*/, "%s",
- str.c_str());
-}
-static RD_UNUSED void FailLater(std::string str) {
- test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 0 /*later*/, "%s",
- str.c_str());
-}
-static RD_UNUSED void Skip(std::string str) {
- test_SKIP(__FILE__, __LINE__, str.c_str());
-}
-static RD_UNUSED void Say(int level, std::string str) {
- test_SAY(__FILE__, __LINE__, level, str.c_str());
-}
-static RD_UNUSED void Say(std::string str) {
- Test::Say(2, str);
-}
-
-/**
- * @brief Generate test topic name
- */
-static RD_UNUSED std::string mk_topic_name(std::string suffix,
- bool randomized) {
- return test_mk_topic_name(suffix.c_str(), (int)randomized);
-}
-
-/**
- * @brief Generate random test group name
- */
-static RD_UNUSED std::string mk_unique_group_name(std::string suffix) {
- return test_mk_topic_name(suffix.c_str(), 1);
-}
-
-/**
- * @brief Create partitions
- */
-static RD_UNUSED void create_partitions(RdKafka::Handle *use_handle,
- const char *topicname,
- int new_partition_cnt) {
- rd_kafka_t *use_rk = NULL;
- if (use_handle != NULL)
- use_rk = use_handle->c_ptr();
- test_create_partitions(use_rk, topicname, new_partition_cnt);
-}
-
-/**
- * @brief Create a topic
- */
-static RD_UNUSED void create_topic(RdKafka::Handle *use_handle,
- const char *topicname,
- int partition_cnt,
- int replication_factor) {
- rd_kafka_t *use_rk = NULL;
- if (use_handle != NULL)
- use_rk = use_handle->c_ptr();
- test_create_topic(use_rk, topicname, partition_cnt, replication_factor);
-}
-
-/**
- * @brief Delete a topic
- */
-static RD_UNUSED void delete_topic(RdKafka::Handle *use_handle,
- const char *topicname) {
- rd_kafka_t *use_rk = NULL;
- if (use_handle != NULL)
- use_rk = use_handle->c_ptr();
- test_delete_topic(use_rk, topicname);
-}
-
-/**
- * @brief Get new configuration objects
- */
-void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout);
-
-
-static RD_UNUSED void conf_set(RdKafka::Conf *conf,
- std::string name,
- std::string val) {
- std::string errstr;
- if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK)
- Test::Fail("Conf failed: " + errstr);
-}
-
-static RD_UNUSED void print_TopicPartitions(
- std::string header,
- const std::vector<RdKafka::TopicPartition *> &partitions) {
- Test::Say(tostr() << header << ": " << partitions.size()
- << " TopicPartition(s):\n");
- for (unsigned int i = 0; i < partitions.size(); i++)
- Test::Say(tostr() << " " << partitions[i]->topic() << "["
- << partitions[i]->partition() << "] "
- << "offset " << partitions[i]->offset() << ": "
- << RdKafka::err2str(partitions[i]->err()) << "\n");
-}
-
-
-/* Convenience subscribe() */
-static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c,
- const std::string &topic) {
- Test::Say(c->name() + ": Subscribing to " + topic + "\n");
- std::vector<std::string> topics;
- topics.push_back(topic);
- RdKafka::ErrorCode err;
- if ((err = c->subscribe(topics)))
- Test::Fail("Subscribe failed: " + RdKafka::err2str(err));
-}
-
-
-/* Convenience subscribe() to two topics */
-static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c,
- const std::string &topic1,
- const std::string &topic2) {
- Test::Say(c->name() + ": Subscribing to " + topic1 + " and " + topic2 + "\n");
- std::vector<std::string> topics;
- topics.push_back(topic1);
- topics.push_back(topic2);
- RdKafka::ErrorCode err;
- if ((err = c->subscribe(topics)))
- Test::Fail("Subscribe failed: " + RdKafka::err2str(err));
-}
-
-/* Convenience unsubscribe() */
-static RD_UNUSED void unsubscribe(RdKafka::KafkaConsumer *c) {
- Test::Say(c->name() + ": Unsubscribing\n");
- RdKafka::ErrorCode err;
- if ((err = c->unsubscribe()))
- Test::Fail("Unsubscribe failed: " + RdKafka::err2str(err));
-}
-
-
-static RD_UNUSED void incremental_assign(
- RdKafka::KafkaConsumer *c,
- const std::vector<RdKafka::TopicPartition *> &parts) {
- Test::Say(tostr() << c->name() << ": incremental assign of " << parts.size()
- << " partition(s)\n");
- if (test_level >= 2)
- print_TopicPartitions("incremental_assign()", parts);
- RdKafka::Error *error;
- if ((error = c->incremental_assign(parts)))
- Test::Fail(c->name() + ": Incremental assign failed: " + error->str());
-}
-
-static RD_UNUSED void incremental_unassign(
- RdKafka::KafkaConsumer *c,
- const std::vector<RdKafka::TopicPartition *> &parts) {
- Test::Say(tostr() << c->name() << ": incremental unassign of " << parts.size()
- << " partition(s)\n");
- if (test_level >= 2)
- print_TopicPartitions("incremental_unassign()", parts);
- RdKafka::Error *error;
- if ((error = c->incremental_unassign(parts)))
- Test::Fail(c->name() + ": Incremental unassign failed: " + error->str());
-}
-
-/**
- * @brief Wait until the current assignment size is \p partition_count.
- * If \p topic is not NULL, then additionally, each partition in
- * the assignment must have topic \p topic.
- */
-static RD_UNUSED void wait_for_assignment(RdKafka::KafkaConsumer *c,
- size_t partition_count,
- const std::string *topic) {
- bool done = false;
- while (!done) {
- RdKafka::Message *msg1 = c->consume(500);
- delete msg1;
-
- std::vector<RdKafka::TopicPartition *> partitions;
- c->assignment(partitions);
-
- if (partitions.size() == partition_count) {
- done = true;
- if (topic) {
- for (size_t i = 0; i < partitions.size(); i++) {
- if (partitions[i]->topic() != *topic) {
- done = false;
- break;
- }
- }
- }
- }
-
- RdKafka::TopicPartition::destroy(partitions);
- }
-}
-
-
-/**
- * @brief Check current assignment has size \p partition_count
- * If \p topic is not NULL, then additionally check that
- * each partition in the assignment has topic \p topic.
- */
-static RD_UNUSED void check_assignment(RdKafka::KafkaConsumer *c,
- size_t partition_count,
- const std::string *topic) {
- std::vector<RdKafka::TopicPartition *> partitions;
- c->assignment(partitions);
- if (partition_count != partitions.size())
- Test::Fail(tostr() << "Expecting current assignment to have size "
- << partition_count << ", not: " << partitions.size());
- for (size_t i = 0; i < partitions.size(); i++) {
- if (topic != NULL) {
- if (partitions[i]->topic() != *topic)
- Test::Fail(tostr() << "Expecting assignment to be " << *topic
- << ", not " << partitions[i]->topic());
- }
- delete partitions[i];
- }
-}
-
-
-/**
- * @brief Current assignment partition count. If \p topic is
- * NULL, then the total partition count, else the number
- * of assigned partitions from \p topic.
- */
-static RD_UNUSED size_t assignment_partition_count(RdKafka::KafkaConsumer *c,
- std::string *topic) {
- std::vector<RdKafka::TopicPartition *> partitions;
- c->assignment(partitions);
- size_t cnt = 0;
- for (size_t i = 0; i < partitions.size(); i++) {
- if (topic == NULL || *topic == partitions[i]->topic())
- cnt++;
- delete partitions[i];
- }
- return cnt;
-}
-
-
-/**
- * @brief Poll the consumer once, discarding the returned message
- * or error event.
- * @returns true if a proper event/message was seen, or false on timeout.
- */
-static RD_UNUSED bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) {
- RdKafka::Message *msg = c->consume(timeout_ms);
- bool ret = msg->err() != RdKafka::ERR__TIMED_OUT;
- delete msg;
- return ret;
-}
-
-
-/**
- * @brief Produce \p msgcnt messages to \p topic \p partition.
- */
-static RD_UNUSED void produce_msgs(RdKafka::Producer *p,
- const std::string &topic,
- int32_t partition,
- int msgcnt,
- int msgsize,
- bool flush) {
- char *buf = (char *)malloc(msgsize);
-
- for (int i = 0; i < msgsize; i++)
- buf[i] = (char)((int)'a' + (i % 26));
-
- for (int i = 0; i < msgcnt; i++) {
- RdKafka::ErrorCode err;
- err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
- (void *)buf, (size_t)msgsize, NULL, 0, 0, NULL);
- TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
- p->poll(0);
- }
-
- free(buf);
-
- if (flush)
- p->flush(10 * 1000);
-}
-
-
-
-/**
- * @brief Delivery report class
- */
-class DeliveryReportCb : public RdKafka::DeliveryReportCb {
- public:
- void dr_cb(RdKafka::Message &msg);
-};
-
-static DeliveryReportCb DrCb;
-}; // namespace Test
-
-#endif /* _TESTCPP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h b/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h
deleted file mode 100644
index efdd5d555..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _TESTSHARED_H_
-#define _TESTSHARED_H_
-
-/**
- * C variables and functions shared with C++ tests
- */
-
-#ifndef _RDKAFKA_H_
-typedef struct rd_kafka_s rd_kafka_t;
-typedef struct rd_kafka_conf_s rd_kafka_conf_t;
-#endif
-
-/* ANSI color codes */
-#define _C_CLR "\033[0m"
-#define _C_RED "\033[31m"
-#define _C_GRN "\033[32m"
-#define _C_YEL "\033[33m"
-#define _C_BLU "\033[34m"
-#define _C_MAG "\033[35m"
-#define _C_CYA "\033[36m"
-
-
-/** Test logging level (TEST_LEVEL=.. env) */
-extern int test_level;
-
-/** Test scenario */
-extern char test_scenario[64];
-
-/** @returns the \p msecs timeout multiplied by the test timeout multiplier */
-extern int tmout_multip(int msecs);
-
-/** @brief true if tests should run in quick-mode (faster, less data) */
-extern int test_quick;
-
-/** @brief Broker version to int */
-#define TEST_BRKVER(A, B, C, D) (((A) << 24) | ((B) << 16) | ((C) << 8) | (D))
-/** @brief return single version component from int */
-#define TEST_BRKVER_X(V, I) (((V) >> (24 - ((I)*8))) & 0xff)
-
-/** @brief Topic Admin API supported by this broker version and later */
-#define TEST_BRKVER_TOPIC_ADMINAPI TEST_BRKVER(0, 10, 2, 0)
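Worked example of the version encoding (one byte per component, most significant first):

    TEST_BRKVER(0, 10, 2, 0)       /* == 0x000a0200 */
    TEST_BRKVER_X(0x000a0200, 1)   /* == 10, the second component */

    if (test_broker_version >= TEST_BRKVER_TOPIC_ADMINAPI)
            ; /* broker supports the topic Admin API */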
-
-extern int test_broker_version;
-extern int test_on_ci;
-
-const char *test_mk_topic_name(const char *suffix, int randomized);
-
-void test_delete_topic(rd_kafka_t *use_rk, const char *topicname);
-
-void test_create_topic(rd_kafka_t *use_rk,
- const char *topicname,
- int partition_cnt,
- int replication_factor);
-
-void test_create_partitions(rd_kafka_t *use_rk,
- const char *topicname,
- int new_partition_cnt);
-
-void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout);
-
-void test_kafka_cmd(const char *fmt, ...);
-
-uint64_t test_produce_msgs_easy_size(const char *topic,
- uint64_t testid,
- int32_t partition,
- int msgcnt,
- size_t size);
-#define test_produce_msgs_easy(topic, testid, partition, msgcnt) \
- test_produce_msgs_easy_size(topic, testid, partition, msgcnt, 0)
-
-
-void test_fail0(const char *file,
- int line,
- const char *function,
- int do_lock,
- int fail_now,
- const char *fmt,
- ...) RD_FORMAT(printf, 6, 7);
-
-
-#define TEST_FAIL0(file, line, do_lock, fail_now, ...) \
- test_fail0(__FILE__, __LINE__, __FUNCTION__, do_lock, fail_now, \
- __VA_ARGS__)
-
-/* Whine and abort test */
-#define TEST_FAIL(...) TEST_FAIL0(__FILE__, __LINE__, 1, 1, __VA_ARGS__)
-
-/* Whine right away, mark the test as failed, but continue the test. */
-#define TEST_FAIL_LATER(...) TEST_FAIL0(__FILE__, __LINE__, 1, 0, __VA_ARGS__)
-
-/* Whine right away, maybe mark the test as failed, but continue the test. */
-#define TEST_FAIL_LATER0(LATER, ...) \
- TEST_FAIL0(__FILE__, __LINE__, 1, !(LATER), __VA_ARGS__)
-
-#define TEST_FAILCNT() (test_curr->failcnt)
-
-#define TEST_LATER_CHECK(...) \
- do { \
- if (test_curr->state == TEST_FAILED) \
- TEST_FAIL("See previous errors. " __VA_ARGS__); \
- } while (0)
-
-#define TEST_PERROR(call) \
- do { \
- if (!(call)) \
- TEST_FAIL(#call " failed: %s", rd_strerror(errno)); \
- } while (0)
-
-#define TEST_WARN(...) \
- do { \
- fprintf(stderr, \
- "\033[33m[%-28s/%7.3fs] WARN: ", test_curr->name, \
- test_curr->start \
- ? ((float)(test_clock() - test_curr->start) / \
- 1000000.0f) \
- : 0); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, "\033[0m"); \
- } while (0)
-
-/* "..." is a failure reason in printf format, include as much info as needed */
-#define TEST_ASSERT(expr, ...) \
- do { \
- if (!(expr)) { \
- TEST_FAIL("Test assertion failed: \"" #expr \
- "\": " __VA_ARGS__); \
- } \
- } while (0)
-
-
-/* "..." is a failure reason in printf format, include as much info as needed */
-#define TEST_ASSERT_LATER(expr, ...) \
- do { \
- if (!(expr)) { \
- TEST_FAIL0(__FILE__, __LINE__, 1, 0, \
- "Test assertion failed: \"" #expr \
- "\": " __VA_ARGS__); \
- } \
- } while (0)
-
-
-void test_SAY(const char *file, int line, int level, const char *str);
-void test_SKIP(const char *file, int line, const char *str);
-
-void test_timeout_set(int timeout);
-int test_set_special_conf(const char *name, const char *val, int *timeoutp);
-char *test_conf_get(const rd_kafka_conf_t *conf, const char *name);
-const char *test_conf_get_path(void);
-const char *test_getenv(const char *env, const char *def);
-
-int test_needs_auth(void);
-
-uint64_t test_id_generate(void);
-char *test_str_id_generate(char *dest, size_t dest_size);
-const char *test_str_id_generate_tmp(void);
-
-void test_prepare_msg(uint64_t testid,
- int32_t partition,
- int msg_id,
- char *val,
- size_t val_size,
- char *key,
- size_t key_size);
-/**
- * Parse a message token
- */
-void test_msg_parse00(const char *func,
- int line,
- uint64_t testid,
- int32_t exp_partition,
- int *msgidp,
- const char *topic,
- int32_t partition,
- int64_t offset,
- const char *key,
- size_t key_size);
-
-
-int test_check_builtin(const char *feature);
-
-/**
- * @returns the current test's name (thread-local)
- */
-extern const char *test_curr_name(void);
-
-#ifndef _WIN32
-#include <sys/time.h>
-#ifndef RD_UNUSED
-#define RD_UNUSED __attribute__((unused))
-#endif
-
-#else
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#endif
-
-#ifndef RD_UNUSED
-#define RD_UNUSED
-#endif
-
-
-/**
- * A microsecond monotonic clock
- */
-static RD_INLINE int64_t test_clock(void)
-#ifndef _MSC_VER
- __attribute__((unused))
-#endif
- ;
-static RD_INLINE int64_t test_clock(void) {
-#ifdef __APPLE__
- /* No monotonic clock on Darwin */
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return ((int64_t)tv.tv_sec * 1000000LLU) + (int64_t)tv.tv_usec;
-#elif defined(_WIN32)
- LARGE_INTEGER now;
- static RD_TLS LARGE_INTEGER freq;
- if (!freq.QuadPart)
- QueryPerformanceFrequency(&freq);
- QueryPerformanceCounter(&now);
- return (now.QuadPart * 1000000) / freq.QuadPart;
-#else
- struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return ((int64_t)ts.tv_sec * 1000000LLU) +
- ((int64_t)ts.tv_nsec / 1000LLU);
-#endif
-}
-
-
-typedef struct test_timing_s {
- char name[450];
- int64_t ts_start;
- int64_t duration;
- int64_t ts_every; /* Last every */
-} test_timing_t;
-
-/**
- * @brief Start timing, Va-Argument is textual name (printf format)
- */
-#define TIMING_RESTART(TIMING) \
- do { \
- (TIMING)->ts_start = test_clock(); \
- (TIMING)->duration = 0; \
- } while (0)
-
-#define TIMING_START(TIMING, ...) \
- do { \
- rd_snprintf((TIMING)->name, sizeof((TIMING)->name), \
- __VA_ARGS__); \
- TIMING_RESTART(TIMING); \
- (TIMING)->ts_every = (TIMING)->ts_start; \
- } while (0)
-
-#define TIMING_STOPPED(TIMING) ((TIMING)->duration != 0)
-
-#ifndef __cplusplus
-#define TIMING_STOP(TIMING) \
- do { \
- (TIMING)->duration = test_clock() - (TIMING)->ts_start; \
- TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \
- (float)(TIMING)->duration / 1000.0f); \
- } while (0)
-#define TIMING_REPORT(TIMING) \
- TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \
-          (float)(TIMING)->duration / 1000.0f)
-
-#else
-#define TIMING_STOP(TIMING) \
- do { \
- char _str[512]; \
- (TIMING)->duration = test_clock() - (TIMING)->ts_start; \
- rd_snprintf(_str, sizeof(_str), "%s: duration %.3fms\n", \
- (TIMING)->name, \
- (float)(TIMING)->duration / 1000.0f); \
- Test::Say(_str); \
- } while (0)
-
-#endif
-
-#define TIMING_DURATION(TIMING) \
- ((TIMING)->duration ? (TIMING)->duration \
- : (test_clock() - (TIMING)->ts_start))
-
-#define TIMING_ASSERT0(TIMING, DO_FAIL_LATER, TMIN_MS, TMAX_MS) \
- do { \
- if (!TIMING_STOPPED(TIMING)) \
- TIMING_STOP(TIMING); \
- int _dur_ms = (int)TIMING_DURATION(TIMING) / 1000; \
- if (TMIN_MS <= _dur_ms && _dur_ms <= TMAX_MS) \
- break; \
- if (test_on_ci || strcmp(test_mode, "bare")) \
- TEST_WARN( \
- "%s: expected duration %d <= %d <= %d ms%s\n", \
- (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS, \
- ": not FAILING test on CI"); \
- else \
- TEST_FAIL_LATER0( \
- DO_FAIL_LATER, \
- "%s: expected duration %d <= %d <= %d ms", \
- (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS); \
- } while (0)
-
-#define TIMING_ASSERT(TIMING, TMIN_MS, TMAX_MS) \
- TIMING_ASSERT0(TIMING, 0, TMIN_MS, TMAX_MS)
-#define TIMING_ASSERT_LATER(TIMING, TMIN_MS, TMAX_MS) \
- TIMING_ASSERT0(TIMING, 1, TMIN_MS, TMAX_MS)
-
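-/*
- * Example usage of the timing macros (an illustrative sketch; the
- * `partition` variable is hypothetical):
- *
- *   test_timing_t t;
- *   TIMING_START(&t, "FETCH.%d", (int)partition);
- *   ...perform the operation being timed...
- *   TIMING_STOP(&t);
- *   TIMING_ASSERT(&t, 0, 5000);  // expect completion within 0..5000 ms
- */
-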
-/* Trigger something every \p us microseconds: returns 1 (and rearms
- * the interval) if at least \p us microseconds have passed since the
- * last trigger, else 0. */
-static RD_UNUSED int TIMING_EVERY(test_timing_t *timing, int us) {
- int64_t now = test_clock();
- if (timing->ts_every + us <= now) {
- timing->ts_every = now;
- return 1;
- }
- return 0;
-}
-
-
-/**
- * Sub-tests
- */
-int test_sub_start(const char *func,
- int line,
- int is_quick,
- const char *fmt,
- ...);
-void test_sub_pass(void);
-void test_sub_skip(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
-
-#define SUB_TEST0(IS_QUICK, ...) \
- do { \
- if (!test_sub_start(__FUNCTION__, __LINE__, IS_QUICK, \
- __VA_ARGS__)) \
- return; \
- } while (0)
-
-#define SUB_TEST(...) SUB_TEST0(0, "" __VA_ARGS__)
-#define SUB_TEST_QUICK(...) SUB_TEST0(1, "" __VA_ARGS__)
-#define SUB_TEST_PASS() test_sub_pass()
-#define SUB_TEST_SKIP(...) \
- do { \
- test_sub_skip(__VA_ARGS__); \
- return; \
- } while (0)
-
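-/*
- * Example (sketch): structuring a test as sub-tests; do_test_foo and
- * its argument are hypothetical:
- *
- *   static void do_test_foo(int quick) {
- *           SUB_TEST_QUICK("foo (quick=%d)", quick);
- *           ...exercise the feature, TEST_FAIL() on error...
- *           SUB_TEST_PASS();
- *   }
- */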
-
-#ifndef _WIN32
-#define rd_sleep(S) sleep(S)
-#else
-#define rd_sleep(S) Sleep((S)*1000)
-#endif
-
-/* Make sure __SANITIZE_ADDRESS__ (gcc) is defined if compiled with asan */
-#if !defined(__SANITIZE_ADDRESS__) && defined(__has_feature)
-#if __has_feature(address_sanitizer)
-#define __SANITIZE_ADDRESS__ 1
-#endif
-#endif
-
-
-int test_run_java(const char *cls, const char **argv);
-int test_waitpid(int pid);
-#endif /* _TESTSHARED_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md
deleted file mode 100644
index f1ec5681b..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Tools
-
-Assorted librdkafka tools.
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md
deleted file mode 100644
index a4ce80bd9..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Stats tools
-
-These tools are suitable for parsing librdkafka's statistics
-as emitted by the `stats_cb` when `statistics.interval.ms` is set.
-
- * [to_csv.py](to_csv.py) - selectively convert stats JSON to CSV.
- * [graph.py](graph.py) - graph CSV files.
- * [filter.jq](filter.jq) - basic `jq` filter.
-
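-Statistics are enabled by setting `statistics.interval.ms` on a client;
-a minimal C sketch (the callback name `my_stats_cb` is illustrative):
-
-    char errstr[256];
-    rd_kafka_conf_t *conf = rd_kafka_conf_new();
-    rd_kafka_conf_set(conf, "statistics.interval.ms", "1000",
-                      errstr, sizeof(errstr));
-    rd_kafka_conf_set_stats_cb(conf, my_stats_cb); /* receives stats JSON */
-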
-Install dependencies:
-
- $ python3 -m pip install -r requirements.txt
-
-
-Examples:
-
- # Extract stats json from log line (test*.csv files are created)
- $ grep -F STATS: file.log | sed -e 's/^.*STATS: //' | ./to_csv.py test1
-
-    # Plot per-partition (toppar) graphs, but skip some columns.
- $ ./graph.py --skip '*bytes,*msg_cnt,stateage,*msgs,leader' --group-by 1partition test1_toppars.csv
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq
deleted file mode 100644
index 414a20697..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq
+++ /dev/null
@@ -1,42 +0,0 @@
-# Usage:
-# cat stats.json | jq -R -f filter.jq
-
-fromjson? |
-{
- time: .time | (. - (3600*5) | strftime("%Y-%m-%d %H:%M:%S")),
- brokers:
- [ .brokers[] | select(.req.Produce > 0) | {
- (.nodeid | tostring): {
- "nodeid": .nodeid,
- "state": .state,
- "stateage": (.stateage/1000000.0),
- "connects": .connects,
- "rtt_p99": .rtt.p99,
- "throttle": .throttle.cnt,
- "outbuf_cnt": .outbuf_cnt,
- "outbuf_msg_cnt": .outbuf_msg_cnt,
- "waitresp_cnt": .waitresp_cnt,
- "Produce": .req.Produce,
- "Metadata": .req.Metadata,
- "toppar_cnt": (.toppars | length)
- }
- }
- ],
-
- topics:
- [ .topics[] | select(.batchcnt.cnt > 0) | {
- (.topic): {
- "batchsize_p99": .batchsize.p99,
- "batchcnt_p99": .batchcnt.p99,
- "toppars": (.partitions[] | {
- (.partition | tostring): {
- leader: .leader,
- msgq_cnt: .msgq_cnt,
- xmit_msgq_cnt: .xmit_msgq_cnt,
- txmsgs: .txmsgs,
- msgs_inflight: .msgs_inflight
- }
- }),
- }
- } ]
-} \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py
deleted file mode 100755
index 3eeaa1541..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python3
-#
-# Use pandas + bokeh to create graphs/charts/plots for stats CSV (to_csv.py).
-#
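-# Example invocation (illustrative, using CSV files produced by to_csv.py):
-#   ./graph.py --skip '*bytes,*msgs' --group-by 1partition out_toppars.csv
-#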
-
-import os
-import pandas as pd
-from bokeh.io import curdoc
-from bokeh.models import ColumnDataSource, HoverTool
-from bokeh.plotting import figure
-from bokeh.palettes import Dark2_5 as palette
-from bokeh.models.formatters import DatetimeTickFormatter
-
-import pandas_bokeh
-import argparse
-import itertools
-from fnmatch import fnmatch
-
-datecolumn = '0time'
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Graph CSV files')
- parser.add_argument('infiles', nargs='+', type=str,
- help='CSV files to plot.')
- parser.add_argument('--cols', type=str,
- help='Columns to plot (CSV list)')
- parser.add_argument('--skip', type=str,
- help='Columns to skip (CSV list)')
- parser.add_argument('--group-by', type=str,
- help='Group data series by field')
- parser.add_argument('--chart-cols', type=int, default=3,
- help='Number of chart columns')
- parser.add_argument('--plot-width', type=int, default=400,
- help='Per-plot width')
- parser.add_argument('--plot-height', type=int, default=300,
- help='Per-plot height')
- parser.add_argument('--out', type=str, default='out.html',
- help='Output file (HTML)')
- args = parser.parse_args()
-
- outpath = args.out
- if args.cols is None:
- cols = None
- else:
- cols = args.cols.split(',')
- cols.append(datecolumn)
-
- if args.skip is None:
- assert cols is None, "--cols and --skip are mutually exclusive"
- skip = None
- else:
- skip = args.skip.split(',')
-
- group_by = args.group_by
-
- pandas_bokeh.output_file(outpath)
- curdoc().theme = 'dark_minimal'
-
- figs = {}
- plots = []
- for infile in args.infiles:
-
- colors = itertools.cycle(palette)
-
- cols_to_use = cols
-
- if skip is not None:
- # First read available fields
- avail_cols = list(pd.read_csv(infile, nrows=1))
-
- cols_to_use = [c for c in avail_cols
- if len([x for x in skip if fnmatch(c, x)]) == 0]
-
- df = pd.read_csv(infile,
- parse_dates=[datecolumn],
- index_col=datecolumn,
- usecols=cols_to_use)
- title = os.path.basename(infile)
- print(f"{infile}:")
-
- if group_by is not None:
-
- grp = df.groupby([group_by])
-
- # Make one plot per column, skipping the index and group_by cols.
- for col in df.keys():
- if col in (datecolumn, group_by):
- continue
-
- print("col: ", col)
-
- for _, dg in grp:
- print(col, " dg:\n", dg.head())
- figtitle = f"{title}: {col}"
- p = figs.get(figtitle, None)
- if p is None:
- p = figure(title=f"{title}: {col}",
- plot_width=args.plot_width,
- plot_height=args.plot_height,
- x_axis_type='datetime',
- tools="hover,box_zoom,wheel_zoom," +
- "reset,pan,poly_select,tap,save")
- figs[figtitle] = p
- plots.append(p)
-
- p.add_tools(HoverTool(
- tooltips=[
- ("index", "$index"),
- ("time", "@0time{%F}"),
- ("y", "$y"),
- ("desc", "$name"),
- ],
- formatters={
- "@0time": "datetime",
- },
- mode='vline'))
-
- p.xaxis.formatter = DatetimeTickFormatter(
- minutes=['%H:%M'],
- seconds=['%H:%M:%S'])
-
- source = ColumnDataSource(dg)
-
- val = dg[group_by][0]
- for k in dg:
- if k != col:
- continue
-
- p.line(x=datecolumn, y=k, source=source,
- legend_label=f"{k}[{val}]",
- name=f"{k}[{val}]",
- color=next(colors))
-
- continue
-
- else:
- p = df.plot_bokeh(title=title,
- kind='line', show_figure=False)
-
- plots.append(p)
-
- for p in plots:
- p.legend.click_policy = "hide"
-
- grid = []
- for i in range(0, len(plots), args.chart_cols):
- grid.append(plots[i:i + args.chart_cols])
-
- pandas_bokeh.plot_grid(grid)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt
deleted file mode 100644
index 1ea1d84d2..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-pandas
-pandas-bokeh
-numpy
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py
deleted file mode 100755
index d5fc9b6e7..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python3
-#
-# Parse librdkafka stats JSON from stdin, one stats object per line, pick out
-# the relevant fields and emit CSV files suitable for plotting with graph.py
-#
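-# Example (illustrative; "out" is the output file prefix):
-#   grep -F STATS: broker.log | sed -e 's/^.*STATS: //' | ./to_csv.py out
-#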
-
-import sys
-import json
-from datetime import datetime
-from collections import OrderedDict
-
-
-def parse(linenr, string):
- try:
- js = json.loads(string)
- except Exception:
- return [], [], [], []
-
- dt = datetime.utcfromtimestamp(js['time']).strftime('%Y-%m-%d %H:%M:%S')
-
- top = {'0time': dt}
- topcollect = ['msg_cnt', 'msg_size']
- for c in topcollect:
- top[c] = js[c]
-
- top['msg_cnt_fill'] = (float(js['msg_cnt']) / js['msg_max']) * 100.0
- top['msg_size_fill'] = (float(js['msg_size']) / js['msg_size_max']) * 100.0
-
- collect = ['outbuf_cnt', 'outbuf_msg_cnt', 'tx',
- 'waitresp_cnt', 'waitresp_msg_cnt', 'wakeups']
-
- brokers = []
- for b, d in js['brokers'].items():
- if d['req']['Produce'] == 0:
- continue
-
- out = {'0time': dt, '1nodeid': d['nodeid']}
- out['stateage'] = int(d['stateage'] / 1000)
-
- for c in collect:
- out[c] = d[c]
-
- out['rtt_p99'] = int(d['rtt']['p99'] / 1000)
- out['int_latency_p99'] = int(d['int_latency']['p99'] / 1000)
- out['outbuf_latency_p99'] = int(d['outbuf_latency']['p99'] / 1000)
- out['throttle_p99'] = d['throttle']['p99']
- out['throttle_cnt'] = d['throttle']['cnt']
- out['latency_p99'] = (out['int_latency_p99'] +
- out['outbuf_latency_p99'] +
- out['rtt_p99'])
- out['toppars_cnt'] = len(d['toppars'])
- out['produce_req'] = d['req']['Produce']
-
- brokers.append(out)
-
- tcollect = []
- tpcollect = ['leader', 'msgq_cnt', 'msgq_bytes',
- 'xmit_msgq_cnt', 'xmit_msgq_bytes',
- 'txmsgs', 'txbytes', 'msgs_inflight']
-
- topics = []
- toppars = []
- for t, d in js['topics'].items():
-
- tout = {'0time': dt, '1topic': t}
- for c in tcollect:
- tout[c] = d[c]
- tout['batchsize_p99'] = d['batchsize']['p99']
- tout['batchcnt_p99'] = d['batchcnt']['p99']
-
- for tp, d2 in d['partitions'].items():
- if d2['txmsgs'] == 0:
- continue
-
- tpout = {'0time': dt, '1partition': d2['partition']}
-
- for c in tpcollect:
- tpout[c] = d2[c]
-
- toppars.append(tpout)
-
- topics.append(tout)
-
- return [top], brokers, topics, toppars
-
-
-class CsvWriter(object):
- def __init__(self, outpfx, name):
- self.f = open(f"{outpfx}_{name}.csv", "w")
- self.cnt = 0
-
- def write(self, d):
- od = OrderedDict(sorted(d.items()))
- if self.cnt == 0:
- # Write heading
- self.f.write(','.join(od.keys()) + '\n')
-
- self.f.write(','.join(map(str, od.values())) + '\n')
- self.cnt += 1
-
- def write_list(self, a_list_of_dicts):
- for d in a_list_of_dicts:
- self.write(d)
-
-
-out = sys.argv[1]
-
-w_top = CsvWriter(out, 'top')
-w_brokers = CsvWriter(out, 'brokers')
-w_topics = CsvWriter(out, 'topics')
-w_toppars = CsvWriter(out, 'toppars')
-
-
-for linenr, string in enumerate(sys.stdin):
- try:
- top, brokers, topics, toppars = parse(linenr, string)
- except Exception as e:
- print(f"SKIP {linenr+1}: {e}")
- continue
-
- w_top.write_list(top)
- w_brokers.write_list(brokers)
- w_topics.write_list(topics)
- w_toppars.write_list(toppars)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh
deleted file mode 100755
index 48cbecb0c..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#
-# Run tests, one by one, until a failure.
-#
-# Usage:
-# ./until-fail.sh [test-runner args] [mode]
-#
-# mode := bare valgrind helgrind gdb ..
-#
-# Logs for the last test run are written to _until_fail_<PID>.log.
-#
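-# Example (illustrative): run test 0004 under valgrind until it fails:
-#
-#   TESTS=0004 ./until-fail.sh valgrind
-#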
-
-[[ -z "$DELETE_TOPICS" ]] && DELETE_TOPICS=y
-
-if [[ -z $ZK_ADDRESS ]]; then
- ZK_ADDRESS="localhost"
-fi
-
-set -e
-set -o pipefail # to have 'run-test.sh | tee' fail if run-test.sh fails.
-
-ARGS=
-while [[ $1 == -* ]]; do
- ARGS="$ARGS $1"
- shift
-done
-
-modes=$*
-if [[ -z "$modes" ]]; then
- modes="valgrind"
-fi
-
-if [[ -z "$TESTS" ]]; then
- tests=$(echo 0???-*.c 0???-*.cpp)
-else
- tests="$TESTS"
-fi
-
-if [[ $modes != gdb ]]; then
- ARGS="-p1 $ARGS"
-fi
-
-LOG_FILE="_until_fail_$$.log"
-
-iter=0
-while true ; do
- iter=$(expr $iter + 1)
-
- for t in $tests ; do
- # Strip everything after test number (0001-....)
- t=$(echo $t | cut -d- -f1)
-
- for mode in $modes ; do
-
- echo "##################################################"
- echo "##################################################"
- echo "############ Test iteration $iter ################"
- echo "############ Test $t in mode $mode ###############"
- echo "##################################################"
- echo "##################################################"
-
- if [[ $t == all ]]; then
- unset TESTS
- else
- export TESTS=$t
- fi
- (./run-test.sh $ARGS $mode 2>&1 | tee $LOG_FILE) || (echo "Failed on iteration $iter, test $t, mode $mode, logs in $LOG_FILE" ; exit 1)
- done
- done
-
-
- if [[ "$DELETE_TOPICS" == "y" ]]; then
-        # Deleting topics through the Admin API is very fast, but
-        # leads to subsequent test failures because the background
-        # deletes in Kafka still take a long time:
- #
- #make delete_topics
-
- # Delete topic-by-topic using kafka-topics for each one,
- # very slow but topics are properly deleted before the script
- # returns.
- ./delete-test-topics.sh $ZK_ADDRESS || true
- fi
-done
-
-
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c b/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c
deleted file mode 100644
index 18431ba72..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2015, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "test.h"
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafka.h" /* for Kafka driver */
-
-
-/**
- * Consumer partition assignment test, without consumer group balancing.
- */
-
-
-int main_0016_assign_partition(int argc, char **argv) {
- const char *topic = test_mk_topic_name(__FUNCTION__, 1);
- rd_kafka_t *rk_p, *rk_c;
- rd_kafka_topic_t *rkt_p;
- int msg_cnt = 1000;
- int msg_base = 0;
- int partition_cnt = 2;
- int partition;
- uint64_t testid;
- rd_kafka_topic_conf_t *default_topic_conf;
- rd_kafka_topic_partition_list_t *partitions;
- char errstr[512];
-
- testid = test_id_generate();
-
- /* Produce messages */
- rk_p = test_create_producer();
- rkt_p = test_create_producer_topic(rk_p, topic, NULL);
-
- for (partition = 0; partition < partition_cnt; partition++) {
- test_produce_msgs(rk_p, rkt_p, testid, partition,
- msg_base + (partition * msg_cnt), msg_cnt,
- NULL, 0);
- }
-
- rd_kafka_topic_destroy(rkt_p);
- rd_kafka_destroy(rk_p);
-
-
- test_conf_init(NULL, &default_topic_conf, 0);
- if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
- "smallest", errstr,
- sizeof(errstr)) != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s\n", errstr);
-
- rk_c =
- test_create_consumer(topic /*group_id*/, NULL, default_topic_conf);
-
- /* Fill in partition set */
- partitions = rd_kafka_topic_partition_list_new(partition_cnt);
-
- for (partition = 0; partition < partition_cnt; partition++)
- rd_kafka_topic_partition_list_add(partitions, topic, partition);
-
- test_consumer_assign("assign.partition", rk_c, partitions);
-
- /* Make sure all messages are available */
- test_consumer_poll("verify.all", rk_c, testid, partition_cnt, msg_base,
- partition_cnt * msg_cnt);
-
- /* Stop assignments */
- test_consumer_unassign("unassign.partitions", rk_c);
-
-#if 0 // FIXME when get_offset() is functional
- /* Acquire stored offsets */
- for (partition = 0 ; partition < partition_cnt ; partition++) {
- rd_kafka_resp_err_t err;
- rd_kafka_topic_t *rkt_c = rd_kafka_topic_new(rk_c, topic, NULL);
- int64_t offset;
- test_timing_t t_offs;
-
- TIMING_START(&t_offs, "GET.OFFSET");
- err = rd_kafka_consumer_get_offset(rkt_c, partition,
- &offset, 5000);
- TIMING_STOP(&t_offs);
- if (err)
- TEST_FAIL("Failed to get offsets for %s [%"PRId32"]: "
- "%s\n",
- rd_kafka_topic_name(rkt_c), partition,
- rd_kafka_err2str(err));
- TEST_SAY("get_offset for %s [%"PRId32"] returned %"PRId64"\n",
- rd_kafka_topic_name(rkt_c), partition, offset);
-
- rd_kafka_topic_destroy(rkt_c);
- }
-#endif
- test_consumer_close(rk_c);
-
- rd_kafka_destroy(rk_c);
-
- return 0;
-}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp
deleted file mode 100644
index 00c31bc82..000000000
--- a/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * librdkafka - Apache Kafka C library
- *
- * Copyright (c) 2012-2014, Magnus Edenhill
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * - Generate a unique topic name (there is a C function for that in test.h
- *   which you should use)
- * - Query metadata for that topic
- * - Wait one second
- * - Query again; it should now have ISRs and everything
- * Note: The test requires auto.create.topics.enable = true in the Kafka
- * server properties.
- */
-
-
-#define _GNU_SOURCE
-#include <sys/time.h>
-#include <time.h>
-#include <string>
-#include <sstream>
-#include <iostream>
-
-
-extern "C" {
-#include "test.h"
-}
-
-/* Typical include path would be <librdkafka/rdkafka.h>, but this program
- * is built from within the librdkafka source tree and thus differs. */
-#include "rdkafkacpp.h" /* for Kafka driver */
-
-/**
- * Generate a unique topic name (there is a C function for that in test.h
- * which you should use), query metadata for that topic, wait one second,
- * then query again; it should now have ISRs and everything.
- */
-static void test_metadata_cpp(void) {
- RdKafka::Conf *conf = RdKafka::Conf::create(
- RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C
- test_conf_init()? */
- RdKafka::Conf *tconf = RdKafka::Conf::create(
-      RdKafka::Conf::CONF_TOPIC); /* @TODO: Same as previous */
-
- RdKafka::Metadata *metadata;
- RdKafka::ErrorCode err;
- int msgcnt = test_on_ci ? 1000 : 10000;
- int partition_cnt = 2;
- int i;
- uint64_t testid;
- int msg_base = 0;
- std::string errstr;
- const char *topic_str = test_mk_topic_name("0013", 1);
- /* if(!topic){
- TEST_FAIL()
- }*/
-
- // const RdKafka::Conf::ConfResult confResult =
- // conf->set("debug","all",errstr); if(confResult != RdKafka::Conf::CONF_OK){
- // std::stringstream errstring;
- // errstring << "Can't set config" << errstr;
- // TEST_FAIL(errstring.str().c_str());
- //}
-
- TEST_SAY("Topic %s.\n", topic_str);
-
- const RdKafka::Conf::ConfResult confBrokerResult =
- conf->set("metadata.broker.list", "localhost:9092", errstr);
- if (confBrokerResult != RdKafka::Conf::CONF_OK) {
- std::stringstream errstring;
- errstring << "Can't set broker" << errstr;
- TEST_FAIL(errstring.str().c_str());
- }
-
- /* Create a producer to fetch metadata */
- RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
- if (!producer) {
- std::stringstream errstring;
- errstring << "Can't create producer" << errstr;
- TEST_FAIL(errstring.str().c_str());
- }
-
- /*
- * Create topic handle.
- */
- RdKafka::Topic *topic = NULL;
- topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
- if (!topic) {
- std::stringstream errstring;
- errstring << "Can't create topic" << errstr;
-    TEST_FAIL(errstring.str().c_str());
- }
-
-  /* First metadata request: the request itself must succeed */
- err = producer->metadata(topic != NULL, topic, &metadata, 5000);
- if (err != RdKafka::ERR_NO_ERROR) {
- std::stringstream errstring;
- errstring << "Can't request first metadata: " << errstr;
- TEST_FAIL(errstring.str().c_str());
- }
-
-  /* It's a new topic, so it should have no partitions yet */
- if (metadata->topics()->at(0)->partitions()->size() != 0) {
- TEST_FAIL("ISRS != 0");
- }
-
- sleep(1);
-
-  /* Second metadata request: it must succeed */
- err = producer->metadata(topic != NULL, topic, &metadata, 5000);
-
-  /* It should now have partitions */
- if (metadata->topics()->at(0)->partitions()->size() == 0) {
- TEST_FAIL("ISRS == 0");
- }
-
-
- delete topic;
- delete producer;
- delete tconf;
- delete conf;
-
- /* Wait for everything to be cleaned up since broker destroys are
- * handled in its own thread. */
- test_wait_exit(10);
-
-  /* If we haven't failed at this point then
-   * no threads were leaked */
- return;
-}
-
-int main(int argc, char **argv) {
- test_conf_init(NULL, NULL, 20);
- test_metadata_cpp();
- return 0;
-}