diff --git a/.appveyor.yml b/.appveyor.yml
index b215134155..ea561e30f9 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,10 +1,10 @@
-version: 1.8.0-R-post{build}
+version: 1.9.0-R-post{build}
pull_requests:
do_not_increment_build_number: true
-image: Visual Studio 2015
+image: Visual Studio 2019
configuration: Release
environment:
- runtime: v140
+ runtime: v142
matrix:
- platform: x64
arch: x64
diff --git a/.travis.yml b/.travis.yml
index f5a8d99791..1b4e009d09 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -22,7 +22,10 @@ matrix:
if: tag IS present
os: linux
compiler: gcc
- env: ADDITIONAL_BUILDS="debian" LINKAGE=std
+ env:
+ - ADDITIONAL_BUILDS="debian"
+ - ADDITIONAL_BUILD_FLAGS="--source-deps-only"
+ - LINKAGE=std
before_script:
- ./configure --install-deps --disable-lz4-ext --disable-regex-ext --prefix="$PWD/dest" --enable-strip
@@ -35,7 +38,7 @@ matrix:
- name: "Linux clang: +alpine +manylinux +werror"
os: linux
compiler: clang
- env: ADDITIONAL_BUILDS="alpine manylinux2010_x86_64" LINKAGE=std
+ env: ADDITIONAL_BUILDS="alpine manylinux2010_x86_64" ADDITIONAL_BUILD_FLAGS="--source-deps-only" LINKAGE=std
before_script:
- ./configure --install-deps --disable-lz4-ext --prefix="$PWD/dest" --enable-werror --enable-strip
@@ -83,16 +86,21 @@ matrix:
- source ./packaging/mingw-w64/travis-before-install.sh
before_script:
- ./packaging/mingw-w64/configure-build-msys2-mingw-static.sh
+ - ./packaging/mingw-w64/run-tests.sh
- name: "Linux GCC: +integration-tests +copyright-check +doc-check +devel +code-cov +c99 +c++98"
os: linux
dist: xenial
+ language: python
+ python: 3.8
compiler: gcc
env: NO_ARTIFACTS=y RUN_INTEGRATION_TESTS=y COPYRIGHT_CHECK=y DOC_CHECK=y
before_script:
- wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb
- sudo dpkg -i rapidjson-dev.deb
- - sudo pip3 install -r tests/requirements.txt
+ - python -m pip install -U pip
+ - python -m pip -V
+ - python -m pip install -r tests/requirements.txt
- sudo apt update
- sudo apt install -y doxygen graphviz gdb
- ./configure --CFLAGS="-std=c99" --CXXFLAGS="-std=c++98" --install-deps --enable-devel --disable-lz4-ext --prefix="$PWD/dest"
@@ -137,10 +145,10 @@ script:
- if [[ $SKIP_MAKE != y && $RUN_INTEGRATION_TESTS != y ]]; then if [[ -n $TRAVIS_TAG ]]; then make -C tests run_local_quick; else make -C tests unit ; fi ; fi
- if [[ $SKIP_MAKE != y ]]; then make install || travis_terminate 1 ; fi
- if [[ -z $NO_ARTIFACTS ]]; then (cd dest && tar cvzf ../artifacts/librdkafka-${CC}.tar.gz .) ; fi
-- if [[ -n $TRAVIS_TAG ]]; then for distro in $ADDITIONAL_BUILDS ; do packaging/tools/distro-build.sh $distro --enable-strip || travis_terminate 1 ; done ; fi
+- if [[ -n $TRAVIS_TAG ]]; then for distro in $ADDITIONAL_BUILDS ; do packaging/tools/distro-build.sh $distro $ADDITIONAL_BUILD_FLAGS --enable-strip || travis_terminate 1 ; done ; fi
- if [[ $COPYRIGHT_CHECK == y ]]; then make copyright-check || travis_terminate 1; fi
- if [[ $DOC_CHECK == y ]]; then make docs || travis_terminate 1 ; fi
-- if [[ -z $TRAVIS_TAG && $RUN_INTEGRATION_TESTS == y ]]; then (cd tests && travis_retry ./interactive_broker_version.py -c "make quick" 2.7.0) || travis_terminate 1 ; fi
+- if [[ -z $TRAVIS_TAG && $RUN_INTEGRATION_TESTS == y ]]; then (cd tests && travis_retry ./interactive_broker_version.py -c "make quick" 2.8.1) || travis_terminate 1 ; fi
- if [[ -f tests/core ]] && (which gdb >/dev/null); then (cd tests && LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner core < backtrace.gdb) ; fi
- sha256sum artifacts/* || true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1036142747..7d35a6f517 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,9 +2,37 @@
librdkafka v1.9.0 is a feature release:
+ * Added KIP-768 OAUTHBEARER OIDC support (by @jliunyu, #3560)
+ * Added KIP-140 Admin API ACL support (by @emasab, #2676)
+
+
+## Upgrade considerations
+
+ * Consumer:
+ `rd_kafka_offsets_store()` (et.al) will now return an error for any
+ partition that is not currently assigned (through `rd_kafka_*assign()`).
+ This prevents a race condition where an application would store offsets
+ after the assigned partitions had been revoked (which resets the stored
+ offset), which could cause these old stored offsets to be committed later
+ when the same partitions were assigned to this consumer again - effectively
+ overwriting any offsets committed by consumers that were previously
+ assigned the same partitions. This would typically result in the offsets
+ rewinding and messages being reprocessed.
+ As an extra effort to avoid this situation the stored offset is now
+ also reset when partitions are assigned (through `rd_kafka_*assign()`).
+ Applications that explicitly call `..offset*_store()` will now need
+ to handle the case where `RD_KAFKA_RESP_ERR__STATE` is returned
+ in the per-partition `.err` field - meaning the partition is no longer
+ assigned to this consumer and the offset could not be stored for commit.
+
## Enhancements
+ * Improved producer queue scheduling. Fixes the performance regression
+ introduced in v1.7.0 for some produce patterns. (#3538, #2912)
+ * Windows: Added native Win32 IO/Queue scheduling. This removes the
+ internal TCP loopback connections that were previously used for timely
+ queue wakeups.
* SASL OAUTHBEARER refresh callbacks can now be scheduled for execution
on librdkafka's background thread. This solves the problem where an
application has a custom SASL OAUTHBEARER refresh callback and thus needs to
@@ -15,18 +43,99 @@ librdkafka v1.9.0 is a feature release:
can now be triggered automatically on the librdkafka background thread.
* `rd_kafka_queue_get_background()` now creates the background thread
if not already created.
+ * Bundled zlib upgraded to version 1.2.12.
+ * Bundled OpenSSL upgraded to 1.1.1n.
+ * Added `test.mock.broker.rtt` to simulate RTT/latency for mock brokers.
## Fixes
### General fixes
+ * Fix various 1 second delays due to internal broker threads blocking on IO
+ even though there are events to handle.
+ These delays could be seen randomly in any of the non produce/consume
+ request APIs, such as `commit_transaction()`, `list_groups()`, etc.
* Windows: some applications would crash with an error message like
`no OPENSSL_Applink()` written to the console if `ssl.keystore.location`
was configured.
This regression was introduced in v1.8.0 due to use of vcpkgs and how
keystore file was read. #3554.
+ * `rd_kafka_clusterid()` would previously fail with timeout if
+ called on a cluster with no visible topics (#3620).
+ The clusterid is now returned as soon as metadata has been retrieved.
+ * Fix hang in `rd_kafka_list_groups()` if there are no available brokers
+ to connect to (#3705).
+
+
+### Consumer fixes
+
+ * `rd_kafka_offsets_store()` (et.al) will now return an error for any
+ partition that is not currently assigned (through `rd_kafka_*assign()`).
+ See **Upgrade considerations** above for more information.
+ * `rd_kafka_*assign()` will now reset/clear the stored offset.
+ See **Upgrade considerations** above for more information.
+ * An `ERR_MSG_SIZE_TOO_LARGE` consumer error would previously be raised
+ if the consumer received a maximum sized FetchResponse only containing
+ (transaction) aborted messages with no control messages. The fetching did
+ not stop, but some applications would terminate upon receiving this error.
+ No error is now raised in this case. (#2993)
+ Thanks to @jacobmikesell for providing an application to reproduce the
+ issue.
+ * The consumer no longer backs off the next fetch request (default 500ms) when
+ the parsed fetch response is truncated (which is a valid case).
+ This should speed up the message fetch rate in case of maximum sized
+ fetch responses.
+ * Fix consumer crash (`assert: rkbuf->rkbuf_rkb`) when parsing
+ malformed JoinGroupResponse consumer group metadata state.
+ * Fix crash (`cant handle op type`) when using `consume_batch_queue()` (et.al)
+ and an OAUTHBEARER refresh callback was set.
+ The callback is now triggered by the consume call. (#3263)
+
+
+### Producer fixes
+ * Fix message loss in idempotent/transactional producer.
+ A corner case has been identified that may cause idempotent/transactional
+ messages to be lost despite being reported as successfully delivered:
+ During cluster instability a restarting broker may report existing topics
+ as non-existent for some time before it is able to acquire up to date
+ cluster and topic metadata.
+ If an idempotent/transactional producer updates its topic metadata cache
+ from such a broker the producer will consider the topic to be removed from
+ the cluster and thus remove its local partition objects for the given topic.
+ This also removes the internal message sequence number counter for the given
+ partitions.
+ If the producer later receives proper topic metadata for the cluster the
+ previously "removed" topics will be rediscovered and new partition objects
+ will be created in the producer. These new partition objects, with no
+ knowledge of previous incarnations, would start counting partition messages
+ at zero again.
+ If new messages were produced for these partitions by the same producer
+ instance, the same message sequence numbers would be sent to the broker.
+ If the broker still maintains state for the producer's PID and Epoch it could
+ deem that these messages with reused sequence numbers had already been
+ written to the log and treat them as legit duplicates.
+ To the producer it would appear that these new messages were successfully
+ written to the partition log by the broker, when they were in fact discarded
+ as duplicates, leading to silent message loss.
+ The fix included in this release is to save the per-partition idempotency
+ state when a partition is removed, and then recover and use that saved
+ state if the partition comes back at a later time.
+ * The transactional producer would retry (re)initializing its PID if a
+ `PRODUCER_FENCED` error was returned from the
+ broker (added in Apache Kafka 2.8), which could cause the producer to
+ seemingly hang.
+ This error code is now correctly handled by raising a fatal error.
+ * Improved producer queue wakeup scheduling. This should significantly
+ decrease the number of wakeups and thus syscalls for high message rate
+ producers. (#3538, #2912)
+ * The logic for enforcing that `message.timeout.ms` is greater than
+ an explicitly configured `linger.ms` was incorrect: instead of
+ erroring out early, the linger time was automatically adjusted to the
+ message timeout, ignoring the configured `linger.ms`.
+ This has now been fixed so that an error is returned when instantiating the
+ producer. Thanks to @larry-cdn77 for analysis and test-cases. (#3709)
# librdkafka v1.8.2
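
The **Upgrade considerations** entry above implies that applications calling `rd_kafka_offsets_store()` explicitly should now inspect the per-partition `.err` field for `RD_KAFKA_RESP_ERR__STATE`. Below is a minimal sketch of such handling, not part of this diff; the `store_offsets()` helper name is made up for illustration and `rk`/`offsets` are assumed to be the application's consumer handle and offset list.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Hypothetical helper: store offsets and report partitions that could
 * not be stored, in particular partitions that are no longer assigned
 * (RD_KAFKA_RESP_ERR__STATE), as described in the changelog above. */
static void store_offsets(rd_kafka_t *rk,
                          rd_kafka_topic_partition_list_t *offsets) {
        int i;
        rd_kafka_resp_err_t err = rd_kafka_offsets_store(rk, offsets);

        if (err)
                fprintf(stderr, "Offset store failed: %s\n",
                        rd_kafka_err2str(err));

        /* Check the per-partition .err field regardless of the
         * top-level return value. */
        for (i = 0; i < offsets->cnt; i++) {
                const rd_kafka_topic_partition_t *p = &offsets->elems[i];

                if (p->err == RD_KAFKA_RESP_ERR__STATE)
                        /* Partition is no longer assigned to this consumer:
                         * the offset was not stored and must not be
                         * committed by this instance. */
                        fprintf(stderr,
                                "Not storing offset for %s [%d]: "
                                "partition no longer assigned\n",
                                p->topic, (int)p->partition);
                else if (p->err)
                        fprintf(stderr,
                                "Failed to store offset for %s [%d]: %s\n",
                                p->topic, (int)p->partition,
                                rd_kafka_err2str(p->err));
        }
}
```
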
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1129bce9d..774473fa27 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -219,7 +219,7 @@ configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h")
include(GNUInstallDirs)
-set(config_install_dir "lib/cmake/${PROJECT_NAME}")
+set(config_install_dir "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
diff --git a/CONFIGURATION.md b/CONFIGURATION.md
index 0010d1bf89..a0a15ab5c9 100644
--- a/CONFIGURATION.md
+++ b/CONFIGURATION.md
@@ -100,12 +100,12 @@ sasl.aws.duration.sec | * | 900 .. 43200 | 900
sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string*
enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean*
oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. Also see `rd_kafka_conf_enable_sasl_queue()`.
*Type: see dedicated API*
-sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method is used. If set it to "oidc", OAuth/OIDC login method will be used. sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, sasl.oauthbearer.scope, sasl.oauthbearer.extensions, and sasl.oauthbearer.token.endpoint.url are needed if sasl.oauthbearer.method is set to "oidc".
*Type: enum value*
-sasl.oauthbearer.client.id | * | | | low | It's a public identifier for the application. It must be unique across all clients that the authorization server handles. This is only used when sasl.oauthbearer.method is set to oidc.
*Type: string*
-sasl.oauthbearer.client.secret | * | | | low | A client secret only known to the application and the authorization server. This should be a sufficiently random string that are not guessable. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string*
-sasl.oauthbearer.scope | * | | | low | Client use this to specify the scope of the access request to the broker. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string*
-sasl.oauthbearer.extensions | * | | | low | Allow additional information to be provided to the broker. It's comma-separated list of key=value pairs. The example of the input is "supportFeatureX=true,organizationId=sales-emea". This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string*
-sasl.oauthbearer.token.endpoint.url | * | | | low | OAUTH issuer token endpoint HTTP(S) URI used to retrieve the token. This is only used when sasl.oauthbearer.method is set to "oidc".
*Type: string*
+sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method is used. If set to "oidc", the following properties must also be specified: `sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, and `sasl.oauthbearer.token.endpoint.url`.
*Type: enum value*
+sasl.oauthbearer.client.id | * | | | low | Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string*
+sasl.oauthbearer.client.secret | * | | | low | Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string*
+sasl.oauthbearer.scope | * | | | low | Clients use this to specify the scope of the access request to the broker. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string*
+sasl.oauthbearer.extensions | * | | | low | Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., "supportFeatureX=true,organizationId=sales-emea". Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string*
+sasl.oauthbearer.token.endpoint.url | * | | | low | OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string*
plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string*
interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: see dedicated API*
group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string*
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 11665b3e00..1a8eb9b10d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,9 +36,10 @@ likely to happen.
clang-format is used to check, and fix, the style for C/C++ files,
while flake8 and autopep8 is used for the Python scripts.
-You should check the style before committing by running `make style-check`
+You should check the style before committing by running `make style-check-changed`
from the top-level directory, and if any style errors are reported you can
-automatically fix them using `make style-fix`.
+automatically fix them using `make style-fix-changed` (or skip the check
+and run the fix target directly).
The Python code may need some manual fixing since autopep8 is unable to fix
all warnings reported by flake8, in particular it will not split long lines,
diff --git a/INTRODUCTION.md b/INTRODUCTION.md
index abb920166d..8f4b5dc74d 100644
--- a/INTRODUCTION.md
+++ b/INTRODUCTION.md
@@ -50,10 +50,12 @@ librdkafka also provides a native C++ interface.
- [Termination](#termination)
- [High-level KafkaConsumer](#high-level-kafkaconsumer)
- [Producer](#producer)
+ - [Admin API client](#admin-api-client)
- [Speeding up termination](#speeding-up-termination)
- [Threads and callbacks](#threads-and-callbacks)
- [Brokers](#brokers)
- [SSL](#ssl)
+ - [OAUTHBEARER with support for OIDC](#oauthbearer-with-support-for-oidc)
- [Sparse connections](#sparse-connections)
- [Random broker selection](#random-broker-selection)
- [Persistent broker connections](#persistent-broker-connections)
@@ -67,10 +69,12 @@ librdkafka also provides a native C++ interface.
- [Offset management](#offset-management)
- [Auto offset commit](#auto-offset-commit)
- [At-least-once processing](#at-least-once-processing)
+ - [Auto offset reset](#auto-offset-reset)
- [Consumer groups](#consumer-groups)
- [Static consumer groups](#static-consumer-groups)
- [Topics](#topics)
- [Unknown or unauthorized topics](#unknown-or-unauthorized-topics)
+ - [Topic metadata propagation for newly created topics](#topic-metadata-propagation-for-newly-created-topics)
- [Topic auto creation](#topic-auto-creation)
- [Metadata](#metadata)
- [< 0.9.3](#-093)
@@ -1121,6 +1125,31 @@ For example, to read both intermediate and root CAs, set
`ssl.ca.certificate.stores=CA,Root`.
+#### OAUTHBEARER with support for OIDC
+
+OAUTHBEARER with OIDC provides a method for the client to authenticate to the
+Kafka cluster by requesting an authentication token from an issuing server
+and passing the retrieved token to brokers during connection setup.
+
+To use this authentication method the client needs to be configured as follows:
+
+ * `security.protocol` - set to `SASL_SSL` or `SASL_PLAINTEXT`.
+ * `sasl.mechanism` - set to `OAUTHBEARER`.
+ * `sasl.oauthbearer.method` - set to `OIDC`.
+ * `sasl.oauthbearer.token.endpoint.url` - OAUTH issuer token
+ endpoint HTTP(S) URI used to retrieve the token.
+ * `sasl.oauthbearer.client.id` - public identifier for the application.
+ It must be unique across all clients that the authorization server handles.
+ * `sasl.oauthbearer.client.secret` - secret known only to the
+ application and the authorization server.
+ This should be a sufficiently random string that is not guessable.
+ * `sasl.oauthbearer.scope` - clients use this to specify the scope of the
+ access request to the broker.
+ * `sasl.oauthbearer.extensions` - (optional) additional information to be
+ provided to the broker. A comma-separated list of key=value pairs.
+ For example:
+ `supportFeatureX=true,organizationId=sales-emea`
+
#### Sparse connections
@@ -1905,7 +1934,7 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf
| KIP-651 - Support PEM format for SSL certs and keys | 2.7.0 | Supported |
| KIP-654 - Aborted txns with non-flushed msgs should not be fatal | 2.7.0 | Supported |
| KIP-735 - Increase default consumer session timeout | 3.0.0 | Supported |
-| KIP-768 - SASL/OAUTHBEARER OIDC support | WIP | Not supported |
+| KIP-768 - SASL/OAUTHBEARER OIDC support | 3.0 | Supported |
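
The OAUTHBEARER/OIDC setup described in the INTRODUCTION.md hunk above is plain client configuration, so it can be expressed with `rd_kafka_conf_set()`. Below is a minimal sketch, not part of this diff; the `oidc_conf_sketch()` helper name, endpoint URL, client id/secret and scope values are placeholders.

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Build a configuration for SASL/OAUTHBEARER with OIDC as outlined above.
 * Returns the conf object on success, or NULL (conf destroyed) on error. */
static rd_kafka_conf_t *oidc_conf_sketch(void) {
        static const char *props[][2] = {
            {"security.protocol", "SASL_SSL"},
            {"sasl.mechanism", "OAUTHBEARER"},
            {"sasl.oauthbearer.method", "oidc"},
            {"sasl.oauthbearer.token.endpoint.url",
             "https://login.example.com/oauth2/token"}, /* placeholder */
            {"sasl.oauthbearer.client.id", "my-client-id"},         /* placeholder */
            {"sasl.oauthbearer.client.secret", "my-client-secret"}, /* placeholder */
            {"sasl.oauthbearer.scope", "kafka"},                    /* placeholder */
        };
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char errstr[512];
        size_t i;

        for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                if (rd_kafka_conf_set(conf, props[i][0], props[i][1], errstr,
                                      sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%s: %s\n", props[i][0], errstr);
                        rd_kafka_conf_destroy(conf);
                        return NULL;
                }
        }

        return conf;
}
```
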
diff --git a/Makefile b/Makefile
index ee2c8c80d0..2d931f09ab 100755
--- a/Makefile
+++ b/Makefile
@@ -110,7 +110,15 @@ style-check:
@(packaging/tools/style-format.sh \
$$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$') )
+style-check-changed:
+ @(packaging/tools/style-format.sh \
+ $$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|cpp|h|py)$$'))
+
style-fix:
@(packaging/tools/style-format.sh --fix \
$$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$'))
+style-fix-changed:
+ @(packaging/tools/style-format.sh --fix \
+ $$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|cpp|h|py)$$'))
+
diff --git a/README.md b/README.md
index 2186146887..a52734633a 100644
--- a/README.md
+++ b/README.md
@@ -44,6 +44,7 @@ affiliation with and is not endorsed by The Apache Software Foundation.
[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
* Statistics metrics in [STATISTICS.md](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md).
* [Frequently asked questions](https://github.com/edenhill/librdkafka/wiki).
+ * Step-by-step tutorial [Getting Started with Apache Kafka and C/C++](https://developer.confluent.io/get-started/c/).
**NOTE**: The `master` branch is actively developed, use latest [release](https://github.com/edenhill/librdkafka/releases) for production use.
@@ -138,11 +139,13 @@ If the version is out of date, please [create an issue or pull request](https://
## Usage in code
+See [Getting Started with Apache Kafka and C/C++](https://developer.confluent.io/get-started/c/) for a basic tutorial.
+
1. Refer to the [examples directory](examples/) for code using:
-* Producers: basic producers, idempotent producers, transactional producers.
-* Consumers: basic consumers, reading batches of messages.
-* Performance and latency testing tools.
+ * Producers: basic producers, idempotent producers, transactional producers.
+ * Consumers: basic consumers, reading batches of messages.
+ * Performance and latency testing tools.
2. Refer to the [examples GitHub repo](https://github.com/confluentinc/examples/tree/master/clients/cloud/c) for code connecting to a cloud streaming data service based on Apache Kafka
@@ -156,7 +159,7 @@ Commercial support is available from [Confluent Inc](https://www.confluent.io/)
## Community support
-**Only the [last official release](https://github.com/edenhill/librdkafka/releases) is supported for community members.**
+**Only the [latest official release](https://github.com/edenhill/librdkafka/releases) is supported for community members.**
File bug reports and feature requests using [GitHub Issues](https://github.com/edenhill/librdkafka/issues).
diff --git a/STATISTICS.md b/STATISTICS.md
index 0a21ee0842..392e2cf05a 100644
--- a/STATISTICS.md
+++ b/STATISTICS.md
@@ -106,7 +106,7 @@ rxidle | int | | Microseconds since last socket receive (or -1 if no receives ye
req | object | | Request type counters. Object key is the request name, value is the number of requests sent.
zbuf_grow | int | | Total number of decompression buffer size increases
buf_grow | int | | Total number of buffer size increases (deprecated, unused)
-wakeups | int | | Broker thread poll wakeups
+wakeups | int | | Broker thread poll loop wakeups
connects | int | | Number of connection attempts, including successful and failed, and name resolution failures.
disconnects | int | | Number of disconnects (triggered by broker, network, load-balancer, etc.).
int_latency | object | | Internal producer queue latency in microseconds. See *Window stats* below
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index a90d279423..bbbb89ad90 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -23,6 +23,10 @@ target_link_libraries(rdkafka_complex_consumer_example_cpp PUBLIC rdkafka++)
add_executable(openssl_engine_example_cpp openssl_engine_example.cpp ${win32_sources})
target_link_libraries(openssl_engine_example_cpp PUBLIC rdkafka++)
+add_executable(misc misc.c ${win32_sources})
+target_link_libraries(misc PUBLIC rdkafka)
+
+
# The targets below has Unix include dirs and do not compile on Windows.
if(NOT WIN32)
add_executable(rdkafka_example rdkafka_example.c)
diff --git a/examples/Makefile b/examples/Makefile
index 7720a3c123..fc6eccc56f 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -3,7 +3,8 @@ EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \
kafkatest_verifiable_client \
producer consumer idempotent_producer transactions \
delete_records \
- openssl_engine_example_cpp
+ openssl_engine_example_cpp \
+ misc
all: $(EXAMPLES)
@@ -107,6 +108,10 @@ openssl_engine_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a openss
$(CXX) $(CPPFLAGS) $(CXXFLAGS) openssl_engine_example.cpp -o $@ $(LDFLAGS) \
../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+misc: ../src/librdkafka.a misc.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
clean:
rm -f $(EXAMPLES)
diff --git a/examples/README.md b/examples/README.md
index b742cde0e1..0e36a06657 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -16,6 +16,7 @@ Begin with the following examples:
* [transactions-older-broker.c](transactions-older-broker.c) - Same as
`transactions.c` but for Apache Kafka versions 2.4.x and older which
lack KIP-447 support.
+ * [misc.c](misc.c) - a collection of miscellaneous usage examples.
For more complex uses, see:
diff --git a/examples/misc.c b/examples/misc.c
new file mode 100644
index 0000000000..3c696d793c
--- /dev/null
+++ b/examples/misc.c
@@ -0,0 +1,287 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * A collection of smaller usage examples
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Miscellaneous librdkafka usage examples\n"
+ "\n"
+ "Usage: %s []\n"
+ "\n"
+ "Commands:\n"
+ " List groups:\n"
+ " %s -b list_groups \n"
+ "\n"
+ " Show librdkafka version:\n"
+ " %s version\n"
+ "\n"
+ "Common options for all commands:\n"
+ " -b Bootstrap server list to connect to.\n"
+ " -X Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, argv0, argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+/**
+ * Commands
+ *
+ */
+
+/**
+ * @brief Just print the librdkafka version
+ */
+static void cmd_version(rd_kafka_conf_t *conf, int argc, char **argv) {
+ if (argc)
+ usage("version command takes no arguments");
+
+ printf("librdkafka v%s\n", rd_kafka_version_str());
+ rd_kafka_conf_destroy(conf);
+}
+
+
+/**
+ * @brief Call rd_kafka_list_groups() with an optional groupid argument.
+ */
+static void cmd_list_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char *groupid = NULL;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_group_list *grplist;
+ int i;
+ int retval = 0;
+
+ if (argc > 1)
+ usage("too many arguments to list_groups");
+
+ if (argc == 1)
+ groupid = argv[0];
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * List groups
+ */
+ err = rd_kafka_list_groups(rk, groupid, &grplist, 10 * 1000 /*10s*/);
+ if (err)
+ fatal("rd_kafka_list_groups(%s) failed: %s", groupid,
+ rd_kafka_err2str(err));
+
+ if (grplist->group_cnt == 0) {
+ if (groupid) {
+ fprintf(stderr, "Group %s not found\n", groupid);
+ retval = 1;
+ } else {
+ fprintf(stderr, "No groups in cluster\n");
+ }
+ }
+
+ /*
+ * Print group information
+ */
+ for (i = 0; i < grplist->group_cnt; i++) {
+ int j;
+ const struct rd_kafka_group_info *grp = &grplist->groups[i];
+
+ printf(
+ "Group \"%s\" protocol-type %s, protocol %s, "
+ "state %s, with %d member(s))",
+ grp->group, grp->protocol_type, grp->protocol, grp->state,
+ grp->member_cnt);
+ if (grp->err)
+ printf(" error: %s", rd_kafka_err2str(grp->err));
+ printf("\n");
+ for (j = 0; j < grp->member_cnt; j++) {
+ const struct rd_kafka_group_member_info *mb =
+ &grp->members[j];
+ printf(
+ " Member \"%s\" with client-id %s, host %s, "
+ "%d bytes of metadat, %d bytes of assignment\n",
+ mb->member_id, mb->client_id, mb->client_host,
+ mb->member_metadata_size,
+ mb->member_assignment_size);
+ }
+ }
+
+ rd_kafka_group_list_destroy(grplist);
+
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
+
+
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt, i;
+ const char *cmd;
+ static const struct {
+ const char *cmd;
+ void (*func)(rd_kafka_conf_t *conf, int argc, char **argv);
+ } cmds[] = {
+ {"version", cmd_version},
+ {"list_groups", cmd_list_groups},
+ {NULL},
+ };
+
+ argv0 = argv[0];
+
+ if (argc == 1)
+ usage(NULL);
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+
+ if (optind == argc)
+ usage("No command specified");
+
+
+ cmd = argv[optind++];
+
+ /*
+ * Find matching command and run it
+ */
+ for (i = 0; cmds[i].cmd; i++) {
+ if (!strcmp(cmds[i].cmd, cmd)) {
+ cmds[i].func(conf, argc - optind, &argv[optind]);
+ exit(0);
+ }
+ }
+
+ usage("Unknown command: %s", cmd);
+
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/examples/rdkafka_performance.c b/examples/rdkafka_performance.c
index c4ba0274b5..dc31c3e0f8 100644
--- a/examples/rdkafka_performance.c
+++ b/examples/rdkafka_performance.c
@@ -216,17 +216,23 @@ static void msg_delivered(rd_kafka_t *rk,
!last || msgs_wait_cnt < 5 || !(msgs_wait_cnt % dr_disp_div) ||
(now - last) >= dispintvl * 1000 || verbosity >= 3) {
if (rkmessage->err && verbosity >= 2)
- printf("%% Message delivery failed: %s [%" PRId32
+ printf("%% Message delivery failed (broker %" PRId32
+ "): "
+ "%s [%" PRId32
"]: "
"%s (%li remain)\n",
+ rd_kafka_message_broker_id(rkmessage),
rd_kafka_topic_name(rkmessage->rkt),
rkmessage->partition,
rd_kafka_err2str(rkmessage->err), msgs_wait_cnt);
else if (verbosity > 2)
printf("%% Message delivered (offset %" PRId64
+ ", broker %" PRId32
"): "
"%li remain\n",
- rkmessage->offset, msgs_wait_cnt);
+ rkmessage->offset,
+ rd_kafka_message_broker_id(rkmessage),
+ msgs_wait_cnt);
if (verbosity >= 3 && do_seq)
printf(" --> \"%.*s\"\n", (int)rkmessage->len,
(const char *)rkmessage->payload);
@@ -1485,7 +1491,7 @@ int main(int argc, char **argv) {
(int)RD_MAX(0, (next - rd_clock()) /
1000));
} while (next > rd_clock());
- } else {
+ } else if (cnt.msgs % 1000 == 0) {
rd_kafka_poll(rk, 0);
}
diff --git a/mklove/modules/configure.base b/mklove/modules/configure.base
index a18cd6befe..d38856a542 100644
--- a/mklove/modules/configure.base
+++ b/mklove/modules/configure.base
@@ -1748,6 +1748,7 @@ $cflags"
# If attempting static linking and we're using source-only
# dependencies, then there is no need for pkg-config since
# the source installer will have set the required flags.
+ mkl_check_failed "$cname" "" "ignore" "pkg-config ignored for static build"
return 1
fi
diff --git a/mklove/modules/configure.libcurl b/mklove/modules/configure.libcurl
index c40b93d646..dd4b1a123f 100644
--- a/mklove/modules/configure.libcurl
+++ b/mklove/modules/configure.libcurl
@@ -22,6 +22,9 @@ function manual_checks {
mkl_meta_set "libcurl" "apk" "curl-dev curl-static"
mkl_meta_set "libcurl" "deb" "libcurl4-openssl-dev"
mkl_meta_set "libcurl" "static" "libcurl.a"
+ if [[ $MKL_DISTRO == "osx" && $WITH_STATIC_LINKING ]]; then
+ mkl_env_append LDFLAGS "-framework CoreFoundation -framework SystemConfiguration"
+ fi
mkl_lib_check "libcurl" "WITH_CURL" $action CC "-lcurl" \
"
#include <curl/curl.h>
@@ -41,15 +44,26 @@ void foo (void) {
function install_source {
local name=$1
local destdir=$2
- local ver=7.78.0
+ local ver=7.82.0
+ local checksum="910cc5fe279dc36e2cca534172c94364cf3fcf7d6494ba56e6c61a390881ddce"
echo "### Installing $name $ver from source to $destdir"
if [[ ! -f Makefile ]]; then
- curl -fL https://curl.se/download/curl-${ver}.tar.gz | \
- tar xzf - --strip-components 1
+ mkl_download_archive \
+ "https://curl.se/download/curl-${ver}.tar.gz" \
+ 256 \
+ $checksum || return 1
fi
- # Clear out LIBS to not interfer with lib detection process.
+ # curl's configure has a runtime check where a program is built
+ # with all libs linked and then executed, since mklove's destdir
+ # is outside the standard ld.so search path this runtime check will
+ # fail due to missing libraries. We circumvent this by passing
+ # a modified LD_LIBRARY_PATH with our destdir lib dirs prepended.
+ local _save_ldp="$LD_LIBRARY_PATH"
+ export LD_LIBRARY_PATH="${destdir}/usr/lib:${destdir}/usr/lib64:$LD_LIBRARY_PATH"
+
+ # Also clear out LIBS to not interfere with lib detection process.
LIBS="" ./configure \
--with-openssl \
--enable-static \
@@ -75,6 +89,14 @@ function install_source {
--without-{librtmp,libidn2,winidn,nghttp2,nghttp3,ngtcp2,quiche,brotli} &&
time make -j &&
make DESTDIR="${destdir}" prefix=/usr install
+ local ret=$?
+
+ # Restore
+ export LD_LIBRARY_PATH="$_save_ldp"
+
+ if [[ $MKL_DISTRO == osx ]]; then
+ mkl_mkvar_append "libcurl" LIBS "-framework CoreFoundation -framework SystemConfiguration"
+ fi
- return $?
+ return $ret
}
diff --git a/mklove/modules/configure.libssl b/mklove/modules/configure.libssl
index d8c24c4efd..9b794636cf 100644
--- a/mklove/modules/configure.libssl
+++ b/mklove/modules/configure.libssl
@@ -79,8 +79,8 @@ function manual_checks {
function libcrypto_install_source {
local name=$1
local destdir=$2
- local ver=1.1.1l
- local checksum="0b7a3e5e59c34827fe0c3a74b7ec8baef302b98fa80088d7f9153aa16fa76bd1"
+ local ver=1.1.1n
+ local checksum="40dceb51a4f6a5275bde0e6bf20ef4b91bfc32ed57c0552e2e8e15463372b17a"
local url=https://www.openssl.org/source/openssl-${ver}.tar.gz
local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib no-deprecated"
diff --git a/mklove/modules/configure.zlib b/mklove/modules/configure.zlib
index ba770488c3..09e89e833c 100644
--- a/mklove/modules/configure.zlib
+++ b/mklove/modules/configure.zlib
@@ -42,8 +42,8 @@ void foo (void) {
function install_source {
local name=$1
local destdir=$2
- local ver=1.2.11
- local checksum="c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1"
+ local ver=1.2.12
+ local checksum="91844808532e5ce316b3c010929493c0244f3d37593afd6de04f71821d5136d9"
echo "### Installing $name $ver from source to $destdir"
if [[ ! -f Makefile ]]; then
diff --git a/packaging/RELEASE.md b/packaging/RELEASE.md
index 33b6398ddd..4b483c3c34 100644
--- a/packaging/RELEASE.md
+++ b/packaging/RELEASE.md
@@ -10,34 +10,11 @@ Releases are done in two phases:
followed by a single version-bump commit (see below).
Release tag and version format:
+ * tagged release builds to verify CI release builders: vA.B.C-PREn
* release-candidate: vA.B.C-RCn
* final release: vA.B.C
-
-## Write release notes
-
-Go to https://github.com/edenhill/librdkafka/releases and create a new
-release (save as draft), outlining the following sections based on the
-changes since the last release:
- * What type of release (maintenance or feature release)
- * A short intro to the release, describing the type of release: maintenance
- or feature release, as well as fix or feature high-lights.
- * A section of New features, if any.
- * A section of Enhancements, if any.
- * A section of Fixes, if any.
-
-Hint: Use ´git log --oneline vLastReleaseTag..´ to get a list of commits since
- the last release, filter and sort this list into the above categories,
- making sure the end result is meaningful to the end-user.
- Make sure to credit community contributors for their work.
-
-Save this page as Draft until the final tag is created.
-
-The github release asset/artifact checksums will be added later when the
-final tag is pushed.
-
-
## Update protocol requests and error codes
Check out the latest version of Apache Kafka (not trunk, needs to be a released
@@ -62,6 +39,7 @@ respectively.
Add the error strings to `rdkafka.c`.
The Kafka error strings are sometimes a bit too verbose for our taste,
so feel free to rewrite them (usually removing a couple of 'the's).
+Error strings must not contain a trailing period.
**NOTE**: Only add **new** error codes, do not alter existing ones since that
will be a breaking API change.
@@ -82,6 +60,27 @@ so feel free to rewrite them (usually removing a couple of 'the's).
If all tests pass, carry on, otherwise identify and fix bug and start over.
+
+## Write release notes / changelog
+
+All relevant PRs should also include an update to [CHANGELOG.md](../CHANGELOG.md)
+that in a user-centric fashion outlines what changed.
+It might not be practical for all contributors to write meaningful changelog
+entries, so it is okay to add them separately later after the PR has been
+merged (make sure to credit community contributors for their work).
+
+The changelog should include:
+ * What type of release (maintenance or feature release)
+ * A short intro to the release, describing the type of release: maintenance
+ or feature release, as well as fix or feature highlights.
+ * A section of **New features**, if any.
+ * A section of **Upgrade considerations**, if any, to outline important changes
+ that require user attention.
+ * A section of **Enhancements**, if any.
+ * A section of **Fixes**, if any, preferably with Consumer, Producer, and
+ Generic sub-sections.
+
+
## Pre-release code tasks
**Switch to the release branch which is of the format `A.B.C.x` or `A.B.x`.**
@@ -147,28 +146,13 @@ Wait until this process is finished by monitoring the two CIs:
* https://ci.appveyor.com/project/edenhill/librdkafka
-## Publish release on github
-
-Open up the release page on github that was created above.
-
-Run the following command to get checksums of the github release assets:
-
- $ packaging/tools/gh-release-checksums.py
-
-It will take some time for the script to download the files, when done
-paste the output to the end of the release page.
-
-Make sure the release page looks okay, is still correct (check for new commits),
-and has the correct tag, then click Publish release.
-
-
### Create NuGet package
On a Linux host with docker installed, this will also require S3 credentials
to be set up.
$ cd packaging/nuget
- $ pip3 install -r requirements.txt # if necessary
+ $ python3 -m pip install -r requirements.txt # if necessary
$ ./release.py v0.11.1-RC1
Test the generated librdkafka.redist.0.11.1-RC1.nupkg and
@@ -186,8 +170,32 @@ Follow the Go client release instructions for updating its bundled librdkafka
version based on the tar ball created here.
+## Publish release on github
+
+Create a release on github by going to https://github.com/edenhill/librdkafka/releases
+and Draft a new release.
+Name the release the same as the final release tag (e.g., `v1.9.0`) and set
+the tag to the same.
+Paste the CHANGELOG.md section for this release into the release description,
+look at the preview and fix any formatting issues.
+
+Run the following command to get checksums of the github release assets:
+
+ $ packaging/tools/gh-release-checksums.py <the-tag>
+
+It will take some time for the script to download the files, when done
+paste the output to the end of the release page.
+
+Make sure the release page looks okay, is still correct (check for new commits),
+and has the correct tag, then click Publish release.
+
+
+
### Homebrew recipe update
+**Note**: This is typically not needed since homebrew seems to pick up new
+ release versions quickly enough.
+
The brew-update-pr.sh script automatically pushes a PR to homebrew-core
with a patch to update the librdkafka version of the formula.
This should only be done for final releases and not release candidates.
diff --git a/packaging/mingw-w64/configure-build-msys2-mingw-static.sh b/packaging/mingw-w64/configure-build-msys2-mingw-static.sh
index 6793fb1812..2de3ceb9b9 100644
--- a/packaging/mingw-w64/configure-build-msys2-mingw-static.sh
+++ b/packaging/mingw-w64/configure-build-msys2-mingw-static.sh
@@ -50,6 +50,3 @@ cp ./librdkafkacpp-static.a ../dest/lib/librdkafka++-static.a
popd
rm -rf ./mergescratch
-export PATH="$PWD/dest/bin:/mingw64/bin/:${PATH}"
-cd tests
-./test-runner.exe -l -Q -p1 0000
diff --git a/packaging/mingw-w64/configure-build-msys2-mingw.sh b/packaging/mingw-w64/configure-build-msys2-mingw.sh
index 1f31079a62..af4a5a2bbe 100644
--- a/packaging/mingw-w64/configure-build-msys2-mingw.sh
+++ b/packaging/mingw-w64/configure-build-msys2-mingw.sh
@@ -19,6 +19,7 @@ cmake \
$mingw64 mingw32-make
$mingw64 mingw32-make install
-export PATH="$PWD/dest/bin:/mingw64/bin/:${PATH}"
cd tests
+cp ../dest/bin/librdkafka.dll ./
+cp ../dest/bin/librdkafka++.dll ./
./test-runner.exe -l -Q -p1 0000
diff --git a/packaging/mingw-w64/run-tests.sh b/packaging/mingw-w64/run-tests.sh
new file mode 100644
index 0000000000..6749add5d4
--- /dev/null
+++ b/packaging/mingw-w64/run-tests.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+set -e
+
+cd tests
+./test-runner.exe -l -Q -p1 0000
diff --git a/packaging/rpm/mock-on-docker.sh b/packaging/rpm/mock-on-docker.sh
index ee26cb7e50..54ae0de2fe 100755
--- a/packaging/rpm/mock-on-docker.sh
+++ b/packaging/rpm/mock-on-docker.sh
@@ -10,8 +10,8 @@
set -ex
-_DOCKER_IMAGE=fedora:33
-_MOCK_CONFIGS="epel-7-x86_64 epel-8-x86_64"
+_DOCKER_IMAGE=fedora:35
+_MOCK_CONFIGS="centos+epel-7-x86_64 centos-stream+epel-8-x86_64"
if [[ $1 == "--build" ]]; then
on_builder=1
diff --git a/packaging/rpm/tests/test-on-docker.sh b/packaging/rpm/tests/test-on-docker.sh
index 78fb0b3f53..2c12ff792e 100755
--- a/packaging/rpm/tests/test-on-docker.sh
+++ b/packaging/rpm/tests/test-on-docker.sh
@@ -14,7 +14,7 @@ if [[ ! -f configure.self ]]; then
exit 1
fi
-_DOCKER_IMAGES="centos:7 centos:8"
+_DOCKER_IMAGES="centos:7 redhat/ubi8:8.5-226"
_RPMDIR=artifacts
if [[ -n $1 ]]; then
diff --git a/packaging/tools/style-format.sh b/packaging/tools/style-format.sh
index b6d0fefda2..4292bc5500 100755
--- a/packaging/tools/style-format.sh
+++ b/packaging/tools/style-format.sh
@@ -104,7 +104,7 @@ for f in $*; do
# Check style
if [[ $lang == c ]]; then
- if ! clang-format --style="$style" --dry-run "$f" ; then
+ if ! clang-format --style="$style" --Werror --dry-run "$f" ; then
echo "$f: had style errors ($stylename): see clang-format output above"
ret=1
fi
diff --git a/src-cpp/rdkafkacpp.h b/src-cpp/rdkafkacpp.h
index 6d7d136302..47f5e99fbc 100644
--- a/src-cpp/rdkafkacpp.h
+++ b/src-cpp/rdkafkacpp.h
@@ -111,7 +111,7 @@ namespace RdKafka {
* @remark This value should only be used during compile time,
* for runtime checks of version use RdKafka::version()
*/
-#define RD_KAFKA_VERSION 0x010802ff
+#define RD_KAFKA_VERSION 0x010900ff
/**
* @brief Returns the librdkafka version as integer.
@@ -2820,7 +2820,7 @@ class RD_EXPORT KafkaConsumer : public virtual Handle {
*
* This call triggers a fetch queue barrier flush.
*
- * @remark Consumtion for the given partition must have started for the
+ * @remark Consumption for the given partition must have started for the
* seek to work. Use assign() to set the starting offset.
*
* @returns an ErrorCode to indicate success or failure.
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 89a00a196c..49f818e1d9 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -102,6 +102,10 @@ if(WITH_SASL_OAUTHBEARER)
list(APPEND sources rdkafka_sasl_oauthbearer.c)
endif()
+if(WITH_CURL)
+ list(APPEND sources rdkafka_sasl_oauthbearer_oidc.c)
+endif()
+
if(WITH_ZLIB)
list(APPEND sources rdgz.c)
endif()
diff --git a/src/Makefile b/src/Makefile
index a5b294128d..28982fa8c8 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -19,6 +19,7 @@ SRCS_$(WITH_ZSTD) += rdkafka_zstd.c
SRCS_$(WITH_HDRHISTOGRAM) += rdhdrhistogram.c
SRCS_$(WITH_SSL) += rdkafka_ssl.c
SRCS_$(WITH_CURL) += rdhttp.c
+SRCS_$(WITH_CURL) += rdkafka_sasl_oauthbearer_oidc.c
SRCS_LZ4 = rdxxhash.c
ifneq ($(WITH_LZ4_EXT), y)
diff --git a/src/rdhttp.c b/src/rdhttp.c
index 91500d865b..dca6c6f83b 100644
--- a/src/rdhttp.c
+++ b/src/rdhttp.c
@@ -223,6 +223,151 @@ rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) {
}
+/**
+ * @brief Extract the JSON object from \p hreq and return it in \p *jsonp.
+ *
+ * @returns Returns NULL on success, or a JSON parsing error - this
+ * error object must be destroyed by calling rd_http_error_destroy().
+ */
+rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp) {
+ size_t len;
+ char *raw_json;
+ const char *end = NULL;
+ rd_slice_t slice;
+ rd_http_error_t *herr = NULL;
+
+ /* cJSON requires the entire input to parse in contiguous memory. */
+ rd_slice_init_full(&slice, hreq->hreq_buf);
+ len = rd_buf_len(hreq->hreq_buf);
+
+ raw_json = rd_malloc(len + 1);
+ rd_slice_read(&slice, raw_json, len);
+ raw_json[len] = '\0';
+
+ /* Parse JSON */
+ *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0);
+
+ if (!*jsonp)
+ herr = rd_http_error_new(hreq->hreq_code,
+ "Failed to parse JSON response "
+ "at %" PRIusz "/%" PRIusz,
+ (size_t)(end - raw_json), len);
+ rd_free(raw_json);
+ return herr;
+}
+
+
+/**
+ * @brief Check if the error returned from HTTP(S) is temporary or not.
+ *
+ * @returns If the \p error_code is temporary, return rd_true,
+ * otherwise return rd_false.
+ *
+ * @locality Any thread.
+ */
+static rd_bool_t rd_http_is_failure_temporary(int error_code) {
+ switch (error_code) {
+ case 408: /**< Request timeout */
+ case 425: /**< Too early */
+ case 500: /**< Internal server error */
+ case 502: /**< Bad gateway */
+ case 503: /**< Service unavailable */
+ case 504: /**< Gateway timeout */
+ return rd_true;
+
+ default:
+ return rd_false;
+ }
+}
+
+
+/**
+ * @brief Perform a blocking HTTP(S) request to \p url with
+ * HTTP(S) headers and data with \p timeout_s.
+ * If the HTTP(S) request fails, it will be retried up to \p retries times
+ * with a multiplying backoff starting at \p retry_ms.
+ *
+ * @returns The result will be returned in \p *jsonp.
+ * Returns NULL on success (HTTP response code < 400), or an error
+ * object on transport, HTTP error or a JSON parsing error - this
+ * error object must be destroyed by calling rd_http_error_destroy().
+ *
+ * @locality Any thread.
+ */
+rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
+ const char *url,
+ const struct curl_slist *headers,
+ const char *post_fields,
+ size_t post_fields_size,
+ int timeout_s,
+ int retries,
+ int retry_ms,
+ cJSON **jsonp) {
+ rd_http_error_t *herr;
+ rd_http_req_t hreq;
+ int i;
+ size_t len;
+ const char *content_type;
+
+ herr = rd_http_req_init(&hreq, url);
+ if (unlikely(herr != NULL))
+ return herr;
+
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_HTTPHEADER, headers);
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_TIMEOUT, timeout_s);
+
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDSIZE,
+ post_fields_size);
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDS, post_fields);
+
+ for (i = 0; i <= retries; i++) {
+ if (rd_kafka_terminating(rk)) {
+ rd_http_req_destroy(&hreq);
+ return rd_http_error_new(-1, "Terminating");
+ }
+
+ herr = rd_http_req_perform_sync(&hreq);
+ len = rd_buf_len(hreq.hreq_buf);
+
+ if (!herr) {
+ if (len > 0)
+ break; /* Success */
+ /* Empty response */
+ rd_http_req_destroy(&hreq);
+ return NULL;
+ }
+ /* Retry if HTTP(S) request returns temporary error and there
+ * are remaining retries, else fail. */
+ if (i == retries || !rd_http_is_failure_temporary(herr->code)) {
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ /* Retry */
+ rd_http_error_destroy(herr);
+ rd_usleep(retry_ms * 1000 * (i + 1), &rk->rk_terminate);
+ }
+
+ content_type = rd_http_req_get_content_type(&hreq);
+
+ if (!content_type || rd_strncasecmp(content_type, "application/json",
+ strlen("application/json"))) {
+ if (!herr)
+ herr = rd_http_error_new(
+ hreq.hreq_code, "Response is not JSON encoded: %s",
+ content_type ? content_type : "(n/a)");
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ herr = rd_http_parse_json(&hreq, jsonp);
+
+ rd_http_req_destroy(&hreq);
+
+ return herr;
+}
+
+
/**
* @brief Same as rd_http_get() but requires a JSON response.
* The response is parsed and a JSON object is returned in \p *jsonp.
diff --git a/src/rdhttp.h b/src/rdhttp.h
index 4238abcbce..80512e5ac2 100644
--- a/src/rdhttp.h
+++ b/src/rdhttp.h
@@ -62,9 +62,20 @@ typedef struct rd_http_req_s {
* write to. */
} rd_http_req_t;
-static void rd_http_req_destroy(rd_http_req_t *hreq);
rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url);
rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq);
+rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp);
+rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
+ const char *url,
+ const struct curl_slist *headers,
+ const char *data_to_token,
+ size_t data_to_token_size,
+ int timeout_s,
+ int retry,
+ int retry_ms,
+ cJSON **jsonp);
+void rd_http_req_destroy(rd_http_req_t *hreq);
+
#endif
diff --git a/src/rdkafka.c b/src/rdkafka.c
index afea6ab9e2..90848a2718 100644
--- a/src/rdkafka.c
+++ b/src/rdkafka.c
@@ -55,6 +55,9 @@
#include "rdkafka_idempotence.h"
#include "rdkafka_sasl_oauthbearer.h"
#include "rdkafka_sasl_aws_msk_iam.h"
+#if WITH_CURL
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+#endif
#if WITH_SSL
#include "rdkafka_ssl.h"
#endif
@@ -2239,7 +2242,9 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
rd_kafka_conf_set_oauthbearer_token_refresh_cb(
&rk->rk_conf, rd_kafka_oauthbearer_unsecured_token);
- if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
+ if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb &&
+ rk->rk_conf.sasl.oauthbearer.method !=
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC)
rk->rk_conf.enabled_events |=
RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH;
#endif
@@ -2248,6 +2253,13 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
RD_KAFKA_EVENT_AWS_MSK_IAM_CREDENTIAL_REFRESH;
#endif
+#if WITH_CURL
+ if (rk->rk_conf.sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+ !rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
+ rd_kafka_conf_set_oauthbearer_token_refresh_cb(
+ &rk->rk_conf, rd_kafka_oidc_token_refresh_cb);
+#endif
rk->rk_controllerid = -1;
/* Admin client defaults */
@@ -2305,6 +2317,7 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
/* Create Mock cluster */
rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0);
if (rk->rk_conf.mock.broker_cnt > 0) {
+ const char *mock_bootstraps;
rk->rk_mock.cluster =
rd_kafka_mock_cluster_new(rk, rk->rk_conf.mock.broker_cnt);
@@ -2316,16 +2329,18 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
goto fail;
}
+ mock_bootstraps =
+ rd_kafka_mock_cluster_bootstraps(rk->rk_mock.cluster),
rd_kafka_log(rk, LOG_NOTICE, "MOCK",
"Mock cluster enabled: "
"original bootstrap.servers and security.protocol "
- "ignored and replaced");
+ "ignored and replaced with %s",
+ mock_bootstraps);
/* Overwrite bootstrap.servers and connection settings */
- if (rd_kafka_conf_set(
- &rk->rk_conf, "bootstrap.servers",
- rd_kafka_mock_cluster_bootstraps(rk->rk_mock.cluster),
- NULL, 0) != RD_KAFKA_CONF_OK)
+ if (rd_kafka_conf_set(&rk->rk_conf, "bootstrap.servers",
+ mock_bootstraps, NULL,
+ 0) != RD_KAFKA_CONF_OK)
rd_assert(!"failed to replace mock bootstrap.servers");
if (rd_kafka_conf_set(&rk->rk_conf, "security.protocol",
@@ -2333,8 +2348,13 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
rd_assert(!"failed to reset mock security.protocol");
rk->rk_conf.security_protocol = RD_KAFKA_PROTO_PLAINTEXT;
- }
+ /* Apply default RTT to brokers */
+ if (rk->rk_conf.mock.broker_rtt)
+ rd_kafka_mock_broker_set_rtt(
+ rk->rk_mock.cluster, -1 /*all brokers*/,
+ rk->rk_conf.mock.broker_rtt);
+ }
if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL ||
rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) {
@@ -2409,22 +2429,22 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
* out from rd_kafka_new(). */
if (rk->rk_conf.background_event_cb ||
(rk->rk_conf.enabled_events & RD_KAFKA_EVENT_BACKGROUND)) {
- rd_kafka_resp_err_t err;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
rd_kafka_wrlock(rk);
- err =
- rd_kafka_background_thread_create(rk, errstr, errstr_size);
+ if (!rk->rk_background.q)
+ err = rd_kafka_background_thread_create(rk, errstr,
+ errstr_size);
rd_kafka_wrunlock(rk);
if (err)
goto fail;
}
- mtx_lock(&rk->rk_init_lock);
-
/* Lock handle here to synchronise state, i.e., hold off
* the thread until we've finalized the handle. */
rd_kafka_wrlock(rk);
/* Create handler thread */
+ mtx_lock(&rk->rk_init_lock);
rk->rk_init_wait_cnt++;
if ((thrd_create(&rk->rk_thread, rd_kafka_thread_main, rk)) !=
thrd_success) {
@@ -2435,8 +2455,8 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
rd_snprintf(errstr, errstr_size,
"Failed to create thread: %s (%i)",
rd_strerror(errno), errno);
- rd_kafka_wrunlock(rk);
mtx_unlock(&rk->rk_init_lock);
+ rd_kafka_wrunlock(rk);
#ifndef _WIN32
/* Restore sigmask of caller */
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
@@ -2444,8 +2464,8 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
goto fail;
}
- rd_kafka_wrunlock(rk);
mtx_unlock(&rk->rk_init_lock);
+ rd_kafka_wrunlock(rk);
/*
* @warning `goto fail` is prohibited past this point
@@ -3821,6 +3841,9 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
case RD_KAFKA_OP_DELETERECORDS:
case RD_KAFKA_OP_DELETEGROUPS:
case RD_KAFKA_OP_ADMIN_FANOUT:
+ case RD_KAFKA_OP_CREATEACLS:
+ case RD_KAFKA_OP_DESCRIBEACLS:
+ case RD_KAFKA_OP_DELETEACLS:
/* Calls op_destroy() from worker callback,
* when the time comes. */
res = rd_kafka_op_call(rk, rkq, rko);
@@ -3848,7 +3871,15 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
break;
default:
- rd_kafka_assert(rk, !*"cant handle op type");
+ /* If op has a callback set (e.g., OAUTHBEARER_REFRESH),
+ * call it. */
+ if (rko->rko_type & RD_KAFKA_OP_CB) {
+ res = rd_kafka_op_call(rk, rkq, rko);
+ break;
+ }
+
+ RD_BUG("Can't handle op type %s (0x%x)",
+ rd_kafka_op2str(rko->rko_type), rko->rko_type);
break;
}
@@ -4205,7 +4236,7 @@ rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms) {
/* Wake up all broker threads to trigger the produce_serve() call.
* If this flush() call finishes before the broker wakes up
* then no flushing will be performed by that broker thread. */
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_UP);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_UP, "flushing");
if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) {
/* Application wants delivery reports as events rather
@@ -4646,23 +4677,28 @@ rd_kafka_list_groups(rd_kafka_t *rk,
int rkb_cnt = 0;
struct list_groups_state state = RD_ZERO_INIT;
rd_ts_t ts_end = rd_timeout_init(timeout_ms);
- int state_version = rd_kafka_brokers_get_state_version(rk);
/* Wait until metadata has been fetched from cluster so
* that we have a full broker list.
* This state only happens during initial client setup, after that
* there'll always be a cached metadata copy. */
- rd_kafka_rdlock(rk);
- while (!rk->rk_ts_metadata) {
+ while (1) {
+ int state_version = rd_kafka_brokers_get_state_version(rk);
+ rd_bool_t has_metadata;
+
+ rd_kafka_rdlock(rk);
+ has_metadata = rk->rk_ts_metadata != 0;
rd_kafka_rdunlock(rk);
+ if (has_metadata)
+ break;
+
if (!rd_kafka_brokers_wait_state_change(
rk, state_version, rd_timeout_remains(ts_end)))
return RD_KAFKA_RESP_ERR__TIMED_OUT;
-
- rd_kafka_rdlock(rk);
}
+
state.q = rd_kafka_q_new(rk);
state.desired_group = group;
state.grplist = rd_calloc(1, sizeof(*state.grplist));
@@ -4672,6 +4708,7 @@ rd_kafka_list_groups(rd_kafka_t *rk,
rd_malloc(state.grplist_size * sizeof(*state.grplist->groups));
/* Query each broker for its list of groups */
+ rd_kafka_rdlock(rk);
TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
rd_kafka_broker_lock(rkb);
if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
diff --git a/src/rdkafka.h b/src/rdkafka.h
index a821cea49d..6028b5c64f 100644
--- a/src/rdkafka.h
+++ b/src/rdkafka.h
@@ -165,7 +165,7 @@ typedef SSIZE_T ssize_t;
* @remark This value should only be used during compile time,
* for runtime checks of version use rd_kafka_version()
*/
-#define RD_KAFKA_VERSION 0x010802ff
+#define RD_KAFKA_VERSION 0x010900ff
/**
* @brief Returns the librdkafka version as integer.
@@ -259,6 +259,7 @@ typedef struct rd_kafka_consumer_group_metadata_s
typedef struct rd_kafka_error_s rd_kafka_error_t;
typedef struct rd_kafka_headers_s rd_kafka_headers_t;
typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
+typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
/* @endcond */
@@ -3588,9 +3589,14 @@ int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
* @brief Seek consumer for topic+partition to \p offset which is either an
* absolute or logical offset.
*
- * If \p timeout_ms is not 0 the call will wait this long for the
- * seek to be performed. If the timeout is reached the internal state
- * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
+ * If \p timeout_ms is specified (not 0) the seek call will wait this long
+ * for the consumer to update its fetcher state for the given partition with
+ * the new offset. This guarantees that no previously fetched messages for the
+ * old offset (or fetch position) will be passed to the application.
+ *
+ * If the timeout is reached the internal state will be unknown to the caller
+ * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
+ *
* If \p timeout_ms is 0 it will initiate the seek but return
* immediately without any error reporting (e.g., async).
*
@@ -3621,11 +3627,13 @@ rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt,
*
* The offset may be either absolute (>= 0) or a logical offset.
*
- * If \p timeout_ms is not 0 the call will wait this long for the
- * seeks to be performed. If the timeout is reached the internal state
- * will be unknown for the remaining partitions to seek and this function
- * will return an error with the error code set to
- * `RD_KAFKA_RESP_ERR__TIMED_OUT`.
+ * If \p timeout_ms is specified (not 0) the seek call will wait this long
+ * for the consumer to update its fetcher state for the given partition with
+ * the new offset. This guarantees that no previously fetched messages for the
+ * old offset (or fetch position) will be passed to the application.
+ *
+ * If the timeout is reached the internal state will be unknown to the caller
+ * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
*
* If \p timeout_ms is 0 it will initiate the seek but return
* immediately without any error reporting (e.g., async).
@@ -3824,6 +3832,11 @@ int rd_kafka_consume_callback_queue(
* The \c offset + 1 will be committed (written) to broker (or file) according
* to \c `auto.commit.interval.ms` or manual offset-less commit()
*
+ * @warning This method may only be called for partitions that are currently
+ * assigned.
+ * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
+ * Since v1.9.0.
+ *
* @remark \c `enable.auto.offset.store` must be set to "false" when using
* this API.
*
@@ -3841,18 +3854,23 @@ rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
* to \c `auto.commit.interval.ms` or manual offset-less commit().
*
* Per-partition success/error status propagated through each partition's
- * \c .err field.
+ * \c .err field for all return values (even NO_ERROR), except INVALID_ARG.
+ *
+ * @warning This method may only be called for partitions that are currently
+ * assigned.
+ * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
+ * Since v1.9.0.
*
* @remark The \c .offset field is stored as is, it will NOT be + 1.
*
* @remark \c `enable.auto.offset.store` must be set to "false" when using
* this API.
*
- * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
- * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if none of the
- * offsets could be stored, or
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or
* RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store
- * is true.
+ * is true, or
+ * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE
+ * if none of the offsets could be stored.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_offsets_store(rd_kafka_t *rk,
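A brief sketch of the error handling described above (assumes an existing consumer rk and a populated rd_kafka_topic_partition_list_t *offsets; <stdio.h> and <inttypes.h> included):

rd_kafka_resp_err_t err = rd_kafka_offsets_store(rk, offsets);
if (err == RD_KAFKA_RESP_ERR__INVALID_ARG) {
        /* enable.auto.offset.store must be set to "false" */
} else {
        int i;
        for (i = 0; i < offsets->cnt; i++)
                if (offsets->elems[i].err) /* per-partition status */
                        fprintf(stderr, "store %s [%" PRId32 "] failed: %s\n",
                                offsets->elems[i].topic,
                                offsets->elems[i].partition,
                                rd_kafka_err2str(offsets->elems[i].err));
}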
@@ -3894,7 +3912,8 @@ rd_kafka_offsets_store(rd_kafka_t *rk,
* and then start fetching messages. This cycle may take up to
* \c session.timeout.ms * 2 or more to complete.
*
- * @remark A consumer error will be raised for each unavailable topic in the
+ * @remark After this call returns, a consumer error will be returned by
+ * rd_kafka_consumer_poll (et al.) for each unavailable topic in the
* \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART
* for non-existent topics, and
* RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics.
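A sketch of how these errors reach the application (illustrative only; rk is an existing consumer handle):

rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
if (rkm && rkm->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
        fprintf(stderr, "Subscribed topic missing: %s\n",
                rd_kafka_message_errstr(rkm));
if (rkm)
        rd_kafka_message_destroy(rkm);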
@@ -5074,16 +5093,16 @@ typedef int rd_kafka_event_type_t;
#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */
/** DeleteConsumerGroupOffsets_result_t */
#define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107
-#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH \
- 0x100 /**< SASL/OAUTHBEARER \
- token needs to be \
- refreshed */
-#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */
-#define RD_KAFKA_EVENT_AWS_MSK_IAM_CREDENTIAL_REFRESH 0x400 /**< SASL/AWS_MSK_IAM
+/** SASL/OAUTHBEARER token needs to be refreshed */
+#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100
+#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */
+#define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */
+#define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */
+#define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */
+#define RD_KAFKA_EVENT_AWS_MSK_IAM_CREDENTIAL_REFRESH 0x2000 /**< SASL/AWS_MSK_IAM
credentials need to be
refreshed */
-
/**
* @returns the event type for the given event.
*
@@ -5225,6 +5244,9 @@ int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
* - RD_KAFKA_EVENT_CREATETOPICS_RESULT
* - RD_KAFKA_EVENT_DELETETOPICS_RESULT
* - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
+ * - RD_KAFKA_EVENT_CREATEACLS_RESULT
+ * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ * - RD_KAFKA_EVENT_DELETEACLS_RESULT
* - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
* - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
* - RD_KAFKA_EVENT_DELETEGROUPS_RESULT
@@ -5315,6 +5337,12 @@ rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
/*! DeleteTopics result type */
typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
+/*! CreateAcls result type */
+typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
+/*! DescribeAcls result type */
+typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
+/*! DeleteAcls result type */
+typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
/*! CreatePartitions result type */
typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
/*! AlterConfigs result type */
@@ -5422,6 +5450,36 @@ rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t *
rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
+/**
+ * @returns the result of a CreateAcls request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_CREATEACLS_RESULT
+ */
+RD_EXPORT const rd_kafka_CreateAcls_result_t *
+rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
+
+/**
+ * @returns the result of a DescribeAcls request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ */
+RD_EXPORT const rd_kafka_DescribeAcls_result_t *
+rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
+
+/**
+ * @returns the result of a DeleteAcls request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DELETEACLS_RESULT
+ */
+RD_EXPORT const rd_kafka_DeleteAcls_result_t *
+rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
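A minimal dispatch sketch for these new result events (illustrative; rkev is an event polled from the admin result queue and the handle_* functions are hypothetical application handlers):

switch (rd_kafka_event_type(rkev)) {
case RD_KAFKA_EVENT_CREATEACLS_RESULT:
        handle_create_acls(rd_kafka_event_CreateAcls_result(rkev));
        break;
case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
        handle_describe_acls(rd_kafka_event_DescribeAcls_result(rkev));
        break;
case RD_KAFKA_EVENT_DELETEACLS_RESULT:
        handle_delete_acls(rd_kafka_event_DeleteAcls_result(rkev));
        break;
default:
        break;
}
rd_kafka_event_destroy(rkev);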
+
/**
* @brief Poll a queue for an event for max \p timeout_ms.
*
@@ -5564,6 +5622,7 @@ typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)(
* @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order
* the interceptors were added.
*
+ * @param conf Configuration object.
* @param ic_opaque The interceptor's opaque pointer specified in ..add..().
* @param name The configuration property to set.
* @param val The configuration value to set, or NULL for reverting to default
@@ -5597,6 +5656,11 @@ typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)(
* \p old_conf being copied to \p new_conf.
*
* @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ * @param new_conf New configuration object.
+ * @param old_conf Old configuration object to copy properties from.
+ * @param filter_cnt Number of property names to filter in \p filter.
+ * @param filter Property names to filter out (ignore) when setting up
+ * \p new_conf.
*
* @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code
* on failure (which is logged but otherwise ignored).
@@ -5741,6 +5805,7 @@ typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)(
* @param offsets List of topic+partition+offset+error that were committed.
* The error message of each partition should be checked for
* error.
+ * @param err The commit error, if any.
* @param ic_opaque The interceptor's opaque pointer specified in ..add..().
*
* @remark This interceptor is only used by consumer instances.
@@ -5770,7 +5835,7 @@ typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)(
* @param brokerid Broker request is being sent to.
* @param ApiKey Kafka protocol request type.
* @param ApiVersion Kafka protocol request type version.
- * @param Corrid Kafka protocol request correlation id.
+ * @param CorrId Kafka protocol request correlation id.
* @param size Size of request.
* @param ic_opaque The interceptor's opaque pointer specified in ..add..().
*
@@ -5805,7 +5870,7 @@ typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)(
* @param brokerid Broker response was received from.
* @param ApiKey Kafka protocol request type or -1 on error.
* @param ApiVersion Kafka protocol request type version or -1 on error.
- * @param Corrid Kafka protocol request correlation id, possibly -1 on error.
+ * @param CorrId Kafka protocol request correlation id, possibly -1 on error.
* @param size Size of response, possibly 0 on error.
* @param rtt Request round-trip-time in microseconds, possibly -1 on error.
* @param err Receive error.
@@ -6265,7 +6330,10 @@ typedef enum rd_kafka_admin_op_t {
RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */
/** DeleteConsumerGroupOffsets */
RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
- RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */
+ RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */
+ RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */
+ RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */
+ RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */
} rd_kafka_admin_op_t;
/**
@@ -6889,7 +6957,10 @@ rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry,
-/*! Apache Kafka resource types */
+/**
+ * @enum rd_kafka_ResourceType_t
+ * @brief Apache Kafka resource types
+ */
typedef enum rd_kafka_ResourceType_t {
RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */
RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */
@@ -6899,6 +6970,30 @@ typedef enum rd_kafka_ResourceType_t {
RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */
} rd_kafka_ResourceType_t;
+/**
+ * @enum rd_kafka_ResourcePatternType_t
+ * @brief Apache Kafka pattern types
+ */
+typedef enum rd_kafka_ResourcePatternType_t {
+ /** Unknown */
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
+ /** Any (used for lookups) */
+ RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
+ /** Match: will perform pattern matching */
+ RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
+ /** Literal: A literal resource name */
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
+ /** Prefixed: A prefixed resource name */
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
+ RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
+} rd_kafka_ResourcePatternType_t;
+
+/**
+ * @returns a string representation of the \p resource_pattern_type
+ */
+RD_EXPORT const char *rd_kafka_ResourcePatternType_name(
+ rd_kafka_ResourcePatternType_t resource_pattern_type);
+
/**
* @returns a string representation of the \p restype
*/
@@ -7365,9 +7460,358 @@ rd_kafka_DeleteConsumerGroupOffsets_result_groups(
const rd_kafka_DeleteConsumerGroupOffsets_result_t *result,
size_t *cntp);
+/**
+ * @brief ACL Binding is used to create access control lists.
+ *
+ *
+ */
+typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
+
+/**
+ * @brief ACL Binding filter is used to filter access control lists.
+ *
+ */
+typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
+
+/**
+ * @returns the error object for the given acl result, or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
+
+
+/**
+ * @name AclOperation
+ * @{
+ */
+
+/**
+ * @enum rd_kafka_AclOperation_t
+ * @brief Apache Kafka ACL operation types.
+ */
+typedef enum rd_kafka_AclOperation_t {
+ RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */
+ RD_KAFKA_ACL_OPERATION_ANY =
+ 1, /**< In a filter, matches any AclOperation */
+ RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */
+ RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */
+ RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */
+ RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */
+ RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */
+ RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */
+ RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */
+ RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION =
+ 9, /**< CLUSTER_ACTION operation */
+ RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS =
+ 10, /**< DESCRIBE_CONFIGS operation */
+ RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS =
+ 11, /**< ALTER_CONFIGS operation */
+ RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE =
+ 12, /**< IDEMPOTENT_WRITE operation */
+ RD_KAFKA_ACL_OPERATION__CNT
+} rd_kafka_AclOperation_t;
+
+/**
+ * @returns a string representation of the \p acl_operation
+ */
+RD_EXPORT const char *
+rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
+
+/**@}*/
+
+/**
+ * @name AclPermissionType
+ * @{
+ */
+
+/**
+ * @enum rd_kafka_AclPermissionType_t
+ * @brief Apache Kafka ACL permission types.
+ */
+typedef enum rd_kafka_AclPermissionType_t {
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */
+ RD_KAFKA_ACL_PERMISSION_TYPE_ANY =
+ 1, /**< In a filter, matches any AclPermissionType */
+ RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2, /**< Disallows access */
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */
+ RD_KAFKA_ACL_PERMISSION_TYPE__CNT
+} rd_kafka_AclPermissionType_t;
+
+/**
+ * @returns a string representation of the \p acl_permission_type
+ */
+RD_EXPORT const char *rd_kafka_AclPermissionType_name(
+ rd_kafka_AclPermissionType_t acl_permission_type);
/**@}*/
+/**
+ * @brief Create a new AclBinding object. This object is later passed to
+ * rd_kafka_CreateAcls().
+ *
+ * @param restype The ResourceType.
+ * @param name The resource name.
+ * @param resource_pattern_type The pattern type.
+ * @param principal A principal, following the Kafka specification.
+ * @param host A hostname or IP.
+ * @param operation A Kafka operation.
+ * @param permission_type A Kafka permission type.
+ * @param errstr An error string for returning errors or NULL to not use it.
+ * @param errstr_size The \p errstr size or 0 to not use it.
+ *
+ * @returns a newly allocated AclBinding object, or NULL if the input
+ * parameters are invalid.
+ * Use rd_kafka_AclBinding_destroy() to free the object when done.
+ */
+RD_EXPORT rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size);
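A usage sketch of the constructor above (illustrative; the topic, principal and host values are placeholders):

char errstr[512];
rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
        RD_KAFKA_RESOURCE_TOPIC, "mytopic",
        RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
        RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
        errstr, sizeof(errstr));
if (!acl)
        fprintf(stderr, "AclBinding_new failed: %s\n", errstr);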
+
+/**
+ * @brief Create a new AclBindingFilter object. This object is later passed to
+ * rd_kafka_DescribeAcls() or
+ * rd_kafka_DeleteAcls() in order to filter
+ * the acls to retrieve or to delete.
+ * Use the same rd_kafka_AclBinding functions to query or destroy it.
+ *
+ * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if
+ * not filtering by this field.
+ * @param name The resource name or NULL if not filtering by this field.
+ * @param resource_pattern_type The pattern type or \c
+ * RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
+ * @param principal A principal or NULL if not filtering by this field.
+ * @param host A hostname or IP, or NULL if not filtering by this field.
+ * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not
+ * filtering by this field.
+ * @param permission_type A Kafka permission type or \c
+ * RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field.
+ * @param errstr An error string for returning errors or NULL to not use it.
+ * @param errstr_size The \p errstr size or 0 to not use it.
+ *
+ * @returns a newly allocated AclBindingFilter object, or NULL if the input
+ * parameters are invalid. Use rd_kafka_AclBinding_destroy() to free the
+ * object when done.
+ */
+RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
+ rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size);
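For example, a filter matching every topic ACL held by a given principal might look as follows (illustrative; the principal is a placeholder and errstr is declared as in the previous sketch):

rd_kafka_AclBindingFilter_t *filter = rd_kafka_AclBindingFilter_new(
        RD_KAFKA_RESOURCE_TOPIC, NULL /* any resource name */,
        RD_KAFKA_RESOURCE_PATTERN_ANY, "User:alice", NULL /* any host */,
        RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
        errstr, sizeof(errstr));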
+
+/**
+ * @returns the resource type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_ResourceType_t
+rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the resource name for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the principal for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the host for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the acl operation for the given acl binding.
+ */
+RD_EXPORT rd_kafka_AclOperation_t
+rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the permission type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_AclPermissionType_t
+rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the resource pattern type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_ResourcePatternType_t
+rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the error object for the given acl binding, or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
+
+
+/**
+ * @brief Destroy and free an AclBinding object previously created with
+ * rd_kafka_AclBinding_new()
+ */
+RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
+
+
+/**
+ * @brief Helper function to destroy all AclBinding objects in
+ * the \p acl_bindings array (of \p acl_bindings_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
+ size_t acl_bindings_cnt);
+
+/**
+ * @brief Get an array of acl results from a CreateAcls result.
+ *
+ * The returned \p acl result life-time is the same as the \p result object.
+ * @param result CreateAcls result to get acl results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_acl_result_t **
+rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @brief Create acls as specified by the \p new_acls
+ * array of size \p new_acls_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param new_acls Array of new acls to create.
+ * @param new_acls_cnt Number of elements in \p new_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_CREATEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk,
+ rd_kafka_AclBinding_t **new_acls,
+ size_t new_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
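Putting the pieces together, a hedged end-to-end sketch (rk and acl as in the earlier sketches; the 10 s timeout is arbitrary):

rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
rd_kafka_CreateAcls(rk, &acl, 1, NULL /* default options */, q);

rd_kafka_event_t *rkev = rd_kafka_queue_poll(q, 10 * 1000);
if (rkev && rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_CREATEACLS_RESULT) {
        size_t cnt, i;
        const rd_kafka_acl_result_t **aclres =
                rd_kafka_CreateAcls_result_acls(
                        rd_kafka_event_CreateAcls_result(rkev), &cnt);
        for (i = 0; i < cnt; i++)
                if (rd_kafka_acl_result_error(aclres[i]))
                        fprintf(stderr, "ACL #%zu failed: %s\n", i,
                                rd_kafka_error_string(
                                        rd_kafka_acl_result_error(aclres[i])));
}
if (rkev)
        rd_kafka_event_destroy(rkev);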
+
+/**
+ * @section DescribeAcls - describe access control lists.
+ *
+ *
+ */
+
+/**
+ * @brief Get an array of resource results from a DescribeAcls result.
+ *
+ * The returned \p resources life-time is the same as the \p result object.
+ * @param result DescribeAcls result to get acls from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @brief Describe acls matching the filter provided in \p acl_filter
+ *
+ * @param rk Client instance.
+ * @param acl_filter Filter for the returned acls.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_filter,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
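Continuing the sketches above, describing ACLs with a previously built filter and printing the matches (illustrative; reuses q from the CreateAcls sketch):

rd_kafka_DescribeAcls(rk, filter, NULL /* default options */, q);
rkev = rd_kafka_queue_poll(q, 10 * 1000);
if (rkev && rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_DESCRIBEACLS_RESULT) {
        size_t cnt, i;
        const rd_kafka_AclBinding_t **acls =
                rd_kafka_DescribeAcls_result_acls(
                        rd_kafka_event_DescribeAcls_result(rkev), &cnt);
        for (i = 0; i < cnt; i++)
                printf("%s %s@%s %s %s\n",
                       rd_kafka_AclBinding_name(acls[i]),
                       rd_kafka_AclBinding_principal(acls[i]),
                       rd_kafka_AclBinding_host(acls[i]),
                       rd_kafka_AclOperation_name(
                               rd_kafka_AclBinding_operation(acls[i])),
                       rd_kafka_AclPermissionType_name(
                               rd_kafka_AclBinding_permission_type(acls[i])));
}
if (rkev)
        rd_kafka_event_destroy(rkev);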
+
+/**
+ * @section DeleteAcls - delete access control lists.
+ *
+ *
+ */
+
+typedef struct rd_kafka_DeleteAcls_result_response_s
+ rd_kafka_DeleteAcls_result_response_t;
+
+/**
+ * @brief Get an array of DeleteAcls result responses from a DeleteAcls result.
+ *
+ * The returned \p responses life-time is the same as the \p result object.
+ * @param result DeleteAcls result to get responses from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @returns the error object for the given DeleteAcls result response,
+ * or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+ const rd_kafka_DeleteAcls_result_response_t *result_response);
+
+
+/**
+ * @returns the matching acls array for the given DeleteAcls result response.
+ *
+ * @remark lifetime of the returned acl bindings is the same as the \p
+ * result_response.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DeleteAcls_result_response_matching_acls(
+ const rd_kafka_DeleteAcls_result_response_t *result_response,
+ size_t *matching_acls_cntp);
+
+/**
+ * @brief Delete acls matching the filters provided in \p del_acls
+ * array of size \p del_acls_cnt.
+ *
+ * @param rk Client instance.
+ * @param del_acls Filters for the acls to delete.
+ * @param del_acls_cnt Number of elements in \p del_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t **del_acls,
+ size_t del_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
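And the corresponding deletion sketch, reusing the filter and queue from the sketches above (illustrative):

rd_kafka_DeleteAcls(rk, &filter, 1, NULL /* default options */, q);
rkev = rd_kafka_queue_poll(q, 10 * 1000);
if (rkev && rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_DELETEACLS_RESULT) {
        size_t resp_cnt, i;
        const rd_kafka_DeleteAcls_result_response_t **responses =
                rd_kafka_DeleteAcls_result_responses(
                        rd_kafka_event_DeleteAcls_result(rkev), &resp_cnt);
        for (i = 0; i < resp_cnt; i++) {
                size_t match_cnt;
                rd_kafka_DeleteAcls_result_response_matching_acls(
                        responses[i], &match_cnt);
                printf("Filter #%zu matched %zu ACL(s)\n", i, match_cnt);
        }
}
if (rkev)
        rd_kafka_event_destroy(rkev);
rd_kafka_queue_destroy(q);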
+
+/**@}*/
/**
* @name Security APIs
diff --git a/src/rdkafka_admin.c b/src/rdkafka_admin.c
index 9a63b1e1c9..29b97422a1 100644
--- a/src/rdkafka_admin.c
+++ b/src/rdkafka_admin.c
@@ -522,6 +522,33 @@ rd_kafka_admin_result_ret_resources(const rd_kafka_op_t *rko, size_t *cntp) {
rko->rko_u.admin_result.results.rl_elems;
}
+/**
+ * @brief Return the acl result list from an acl-related result object.
+ */
+static const rd_kafka_acl_result_t **
+rd_kafka_admin_result_ret_acl_results(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_CREATEACLS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_acl_result_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the acl binding list from an acl-related result object.
+ */
+static const rd_kafka_AclBinding_t **
+rd_kafka_admin_result_ret_acl_bindings(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DESCRIBEACLS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_AclBinding_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
/**
* @brief Return the groups list from a group-related result object.
@@ -538,6 +565,21 @@ rd_kafka_admin_result_ret_groups(const rd_kafka_op_t *rko, size_t *cntp) {
rko->rko_u.admin_result.results.rl_elems;
}
+/**
+ * @brief Return the DeleteAcls response list from an acl-related result object.
+ */
+static const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_admin_result_ret_delete_acl_result_responses(const rd_kafka_op_t *rko,
+ size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DELETEACLS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_DeleteAcls_result_response_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
/**
* @brief Create a new admin_request op of type \p optype and sets up the
* generic (type independent files).
@@ -2412,6 +2454,18 @@ const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource) {
*
*/
+const char *rd_kafka_ResourcePatternType_name(
+ rd_kafka_ResourcePatternType_t resource_pattern_type) {
+ static const char *names[] = {"UNKNOWN", "ANY", "MATCH", "LITERAL",
+ "PREFIXED"};
+
+ if ((unsigned int)resource_pattern_type >=
+ (unsigned int)RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT)
+ return "UNSUPPORTED";
+
+ return names[resource_pattern_type];
+}
+
const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype) {
static const char *names[] = {
"UNKNOWN", "ANY", "TOPIC", "GROUP", "BROKER",
@@ -3926,3 +3980,824 @@ void rd_kafka_DeleteConsumerGroupOffsets(
rd_kafka_queue_t *rkqu);
/**@}*/
+/**
+ * @name CreateAcls
+ * @{
+ *
+ *
+ *
+ */
+
+const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t operation) {
+ static const char *names[] = {"UNKNOWN",
+ "ANY",
+ "ALL",
+ "READ",
+ "WRITE",
+ "CREATE",
+ "DELETE",
+ "ALTER",
+ "DESCRIBE",
+ "CLUSTER_ACTION",
+ "DESCRIBE_CONFIGS",
+ "ALTER_CONFIGS",
+ "IDEMPOTENT_WRITE"};
+
+ if ((unsigned int)operation >=
+ (unsigned int)RD_KAFKA_ACL_OPERATION__CNT)
+ return "UNSUPPORTED";
+
+ return names[operation];
+}
+
+const char *
+rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t permission_type) {
+ static const char *names[] = {"UNKNOWN", "ANY", "DENY", "ALLOW"};
+
+ if ((unsigned int)permission_type >=
+ (unsigned int)RD_KAFKA_ACL_PERMISSION_TYPE__CNT)
+ return "UNSUPPORTED";
+
+ return names[permission_type];
+}
+
+static rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new0(rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ rd_kafka_resp_err_t err,
+ const char *errstr) {
+ rd_kafka_AclBinding_t *acl_binding;
+
+ acl_binding = rd_calloc(1, sizeof(*acl_binding));
+ acl_binding->name = name != NULL ? rd_strdup(name) : NULL;
+ acl_binding->principal =
+ principal != NULL ? rd_strdup(principal) : NULL;
+ acl_binding->host = host != NULL ? rd_strdup(host) : NULL;
+ acl_binding->restype = restype;
+ acl_binding->resource_pattern_type = resource_pattern_type;
+ acl_binding->operation = operation;
+ acl_binding->permission_type = permission_type;
+ if (err)
+ acl_binding->error = rd_kafka_error_new(err, "%s", errstr);
+
+ return acl_binding;
+}
+
+rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size) {
+ if (!name) {
+ rd_snprintf(errstr, errstr_size, "Invalid resource name");
+ return NULL;
+ }
+ if (!principal) {
+ rd_snprintf(errstr, errstr_size, "Invalid principal");
+ return NULL;
+ }
+ if (!host) {
+ rd_snprintf(errstr, errstr_size, "Invalid host");
+ return NULL;
+ }
+
+ if (restype == RD_KAFKA_RESOURCE_ANY ||
+ restype <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ restype >= RD_KAFKA_RESOURCE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid resource type");
+ return NULL;
+ }
+
+ if (resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_ANY ||
+ resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_MATCH ||
+ resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid resource pattern type");
+ return NULL;
+ }
+
+ if (operation == RD_KAFKA_ACL_OPERATION_ANY ||
+ operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid operation");
+ return NULL;
+ }
+
+ if (permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ANY ||
+ permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid permission type");
+ return NULL;
+ }
+
+ return rd_kafka_AclBinding_new0(
+ restype, name, resource_pattern_type, principal, host, operation,
+ permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
+}
+
+rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
+ rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size) {
+
+
+ if (restype <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ restype >= RD_KAFKA_RESOURCE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid resource type");
+ return NULL;
+ }
+
+ if (resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid resource pattern type");
+ return NULL;
+ }
+
+ if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid operation");
+ return NULL;
+ }
+
+ if (permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid permission type");
+ return NULL;
+ }
+
+ return rd_kafka_AclBinding_new0(
+ restype, name, resource_pattern_type, principal, host, operation,
+ permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
+}
+
+rd_kafka_ResourceType_t
+rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl) {
+ return acl->restype;
+}
+
+const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl) {
+ return acl->name;
+}
+
+const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl) {
+ return acl->principal;
+}
+
+const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl) {
+ return acl->host;
+}
+
+rd_kafka_AclOperation_t
+rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl) {
+ return acl->operation;
+}
+
+rd_kafka_AclPermissionType_t
+rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl) {
+ return acl->permission_type;
+}
+
+rd_kafka_ResourcePatternType_t
+rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl) {
+ return acl->resource_pattern_type;
+}
+
+const rd_kafka_error_t *
+rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl) {
+ return acl->error;
+}
+
+/**
+ * @brief Allocate a new AclBinding and make a copy of \p src
+ */
+static rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_copy(const rd_kafka_AclBinding_t *src) {
+ rd_kafka_AclBinding_t *dst;
+
+ dst = rd_kafka_AclBinding_new(
+ src->restype, src->name, src->resource_pattern_type, src->principal,
+ src->host, src->operation, src->permission_type, NULL, 0);
+ rd_assert(dst);
+ return dst;
+}
+
+/**
+ * @brief Allocate a new AclBindingFilter and make a copy of \p src
+ */
+static rd_kafka_AclBindingFilter_t *
+rd_kafka_AclBindingFilter_copy(const rd_kafka_AclBindingFilter_t *src) {
+ rd_kafka_AclBindingFilter_t *dst;
+
+ dst = rd_kafka_AclBindingFilter_new(
+ src->restype, src->name, src->resource_pattern_type, src->principal,
+ src->host, src->operation, src->permission_type, NULL, 0);
+ rd_assert(dst);
+ return dst;
+}
+
+void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding) {
+ if (acl_binding->name)
+ rd_free(acl_binding->name);
+ if (acl_binding->principal)
+ rd_free(acl_binding->principal);
+ if (acl_binding->host)
+ rd_free(acl_binding->host);
+ if (acl_binding->error)
+ rd_kafka_error_destroy(acl_binding->error);
+ rd_free(acl_binding);
+}
+
+static void rd_kafka_AclBinding_free(void *ptr) {
+ rd_kafka_AclBinding_destroy(ptr);
+}
+
+
+void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
+ size_t acl_bindings_cnt) {
+ size_t i;
+ for (i = 0; i < acl_bindings_cnt; i++)
+ rd_kafka_AclBinding_destroy(acl_bindings[i]);
+}
+
+/**
+ * @brief Parse CreateAclsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_CreateAclsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t acl_cnt;
+ int i;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000);
+
+ if (acl_cnt != rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " acls in response, but %d were requested",
+ acl_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, acl_cnt,
+ rd_kafka_topic_result_free);
+
+ for (i = 0; i < (int)acl_cnt; i++) {
+ int16_t error_code;
+ rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
+ rd_kafka_acl_result_t *acl_res;
+ char *errstr = NULL;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ errstr = (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
+ }
+
+ acl_res = rd_kafka_acl_result_new(
+ error_code ? rd_kafka_error_new(error_code, "%s", errstr)
+ : NULL);
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, i,
+ acl_res);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "CreateAcls response protocol parse failure: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+void rd_kafka_CreateAcls(rd_kafka_t *rk,
+ rd_kafka_AclBinding_t **new_acls,
+ size_t new_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_CreateAclsRequest, rd_kafka_CreateAclsResponse_parse};
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATEACLS,
+ RD_KAFKA_EVENT_CREATEACLS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)new_acls_cnt,
+ rd_kafka_AclBinding_free);
+
+ for (i = 0; i < new_acls_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_AclBinding_copy(new_acls[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**
+ * @brief Get an array of rd_kafka_acl_result_t from a CreateAcls result.
+ *
+ * The returned \p rd_kafka_acl_result_t life-time is the same as the \p result
+ * object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_acl_result_t **
+rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_acl_results(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+/**
+ * @name DescribeAcls
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Parse DescribeAclsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DescribeAclsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t res_cnt;
+ int i;
+ int j;
+ rd_kafka_AclBinding_t *acl = NULL;
+ int16_t error_code;
+ rd_kafkap_str_t error_msg;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ errstr = (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
+ }
+
+ /* #resources */
+ rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000);
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
+ rd_kafka_AclBinding_free);
+
+ for (i = 0; i < (int)res_cnt; i++) {
+ int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ rd_kafkap_str_t kres_name;
+ char *res_name;
+ int8_t resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ int32_t acl_cnt;
+
+ rd_kafka_buf_read_i8(reply, &res_type);
+ rd_kafka_buf_read_str(reply, &kres_name);
+ RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 1) {
+ rd_kafka_buf_read_i8(reply, &resource_pattern_type);
+ }
+
+ if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ res_type >= RD_KAFKA_RESOURCE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned unknown "
+ "resource type %d",
+ res_type);
+ res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ }
+ if (resource_pattern_type <=
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >=
+ RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned unknown "
+ "resource pattern type %d",
+ resource_pattern_type);
+ resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN;
+ }
+
+ /* #acls */
+ rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000);
+
+ for (j = 0; j < (int)acl_cnt; j++) {
+ rd_kafkap_str_t kprincipal;
+ rd_kafkap_str_t khost;
+ int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ int8_t permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ char *principal;
+ char *host;
+
+ rd_kafka_buf_read_str(reply, &kprincipal);
+ rd_kafka_buf_read_str(reply, &khost);
+ rd_kafka_buf_read_i8(reply, &operation);
+ rd_kafka_buf_read_i8(reply, &permission_type);
+ RD_KAFKAP_STR_DUPA(&principal, &kprincipal);
+ RD_KAFKAP_STR_DUPA(&host, &khost);
+
+ if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned "
+ "unknown acl operation %d",
+ operation);
+ operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ }
+ if (permission_type <=
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >=
+ RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned "
+ "unknown acl permission type %d",
+ permission_type);
+ permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ }
+
+ acl = rd_kafka_AclBinding_new0(
+ res_type, res_name, resource_pattern_type,
+ principal, host, operation, permission_type,
+ RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
+
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ acl);
+ }
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DescribeAcls response protocol parse failure: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+void rd_kafka_DescribeAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_filter,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DescribeAclsRequest,
+ rd_kafka_DescribeAclsResponse_parse,
+ };
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DESCRIBEACLS,
+ RD_KAFKA_EVENT_DESCRIBEACLS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_AclBinding_free);
+
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_AclBindingFilter_copy(acl_filter));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**
+ * @brief Get an array of rd_kafka_AclBinding_t from a DescribeAcls result.
+ *
+ * The returned \p rd_kafka_AclBinding_t life-time is the same as the \p result
+ * object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_acl_bindings(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+/**
+ * @name DeleteAcls
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Allocate a new DeleteAcls result response with the given
+ * \p err error code and \p errstr error message.
+ */
+const rd_kafka_DeleteAcls_result_response_t *
+rd_kafka_DeleteAcls_result_response_new(rd_kafka_resp_err_t err, char *errstr) {
+ rd_kafka_DeleteAcls_result_response_t *result_response;
+
+ result_response = rd_calloc(1, sizeof(*result_response));
+ if (err)
+ result_response->error = rd_kafka_error_new(
+ err, "%s", errstr ? errstr : rd_kafka_err2str(err));
+
+ /* List of matching acls (rd_kafka_AclBinding_t *) */
+ rd_list_init(&result_response->matching_acls, 0,
+ rd_kafka_AclBinding_free);
+
+ return result_response;
+}
+
+static void rd_kafka_DeleteAcls_result_response_destroy(
+ rd_kafka_DeleteAcls_result_response_t *resp) {
+ if (resp->error)
+ rd_kafka_error_destroy(resp->error);
+ rd_list_destroy(&resp->matching_acls);
+ rd_free(resp);
+}
+
+static void rd_kafka_DeleteAcls_result_response_free(void *ptr) {
+ rd_kafka_DeleteAcls_result_response_destroy(
+ (rd_kafka_DeleteAcls_result_response_t *)ptr);
+}
+
+/**
+ * @brief Get an array of rd_kafka_DeleteAcls_result_response_t from a
+ * DeleteAcls result.
+ *
+ * The returned responses' life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_delete_acl_result_responses(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+ const rd_kafka_DeleteAcls_result_response_t *result_response) {
+ return result_response->error;
+}
+
+const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(
+ const rd_kafka_DeleteAcls_result_response_t *result_response,
+ size_t *matching_acls_cntp) {
+ *matching_acls_cntp = result_response->matching_acls.rl_cnt;
+ return (const rd_kafka_AclBinding_t **)
+ result_response->matching_acls.rl_elems;
+}
+
+/**
+ * @brief Parse DeleteAclsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DeleteAclsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_op_t *rko_result = NULL;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int32_t res_cnt;
+ int i;
+ int j;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ /* #responses */
+ rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000);
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
+ rd_kafka_DeleteAcls_result_response_free);
+
+ for (i = 0; i < (int)res_cnt; i++) {
+ int16_t error_code;
+ rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
+ char *errstr = NULL;
+ const rd_kafka_DeleteAcls_result_response_t *result_response;
+ int32_t matching_acls_cnt;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
+ RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ errstr = (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
+ }
+
+ result_response =
+ rd_kafka_DeleteAcls_result_response_new(error_code, errstr);
+
+ /* #matching_acls */
+ rd_kafka_buf_read_arraycnt(reply, &matching_acls_cnt, 100000);
+ for (j = 0; j < (int)matching_acls_cnt; j++) {
+ int16_t acl_error_code;
+ int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ rd_kafkap_str_t acl_error_msg =
+ RD_KAFKAP_STR_INITIALIZER;
+ rd_kafkap_str_t kres_name;
+ rd_kafkap_str_t khost;
+ rd_kafkap_str_t kprincipal;
+ int8_t resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ int8_t permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ rd_kafka_AclBinding_t *matching_acl;
+ char *acl_errstr = NULL;
+ char *res_name;
+ char *principal;
+ char *host;
+
+ rd_kafka_buf_read_i16(reply, &acl_error_code);
+ rd_kafka_buf_read_str(reply, &acl_error_msg);
+ if (acl_error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&acl_error_msg) ||
+ RD_KAFKAP_STR_LEN(&acl_error_msg) == 0)
+ acl_errstr = (char *)rd_kafka_err2str(
+ acl_error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&acl_errstr,
+ &acl_error_msg);
+ }
+
+ rd_kafka_buf_read_i8(reply, &res_type);
+ rd_kafka_buf_read_str(reply, &kres_name);
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 1) {
+ rd_kafka_buf_read_i8(reply,
+ &resource_pattern_type);
+ }
+
+ rd_kafka_buf_read_str(reply, &kprincipal);
+ rd_kafka_buf_read_str(reply, &khost);
+ rd_kafka_buf_read_i8(reply, &operation);
+ rd_kafka_buf_read_i8(reply, &permission_type);
+ RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
+ RD_KAFKAP_STR_DUPA(&principal, &kprincipal);
+ RD_KAFKAP_STR_DUPA(&host, &khost);
+
+ if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ res_type >= RD_KAFKA_RESOURCE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown resource type %d",
+ res_type);
+ res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ }
+ if (resource_pattern_type <=
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >=
+ RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown resource pattern type %d",
+ resource_pattern_type);
+ resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN;
+ }
+ if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown acl operation %d",
+ operation);
+ operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ }
+ if (permission_type <=
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >=
+ RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown acl permission type %d",
+ permission_type);
+ permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ }
+
+ matching_acl = rd_kafka_AclBinding_new0(
+ res_type, res_name, resource_pattern_type,
+ principal, host, operation, permission_type,
+ acl_error_code, acl_errstr);
+
+ rd_list_add(
+ (rd_list_t *)&result_response->matching_acls,
+ (void *)matching_acl);
+ }
+
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ (void *)result_response);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DeleteAcls response protocol parse failure: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+
+void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t **del_acls,
+ size_t del_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DeleteAclsRequest, rd_kafka_DeleteAclsResponse_parse};
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETEACLS,
+ RD_KAFKA_EVENT_DELETEACLS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)del_acls_cnt,
+ rd_kafka_AclBinding_free);
+
+ for (i = 0; i < del_acls_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_AclBindingFilter_copy(del_acls[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**@}*/
diff --git a/src/rdkafka_admin.h b/src/rdkafka_admin.h
index 36a6b6f448..0140fdc6dd 100644
--- a/src/rdkafka_admin.h
+++ b/src/rdkafka_admin.h
@@ -306,4 +306,41 @@ struct rd_kafka_DeleteConsumerGroupOffsets_s {
/**@}*/
+/**
+ * @name CreateAcls
+ * @{
+ */
+
+/**
+ * @brief AclBinding type, used with CreateAcls.
+ */
+struct rd_kafka_AclBinding_s {
+ rd_kafka_ResourceType_t restype; /**< Resource type */
+ char *name; /**< Resource name, points to .data */
+ rd_kafka_ResourcePatternType_t
+ resource_pattern_type; /**< Resource pattern type */
+ char *principal; /**< Access Control Entry principal */
+ char *host; /**< Access Control Entry host */
+ rd_kafka_AclOperation_t operation; /**< AclOperation enumeration */
+ rd_kafka_AclPermissionType_t
+ permission_type; /**< AclPermissionType enumeration */
+ rd_kafka_error_t *error; /**< Response error, or NULL on success. */
+};
+/**@}*/
+
+/**
+ * @name DeleteAcls
+ * @{
+ */
+
+/**
+ * @brief DeleteAcls_result type, used with DeleteAcls.
+ */
+struct rd_kafka_DeleteAcls_result_response_s {
+ rd_kafka_error_t *error; /**< Response error object, or NULL */
+ rd_list_t matching_acls; /**< Type (rd_kafka_AclBinding_t *) */
+};
+
+/**@}*/
+
#endif /* _RDKAFKA_ADMIN_H_ */
diff --git a/src/rdkafka_assignment.c b/src/rdkafka_assignment.c
index dbb2eee70f..5f05683d94 100644
--- a/src/rdkafka_assignment.c
+++ b/src/rdkafka_assignment.c
@@ -342,11 +342,15 @@ static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {
* a manual offset-less commit() or the auto-committer
* will not commit a stored offset from a previous
* assignment (issue #2782). */
- rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID,
+ rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID, rd_true,
RD_DONT_LOCK);
/* Partition is no longer desired */
rd_kafka_toppar_desired_del(rktp);
+
+ rd_assert((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED));
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ASSIGNED;
+
rd_kafka_toppar_unlock(rktp);
rd_kafka_dbg(rk, CGRP, "REMOVE",
@@ -713,6 +717,28 @@ rd_kafka_assignment_add(rd_kafka_t *rk,
rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
}
+ /* Mark all partition objects as assigned and reset the stored
+ * offsets back to invalid in case it was explicitly stored during
+ * the time the partition was not assigned. */
+ for (i = 0; i < partitions->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
+ rd_kafka_toppar_t *rktp =
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED));
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ASSIGNED;
+
+ /* Reset the stored offset to INVALID to avoid the race
+ * condition described in rdkafka_offset.h */
+ rd_kafka_offset_store0(rktp, RD_KAFKA_OFFSET_INVALID,
+ rd_true /* force */, RD_DONT_LOCK);
+
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+
/* Add the new list of partitions to the current assignment.
* Only need to sort the final assignment if it was non-empty
* to begin with since \p partitions is sorted above. */
diff --git a/src/rdkafka_aux.c b/src/rdkafka_aux.c
index 44768fe0bd..05e922405d 100644
--- a/src/rdkafka_aux.c
+++ b/src/rdkafka_aux.c
@@ -184,3 +184,41 @@ void rd_kafka_group_result_destroy(rd_kafka_group_result_t *groupres) {
void rd_kafka_group_result_free(void *ptr) {
rd_kafka_group_result_destroy((rd_kafka_group_result_t *)ptr);
}
+
+
+const rd_kafka_error_t *
+rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres) {
+ return aclres->error;
+}
+
+/**
+ * @brief Allocates and return an acl result, takes ownership of \p error
+ * (unless NULL).
+ *
+ * @returns The new acl result.
+ */
+rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error) {
+ rd_kafka_acl_result_t *acl_res;
+
+ acl_res = rd_calloc(1, sizeof(*acl_res));
+
+ acl_res->error = error;
+
+ return acl_res;
+}
+
+/**
+ * @brief Destroy acl_result
+ */
+void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res) {
+ if (acl_res->error)
+ rd_kafka_error_destroy(acl_res->error);
+ rd_free(acl_res);
+}
+
+/**
+ * @brief Destroy-variant suitable for rd_list free_cb use.
+ */
+void rd_kafka_acl_result_free(void *ptr) {
+ rd_kafka_acl_result_destroy((rd_kafka_acl_result_t *)ptr);
+}
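
A hedged internal sketch (assuming the private rdkafka_aux.h/rdkafka_error.h headers and <stdio.h>) of the acl result lifecycle introduced here; the error code and text are placeholders.

    static void acl_result_lifecycle_sketch(void) {
            rd_kafka_acl_result_t *res;

            /* Failure case: the result takes ownership of the error. */
            res = rd_kafka_acl_result_new(rd_kafka_error_new(
                RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
                "Not authorized"));

            if (rd_kafka_acl_result_error(res))
                    printf("CreateAcls entry failed: %s\n",
                           rd_kafka_error_string(
                               rd_kafka_acl_result_error(res)));

            rd_kafka_acl_result_destroy(res); /* also destroys the error */

            /* Success case: a NULL error means the ACL was created. */
            res = rd_kafka_acl_result_new(NULL);
            rd_kafka_acl_result_destroy(res);
    }
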
diff --git a/src/rdkafka_aux.h b/src/rdkafka_aux.h
index cdd2901bde..ecb7e59121 100644
--- a/src/rdkafka_aux.h
+++ b/src/rdkafka_aux.h
@@ -35,8 +35,6 @@
#include "rdkafka_conf.h"
-
-
/**
* @brief Topic [ + Error code + Error string ]
*
@@ -58,8 +56,6 @@ rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
rd_kafka_resp_err_t err,
const char *errstr);
-/**@}*/
-
/**
* @brief Group [ + Error object ]
*
@@ -83,6 +79,21 @@ rd_kafka_group_result_new(const char *group,
const rd_kafka_topic_partition_list_t *partitions,
rd_kafka_error_t *error);
+/**
+ * @brief Acl creation result [ Error object ]
+ *
+ * @remark Public type.
+ * @remark Single allocation.
+ */
+struct rd_kafka_acl_result_s {
+ rd_kafka_error_t *error; /**< Error object, or NULL on success. */
+};
+
+void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res);
+void rd_kafka_acl_result_free(void *ptr);
+
+rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error);
+
rd_kafka_group_result_t *
rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres);
void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque);
diff --git a/src/rdkafka_broker.c b/src/rdkafka_broker.c
index 7bc6b0e11a..d670b74b3d 100644
--- a/src/rdkafka_broker.c
+++ b/src/rdkafka_broker.c
@@ -778,7 +778,7 @@ static int rd_kafka_broker_bufq_timeout_scan(rd_kafka_broker_t *rkb,
rd_kafka_bufq_deq(rkbq, rkbuf);
if (now && cnt < log_first_n) {
- char holbstr[128];
+ char holbstr[256];
/* Head of line blocking:
* If this is not the first request in queue, but the
* initial first request did not time out,
@@ -3417,7 +3417,13 @@ rd_kafka_broker_ops_io_serve(rd_kafka_broker_t *rkb, rd_ts_t abs_timeout) {
*
* The return value indicates if ops_serve() below should
* use a timeout or not.
+ *
+ * If there are ops enqueued, cut the timeout short so
+ * that they're processed as soon as possible.
*/
+ if (abs_timeout > 0 && rd_kafka_q_len(rkb->rkb_ops) > 0)
+ abs_timeout = RD_POLL_NOWAIT;
+
if (rd_kafka_transport_io_serve(
rkb->rkb_transport, rkb->rkb_ops,
rd_timeout_remains(abs_timeout)))
@@ -3429,6 +3435,8 @@ rd_kafka_broker_ops_io_serve(rd_kafka_broker_t *rkb, rd_ts_t abs_timeout) {
wakeup =
rd_kafka_broker_ops_serve(rkb, rd_timeout_remains_us(abs_timeout));
+ rd_atomic64_add(&rkb->rkb_c.wakeups, 1);
+
/* An op might have triggered the need for a connection, if so
* transition to TRY_CONNECT state. */
if (unlikely(rd_kafka_broker_needs_connection(rkb) &&
@@ -3627,11 +3635,29 @@ rd_kafka_broker_outbufs_space(rd_kafka_broker_t *rkb) {
}
+
+/**
+ * @brief Update \p *next_wakeup_ptr to \p maybe_next_wakeup if it is sooner.
+ *
+ * Both parameters are absolute timestamps.
+ * \p maybe_next_wakeup must not be 0.
+ */
+#define rd_kafka_set_next_wakeup(next_wakeup_ptr, maybe_next_wakeup) \
+ do { \
+ rd_ts_t *__n = (next_wakeup_ptr); \
+ rd_ts_t __m = (maybe_next_wakeup); \
+ rd_dassert(__m != 0); \
+ if (__m < *__n) \
+ *__n = __m; \
+ } while (0)
+
+
/**
* @brief Serve a toppar for producing.
*
* @param next_wakeup will be updated to when the next wake-up/attempt is
- * desired, only lower (sooner) values will be set.
+ * desired. Does not take the current value into
+ * consideration, even if it is lower.
* @param do_timeout_scan perform msg timeout scan
* @param may_send if set to false there is something on the global level
* that prohibits sending messages, such as a transactional
@@ -3659,6 +3685,7 @@ static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
int reqcnt;
int inflight = 0;
uint64_t epoch_base_msgid = 0;
+ rd_bool_t batch_ready = rd_false;
/* By limiting the number of not-yet-sent buffers (rkb_outbufs) we
* provide a backpressure mechanism to the producer loop
@@ -3685,8 +3712,8 @@ static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
timeoutcnt =
rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &next);
- if (next && next < *next_wakeup)
- *next_wakeup = next;
+ if (next)
+ rd_kafka_set_next_wakeup(next_wakeup, next);
if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
if (!rd_kafka_pid_valid(pid)) {
@@ -3732,10 +3759,32 @@ static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
} else if (max_requests > 0) {
/* Move messages from locked partition produce queue
* to broker-local xmit queue. */
- if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0)
+ if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0) {
+
rd_kafka_msgq_insert_msgq(
&rktp->rktp_xmit_msgq, &rktp->rktp_msgq,
rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+ }
+
+ /* Calculate maximum wait-time to honour
+ * queue.buffering.max.ms contract.
+ * Unless flushing, in which case immediate
+ * wakeups are allowed. */
+ batch_ready = rd_kafka_msgq_allow_wakeup_at(
+ &rktp->rktp_msgq, &rktp->rktp_xmit_msgq,
+ /* Only update the broker thread wakeup time
+ * if connection is up and messages can actually be
+ * sent, otherwise the wakeup can't do much. */
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP ? next_wakeup
+ : NULL,
+ now, flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us,
+ /* Batch message count threshold */
+ rkb->rkb_rk->rk_conf.batch_num_messages,
+ /* Batch size threshold.
+ * When compression is enabled the
+ * threshold is increased by x8. */
+ (rktp->rktp_rkt->rkt_conf.compression_codec ? 1 : 8) *
+ (int64_t)rkb->rkb_rk->rk_conf.batch_size);
}
rd_kafka_toppar_unlock(rktp);
@@ -3870,30 +3919,9 @@ static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
/* Attempt to fill the batch size, but limit our waiting
* to queue.buffering.max.ms, batch.num.messages, and batch.size. */
- if (!flushing && r < rkb->rkb_rk->rk_conf.batch_num_messages &&
- rktp->rktp_xmit_msgq.rkmq_msg_bytes <
- (int64_t)rkb->rkb_rk->rk_conf.batch_size) {
- rd_ts_t wait_max;
-
- /* Calculate maximum wait-time to honour
- * queue.buffering.max.ms contract. */
- wait_max = rd_kafka_msg_enq_time(rkm) +
- rkb->rkb_rk->rk_conf.buffering_max_us;
-
- if (wait_max > now) {
- /* Wait for more messages or queue.buffering.max.ms
- * to expire. */
- if (wait_max < *next_wakeup)
- *next_wakeup = wait_max;
- return 0;
- }
- }
-
- /* Honour retry.backoff.ms. */
- if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
- if (rkm->rkm_u.producer.ts_backoff < *next_wakeup)
- *next_wakeup = rkm->rkm_u.producer.ts_backoff;
- /* Wait for backoff to expire */
+ if (!batch_ready) {
+ /* Wait for more messages or queue.buffering.max.ms
+ * to expire. */
return 0;
}
@@ -3907,10 +3935,22 @@ static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
break;
}
- /* If there are messages still in the queue, make the next
- * wakeup immediate. */
- if (rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) > 0)
- *next_wakeup = now;
+ /* Update the allowed wake-up time based on remaining messages
+ * in the queue. */
+ if (cnt > 0) {
+ rd_kafka_toppar_lock(rktp);
+ batch_ready = rd_kafka_msgq_allow_wakeup_at(
+ &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, next_wakeup, now,
+ flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us,
+ /* Batch message count threshold */
+ rkb->rkb_rk->rk_conf.batch_num_messages,
+ /* Batch size threshold.
+ * When compression is enabled the
+ * threshold is increased by x8. */
+ (rktp->rktp_rkt->rkt_conf.compression_codec ? 1 : 8) *
+ (int64_t)rkb->rkb_rk->rk_conf.batch_size);
+ rd_kafka_toppar_unlock(rktp);
+ }
return cnt;
}
@@ -3921,7 +3961,7 @@ static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
* @brief Produce from all toppars assigned to this broker.
*
* @param next_wakeup is updated if the next IO/ops timeout should be
- * less than the input value.
+ * less than the input value (i.e., sooner).
*
* @returns the total number of messages produced.
*/
@@ -3970,8 +4010,7 @@ static int rd_kafka_broker_produce_toppars(rd_kafka_broker_t *rkb,
rkb, rktp, pid, now, &this_next_wakeup, do_timeout_scan,
may_send, flushing);
- if (this_next_wakeup < ret_next_wakeup)
- ret_next_wakeup = this_next_wakeup;
+ rd_kafka_set_next_wakeup(&ret_next_wakeup, this_next_wakeup);
} while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
rktp_activelink)) !=
@@ -4008,6 +4047,7 @@ static void rd_kafka_broker_producer_serve(rd_kafka_broker_t *rkb,
(abs_timeout > (now = rd_clock()))) {
rd_bool_t do_timeout_scan;
rd_ts_t next_wakeup = abs_timeout;
+ rd_bool_t overshot;
rd_kafka_broker_unlock(rkb);
@@ -4015,9 +4055,8 @@ static void rd_kafka_broker_producer_serve(rd_kafka_broker_t *rkb,
* on each state change, to make sure messages in
* partition rktp_xmit_msgq are timed out before
* being attempted to re-transmit. */
- do_timeout_scan =
- cnt++ == 0 ||
- rd_interval(&timeout_scan, 1000 * 1000, now) >= 0;
+ overshot = rd_interval(&timeout_scan, 1000 * 1000, now) >= 0;
+ do_timeout_scan = cnt++ == 0 || overshot;
rd_kafka_broker_produce_toppars(rkb, now, &next_wakeup,
do_timeout_scan);
@@ -4235,7 +4274,8 @@ static void rd_kafka_fetch_reply_handle_partition_error(
/* Application error */
err_offset = rktp->rktp_offsets.fetch_offset;
rktp->rktp_offsets.fetch_offset = RD_KAFKA_OFFSET_INVALID;
- rd_kafka_offset_reset(rktp, err_offset, err,
+ rd_kafka_offset_reset(rktp, rd_kafka_broker_id(rkb), err_offset,
+ err,
"fetch failed due to requested offset "
"not available on the broker");
} break;
@@ -5402,9 +5442,9 @@ void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) {
rd_kafka_sasl_broker_term(rkb);
if (rkb->rkb_wakeup_fd[0] != -1)
- rd_close(rkb->rkb_wakeup_fd[0]);
+ rd_socket_close(rkb->rkb_wakeup_fd[0]);
if (rkb->rkb_wakeup_fd[1] != -1)
- rd_close(rkb->rkb_wakeup_fd[1]);
+ rd_socket_close(rkb->rkb_wakeup_fd[1]);
if (rkb->rkb_recv_buf)
rd_kafka_buf_destroy(rkb->rkb_recv_buf);
@@ -6152,11 +6192,11 @@ const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb) {
* @locality any
* @locks any
*/
-void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb) {
+void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason) {
rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_WAKEUP);
rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
rd_kafka_q_enq(rkb->rkb_ops, rko);
- rd_rkb_dbg(rkb, QUEUE, "WAKEUP", "Wake-up");
+ rd_rkb_dbg(rkb, QUEUE, "WAKEUP", "Wake-up: %s", reason);
}
/**
@@ -6167,7 +6207,9 @@ void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb) {
*
* @returns the number of broker threads woken up
*/
-int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, int min_state) {
+int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk,
+ int min_state,
+ const char *reason) {
int cnt = 0;
rd_kafka_broker_t *rkb;
@@ -6180,12 +6222,19 @@ int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, int min_state) {
rd_kafka_broker_unlock(rkb);
if (do_wakeup) {
- rd_kafka_broker_wakeup(rkb);
+ rd_kafka_broker_wakeup(rkb, reason);
cnt += 1;
}
}
rd_kafka_rdunlock(rk);
+ if (cnt > 0)
+ rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_QUEUE, "WAKEUP",
+ "Wake-up sent to %d broker thread%s in "
+ "state >= %s: %s",
+ cnt, cnt > 1 ? "s" : "",
+ rd_kafka_broker_state_names[min_state], reason);
+
return cnt;
}
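
A standalone, compilable sketch of the rule the new rd_kafka_set_next_wakeup() macro encodes, with simplified names and the debug assert dropped: a candidate wakeup time only ever moves the deadline closer.

    #include <assert.h>
    #include <stdint.h>

    typedef int64_t ts_t;

    #define set_next_wakeup(next_wakeup_ptr, maybe_next_wakeup)            \
            do {                                                           \
                    ts_t *__n = (next_wakeup_ptr);                         \
                    ts_t __m  = (maybe_next_wakeup);                       \
                    if (__m < *__n)                                        \
                            *__n = __m;                                    \
            } while (0)

    int main(void) {
            ts_t next_wakeup = 10000; /* e.g. the abs_timeout default */

            set_next_wakeup(&next_wakeup, 12000); /* later: ignored */
            assert(next_wakeup == 10000);

            set_next_wakeup(&next_wakeup, 7000); /* sooner: wins */
            assert(next_wakeup == 7000);
            return 0;
    }
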
diff --git a/src/rdkafka_broker.h b/src/rdkafka_broker.h
index 1ee7a04f48..a574b5e68d 100644
--- a/src/rdkafka_broker.h
+++ b/src/rdkafka_broker.h
@@ -528,8 +528,10 @@ void msghdr_print(rd_kafka_t *rk,
int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb);
const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb);
-void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb);
-int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, int min_state);
+void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason);
+int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk,
+ int min_state,
+ const char *reason);
void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason);
diff --git a/src/rdkafka_buf.c b/src/rdkafka_buf.c
index 3da0fa50cd..5a0e131e8b 100644
--- a/src/rdkafka_buf.c
+++ b/src/rdkafka_buf.c
@@ -184,6 +184,9 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
* @remark \p free_cb (possibly NULL) will be used to free \p ptr when
* buffer refcount reaches 0.
* @remark the buffer may only be read from, not written to.
+ *
+ * @warning If the caller has log_decode_errors > 0 then it must set
+ * \c rkbuf->rkbuf_rkb to a refcnt-increased broker object.
*/
rd_kafka_buf_t *
rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)) {
diff --git a/src/rdkafka_buf.h b/src/rdkafka_buf.h
index 78762036b0..0552d89557 100644
--- a/src/rdkafka_buf.h
+++ b/src/rdkafka_buf.h
@@ -286,7 +286,10 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
void (*rkbuf_free_make_opaque_cb)(void *); /**< Free function for
* rkbuf_make_opaque. */
- struct rd_kafka_broker_s *rkbuf_rkb;
+ struct rd_kafka_broker_s *rkbuf_rkb; /**< Optional broker object
+ * (with refcnt increased) used
+ * for logging decode errors
+ * if log_decode_errors is > 0. */
rd_refcnt_t rkbuf_refcnt;
void *rkbuf_opaque;
@@ -409,8 +412,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
#define rd_kafka_buf_parse_fail(rkbuf, ...) \
do { \
- if (log_decode_errors > 0) { \
- rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \
+ if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \
rd_rkb_log( \
rkbuf->rkbuf_rkb, log_decode_errors, "PROTOERR", \
"Protocol parse failure for %s v%hd%s " \
@@ -437,8 +439,7 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
*/
#define rd_kafka_buf_underflow_fail(rkbuf, wantedlen, ...) \
do { \
- if (log_decode_errors > 0) { \
- rd_kafka_assert(NULL, rkbuf->rkbuf_rkb); \
+ if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \
char __tmpstr[256]; \
rd_snprintf(__tmpstr, sizeof(__tmpstr), \
": " __VA_ARGS__); \
@@ -540,29 +541,33 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
#define rd_kafka_buf_read_i64(rkbuf, dstptr) \
do { \
int64_t _v; \
+ int64_t *_vp = dstptr; \
rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
- *(dstptr) = be64toh(_v); \
+ *_vp = be64toh(_v); \
} while (0)
#define rd_kafka_buf_peek_i64(rkbuf, of, dstptr) \
do { \
int64_t _v; \
+ int64_t *_vp = dstptr; \
rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
- *(dstptr) = be64toh(_v); \
+ *_vp = be64toh(_v); \
} while (0)
#define rd_kafka_buf_read_i32(rkbuf, dstptr) \
do { \
int32_t _v; \
+ int32_t *_vp = dstptr; \
rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
- *(dstptr) = be32toh(_v); \
+ *_vp = be32toh(_v); \
} while (0)
#define rd_kafka_buf_peek_i32(rkbuf, of, dstptr) \
do { \
int32_t _v; \
+ int32_t *_vp = dstptr; \
rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
- *(dstptr) = be32toh(_v); \
+ *_vp = be32toh(_v); \
} while (0)
@@ -578,10 +583,18 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
#define rd_kafka_buf_read_i16(rkbuf, dstptr) \
do { \
int16_t _v; \
+ int16_t *_vp = dstptr; \
rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
- *(dstptr) = (int16_t)be16toh(_v); \
+ *_vp = (int16_t)be16toh(_v); \
} while (0)
+#define rd_kafka_buf_peek_i16(rkbuf, of, dstptr) \
+ do { \
+ int16_t _v; \
+ int16_t *_vp = dstptr; \
+ rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
+ *_vp = be16toh(_v); \
+ } while (0)
#define rd_kafka_buf_read_i16a(rkbuf, dst) \
do { \
@@ -607,29 +620,31 @@ struct rd_kafka_buf_s { /* rd_kafka_buf_t */
/**
* @brief Read varint and store in int64_t \p dst
*/
-#define rd_kafka_buf_read_varint(rkbuf, dst) \
+#define rd_kafka_buf_read_varint(rkbuf, dstptr) \
do { \
int64_t _v; \
+ int64_t *_vp = dstptr; \
size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_v); \
if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
"varint parsing failed"); \
- *(dst) = _v; \
+ *_vp = _v; \
} while (0)
/**
* @brief Read unsigned varint and store in uint64_t \p dst
*/
-#define rd_kafka_buf_read_uvarint(rkbuf, dst) \
+#define rd_kafka_buf_read_uvarint(rkbuf, dstptr) \
do { \
uint64_t _v; \
+ uint64_t *_vp = dstptr; \
size_t _r = \
rd_slice_read_uvarint(&(rkbuf)->rkbuf_reader, &_v); \
if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
"uvarint parsing failed"); \
- *(dst) = _v; \
+ *_vp = _v; \
} while (0)
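
A standalone sketch of why these macros now copy dstptr into a typed local pointer: the destination's pointer type is checked at compile time and the macro argument is evaluated exactly once.

    #include <stdint.h>
    #include <stdio.h>

    #define read_i32_checked(src, dstptr)                                  \
            do {                                                           \
                    int32_t _v   = (src);                                  \
                    int32_t *_vp = (dstptr); /* wrong type warns here */   \
                    *_vp         = _v;                                     \
            } while (0)

    int main(void) {
            int32_t ok;

            read_i32_checked(42, &ok); /* fine */
            printf("%d\n", ok);

            /* int64_t wrong;
             * read_i32_checked(42, &wrong);
             *   ^ now an incompatible-pointer-type diagnostic instead of
             *     silently converting the value through *(dstptr). */
            return 0;
    }
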
diff --git a/src/rdkafka_cgrp.c b/src/rdkafka_cgrp.c
index 7830d1c65d..ce41b5c9af 100644
--- a/src/rdkafka_cgrp.c
+++ b/src/rdkafka_cgrp.c
@@ -1773,6 +1773,10 @@ static int rd_kafka_group_MemberMetadata_consumer_read(
rkbuf = rd_kafka_buf_new_shadow(
MemberMetadata->data, RD_KAFKAP_BYTES_LEN(MemberMetadata), NULL);
+ /* Protocol parser needs a broker handle to log errors on. */
+ rkbuf->rkbuf_rkb = rkb;
+ rd_kafka_broker_keep(rkb);
+
rd_kafka_buf_read_i16(rkbuf, &Version);
rd_kafka_buf_read_i32(rkbuf, &subscription_cnt);
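
A hedged internal sketch of the pattern this hunk (and the new @warning in rdkafka_buf.c) establishes for shadow buffers parsed with log_decode_errors: attach a refcnt-increased broker so parse failures have somewhere to log. The function name is hypothetical and buffer teardown follows the normal destroy path.

    static void parse_with_error_logging(rd_kafka_broker_t *rkb,
                                         const rd_kafkap_bytes_t *payload) {
            const int log_decode_errors = LOG_ERR;
            rd_kafka_buf_t *rkbuf;
            int16_t Version;

            rkbuf = rd_kafka_buf_new_shadow(
                payload->data, RD_KAFKAP_BYTES_LEN(payload), NULL);

            /* Parse failures log on rkbuf->rkbuf_rkb, so it must be set
             * to a refcnt-increased broker object. */
            rkbuf->rkbuf_rkb = rkb;
            rd_kafka_broker_keep(rkb);

            rd_kafka_buf_read_i16(rkbuf, &Version); /* may goto err_parse */
            (void)Version; /* parsed value unused in this sketch */

    err_parse:
            rd_kafka_buf_destroy(rkbuf);
    }
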
diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c
index ddf00284d2..e5f2b8491d 100644
--- a/src/rdkafka_conf.c
+++ b/src/rdkafka_conf.c
@@ -979,50 +979,45 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
{_RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I,
_RK(sasl.oauthbearer.method),
"Set to \"default\" or \"oidc\" to control which login method "
- "is used. If set it to \"oidc\", OAuth/OIDC login method will "
- "be used. "
- "sasl.oauthbearer.client.id, sasl.oauthbearer.client.secret, "
- "sasl.oauthbearer.scope, sasl.oauthbearer.extensions, "
- "and sasl.oauthbearer.token.endpoint.url are needed if "
- "sasl.oauthbearer.method is set to \"oidc\".",
+ "to be used. If set to \"oidc\", the following properties must also be "
+ "be specified: "
+ "`sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, "
+ "and `sasl.oauthbearer.token.endpoint.url`.",
.vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT,
.s2i = {{RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default"},
{RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc"}},
_UNSUPPORTED_OIDC},
{_RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR,
_RK(sasl.oauthbearer.client_id),
- "It's a public identifier for the application. "
- "It must be unique across all clients that the "
+ "Public identifier for the application. "
+ "Must be unique across all clients that the "
"authorization server handles. "
- "This is only used when sasl.oauthbearer.method is set to oidc.",
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
_UNSUPPORTED_OIDC},
{_RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR,
_RK(sasl.oauthbearer.client_secret),
- "A client secret only known to the application and the "
+ "Client secret only known to the application and the "
"authorization server. This should be a sufficiently random string "
- "that are not guessable. "
- "This is only used when sasl.oauthbearer.method is set to \"oidc\".",
+ "that is not guessable. "
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
_UNSUPPORTED_OIDC},
{_RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR,
_RK(sasl.oauthbearer.scope),
"Client use this to specify the scope of the access request to the "
"broker. "
- "This is only used when sasl.oauthbearer.method is set to \"oidc\".",
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
_UNSUPPORTED_OIDC},
{_RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR,
_RK(sasl.oauthbearer.extensions_str),
"Allow additional information to be provided to the broker. "
- "It's comma-separated list of key=value pairs. "
- "The example of the input is "
- "\"supportFeatureX=true,organizationId=sales-emea\"."
- " This is only used when sasl.oauthbearer.method is set "
- "to \"oidc\".",
+ "Comma-separated list of key=value pairs. "
+ "E.g., \"supportFeatureX=true,organizationId=sales-emea\"."
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
_UNSUPPORTED_OIDC},
{_RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR,
_RK(sasl.oauthbearer.token_endpoint_url),
- "OAUTH issuer token endpoint HTTP(S) URI used to retrieve the "
- "token. "
- "This is only used when sasl.oauthbearer.method is set to \"oidc\".",
+ "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. "
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
_UNSUPPORTED_OIDC},
/* Plugins */
@@ -1058,6 +1053,9 @@ static const struct rd_kafka_property rd_kafka_properties[] = {
"This will automatically overwrite `bootstrap.servers` with the "
"mock broker list.",
0, 10000, 0},
+ {_RK_GLOBAL | _RK_HIDDEN, "test.mock.broker.rtt", _RK_C_INT,
+ _RK(mock.broker_rtt), "Simulated mock broker latency in milliseconds.", 0,
+ 60 * 60 * 1000 /*1h*/, 0},
/* Unit test interfaces.
* These are not part of the public API and may change at any time.
@@ -1640,14 +1638,15 @@ const struct rd_kafka_property *rd_kafka_conf_prop_find(int scope,
/**
* @returns rd_true if property has been set/modified, else rd_false.
- * If \p name is unknown 0 is returned.
+ *
+ * @warning Asserts if the property does not exist.
*/
rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
const char *name) {
const struct rd_kafka_property *prop;
if (!(prop = rd_kafka_conf_prop_find(_RK_GLOBAL, name)))
- return rd_false;
+ RD_BUG("Configuration property \"%s\" does not exist", name);
return rd_kafka_anyconf_is_modified(conf, prop);
}
@@ -1655,7 +1654,8 @@ rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
/**
* @returns true if property has been set/modified, else 0.
- * If \p name is unknown 0 is returned.
+ *
+ * @warning Asserts if the property does not exist.
*/
static rd_bool_t
rd_kafka_topic_conf_is_modified(const rd_kafka_topic_conf_t *conf,
@@ -1663,7 +1663,8 @@ rd_kafka_topic_conf_is_modified(const rd_kafka_topic_conf_t *conf,
const struct rd_kafka_property *prop;
if (!(prop = rd_kafka_conf_prop_find(_RK_TOPIC, name)))
- return 0;
+ RD_BUG("Topic configuration property \"%s\" does not exist",
+ name);
return rd_kafka_anyconf_is_modified(conf, prop);
}
@@ -3607,8 +3608,7 @@ static void rd_kafka_sw_str_sanitize_inplace(char *str) {
* on success. The array count is returned in \p cntp.
* The returned pointer must be freed with rd_free().
*/
-static RD_UNUSED char **
-rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp) {
+char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp) {
size_t i;
char **out, *p;
size_t lens = 0;
@@ -3716,12 +3716,34 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
"`sasl.oauthbearer.method=oidc` are "
"mutually exclusive";
+ if (conf->sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) {
+ if (!conf->sasl.oauthbearer.client_id)
+ return "`sasl.oauthbearer.client.id` is "
+ "mandatory when "
+ "`sasl.oauthbearer.method=oidc` is set";
+
+ if (!conf->sasl.oauthbearer.client_secret) {
+ return "`sasl.oauthbearer.client.secret` is "
+ "mandatory when "
+ "`sasl.oauthbearer.method=oidc` is set";
+ }
+
+ if (!conf->sasl.oauthbearer.token_endpoint_url) {
+ return "`sasl.oauthbearer.token.endpoint.url` "
+ "is mandatory when "
+ "`sasl.oauthbearer.method=oidc` is set";
+ }
+ }
+
/* Enable background thread for the builtin OIDC handler,
* unless a refresh callback has been set. */
if (conf->sasl.oauthbearer.method ==
RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
- !conf->sasl.oauthbearer.token_refresh_cb)
+ !conf->sasl.oauthbearer.token_refresh_cb) {
conf->enabled_events |= RD_KAFKA_EVENT_BACKGROUND;
+ conf->sasl.enable_callback_queue = 1;
+ }
}
#endif
@@ -3890,8 +3912,8 @@ const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
if (tconf->message_timeout_ms != 0 &&
(double)tconf->message_timeout_ms <=
conf->buffering_max_ms_dbl) {
- if (rd_kafka_topic_conf_is_modified(
- tconf, "linger.ms"))
+ if (rd_kafka_conf_is_modified(conf,
+ "linger.ms"))
return "`message.timeout.ms` must be "
"greater than `linger.ms`";
else /* Auto adjust linger.ms to be lower
@@ -3968,7 +3990,7 @@ const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype,
if (tconf->message_timeout_ms != 0 &&
(double)tconf->message_timeout_ms <= conf->buffering_max_ms_dbl &&
- rd_kafka_topic_conf_is_modified(tconf, "linger.ms"))
+ rd_kafka_conf_is_modified(conf, "linger.ms"))
return "`message.timeout.ms` must be greater than `linger.ms`";
return NULL;
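
A sketch of a client configuration that satisfies the new OIDC validation above; client id, secret and token endpoint are placeholders and rd_kafka_conf_set() error handling is trimmed.

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    static rd_kafka_t *new_oidc_producer(void) {
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            rd_kafka_t *rk;
            char errstr[512];

            rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.mechanisms", "OAUTHBEARER", errstr,
                              sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.method", "oidc", errstr,
                              sizeof(errstr));

            /* All three are now mandatory for method=oidc; omitting any of
             * them makes rd_kafka_new() fail with the messages added above. */
            rd_kafka_conf_set(conf, "sasl.oauthbearer.client.id", "my-client",
                              errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.client.secret",
                              "my-secret", errstr, sizeof(errstr));
            rd_kafka_conf_set(conf, "sasl.oauthbearer.token.endpoint.url",
                              "https://login.example.com/token", errstr,
                              sizeof(errstr));

            rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
            if (!rk)
                    fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
            return rk;
    }
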
diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h
index c09dbf6def..dceb89278d 100644
--- a/src/rdkafka_conf.h
+++ b/src/rdkafka_conf.h
@@ -531,6 +531,7 @@ struct rd_kafka_conf_s {
*/
struct {
int broker_cnt; /**< Number of mock brokers */
+ int broker_rtt; /**< Broker RTT */
} mock;
/*
@@ -612,6 +613,7 @@ struct rd_kafka_topic_conf_s {
};
+char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp);
void rd_kafka_anyconf_destroy(int scope, void *conf);
diff --git a/src/rdkafka_event.c b/src/rdkafka_event.c
index 0ba71acae5..8e98114175 100644
--- a/src/rdkafka_event.c
+++ b/src/rdkafka_event.c
@@ -68,6 +68,12 @@ const char *rd_kafka_event_name(const rd_kafka_event_t *rkev) {
return "DeleteGroupsResult";
case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT:
return "DeleteConsumerGroupOffsetsResult";
+ case RD_KAFKA_EVENT_CREATEACLS_RESULT:
+ return "CreateAclsResult";
+ case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
+ return "DescribeAclsResult";
+ case RD_KAFKA_EVENT_DELETEACLS_RESULT:
+ return "DeleteAclsResult";
case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
return "SaslOAuthBearerTokenRefresh";
case RD_KAFKA_EVENT_AWS_MSK_IAM_CREDENTIAL_REFRESH:
@@ -351,3 +357,27 @@ rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
return (
const rd_kafka_DeleteConsumerGroupOffsets_result_t *)rkev;
}
+
+const rd_kafka_CreateAcls_result_t *
+rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEACLS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_CreateAcls_result_t *)rkev;
+}
+
+const rd_kafka_DescribeAcls_result_t *
+rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBEACLS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DescribeAcls_result_t *)rkev;
+}
+
+const rd_kafka_DeleteAcls_result_t *
+rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEACLS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DeleteAcls_result_t *)rkev;
+}
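
A sketch of dispatching the new ACL result event types from an admin result queue; the queue is assumed to have been passed to the corresponding Admin API call, and the per-ACL result accessors are omitted.

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    static void serve_acl_result(rd_kafka_queue_t *rkqu) {
            rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, 1000 /*ms*/);
            const rd_kafka_CreateAcls_result_t *cres;

            if (!rkev)
                    return; /* timed out */

            switch (rd_kafka_event_type(rkev)) {
            case RD_KAFKA_EVENT_CREATEACLS_RESULT:
                    cres = rd_kafka_event_CreateAcls_result(rkev);
                    printf("CreateAcls (%p): %s\n", (const void *)cres,
                           rd_kafka_err2str(rd_kafka_event_error(rkev)));
                    break;
            case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
            case RD_KAFKA_EVENT_DELETEACLS_RESULT:
                    printf("%s: %s\n", rd_kafka_event_name(rkev),
                           rd_kafka_err2str(rd_kafka_event_error(rkev)));
                    break;
            default:
                    break;
            }

            rd_kafka_event_destroy(rkev);
    }
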
diff --git a/src/rdkafka_event.h b/src/rdkafka_event.h
index abb5937cef..d733b63d28 100644
--- a/src/rdkafka_event.h
+++ b/src/rdkafka_event.h
@@ -103,6 +103,9 @@ static RD_UNUSED RD_INLINE int rd_kafka_event_setup(rd_kafka_t *rk,
case RD_KAFKA_EVENT_DELETERECORDS_RESULT:
case RD_KAFKA_EVENT_DELETEGROUPS_RESULT:
case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT:
+ case RD_KAFKA_EVENT_CREATEACLS_RESULT:
+ case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
+ case RD_KAFKA_EVENT_DELETEACLS_RESULT:
case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
case RD_KAFKA_EVENT_AWS_MSK_IAM_CREDENTIAL_REFRESH:
return 1;
diff --git a/src/rdkafka_idempotence.c b/src/rdkafka_idempotence.c
index f79be76b95..6f680c5404 100644
--- a/src/rdkafka_idempotence.c
+++ b/src/rdkafka_idempotence.c
@@ -165,6 +165,7 @@ rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk,
break;
case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
is_fatal = rd_true;
/* Normalize error */
err = RD_KAFKA_RESP_ERR__FENCED;
@@ -313,14 +314,16 @@ void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk) {
rd_kafka_handle_InitProducerId, NULL);
}
- rd_kafka_broker_destroy(rkb);
-
if (err) {
rd_rkb_dbg(rkb, EOS, "GETPID",
"Can't acquire ProducerId from "
"this broker: %s",
errstr);
+ }
+ rd_kafka_broker_destroy(rkb);
+
+ if (err) {
if (rd_kafka_idemp_check_error(rk, err, errstr,
is_fatal))
return; /* Fatal error */
@@ -492,7 +495,8 @@ void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb,
/* Wake up all broker threads (that may have messages to send
* that were waiting for a Producer ID). */
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "PID updated");
}
@@ -548,7 +552,8 @@ static void rd_kafka_idemp_drain_done(rd_kafka_t *rk) {
/* Wake up all broker threads (that may have messages to send
* that were waiting for a Producer ID). */
if (wakeup_brokers)
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "message drain done");
}
/**
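
Since RD_KAFKA_RESP_ERR_PRODUCER_FENCED is now normalized to the fatal __FENCED error, an application can detect fencing through the regular fatal-error API; a minimal sketch:

    #include <librdkafka/rdkafka.h>
    #include <stdio.h>

    static int producer_is_fenced(rd_kafka_t *rk) {
            char fatal_errstr[512];
            rd_kafka_resp_err_t fatal_err =
                rd_kafka_fatal_error(rk, fatal_errstr, sizeof(fatal_errstr));

            if (fatal_err == RD_KAFKA_RESP_ERR__FENCED) {
                    fprintf(stderr, "fenced by a newer instance: %s\n",
                            fatal_errstr);
                    return 1;
            }
            return 0;
    }
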
diff --git a/src/rdkafka_metadata.c b/src/rdkafka_metadata.c
index d5ceed95fa..e647afe5fc 100644
--- a/src/rdkafka_metadata.c
+++ b/src/rdkafka_metadata.c
@@ -236,7 +236,7 @@ rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
int32_t controller_id = -1;
rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
int broker_changes = 0;
- int topic_changes = 0;
+ int cache_changes = 0;
rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread));
@@ -506,7 +506,7 @@ rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
rd_kafka_wrlock(rk);
rd_kafka_metadata_cache_topic_update(
rk, mdt, rd_false /*propagate later*/);
- topic_changes++;
+ cache_changes++;
rd_kafka_wrunlock(rk);
}
}
@@ -571,6 +571,9 @@ rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
}
rk->rk_clusterid = RD_KAFKAP_STR_DUP(&cluster_id);
+ /* rd_kafka_clusterid() waits for a cache update even though
+ * the clusterid is not in the cache itself. (#3620) */
+ cache_changes++;
}
/* Update controller id. */
@@ -597,7 +600,7 @@ rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
"%d broker(s) and %d topic(s): %s",
md->broker_cnt, md->topic_cnt, reason);
} else {
- if (topic_changes)
+ if (cache_changes)
rd_kafka_metadata_cache_propagate_changes(rk);
rd_kafka_metadata_cache_expiry_start(rk);
}
diff --git a/src/rdkafka_mock.c b/src/rdkafka_mock.c
index 12c4b06781..394c9e487c 100644
--- a/src/rdkafka_mock.c
+++ b/src/rdkafka_mock.c
@@ -171,6 +171,124 @@ rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart,
}
+/**
+ * @brief Looks up or creates a new pidstate for the given partition and PID.
+ *
+ * The pidstate is used to verify per-partition per-producer BaseSequences
+ * for the idempotent/txn producer.
+ */
+static rd_kafka_mock_pid_t *
+rd_kafka_mock_partition_pidstate_get(rd_kafka_mock_partition_t *mpart,
+ const rd_kafka_mock_pid_t *mpid) {
+ rd_kafka_mock_pid_t *pidstate;
+ size_t tidlen;
+
+ pidstate = rd_list_find(&mpart->pidstates, mpid, rd_kafka_mock_pid_cmp);
+ if (pidstate)
+ return pidstate;
+
+ tidlen = strlen(mpid->TransactionalId);
+ pidstate = rd_malloc(sizeof(*pidstate) + tidlen);
+ pidstate->pid = mpid->pid;
+ memcpy(pidstate->TransactionalId, mpid->TransactionalId, tidlen);
+ pidstate->TransactionalId[tidlen] = '\0';
+
+ pidstate->lo = pidstate->hi = pidstate->window = 0;
+ memset(pidstate->seq, 0, sizeof(pidstate->seq));
+
+ rd_list_add(&mpart->pidstates, pidstate);
+
+ return pidstate;
+}
+
+
+/**
+ * @brief Validate ProduceRequest records in \p rkbuf.
+ *
+ * @warning The \p rkbuf must not be read, just peek()ed.
+ *
+ * This is a very selective validation, currently only:
+ * - verify idempotency TransactionalId,PID,Epoch,Seq
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_validate_records(rd_kafka_mock_partition_t *mpart,
+ rd_kafka_buf_t *rkbuf,
+ size_t RecordCount,
+ const rd_kafkap_str_t *TransactionalId,
+ rd_bool_t *is_dupd) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster;
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_mock_pid_t *mpidstate = NULL;
+ rd_kafka_pid_t pid;
+ int32_t expected_BaseSequence = -1, BaseSequence = -1;
+ rd_kafka_resp_err_t err;
+
+ *is_dupd = rd_false;
+
+ if (!TransactionalId || RD_KAFKAP_STR_LEN(TransactionalId) < 1)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_buf_peek_i64(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerId,
+ &pid.id);
+ rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch,
+ &pid.epoch);
+ rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_BaseSequence,
+ &BaseSequence);
+
+ mtx_lock(&mcluster->lock);
+ err = rd_kafka_mock_pid_find(mcluster, TransactionalId, pid, &mpid);
+ mtx_unlock(&mcluster->lock);
+
+ if (likely(!err)) {
+
+ if (mpid->pid.epoch != pid.epoch)
+ err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
+
+ /* Each partition tracks the last 5 Produce requests per PID. */
+ mpidstate = rd_kafka_mock_partition_pidstate_get(mpart, mpid);
+
+ expected_BaseSequence = mpidstate->seq[mpidstate->hi];
+
+ /* A BaseSequence within the range of the last 5 requests is
+ * considered a legal duplicate and will be successfully acked
+ * but not written to the log. */
+ if (BaseSequence < mpidstate->seq[mpidstate->lo])
+ err = RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER;
+ else if (BaseSequence > mpidstate->seq[mpidstate->hi])
+ err = RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER;
+ else if (BaseSequence != expected_BaseSequence)
+ *is_dupd = rd_true;
+ }
+
+ if (unlikely(err)) {
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": Log append %s [%" PRId32
+ "] failed: PID mismatch: TransactionalId=%.*s "
+ "expected %s BaseSeq %" PRId32
+ ", not %s BaseSeq %" PRId32 ": %s",
+ mpart->leader->id, mpart->topic->name, mpart->id,
+ RD_KAFKAP_STR_PR(TransactionalId),
+ mpid ? rd_kafka_pid2str(mpid->pid) : "n/a",
+ expected_BaseSequence, rd_kafka_pid2str(pid),
+ BaseSequence, rd_kafka_err2name(err));
+ return err;
+ }
+
+ /* Update BaseSequence window */
+ if (unlikely(mpidstate->window < 5))
+ mpidstate->window++;
+ else
+ mpidstate->lo = (mpidstate->lo + 1) % mpidstate->window;
+ mpidstate->hi = (mpidstate->hi + 1) % mpidstate->window;
+ mpidstate->seq[mpidstate->hi] = (int32_t)(BaseSequence + RecordCount);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ return rkbuf->rkbuf_err;
+}
+
/**
* @brief Append the MessageSets in \p bytes to the \p mpart partition log.
*
@@ -178,21 +296,25 @@ rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart,
*/
rd_kafka_resp_err_t
rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_bytes_t *bytes,
+ const rd_kafkap_bytes_t *records,
+ const rd_kafkap_str_t *TransactionalId,
int64_t *BaseOffset) {
const int log_decode_errors = LOG_ERR;
rd_kafka_buf_t *rkbuf;
rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
int8_t MagicByte;
int32_t RecordCount;
+ int16_t Attributes;
rd_kafka_mock_msgset_t *mset;
+ rd_bool_t is_dup = rd_false;
/* Partially parse the MessageSet in \p bytes to get
* the message count. */
- rkbuf = rd_kafka_buf_new_shadow(bytes->data, RD_KAFKAP_BYTES_LEN(bytes),
- NULL);
+ rkbuf = rd_kafka_buf_new_shadow(records->data,
+ RD_KAFKAP_BYTES_LEN(records), NULL);
- rd_kafka_buf_peek_i8(rkbuf, 8 + 4 + 4, &MagicByte);
+ rd_kafka_buf_peek_i8(rkbuf, RD_KAFKAP_MSGSET_V2_OF_MagicByte,
+ &MagicByte);
if (MagicByte != 2) {
/* We only support MsgVersion 2 for now */
err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION;
@@ -201,17 +323,28 @@ rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_RecordCount,
&RecordCount);
+ rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_Attributes,
+ &Attributes);
if (RecordCount < 1 ||
- (size_t)RecordCount > RD_KAFKAP_BYTES_LEN(bytes) /
- RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD) {
+ (!(Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
+ (size_t)RecordCount > RD_KAFKAP_BYTES_LEN(records) /
+ RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD)) {
err = RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE;
goto err;
}
+ if ((err = rd_kafka_mock_validate_records(
+ mpart, rkbuf, (size_t)RecordCount, TransactionalId, &is_dup)))
+ goto err;
+
+ /* If this is a legit duplicate, don't write it to the log. */
+ if (is_dup)
+ goto err;
+
rd_kafka_buf_destroy(rkbuf);
- mset = rd_kafka_mock_msgset_new(mpart, bytes, (size_t)RecordCount);
+ mset = rd_kafka_mock_msgset_new(mpart, records, (size_t)RecordCount);
*BaseOffset = mset->first_offset;
@@ -348,6 +481,8 @@ static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) {
TAILQ_FOREACH_SAFE(coff, &mpart->committed_offsets, link, tmpcoff)
rd_kafka_mock_committed_offset_destroy(mpart, coff);
+ rd_list_destroy(&mpart->pidstates);
+
rd_free(mpart->replicas);
}
@@ -371,6 +506,8 @@ static void rd_kafka_mock_partition_init(rd_kafka_mock_topic_t *mtopic,
TAILQ_INIT(&mpart->committed_offsets);
+ rd_list_init(&mpart->pidstates, 0, rd_free);
+
rd_kafka_mock_partition_assign_replicas(mpart);
}
@@ -1087,7 +1224,7 @@ rd_kafka_mock_connection_new(rd_kafka_mock_broker_t *mrkb,
char errstr[128];
if (!mrkb->up) {
- rd_close(fd);
+ rd_socket_close(fd);
return NULL;
}
@@ -1098,7 +1235,7 @@ rd_kafka_mock_connection_new(rd_kafka_mock_broker_t *mrkb,
"Failed to create transport for new "
"mock connection: %s",
errstr);
- rd_close(fd);
+ rd_socket_close(fd);
return NULL;
}
@@ -1131,7 +1268,7 @@ static void rd_kafka_mock_cluster_op_io(rd_kafka_mock_cluster_t *mcluster,
void *opaque) {
/* Read wake-up fd data and throw away, just used for wake-ups*/
char buf[1024];
- while (rd_read(fd, buf, sizeof(buf)) > 0)
+ while (rd_socket_read(fd, buf, sizeof(buf)) > 0)
; /* Read all buffered signalling bytes */
}
@@ -1268,7 +1405,7 @@ static void rd_kafka_mock_broker_destroy(rd_kafka_mock_broker_t *mrkb) {
rd_kafka_mock_broker_close_all(mrkb, "Destroying broker");
rd_kafka_mock_cluster_io_del(mrkb->cluster, mrkb->listen_s);
- rd_close(mrkb->listen_s);
+ rd_socket_close(mrkb->listen_s);
while ((errstack = TAILQ_FIRST(&mrkb->errstacks))) {
TAILQ_REMOVE(&mrkb->errstacks, errstack, link);
@@ -1309,7 +1446,7 @@ rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) {
"Failed to bind mock broker socket to %s: %s",
rd_socket_strerror(rd_socket_errno),
rd_sockaddr2str(&sin, RD_SOCKADDR2STR_F_PORT));
- rd_close(listen_s);
+ rd_socket_close(listen_s);
return NULL;
}
@@ -1318,7 +1455,7 @@ rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) {
rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
"Failed to get mock broker socket name: %s",
rd_socket_strerror(rd_socket_errno));
- rd_close(listen_s);
+ rd_socket_close(listen_s);
return NULL;
}
rd_assert(sin.sin_family == AF_INET);
@@ -1327,7 +1464,7 @@ rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) {
rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
"Failed to listen on mock broker socket: %s",
rd_socket_strerror(rd_socket_errno));
- rd_close(listen_s);
+ rd_socket_close(listen_s);
return NULL;
}
@@ -1630,6 +1767,7 @@ void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster,
va_start(ap, cnt);
for (i = 0; i < cnt; i++)
errors[i] = va_arg(ap, rd_kafka_resp_err_t);
+ va_end(ap);
rd_kafka_mock_push_request_errors_array(mcluster, ApiKey, cnt, errors);
}
@@ -1846,6 +1984,82 @@ rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster,
}
+/**
+ * @brief Apply a command to a specific broker.
+ *
+ * @locality mcluster thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_broker_cmd(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_mock_broker_t *mrkb,
+ rd_kafka_op_t *rko) {
+ switch (rko->rko_u.mock.cmd) {
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
+ mrkb->up = (rd_bool_t)rko->rko_u.mock.lo;
+
+ if (!mrkb->up)
+ rd_kafka_mock_broker_close_all(mrkb, "Broker down");
+ break;
+
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
+ mrkb->rtt = (rd_ts_t)rko->rko_u.mock.lo * 1000;
+
+ /* Check if there is anything to send now that the RTT
+ * has changed or if a timer is to be started. */
+ rd_kafka_mock_broker_connections_write_out(mrkb);
+ break;
+
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
+ if (mrkb->rack)
+ rd_free(mrkb->rack);
+
+ if (rko->rko_u.mock.name)
+ mrkb->rack = rd_strdup(rko->rko_u.mock.name);
+ else
+ mrkb->rack = NULL;
+ break;
+
+ default:
+ RD_BUG("Unhandled mock cmd %d", rko->rko_u.mock.cmd);
+ break;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Apply a command to one or all brokers, depending on the value of
+ * broker_id, where -1 means all brokers and any other value a specific broker.
+ *
+ * @locality mcluster thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_brokers_cmd(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_op_t *rko) {
+ rd_kafka_mock_broker_t *mrkb;
+
+ if (rko->rko_u.mock.broker_id != -1) {
+ /* Specific broker */
+ mrkb = rd_kafka_mock_broker_find(mcluster,
+ rko->rko_u.mock.broker_id);
+ if (!mrkb)
+ return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
+
+ return rd_kafka_mock_broker_cmd(mcluster, mrkb, rko);
+ }
+
+ /* All brokers */
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
+ rd_kafka_resp_err_t err;
+
+ if ((err = rd_kafka_mock_broker_cmd(mcluster, mrkb, rko)))
+ return err;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
/**
* @brief Handle command op
@@ -1947,45 +2161,11 @@ rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster,
}
break;
+ /* Broker commands */
case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
- mrkb = rd_kafka_mock_broker_find(mcluster,
- rko->rko_u.mock.broker_id);
- if (!mrkb)
- return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
-
- mrkb->up = (rd_bool_t)rko->rko_u.mock.lo;
-
- if (!mrkb->up)
- rd_kafka_mock_broker_close_all(mrkb, "Broker down");
- break;
-
case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
- mrkb = rd_kafka_mock_broker_find(mcluster,
- rko->rko_u.mock.broker_id);
- if (!mrkb)
- return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
-
- mrkb->rtt = (rd_ts_t)rko->rko_u.mock.lo * 1000;
-
- /* Check if there is anything to send now that the RTT
- * has changed or if a timer is to be started. */
- rd_kafka_mock_broker_connections_write_out(mrkb);
- break;
-
case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
- mrkb = rd_kafka_mock_broker_find(mcluster,
- rko->rko_u.mock.broker_id);
- if (!mrkb)
- return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
-
- if (mrkb->rack)
- rd_free(mrkb->rack);
-
- if (rko->rko_u.mock.name)
- mrkb->rack = rd_strdup(rko->rko_u.mock.name);
- else
- mrkb->rack = NULL;
- break;
+ return rd_kafka_mock_brokers_cmd(mcluster, rko);
case RD_KAFKA_MOCK_CMD_COORD_SET:
if (!rd_kafka_mock_coord_set(mcluster, rko->rko_u.mock.name,
@@ -2101,8 +2281,8 @@ static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) {
rd_free(mcluster->bootstraps);
- rd_close(mcluster->wakeup_fds[0]);
- rd_close(mcluster->wakeup_fds[1]);
+ rd_socket_close(mcluster->wakeup_fds[0]);
+ rd_socket_close(mcluster->wakeup_fds[1]);
}
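
A standalone, compilable sketch of the five-slot BaseSequence window used by rd_kafka_mock_validate_records(): sequences below the window are rejected as duplicates, sequences beyond the newest expected one as out-of-order, and resends of one of the last five requests are acked without being appended.

    #include <stdint.h>
    #include <stdio.h>

    struct seq_window {
            int8_t window, lo, hi;
            int32_t seq[5];
    };

    static const char *check_and_update(struct seq_window *w,
                                        int32_t BaseSequence,
                                        int32_t RecordCount) {
            const char *outcome = "appended";

            if (BaseSequence < w->seq[w->lo])
                    return "DUPLICATE_SEQUENCE_NUMBER (error)";
            else if (BaseSequence > w->seq[w->hi])
                    return "OUT_OF_ORDER_SEQUENCE_NUMBER (error)";
            else if (BaseSequence != w->seq[w->hi])
                    outcome = "legal duplicate: acked, not appended";

            /* Same window-update arithmetic as the mock broker above. */
            if (w->window < 5)
                    w->window++;
            else
                    w->lo = (w->lo + 1) % w->window;
            w->hi         = (w->hi + 1) % w->window;
            w->seq[w->hi] = BaseSequence + RecordCount;

            return outcome;
    }

    int main(void) {
            struct seq_window w = {0, 0, 0, {0}};

            printf("%s\n", check_and_update(&w, 0, 10));  /* appended */
            printf("%s\n", check_and_update(&w, 10, 5));  /* appended */
            printf("%s\n", check_and_update(&w, 10, 5));  /* legal duplicate */
            printf("%s\n", check_and_update(&w, 99, 1));  /* out-of-order */
            return 0;
    }
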
diff --git a/src/rdkafka_mock.h b/src/rdkafka_mock.h
index 006ffad23e..363d6bd8ae 100644
--- a/src/rdkafka_mock.h
+++ b/src/rdkafka_mock.h
@@ -68,9 +68,6 @@ extern "C" {
* - High-level balanced consumer groups with offset commits
* - Topic Metadata and auto creation
*
- * @remark High-level consumers making use of the balanced consumer groups
- * are not supported.
- *
* @remark This is an experimental public API that is NOT covered by the
* librdkafka API or ABI stability guarantees.
*
@@ -267,6 +264,9 @@ rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
/**
* @brief Disconnects the broker and disallows any new connections.
* This does NOT trigger leader change.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
@@ -275,6 +275,9 @@ rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
/**
* @brief Makes the broker accept connections again.
* This does NOT trigger leader change.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
@@ -283,6 +286,9 @@ rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
/**
* @brief Set broker round-trip-time delay in milliseconds.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
@@ -291,6 +297,9 @@ rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
/**
* @brief Sets the broker's rack as reported in Metadata to the client.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster,
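
A sketch of the broker_id = -1 semantics documented above, applying mock-broker commands to all brokers at once; the three-broker count and RTT value are illustrative.

    #include <librdkafka/rdkafka.h>
    #include <librdkafka/rdkafka_mock.h>

    static void bounce_all_brokers(rd_kafka_t *rk) {
            rd_kafka_mock_cluster_t *mcluster =
                rd_kafka_mock_cluster_new(rk, 3 /*brokers*/);

            rd_kafka_mock_broker_set_down(mcluster, -1); /* all brokers */
            rd_kafka_mock_broker_set_rtt(mcluster, -1, 1500 /*ms*/);
            rd_kafka_mock_broker_set_up(mcluster, -1);

            rd_kafka_mock_cluster_destroy(mcluster);
    }
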
diff --git a/src/rdkafka_mock_handlers.c b/src/rdkafka_mock_handlers.c
index eb6e46f1c6..6f7f0a6ffc 100644
--- a/src/rdkafka_mock_handlers.c
+++ b/src/rdkafka_mock_handlers.c
@@ -112,7 +112,8 @@ static int rd_kafka_mock_handle_Produce(rd_kafka_mock_connection_t *mconn,
/* Append to partition log */
if (!err)
err = rd_kafka_mock_partition_log_append(
- mpart, &records, &BaseOffset);
+ mpart, &records, &TransactionalId,
+ &BaseOffset);
/* Response: ErrorCode */
rd_kafka_buf_write_i16(resp, err);
@@ -438,7 +439,8 @@ static int rd_kafka_mock_handle_ListOffsets(rd_kafka_mock_connection_t *mconn,
while (PartitionCnt-- > 0) {
int32_t Partition, CurrentLeaderEpoch;
- int64_t Timestamp, MaxNumOffsets, Offset = -1;
+ int64_t Timestamp, Offset = -1;
+ int32_t MaxNumOffsets;
rd_kafka_mock_partition_t *mpart = NULL;
rd_kafka_resp_err_t err = all_err;
@@ -805,6 +807,10 @@ rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp,
const rd_kafka_mock_topic_t *mtopic,
rd_kafka_resp_err_t err) {
int i;
+ int partition_cnt =
+ (!mtopic || err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+ ? 0
+ : mtopic->partition_cnt;
/* Response: Topics.ErrorCode */
rd_kafka_buf_write_i16(resp, err);
@@ -815,9 +821,9 @@ rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp,
rd_kafka_buf_write_bool(resp, rd_false);
}
/* Response: Topics.#Partitions */
- rd_kafka_buf_write_i32(resp, mtopic ? mtopic->partition_cnt : 0);
+ rd_kafka_buf_write_i32(resp, partition_cnt);
- for (i = 0; mtopic && i < mtopic->partition_cnt; i++) {
+ for (i = 0; mtopic && i < partition_cnt; i++) {
const rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i];
int r;
@@ -938,7 +944,7 @@ static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn,
TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
rd_kafka_mock_buf_write_Metadata_Topic(
resp, rkbuf->rkbuf_reqhdr.ApiVersion, mtopic->name,
- mtopic, RD_KAFKA_RESP_ERR_NO_ERROR);
+ mtopic, mtopic->err);
}
} else if (requested_topics) {
@@ -961,7 +967,7 @@ static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn,
rd_kafka_mock_buf_write_Metadata_Topic(
resp, rkbuf->rkbuf_reqhdr.ApiVersion, rktpar->topic,
- mtopic, err);
+ mtopic, err ? err : mtopic->err);
}
if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8) {
@@ -1441,40 +1447,84 @@ static int rd_kafka_mock_handle_SyncGroup(rd_kafka_mock_connection_t *mconn,
* @brief Generate a unique ProducerID
*/
static const rd_kafka_pid_t
-rd_kafka_mock_pid_new(rd_kafka_mock_cluster_t *mcluster) {
- rd_kafka_pid_t *pid = rd_malloc(sizeof(*pid));
+rd_kafka_mock_pid_new(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId) {
+ size_t tidlen =
+ TransactionalId ? RD_KAFKAP_STR_LEN(TransactionalId) : 0;
+ rd_kafka_mock_pid_t *mpid = rd_malloc(sizeof(*mpid) + tidlen);
rd_kafka_pid_t ret;
- pid->id = rd_jitter(1, 900000) * 1000;
- pid->epoch = 0;
+ mpid->pid.id = rd_jitter(1, 900000) * 1000;
+ mpid->pid.epoch = 0;
+
+ if (tidlen > 0)
+ memcpy(mpid->TransactionalId, TransactionalId->str, tidlen);
+ mpid->TransactionalId[tidlen] = '\0';
mtx_lock(&mcluster->lock);
- rd_list_add(&mcluster->pids, pid);
- ret = *pid;
+ rd_list_add(&mcluster->pids, mpid);
+ ret = mpid->pid;
mtx_unlock(&mcluster->lock);
return ret;
}
+/**
+ * @brief Finds a matching mcluster mock PID for the given \p pid.
+ *
+ * @locks_required mcluster->lock
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
+ const rd_kafka_pid_t pid,
+ rd_kafka_mock_pid_t **mpidp) {
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_mock_pid_t skel = {pid};
+
+ *mpidp = NULL;
+ mpid = rd_list_find(&mcluster->pids, &skel, rd_kafka_mock_pid_cmp_pid);
+
+ if (!mpid)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
+ else if (((TransactionalId != NULL) !=
+ (*mpid->TransactionalId != '\0')) ||
+ (TransactionalId &&
+ rd_kafkap_str_cmp_str(TransactionalId,
+ mpid->TransactionalId)))
+ return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING;
+
+ *mpidp = mpid;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
/**
* @brief Checks if the given pid is known, else returns an error.
*/
static rd_kafka_resp_err_t
rd_kafka_mock_pid_check(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
const rd_kafka_pid_t check_pid) {
- const rd_kafka_pid_t *pid;
+ rd_kafka_mock_pid_t *mpid;
rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
mtx_lock(&mcluster->lock);
- pid = rd_list_find(&mcluster->pids, &check_pid, rd_kafka_pid_cmp_pid);
-
- if (!pid)
- err = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
- else if (check_pid.epoch != pid->epoch)
+ err =
+ rd_kafka_mock_pid_find(mcluster, TransactionalId, check_pid, &mpid);
+ if (!err && check_pid.epoch != mpid->pid.epoch)
err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
mtx_unlock(&mcluster->lock);
+ if (unlikely(err))
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "PID check failed for TransactionalId=%.*s: "
+ "expected %s, not %s: %s",
+ RD_KAFKAP_STR_PR(TransactionalId),
+ mpid ? rd_kafka_pid2str(mpid->pid) : "none",
+ rd_kafka_pid2str(check_pid),
+ rd_kafka_err2name(err));
return err;
}
@@ -1485,23 +1535,26 @@ rd_kafka_mock_pid_check(rd_kafka_mock_cluster_t *mcluster,
*/
static rd_kafka_resp_err_t
rd_kafka_mock_pid_bump(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
rd_kafka_pid_t *current_pid) {
- rd_kafka_pid_t *pid;
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_resp_err_t err;
mtx_lock(&mcluster->lock);
- pid = rd_list_find(&mcluster->pids, current_pid, rd_kafka_pid_cmp_pid);
- if (!pid) {
+ err = rd_kafka_mock_pid_find(mcluster, TransactionalId, *current_pid,
+ &mpid);
+ if (err) {
mtx_unlock(&mcluster->lock);
- return RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
+ return err;
}
- if (current_pid->epoch != pid->epoch) {
+ if (current_pid->epoch != mpid->pid.epoch) {
mtx_unlock(&mcluster->lock);
return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
}
- pid->epoch++;
- *current_pid = *pid;
+ mpid->pid.epoch++;
+ *current_pid = mpid->pid;
mtx_unlock(&mcluster->lock);
rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Bumped PID %s",
@@ -1563,13 +1616,14 @@ rd_kafka_mock_handle_InitProducerId(rd_kafka_mock_connection_t *mconn,
* to bump the epoch (KIP-360).
* Verify that current_pid matches and then
* bump the epoch. */
- err = rd_kafka_mock_pid_bump(mcluster, ¤t_pid);
+ err = rd_kafka_mock_pid_bump(mcluster, &TransactionalId,
+ ¤t_pid);
if (!err)
pid = current_pid;
} else {
/* Generate a new pid */
- pid = rd_kafka_mock_pid_new(mcluster);
+ pid = rd_kafka_mock_pid_new(mcluster, &TransactionalId);
}
}
@@ -1630,7 +1684,8 @@ rd_kafka_mock_handle_AddPartitionsToTxn(rd_kafka_mock_connection_t *mconn,
all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
if (!all_err)
- all_err = rd_kafka_mock_pid_check(mcluster, pid);
+ all_err =
+ rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
while (TopicsCnt-- > 0) {
rd_kafkap_str_t Topic;
@@ -1713,7 +1768,7 @@ rd_kafka_mock_handle_AddOffsetsToTxn(rd_kafka_mock_connection_t *mconn,
err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
if (!err)
- err = rd_kafka_mock_pid_check(mcluster, pid);
+ err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
/* Response: ErrorCode */
rd_kafka_buf_write_i16(resp, err);
@@ -1768,7 +1823,7 @@ rd_kafka_mock_handle_TxnOffsetCommit(rd_kafka_mock_connection_t *mconn,
err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
if (!err)
- err = rd_kafka_mock_pid_check(mcluster, pid);
+ err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
while (TopicsCnt-- > 0) {
rd_kafkap_str_t Topic;
@@ -1861,7 +1916,7 @@ static int rd_kafka_mock_handle_EndTxn(rd_kafka_mock_connection_t *mconn,
err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
if (!err)
- err = rd_kafka_mock_pid_check(mcluster, pid);
+ err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
/* ErrorCode */
rd_kafka_buf_write_i16(resp, err);
diff --git a/src/rdkafka_mock_int.h b/src/rdkafka_mock_int.h
index 1f1179ce80..4b3043fb63 100644
--- a/src/rdkafka_mock_int.h
+++ b/src/rdkafka_mock_int.h
@@ -113,6 +113,62 @@ typedef struct rd_kafka_mock_cgrp_s {
rd_kafka_mock_cgrp_member_t *leader; /**< Elected leader */
} rd_kafka_mock_cgrp_t;
+
+/**
+ * @struct TransactionalId + PID (+ optional sequence state)
+ */
+typedef struct rd_kafka_mock_pid_s {
+ rd_kafka_pid_t pid;
+
+ /* BaseSequence tracking (partition) */
+ int8_t window; /**< increases up to 5 */
+ int8_t lo; /**< Window low bucket: oldest */
+ int8_t hi; /**< Window high bucket: most recent */
+ int32_t seq[5]; /**< Next expected BaseSequence for each bucket */
+
+ char TransactionalId[1]; /**< Allocated after this structure */
+} rd_kafka_mock_pid_t;
+
+/**
+ * @brief rd_kafka_mock_pid_t.pid Pid (not epoch) comparator
+ */
+static RD_UNUSED int rd_kafka_mock_pid_cmp_pid(const void *_a, const void *_b) {
+ const rd_kafka_mock_pid_t *a = _a, *b = _b;
+
+ if (a->pid.id < b->pid.id)
+ return -1;
+ else if (a->pid.id > b->pid.id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * @brief rd_kafka_mock_pid_t.pid TransactionalId,Pid,epoch comparator
+ */
+static RD_UNUSED int rd_kafka_mock_pid_cmp(const void *_a, const void *_b) {
+ const rd_kafka_mock_pid_t *a = _a, *b = _b;
+ int r;
+
+ r = strcmp(a->TransactionalId, b->TransactionalId);
+ if (r)
+ return r;
+
+ if (a->pid.id < b->pid.id)
+ return -1;
+ else if (a->pid.id > b->pid.id)
+ return 1;
+
+ if (a->pid.epoch < b->pid.epoch)
+ return -1;
+ if (a->pid.epoch > b->pid.epoch)
+ return 1;
+
+ return 0;
+}
+
+
+
/**
* @struct A real TCP connection from the client to a mock broker.
*/
@@ -208,6 +264,8 @@ typedef struct rd_kafka_mock_partition_s {
rd_kafka_mock_broker_t **replicas;
int replica_cnt;
+ rd_list_t pidstates; /**< PID states */
+
int32_t follower_id; /**< Preferred replica/follower */
struct rd_kafka_mock_topic_s *topic;
@@ -286,7 +344,7 @@ struct rd_kafka_mock_cluster_s {
TAILQ_HEAD(, rd_kafka_mock_coord_s) coords;
/** Current transactional producer PIDs.
- * Element type is a malloced rd_kafka_pid_t*. */
+ * Element type is a malloced rd_kafka_mock_pid_t*. */
rd_list_t pids;
char *bootstraps; /**< bootstrap.servers */
@@ -384,7 +442,8 @@ rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn,
rd_kafka_resp_err_t
rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
- const rd_kafkap_bytes_t *bytes,
+ const rd_kafkap_bytes_t *records,
+ const rd_kafkap_str_t *TransactionalId,
int64_t *BaseOffset);
@@ -400,6 +459,13 @@ rd_kafka_mock_cluster_ApiVersion_check(const rd_kafka_mock_cluster_t *mcluster,
}
+rd_kafka_resp_err_t
+rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
+ const rd_kafka_pid_t pid,
+ rd_kafka_mock_pid_t **mpidp);
+
+
/**
* @name Mock consumer group (rdkafka_mock_cgrp.c)
* @{
diff --git a/src/rdkafka_msg.c b/src/rdkafka_msg.c
index 9bd2b8d31b..ee0e177379 100644
--- a/src/rdkafka_msg.c
+++ b/src/rdkafka_msg.c
@@ -776,7 +776,7 @@ int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt,
continue;
}
}
- rd_kafka_toppar_enq_msg(rktp, rkm);
+ rd_kafka_toppar_enq_msg(rktp, rkm, now);
if (rd_kafka_is_transactional(rkt->rkt_rk)) {
/* Add partition to transaction */
@@ -796,7 +796,7 @@ int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt,
} else {
/* Single destination partition. */
- rd_kafka_toppar_enq_msg(rktp, rkm);
+ rd_kafka_toppar_enq_msg(rktp, rkm, now);
}
rkmessages[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
@@ -1244,7 +1244,7 @@ int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt,
rkm->rkm_partition = partition;
/* Partition is available: enqueue msg on partition's queue */
- rd_kafka_toppar_enq_msg(rktp_new, rkm);
+ rd_kafka_toppar_enq_msg(rktp_new, rkm, rd_clock());
if (do_lock)
rd_kafka_topic_rdunlock(rkt);
@@ -1667,6 +1667,155 @@ void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb) {
}
+
+/**
+ * @brief Allow queue wakeups after \p abstime, or when the
+ * given \p batch_msg_cnt or \p batch_msg_bytes have been reached.
+ *
+ * @param rkmq Queue to monitor and set wakeup parameters on.
+ * @param dest_rkmq Destination queue used to meter current queue depths
+ * and oldest message. May be the same as \p rkmq but is
+ * typically the rktp_xmit_msgq.
+ * @param next_wakeup If non-NULL: update the caller's next scheduler wakeup
+ * according to the wakeup time calculated by this function.
+ * @param now The current time.
+ * @param linger_us The configured queue linger / batching time.
+ * @param batch_msg_cnt Queue threshold before signalling.
+ * @param batch_msg_bytes Queue threshold before signalling.
+ *
+ * @returns true if the wakeup conditions are already met and messages are ready
+ * to be sent, else false.
+ *
+ * @locks_required rd_kafka_toppar_lock()
+ *
+ *
+ * Producer queue and broker thread wake-up behaviour.
+ *
+ * There are contradicting requirements at play here:
+ * - Latency: queued messages must be batched and sent according to
+ * batch size and linger.ms configuration.
+ * - Wakeups: keep the number of thread wake-ups to a minimum to avoid
+ * high CPU utilization and context switching.
+ *
+ * The message queue (rd_kafka_msgq_t) has functionality for the writer (app)
+ * to wake up the reader (broker thread) when there's a new message added.
+ * This wakeup is done through a combination of cndvar signalling and IO
+ * writes to make sure a thread wakeup is triggered regardless of whether
+ * the broker thread is blocking on cnd_timedwait() or on IO poll.
+ * When the broker thread is woken up it will scan all the partitions it is
+ * the leader for to check if there are messages to be sent - all according
+ * to the configured batch size and linger.ms - and then decide its next
+ * wait time depending on the lowest remaining linger.ms setting of any
+ * partition with messages enqueued.
+ *
+ * This wait time must also be set as a threshold on the message queue, telling
+ * the writer (app) that it must not trigger a wakeup until the wait time
+ * has expired, or the batch sizes have been exceeded.
+ *
+ * The message queue wakeup time is per partition, while the broker thread
+ * wakeup time is the lowest of all its partitions' wakeup times.
+ *
+ * The per-partition wakeup constraints are calculated and set by
+ * rd_kafka_msgq_allow_wakeup_at() which is called from the broker thread's
+ * per-partition handler.
+ * This function is called each time there are changes to the broker-local
+ * partition transmit queue (rktp_xmit_msgq), such as:
+ * - messages are moved from the partition queue (rktp_msgq) to rktp_xmit_msgq
+ * - messages are moved to a ProduceRequest
+ * - messages are timed out from the rktp_xmit_msgq
+ * - the flushing state changed (rd_kafka_flush() is called or returned).
+ *
+ * If none of these things happen, the broker thread will simply read the
+ * last stored wakeup time for each partition and use that for calculating its
+ * minimum wait time.
+ *
+ *
+ * On the writer side, namely the application calling rd_kafka_produce(), the
+ * following checks are performed to see if it may trigger a wakeup when
+ * it adds a new message to the partition queue:
+ * - the current time has reached the wakeup time (e.g., remaining linger.ms
+ * has expired), or
+ * - with the new message(s) being added, either the batch.size or
+ * batch.num.messages thresholds have been exceeded, or
+ * - the application is calling rd_kafka_flush(),
+ * - and no wakeup has been signalled yet. This is critical since it may take
+ *   some time for the broker thread to do its work, so we want to avoid
+ *   flooding it with wakeups. Thus a wakeup is only sent once per
+ *   wakeup period.
+ */
+rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
+ const rd_kafka_msgq_t *dest_rkmq,
+ rd_ts_t *next_wakeup,
+ rd_ts_t now,
+ rd_ts_t linger_us,
+ int32_t batch_msg_cnt,
+ int64_t batch_msg_bytes) {
+ int32_t msg_cnt = rd_kafka_msgq_len(dest_rkmq);
+ int64_t msg_bytes = rd_kafka_msgq_size(dest_rkmq);
+
+ if (RD_KAFKA_MSGQ_EMPTY(dest_rkmq)) {
+ rkmq->rkmq_wakeup.on_first = rd_true;
+ rkmq->rkmq_wakeup.abstime = now + linger_us;
+ /* Leave next_wakeup untouched since the queue is empty */
+ msg_cnt = 0;
+ msg_bytes = 0;
+ } else {
+ const rd_kafka_msg_t *rkm = rd_kafka_msgq_first(dest_rkmq);
+
+ rkmq->rkmq_wakeup.on_first = rd_false;
+
+ if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
+ /* Honour retry.backoff.ms:
+ * wait for backoff to expire */
+ rkmq->rkmq_wakeup.abstime =
+ rkm->rkm_u.producer.ts_backoff;
+ } else {
+ /* Use message's produce() time + linger.ms */
+ rkmq->rkmq_wakeup.abstime =
+ rd_kafka_msg_enq_time(rkm) + linger_us;
+ if (rkmq->rkmq_wakeup.abstime <= now)
+ rkmq->rkmq_wakeup.abstime = now;
+ }
+
+ /* Update the caller's scheduler wakeup time */
+ if (next_wakeup && rkmq->rkmq_wakeup.abstime < *next_wakeup)
+ *next_wakeup = rkmq->rkmq_wakeup.abstime;
+
+ msg_cnt = rd_kafka_msgq_len(dest_rkmq);
+ msg_bytes = rd_kafka_msgq_size(dest_rkmq);
+ }
+
+ /*
+ * If there are more messages or bytes in queue than the batch limits,
+ * or the linger time has been exceeded,
+ * then there is no need for wakeup since the broker thread will
+ * produce those messages as quickly as it can.
+ */
+ if (msg_cnt >= batch_msg_cnt || msg_bytes >= batch_msg_bytes ||
+ (msg_cnt > 0 && now >= rkmq->rkmq_wakeup.abstime)) {
+ /* Prevent further signalling */
+ rkmq->rkmq_wakeup.signalled = rd_true;
+
+ /* Batch is ready */
+ return rd_true;
+ }
+
+        /* If the current msg or byte count is less than the batch limits,
+         * set the queue's wakeup thresholds to the remaining message count
+         * and byte size needed to reach the batch limits.
+         * This is for the case where the producer is waiting for more
+         * messages to accumulate into a batch. The wakeup should only
+         * occur once a threshold is reached or the abstime has expired.
+         */
+ rkmq->rkmq_wakeup.signalled = rd_false;
+ rkmq->rkmq_wakeup.msg_cnt = batch_msg_cnt - msg_cnt;
+ rkmq->rkmq_wakeup.msg_bytes = batch_msg_bytes - msg_bytes;
+
+ return rd_false;
+}
+
+
+
/**
* @brief Verify order (by msgid) in message queue.
* For development use only.
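
The wake-up scheme described in the comment block above boils down to a per-partition decision: build a ProduceRequest now, or sleep until the oldest message has lingered long enough. The following standalone sketch (illustrative only, not part of this diff; all names are made up) models that decision. The real code additionally honours retry.backoff.ms for the oldest message and records the remaining count/size thresholds on the queue so the producing application knows when a wake-up is worthwhile.

/* Standalone model (not librdkafka code) of the wake-up decision made
 * per partition by the broker thread: a batch is ready when a size
 * threshold is hit or the oldest message has waited linger_us,
 * otherwise the thread may sleep until that wake-up time. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct {
        int32_t msg_cnt;       /* messages currently in the xmit queue */
        int64_t msg_bytes;     /* bytes currently in the xmit queue */
        int64_t oldest_enq_us; /* enqueue time of the oldest message */
} queue_state_t;

/* Returns true if a ProduceRequest should be built now, else false,
 * in which case *wakeup_us is the absolute time to sleep until. */
static bool batch_ready(const queue_state_t *q, int64_t now_us,
                        int64_t linger_us, int32_t batch_msg_cnt,
                        int64_t batch_msg_bytes, int64_t *wakeup_us) {
        *wakeup_us = q->oldest_enq_us + linger_us;

        if (q->msg_cnt >= batch_msg_cnt || q->msg_bytes >= batch_msg_bytes)
                return true;  /* batch.num.messages or batch.size reached */
        if (q->msg_cnt > 0 && now_us >= *wakeup_us)
                return true;  /* linger time expired for the oldest message */
        return false;         /* sleep until *wakeup_us or a writer wake-up */
}

int main(void) {
        queue_state_t q = {3, 300, 0 /* oldest message enqueued at t=0 */};
        int64_t wakeup;
        bool ready = batch_ready(&q, 2000 /* now */, 5000 /* linger 5ms */,
                                 10000 /* batch.num.messages */,
                                 1000000 /* batch.size */, &wakeup);
        printf("ready=%d, next wakeup at %lld us\n", ready, (long long)wakeup);
        return 0;
}
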
diff --git a/src/rdkafka_msg.h b/src/rdkafka_msg.h
index 3743dfba25..8546a819e2 100644
--- a/src/rdkafka_msg.h
+++ b/src/rdkafka_msg.h
@@ -194,6 +194,16 @@ typedef struct rd_kafka_msgq_s {
struct rd_kafka_msgs_head_s rkmq_msgs; /* TAILQ_HEAD */
int32_t rkmq_msg_cnt;
int64_t rkmq_msg_bytes;
+ struct {
+ rd_ts_t abstime; /**< Allow wake-ups after this point in time.*/
+ int32_t msg_cnt; /**< Signal wake-up when this message count
+ * is reached. */
+ int64_t msg_bytes; /**< .. or when this byte count is
+ * reached. */
+ rd_bool_t on_first; /**< Wake-up on first message enqueued
+ * regardless of .abstime. */
+ rd_bool_t signalled; /**< Wake-up (already) signalled. */
+ } rkmq_wakeup;
} rd_kafka_msgq_t;
#define RD_KAFKA_MSGQ_INITIALIZER(rkmq) \
@@ -383,6 +393,43 @@ rd_kafka_msgq_first_msgid(const rd_kafka_msgq_t *rkmq) {
}
+
+rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
+ const rd_kafka_msgq_t *dest_rkmq,
+ rd_ts_t *next_wakeup,
+ rd_ts_t now,
+ rd_ts_t linger_us,
+ int32_t batch_msg_cnt,
+ int64_t batch_msg_bytes);
+
+/**
+ * @returns true if msgq may be awoken.
+ */
+
+static RD_INLINE RD_UNUSED rd_bool_t
+rd_kafka_msgq_may_wakeup(const rd_kafka_msgq_t *rkmq, rd_ts_t now) {
+ /* No: Wakeup already signalled */
+ if (rkmq->rkmq_wakeup.signalled)
+ return rd_false;
+
+ /* Yes: Wakeup linger time has expired */
+ if (now >= rkmq->rkmq_wakeup.abstime)
+ return rd_true;
+
+ /* Yes: First message enqueued may trigger wakeup */
+ if (rkmq->rkmq_msg_cnt == 1 && rkmq->rkmq_wakeup.on_first)
+ return rd_true;
+
+ /* Yes: batch.size or batch.num.messages exceeded */
+ if (rkmq->rkmq_msg_cnt >= rkmq->rkmq_wakeup.msg_cnt ||
+ rkmq->rkmq_msg_bytes > rkmq->rkmq_wakeup.msg_bytes)
+ return rd_true;
+
+ /* No */
+ return rd_false;
+}
+
+
/**
* @brief Message ordering comparator using the message id
* number to order messages in ascending order (FIFO).
diff --git a/src/rdkafka_msgset_reader.c b/src/rdkafka_msgset_reader.c
index fdbd114104..02a4c02f85 100644
--- a/src/rdkafka_msgset_reader.c
+++ b/src/rdkafka_msgset_reader.c
@@ -194,6 +194,9 @@ typedef struct rd_kafka_msgset_reader_s {
int msetr_ctrl_cnt; /**< Number of control messages
* or MessageSets received. */
+ int msetr_aborted_cnt; /**< Number of aborted MessageSets
+ * encountered. */
+
const char *msetr_srcname; /**< Optional message source string,
* used in debug logging to
* indicate messages were
@@ -536,7 +539,7 @@ rd_kafka_msgset_reader_msg_v0_1(rd_kafka_msgset_reader_t *msetr) {
struct {
int64_t Offset; /* MessageSet header */
int32_t MessageSize; /* MessageSet header */
- uint32_t Crc;
+ int32_t Crc;
int8_t MagicByte; /* MsgVersion */
int8_t Attributes;
int64_t Timestamp; /* v1 */
@@ -600,7 +603,7 @@ rd_kafka_msgset_reader_msg_v0_1(rd_kafka_msgset_reader_t *msetr) {
calc_crc = rd_slice_crc32(&crc_slice);
rd_dassert(rd_slice_remains(&crc_slice) == 0);
- if (unlikely(hdr.Crc != calc_crc)) {
+ if (unlikely(hdr.Crc != (int32_t)calc_crc)) {
/* Propagate CRC error to application and
* continue with next message. */
rd_kafka_consumer_err(
@@ -984,6 +987,7 @@ rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr) {
msetr->msetr_rkbuf,
rd_slice_remains(
&msetr->msetr_rkbuf->rkbuf_reader));
+ msetr->msetr_aborted_cnt++;
return RD_KAFKA_RESP_ERR_NO_ERROR;
}
}
@@ -1341,9 +1345,18 @@ rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) {
* This means the size limit perhaps was too tight,
* increase it automatically.
* If there was at least one control message there
- * is probably not a size limit and nothing is done. */
+ * is probably not a size limit and nothing is done.
+ * If there were aborted messagesets and no underflow then
+ * there is no error either (#2993).
+ *
+                         * Also, avoid propagating underflow errors, which cause
+ * backoffs, since we'll want to continue fetching the
+ * remaining truncated messages as soon as possible.
+ */
if (msetr->msetr_ctrl_cnt > 0) {
/* Noop */
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
} else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) {
rktp->rktp_fetch_msg_max_bytes *= 2;
@@ -1354,17 +1367,25 @@ rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) {
rktp->rktp_rkt->rkt_topic->str,
rktp->rktp_partition,
rktp->rktp_fetch_msg_max_bytes);
- } else if (!err) {
+
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ } else if (!err && msetr->msetr_aborted_cnt == 0) {
rd_kafka_consumer_err(
&msetr->msetr_rkq, msetr->msetr_broker_id,
RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
msetr->msetr_tver->version, NULL, rktp,
rktp->rktp_offsets.fetch_offset,
"Message at offset %" PRId64
- " "
- "might be too large to fetch, try increasing "
+ " might be too large to fetch, try increasing "
"receive.message.max.bytes",
rktp->rktp_offsets.fetch_offset);
+
+ } else if (msetr->msetr_aborted_cnt > 0) {
+ /* Noop */
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
}
} else {
@@ -1379,21 +1400,20 @@ rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) {
err = RD_KAFKA_RESP_ERR_NO_ERROR;
}
- rd_rkb_dbg(
- msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME",
- "Enqueue %i %smessage(s) (%" PRId64
- " bytes, %d ops) on "
- "%s [%" PRId32
- "] "
- "fetch queue (qlen %d, v%d, last_offset %" PRId64
- ", %d ctrl msgs, %s)",
- msetr->msetr_msgcnt, msetr->msetr_srcname, msetr->msetr_msg_bytes,
- rd_kafka_q_len(&msetr->msetr_rkq), rktp->rktp_rkt->rkt_topic->str,
- rktp->rktp_partition, rd_kafka_q_len(msetr->msetr_par_rkq),
- msetr->msetr_tver->version, last_offset, msetr->msetr_ctrl_cnt,
- msetr->msetr_compression
- ? rd_kafka_compression2str(msetr->msetr_compression)
- : "uncompressed");
+ rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME",
+ "Enqueue %i %smessage(s) (%" PRId64
+ " bytes, %d ops) on %s [%" PRId32
+ "] fetch queue (qlen %d, v%d, last_offset %" PRId64
+ ", %d ctrl msgs, %d aborted msgsets, %s)",
+ msetr->msetr_msgcnt, msetr->msetr_srcname,
+ msetr->msetr_msg_bytes, rd_kafka_q_len(&msetr->msetr_rkq),
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_q_len(msetr->msetr_par_rkq),
+ msetr->msetr_tver->version, last_offset,
+ msetr->msetr_ctrl_cnt, msetr->msetr_aborted_cnt,
+ msetr->msetr_compression
+ ? rd_kafka_compression2str(msetr->msetr_compression)
+ : "uncompressed");
/* Concat all messages&errors onto the parent's queue
* (the partition's fetch queue) */
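
The branches added above can be condensed into a small decision helper (illustrative only, not part of this diff): when a fetch yields no messages, control messages and aborted transactional data are treated as benign, the fetch size is doubled while it is still below the 1 GiB cap, and only a plain empty response at the cap is reported as MSG_SIZE_TOO_LARGE. In every benign branch an __UNDERFLOW error is downgraded to NO_ERROR so the next fetch is issued without a backoff.

/* Illustrative condensation (not librdkafka code) of the classification
 * above for a MessageSet that yielded no messages. */
#include <stdbool.h>

typedef enum {
        ACT_NONE,              /* benign: continue fetching */
        ACT_GROW_FETCH_SIZE,   /* double rktp_fetch_msg_max_bytes */
        ACT_ERR_MSG_TOO_LARGE  /* surface MSG_SIZE_TOO_LARGE to the app */
} empty_fetch_action_t;

empty_fetch_action_t classify_empty_fetch(int ctrl_cnt, int aborted_cnt,
                                          bool had_err,
                                          bool fetch_size_at_max) {
        if (ctrl_cnt > 0)
                return ACT_NONE;            /* only transaction markers seen */
        if (!fetch_size_at_max)
                return ACT_GROW_FETCH_SIZE; /* size limit probably too tight */
        if (!had_err && aborted_cnt == 0)
                return ACT_ERR_MSG_TOO_LARGE;
        return ACT_NONE;                    /* aborted transactional data only */
}
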
diff --git a/src/rdkafka_offset.c b/src/rdkafka_offset.c
index 14f2d4441f..805da2d18b 100644
--- a/src/rdkafka_offset.c
+++ b/src/rdkafka_offset.c
@@ -636,6 +636,7 @@ rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *app_rkt,
int64_t offset) {
rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
rd_kafka_toppar_t *rktp;
+ rd_kafka_resp_err_t err;
/* Find toppar */
rd_kafka_topic_rdlock(rkt);
@@ -645,11 +646,12 @@ rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *app_rkt,
}
rd_kafka_topic_rdunlock(rkt);
- rd_kafka_offset_store0(rktp, offset + 1, 1 /*lock*/);
+ err = rd_kafka_offset_store0(rktp, offset + 1,
+ rd_false /* Don't force */, RD_DO_LOCK);
rd_kafka_toppar_destroy(rktp);
- return RD_KAFKA_RESP_ERR_NO_ERROR;
+ return err;
}
@@ -657,7 +659,8 @@ rd_kafka_resp_err_t
rd_kafka_offsets_store(rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *offsets) {
int i;
- int ok_cnt = 0;
+ int ok_cnt = 0;
+ rd_kafka_resp_err_t last_err = RD_KAFKA_RESP_ERR_NO_ERROR;
if (rk->rk_conf.enable_auto_offset_store)
return RD_KAFKA_RESP_ERR__INVALID_ARG;
@@ -670,19 +673,23 @@ rd_kafka_offsets_store(rd_kafka_t *rk,
rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
if (!rktp) {
rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ last_err = rktpar->err;
continue;
}
- rd_kafka_offset_store0(rktp, rktpar->offset, 1 /*lock*/);
+ rktpar->err = rd_kafka_offset_store0(rktp, rktpar->offset,
+ rd_false /* don't force */,
+ RD_DO_LOCK);
rd_kafka_toppar_destroy(rktp);
- rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
- ok_cnt++;
+ if (rktpar->err)
+ last_err = rktpar->err;
+ else
+ ok_cnt++;
}
- return offsets->cnt > 0 && ok_cnt == 0
- ? RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION
- : RD_KAFKA_RESP_ERR_NO_ERROR;
+ return offsets->cnt > 0 && ok_cnt == 0 ? last_err
+ : RD_KAFKA_RESP_ERR_NO_ERROR;
}
@@ -715,8 +722,9 @@ static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk,
rd_kafka_op_t *rko) {
rd_kafka_toppar_t *rktp = rko->rko_rktp;
rd_kafka_toppar_lock(rktp);
- rd_kafka_offset_reset(rktp, rko->rko_u.offset_reset.offset,
- rko->rko_err, rko->rko_u.offset_reset.reason);
+ rd_kafka_offset_reset(rktp, rko->rko_u.offset_reset.broker_id,
+ rko->rko_u.offset_reset.offset, rko->rko_err,
+ rko->rko_u.offset_reset.reason);
rd_kafka_toppar_unlock(rktp);
return RD_KAFKA_OP_RES_HANDLED;
}
@@ -726,6 +734,7 @@ static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk,
* error, or offset is logical).
*
* @param rktp the toppar
+ * @param broker_id Originating broker, if any, else RD_KAFKA_NODEID_UA.
* @param err_offset a logical offset, or offset corresponding to the error.
* @param err the error, or RD_KAFKA_RESP_ERR_NO_ERROR if offset is logical.
* @param reason a reason string for logging.
@@ -734,6 +743,7 @@ static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk,
* @ocks: toppar_lock() MUST be held
*/
void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
+ int32_t broker_id,
int64_t err_offset,
rd_kafka_resp_err_t err,
const char *reason) {
@@ -744,11 +754,12 @@ void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
rd_kafka_op_t *rko =
rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB);
- rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
- rko->rko_err = err;
- rko->rko_rktp = rd_kafka_toppar_keep(rktp);
- rko->rko_u.offset_reset.offset = err_offset;
- rko->rko_u.offset_reset.reason = rd_strdup(reason);
+ rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
+ rko->rko_err = err;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rko->rko_u.offset_reset.broker_id = broker_id;
+ rko->rko_u.offset_reset.offset = err_offset;
+ rko->rko_u.offset_reset.reason = rd_strdup(reason);
rd_kafka_q_enq(rktp->rktp_ops, rko);
return;
}
@@ -760,10 +771,19 @@ void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
if (offset == RD_KAFKA_OFFSET_INVALID) {
/* Error, auto.offset.reset tells us to error out. */
- rd_kafka_consumer_err(rktp->rktp_fetchq, RD_KAFKA_NODEID_UA,
- RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0,
- NULL, rktp, err_offset, "%s: %s", reason,
- rd_kafka_err2str(err));
+ if (broker_id != RD_KAFKA_NODEID_UA)
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, broker_id,
+ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp,
+ err_offset, "%s: %s (broker %" PRId32 ")", reason,
+ rd_kafka_err2str(err), broker_id);
+ else
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, broker_id,
+ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp,
+ err_offset, "%s: %s", reason,
+ rd_kafka_err2str(err));
+
rd_kafka_toppar_set_fetch_state(rktp,
RD_KAFKA_TOPPAR_FETCH_NONE);
@@ -792,19 +812,21 @@ void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
rd_kafka_dbg(
rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
"%s [%" PRId32
- "]: offset reset (at offset %s) "
+ "]: offset reset (at offset %s, broker %" PRId32
+ ") "
"to %s%s: %s: %s",
rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_offset2str(err_offset), extra,
+ rd_kafka_offset2str(err_offset), broker_id, extra,
rd_kafka_offset2str(offset), reason, rd_kafka_err2str(err));
else
rd_kafka_log(
rktp->rktp_rkt->rkt_rk, LOG_WARNING, "OFFSET",
"%s [%" PRId32
- "]: offset reset (at offset %s) "
+ "]: offset reset (at offset %s, broker %" PRId32
+ ") "
"to %s%s: %s: %s",
rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
- rd_kafka_offset2str(err_offset), extra,
+ rd_kafka_offset2str(err_offset), broker_id, extra,
rd_kafka_offset2str(offset), reason, rd_kafka_err2str(err));
/* Note: If rktp is not delegated to the leader, then low and high
@@ -938,9 +960,9 @@ static void rd_kafka_offset_file_init(rd_kafka_toppar_t *rktp) {
} else {
/* Offset was not usable: perform offset reset logic */
rktp->rktp_committed_offset = RD_KAFKA_OFFSET_INVALID;
- rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_INVALID,
- RD_KAFKA_RESP_ERR__FS,
- "non-readable offset file");
+ rd_kafka_offset_reset(
+ rktp, RD_KAFKA_NODEID_UA, RD_KAFKA_OFFSET_INVALID,
+ RD_KAFKA_RESP_ERR__FS, "non-readable offset file");
}
}
@@ -963,7 +985,7 @@ rd_kafka_offset_broker_term(rd_kafka_toppar_t *rktp) {
static void rd_kafka_offset_broker_init(rd_kafka_toppar_t *rktp) {
if (!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))
return;
- rd_kafka_offset_reset(rktp, RD_KAFKA_OFFSET_STORED,
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, RD_KAFKA_OFFSET_STORED,
RD_KAFKA_RESP_ERR_NO_ERROR,
"query broker for offsets");
}
@@ -1044,7 +1066,7 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp) {
rktp->rktp_stored_offset == RD_KAFKA_OFFSET_INVALID &&
rktp->rktp_offsets_fin.eof_offset > 0)
rd_kafka_offset_store0(rktp, rktp->rktp_offsets_fin.eof_offset,
- 0 /*no lock*/);
+ rd_true /* force */, RD_DONT_LOCK);
/* Commit offset to backing store.
* This might be an async operation. */
diff --git a/src/rdkafka_offset.h b/src/rdkafka_offset.h
index 2db254c28c..c085224cb3 100644
--- a/src/rdkafka_offset.h
+++ b/src/rdkafka_offset.h
@@ -36,19 +36,71 @@ const char *rd_kafka_offset2str(int64_t offset);
/**
- * Stores the offset for the toppar 'rktp'.
- * The actual commit of the offset to backing store is usually
- * performed at a later time (time or threshold based).
+ * @brief Stores the offset for the toppar 'rktp'.
+ * The actual commit of the offset to backing store is usually
+ * performed at a later time (time or threshold based).
+ *
+ * For the high-level consumer (assign()), this function will reject absolute
+ * offsets if the partition is not currently assigned, unless \p force is set.
+ * This check was added to avoid a race condition where an application
+ * would call offsets_store() after the partitions had been revoked, forcing
+ * a future auto-committer on the next assignment to commit this old offset,
+ * overwriting whatever newer offset was committed by another consumer.
+ *
+ * The \p force flag is useful for internal calls to offset_store0() which
+ * do not need the protection described above.
+ *
+ *
+ * There is one situation where the \p force flag is troublesome:
+ * If the application is using any of the consumer batching APIs,
+ * e.g., consume_batch() or the event-based consumption, then it's possible
+ * that while the batch is being accumulated or the application is picking off
+ * messages from the event a rebalance occurs (in the background) which revokes
+ * the current assignment. This revokal will remove all queued messages, but
+ * not the ones the application already has accumulated in the event object.
+ * Enforcing assignment for store in this state is tricky with a bunch of
+ * corner cases, so instead we let those places forcibly store the offset, but
+ * then in assign() we reset the stored offset to .._INVALID, just like we do
+ * on revoke.
+ * Illustrated (with fix):
+ * 1. ev = rd_kafka_queue_poll();
+ * 2. background rebalance revoke unassigns the partition and sets the
+ * stored offset to _INVALID.
+ * 3. application calls message_next(ev) which forcibly sets the
+ * stored offset.
+ * 4. background rebalance assigns the partition again, but forcibly sets
+ * the stored offset to .._INVALID to provide a clean state.
+ *
+ * @param offset Offset to set, may be an absolute offset or .._INVALID.
+ * @param force Forcibly set \p offset regardless of assignment state.
+ * @param do_lock Whether to lock the \p rktp or not (already locked by caller).
*
* See head of rdkafka_offset.c for more information.
+ *
+ * @returns RD_KAFKA_RESP_ERR__STATE if the partition is not currently assigned,
+ * unless \p force is set.
*/
-static RD_INLINE RD_UNUSED void
-rd_kafka_offset_store0(rd_kafka_toppar_t *rktp, int64_t offset, int lock) {
- if (lock)
+static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
+rd_kafka_offset_store0(rd_kafka_toppar_t *rktp,
+ int64_t offset,
+ rd_bool_t force,
+ rd_dolock_t do_lock) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (do_lock)
rd_kafka_toppar_lock(rktp);
- rktp->rktp_stored_offset = offset;
- if (lock)
+
+ if (unlikely(!force && !RD_KAFKA_OFFSET_IS_LOGICAL(offset) &&
+ !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED) &&
+ !rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk)))
+ err = RD_KAFKA_RESP_ERR__STATE;
+ else
+ rktp->rktp_stored_offset = offset;
+
+ if (do_lock)
rd_kafka_toppar_unlock(rktp);
+
+ return err;
}
rd_kafka_resp_err_t
@@ -62,6 +114,7 @@ rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp);
void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp);
void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
+ int32_t broker_id,
int64_t err_offset,
rd_kafka_resp_err_t err,
const char *reason);
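
From the application's point of view the stricter store behaviour surfaces through the return value of the public rd_kafka_offset_store()/rd_kafka_offsets_store() calls. A hedged application-side sketch (the topic name and partition are placeholders, and enable.auto.offset.store=false is assumed since the call otherwise returns RD_KAFKA_RESP_ERR__INVALID_ARG):

/* Application-side sketch of handling the new __STATE error when storing
 * offsets for a partition that is no longer assigned. */
#include <librdkafka/rdkafka.h>
#include <stdio.h>

void store_processed_offset(rd_kafka_t *rk, int64_t next_offset) {
        rd_kafka_topic_partition_list_t *offsets =
            rd_kafka_topic_partition_list_new(1);
        rd_kafka_resp_err_t err;

        rd_kafka_topic_partition_list_add(offsets, "mytopic", 0)->offset =
            next_offset;

        err = rd_kafka_offsets_store(rk, offsets);
        if (err == RD_KAFKA_RESP_ERR__STATE)
                /* The partition was revoked between consuming the message
                 * and storing its offset: the store is rejected rather than
                 * risking overwriting a newer offset committed by another
                 * group member. */
                fprintf(stderr, "offset store skipped: partition not assigned\n");
        else if (err)
                fprintf(stderr, "offset store failed: %s\n",
                        rd_kafka_err2str(err));

        rd_kafka_topic_partition_list_destroy(offsets);
}
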
diff --git a/src/rdkafka_op.c b/src/rdkafka_op.c
index 14cfc3b927..d3cee13bbe 100644
--- a/src/rdkafka_op.c
+++ b/src/rdkafka_op.c
@@ -84,6 +84,9 @@ const char *rd_kafka_op2str(rd_kafka_op_type_t type) {
[RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS",
[RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] =
"REPLY:DELETECONSUMERGROUPOFFSETS",
+ [RD_KAFKA_OP_CREATEACLS] = "REPLY:CREATEACLS",
+ [RD_KAFKA_OP_DESCRIBEACLS] = "REPLY:DESCRIBEACLS",
+ [RD_KAFKA_OP_DELETEACLS] = "REPLY:DELETEACLS",
[RD_KAFKA_OP_ADMIN_FANOUT] = "REPLY:ADMIN_FANOUT",
[RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT",
[RD_KAFKA_OP_PURGE] = "REPLY:PURGE",
@@ -224,6 +227,9 @@ rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type) {
[RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request),
[RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] =
sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_CREATEACLS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DESCRIBEACLS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DELETEACLS] = sizeof(rko->rko_u.admin_request),
[RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request),
[RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result),
[RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge),
@@ -373,6 +379,9 @@ void rd_kafka_op_destroy(rd_kafka_op_t *rko) {
case RD_KAFKA_OP_DELETERECORDS:
case RD_KAFKA_OP_DELETEGROUPS:
case RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS:
+ case RD_KAFKA_OP_CREATEACLS:
+ case RD_KAFKA_OP_DESCRIBEACLS:
+ case RD_KAFKA_OP_DELETEACLS:
rd_kafka_replyq_destroy(&rko->rko_u.admin_request.replyq);
rd_list_destroy(&rko->rko_u.admin_request.args);
rd_assert(!rko->rko_u.admin_request.fanout_parent);
@@ -380,6 +389,7 @@ void rd_kafka_op_destroy(rd_kafka_op_t *rko) {
break;
case RD_KAFKA_OP_ADMIN_RESULT:
+ rd_list_destroy(&rko->rko_u.admin_result.args);
rd_list_destroy(&rko->rko_u.admin_result.results);
RD_IF_FREE(rko->rko_u.admin_result.errstr, rd_free);
rd_assert(!rko->rko_u.admin_result.fanout_parent);
@@ -889,6 +899,8 @@ void rd_kafka_op_offset_store(rd_kafka_t *rk, rd_kafka_op_t *rko) {
rd_kafka_toppar_lock(rktp);
rktp->rktp_app_offset = offset;
if (rk->rk_conf.enable_auto_offset_store)
- rd_kafka_offset_store0(rktp, offset, 0 /*no lock*/);
+ rd_kafka_offset_store0(rktp, offset,
+ /* force: ignore assignment state */
+ rd_true, RD_DONT_LOCK);
rd_kafka_toppar_unlock(rktp);
}
diff --git a/src/rdkafka_op.h b/src/rdkafka_op.h
index a336470076..bf6886c4f0 100644
--- a/src/rdkafka_op.h
+++ b/src/rdkafka_op.h
@@ -135,10 +135,13 @@ typedef enum {
RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, /**< Admin:
* DeleteConsumerGroupOffsets
* u.admin_request */
- RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */
- RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */
- RD_KAFKA_OP_PURGE, /**< Purge queues */
- RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */
+ RD_KAFKA_OP_CREATEACLS, /**< Admin: CreateAcls: u.admin_request*/
+ RD_KAFKA_OP_DESCRIBEACLS, /**< Admin: DescribeAcls: u.admin_request*/
+ RD_KAFKA_OP_DELETEACLS, /**< Admin: DeleteAcls: u.admin_request*/
+ RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */
+ RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */
+ RD_KAFKA_OP_PURGE, /**< Purge queues */
+ RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */
RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */
RD_KAFKA_OP_MOCK, /**< Mock cluster command */
RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */
@@ -374,6 +377,7 @@ struct rd_kafka_op_s {
struct {
int64_t offset;
+ int32_t broker_id; /**< Originating broker, or -1 */
char *reason;
} offset_reset;
diff --git a/src/rdkafka_partition.c b/src/rdkafka_partition.c
index d86f6dd5f6..a0cb99d046 100644
--- a/src/rdkafka_partition.c
+++ b/src/rdkafka_partition.c
@@ -670,8 +670,9 @@ void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp) {
/**
* Append message at tail of 'rktp' message queue.
*/
-void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) {
- int queue_len;
+void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
+ rd_kafka_msg_t *rkm,
+ rd_ts_t now) {
rd_kafka_q_t *wakeup_q = NULL;
rd_kafka_toppar_lock(rktp);
@@ -683,18 +684,22 @@ void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm) {
if (rktp->rktp_partition == RD_KAFKA_PARTITION_UA ||
rktp->rktp_rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO) {
/* No need for enq_sorted(), this is the oldest message. */
- queue_len = rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm);
+ rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm);
} else {
- queue_len = rd_kafka_msgq_enq_sorted(rktp->rktp_rkt,
- &rktp->rktp_msgq, rkm);
+ rd_kafka_msgq_enq_sorted(rktp->rktp_rkt, &rktp->rktp_msgq, rkm);
}
- if (unlikely(queue_len == 1 && (wakeup_q = rktp->rktp_msgq_wakeup_q)))
+ if (unlikely(rktp->rktp_partition != RD_KAFKA_PARTITION_UA &&
+ rd_kafka_msgq_may_wakeup(&rktp->rktp_msgq, now) &&
+ (wakeup_q = rktp->rktp_msgq_wakeup_q))) {
+ /* Wake-up broker thread */
+ rktp->rktp_msgq.rkmq_wakeup.signalled = rd_true;
rd_kafka_q_keep(wakeup_q);
+ }
rd_kafka_toppar_unlock(rktp);
- if (wakeup_q) {
+ if (unlikely(wakeup_q != NULL)) {
rd_kafka_q_yield(wakeup_q);
rd_kafka_q_destroy(wakeup_q);
}
@@ -1209,8 +1214,8 @@ void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
* See issue #2105. */
rktp->rktp_next_offset = Offset;
- rd_kafka_offset_reset(rktp, Offset, RD_KAFKA_RESP_ERR_NO_ERROR,
- "update");
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, Offset,
+ RD_KAFKA_RESP_ERR_NO_ERROR, "update");
return;
}
@@ -1244,7 +1249,7 @@ void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
/* Wake-up broker thread which might be idling on IO */
if (rktp->rktp_broker)
- rd_kafka_broker_wakeup(rktp->rktp_broker);
+ rd_kafka_broker_wakeup(rktp->rktp_broker, "ready to fetch");
}
@@ -1376,8 +1381,8 @@ static void rd_kafka_toppar_handle_Offset(rd_kafka_t *rk,
/* Permanent error. Trigger auto.offset.reset policy
* and signal error back to application. */
- rd_kafka_offset_reset(rktp, rktp->rktp_query_offset,
- err,
+ rd_kafka_offset_reset(rktp, rkb->rkb_nodeid,
+ rktp->rktp_query_offset, err,
"failed to query logical offset");
rd_kafka_consumer_err(
@@ -1608,7 +1613,7 @@ static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp,
rd_kafka_offset_store_init(rktp);
} else if (offset == RD_KAFKA_OFFSET_INVALID) {
- rd_kafka_offset_reset(rktp, offset,
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, offset,
RD_KAFKA_RESP_ERR__NO_OFFSET,
"no previously committed offset "
"available");
@@ -1620,7 +1625,8 @@ static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp,
/* Wake-up broker thread which might be idling on IO */
if (rktp->rktp_broker)
- rd_kafka_broker_wakeup(rktp->rktp_broker);
+ rd_kafka_broker_wakeup(rktp->rktp_broker,
+ "fetch start");
}
rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID;
@@ -1772,7 +1778,7 @@ void rd_kafka_toppar_seek(rd_kafka_toppar_t *rktp,
/* Wake-up broker thread which might be idling on IO */
if (rktp->rktp_broker)
- rd_kafka_broker_wakeup(rktp->rktp_broker);
+ rd_kafka_broker_wakeup(rktp->rktp_broker, "seek done");
}
/* Signal back to caller thread that seek has commenced, or err */
@@ -2237,7 +2243,7 @@ static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
if (offset >= 0)
rd_kafka_toppar_next_offset_handle(rktp, offset);
else
- rd_kafka_offset_reset(rktp, offset,
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, offset,
RD_KAFKA_RESP_ERR__NO_OFFSET,
"no previously committed offset "
"available");
diff --git a/src/rdkafka_partition.h b/src/rdkafka_partition.h
index 6e751ecd31..c51e666be4 100644
--- a/src/rdkafka_partition.h
+++ b/src/rdkafka_partition.h
@@ -152,8 +152,8 @@ struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */
* base msgid.
* When a new epoch is
* acquired, or on transaction
- * abort, the base_seq is set to the
- * current rktp_msgid so that
+ * abort, the base_seq is set to
+ * the current rktp_msgid so that
* sub-sequent produce
* requests will have
* a sequence number series
@@ -165,8 +165,9 @@ struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */
* Used when draining outstanding
* issues.
* This value will be the same
- * as next_ack_seq until a drainable
- * error occurs, in which case it
+ * as next_ack_seq until a
+ * drainable error occurs,
+ * in which case it
* will advance past next_ack_seq.
* next_ack_seq can never be larger
* than next_err_seq.
@@ -343,6 +344,9 @@ struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */
#define RD_KAFKA_TOPPAR_F_ON_DESP 0x400 /**< On rkt_desp list */
#define RD_KAFKA_TOPPAR_F_ON_CGRP 0x800 /**< On rkcg_toppars list */
#define RD_KAFKA_TOPPAR_F_ON_RKB 0x1000 /**< On rkb_toppars list */
+#define RD_KAFKA_TOPPAR_F_ASSIGNED \
+ 0x2000 /**< Toppar is part of the consumer \
+ * assignment. */
/*
* Timers
@@ -445,7 +449,9 @@ rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt,
void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp);
void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state);
void rd_kafka_toppar_insert_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
-void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
+void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
+ rd_kafka_msg_t *rkm,
+ rd_ts_t now);
int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq,
rd_kafka_msgq_t *srcq,
int incr_retry,
diff --git a/src/rdkafka_proto.h b/src/rdkafka_proto.h
index 419a4640f2..f5ae9ed753 100644
--- a/src/rdkafka_proto.h
+++ b/src/rdkafka_proto.h
@@ -542,11 +542,15 @@ typedef struct rd_kafka_buf_s rd_kafka_buf_t;
/* Byte offsets for MessageSet fields */
#define RD_KAFKAP_MSGSET_V2_OF_Length (8)
+#define RD_KAFKAP_MSGSET_V2_OF_MagicByte (8 + 4 + 4)
#define RD_KAFKAP_MSGSET_V2_OF_CRC (8 + 4 + 4 + 1)
#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8 + 4 + 4 + 1 + 4)
#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8 + 4 + 4 + 1 + 4 + 2)
#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4)
#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8)
+#define RD_KAFKAP_MSGSET_V2_OF_ProducerId (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8)
+#define RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch \
+ (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8)
#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence \
(8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2)
#define RD_KAFKAP_MSGSET_V2_OF_RecordCount \
@@ -599,21 +603,6 @@ static RD_UNUSED int rd_kafka_pid_cmp(const void *_a, const void *_b) {
}
-/**
- * @brief Pid (not epoch) comparator
- */
-static RD_UNUSED int rd_kafka_pid_cmp_pid(const void *_a, const void *_b) {
- const rd_kafka_pid_t *a = _a, *b = _b;
-
- if (a->id < b->id)
- return -1;
- else if (a->id > b->id)
- return 1;
-
- return 0;
-}
-
-
/**
* @returns the string representation of a PID in a thread-safe
* static buffer.
diff --git a/src/rdkafka_queue.h b/src/rdkafka_queue.h
index 2356ade603..0d50f58703 100644
--- a/src/rdkafka_queue.h
+++ b/src/rdkafka_queue.h
@@ -322,8 +322,8 @@ static RD_INLINE RD_UNUSED void rd_kafka_q_io_event(rd_kafka_q_t *rkq) {
/* Write wake-up event to socket.
* Ignore errors, not much to do anyway. */
- if (rd_write(rkq->rkq_qio->fd, rkq->rkq_qio->payload,
- (int)rkq->rkq_qio->size) == -1)
+ if (rd_socket_write(rkq->rkq_qio->fd, rkq->rkq_qio->payload,
+ (int)rkq->rkq_qio->size) == -1)
;
}
diff --git a/src/rdkafka_request.c b/src/rdkafka_request.c
index b4bc684302..e493ffebd0 100644
--- a/src/rdkafka_request.c
+++ b/src/rdkafka_request.c
@@ -4063,6 +4063,360 @@ rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb,
return RD_KAFKA_RESP_ERR_NO_ERROR;
}
+/**
+ * @brief Returns the request size needed to send a specific AclBinding
+ * specified in \p acl, using the ApiVersion provided in
+ * \p ApiVersion.
+ *
+ * @returns the request size in bytes (size_t).
+ */
+static RD_INLINE size_t
+rd_kafka_AclBinding_request_size(const rd_kafka_AclBinding_t *acl,
+ int ApiVersion) {
+ return 1 + 2 + (acl->name ? strlen(acl->name) : 0) + 2 +
+ (acl->principal ? strlen(acl->principal) : 0) + 2 +
+ (acl->host ? strlen(acl->host) : 0) + 1 + 1 +
+ (ApiVersion > 0 ? 1 : 0);
+}
+
+/**
+ * @brief Construct and send CreateAclsRequest to \p rkb
+ * with the acls (AclBinding_t*) in \p new_acls, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *new_acls /*(AclBinding_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+ int i;
+ size_t len;
+ int op_timeout;
+ rd_kafka_AclBinding_t *new_acl;
+
+ if (rd_list_cnt(new_acls) == 0) {
+ rd_snprintf(errstr, errstr_size, "No acls to create");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_CreateAcls, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker version >= 0.11.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ if (ApiVersion == 0) {
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ if (new_acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL) {
+ rd_snprintf(errstr, errstr_size,
+ "Broker only supports LITERAL "
+ "resource pattern types");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+ } else {
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ if (new_acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
+ new_acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED) {
+ rd_snprintf(errstr, errstr_size,
+ "Only LITERAL and PREFIXED "
+ "resource patterns are supported "
+ "when creating ACLs");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+ }
+
+ len = 4;
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ len += rd_kafka_AclBinding_request_size(new_acl, ApiVersion);
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateAcls, 1, len);
+
+ /* #acls */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_acls));
+
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ rd_kafka_buf_write_i8(rkbuf, new_acl->restype);
+
+ rd_kafka_buf_write_str(rkbuf, new_acl->name, -1);
+
+ if (ApiVersion >= 1) {
+ rd_kafka_buf_write_i8(rkbuf,
+ new_acl->resource_pattern_type);
+ }
+
+ rd_kafka_buf_write_str(rkbuf, new_acl->principal, -1);
+
+ rd_kafka_buf_write_str(rkbuf, new_acl->host, -1);
+
+ rd_kafka_buf_write_i8(rkbuf, new_acl->operation);
+
+ rd_kafka_buf_write_i8(rkbuf, new_acl->permission_type);
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Construct and send DescribeAclsRequest to \p rkb
+ * with the acls (AclBinding_t*) in \p acls, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t rd_kafka_DescribeAclsRequest(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *acls /*(rd_kafka_AclBindingFilter_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ const rd_kafka_AclBindingFilter_t *acl;
+ int op_timeout;
+
+ if (rd_list_cnt(acls) == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "No acl binding filters specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+ if (rd_list_cnt(acls) > 1) {
+ rd_snprintf(errstr, errstr_size,
+ "Too many acl binding filters specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ acl = rd_list_elem(acls, 0);
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DescribeAcls, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker version >= 0.11.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ if (ApiVersion == 0) {
+ if (acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
+ acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_ANY) {
+ rd_snprintf(errstr, errstr_size,
+ "Broker only supports LITERAL and ANY "
+ "resource pattern types");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ } else {
+ if (acl->resource_pattern_type ==
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) {
+ rd_snprintf(errstr, errstr_size,
+ "Filter contains UNKNOWN elements");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_DescribeAcls, 1,
+ rd_kafka_AclBinding_request_size(acl, ApiVersion));
+
+ /* resource_type */
+ rd_kafka_buf_write_i8(rkbuf, acl->restype);
+
+ /* resource_name filter */
+ rd_kafka_buf_write_str(rkbuf, acl->name, -1);
+
+ if (ApiVersion > 0) {
+ /* resource_pattern_type (rd_kafka_ResourcePatternType_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->resource_pattern_type);
+ }
+
+ /* principal filter */
+ rd_kafka_buf_write_str(rkbuf, acl->principal, -1);
+
+ /* host filter */
+ rd_kafka_buf_write_str(rkbuf, acl->host, -1);
+
+ /* operation (rd_kafka_AclOperation_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->operation);
+
+ /* permission type (rd_kafka_AclPermissionType_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->permission_type);
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Construct and send DeleteAclsRequest to \p rkb
+ * with the acl filters (AclBindingFilter_t*) in \p del_acls, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_acls /*(AclBindingFilter_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ const rd_kafka_AclBindingFilter_t *acl;
+ int op_timeout;
+ int i;
+ size_t len;
+
+ if (rd_list_cnt(del_acls) == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "No acl binding filters specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DeleteAcls, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker version >= 0.11.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ len = 4;
+
+ RD_LIST_FOREACH(acl, del_acls, i) {
+ if (ApiVersion == 0) {
+ if (acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
+ acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_ANY) {
+ rd_snprintf(errstr, errstr_size,
+ "Broker only supports LITERAL "
+ "and ANY resource pattern types");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ } else {
+ if (acl->resource_pattern_type ==
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) {
+ rd_snprintf(errstr, errstr_size,
+ "Filter contains UNKNOWN elements");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+
+ len += rd_kafka_AclBinding_request_size(acl, ApiVersion);
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteAcls, 1, len);
+
+ /* #acls */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_acls));
+
+ RD_LIST_FOREACH(acl, del_acls, i) {
+ /* resource_type */
+ rd_kafka_buf_write_i8(rkbuf, acl->restype);
+
+ /* resource_name filter */
+ rd_kafka_buf_write_str(rkbuf, acl->name, -1);
+
+ if (ApiVersion > 0) {
+ /* resource_pattern_type
+ * (rd_kafka_ResourcePatternType_t) */
+ rd_kafka_buf_write_i8(rkbuf,
+ acl->resource_pattern_type);
+ }
+
+ /* principal filter */
+ rd_kafka_buf_write_str(rkbuf, acl->principal, -1);
+
+ /* host filter */
+ rd_kafka_buf_write_str(rkbuf, acl->host, -1);
+
+ /* operation (rd_kafka_AclOperation_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->operation);
+
+ /* permission type (rd_kafka_AclPermissionType_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->permission_type);
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
/**
* @brief Parses and handles an InitProducerId reply.
diff --git a/src/rdkafka_request.h b/src/rdkafka_request.h
index 64f6211681..1c2675d51b 100644
--- a/src/rdkafka_request.h
+++ b/src/rdkafka_request.h
@@ -348,6 +348,13 @@ rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb,
rd_kafka_resp_cb_t *resp_cb,
void *opaque);
+void rd_kafka_handle_InitProducerId(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
rd_kafka_resp_err_t
rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb,
const char *transactional_id,
@@ -383,5 +390,35 @@ rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb,
rd_kafka_resp_cb_t *resp_cb,
void *opaque);
+rd_kafka_resp_err_t
+rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *new_acls /*(AclBinding_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_DescribeAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *acls /*(AclBinding*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_acls /*(AclBindingFilter*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
#endif /* _RDKAFKA_REQUEST_H_ */
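
For context, a minimal application-level sketch of driving the new CreateAcls request path end to end (hedged: the principal, host and topic values are placeholders, and result parsing is reduced to checking the event-level error):

/* Sketch of creating a single ACL with the new Admin API. */
#include <librdkafka/rdkafka.h>
#include <stdio.h>

void create_read_acl(rd_kafka_t *rk) {
        char errstr[512];
        rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
        rd_kafka_event_t *rkev;
        rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
            RD_KAFKA_RESOURCE_TOPIC, "mytopic",
            RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:alice", "*",
            RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
            errstr, sizeof(errstr));

        if (!acl) {
                fprintf(stderr, "AclBinding_new() failed: %s\n", errstr);
                rd_kafka_queue_destroy(q);
                return;
        }

        rd_kafka_CreateAcls(rk, &acl, 1, NULL /* default options */, q);

        rkev = rd_kafka_queue_poll(q, 10 * 1000);
        if (!rkev) {
                fprintf(stderr, "CreateAcls timed out\n");
        } else {
                if (rd_kafka_event_error(rkev))
                        fprintf(stderr, "CreateAcls failed: %s\n",
                                rd_kafka_event_error_string(rkev));
                rd_kafka_event_destroy(rkev);
        }

        rd_kafka_AclBinding_destroy(acl);
        rd_kafka_queue_destroy(q);
}
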
diff --git a/src/rdkafka_sasl_aws_msk_iam.c b/src/rdkafka_sasl_aws_msk_iam.c
index 78cf7151c1..7b012287e4 100644
--- a/src/rdkafka_sasl_aws_msk_iam.c
+++ b/src/rdkafka_sasl_aws_msk_iam.c
@@ -211,7 +211,8 @@ rd_kafka_aws_msk_iam_set_credential (rd_kafka_t *rk,
rd_kafka_dbg(rk, SECURITY, "BRKMAIN",
"Waking up waiting broker threads after "
"setting AWS_MSK_IAM credential");
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT,
+ "AWS_MSK_IAM credential refresh");
return RD_KAFKA_RESP_ERR_NO_ERROR;
}
diff --git a/src/rdkafka_sasl_cyrus.c b/src/rdkafka_sasl_cyrus.c
index 04f1ac9415..6e241bb708 100644
--- a/src/rdkafka_sasl_cyrus.c
+++ b/src/rdkafka_sasl_cyrus.c
@@ -238,7 +238,8 @@ static int rd_kafka_sasl_cyrus_kinit_refresh(rd_kafka_t *rk) {
rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
"First kinit command finished: waking up "
"broker threads");
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "Kerberos ticket refresh");
}
if (r == -1) {
diff --git a/src/rdkafka_sasl_oauthbearer.c b/src/rdkafka_sasl_oauthbearer.c
index 5ec3b34d50..95108fec79 100644
--- a/src/rdkafka_sasl_oauthbearer.c
+++ b/src/rdkafka_sasl_oauthbearer.c
@@ -36,6 +36,9 @@
#include
#include "rdunittest.h"
+#if WITH_CURL
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+#endif
/**
@@ -439,7 +442,8 @@ rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk,
rd_kafka_dbg(rk, SECURITY, "BRKMAIN",
"Waking up waiting broker threads after "
"setting OAUTHBEARER token");
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT,
+ "OAUTHBEARER token update");
return RD_KAFKA_RESP_ERR_NO_ERROR;
}
@@ -1321,17 +1325,15 @@ static int rd_kafka_sasl_oauthbearer_init(rd_kafka_t *rk,
handle->callback_q = rd_kafka_q_keep(rk->rk_rep);
}
+#if WITH_CURL
if (rk->rk_conf.sasl.oauthbearer.method ==
RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
-#if FIXME /************************ FIXME when .._oidc.c is added ****/
rk->rk_conf.sasl.oauthbearer.token_refresh_cb ==
- rd_kafka_sasl_oauthbearer_oidc_token_refresh_cb
-#else
- 1
-#endif
- ) /* move this paren up on the .._refresh_cb
- * line when FIXME is fixed. */
+ rd_kafka_oidc_token_refresh_cb) {
handle->internal_refresh = rd_true;
+ rd_kafka_sasl_background_callbacks_enable(rk);
+ }
+#endif
/* Otherwise enqueue a refresh callback for the application. */
rd_kafka_oauthbearer_enqueue_token_refresh(handle);
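
The OIDC path enabled above is driven purely by configuration: once sasl.oauthbearer.method is set to "oidc" together with the client credentials and token endpoint, the internal rd_kafka_oidc_token_refresh_cb is installed and runs on the background thread, so the application never has to call rd_kafka_oauthbearer_set_token() itself. A hedged configuration sketch (the endpoint, client id/secret and scope values are placeholders):

/* Configuration sketch for the built-in OIDC token retrieval. */
#include <librdkafka/rdkafka.h>
#include <stdio.h>

rd_kafka_conf_t *make_oidc_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        const char *props[][2] = {
            {"bootstrap.servers", "broker:9093"},
            {"security.protocol", "SASL_SSL"},
            {"sasl.mechanisms", "OAUTHBEARER"},
            {"sasl.oauthbearer.method", "oidc"},
            {"sasl.oauthbearer.token.endpoint.url",
             "https://example.invalid/oauth2/token"},
            {"sasl.oauthbearer.client.id", "my-client-id"},
            {"sasl.oauthbearer.client.secret", "my-client-secret"},
            {"sasl.oauthbearer.scope", "kafka"},
        };
        size_t i;

        for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                if (rd_kafka_conf_set(conf, props[i][0], props[i][1], errstr,
                                      sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%s: %s\n", props[i][0], errstr);
                        rd_kafka_conf_destroy(conf);
                        return NULL;
                }
        }
        return conf;
}
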
diff --git a/src/rdkafka_sasl_oauthbearer_oidc.c b/src/rdkafka_sasl_oauthbearer_oidc.c
new file mode 100644
index 0000000000..459af2623c
--- /dev/null
+++ b/src/rdkafka_sasl_oauthbearer_oidc.c
@@ -0,0 +1,523 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2021 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Builtin SASL OAUTHBEARER OIDC support
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_sasl_int.h"
+#include "rdunittest.h"
+#include "cJSON.h"
+#include
+#include "rdhttp.h"
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+
+
+/**
+ * @brief Base64 encode binary input \p in, and write base64-encoded string
+ * and it's size to \p out
+ */
+static void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out) {
+ size_t max_len;
+
+ max_len = (((in->size + 2) / 3) * 4) + 1;
+ out->ptr = rd_malloc(max_len);
+ rd_assert(out->ptr);
+
+ out->size = EVP_EncodeBlock((uint8_t *)out->ptr, (uint8_t *)in->ptr,
+ (int)in->size);
+
+ rd_assert(out->size <= max_len);
+ out->ptr[out->size] = 0;
+}
+
+
+/**
+ * @brief Generate Authorization field for HTTP header.
+ *        The field contains a base64-encoded string which
+ *        is generated from \p client_id and \p client_secret.
+ *
+ * @returns The Authorization header field.
+ *
+ * @locality Any thread.
+ */
+static char *rd_kafka_oidc_build_auth_header(const char *client_id,
+ const char *client_secret) {
+
+ rd_chariov_t client_authorization_in;
+ rd_chariov_t client_authorization_out;
+
+ size_t authorization_base64_header_size;
+ char *authorization_base64_header;
+
+ client_authorization_in.size =
+ strlen(client_id) + strlen(client_secret) + 2;
+ client_authorization_in.ptr = rd_malloc(client_authorization_in.size);
+ rd_snprintf(client_authorization_in.ptr, client_authorization_in.size,
+ "%s:%s", client_id, client_secret);
+
+ client_authorization_in.size--;
+ rd_base64_encode(&client_authorization_in, &client_authorization_out);
+
+ authorization_base64_header_size =
+ strlen("Authorization: Basic ") + client_authorization_out.size + 1;
+ authorization_base64_header =
+ rd_malloc(authorization_base64_header_size);
+ rd_snprintf(authorization_base64_header,
+ authorization_base64_header_size, "Authorization: Basic %s",
+ client_authorization_out.ptr);
+
+ rd_free(client_authorization_in.ptr);
+ rd_free(client_authorization_out.ptr);
+ return authorization_base64_header;
+}
+
+
+/**
+ * @brief Build headers for HTTP(S) requests based on \p client_id
+ * and \p client_secret. The result will be returned in \p *headersp.
+ *
+ * @locality Any thread.
+ */
+static void rd_kafka_oidc_build_headers(const char *client_id,
+ const char *client_secret,
+ struct curl_slist **headersp) {
+ char *authorization_base64_header;
+
+ authorization_base64_header =
+ rd_kafka_oidc_build_auth_header(client_id, client_secret);
+
+ *headersp = curl_slist_append(*headersp, "Accept: application/json");
+ *headersp = curl_slist_append(*headersp, authorization_base64_header);
+
+ *headersp = curl_slist_append(
+ *headersp, "Content-Type: application/x-www-form-urlencoded");
+
+ rd_free(authorization_base64_header);
+}
+
+/**
+ * @brief The format of JWT is Header.Payload.Signature.
+ * Extract and decode payloads from JWT \p src.
+ * The decoded payloads will be returned in \p *bufplainp.
+ *
+ * @returns NULL on success, else an error string describing the failure.
+ */
+static const char *rd_kafka_jwt_b64_decode_payload(const char *src,
+ char **bufplainp) {
+ char *converted_src;
+ char *payload = NULL;
+
+ const char *errstr = NULL;
+
+ int i, padding, len;
+
+ int payload_len;
+ int nbytesdecoded;
+
+ int payloads_start = 0;
+ int payloads_end = 0;
+
+ len = (int)strlen(src);
+ converted_src = rd_malloc(len + 4);
+
+ for (i = 0; i < len; i++) {
+ switch (src[i]) {
+ case '-':
+ converted_src[i] = '+';
+ break;
+
+ case '_':
+ converted_src[i] = '/';
+ break;
+
+ case '.':
+ if (payloads_start == 0)
+ payloads_start = i + 1;
+ else {
+ if (payloads_end > 0) {
+ errstr =
+ "The token is invalid with more "
+ "than 2 delimiters";
+ goto done;
+ }
+ payloads_end = i;
+ }
+ /* FALLTHRU */
+
+ default:
+ converted_src[i] = src[i];
+ }
+ }
+
+ if (payloads_start == 0 || payloads_end == 0) {
+ errstr = "The token is invalid with less than 2 delimiters";
+ goto done;
+ }
+
+ payload_len = payloads_end - payloads_start;
+ payload = rd_malloc(payload_len + 4);
+ strncpy(payload, (converted_src + payloads_start), payload_len);
+
+ padding = 4 - (payload_len % 4);
+ if (padding < 4) {
+ while (padding--)
+ payload[payload_len++] = '=';
+ }
+
+ nbytesdecoded = ((payload_len + 3) / 4) * 3;
+ *bufplainp = rd_malloc(nbytesdecoded + 1);
+
+ if (EVP_DecodeBlock((uint8_t *)(*bufplainp), (uint8_t *)payload,
+ (int)payload_len) == -1) {
+ errstr = "Failed to decode base64 payload";
+ }
+
+done:
+ RD_IF_FREE(payload, rd_free);
+ RD_IF_FREE(converted_src, rd_free);
+ return errstr;
+}
+
+
+/**
+ * @brief Implementation of the OAuth/OIDC token refresh callback.
+ *        It retrieves the JSON response from the HTTP(S) call to the token
+ *        provider, extracts the JWT from the response, and forwards it to
+ *        the broker.
+ */
+void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque) {
+ const int timeout_s = 20;
+ const int retry = 4;
+ const int retry_ms = 5 * 1000;
+
+ double exp;
+
+ cJSON *json = NULL;
+ cJSON *payloads = NULL;
+ cJSON *parsed_token, *jwt_exp, *jwt_sub;
+
+ rd_http_error_t *herr;
+
+ char *jwt_token;
+ char *post_fields;
+ char *decoded_payloads = NULL;
+
+ struct curl_slist *headers = NULL;
+
+ const char *token_url;
+ const char *sub;
+ const char *errstr;
+
+ size_t post_fields_size;
+ size_t extension_cnt;
+ size_t extension_key_value_cnt = 0;
+ size_t scope_size = 0;
+
+ char set_token_errstr[512];
+ char decode_payload_errstr[512];
+
+ char **extensions = NULL;
+ char **extension_key_value = NULL;
+
+ if (rd_kafka_terminating(rk))
+ return;
+
+ rd_kafka_oidc_build_headers(rk->rk_conf.sasl.oauthbearer.client_id,
+ rk->rk_conf.sasl.oauthbearer.client_secret,
+ &headers);
+
+ /* Build post fields */
+ if (rk->rk_conf.sasl.oauthbearer.scope)
+ scope_size = strlen(rk->rk_conf.sasl.oauthbearer.scope);
+ if (scope_size == 0) {
+ post_fields = rd_strdup("grant_type=client_credentials");
+ post_fields_size = strlen("grant_type=client_credentials");
+ } else {
+ post_fields_size =
+ strlen("grant_type=client_credentials&scope=") + scope_size;
+ post_fields = rd_malloc(post_fields_size + 1);
+ rd_snprintf(post_fields, post_fields_size,
+ "grant_type=client_credentials&scope=%s",
+ rk->rk_conf.sasl.oauthbearer.scope);
+ }
+
+ token_url = rk->rk_conf.sasl.oauthbearer.token_endpoint_url;
+
+ herr = rd_http_post_expect_json(rk, token_url, headers, post_fields,
+ post_fields_size, timeout_s, retry,
+ retry_ms, &json);
+
+ if (unlikely(herr != NULL)) {
+ rd_kafka_log(rk, LOG_ERR, "OIDC",
+ "Failed to retrieve OIDC "
+ "token from \"%s\": %s (%d)",
+ token_url, herr->errstr, herr->code);
+ rd_kafka_oauthbearer_set_token_failure(rk, herr->errstr);
+ rd_http_error_destroy(herr);
+ goto done;
+ }
+
+ parsed_token = cJSON_GetObjectItem(json, "access_token");
+
+ if (parsed_token == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "\"access_token\" field");
+ goto done;
+ }
+
+ jwt_token = cJSON_GetStringValue(parsed_token);
+ if (jwt_token == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON "
+ "response as a value string");
+ goto done;
+ }
+
+ errstr = rd_kafka_jwt_b64_decode_payload(jwt_token, &decoded_payloads);
+ if (errstr != NULL) {
+ rd_snprintf(decode_payload_errstr,
+ sizeof(decode_payload_errstr),
+ "Failed to decode JWT payload: %s", errstr);
+ rd_kafka_oauthbearer_set_token_failure(rk,
+ decode_payload_errstr);
+ goto done;
+ }
+
+ payloads = cJSON_Parse(decoded_payloads);
+ if (payloads == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk, "Failed to parse JSON JWT payload");
+ goto done;
+ }
+
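+        /* "exp" (RFC 7519) is the token expiration time in seconds since
+         * the Unix epoch; it is converted to milliseconds below when
+         * passed to rd_kafka_oauthbearer_set_token(). */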
+ jwt_exp = cJSON_GetObjectItem(payloads, "exp");
+ if (jwt_exp == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "\"exp\" field");
+ goto done;
+ }
+
+ exp = cJSON_GetNumberValue(jwt_exp);
+ if (exp <= 0) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "valid \"exp\" field");
+ goto done;
+ }
+
+ jwt_sub = cJSON_GetObjectItem(payloads, "sub");
+ if (jwt_sub == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "\"sub\" field");
+ goto done;
+ }
+
+ sub = cJSON_GetStringValue(jwt_sub);
+ if (sub == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "valid \"sub\" field");
+ goto done;
+ }
+
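+        /* sasl.oauthbearer.extensions is a comma-separated list of
+         * key=value pairs (e.g. "key1=value1,key2=value2") that is split
+         * here and forwarded as SASL/OAUTHBEARER extensions. */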
+ if (rk->rk_conf.sasl.oauthbearer.extensions_str) {
+ extensions =
+ rd_string_split(rk->rk_conf.sasl.oauthbearer.extensions_str,
+ ',', rd_true, &extension_cnt);
+
+ extension_key_value = rd_kafka_conf_kv_split(
+ (const char **)extensions, extension_cnt,
+ &extension_key_value_cnt);
+ }
+
+ if (rd_kafka_oauthbearer_set_token(
+ rk, jwt_token, (int64_t)exp * 1000, sub,
+ (const char **)extension_key_value, extension_key_value_cnt,
+ set_token_errstr,
+ sizeof(set_token_errstr)) != RD_KAFKA_RESP_ERR_NO_ERROR)
+ rd_kafka_oauthbearer_set_token_failure(rk, set_token_errstr);
+
+done:
+ RD_IF_FREE(decoded_payloads, rd_free);
+ RD_IF_FREE(post_fields, rd_free);
+ RD_IF_FREE(json, cJSON_Delete);
+ RD_IF_FREE(headers, curl_slist_free_all);
+ RD_IF_FREE(extensions, rd_free);
+ RD_IF_FREE(extension_key_value, rd_free);
+ RD_IF_FREE(payloads, cJSON_Delete);
+}
+
+
+/**
+ * @brief Make sure the JWT can be extracted from the HTTP(S) response.
+ *        The JSON response from the HTTP(S) call to the token provider is
+ *        stored in rd_http_req_t.hreq_buf and the JWT is the value of the
+ *        "access_token" field, i.e. the format is {"access_token":"*******"}.
+ *        This function mocks up rd_http_req_t.hreq_buf with a dummy JWT;
+ *        rd_http_parse_json() then extracts the JWT and the test verifies
+ *        that it matches the dummy one.
+ */
+static int ut_sasl_oauthbearer_oidc_should_succeed(void) {
+        /* The token below was generated on the https://jwt.io/ website:
+         * 1. Select an algorithm from the Algorithm drop-down menu
+         *    (the token used here is signed with HS256).
+         * 2. Enter the header and the payload.
+         *    The payload should contain "exp", "iat" and "sub", for example:
+         *      payload = {"exp": 1636532769,
+         *                 "iat": 1516239022,
+         *                 "sub": "sub"}
+         *    The header should contain "kid", for example:
+         *      header = {"kid": "abcedfg"} */
+ static const char *expected_jwt_token =
+ "eyJhbGciOiJIUzI1NiIsInR5"
+ "cCI6IkpXVCIsImtpZCI6ImFiY2VkZmcifQ"
+ "."
+ "eyJpYXQiOjE2MzIzNzUzMjAsInN1YiI6InN"
+ "1YiIsImV4cCI6MTYzMjM3NTYyMH0"
+ "."
+ "bT5oY8K-rS2gQ7Awc40844bK3zhzBhZb7sputErqQHY";
+ char *expected_token_value;
+ size_t token_len;
+ rd_http_req_t hreq;
+ rd_http_error_t *herr;
+ cJSON *json = NULL;
+ char *token;
+ cJSON *parsed_token;
+
+ RD_UT_BEGIN();
+
+ herr = rd_http_req_init(&hreq, "");
+
+ RD_UT_ASSERT(!herr,
+ "Expected initialize to succeed, "
+ "but failed with error code: %d, error string: %s",
+ herr->code, herr->errstr);
+
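+        /* Room for the {"...":"..."} JSON framing (7 characters) plus the
+         * terminating NUL. */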
+ token_len = strlen("access_token") + strlen(expected_jwt_token) + 8;
+
+ expected_token_value = rd_malloc(token_len);
+ rd_snprintf(expected_token_value, token_len, "{\"%s\":\"%s\"}",
+ "access_token", expected_jwt_token);
+ rd_buf_write(hreq.hreq_buf, expected_token_value, token_len);
+
+ herr = rd_http_parse_json(&hreq, &json);
+ RD_UT_ASSERT(!herr,
+ "Failed to parse JSON token: error code: %d, "
+ "error string: %s",
+ herr->code, herr->errstr);
+
+ RD_UT_ASSERT(json, "Expected non-empty json.");
+
+ parsed_token = cJSON_GetObjectItem(json, "access_token");
+
+ RD_UT_ASSERT(parsed_token, "Expected access_token in JSON response.");
+ token = parsed_token->valuestring;
+
+ RD_UT_ASSERT(!strcmp(expected_jwt_token, token),
+ "Incorrect token received: "
+ "expected=%s; received=%s",
+ expected_jwt_token, token);
+
+ rd_free(expected_token_value);
+ rd_http_error_destroy(herr);
+ rd_http_req_destroy(&hreq);
+ cJSON_Delete(json);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Make sure that when the JSON response doesn't include the
+ *        "access_token" key, no token is extracted.
+ */
+static int ut_sasl_oauthbearer_oidc_with_empty_key(void) {
+ static const char *empty_token_format = "{}";
+ size_t token_len;
+ rd_http_req_t hreq;
+ rd_http_error_t *herr;
+ cJSON *json = NULL;
+ cJSON *parsed_token;
+
+ RD_UT_BEGIN();
+
+ herr = rd_http_req_init(&hreq, "");
+ RD_UT_ASSERT(!herr,
+ "Expected initialization to succeed, "
+ "but it failed with error code: %d, error string: %s",
+ herr->code, herr->errstr);
+
+ token_len = strlen(empty_token_format);
+
+ rd_buf_write(hreq.hreq_buf, empty_token_format, token_len);
+
+ herr = rd_http_parse_json(&hreq, &json);
+
+ RD_UT_ASSERT(!herr,
+ "Expected JSON token parsing to succeed, "
+ "but it failed with error code: %d, error string: %s",
+ herr->code, herr->errstr);
+
+ RD_UT_ASSERT(json, "Expected non-empty json.");
+
+ parsed_token = cJSON_GetObjectItem(json, "access_token");
+
+ RD_UT_ASSERT(!parsed_token,
+                     "Did not expect access_token in JSON response");
+
+ rd_http_req_destroy(&hreq);
+ rd_http_error_destroy(herr);
+ cJSON_Delete(json);
+ cJSON_Delete(parsed_token);
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Make sure the JWT can be extracted from HTTP(S) responses,
+ *        or that extraction fails as expected.
+ */
+int unittest_sasl_oauthbearer_oidc(void) {
+ int fails = 0;
+ fails += ut_sasl_oauthbearer_oidc_should_succeed();
+ fails += ut_sasl_oauthbearer_oidc_with_empty_key();
+ return fails;
+}
diff --git a/src/rdkafka_sasl_oauthbearer_oidc.h b/src/rdkafka_sasl_oauthbearer_oidc.h
new file mode 100644
index 0000000000..a944f2efa1
--- /dev/null
+++ b/src/rdkafka_sasl_oauthbearer_oidc.h
@@ -0,0 +1,37 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2021 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_
+#define _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_
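+
+/**
+ * @brief OIDC token refresh callback, installed as the builtin
+ *        oauthbearer token refresh callback when
+ *        \c sasl.oauthbearer.method=oidc is configured.
+ *
+ * Typical client configuration (illustrative values):
+ *   sasl.oauthbearer.method=oidc
+ *   sasl.oauthbearer.client.id=<client id>
+ *   sasl.oauthbearer.client.secret=<client secret>
+ *   sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/token
+ *   sasl.oauthbearer.scope=<scope>                       (optional)
+ *   sasl.oauthbearer.extensions=key1=value1,key2=value2  (optional)
+ */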
+void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque);
+
+int unittest_sasl_oauthbearer_oidc(void);
+
+#endif /* _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_ */
diff --git a/src/rdkafka_topic.c b/src/rdkafka_topic.c
index cbee469674..7f79a2ffd5 100644
--- a/src/rdkafka_topic.c
+++ b/src/rdkafka_topic.c
@@ -89,6 +89,7 @@ static void rd_kafka_topic_destroy_app(rd_kafka_topic_t *app_rkt) {
* Final destructor for topic. Refcnt must be 0.
*/
void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt) {
+ rd_kafka_partition_msgid_t *partmsgid, *partmsgid_tmp;
rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0);
@@ -97,6 +98,11 @@ void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt) {
rkt->rkt_rk->rk_topic_cnt--;
rd_kafka_wrunlock(rkt->rkt_rk);
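+        /* Free any idempotent producer state saved for removed partitions
+         * (see rd_kafka_toppar_idemp_msgid_save()). */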
+ TAILQ_FOREACH_SAFE(partmsgid, &rkt->rkt_saved_partmsgids, link,
+ partmsgid_tmp) {
+ rd_free(partmsgid);
+ }
+
rd_kafka_assert(rkt->rkt_rk, rd_list_empty(&rkt->rkt_desp));
rd_list_destroy(&rkt->rkt_desp);
@@ -450,6 +456,7 @@ rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk,
rd_list_init(&rkt->rkt_desp, 16, NULL);
rd_interval_init(&rkt->rkt_desp_refresh_intvl);
+ TAILQ_INIT(&rkt->rkt_saved_partmsgids);
rd_refcnt_init(&rkt->rkt_refcnt, 0);
rd_refcnt_init(&rkt->rkt_app_refcnt, 0);
@@ -736,6 +743,62 @@ int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp) {
}
+
+/**
+ * @brief Save idempotent producer state for a partition that is about to
+ * be removed.
+ *
+ * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp)
+ */
+static void rd_kafka_toppar_idemp_msgid_save(rd_kafka_topic_t *rkt,
+ const rd_kafka_toppar_t *rktp) {
+ rd_kafka_partition_msgid_t *partmsgid = rd_malloc(sizeof(*partmsgid));
+ partmsgid->partition = rktp->rktp_partition;
+ partmsgid->msgid = rktp->rktp_msgid;
+ partmsgid->pid = rktp->rktp_eos.pid;
+ partmsgid->epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid;
+ partmsgid->ts = rd_clock();
+
+ TAILQ_INSERT_TAIL(&rkt->rkt_saved_partmsgids, partmsgid, link);
+}
+
+
+/**
+ * @brief Restore idempotent producer state for a new/resurfacing partition.
+ *
+ * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp)
+ */
+static void rd_kafka_toppar_idemp_msgid_restore(rd_kafka_topic_t *rkt,
+ rd_kafka_toppar_t *rktp) {
+ rd_kafka_partition_msgid_t *partmsgid;
+
+ TAILQ_FOREACH(partmsgid, &rkt->rkt_saved_partmsgids, link) {
+ if (partmsgid->partition == rktp->rktp_partition)
+ break;
+ }
+
+ if (!partmsgid)
+ return;
+
+ rktp->rktp_msgid = partmsgid->msgid;
+ rktp->rktp_eos.pid = partmsgid->pid;
+ rktp->rktp_eos.epoch_base_msgid = partmsgid->epoch_base_msgid;
+
+ rd_kafka_dbg(rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "MSGID",
+ "Topic %s [%" PRId32 "]: restored %s with MsgId %" PRIu64
+ " and "
+ "epoch base MsgId %" PRIu64
+ " that was saved upon removal %dms ago",
+ rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_pid2str(partmsgid->pid), partmsgid->msgid,
+ partmsgid->epoch_base_msgid,
+ (int)((rd_clock() - partmsgid->ts) / 1000));
+
+ TAILQ_REMOVE(&rkt->rkt_saved_partmsgids, partmsgid, link);
+ rd_free(partmsgid);
+}
+
+
/**
* @brief Update the number of partitions for a topic and takes actions
* accordingly.
@@ -749,6 +812,7 @@ static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt,
rd_kafka_t *rk = rkt->rkt_rk;
rd_kafka_toppar_t **rktps;
rd_kafka_toppar_t *rktp;
+        rd_bool_t is_idempotent = rd_kafka_is_idempotent(rk);
int32_t i;
if (likely(rkt->rkt_partition_cnt == partition_cnt))
@@ -790,7 +854,6 @@ static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt,
/* Remove from desp list since the
* partition is now known. */
rd_kafka_toppar_desired_unlink(rktp);
- rd_kafka_toppar_unlock(rktp);
} else {
rktp = rd_kafka_toppar_new(rkt, i);
@@ -798,9 +861,16 @@ static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt,
rktp->rktp_flags &=
~(RD_KAFKA_TOPPAR_F_UNKNOWN |
RD_KAFKA_TOPPAR_F_REMOVE);
- rd_kafka_toppar_unlock(rktp);
}
rktps[i] = rktp;
+
+                        if (is_idempotent)
+ /* Restore idempotent producer state for
+ * this partition, if any. */
+ rd_kafka_toppar_idemp_msgid_restore(rkt, rktp);
+
+ rd_kafka_toppar_unlock(rktp);
+
} else {
/* Existing partition, grab our own reference. */
rktps[i] = rd_kafka_toppar_keep(rkt->rkt_p[i]);
@@ -833,6 +903,24 @@ static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt,
rd_kafka_toppar_lock(rktp);
+ /* Idempotent/Transactional producer:
+ * We need to save each removed partition's base msgid for
+ * the (rare) chance the partition comes back,
+ * in which case we must continue with the correct msgid
+ * in future ProduceRequests.
+ *
+         * These base msgids are restored (above) if/when partitions
+ * come back and the PID,Epoch hasn't changed.
+ *
+ * One situation where this might happen is if a broker goes
+ * out of sync and starts to wrongfully report an existing
+ * topic as non-existent, triggering the removal of partitions
+ * on the producer client. When metadata is eventually correct
+ * again and the topic is "re-created" on the producer, it
+ * must continue with the next msgid/baseseq. */
+                if (is_idempotent && rd_kafka_pid_valid(rktp->rktp_eos.pid))
+ rd_kafka_toppar_idemp_msgid_save(rkt, rktp);
+
rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) {
diff --git a/src/rdkafka_topic.h b/src/rdkafka_topic.h
index 414cd66228..19e0c02006 100644
--- a/src/rdkafka_topic.h
+++ b/src/rdkafka_topic.h
@@ -82,6 +82,22 @@ rd_kafka_lwtopic_keep(rd_kafka_lwtopic_t *lrkt) {
+/**
+ * @struct Holds partition + transactional PID + base sequence msgid.
+ *
+ * Used in rkt_saved_partmsgids to restore transactional/idempotency state
+ * for a partition that is lost from metadata for some time and then returns.
+ */
+typedef struct rd_kafka_partition_msgid_s {
+ TAILQ_ENTRY(rd_kafka_partition_msgid_s) link;
+ int32_t partition;
+ rd_kafka_pid_t pid;
+ uint64_t msgid;
+ uint64_t epoch_base_msgid;
+ rd_ts_t ts;
+} rd_kafka_partition_msgid_t;
+
+
/*
* @struct Internal representation of a topic.
*
@@ -140,6 +156,10 @@ struct rd_kafka_topic_s {
rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */
rd_kafka_topic_conf_t rkt_conf;
+
+ /** Idempotent/Txn producer:
+ * The PID,Epoch,base Msgid state for removed partitions. */
+ TAILQ_HEAD(, rd_kafka_partition_msgid_s) rkt_saved_partmsgids;
};
#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock)
diff --git a/src/rdkafka_transport.c b/src/rdkafka_transport.c
index 732d1d3461..d848ad7410 100644
--- a/src/rdkafka_transport.c
+++ b/src/rdkafka_transport.c
@@ -72,7 +72,7 @@ static void rd_kafka_transport_close0(rd_kafka_t *rk, rd_socket_t s) {
if (rk->rk_conf.closesocket_cb)
rk->rk_conf.closesocket_cb((int)s, rk->rk_conf.opaque);
else
- rd_close(s);
+ rd_socket_close(s);
}
/**
@@ -1240,13 +1240,11 @@ static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) {
if (r <= 0)
return r;
- rd_atomic64_add(&rktrans->rktrans_rkb->rkb_c.wakeups, 1);
-
if (rktrans->rktrans_pfd[1].revents & POLLIN) {
/* Read wake-up fd data and throw away, just used for wake-ups*/
char buf[1024];
- while (rd_read((int)rktrans->rktrans_pfd[1].fd, buf,
- sizeof(buf)) > 0)
+ while (rd_socket_read((int)rktrans->rktrans_pfd[1].fd, buf,
+ sizeof(buf)) > 0)
; /* Read all buffered signalling bytes */
}
diff --git a/src/rdkafka_txnmgr.c b/src/rdkafka_txnmgr.c
index 13b8479866..6384c601f6 100644
--- a/src/rdkafka_txnmgr.c
+++ b/src/rdkafka_txnmgr.c
@@ -752,7 +752,8 @@ static void rd_kafka_txn_handle_AddPartitionsToTxn(rd_kafka_t *rk,
/* Since these partitions are now allowed to produce
* we wake up all broker threads. */
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "partitions added to transaction");
goto done;
@@ -1418,7 +1419,8 @@ static rd_kafka_op_res_t rd_kafka_txn_op_begin_transaction(rd_kafka_t *rk,
rd_kafka_wrunlock(rk);
if (wakeup_brokers)
- rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT);
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "begin transaction");
rd_kafka_txn_curr_api_reply_error(rd_kafka_q_keep(rko->rko_replyq.q),
error);
diff --git a/src/rdposix.h b/src/rdposix.h
index deb1fe009f..7b2376823f 100644
--- a/src/rdposix.h
+++ b/src/rdposix.h
@@ -238,9 +238,13 @@ static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) {
#endif
return 0;
}
-#define rd_pipe(fds) pipe(fds)
-#define rd_read(fd, buf, sz) read(fd, buf, sz)
-#define rd_write(fd, buf, sz) write(fd, buf, sz)
-#define rd_close(fd) close(fd)
+#define rd_socket_read(fd, buf, sz) read(fd, buf, sz)
+#define rd_socket_write(fd, buf, sz) write(fd, buf, sz)
+#define rd_socket_close(fd) close(fd)
+
+/* File IO */
+#define rd_write(fd, buf, sz) write(fd, buf, sz)
+#define rd_open(path, flags, mode) open(path, flags, mode)
+#define rd_close(fd) close(fd)
#endif /* _RDPOSIX_H_ */
diff --git a/src/rdunittest.c b/src/rdunittest.c
index 021af37997..120f62bedb 100644
--- a/src/rdunittest.c
+++ b/src/rdunittest.c
@@ -48,6 +48,9 @@
#include "rdsysqueue.h"
#include "rdkafka_sasl_oauthbearer.h"
+#if WITH_CURL
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+#endif
#include "rdkafka_msgset.h"
#include "rdkafka_txnmgr.h"
#if WITH_SASL_AWS_MSK_IAM
@@ -151,9 +154,9 @@ static struct ut_tq *ut_tq_find_prev_pos(const struct ut_tq_head *head,
}
static int ut_tq_test(const struct ut_tq_args *args) {
- int totcnt = 0;
- int fails = 0;
- struct ut_tq_head *tqh[3];
+ int totcnt = 0;
+ int fails = 0;
+ struct ut_tq_head *tqh[3] = {NULL, NULL, NULL};
struct ut_tq *e, *insert_after;
int i, qi;
@@ -425,6 +428,7 @@ extern int unittest_assignors(void);
extern int unittest_map(void);
#if WITH_CURL
extern int unittest_http(void);
+extern int unittest_sasl_oauthbearer_oidc(void);
#endif
#if WITH_SASL_AWS_MSK_IAM
extern int unittest_aws_msk_iam (void);
@@ -466,6 +470,7 @@ int rd_unittest(void) {
{"assignors", unittest_assignors},
#if WITH_CURL
{"http", unittest_http},
+ {"sasl_oauthbearer_oidc", unittest_sasl_oauthbearer_oidc},
#endif
#if WITH_SASL_AWS_MSK_IAM
{ "sasl_aws_msk_iam", unittest_aws_msk_iam },
diff --git a/src/rdwin32.h b/src/rdwin32.h
index 8ca0887f60..73edd41d6a 100644
--- a/src/rdwin32.h
+++ b/src/rdwin32.h
@@ -367,9 +367,15 @@ static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) {
return -1;
}
-#define rd_read(fd, buf, sz) recv(fd, buf, sz, 0)
-#define rd_write(fd, buf, sz) send(fd, buf, sz, 0)
-#define rd_close(fd) closesocket(fd)
+/* Socket IO */
+#define rd_socket_read(fd, buf, sz) recv(fd, buf, sz, 0)
+#define rd_socket_write(fd, buf, sz) send(fd, buf, sz, 0)
+#define rd_socket_close(fd) closesocket(fd)
+
+/* File IO */
+#define rd_write(fd, buf, sz) _write(fd, buf, sz)
+#define rd_open(path, flags, mode) _open(path, flags, mode)
+#define rd_close(fd) _close(fd)
#endif /* !__cplusplus*/
diff --git a/tests/0004-conf.c b/tests/0004-conf.c
index 4b2980a243..8e9e1c9298 100644
--- a/tests/0004-conf.c
+++ b/tests/0004-conf.c
@@ -425,6 +425,80 @@ static void do_test_default_topic_conf(void) {
}
+/**
+ * @brief Verify behaviour of checking that message.timeout.ms fits within
+ * configured linger.ms. By larry-cdn77.
+ */
+static void do_message_timeout_linger_checks(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_t *rk;
+ char errstr[512];
+ int i;
+        const char values[4][3][40] = {
+ {"-", "-", "default and L and M"},
+ {"100", "-", "set L such that L=M"},
+ {"-", "10", "set M such that L>=M"},
+ {"500000", "10", "!set L and M such that L>=M"}};
+
+ SUB_TEST_QUICK();
+
+        for (i = 0; i < 4; i++) {
+ const char *linger = values[i][0];
+ const char *msgtimeout = values[i][1];
+ const char *desc = values[i][2];
+ rd_bool_t expect_fail = *desc == '!';
+
+ if (expect_fail)
+ desc++; /* Push past the '!' */
+
+ conf = rd_kafka_conf_new();
+ tconf = rd_kafka_topic_conf_new();
+
+ if (*linger != '-')
+ test_conf_set(conf, "linger.ms", linger);
+
+ if (*msgtimeout != '-')
+ test_topic_conf_set(tconf, "message.timeout.ms",
+ msgtimeout);
+
+ rd_kafka_conf_set_default_topic_conf(conf, tconf);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr));
+
+ if (!rk)
+ TEST_SAY("#%d \"%s\": rd_kafka_new() failed: %s\n", i,
+ desc, errstr);
+ else
+ TEST_SAY("#%d \"%s\": rd_kafka_new() succeeded\n", i,
+ desc);
+
+ if (!expect_fail) {
+ TEST_ASSERT(rk != NULL,
+ "Expected success: "
+ "message timeout linger: %s: %s",
+ desc, errstr);
+
+ rd_kafka_destroy(rk);
+
+ } else {
+ TEST_ASSERT(rk == NULL,
+ "Expected failure: "
+ "message timeout linger: %s",
+ desc);
+
+ rd_kafka_conf_destroy(conf);
+ }
+ }
+
+ SUB_TEST_PASS();
+}
+
+
int main_0004_conf(int argc, char **argv) {
rd_kafka_t *rk;
rd_kafka_topic_t *rkt;
@@ -784,5 +858,7 @@ int main_0004_conf(int argc, char **argv) {
do_test_default_topic_conf();
+ do_message_timeout_linger_checks();
+
return 0;
}
diff --git a/tests/0019-list_groups.c b/tests/0019-list_groups.c
index ba982edcf7..02729c3396 100644
--- a/tests/0019-list_groups.c
+++ b/tests/0019-list_groups.c
@@ -146,7 +146,7 @@ list_groups(rd_kafka_t *rk, char **groups, int group_cnt, const char *desc) {
-int main_0019_list_groups(int argc, char **argv) {
+static void do_test_list_groups(void) {
const char *topic = test_mk_topic_name(__FUNCTION__, 1);
#define _CONS_CNT 2
char *groups[_CONS_CNT];
@@ -159,6 +159,8 @@ int main_0019_list_groups(int argc, char **argv) {
rd_kafka_topic_t *rkt;
const struct rd_kafka_group_list *grplist;
+ SUB_TEST();
+
/* Handle for group listings */
rk = test_create_producer();
@@ -244,5 +246,44 @@ int main_0019_list_groups(int argc, char **argv) {
rd_kafka_destroy(rk);
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief #3705: Verify that list_groups() doesn't hang if unable to
+ * connect to the cluster.
+ */
+static void do_test_list_groups_hang(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ const struct rd_kafka_group_list *grplist;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+
+ SUB_TEST();
+ test_conf_init(&conf, NULL, 20);
+
+ /* An unavailable broker */
+ test_conf_set(conf, "bootstrap.servers", "127.0.0.1:65531");
+
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ TIMING_START(&timing, "list_groups");
+ err = rd_kafka_list_groups(rk, NULL, &grplist, 5 * 1000);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected ERR__TIMED_OUT, not %s", rd_kafka_err2name(err));
+ TIMING_ASSERT(&timing, 5 * 1000, 7 * 1000);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0019_list_groups(int argc, char **argv) {
+ do_test_list_groups();
+ do_test_list_groups_hang();
return 0;
}
diff --git a/tests/0022-consume_batch.c b/tests/0022-consume_batch.c
index 2298ade2e5..64e826d035 100644
--- a/tests/0022-consume_batch.c
+++ b/tests/0022-consume_batch.c
@@ -39,7 +39,7 @@
*/
-static int do_test_consume_batch(void) {
+static void do_test_consume_batch(void) {
#define topic_cnt 2
char *topics[topic_cnt];
const int partition_cnt = 2;
@@ -53,6 +53,8 @@ static int do_test_consume_batch(void) {
int batch_cnt = 0;
int remains;
+ SUB_TEST();
+
testid = test_id_generate();
/* Produce messages */
@@ -138,18 +140,73 @@ static int do_test_consume_batch(void) {
rd_kafka_destroy(rk);
- return 0;
+ SUB_TEST_PASS();
}
+#if WITH_SASL_OAUTHBEARER
+/**
+ * @brief Verify that the oauthbearer_refresh_cb() is triggered
+ * when using consume_batch_queue() (as opposed to consumer_poll()).
+ */
-int main_0022_consume_batch(int argc, char **argv) {
- int fails = 0;
+static rd_bool_t refresh_called = rd_false;
+
+static void
+refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque) {
+ TEST_SAY("Refresh callback called\n");
+ TEST_ASSERT(!refresh_called);
+ refresh_called = rd_true;
+ rd_kafka_oauthbearer_set_token_failure(rk, "Refresh called");
+}
+
+static void do_test_consume_batch_oauthbearer_cb(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *rkq;
+ rd_kafka_message_t *rkms[1];
+ ssize_t r;
+
+ SUB_TEST_QUICK();
+
+ refresh_called = rd_false;
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "security.protocol", "sasl_plaintext");
+ test_conf_set(conf, "sasl.mechanism", "OAUTHBEARER");
+ rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb);
+
+ /* Create simple consumer */
+ rk = test_create_consumer(NULL, NULL, conf, NULL);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_main(rk);
+
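+        /* No token has been set yet, so polling the consume queue is
+         * expected to trigger the token refresh callback. */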
+ r = rd_kafka_consume_batch_queue(rkq, 1000, rkms, 1);
+ TEST_ASSERT(r == 0, "Expected return value 0, not %d", (int)r);
+
+ TEST_SAY("refresh_called = %d\n", refresh_called);
+ TEST_ASSERT(refresh_called,
+ "Expected refresh callback to have been called");
+
+ rd_kafka_queue_destroy(rkq);
- fails += do_test_consume_batch();
+        rd_kafka_destroy(rk);
+
+        SUB_TEST_PASS();
+}
+#endif
+
+
+int main_0022_consume_batch(int argc, char **argv) {
+ do_test_consume_batch();
+ return 0;
+}
- if (fails > 0)
- TEST_FAIL("See %d previous error(s)\n", fails);
+int main_0022_consume_batch_local(int argc, char **argv) {
+#if WITH_SASL_OAUTHBEARER
+ do_test_consume_batch_oauthbearer_cb();
+#else
+ TEST_SKIP("No OAUTHBEARER support\n");
+#endif
return 0;
}
diff --git a/tests/0055-producer_latency.c b/tests/0055-producer_latency.c
index 2759e098f9..e0244cec95 100644
--- a/tests/0055-producer_latency.c
+++ b/tests/0055-producer_latency.c
@@ -43,11 +43,14 @@ struct latconf {
char linger_ms_conf[32]; /**< Read back to show actual value */
/* Result vector */
+ rd_bool_t passed;
float latency[_MSG_COUNT];
float sum;
int cnt;
+ int wakeups;
};
+static int tot_wakeups = 0;
static void
dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
@@ -76,6 +79,46 @@ dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
}
+/**
+ * @brief A stats callback to get the per-broker wakeup counts.
+ *
+ * The JSON "parsing" here is crude..
+ */
+static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
+ const char *t = json;
+ int cnt = 0;
+ int total = 0;
+
+ /* Since we're only producing to one partition there will only be
+         * one broker, the leader, whose wakeup counts we're interested in, but
+ * we also want to know that other broker threads aren't spinning
+ * like crazy. So just summarize all the wakeups from all brokers. */
+ while ((t = strstr(t, "\"wakeups\":"))) {
+ int wakeups;
+ const char *next;
+
+ t += strlen("\"wakeups\":");
+ while (isspace((int)*t))
+ t++;
+ wakeups = strtol(t, (char **)&next, 0);
+
+ TEST_ASSERT(t != next, "No wakeup number found at \"%.*s...\"",
+ 16, t);
+
+ total += wakeups;
+ cnt++;
+
+ t = next;
+ }
+
+ TEST_ASSERT(cnt > 0, "No brokers found in stats");
+
+ tot_wakeups = total;
+
+ return 0;
+}
+
+
static int verify_latency(struct latconf *latconf) {
float avg;
int fails = 0;
@@ -86,8 +129,11 @@ static int verify_latency(struct latconf *latconf) {
avg = latconf->sum / (float)latconf->cnt;
- TEST_SAY("%s: average latency %.3fms, allowed range %d..%d +%.0fms\n",
- latconf->name, avg, latconf->min, latconf->max, ext_overhead);
+ TEST_SAY(
+ "%s: average latency %.3fms, allowed range %d..%d +%.0fms, "
+ "%d wakeups\n",
+ latconf->name, avg, latconf->min, latconf->max, ext_overhead,
+ tot_wakeups);
if (avg < (float)latconf->min ||
avg > (float)latconf->max + ext_overhead) {
@@ -99,6 +145,16 @@ static int verify_latency(struct latconf *latconf) {
fails++;
}
+ latconf->wakeups = tot_wakeups;
+ if (latconf->wakeups < 10 || latconf->wakeups > 1000) {
+ TEST_FAIL_LATER(
+ "%s: broker wakeups out of range: %d, "
+ "expected 10..1000",
+ latconf->name, latconf->wakeups);
+ fails++;
+ }
+
+
return fails;
}
@@ -116,23 +172,32 @@ static void measure_rtt(struct latconf *latconf, rd_kafka_t *rk) {
rd_kafka_metadata_destroy(md);
}
-static int test_producer_latency(const char *topic, struct latconf *latconf) {
+
+
+static void test_producer_latency(const char *topic, struct latconf *latconf) {
rd_kafka_t *rk;
rd_kafka_conf_t *conf;
rd_kafka_resp_err_t err;
int i;
size_t sz;
+ rd_bool_t with_transactions = rd_false;
+
+        SUB_TEST("%s", latconf->name);
test_conf_init(&conf, NULL, 60);
rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
rd_kafka_conf_set_opaque(conf, latconf);
+ rd_kafka_conf_set_stats_cb(conf, stats_cb);
+ test_conf_set(conf, "statistics.interval.ms", "100");
+ tot_wakeups = 0;
- TEST_SAY(_C_BLU "[%s: begin]\n" _C_CLR, latconf->name);
for (i = 0; latconf->conf[i]; i += 2) {
TEST_SAY("%s: set conf %s = %s\n", latconf->name,
latconf->conf[i], latconf->conf[i + 1]);
test_conf_set(conf, latconf->conf[i], latconf->conf[i + 1]);
+ if (!strcmp(latconf->conf[i], "transactional.id"))
+ with_transactions = rd_true;
}
sz = sizeof(latconf->linger_ms_conf);
@@ -140,6 +205,11 @@ static int test_producer_latency(const char *topic, struct latconf *latconf) {
rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ if (with_transactions) {
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 10 * 1000));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+ }
+
TEST_SAY("%s: priming producer\n", latconf->name);
/* Send a priming message to make sure everything is up
* and functional before starting measurements */
@@ -151,8 +221,12 @@ static int test_producer_latency(const char *topic, struct latconf *latconf) {
TEST_FAIL("%s: priming producev failed: %s", latconf->name,
rd_kafka_err2str(err));
- /* Await delivery */
- rd_kafka_flush(rk, tmout_multip(5000));
+ if (with_transactions) {
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ } else {
+ /* Await delivery */
+ rd_kafka_flush(rk, tmout_multip(5000));
+ }
/* Get a network+broker round-trip-time base time. */
measure_rtt(latconf, rk);
@@ -160,6 +234,10 @@ static int test_producer_latency(const char *topic, struct latconf *latconf) {
TEST_SAY("%s: producing %d messages\n", latconf->name, _MSG_COUNT);
for (i = 0; i < _MSG_COUNT; i++) {
int64_t *ts_send;
+ int pre_cnt = latconf->cnt;
+
+ if (with_transactions)
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
ts_send = malloc(sizeof(*ts_send));
*ts_send = test_clock();
@@ -174,12 +252,31 @@ static int test_producer_latency(const char *topic, struct latconf *latconf) {
i, rd_kafka_err2str(err));
/* Await delivery */
- rd_kafka_poll(rk, 5000);
+ while (latconf->cnt == pre_cnt)
+ rd_kafka_poll(rk, 5000);
+
+ if (with_transactions) {
+ test_timing_t timing;
+ TIMING_START(&timing, "commit_transaction");
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ TIMING_ASSERT_LATER(&timing, 0,
+ (int)(latconf->rtt + 50.0));
+ }
}
+ while (tot_wakeups == 0)
+ rd_kafka_poll(rk, 100); /* Get final stats_cb */
+
rd_kafka_destroy(rk);
- return verify_latency(latconf);
+ if (verify_latency(latconf))
+ return; /* verify_latency() has already
+ * called TEST_FAIL_LATER() */
+
+
+ latconf->passed = rd_true;
+
+ SUB_TEST_PASS();
}
@@ -206,33 +303,37 @@ static float find_max(const struct latconf *latconf) {
}
int main_0055_producer_latency(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0055_producer_latency", 1);
struct latconf latconfs[] = {
{"standard settings", {NULL}, 5, 5}, /* default is now 5ms */
- {"low queue.buffering.max.ms",
- {"queue.buffering.max.ms", "0", NULL},
- 0,
- 0},
- {"microsecond queue.buffering.max.ms",
- {"queue.buffering.max.ms", "0.001", NULL},
+ {"low linger.ms (0ms)", {"linger.ms", "0", NULL}, 0, 0},
+ {"microsecond linger.ms (0.001ms)",
+ {"linger.ms", "0.001", NULL},
0,
1},
- {"high queue.buffering.max.ms",
- {"queue.buffering.max.ms", "3000", NULL},
+ {"high linger.ms (3000ms)",
+ {"linger.ms", "3000", NULL},
3000,
3100},
- {"queue.buffering.max.ms < 1000", /* internal block_max_ms */
- {"queue.buffering.max.ms", "500", NULL},
+ {"linger.ms < 1000 (500ms)", /* internal block_max_ms */
+ {"linger.ms", "500", NULL},
500,
600},
- {"no acks",
- {"queue.buffering.max.ms", "0", "acks", "0", "enable.idempotence",
- "false", NULL},
+ {"no acks (0ms)",
+ {"linger.ms", "0", "acks", "0", "enable.idempotence", "false",
+ NULL},
0,
0},
+ {"idempotence (10ms)",
+ {"linger.ms", "10", "enable.idempotence", "true", NULL},
+ 10,
+ 10},
+ {"transactions (35ms)",
+ {"linger.ms", "35", "transactional.id", topic, NULL},
+ 35,
+ 50 + 35 /* extra time for AddPartitions..*/},
{NULL}};
struct latconf *latconf;
- const char *topic = test_mk_topic_name("0055_producer_latency", 0);
- int fails = 0;
if (test_on_ci) {
TEST_SKIP("Latency measurements not reliable on CI\n");
@@ -240,24 +341,26 @@ int main_0055_producer_latency(int argc, char **argv) {
}
/* Create topic without replicas to keep broker-side latency down */
- test_create_topic(NULL, topic, 4, 1);
+ test_create_topic(NULL, topic, 1, 1);
for (latconf = latconfs; latconf->name; latconf++)
- fails += test_producer_latency(topic, latconf);
-
- if (fails)
- TEST_FAIL("See %d previous failure(s)", fails);
+ test_producer_latency(topic, latconf);
TEST_SAY(_C_YEL "Latency tests summary:\n" _C_CLR);
- TEST_SAY("%-40s %9s %6s..%-6s %7s %9s %9s %9s\n", "Name",
+ TEST_SAY("%-40s %9s %6s..%-6s %7s %9s %9s %9s %8s\n", "Name",
"linger.ms", "MinExp", "MaxExp", "RTT", "Min", "Average",
- "Max");
+ "Max", "Wakeups");
for (latconf = latconfs; latconf->name; latconf++)
- TEST_SAY("%-40s %9s %6d..%-6d %7g %9g %9g %9g\n",
+ TEST_SAY("%-40s %9s %6d..%-6d %7g %9g %9g %9g %8d%s\n",
latconf->name, latconf->linger_ms_conf, latconf->min,
latconf->max, latconf->rtt, find_min(latconf),
- latconf->sum / latconf->cnt, find_max(latconf));
+ latconf->sum / latconf->cnt, find_max(latconf),
+ latconf->wakeups,
+                         latconf->passed ? "" : _C_RED " FAILED" _C_CLR);
+
+
+ TEST_LATER_CHECK("");
return 0;
}
diff --git a/tests/0077-compaction.c b/tests/0077-compaction.c
index 3f4bfe7718..fae7bba85d 100644
--- a/tests/0077-compaction.c
+++ b/tests/0077-compaction.c
@@ -190,7 +190,8 @@ static void do_test_compaction(int msgs_per_key, const char *compression) {
"--config segment.bytes=10000 "
"--config min.cleanable.dirty.ratio=0.01 "
"--config delete.retention.ms=86400 "
- "--config file.delete.delay.ms=10000",
+ "--config file.delete.delay.ms=10000 "
+ "--config max.compaction.lag.ms=100",
topic, partition + 1);
test_conf_init(&conf, NULL, 120);
diff --git a/tests/0080-admin_ut.c b/tests/0080-admin_ut.c
index 6f80154c07..3ccb5d3d61 100644
--- a/tests/0080-admin_ut.c
+++ b/tests/0080-admin_ut.c
@@ -694,6 +694,620 @@ static void do_test_DeleteConsumerGroupOffsets(const char *what,
SUB_TEST_PASS();
}
+/**
+ * @brief AclBinding tests
+ */
+static void do_test_AclBinding() {
+ int i;
+ char errstr[512];
+ rd_kafka_AclBinding_t *new_acl;
+
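+        /* Arrays indexed by enum value: rd_true marks values that
+         * rd_kafka_AclBinding_new() accepts; the UNKNOWN and ANY
+         * placeholder values are rejected for a concrete binding. */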
+ rd_bool_t valid_resource_types[] = {rd_false, rd_false, rd_true,
+ rd_true, rd_true, rd_false};
+ rd_bool_t valid_resource_pattern_types[] = {
+ rd_false, rd_false, rd_false, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_operation[] = {
+ rd_false, rd_false, rd_true, rd_true, rd_true, rd_true, rd_true,
+ rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_permission_type[] = {rd_false, rd_false, rd_true,
+ rd_true, rd_false};
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK();
+
+ // Valid acl binding
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid resource name"),
+ "expected error string \"Invalid resource name\", not %s",
+ errstr);
+
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ NULL, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid principal"),
+ "expected error string \"Invalid principal\", not %s",
+ errstr);
+
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, NULL, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid host"),
+ "expected error string \"Invalid host\", not %s", errstr);
+
+ for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal,
+ host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_types[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(
+ !new_acl &&
+ !strcmp(errstr, "Invalid resource type"),
+ "expected error string \"Invalid resource type\", "
+ "not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_pattern_types[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(
+ !new_acl &&
+ !strcmp(errstr,
+ "Invalid resource pattern type"),
+ "expected error string \"Invalid resource pattern "
+ "type\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_operation[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(!new_acl &&
+ !strcmp(errstr, "Invalid operation"),
+ "expected error string \"Invalid "
+ "operation\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_permission_type[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(
+ !new_acl &&
+ !strcmp(errstr, "Invalid permission type"),
+                            "expected error string \"Invalid permission "
+                            "type\", not %s",
+ errstr);
+ }
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief AclBindingFilter tests
+ */
+static void do_test_AclBindingFilter() {
+ int i;
+ char errstr[512];
+ rd_kafka_AclBindingFilter_t *new_acl_filter;
+
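+        /* Arrays indexed by enum value: unlike a concrete AclBinding,
+         * an AclBindingFilter also accepts the ANY (and MATCH pattern)
+         * wildcard values. */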
+ rd_bool_t valid_resource_types[] = {rd_false, rd_true, rd_true,
+ rd_true, rd_true, rd_false};
+ rd_bool_t valid_resource_pattern_types[] = {
+ rd_false, rd_true, rd_true, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_operation[] = {
+ rd_false, rd_true, rd_true, rd_true, rd_true, rd_true, rd_true,
+ rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_permission_type[] = {rd_false, rd_true, rd_true,
+ rd_true, rd_false};
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK();
+
+        // Valid acl binding filter
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ NULL, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, NULL, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal,
+ host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_types[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(
+ !new_acl_filter &&
+ !strcmp(errstr, "Invalid resource type"),
+ "expected error string \"Invalid resource type\", "
+ "not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_pattern_types[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(
+ !new_acl_filter &&
+ !strcmp(errstr,
+ "Invalid resource pattern type"),
+ "expected error string \"Invalid resource pattern "
+ "type\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_operation[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(!new_acl_filter &&
+ !strcmp(errstr, "Invalid operation"),
+ "expected error string \"Invalid "
+ "operation\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_permission_type[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(
+ !new_acl_filter &&
+ !strcmp(errstr, "Invalid permission type"),
+                            "expected error string \"Invalid permission "
+                            "type\", not %s",
+ errstr);
+ }
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief CreateAcls tests
+ */
+static void do_test_CreateAcls(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_bool_t with_background_event_cb,
+ rd_bool_t with_options) {
+ rd_kafka_queue_t *q;
+#define MY_NEW_ACLS_CNT 2
+ rd_kafka_AclBinding_t *new_acls[MY_NEW_ACLS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreateAcls_result_t *res;
+ const rd_kafka_acl_result_t **resacls;
+ size_t resacls_cnt;
+ void *my_opaque = NULL, *opaque;
+ const char *principal = "User:test";
+ const char *host = "*";
+
+        SUB_TEST_QUICK("%s CreateAcls with %s, timeout %dms", rd_kafka_name(rk),
+ what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct AclBinding array
+ */
+ for (i = 0; i < MY_NEW_ACLS_CNT; i++) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ new_acls[i] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "CreateAcls");
+ TEST_SAY("Call CreateAcls, timeout is %dms\n", exp_timeout);
+ rd_kafka_CreateAcls(rk, new_acls, MY_NEW_ACLS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "CreateAcls.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "CreateAcls.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("CreateAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_CreateAcls_result(rkev);
+ TEST_ASSERT(res, "expected CreateAcls_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected CreateAcls to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract acls results anyway, should return NULL. */
+ resacls = rd_kafka_CreateAcls_result_acls(res, &resacls_cnt);
+ TEST_ASSERT(!resacls && resacls_cnt == 0,
+ "expected no acl result, got %p cnt %" PRIusz, resacls,
+ resacls_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_AclBinding_destroy_array(new_acls, MY_NEW_ACLS_CNT);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+#undef MY_NEW_ACLS_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief DescribeAcls tests
+ */
+static void do_test_DescribeAcls(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_bool_t with_background_event_cb,
+ rd_bool_t with_options) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AclBindingFilter_t *describe_acls;
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DescribeAcls_result_t *res;
+ const rd_kafka_AclBinding_t **res_acls;
+ size_t res_acls_cnt;
+ void *my_opaque = NULL, *opaque;
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK("%s DescribeAcls with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct AclBindingFilter
+ */
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ describe_acls = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_PREFIXED,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "DescribeAcls");
+ TEST_SAY("Call DescribeAcls, timeout is %dms\n", exp_timeout);
+ rd_kafka_DescribeAcls(rk, describe_acls, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "DescribeAcls.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "DescribeAcls.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DescribeAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DescribeAcls_result(rkev);
+ TEST_ASSERT(res, "expected DescribeAcls_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected DescribeAcls to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract result acls anyway, should return NULL. */
+ res_acls = rd_kafka_DescribeAcls_result_acls(res, &res_acls_cnt);
+ TEST_ASSERT(!res_acls && res_acls_cnt == 0,
+ "expected no result acls, got %p cnt %" PRIusz, res_acls,
+ res_acls_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_AclBinding_destroy(describe_acls);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief DeleteAcls tests
+ */
+static void do_test_DeleteAcls(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_bool_t with_background_event_cb,
+ rd_bool_t with_options) {
+#define DELETE_ACLS_FILTERS_CNT 2
+ rd_kafka_queue_t *q;
+ rd_kafka_AclBindingFilter_t *delete_acls[DELETE_ACLS_FILTERS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteAcls_result_t *res;
+ const rd_kafka_DeleteAcls_result_response_t **res_response;
+ size_t res_response_cnt;
+ void *my_opaque = NULL, *opaque;
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK("%s DeleteAcls with %s, timeout %dms", rd_kafka_name(rk),
+ what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct AclBindingFilter array
+ */
+ for (i = 0; i < DELETE_ACLS_FILTERS_CNT; i++) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ delete_acls[i] = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "DeleteAcls");
+ TEST_SAY("Call DeleteAcls, timeout is %dms\n", exp_timeout);
+ rd_kafka_DeleteAcls(rk, delete_acls, DELETE_ACLS_FILTERS_CNT, options,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "DeleteAcls.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "DeleteAcls.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DeleteAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteAcls_result(rkev);
+ TEST_ASSERT(res, "expected DeleteAcls_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected DeleteAcls to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract result responses anyway, should return NULL. */
+ res_response =
+ rd_kafka_DeleteAcls_result_responses(res, &res_response_cnt);
+ TEST_ASSERT(!res_response && res_response_cnt == 0,
+ "expected no result response, got %p cnt %" PRIusz,
+ res_response, res_response_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_AclBinding_destroy_array(delete_acls, DELETE_ACLS_FILTERS_CNT);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+#undef DELETE_ACLS_FILTERS_CNT
+
+ SUB_TEST_PASS();
+}
+
/**
@@ -946,11 +1560,14 @@ static void do_test_options(rd_kafka_t *rk) {
RD_KAFKA_ADMIN_OP_DELETEGROUPS, \
RD_KAFKA_ADMIN_OP_DELETERECORDS, \
RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \
+ RD_KAFKA_ADMIN_OP_CREATEACLS, \
+ RD_KAFKA_ADMIN_OP_DESCRIBEACLS, \
+ RD_KAFKA_ADMIN_OP_DELETEACLS, \
RD_KAFKA_ADMIN_OP_ANY /* Must be last */ \
}
struct {
const char *setter;
- const rd_kafka_admin_op_t valid_apis[9];
+ const rd_kafka_admin_op_t valid_apis[12];
} matrix[] = {
{"request_timeout", _all_apis},
{"operation_timeout",
@@ -1121,6 +1738,26 @@ static void do_test_apis(rd_kafka_type_t cltype) {
do_test_DeleteConsumerGroupOffsets("temp queue, options", rk, NULL, 1);
do_test_DeleteConsumerGroupOffsets("main queue, options", rk, mainq, 1);
+ do_test_AclBinding();
+ do_test_AclBindingFilter();
+
+ do_test_CreateAcls("temp queue, no options", rk, NULL, rd_false,
+ rd_false);
+ do_test_CreateAcls("temp queue, options", rk, NULL, rd_false, rd_true);
+ do_test_CreateAcls("main queue, options", rk, mainq, rd_false, rd_true);
+
+ do_test_DescribeAcls("temp queue, no options", rk, NULL, rd_false,
+ rd_false);
+ do_test_DescribeAcls("temp queue, options", rk, NULL, rd_false,
+ rd_true);
+ do_test_DescribeAcls("main queue, options", rk, mainq, rd_false,
+ rd_true);
+
+ do_test_DeleteAcls("temp queue, no options", rk, NULL, rd_false,
+ rd_false);
+ do_test_DeleteAcls("temp queue, options", rk, NULL, rd_false, rd_true);
+ do_test_DeleteAcls("main queue, options", rk, mainq, rd_false, rd_true);
+
do_test_mix(rk, mainq);
do_test_configs(rk, mainq);
diff --git a/tests/0081-admin.c b/tests/0081-admin.c
index 00971d3bcc..b362cf5954 100644
--- a/tests/0081-admin.c
+++ b/tests/0081-admin.c
@@ -1086,7 +1086,816 @@ static void do_test_DescribeConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
SUB_TEST_PASS();
}
+/**
+ * @brief Test CreateAcls
+ */
+static void
+do_test_CreateAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
+ rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
+ size_t resacl_cnt;
+ test_timing_t timing;
+ rd_kafka_resp_err_t err;
+ char errstr[128];
+ const char *errstr2;
+ const char *user_test1 = "User:test1";
+ const char *user_test2 = "User:test2";
+ const char *base_topic_name;
+ char topic1_name[512];
+ char topic2_name[512];
+ rd_kafka_AclBinding_t *acl_bindings[2];
+ rd_kafka_ResourcePatternType_t pattern_type_first_topic =
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
+ rd_kafka_AdminOptions_t *admin_options;
+ rd_kafka_event_t *rkev_acl_create;
+ const rd_kafka_CreateAcls_result_t *acl_res;
+ const rd_kafka_acl_result_t **acl_res_acls;
+ unsigned int i;
+
+ SUB_TEST_QUICK();
+
+ if (version == 0)
+ pattern_type_first_topic = RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+
+ base_topic_name = test_mk_topic_name(__FUNCTION__, 1);
+
+ rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name);
+ rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name);
+
+
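+ /* Two ACL bindings: a READ/ALLOW ACL for user_test1 on topic1 using the
+  * version-dependent pattern type (PREFIXED, or LITERAL for older
+  * brokers), and a LITERAL WRITE/ALLOW ACL for user_test2 on topic2. */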
+ acl_bindings[0] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_first_topic,
+ user_test1, "*", RD_KAFKA_ACL_OPERATION_READ,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, NULL, 0);
+ acl_bindings[1] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic2_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, "*",
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+
+
+ admin_options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS);
+ err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ TIMING_START(&timing, "CreateAcls");
+ TEST_SAY("Call CreateAcls\n");
+ rd_kafka_CreateAcls(rk, acl_bindings, 2, admin_options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+ /*
+ * Wait for result
+ */
+ rkev_acl_create = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_CREATEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_create);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_create);
+
+ if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "Expected unsupported feature, not: %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(!strcmp(errstr2,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker "
+ "version >= 0.11.0.0"),
+ "Expected a different message, not: %s", errstr2);
+ TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err));
+ }
+
+ if (version > 0 && test_broker_version < TEST_BRKVER(2, 0, 0, 0)) {
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "Expected unsupported feature, not: %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(!strcmp(errstr2,
+ "Broker only supports LITERAL "
+ "resource pattern types"),
+ "Expected a different message, not: %s", errstr2);
+ TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err));
+ }
+
+ TEST_ASSERT(!err, "Expected success, not %s: %s",
+ rd_kafka_err2name(err), errstr2);
+
+ /*
+ * Extract result
+ */
+ acl_res = rd_kafka_event_CreateAcls_result(rkev_acl_create);
+ TEST_ASSERT(acl_res, "Expected CreateAcls result, not %s",
+ rd_kafka_event_name(rkev_acl_create));
+
+ acl_res_acls = rd_kafka_CreateAcls_result_acls(acl_res, &resacl_cnt);
+ TEST_ASSERT(resacl_cnt == 2, "Expected 2, not %zu", resacl_cnt);
+
+ for (i = 0; i < resacl_cnt; i++) {
+ const rd_kafka_acl_result_t *acl_res_acl = *(acl_res_acls + i);
+ const rd_kafka_error_t *error =
+ rd_kafka_acl_result_error(acl_res_acl);
+
+ TEST_ASSERT(!error,
+ "Expected RD_KAFKA_RESP_ERR_NO_ERROR, not %s",
+ rd_kafka_error_string(error));
+ }
+
+ rd_kafka_AdminOptions_destroy(admin_options);
+ rd_kafka_event_destroy(rkev_acl_create);
+ rd_kafka_AclBinding_destroy_array(acl_bindings, 2);
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test DescribeAcls
+ */
+static void
+do_test_DescribeAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
+ rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
+ size_t acl_binding_results_cntp;
+ test_timing_t timing;
+ rd_kafka_resp_err_t err;
+ uint32_t i;
+ char errstr[128];
+ const char *errstr2;
+ const char *user_test1 = "User:test1";
+ const char *user_test2 = "User:test2";
+ const char *any_host = "*";
+ const char *topic_name;
+ rd_kafka_AclBinding_t *acl_bindings_create[2];
+ rd_kafka_AclBinding_t *acl_bindings_describe;
+ rd_kafka_AclBinding_t *acl;
+ const rd_kafka_DescribeAcls_result_t *acl_describe_result;
+ const rd_kafka_AclBinding_t **acl_binding_results;
+ rd_kafka_ResourcePatternType_t pattern_type_first_topic_create;
+ rd_bool_t broker_version1 =
+ test_broker_version >= TEST_BRKVER(2, 0, 0, 0);
+ rd_kafka_resp_err_t create_err;
+ rd_kafka_AdminOptions_t *admin_options;
+ rd_kafka_event_t *rkev_acl_describe;
+ const rd_kafka_error_t *error;
+
+ SUB_TEST_QUICK();
+
+ if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
+ SUB_TEST_SKIP(
+ "Skipping DESCRIBE_ACLS test on unsupported "
+ "broker version\n");
+ return;
+ }
+
+ pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
+ if (!broker_version1)
+ pattern_type_first_topic_create =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+
+ topic_name = test_mk_topic_name(__FUNCTION__, 1);
+
+ acl_bindings_create[0] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ pattern_type_first_topic_create, user_test1, any_host,
+ RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+ acl_bindings_create[1] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+
+ create_err =
+ test_CreateAcls_simple(rk, NULL, acl_bindings_create, 2, NULL);
+
+ TEST_ASSERT(!create_err, "create error: %s",
+ rd_kafka_err2str(create_err));
+
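+ /* Filter with MATCH pattern type and NULL (any) principal/host,
+  * covering both ACLs created above. Older brokers only support
+  * LITERAL/ANY pattern types and return an error, checked below. */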
+ acl_bindings_describe = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ RD_KAFKA_RESOURCE_PATTERN_MATCH, NULL, NULL,
+ RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL,
+ 0);
+
+ admin_options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS);
+ err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000,
+ errstr, sizeof(errstr));
+
+ TIMING_START(&timing, "DescribeAcls");
+ TEST_SAY("Call DescribeAcls\n");
+ rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_describe = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_describe);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
+
+ if (!broker_version1) {
+ TEST_ASSERT(
+ err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "expected RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, not %s",
+ rd_kafka_err2str(err));
+ TEST_ASSERT(strcmp(errstr2,
+ "Broker only supports LITERAL and ANY "
+ "resource pattern types") == 0,
+ "expected another message, not %s", errstr2);
+ } else {
+ TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ errstr2);
+ }
+
+ if (!err) {
+
+ acl_describe_result =
+ rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
+
+ TEST_ASSERT(acl_describe_result,
+ "acl_describe_result should not be NULL");
+
+ acl_binding_results_cntp = 0;
+ acl_binding_results = rd_kafka_DescribeAcls_result_acls(
+ acl_describe_result, &acl_binding_results_cntp);
+
+ TEST_ASSERT(acl_binding_results_cntp == 2,
+ "acl_binding_results_cntp should be 2, not %zu",
+ acl_binding_results_cntp);
+
+ for (i = 0; i < acl_binding_results_cntp; i++) {
+ acl = (rd_kafka_AclBinding_t *)acl_binding_results[i];
+
+ if (strcmp(rd_kafka_AclBinding_principal(acl),
+ user_test1) == 0) {
+ TEST_ASSERT(
+ rd_kafka_AclBinding_restype(acl) ==
+ RD_KAFKA_RESOURCE_TOPIC,
+ "acl->restype should be "
+ "RD_KAFKA_RESOURCE_TOPIC, not %s",
+ rd_kafka_ResourceType_name(
+ rd_kafka_AclBinding_restype(acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_name(acl),
+ topic_name) == 0,
+ "acl->name should be %s, not %s",
+ topic_name, rd_kafka_AclBinding_name(acl));
+ TEST_ASSERT(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl) == pattern_type_first_topic_create,
+ "acl->resource_pattern_type should be %s, "
+ "not %s",
+ rd_kafka_ResourcePatternType_name(
+ pattern_type_first_topic_create),
+ rd_kafka_ResourcePatternType_name(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_principal(acl),
+ user_test1) == 0,
+ "acl->principal should be %s, not %s",
+ user_test1,
+ rd_kafka_AclBinding_principal(acl));
+
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_host(acl),
+ any_host) == 0,
+ "acl->host should be %s, not %s", any_host,
+ rd_kafka_AclBinding_host(acl));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_operation(acl) ==
+ RD_KAFKA_ACL_OPERATION_READ,
+ "acl->operation should be %s, not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_READ),
+ rd_kafka_AclOperation_name(
+ rd_kafka_AclBinding_operation(acl)));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_permission_type(acl) ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "acl->permission_type should be %s, not %s",
+ rd_kafka_AclPermissionType_name(
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(
+ rd_kafka_AclBinding_permission_type(
+ acl)));
+
+ error = rd_kafka_AclBinding_error(acl);
+ TEST_ASSERT(!error,
+ "acl->error should be NULL, not %s",
+ rd_kafka_error_string(error));
+
+ } else {
+ TEST_ASSERT(
+ rd_kafka_AclBinding_restype(acl) ==
+ RD_KAFKA_RESOURCE_TOPIC,
+ "acl->restype should be "
+ "RD_KAFKA_RESOURCE_TOPIC, not %s",
+ rd_kafka_ResourceType_name(
+ rd_kafka_AclBinding_restype(acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_name(acl),
+ topic_name) == 0,
+ "acl->name should be %s, not %s",
+ topic_name, rd_kafka_AclBinding_name(acl));
+ TEST_ASSERT(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl) ==
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "acl->resource_pattern_type should be %s, "
+ "not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_principal(acl),
+ user_test2) == 0,
+ "acl->principal should be %s, not %s",
+ user_test2,
+ rd_kafka_AclBinding_principal(acl));
+
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_host(acl),
+ any_host) == 0,
+ "acl->host should be %s, not %s", any_host,
+ rd_kafka_AclBinding_host(acl));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_operation(acl) ==
+ RD_KAFKA_ACL_OPERATION_WRITE,
+ "acl->operation should be %s, not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_WRITE),
+ rd_kafka_AclOperation_name(
+ rd_kafka_AclBinding_operation(acl)));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_permission_type(acl) ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "acl->permission_type should be %s, not %s",
+ rd_kafka_AclPermissionType_name(
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(
+ rd_kafka_AclBinding_permission_type(
+ acl)));
+
+
+ error = rd_kafka_AclBinding_error(acl);
+ TEST_ASSERT(!error,
+ "acl->error should be NULL, not %s",
+ rd_kafka_error_string(error));
+ }
+ }
+ }
+
+ rd_kafka_AclBinding_destroy(acl_bindings_describe);
+ rd_kafka_event_destroy(rkev_acl_describe);
+
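+ /* Narrower filter: LITERAL pattern and WRITE operation, so only the
+  * ACL for user_test2 should be returned. */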
+ acl_bindings_describe = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+ NULL, 0);
+
+ TIMING_START(&timing, "DescribeAcls");
+ rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_describe = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_describe);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
+
+ TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ errstr2);
+
+ acl_describe_result =
+ rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
+
+ TEST_ASSERT(acl_describe_result,
+ "acl_describe_result should not be NULL");
+
+ acl_binding_results_cntp = 0;
+ acl_binding_results = rd_kafka_DescribeAcls_result_acls(
+ acl_describe_result, &acl_binding_results_cntp);
+
+ TEST_ASSERT(acl_binding_results_cntp == 1,
+ "acl_binding_results_cntp should be 1, not %zu",
+ acl_binding_results_cntp);
+
+ acl = (rd_kafka_AclBinding_t *)acl_binding_results[0];
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_restype(acl) == RD_KAFKA_RESOURCE_TOPIC,
+ "acl->restype should be RD_KAFKA_RESOURCE_TOPIC, not %s",
+ rd_kafka_ResourceType_name(rd_kafka_AclBinding_restype(acl)));
+ TEST_ASSERT(strcmp(rd_kafka_AclBinding_name(acl), topic_name) == 0,
+ "acl->name should be %s, not %s", topic_name,
+ rd_kafka_AclBinding_name(acl));
+ TEST_ASSERT(rd_kafka_AclBinding_resource_pattern_type(acl) ==
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "acl->resource_pattern_type should be %s, not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(
+ rd_kafka_AclBinding_resource_pattern_type(acl)));
+ TEST_ASSERT(strcmp(rd_kafka_AclBinding_principal(acl), user_test2) == 0,
+ "acl->principal should be %s, not %s", user_test2,
+ rd_kafka_AclBinding_principal(acl));
+
+ TEST_ASSERT(strcmp(rd_kafka_AclBinding_host(acl), any_host) == 0,
+ "acl->host should be %s, not %s", any_host,
+ rd_kafka_AclBinding_host(acl));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_permission_type(acl) ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "acl->permission_type should be %s, not %s",
+ rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(
+ rd_kafka_AclBinding_permission_type(acl)));
+
+ error = rd_kafka_AclBinding_error(acl);
+ TEST_ASSERT(!error, "acl->error should be NULL, not %s",
+ rd_kafka_error_string(error));
+
+ rd_kafka_AclBinding_destroy(acl_bindings_describe);
+ rd_kafka_event_destroy(rkev_acl_describe);
+ rd_kafka_AdminOptions_destroy(admin_options);
+ rd_kafka_AclBinding_destroy_array(acl_bindings_create, 2);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Count the ACLs that match the given ACL binding filter.
+ */
+static size_t
+do_test_acls_count(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_bindings_describe,
+ rd_kafka_queue_t *q) {
+ char errstr[128];
+ rd_kafka_resp_err_t err;
+ rd_kafka_AdminOptions_t *admin_options_describe;
+ rd_kafka_event_t *rkev_acl_describe;
+ const rd_kafka_DescribeAcls_result_t *acl_describe_result;
+ const char *errstr2;
+ size_t acl_binding_results_cntp;
+
+ admin_options_describe =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS);
+ rd_kafka_AdminOptions_set_request_timeout(admin_options_describe, 10000,
+ errstr, sizeof(errstr));
+
+ rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options_describe,
+ q);
+ /*
+ * Wait for result
+ */
+ rkev_acl_describe = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_describe);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
+
+ TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ errstr2);
+
+ acl_describe_result =
+ rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
+
+ TEST_ASSERT(acl_describe_result,
+ "acl_describe_result should not be NULL");
+
+ acl_binding_results_cntp = 0;
+ rd_kafka_DescribeAcls_result_acls(acl_describe_result,
+ &acl_binding_results_cntp);
+ rd_kafka_event_destroy(rkev_acl_describe);
+ rd_kafka_AdminOptions_destroy(admin_options_describe);
+
+ return acl_binding_results_cntp;
+}
+
+/**
+ * @brief Test DeleteAcls
+ */
+static void
+do_test_DeleteAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
+ rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
+ test_timing_t timing;
+ uint32_t i;
+ char errstr[128];
+ const char *user_test1 = "User:test1";
+ const char *user_test2 = "User:test2";
+ const char *any_host = "*";
+ const char *base_topic_name;
+ char topic1_name[512];
+ char topic2_name[512];
+ size_t acl_binding_results_cntp;
+ size_t DeleteAcls_result_responses_cntp;
+ size_t matching_acls_cntp;
+ rd_kafka_AclBinding_t *acl_bindings_create[3];
+ rd_kafka_AclBindingFilter_t *acl_bindings_describe;
+ rd_kafka_AclBindingFilter_t *acl_bindings_delete;
+ rd_kafka_event_t *rkev_acl_delete;
+ rd_kafka_AdminOptions_t *admin_options_delete;
+ const rd_kafka_DeleteAcls_result_t *acl_delete_result;
+ const rd_kafka_DeleteAcls_result_response_t *
+ *DeleteAcls_result_responses;
+ const rd_kafka_DeleteAcls_result_response_t *DeleteAcls_result_response;
+ const rd_kafka_AclBinding_t **matching_acls;
+ const rd_kafka_AclBinding_t *matching_acl;
+ rd_kafka_ResourcePatternType_t pattern_type_first_topic_create;
+ rd_kafka_ResourcePatternType_t pattern_type_delete;
+ rd_bool_t broker_version1 =
+ test_broker_version >= TEST_BRKVER(2, 0, 0, 0);
+ rd_kafka_resp_err_t create_err;
+ rd_kafka_ResourceType_t restype;
+ rd_kafka_ResourcePatternType_t resource_pattern_type;
+ rd_kafka_AclOperation_t operation;
+ rd_kafka_AclPermissionType_t permission_type;
+ const char *name;
+ const char *principal;
+ const rd_kafka_error_t *error;
+
+ SUB_TEST_QUICK();
+
+ if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
+ SUB_TEST_SKIP(
+ "Skipping DELETE_ACLS test on unsupported "
+ "broker version\n");
+ return;
+ }
+
+ pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
+ pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_MATCH;
+ if (!broker_version1) {
+ pattern_type_first_topic_create =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ }
+
+ base_topic_name = test_mk_topic_name(__FUNCTION__, 1);
+
+ rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name);
+ rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name);
+
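+ /* Three ACLs to create: two on topic1 (one with the version-dependent
+  * pattern type, one LITERAL) and one LITERAL ACL on topic2. */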
+ acl_bindings_create[0] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name,
+ pattern_type_first_topic_create, user_test1, any_host,
+ RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+ acl_bindings_create[1] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+ acl_bindings_create[2] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic2_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+
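+ /* Delete filter for topic1 using MATCH (LITERAL on older brokers):
+  * it should match the two ACLs created on topic1 above. */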
+ acl_bindings_delete = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_delete, NULL,
+ NULL, RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+ NULL, 0);
+
+ acl_bindings_describe = acl_bindings_delete;
+
+ create_err =
+ test_CreateAcls_simple(rk, NULL, acl_bindings_create, 3, NULL);
+
+ TEST_ASSERT(!create_err, "create error: %s",
+ rd_kafka_err2str(create_err));
+
+ admin_options_delete =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEACLS);
+ rd_kafka_AdminOptions_set_request_timeout(admin_options_delete, 10000,
+ errstr, sizeof(errstr));
+
+ acl_binding_results_cntp =
+ do_test_acls_count(rk, acl_bindings_describe, q);
+ TEST_ASSERT(acl_binding_results_cntp == 2,
+ "acl_binding_results_cntp should be 2, not %zu\n",
+ acl_binding_results_cntp);
+
+ TIMING_START(&timing, "DeleteAcls");
+ rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_delete = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000);
+
+ acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete);
+
+ TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL");
+
+ DeleteAcls_result_responses_cntp = 0;
+ DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses(
+ acl_delete_result, &DeleteAcls_result_responses_cntp);
+
+ TEST_ASSERT(DeleteAcls_result_responses_cntp == 1,
+ "DeleteAcls_result_responses_cntp should be 1, not %zu\n",
+ DeleteAcls_result_responses_cntp);
+
+ DeleteAcls_result_response = DeleteAcls_result_responses[0];
+
+ TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error(
+ DeleteAcls_result_response));
+
+ matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
+ DeleteAcls_result_response, &matching_acls_cntp);
+
+ TEST_ASSERT(matching_acls_cntp == 2,
+ "matching_acls_cntp should be 2, not %zu\n",
+ matching_acls_cntp);
+
+ for (i = 0; i < matching_acls_cntp; i++) {
+ rd_kafka_ResourceType_t restype;
+ rd_kafka_ResourcePatternType_t resource_pattern_type;
+ rd_kafka_AclOperation_t operation;
+ rd_kafka_AclPermissionType_t permission_type;
+ const char *name;
+ const char *principal;
+
+ matching_acl = matching_acls[i];
+ error = rd_kafka_AclBinding_error(matching_acl);
+ restype = rd_kafka_AclBinding_restype(matching_acl);
+ name = rd_kafka_AclBinding_name(matching_acl);
+ resource_pattern_type =
+ rd_kafka_AclBinding_resource_pattern_type(matching_acl);
+ principal = rd_kafka_AclBinding_principal(matching_acl);
+ operation = rd_kafka_AclBinding_operation(matching_acl);
+ permission_type =
+ rd_kafka_AclBinding_permission_type(matching_acl);
+
+ TEST_ASSERT(!error, "expected success, not %s",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC,
+ "expected RD_KAFKA_RESOURCE_TOPIC not %s",
+ rd_kafka_ResourceType_name(restype));
+ TEST_ASSERT(strcmp(name, topic1_name) == 0,
+ "expected %s not %s", topic1_name, name);
+ TEST_ASSERT(permission_type ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "expected %s not %s",
+ rd_kafka_AclPermissionType_name(
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(permission_type));
+
+ if (strcmp(user_test1, principal) == 0) {
+ TEST_ASSERT(resource_pattern_type ==
+ pattern_type_first_topic_create,
+ "expected %s not %s",
+ rd_kafka_ResourcePatternType_name(
+ pattern_type_first_topic_create),
+ rd_kafka_ResourcePatternType_name(
+ resource_pattern_type));
+
+ TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_READ,
+ "expected %s not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_READ),
+ rd_kafka_AclOperation_name(operation));
+
+ } else {
+ TEST_ASSERT(resource_pattern_type ==
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "expected %s not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(
+ resource_pattern_type));
+
+ TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE,
+ "expected %s not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_WRITE),
+ rd_kafka_AclOperation_name(operation));
+ }
+ }
+
+ acl_binding_results_cntp =
+ do_test_acls_count(rk, acl_bindings_describe, q);
+ TEST_ASSERT(acl_binding_results_cntp == 0,
+ "acl_binding_results_cntp should be 0, not %zu\n",
+ acl_binding_results_cntp);
+
+ rd_kafka_event_destroy(rkev_acl_delete);
+ rd_kafka_AclBinding_destroy(acl_bindings_delete);
+
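+ /* Second delete filter: the remaining LITERAL ACL on topic2,
+  * expecting a single matching ACL for user_test2. */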
+ acl_bindings_delete = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic2_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
+ RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL,
+ 0);
+ acl_bindings_describe = acl_bindings_delete;
+
+ TIMING_START(&timing, "DeleteAcls");
+ rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_delete = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000);
+
+ acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete);
+
+ TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL");
+
+ DeleteAcls_result_responses_cntp = 0;
+ DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses(
+ acl_delete_result, &DeleteAcls_result_responses_cntp);
+
+ TEST_ASSERT(DeleteAcls_result_responses_cntp == 1,
+ "DeleteAcls_result_responses_cntp should be 1, not %zu\n",
+ DeleteAcls_result_responses_cntp);
+
+ DeleteAcls_result_response = DeleteAcls_result_responses[0];
+
+ TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error(
+ DeleteAcls_result_response));
+
+ matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
+ DeleteAcls_result_response, &matching_acls_cntp);
+
+ TEST_ASSERT(matching_acls_cntp == 1,
+ "matching_acls_cntp should be 1, not %zu\n",
+ matching_acls_cntp);
+
+ matching_acl = matching_acls[0];
+ error = rd_kafka_AclBinding_error(matching_acl);
+ restype = rd_kafka_AclBinding_restype(matching_acl);
+ name = rd_kafka_AclBinding_name(matching_acl);
+ resource_pattern_type =
+ rd_kafka_AclBinding_resource_pattern_type(matching_acl);
+ principal = rd_kafka_AclBinding_principal(matching_acl);
+ operation = rd_kafka_AclBinding_operation(matching_acl);
+ permission_type = rd_kafka_AclBinding_permission_type(matching_acl);
+
+ TEST_ASSERT(!error, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC,
+ "expected RD_KAFKA_RESOURCE_TOPIC not %s",
+ rd_kafka_ResourceType_name(restype));
+ TEST_ASSERT(strcmp(name, topic2_name) == 0, "expected %s not %s",
+ topic2_name, name);
+ TEST_ASSERT(
+ permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "expected %s not %s",
+ rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(permission_type));
+ TEST_ASSERT(strcmp(user_test2, principal) == 0, "expected %s not %s",
+ user_test2, principal);
+ TEST_ASSERT(resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "expected %s not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(resource_pattern_type));
+
+ TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE,
+ "expected %s not %s",
+ rd_kafka_AclOperation_name(RD_KAFKA_ACL_OPERATION_WRITE),
+ rd_kafka_AclOperation_name(operation));
+
+ acl_binding_results_cntp =
+ do_test_acls_count(rk, acl_bindings_describe, q);
+ TEST_ASSERT(acl_binding_results_cntp == 0,
+ "acl_binding_results_cntp should be 0, not %zu\n",
+ acl_binding_results_cntp);
+
+ rd_kafka_AclBinding_destroy(acl_bindings_delete);
+ rd_kafka_event_destroy(rkev_acl_delete);
+ rd_kafka_AdminOptions_destroy(admin_options_delete);
+
+ rd_kafka_AclBinding_destroy_array(acl_bindings_create, 3);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
/**
* @brief Verify that an unclean rd_kafka_destroy() does not hang.
@@ -1877,6 +2686,18 @@ static void do_test_apis(rd_kafka_type_t cltype) {
0);
}
+ /* CreateAcls */
+ do_test_CreateAcls(rk, mainq, 0);
+ do_test_CreateAcls(rk, mainq, 1);
+
+ /* DescribeAcls */
+ do_test_DescribeAcls(rk, mainq, 0);
+ do_test_DescribeAcls(rk, mainq, 1);
+
+ /* DeleteAcls */
+ do_test_DeleteAcls(rk, mainq, 0);
+ do_test_DeleteAcls(rk, mainq, 1);
+
/* AlterConfigs */
do_test_AlterConfigs(rk, mainq);
diff --git a/tests/0101-fetch-from-follower.cpp b/tests/0101-fetch-from-follower.cpp
index 0168ac55d3..cc68530011 100644
--- a/tests/0101-fetch-from-follower.cpp
+++ b/tests/0101-fetch-from-follower.cpp
@@ -303,6 +303,7 @@ static void do_fff_test(void) {
if (get_broker_rack_count(replica_ids) != 3) {
Test::Skip("unexpected broker.rack configuration: skipping test.\n");
+ return;
}
/* arrange for the consumer's client.rack to align with a broker that is not
diff --git a/tests/0105-transactions_mock.c b/tests/0105-transactions_mock.c
index 5c8cd3df7b..bd7604c5f5 100644
--- a/tests/0105-transactions_mock.c
+++ b/tests/0105-transactions_mock.c
@@ -227,6 +227,8 @@ static void do_test_txn_recoverable_errors(void) {
rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
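+ /* Wait for the message above to be delivered before continuing. */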
+ rd_kafka_flush(rk, -1);
+
/*
* Produce a message, let it fail with a non-idempo/non-txn
* retryable error
@@ -492,8 +494,12 @@ static void do_test_txn_slow_reinit(rd_bool_t with_sleep) {
* transaction errors, but let the broker-side bumping of the
* producer PID fail with a fencing error.
* Should raise a fatal error.
+ *
+ * @param error_code Which error code InitProducerIdRequest should fail with.
+ * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
+ * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
*/
-static void do_test_txn_fenced_reinit(void) {
+static void do_test_txn_fenced_reinit(rd_kafka_resp_err_t error_code) {
rd_kafka_t *rk;
rd_kafka_mock_cluster_t *mcluster;
rd_kafka_error_t *error;
@@ -502,7 +508,7 @@ static void do_test_txn_fenced_reinit(void) {
char errstr[512];
rd_kafka_resp_err_t fatal_err;
- SUB_TEST_QUICK();
+ SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));
rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
NULL);
@@ -531,8 +537,7 @@ static void do_test_txn_fenced_reinit(void) {
/* Fail the PID reinit */
rd_kafka_mock_broker_push_request_error_rtts(
- mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1,
- RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH, 0);
+ mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);
/* Produce a message, let it fail with a fatal idempo error. */
rd_kafka_mock_push_request_errors(
@@ -683,6 +688,16 @@ static void do_test_txn_endtxn_errors(void) {
rd_true /* abortable */,
rd_false /* !fatal */,
},
+ {
+ /* #11 */
+ 1,
+ {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
+ /* This error is normalized */
+ RD_KAFKA_RESP_ERR__FENCED,
+ rd_false /* !retriable */,
+ rd_false /* !abortable */,
+ rd_true /* fatal */
+ },
{0},
};
int i;
@@ -2618,6 +2633,115 @@ static void do_test_out_of_order_seq(void) {
}
+/**
+ * @brief Verify lossless delivery if a topic disappears from Metadata for a while.
+ *
+ * If a topic is removed from metadata in between transactions, the producer
+ * will remove its partition state for the topic's partitions.
+ * If the same topic later comes back (the same topic instance, not a new
+ * creation), the producer must restore the previously used msgid/BaseSequence
+ * in case the same Epoch is still in use, or messages will be silently lost
+ * because the broker would treat them as legitimate duplicates.
+ *
+ * Reproduction:
+ * 1. produce msgs to topic, commit transaction.
+ * 2. remove topic from metadata
+ * 3. make sure client updates its metadata, which removes the partition
+ * objects.
+ * 4. restore the topic in metadata
+ * 5. produce new msgs to topic, commit transaction.
+ * 6. consume topic. All messages should be accounted for.
+ */
+static void do_test_topic_disappears_for_awhile(void) {
+ rd_kafka_t *rk, *c;
+ rd_kafka_conf_t *c_conf;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *topic = "mytopic";
+ const char *txnid = "myTxnId";
+ test_timing_t timing;
+ int i;
+ int msgcnt = 0;
+ const int partition_cnt = 10;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(
+ &mcluster, txnid, 1, NULL, "batch.num.messages", "3", "linger.ms",
+ "100", "topic.metadata.refresh.interval.ms", "2000", NULL);
+
+ rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
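+ /* Run two transactions: after the first one the topic is hidden
+  * from metadata and then brought back before the second one. */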
+ for (i = 0; i < 2; i++) {
+ int cnt = 3 * 2 * partition_cnt;
+ rd_bool_t remove_topic = (i % 2) == 0;
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ while (cnt-- >= 0) {
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(cnt % partition_cnt),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+ msgcnt++;
+ }
+
+ /* Commit the transaction */
+ TIMING_START(&timing, "commit_transaction(-1)");
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ TIMING_STOP(&timing);
+
+
+
+ if (remove_topic) {
+ /* Make it seem the topic is removed, refresh metadata,
+ * and then make the topic available again. */
+ const rd_kafka_metadata_t *md;
+
+ TEST_SAY("Marking topic as non-existent\n");
+
+ rd_kafka_mock_topic_set_error(
+ mcluster, topic,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+ TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, NULL, &md,
+ tmout_multip(5000)));
+
+ rd_kafka_metadata_destroy(md);
+
+ rd_sleep(2);
+
+ TEST_SAY("Bringing topic back to life\n");
+ rd_kafka_mock_topic_set_error(
+ mcluster, topic, RD_KAFKA_RESP_ERR_NO_ERROR);
+ }
+ }
+
+ TEST_SAY("Verifying messages by consumtion\n");
+ test_conf_init(&c_conf, NULL, 0);
+ test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(c_conf, "bootstrap.servers",
+ rd_kafka_mock_cluster_bootstraps(mcluster));
+ test_conf_set(c_conf, "enable.partition.eof", "true");
+ test_conf_set(c_conf, "auto.offset.reset", "earliest");
+ c = test_create_consumer("mygroup", NULL, c_conf, NULL);
+
+ test_consumer_subscribe(c, topic);
+ test_consumer_poll_exact("consume", c, 0, partition_cnt, 0, msgcnt,
+ rd_true /*exact*/, NULL);
+ rd_kafka_destroy(c);
+
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
int main_0105_transactions_mock(int argc, char **argv) {
if (test_needs_auth()) {
TEST_SKIP("Mock cluster does not support SSL/SASL\n");
@@ -2628,7 +2752,8 @@ int main_0105_transactions_mock(int argc, char **argv) {
do_test_txn_fatal_idempo_errors();
- do_test_txn_fenced_reinit();
+ do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);
+ do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);
do_test_txn_req_cnt();
@@ -2688,5 +2813,7 @@ int main_0105_transactions_mock(int argc, char **argv) {
do_test_out_of_order_seq();
+ do_test_topic_disappears_for_awhile();
+
return 0;
}
diff --git a/tests/0113-cooperative_rebalance.cpp b/tests/0113-cooperative_rebalance.cpp
index 1af06363ae..44743bac93 100644
--- a/tests/0113-cooperative_rebalance.cpp
+++ b/tests/0113-cooperative_rebalance.cpp
@@ -694,6 +694,7 @@ static void a_assign_rapid() {
RdKafka::Conf *pconf;
Test::conf_init(&pconf, NULL, 10);
Test::conf_set(pconf, "bootstrap.servers", bootstraps);
+ Test::conf_set(pconf, "security.protocol", "plaintext");
std::string errstr;
RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
if (!p)
@@ -722,6 +723,7 @@ static void a_assign_rapid() {
RdKafka::Conf *conf;
Test::conf_init(&conf, NULL, 20);
Test::conf_set(conf, "bootstrap.servers", bootstraps);
+ Test::conf_set(conf, "security.protocol", "plaintext");
Test::conf_set(conf, "client.id", __FUNCTION__);
Test::conf_set(conf, "group.id", group_id);
Test::conf_set(conf, "auto.offset.reset", "earliest");
@@ -1978,6 +1980,11 @@ static void n_wildcard() {
static void o_java_interop() {
SUB_TEST();
+ if (*test_conf_get(NULL, "sasl.mechanism") != '\0')
+ SUB_TEST_SKIP(
+ "Cluster is set up for SASL: we won't bother with that "
+ "for the Java client\n");
+
std::string topic_name_1 = Test::mk_topic_name("0113_o_2", 1);
std::string topic_name_2 = Test::mk_topic_name("0113_o_6", 1);
std::string group_name = Test::mk_unique_group_name("0113_o");
@@ -2665,7 +2672,8 @@ static void p_lost_partitions_heartbeat_illegal_generation_test() {
/* Seed the topic with messages */
test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10", NULL);
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
test_conf_init(&conf, NULL, 30);
test_conf_set(conf, "bootstrap.servers", bootstraps);
@@ -2740,11 +2748,13 @@ static void q_lost_partitions_illegal_generation_test(
/* Seed the topic1 with messages */
test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10", NULL);
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
/* Seed the topic2 with messages */
test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10", NULL);
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
test_conf_init(&conf, NULL, 30);
test_conf_set(conf, "bootstrap.servers", bootstraps);
@@ -2827,7 +2837,8 @@ static void r_lost_partitions_commit_illegal_generation_test_local() {
/* Seed the topic with messages */
test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers",
- bootstraps, "batch.num.messages", "10", NULL);
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
test_conf_init(&conf, NULL, 30);
test_conf_set(conf, "bootstrap.servers", bootstraps);
diff --git a/tests/0126-oauthbearer_oidc.c b/tests/0126-oauthbearer_oidc.c
index 56eea3f08b..8eb1870684 100644
--- a/tests/0126-oauthbearer_oidc.c
+++ b/tests/0126-oauthbearer_oidc.c
@@ -31,66 +31,183 @@
* is built from within the librdkafka source tree and thus differs. */
#include "rdkafka.h" /* for Kafka driver */
-
+static rd_bool_t error_seen;
/**
- * @brief After config OIDC fields, make sure the producer gets created
- * successfully.
+ * @brief After configuring OIDC, make sure the producer and consumer
+ * work successfully.
*
*/
-static void do_test_create_producer() {
+static void
+do_test_produce_consumer_with_OIDC(const rd_kafka_conf_t *base_conf) {
const char *topic;
uint64_t testid;
- rd_kafka_t *rk;
+ rd_kafka_t *p1;
+ rd_kafka_t *c1;
rd_kafka_conf_t *conf;
- rd_kafka_conf_res_t res;
- char errstr[512];
-
- SUB_TEST("Test producer with oidc configuration");
- test_conf_init(&conf, NULL, 60);
+ const char *url = test_getenv("VALID_OIDC_URL", NULL);
- res = rd_kafka_conf_set(conf, "sasl.oauthbearer.method", "oidc", errstr,
- sizeof(errstr));
+ SUB_TEST("Test producer and consumer with oidc configuration");
- if (res == RD_KAFKA_CONF_INVALID) {
- rd_kafka_conf_destroy(conf);
- TEST_SKIP("%s\n", errstr);
+ if (!url) {
+ SUB_TEST_SKIP(
+ "VALID_OIDC_URL environment variable is not set\n");
return;
}
- if (res != RD_KAFKA_CONF_OK)
- TEST_FAIL("%s", errstr);
-
- test_conf_set(conf, "sasl.oauthbearer.client.id", "randomuniqclientid");
- test_conf_set(conf, "sasl.oauthbearer.client.secret",
- "randomuniqclientsecret");
- test_conf_set(conf, "sasl.oauthbearer.client.secret",
- "randomuniqclientsecret");
- test_conf_set(conf, "sasl.oauthbearer.extensions",
- "supportFeatureX=true");
- test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url",
- "https://localhost:1/token");
+ conf = rd_kafka_conf_dup(base_conf);
+ test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", url);
testid = test_id_generate();
+
rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
- rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
topic = test_mk_topic_name("0126-oauthbearer_oidc", 1);
- test_create_topic(rk, topic, 1, 1);
+ test_create_topic(p1, topic, 1, 3);
+ TEST_SAY("Topic: %s is created\n", topic);
+
+ test_produce_msgs2(p1, topic, testid, 0, 0, 1, NULL, 0);
+
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ test_consumer_subscribe(c1, topic);
+
+ /* Give it some time to trigger the token refresh. */
+ rd_usleep(5 * 1000 * 1000, NULL);
+ test_consumer_poll("OIDC.C1", c1, testid, 1, -1, 1, NULL);
+
+ test_consumer_close(c1);
+
+ rd_kafka_destroy(p1);
+ rd_kafka_destroy(c1);
+ SUB_TEST_PASS();
+}
+
+
+static void
+auth_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ if (err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) {
+ TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
+ reason);
+ error_seen = rd_true;
+ } else
+ TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
+ reason);
+ rd_kafka_yield(rk);
+}
+
+
+/**
+ * @brief After configuring OIDC, if the token is expired, make sure
+ * the authentication fails as expected.
+ *
+ */
+static void do_test_produce_consumer_with_OIDC_expired_token_should_fail(
+ const rd_kafka_conf_t *base_conf) {
+ rd_kafka_t *c1;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+
+ const char *expired_url = test_getenv("EXPIRED_TOKEN_OIDC_URL", NULL);
+
+ SUB_TEST("Test OAUTHBEARER/OIDC failing with expired JWT");
+
+ if (!expired_url) {
+ SUB_TEST_SKIP(
+ "EXPIRED_TOKEN_OIDC_URL environment variable is not set\n");
+ return;
+ }
+
+ conf = rd_kafka_conf_dup(base_conf);
+
+ error_seen = rd_false;
+ test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", expired_url);
+
+ rd_kafka_conf_set_error_cb(conf, auth_error_cb);
+
+ testid = test_id_generate();
- /* Produce messages */
- test_produce_msgs2(rk, topic, testid, 1, 0, 0, NULL, 0);
+ c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL);
- /* Verify messages were actually produced by consuming them back. */
- test_consume_msgs_easy(topic, topic, 0, 1, 1, NULL);
+ test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000);
+ TEST_ASSERT(error_seen);
- rd_kafka_destroy(rk);
+ test_consumer_close(c1);
+ rd_kafka_destroy(c1);
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief After configuring OIDC, if the token is not valid, make sure the
+ * authentication fails as expected.
+ *
+ */
+static void do_test_produce_consumer_with_OIDC_should_fail(
+ const rd_kafka_conf_t *base_conf) {
+ rd_kafka_t *c1;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+
+ const char *invalid_url = test_getenv("INVALID_OIDC_URL", NULL);
+
+ SUB_TEST("Test OAUTHBEARER/OIDC failing with invalid JWT");
+
+ if (!invalid_url) {
+ SUB_TEST_SKIP(
+ "INVALID_OIDC_URL environment variable is not set\n");
+ return;
+ }
+
+ conf = rd_kafka_conf_dup(base_conf);
+
+ error_seen = rd_false;
+
+ test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", invalid_url);
+ rd_kafka_conf_set_error_cb(conf, auth_error_cb);
+
+ testid = test_id_generate();
+
+ c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL);
+
+ test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000);
+
+ TEST_ASSERT(error_seen);
+
+ test_consumer_close(c1);
+ rd_kafka_destroy(c1);
SUB_TEST_PASS();
}
int main_0126_oauthbearer_oidc(int argc, char **argv) {
- do_test_create_producer();
+ rd_kafka_conf_t *conf;
+ const char *sec;
+ const char *oidc;
+
+ test_conf_init(&conf, NULL, 60);
+
+ sec = test_conf_get(conf, "security.protocol");
+ if (!strstr(sec, "sasl")) {
+ TEST_SKIP("Apache Kafka cluster not configured for SASL\n");
+ return 0;
+ }
+
+ oidc = test_conf_get(conf, "sasl.oauthbearer.method");
+ if (rd_strcasecmp(oidc, "OIDC")) {
+ TEST_SKIP("`sasl.oauthbearer.method=OIDC` is required\n");
+ return 0;
+ }
+
+ do_test_produce_consumer_with_OIDC(conf);
+ do_test_produce_consumer_with_OIDC_should_fail(conf);
+ do_test_produce_consumer_with_OIDC_expired_token_should_fail(conf);
+
+ rd_kafka_conf_destroy(conf);
+
return 0;
}
diff --git a/tests/0128-sasl_callback_queue.cpp b/tests/0128-sasl_callback_queue.cpp
index 6f7298f20c..784f09bf60 100644
--- a/tests/0128-sasl_callback_queue.cpp
+++ b/tests/0128-sasl_callback_queue.cpp
@@ -32,13 +32,18 @@
* a non-polling API after client creation.
*/
#include "testcpp.h"
-
+#include "rdatomic.h"
namespace {
/* Provide our own token refresh callback */
class MyCb : public RdKafka::OAuthBearerTokenRefreshCb {
public:
- MyCb() : called(false) {
+ MyCb() {
+ rd_atomic32_init(&called_, 0);
+ }
+
+ bool called() {
+ return rd_atomic32_get(&called_) > 0;
}
void oauthbearer_token_refresh_cb(RdKafka::Handle *handle,
@@ -46,11 +51,11 @@ class MyCb : public RdKafka::OAuthBearerTokenRefreshCb {
handle->oauthbearer_set_token_failure(
"Not implemented by this test, "
"but that's okay");
- called = true;
+ rd_atomic32_add(&called_, 1);
Test::Say("Callback called!\n");
}
- bool called;
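+ /* Use an atomic counter: the refresh callback may be invoked from a
+  * different thread than the one checking called(). */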
+ rd_atomic32_t called_;
};
}; // namespace
@@ -94,10 +99,10 @@ static void do_test(bool use_background_queue) {
"Expected clusterid() to fail since the token was not set");
if (expect_called)
- TEST_ASSERT(mycb.called,
+ TEST_ASSERT(mycb.called(),
"Expected refresh callback to have been called by now");
else
- TEST_ASSERT(!mycb.called,
+ TEST_ASSERT(!mycb.called(),
"Did not expect refresh callback to have been called");
delete p;
@@ -107,6 +112,11 @@ static void do_test(bool use_background_queue) {
extern "C" {
int main_0128_sasl_callback_queue(int argc, char **argv) {
+ if (!test_check_builtin("sasl_oauthbearer")) {
+ Test::Skip("Test requires OAUTHBEARER support\n");
+ return 0;
+ }
+
do_test(true);
do_test(false);
diff --git a/tests/0129-fetch_aborted_msgs.c b/tests/0129-fetch_aborted_msgs.c
new file mode 100644
index 0000000000..cc150feccb
--- /dev/null
+++ b/tests/0129-fetch_aborted_msgs.c
@@ -0,0 +1,78 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @brief Verify that a FetchResponse containing only aborted messages does not
+ * raise an ERR_MSG_SIZE_TOO_LARGE error. #2993.
+ *
+ * 1. Create a topic with a small message.max.bytes to make sure that
+ * there's at least one full fetch response without any control messages,
+ * just aborted messages.
+ * 2. Transactionally produce 10x the message.max.bytes.
+ * 3. Abort the transaction.
+ * 4. Consume from start, verify that no error is received, wait for EOF.
+ *
+ */
+int main_0129_fetch_aborted_msgs(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ const char *topic = test_mk_topic_name("0129_fetch_aborted_msgs", 1);
+ const int msgcnt = 1000;
+ const size_t msgsize = 1000;
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "linger.ms", "10000");
+ test_conf_set(conf, "transactional.id", topic);
+ test_conf_set(conf, "message.max.bytes", "10000");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
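+ /* Create a single-partition topic with small max.message.bytes and
+  * segment.bytes so that a fetch response can consist solely of
+  * aborted messages. */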
+ test_admin_create_topic(rk, topic, 1, 1,
+ (const char *[]) {"max.message.bytes", "10000",
+ "segment.bytes", "20000",
+ NULL});
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /* Produce messages without waiting for delivery. */
+ test_produce_msgs2(rk, topic, 0, 0, 0, msgcnt, NULL, msgsize);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ rd_kafka_destroy(rk);
+
+ /* Consume from the start: expect no messages and no errors, only EOF. */
+ test_consume_msgs_easy(topic, topic, 0, 1, 0, NULL);
+
+ return 0;
+}
diff --git a/tests/0130-store_offsets.c b/tests/0130-store_offsets.c
new file mode 100644
index 0000000000..9fb8d2350a
--- /dev/null
+++ b/tests/0130-store_offsets.c
@@ -0,0 +1,127 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * Verify that offsets_store() is not allowed for unassigned partitions,
+ * and that those offsets are not committed.
+ */
+static void do_test_store_unassigned(void) {
+ const char *topic = test_mk_topic_name("0130_store_unassigned", 1);
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ rd_kafka_topic_partition_list_t *parts;
+ rd_kafka_resp_err_t err;
+ rd_kafka_message_t *rkmessage;
+ const int64_t proper_offset = 900, bad_offset = 300;
+
+ SUB_TEST_QUICK();
+
+ test_produce_msgs_easy(topic, 0, 0, 1000);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.offset.store", "false");
+ test_conf_set(conf, "enable.partition.eof", "true");
+
+ c = test_create_consumer(topic, NULL, conf, NULL);
+
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, 0);
+ TEST_CALL_ERR__(rd_kafka_assign(c, parts));
+
+ TEST_SAY("Consume one message\n");
+ test_consumer_poll_once(c, NULL, tmout_multip(3000));
+
+ parts->elems[0].offset = proper_offset;
+ TEST_SAY("Storing offset %" PRId64 " while assigned: should succeed\n",
+ parts->elems[0].offset);
+ TEST_CALL_ERR__(rd_kafka_offsets_store(c, parts));
+
+ TEST_SAY("Committing\n");
+ TEST_CALL_ERR__(rd_kafka_commit(c, NULL, rd_false /*sync*/));
+
+ TEST_SAY("Unassigning partitions and trying to store again\n");
+ TEST_CALL_ERR__(rd_kafka_assign(c, NULL));
+
+ parts->elems[0].offset = bad_offset;
+ TEST_SAY("Storing offset %" PRId64 " while unassigned: should fail\n",
+ parts->elems[0].offset);
+ err = rd_kafka_offsets_store(c, parts);
+ TEST_ASSERT_LATER(err != RD_KAFKA_RESP_ERR_NO_ERROR,
+ "Expected offsets_store() to fail");
+ TEST_ASSERT(parts->cnt == 1);
+
+ TEST_ASSERT(parts->elems[0].err == RD_KAFKA_RESP_ERR__STATE,
+ "Expected %s [%" PRId32
+ "] to fail with "
+ "_STATE, not %s",
+ parts->elems[0].topic, parts->elems[0].partition,
+ rd_kafka_err2name(parts->elems[0].err));
+
+ TEST_SAY("Committing: should fail\n");
+ err = rd_kafka_commit(c, NULL, rd_false /*sync*/);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__NO_OFFSET,
+ "Expected commit() to fail with NO_OFFSET, not %s",
+ rd_kafka_err2name(err));
+
+ TEST_SAY("Assigning partition again\n");
+ parts->elems[0].offset = RD_KAFKA_OFFSET_INVALID; /* Use committed */
+ TEST_CALL_ERR__(rd_kafka_assign(c, parts));
+
+ TEST_SAY("Consuming message to verify committed offset\n");
+ rkmessage = rd_kafka_consumer_poll(c, tmout_multip(3000));
+ TEST_ASSERT(rkmessage != NULL, "Expected message");
+ TEST_SAY("Consumed message with offset %" PRId64 "\n",
+ rkmessage->offset);
+ TEST_ASSERT(!rkmessage->err, "Expected proper message, not error %s",
+ rd_kafka_message_errstr(rkmessage));
+ TEST_ASSERT(rkmessage->offset == proper_offset,
+ "Expected first message to be properly stored "
+ "offset %" PRId64 ", not %" PRId64,
+ proper_offset, rkmessage->offset);
+
+ rd_kafka_message_destroy(rkmessage);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ rd_kafka_consumer_close(c);
+ rd_kafka_destroy(c);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0130_store_offsets(int argc, char **argv) {
+
+ do_test_store_unassigned();
+
+ return 0;
+}
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 34422b9375..9ae112ad20 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -119,6 +119,8 @@ set(
0125-immediate_flush.c
0126-oauthbearer_oidc.c
0128-sasl_callback_queue.cpp
+ 0129-fetch_aborted_msgs.c
+ 0130-store_offsets.c
8000-idle.cpp
test.c
testcpp.cpp
diff --git a/tests/LibrdkafkaTestApp.py b/tests/LibrdkafkaTestApp.py
index d1e0df1919..cbe2c4f96d 100644
--- a/tests/LibrdkafkaTestApp.py
+++ b/tests/LibrdkafkaTestApp.py
@@ -10,6 +10,7 @@
from trivup.apps.ZookeeperApp import ZookeeperApp
from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
from trivup.apps.KerberosKdcApp import KerberosKdcApp
+from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
import json
@@ -66,10 +67,36 @@ def __init__(self, cluster, version, conf=None,
elif mech == 'OAUTHBEARER':
self.security_protocol = 'SASL_PLAINTEXT'
- conf_blob.append('enable.sasl.oauthbearer.unsecure.jwt=true\n')
- conf_blob.append(
- 'sasl.oauthbearer.config=%s\n' %
- self.conf.get('sasl_oauthbearer_config'))
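+ # Prefer the OIDC app's token endpoint configuration when one is
+ # running in the cluster, otherwise fall back to unsecured JWTs.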
+ oidc = cluster.find_app(OauthbearerOIDCApp)
+ if oidc is not None:
+ conf_blob.append('sasl.oauthbearer.method=%s\n' %
+ oidc.conf.get('sasl_oauthbearer_method'))
+ conf_blob.append('sasl.oauthbearer.client.id=%s\n' %
+ oidc.conf.get(
+ 'sasl_oauthbearer_client_id'))
+ conf_blob.append('sasl.oauthbearer.client.secret=%s\n' %
+ oidc.conf.get(
+ 'sasl_oauthbearer_client_secret'))
+ conf_blob.append('sasl.oauthbearer.extensions=%s\n' %
+ oidc.conf.get(
+ 'sasl_oauthbearer_extensions'))
+ conf_blob.append('sasl.oauthbearer.scope=%s\n' %
+ oidc.conf.get('sasl_oauthbearer_scope'))
+ conf_blob.append('sasl.oauthbearer.token.endpoint.url=%s\n'
+ % oidc.conf.get('valid_url'))
+ self.env_add('VALID_OIDC_URL', oidc.conf.get('valid_url'))
+ self.env_add(
+ 'INVALID_OIDC_URL',
+ oidc.conf.get('badformat_url'))
+ self.env_add(
+ 'EXPIRED_TOKEN_OIDC_URL',
+ oidc.conf.get('expired_url'))
+ else:
+ conf_blob.append(
+ 'enable.sasl.oauthbearer.unsecure.jwt=true\n')
+ conf_blob.append(
+ 'sasl.oauthbearer.config=%s\n' %
+ self.conf.get('sasl_oauthbearer_config'))
elif mech == 'GSSAPI':
self.security_protocol = 'SASL_PLAINTEXT'
diff --git a/tests/Makefile b/tests/Makefile
index 1fdb17d930..73eab21406 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -12,9 +12,9 @@ CXXFLAGS += -I../src -I../src-cpp
LDFLAGS += -rdynamic -L../src -L../src-cpp
# Latest Kafka version
-KAFKA_VERSION?=2.7.0
+KAFKA_VERSION?=3.1.0
# Kafka versions for compatibility tests
-COMPAT_KAFKA_VERSIONS?=0.8.2.2 0.9.0.1 0.11.0.3 1.0.2 2.4.1 $(KAFKA_VERSION)
+COMPAT_KAFKA_VERSIONS?=0.8.2.2 0.9.0.1 0.11.0.3 1.0.2 2.4.1 2.8.1 $(KAFKA_VERSION)
# Non-default scenarios (FIXME: read from scenarios/*)
SCENARIOS?=noautocreate ak23
diff --git a/tests/autotest.sh b/tests/autotest.sh
index cacd34392e..9d17706f38 100755
--- a/tests/autotest.sh
+++ b/tests/autotest.sh
@@ -20,8 +20,8 @@ pushd tests
[[ -d _venv ]] || virtualenv _venv
source _venv/bin/activate
-# Install trivup that is used to bring up a cluster.
-pip3 install -U trivup
+# Install the requirements
+pip3 install -U -r requirements.txt
# Run tests that automatically spin up their clusters
export KAFKA_VERSION
diff --git a/tests/cluster_testing.py b/tests/cluster_testing.py
index 3136f33307..cfdc08db63 100755
--- a/tests/cluster_testing.py
+++ b/tests/cluster_testing.py
@@ -12,6 +12,7 @@
from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
from trivup.apps.KerberosKdcApp import KerberosKdcApp
from trivup.apps.SslApp import SslApp
+from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
import os
import sys
@@ -70,6 +71,12 @@ def __init__(self, version, conf={}, num_brokers=3, debug=False,
# and keytabs are available at the time of Kafka config generation.
kdc.start()
+ if 'OAUTHBEARER'.casefold() == \
+ defconf.get('sasl_mechanisms', "").casefold() and \
+ 'OIDC'.casefold() == \
+ defconf.get('sasl_oauthbearer_method', "").casefold():
+ self.oidc = OauthbearerOIDCApp(self)
+
# Brokers
defconf.update({'replication_factor': min(num_brokers, 3),
'version': version,
diff --git a/tests/interactive_broker_version.py b/tests/interactive_broker_version.py
index 2283f88ca1..54067f24ee 100755
--- a/tests/interactive_broker_version.py
+++ b/tests/interactive_broker_version.py
@@ -12,6 +12,7 @@
from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
from trivup.apps.KerberosKdcApp import KerberosKdcApp
from trivup.apps.SslApp import SslApp
+from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
from cluster_testing import read_scenario_conf
@@ -42,6 +43,9 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)
+ if conf.get('sasl_oauthbearer_method') == 'OIDC':
+ oidc = OauthbearerOIDCApp(cluster)
+
# Enable SSL if desired
if 'SSL' in conf.get('security.protocol', ''):
cluster.ssl = SslApp(cluster, conf)
@@ -100,12 +104,36 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
break
elif mech == 'OAUTHBEARER':
security_protocol = 'SASL_PLAINTEXT'
- os.write(
- fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode(
- 'ascii')))
- os.write(fd, ('sasl.oauthbearer.config=%s\n' %
- 'scope=requiredScope principal=admin').encode(
- 'ascii'))
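+ # When the OIDC method is selected, write the OIDC client
+ # configuration, otherwise fall back to unsecured JWTs.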
+ if defconf.get('sasl_oauthbearer_method') == 'OIDC':
+ os.write(
+ fd, ('sasl.oauthbearer.method=OIDC\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.client.id=123\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.client.secret=abc\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.extensions=\
+ ExtensionworkloadIdentity=develC348S,\
+ Extensioncluster=lkc123\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.scope=test\n'.encode(
+ 'ascii')))
+ cmd_env['VALID_OIDC_URL'] = oidc.conf.get('valid_url')
+ cmd_env['INVALID_OIDC_URL'] = oidc.conf.get('badformat_url')
+ cmd_env['EXPIRED_TOKEN_OIDC_URL'] = oidc.conf.get(
+ 'expired_url')
+
+ else:
+ os.write(
+ fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode(
+ 'ascii')))
+ os.write(fd, ('sasl.oauthbearer.config=%s\n' %
+ 'scope=requiredScope principal=admin').encode(
+ 'ascii'))
else:
print(
'# FIXME: SASL %s client config not written to %s' %
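
For reference, the properties written to the test config file in the OIDC branch are the standard librdkafka OIDC client properties; a hand-configured client would set the same keys. A minimal sketch, assuming placeholder values (the token endpoint URL is not written to the file above but supplied to the tests via $VALID_OIDC_URL, so the URL below is made up):

#include <librdkafka/rdkafka.h>
#include <stdio.h>

/* Sketch: configure a librdkafka client for SASL/OAUTHBEARER with the
 * OIDC token-endpoint method.  All values are placeholders. */
static rd_kafka_conf_t *make_oidc_conf(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        const char *props[][2] = {
            {"security.protocol", "SASL_PLAINTEXT"},
            {"sasl.mechanisms", "OAUTHBEARER"},
            {"sasl.oauthbearer.method", "OIDC"},
            {"sasl.oauthbearer.client.id", "123"},
            {"sasl.oauthbearer.client.secret", "abc"},
            {"sasl.oauthbearer.scope", "test"},
            {"sasl.oauthbearer.extensions",
             "ExtensionworkloadIdentity=develC348S,Extensioncluster=lkc123"},
            /* The test harness gets this from $VALID_OIDC_URL. */
            {"sasl.oauthbearer.token.endpoint.url",
             "https://example.com/retrieve/token"},
        };
        size_t i;

        for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                if (rd_kafka_conf_set(conf, props[i][0], props[i][1], errstr,
                                      sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%s: %s\n", props[i][0], errstr);
                        rd_kafka_conf_destroy(conf);
                        return NULL;
                }
        }
        return conf;
}
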
@@ -283,6 +311,13 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
type=str,
default=None,
help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)')
+ parser.add_argument(
+ '--oauthbearer-method',
+ dest='sasl_oauthbearer_method',
+ type=str,
+ default=None,
+ help='OAUTHBEARER/OIDC method (DEFAULT, OIDC), \
+              requires --sasl OAUTHBEARER')
args = parser.parse_args()
if args.conf is not None:
@@ -303,10 +338,19 @@ def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
!= -1) and 'sasl_users' not in args.conf:
args.conf['sasl_users'] = 'testuser=testpass'
args.conf['sasl_mechanisms'] = args.sasl
+ retcode = 0
+ if args.sasl_oauthbearer_method:
+ if args.sasl_oauthbearer_method == "OIDC" and \
+ args.conf['sasl_mechanisms'] != 'OAUTHBEARER':
+                print('With `--oauthbearer-method=OIDC`, '
+ '`--sasl` must be set to `OAUTHBEARER`')
+ retcode = 3
+ sys.exit(retcode)
+ args.conf['sasl_oauthbearer_method'] = \
+ args.sasl_oauthbearer_method
args.conf.get('conf', list()).append("log.retention.bytes=1000000000")
- retcode = 0
for version in args.versions:
r = test_version(version, cmd=args.cmd, deploy=args.deploy,
conf=args.conf, debug=args.debug,
diff --git a/tests/librdkafka.suppressions b/tests/librdkafka.suppressions
index 4340f1d803..6259dadb1b 100644
--- a/tests/librdkafka.suppressions
+++ b/tests/librdkafka.suppressions
@@ -443,3 +443,41 @@
fun:rd_atomic64_get
}
+{
+ osx_dyld_img
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ fun:strdup
+ fun:__si_module_static_ds_block_invoke
+ fun:_dispatch_client_callout
+ fun:_dispatch_once_callout
+ fun:si_module_static_ds
+ fun:si_module_with_name
+ fun:si_module_config_modules_for_category
+ fun:__si_module_static_search_block_invoke
+ fun:_dispatch_client_callout
+ fun:_dispatch_once_callout
+ fun:si_module_static_search
+ fun:si_module_with_name
+ fun:si_search
+ fun:getpwuid_r
+ fun:_CFRuntimeBridgeClasses
+ fun:__CFInitialize
+ fun:_ZN16ImageLoaderMachO11doImageInitERKN11ImageLoader11LinkContextE
+ fun:_ZN16ImageLoaderMachO16doInitializationERKN11ImageLoader11LinkContextE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader19processInitializersERKNS_11LinkContextEjRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader15runInitializersERKNS_11LinkContextERNS_21InitializerTimingListE
+ fun:_ZN4dyld24initializeMainExecutableEv
+ fun:_ZN4dyld5_mainEPK12macho_headermiPPKcS5_S5_Pm
+ fun:_ZN13dyldbootstrap5startEPKN5dyld311MachOLoadedEiPPKcS3_Pm
+ fun:_dyld_start
+}
diff --git a/tests/sasl_test.py b/tests/sasl_test.py
index fef02e0509..9cb7d194a1 100755
--- a/tests/sasl_test.py
+++ b/tests/sasl_test.py
@@ -121,6 +121,9 @@ def handle_report(report, version, suite):
parser.add_argument('--no-sasl', action='store_false', dest='sasl',
default=True,
help='Don\'t run SASL tests')
+ parser.add_argument('--no-oidc', action='store_false', dest='oidc',
+ default=True,
+ help='Don\'t run OAuth/OIDC tests')
parser.add_argument('--no-plaintext', action='store_false',
dest='plaintext', default=True,
help='Don\'t run PLAINTEXT tests')
@@ -130,6 +133,8 @@ def handle_report(report, version, suite):
parser.add_argument('--debug', action='store_true', dest='debug',
default=False,
help='Enable trivup debugging')
+ parser.add_argument('--suite', type=str, default=None,
+ help='Only run matching suite(s) (substring match)')
parser.add_argument('versions', type=str, default=None,
nargs='*', help='Limit broker versions to these')
args = parser.parse_args()
@@ -155,7 +160,10 @@ def handle_report(report, version, suite):
versions.append(
(v, ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']))
else:
- versions = [('2.1.0', ['OAUTHBEARER', 'GSSAPI']),
+ versions = [('3.1.0',
+ ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']),
+ ('2.1.0',
+ ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']),
('0.10.2.0', ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI']),
('0.9.0.1', ['GSSAPI']),
('0.8.2.2', [])]
@@ -169,6 +177,8 @@ def handle_report(report, version, suite):
sasl_oauthbearer_conf = {'sasl_mechanisms': 'OAUTHBEARER',
'sasl_oauthbearer_config':
'scope=requiredScope principal=admin'}
+ sasl_oauth_oidc_conf = {'sasl_mechanisms': 'OAUTHBEARER',
+ 'sasl_oauthbearer_method': 'OIDC'}
sasl_kerberos_conf = {'sasl_mechanisms': 'GSSAPI',
'sasl_servicename': 'kafka'}
suites = [{'name': 'SASL PLAIN',
@@ -208,6 +218,13 @@ def handle_report(report, version, suite):
'rdkconf': {'sasl_oauthbearer_config': 'scope=wrongScope'},
'tests': ['0001'],
'expect_fail': ['all']},
+ {'name': 'OAuth/OIDC',
+ 'run': args.oidc,
+ 'tests': ['0001', '0126'],
+ 'conf': sasl_oauth_oidc_conf,
+ 'minver': '3.1.0',
+ 'expect_fail': ['2.8.1', '2.1.0', '0.10.2.0',
+ '0.9.0.1', '0.8.2.2']},
{'name': 'SASL Kerberos',
'run': args.sasl,
'conf': sasl_kerberos_conf,
@@ -224,6 +241,19 @@ def handle_report(report, version, suite):
if not suite.get('run', True):
continue
+ if args.suite is not None and suite['name'].find(args.suite) == -1:
+ print(
+ f'# Skipping {suite["name"]} due to --suite {args.suite}')
+ continue
+
+ if 'minver' in suite:
+ minver = [int(x) for x in suite['minver'].split('.')][:3]
+ this_version = [int(x) for x in version.split('.')][:3]
+ if this_version < minver:
+ print(
+ f'# Skipping {suite["name"]} due to version {version} < minimum required version {suite["minver"]}') # noqa: E501
+ continue
+
_conf = conf.copy()
_conf.update(suite.get('conf', {}))
_rdkconf = _conf.copy()
diff --git a/tests/test.c b/tests/test.c
index 20b6d06710..40c35acbfa 100644
--- a/tests/test.c
+++ b/tests/test.c
@@ -125,6 +125,7 @@ _TEST_DECL(0019_list_groups);
_TEST_DECL(0020_destroy_hang);
_TEST_DECL(0021_rkt_destroy);
_TEST_DECL(0022_consume_batch);
+_TEST_DECL(0022_consume_batch_local);
_TEST_DECL(0025_timers);
_TEST_DECL(0026_consume_pause);
_TEST_DECL(0028_long_topicnames);
@@ -235,6 +236,8 @@ _TEST_DECL(0124_openssl_invalid_engine);
_TEST_DECL(0125_immediate_flush);
_TEST_DECL(0126_oauthbearer_oidc);
_TEST_DECL(0128_sasl_callback_queue);
+_TEST_DECL(0129_fetch_aborted_msgs);
+_TEST_DECL(0130_store_offsets);
/* Manual tests */
_TEST_DECL(8000_idle);
@@ -298,6 +301,7 @@ struct test tests[] = {
_TEST(0020_destroy_hang, 0, TEST_BRKVER(0, 9, 0, 0)),
_TEST(0021_rkt_destroy, 0),
_TEST(0022_consume_batch, 0),
+ _TEST(0022_consume_batch_local, TEST_F_LOCAL),
_TEST(0025_timers, TEST_F_LOCAL),
_TEST(0026_consume_pause,
TEST_F_KNOWN_ISSUE,
@@ -469,8 +473,10 @@ struct test tests[] = {
_TEST(0123_connections_max_idle, 0),
_TEST(0124_openssl_invalid_engine, TEST_F_LOCAL),
_TEST(0125_immediate_flush, 0),
- _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3, 0, 0, 0)),
+ _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3, 1, 0, 0)),
_TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2, 0, 0, 0)),
+ _TEST(0129_fetch_aborted_msgs, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0130_store_offsets, 0),
/* Manual tests */
_TEST(8000_idle, TEST_F_MANUAL),
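
0126_oauthbearer_oidc is now gated on broker 3.1.0. For readers unfamiliar with the gate: as far as I can tell from testshared.h, TEST_BRKVER simply packs the four version components into one integer, so the check reduces to an integer comparison against test_broker_version. A small sketch under that assumption:

/* Sketch, mirroring (I believe) the TEST_BRKVER definition in
 * testshared.h: pack a.b.c.d into one comparable integer. */
#define BRKVER(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

/* 0126_oauthbearer_oidc runs only when the broker is >= 3.1.0.0: */
int on_3_1_0 = BRKVER(3, 1, 0, 0) >= BRKVER(3, 1, 0, 0);   /* 1: runs  */
int on_2_8_1 = BRKVER(2, 8, 1, 0) >= BRKVER(3, 1, 0, 0);   /* 0: skips */
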
@@ -4501,26 +4507,37 @@ void test_kafka_topics(const char *fmt, ...) {
#ifdef _WIN32
TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__);
#else
- char cmd[512];
- int r;
+ char cmd[1024];
+ int r, bytes_left;
va_list ap;
test_timing_t t_cmd;
- const char *kpath, *zk;
+ const char *kpath, *bootstrap_env, *flag, *bootstrap_srvs;
- kpath = test_getenv("KAFKA_PATH", NULL);
- zk = test_getenv("ZK_ADDRESS", NULL);
+ if (test_broker_version >= TEST_BRKVER(3, 0, 0, 0)) {
+ bootstrap_env = "BROKERS";
+ flag = "--bootstrap-server";
+ } else {
+ bootstrap_env = "ZK_ADDRESS";
+ flag = "--zookeeper";
+ }
- if (!kpath || !zk)
- TEST_FAIL("%s: KAFKA_PATH and ZK_ADDRESS must be set",
- __FUNCTION__);
+ kpath = test_getenv("KAFKA_PATH", NULL);
+ bootstrap_srvs = test_getenv(bootstrap_env, NULL);
- r = rd_snprintf(cmd, sizeof(cmd),
- "%s/bin/kafka-topics.sh --zookeeper %s ", kpath, zk);
- TEST_ASSERT(r < (int)sizeof(cmd));
+ if (!kpath || !bootstrap_srvs)
+ TEST_FAIL("%s: KAFKA_PATH and %s must be set", __FUNCTION__,
+ bootstrap_env);
+
+ r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/kafka-topics.sh %s %s ",
+ kpath, flag, bootstrap_srvs);
+ TEST_ASSERT(r > 0 && r < (int)sizeof(cmd));
+
+ bytes_left = sizeof(cmd) - r;
va_start(ap, fmt);
- rd_vsnprintf(cmd + r, sizeof(cmd) - r, fmt, ap);
+ r = rd_vsnprintf(cmd + r, bytes_left, fmt, ap);
va_end(ap);
+ TEST_ASSERT(r > 0 && r < bytes_left);
TEST_SAY("Executing: %s\n", cmd);
TIMING_START(&t_cmd, "exec");
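
The rewritten command builder bounds both formatting steps and asserts that neither truncated. The same pattern in standalone form, using plain snprintf/vsnprintf instead of the rd_* wrappers (function and variable names here are mine, not from the test suite):

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>

/* Sketch: write a fixed prefix, then append the caller's format
 * string, asserting that neither step overflowed the buffer. */
static void build_cmd(char *buf, size_t size, const char *prefix,
                      const char *fmt, ...) {
        va_list ap;
        int r;
        size_t off, left;

        r = snprintf(buf, size, "%s ", prefix);
        assert(r > 0 && (size_t)r < size);

        off  = (size_t)r;
        left = size - off;

        va_start(ap, fmt);
        r = vsnprintf(buf + off, left, fmt, ap);
        va_end(ap);
        assert(r > 0 && (size_t)r < left);
}

/* e.g.: build_cmd(cmd, sizeof(cmd),
 *                 "kafka-topics.sh --bootstrap-server localhost:9092",
 *                 "--create --topic %s", topic); */
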
@@ -4542,11 +4559,15 @@ void test_kafka_topics(const char *fmt, ...) {
/**
* @brief Create topic using Topic Admin API
+ *
+ * @param configs is an optional NULL-terminated array of key, value
+ *        topic config string pairs (or NULL for no configs).
*/
-static void test_admin_create_topic(rd_kafka_t *use_rk,
- const char *topicname,
- int partition_cnt,
- int replication_factor) {
+void test_admin_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor,
+ const char **configs) {
rd_kafka_t *rk;
rd_kafka_NewTopic_t *newt[1];
const size_t newt_cnt = 1;
@@ -4571,6 +4592,14 @@ static void test_admin_create_topic(rd_kafka_t *use_rk,
errstr, sizeof(errstr));
TEST_ASSERT(newt[0] != NULL, "%s", errstr);
+ if (configs) {
+ int i;
+
+ for (i = 0; configs[i] && configs[i + 1]; i += 2)
+ TEST_CALL_ERR__(rd_kafka_NewTopic_set_config(
+ newt[0], configs[i], configs[i + 1]));
+ }
+
options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
err = rd_kafka_AdminOptions_set_operation_timeout(
options, timeout_ms, errstr, sizeof(errstr));
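
For illustration, a hypothetical call with the new configs parameter might look like the following (rk is an assumed existing rd_kafka_t handle; the topic name and config values are made up). The array is read as key, value pairs and stops at the first NULL:

/* Hypothetical caller: create a compacted topic with a small
 * segment size. */
const char *topic_configs[] = {
        "cleanup.policy", "compact",
        "segment.bytes",  "1048576",
        NULL
};

test_admin_create_topic(rk, "mytopic", 3 /*partitions*/,
                        1 /*replication factor*/, topic_configs);
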
@@ -4651,7 +4680,7 @@ void test_create_topic(rd_kafka_t *use_rk,
replication_factor);
else
test_admin_create_topic(use_rk, topicname, partition_cnt,
- replication_factor);
+ replication_factor, NULL);
}
@@ -5150,12 +5179,16 @@ void test_report_add(struct test *test, const char *fmt, ...) {
}
/**
- * Returns 1 if KAFKA_PATH and ZK_ADDRESS is set to se we can use the
- * kafka-topics.sh script to manually create topics.
+ * Returns 1 if KAFKA_PATH and BROKERS (or ZK_ADDRESS) are set so we can use
+ * the kafka-topics.sh script to manually create topics.
*
* If \p skip is set TEST_SKIP() will be called with a helpful message.
*/
int test_can_create_topics(int skip) {
+#ifndef _WIN32
+ const char *bootstrap;
+#endif
+
/* Has AdminAPI */
if (test_broker_version >= TEST_BRKVER(0, 10, 2, 0))
return 1;
@@ -5166,12 +5199,16 @@ int test_can_create_topics(int skip) {
return 0;
#else
- if (!test_getenv("KAFKA_PATH", NULL) ||
- !test_getenv("ZK_ADDRESS", NULL)) {
+ bootstrap = test_broker_version >= TEST_BRKVER(3, 0, 0, 0)
+ ? "BROKERS"
+ : "ZK_ADDRESS";
+
+ if (!test_getenv("KAFKA_PATH", NULL) || !test_getenv(bootstrap, NULL)) {
if (skip)
TEST_SKIP(
"Cannot create topics "
- "(set KAFKA_PATH and ZK_ADDRESS)\n");
+ "(set KAFKA_PATH and %s)\n",
+ bootstrap);
return 0;
}
@@ -5569,8 +5606,6 @@ rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
return NULL;
}
-
-
/**
* @brief Wait for up to \p tmout for an admin API result and return the
* distilled error code.
@@ -5582,8 +5617,9 @@ rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
* - DeleteGroups
* - DeleteRecords
* - DeleteTopics
- * * DeleteConsumerGroupOffsets
+ * - DeleteConsumerGroupOffsets
* - DescribeConfigs
+ * - CreateAcls
*/
rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
rd_kafka_event_type_t evtype,
@@ -5595,6 +5631,8 @@ rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
size_t terr_cnt = 0;
const rd_kafka_ConfigResource_t **cres = NULL;
size_t cres_cnt = 0;
+ const rd_kafka_acl_result_t **aclres = NULL;
+ size_t aclres_cnt = 0;
int errcnt = 0;
rd_kafka_resp_err_t err;
const rd_kafka_group_result_t **gres = NULL;
@@ -5653,6 +5691,15 @@ rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
cres = rd_kafka_AlterConfigs_result_resources(res, &cres_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_CREATEACLS_RESULT) {
+ const rd_kafka_CreateAcls_result_t *res;
+
+ if (!(res = rd_kafka_event_CreateAcls_result(rkev)))
+ TEST_FAIL("Expected a CreateAcls result, not %s",
+ rd_kafka_event_name(rkev));
+
+ aclres = rd_kafka_CreateAcls_result_acls(res, &aclres_cnt);
+
} else if (evtype == RD_KAFKA_EVENT_DELETEGROUPS_RESULT) {
const rd_kafka_DeleteGroups_result_t *res;
if (!(res = rd_kafka_event_DeleteGroups_result(rkev)))
@@ -5710,6 +5757,19 @@ rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
}
}
+ /* Check ACL errors */
+ for (i = 0; i < aclres_cnt; i++) {
+ const rd_kafka_error_t *error =
+ rd_kafka_acl_result_error(aclres[i]);
+ if (error) {
+ TEST_WARN("AclResult error: %s: %s\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ if (!(errcnt++))
+ err = rd_kafka_error_code(error);
+ }
+ }
+
/* Check group errors */
for (i = 0; i < gres_cnt; i++) {
const rd_kafka_topic_partition_list_t *parts;
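
For context, this is roughly how a CreateAcls result is pulled out of an admin event outside the helper: poll the queue for a CREATEACLS_RESULT event, extract the per-ACL results and inspect each one's error. A hedged sketch (q is assumed to be the queue passed to rd_kafka_CreateAcls(); timeout and cleanup handling trimmed):

rd_kafka_event_t *rkev = rd_kafka_queue_poll(q, 30 * 1000);

if (rkev &&
    rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_CREATEACLS_RESULT) {
        const rd_kafka_CreateAcls_result_t *res =
            rd_kafka_event_CreateAcls_result(rkev);
        size_t cnt, i;
        const rd_kafka_acl_result_t **aclres =
            rd_kafka_CreateAcls_result_acls(res, &cnt);

        for (i = 0; i < cnt; i++) {
                const rd_kafka_error_t *error =
                    rd_kafka_acl_result_error(aclres[i]);
                if (error)
                        printf("ACL #%d failed: %s\n", (int)i,
                               rd_kafka_error_string(error));
        }
}

if (rkev)
        rd_kafka_event_destroy(rkev);
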
@@ -6237,7 +6297,55 @@ rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk,
return err;
}
+/**
+ * @brief CreateAcls Admin API helper.
+ *
+ * @param useq Makes the call async and posts the response in this queue.
+ * If NULL this call will be synchronous and return the error
+ * result.
+ *
+ * @remark Fails the current test on failure.
+ */
+
+rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_kafka_AclBinding_t **acls,
+ size_t acl_cnt,
+ void *opaque) {
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *q;
+ rd_kafka_resp_err_t err;
+ const int tmout = 30 * 1000;
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+ if (!useq) {
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Creating %" PRIusz " acls\n", acl_cnt);
+
+ rd_kafka_CreateAcls(rk, acls, acl_cnt, options, q);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_CREATEACLS_RESULT,
+ NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to create %d acl(s): %s", (int)acl_cnt,
+ rd_kafka_err2str(err));
+
+ return err;
+}
static void test_free_string_array(char **strs, size_t cnt) {
size_t i;
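
A hypothetical call site for the new helper, assuming an existing client handle rk (the principal, host and topic are made-up values):

/* Allow "User:client" to read topic "mytopic". */
char errstr[256];
rd_kafka_AclBinding_t *acl = rd_kafka_AclBinding_new(
    RD_KAFKA_RESOURCE_TOPIC, "mytopic",
    RD_KAFKA_RESOURCE_PATTERN_LITERAL, "User:client", "*",
    RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
    errstr, sizeof(errstr));

TEST_ASSERT(acl != NULL, "%s", errstr);

/* Passing NULL for useq makes the call synchronous; the helper waits
 * for the result and fails the current test on error. */
test_CreateAcls_simple(rk, NULL, &acl, 1, NULL);

rd_kafka_AclBinding_destroy(acl);
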
@@ -6583,9 +6691,6 @@ int test_sub_start(const char *func,
if (!is_quick && test_quick)
return 0;
- if (subtests_to_run && !strstr(func, subtests_to_run))
- return 0;
-
if (fmt && *fmt) {
va_list ap;
char buf[256];
@@ -6601,6 +6706,11 @@ int test_sub_start(const char *func,
"%s:%d", func, line);
}
+ if (subtests_to_run && !strstr(test_curr->subtest, subtests_to_run)) {
+ *test_curr->subtest = '\0';
+ return 0;
+ }
+
TIMING_START(&test_curr->subtest_duration, "SUBTEST");
TEST_SAY(_C_MAG "[ %s ]\n", test_curr->subtest);
diff --git a/tests/test.h b/tests/test.h
index ca33f713b4..bbfd7a49e5 100644
--- a/tests/test.h
+++ b/tests/test.h
@@ -682,6 +682,11 @@ int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al,
rd_kafka_topic_partition_list_t *bl);
void test_kafka_topics(const char *fmt, ...);
+void test_admin_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor,
+ const char **configs);
void test_create_topic(rd_kafka_t *use_rk,
const char *topicname,
int partition_cnt,
@@ -790,8 +795,13 @@ rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple(
const rd_kafka_topic_partition_list_t *offsets,
void *opaque);
-rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms);
+rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_kafka_AclBinding_t **acls,
+ size_t acl_cnt,
+ void *opaque);
+rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms);
void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt,
@@ -839,7 +849,7 @@ int test_error_is_not_fatal_cb(rd_kafka_t *rk,
do { \
test_timing_t _timing; \
const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
- rd_kafka_error_t *_error; \
+ const rd_kafka_error_t *_error; \
TIMING_START(&_timing, "%s", _desc); \
TEST_SAYL(3, "Begin call %s\n", _desc); \
_error = FUNC_W_ARGS; \
diff --git a/tests/testshared.h b/tests/testshared.h
index b54af26c1c..efdd5d5550 100644
--- a/tests/testshared.h
+++ b/tests/testshared.h
@@ -364,7 +364,7 @@ int test_sub_start(const char *func,
const char *fmt,
...);
void test_sub_pass(void);
-void test_sub_skip(const char *fmt, ...);
+void test_sub_skip(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
#define SUB_TEST0(IS_QUICK, ...) \
do { \
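
Tagging test_sub_skip() with RD_FORMAT lets GCC/Clang verify the printf-style arguments at compile time. Assuming RD_FORMAT maps to the standard format attribute (a sketch, not the actual rd.h definition):

/* Sketch: a printf-style format attribute makes the compiler flag
 * mismatched format strings/arguments at build time. */
#if defined(__GNUC__) || defined(__clang__)
#define MY_FORMAT(archetype, fmt_idx, first_arg) \
        __attribute__((format(archetype, fmt_idx, first_arg)))
#else
#define MY_FORMAT(archetype, fmt_idx, first_arg)
#endif

void my_sub_skip(const char *fmt, ...) MY_FORMAT(printf, 1, 2);

/* my_sub_skip("expected %s", 42);   -> now a -Wformat warning */
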
diff --git a/vcpkg.json b/vcpkg.json
index 1e1fa0b1f7..f2953d0dfd 100644
--- a/vcpkg.json
+++ b/vcpkg.json
@@ -1,6 +1,6 @@
{
"name": "librdkafka",
- "version": "1.8.0",
+ "version": "1.9.0",
"dependencies": [
{
"name": "zstd",
@@ -8,16 +8,16 @@
},
{
"name": "zlib",
- "version>=": "1.2.11"
+ "version>=": "1.2.12"
},
{
"name": "openssl",
- "version>=": "1.1.1l"
+ "version>=": "1.1.1n"
},
{
"name": "curl",
- "version>=": "7.74.0#8"
+ "version>=": "7.82.0"
}
],
- "builtin-baseline": "dd3d6df5001d49f954bc39b73a4c49ae3c9e8d15"
+ "builtin-baseline": "01d6f6ff1e5332b926099f0c23bda996940ad4e8"
}
diff --git a/win32/librdkafka.vcxproj b/win32/librdkafka.vcxproj
index cc4b1a2178..49000e0ffe 100644
--- a/win32/librdkafka.vcxproj
+++ b/win32/librdkafka.vcxproj
@@ -127,6 +127,7 @@
+
@@ -202,6 +203,7 @@
+
@@ -252,4 +254,4 @@
-
\ No newline at end of file
+
diff --git a/win32/tests/tests.vcxproj b/win32/tests/tests.vcxproj
index 6fe10900e4..f4815757a7 100644
--- a/win32/tests/tests.vcxproj
+++ b/win32/tests/tests.vcxproj
@@ -209,6 +209,8 @@
+
+