Merged
118 commits
63f05de
1st commit
PratRanj07 May 20, 2024
4445321
2nd commit
PratRanj07 May 20, 2024
28aff0a
3rd commit
PratRanj07 May 20, 2024
0b3a617
Added tests and examples
PratRanj07 May 21, 2024
3c4a907
Formatting
PratRanj07 May 21, 2024
76639a6
Formatting
PratRanj07 May 21, 2024
6ff253b
little change
PratRanj07 May 21, 2024
925fd21
some small changes
PratRanj07 May 21, 2024
5b769a8
Merge branch 'dev_early_access_development_branch' into listConsumerG…
PratRanj07 May 22, 2024
195a922
changes requested
PratRanj07 May 30, 2024
380ee83
requested changes
PratRanj07 May 30, 2024
254765b
requested changes
PratRanj07 Jun 3, 2024
3ba54a1
name change
PratRanj07 Jun 3, 2024
ffe16f7
indentation
PratRanj07 Jun 3, 2024
a98699a
indentation
PratRanj07 Jun 4, 2024
160f762
Add data-governance to code owners for schema registry clients (#52)
emasab Jun 4, 2024
2a99288
Fix deprecation warning
milindl Jun 5, 2024
28e28e3
Separate eachMessage and eachBatch internal consume loop
milindl Jun 5, 2024
ffbbafd
Update performance example with more cases
milindl Jun 10, 2024
42004b1
Add per-partition cache with global expiry
milindl Jun 10, 2024
f949d88
Add per-partition cache expiry logic
milindl Jun 12, 2024
7f24913
Allow cache to disburse multiple messages at once
milindl Jun 13, 2024
fbbf9f2
Add per-partition concurrency
milindl Jun 17, 2024
8c11d0e
Add partition level concurrency to faux-eachBatch
milindl Jun 17, 2024
98ba984
Create persistent workers for per-partition concurrency, prevents exc…
milindl Jun 20, 2024
4f0f25b
Fix tests for Per Partition Concurrency
milindl Jun 24, 2024
ba0603b
Add message set capability to message cache
milindl Jun 25, 2024
fdf56ef
Add naive batching (without resolution handling)
milindl Jul 1, 2024
5ecf261
Add batch staleness, resolution, and offset management to eachBatch
milindl Jul 2, 2024
b0e4372
Update tests for true eachBatch
milindl Jul 2, 2024
ac0bece
Remove debug-only properties
milindl Jul 8, 2024
72305d2
Update MIGRATION.md for eachBatch
milindl Jul 8, 2024
f4b4aaf
Bump version
milindl Jul 8, 2024
31e325c
Fix linting and Makefile issues (#2)
milindl Aug 6, 2024
89e8227
Add SchemaRegistryClient, RestService, and testing (#1)
Claimundefine Aug 8, 2024
f34e086
Add mock client for testing
Claimundefine Aug 9, 2024
6c919ff
Remove testing artifacts
Claimundefine Aug 9, 2024
69daca9
Fix flaky e2e tests (#54)
PratRanj07 Aug 12, 2024
4f2d255
Preset fix (#6)
Claimundefine Aug 13, 2024
14d33b6
Do not modify RegExps which don't start with a ^
milindl Aug 13, 2024
9fe9571
Fix argument mutation in run, pause and resume
milindl Aug 14, 2024
1dcfe39
Dekregistry client (#67)
Claimundefine Aug 19, 2024
b69e87f
Add clientConfig, baseUrl retry, RestError, encodeURIComponent (#12) …
Claimundefine Aug 21, 2024
d73a14d
Update tsconfig.json (#69)
rayokota Aug 21, 2024
bc059a4
Fix broken tests (#70)
Claimundefine Aug 21, 2024
a85cda0
Add commitCb method (#59)
emasab Aug 22, 2024
4b9b340
Fix eslint config (#71)
rayokota Aug 22, 2024
3aab3c2
Add eslint rules (#72)
rayokota Aug 22, 2024
2bbb2af
First cut at JavaScript serdes (#73)
rayokota Aug 23, 2024
f724ed8
Add assign/unassign within rebalance callbacks
milindl Aug 27, 2024
a348985
Add performance benchmarking script modes and README
milindl Sep 11, 2024
15fff05
Add confluent debian repo for performance benchmark
milindl Sep 11, 2024
ffae694
Remove store from promisified API
milindl Aug 12, 2024
aceae76
Add binding level debug logging and client name to logs
milindl Sep 11, 2024
eddaabc
Fix typo in script name
milindl Sep 11, 2024
8bd4940
First cut at Data Contract rules (#77)
rayokota Sep 11, 2024
3d54a18
Separate SR into a different workspace (#78)
rayokota Sep 12, 2024
34302ba
Refactor to always use a barrier for pending operation (#26)
emasab Sep 13, 2024
ad06919
Schemaregistry rebase (#33) (#80)
Claimundefine Sep 13, 2024
9b88c91
Add Docker environment for integration tests (#34) (#81)
Claimundefine Sep 13, 2024
5424a4a
Fix log level config in light of binding logs
milindl Sep 14, 2024
3ca8437
Remove consumerGroupId argument from sendOffsets and add tests (#82)
milindl Sep 14, 2024
d2b7227
Performance measurement improvements
emasab Sep 15, 2024
546df33
Admin examples for available APIs (#84)
emasab Sep 15, 2024
cd0887a
Fix listGroups segfault when passing an undefined matchConsumerGroupS…
emasab Sep 16, 2024
5c637c0
Add more unit tests; minor fixes for KMS clients (#86)
rayokota Sep 16, 2024
cbc69be
Bump version to 0.1.17-devel
milindl Sep 17, 2024
ecdd836
Add complex encryption tests (#89)
rayokota Sep 17, 2024
1b77019
Add index.ts (#91)
rayokota Sep 20, 2024
ac1367c
Enhance HighLevelProducer to take schema serializers (#92)
rayokota Sep 20, 2024
71c4aeb
Add auth features (#47) (#94)
Claimundefine Sep 20, 2024
ffbffe8
Add more JSON Schema validation tests (#95)
rayokota Sep 20, 2024
5adb821
Move ts-jest to dev dependencies (#96)
rayokota Sep 20, 2024
b6379d3
Add JSON integration tests (#46) (#97)
Claimundefine Sep 22, 2024
49e12c6
Unsubscribe before disconnecting to mitigate hangs on destroy (#98)
milindl Sep 24, 2024
5356f81
Pass creds to DEK Registry client (#99)
rayokota Sep 24, 2024
a8e5b39
Bump version to 0.2.0 and drop -devel (#100)
milindl Sep 25, 2024
8b41c1e
Remove mandatory basic or bearer auth credentials (#57) (#101)
Claimundefine Sep 25, 2024
69b28a5
Add build script and readme (#104)
rayokota Sep 26, 2024
a8e3914
Add license (#105)
rayokota Sep 26, 2024
acc94a4
Add clearLatestCaches/clearCaches API, fix test to call clearLatestCa…
rayokota Sep 26, 2024
12cf126
Add avro integration tests (#56) (#106)
Claimundefine Sep 27, 2024
12e33c9
Add tsdoc (#107)
rayokota Sep 27, 2024
98f12f8
Enhance docs (#108)
rayokota Sep 27, 2024
9c7f096
Update schemaregistry README (#109)
rayokota Sep 28, 2024
63a949f
Add restService interfaces to exported types (#110)
rayokota Sep 28, 2024
ad0ff8c
Rename DekClient to avoid conflict with Client (#112)
rayokota Sep 30, 2024
52944ea
Schemaregistry examples (#69) (#113)
Claimundefine Sep 30, 2024
228f64b
Add schemaregistry examples workspace with avro, json, and csfle exam…
Claimundefine Oct 1, 2024
3431a92
bugfix integ tests for registering -value (#71) (#115)
Claimundefine Oct 1, 2024
5cc2dee
Bump version to v0.2.1 (#116)
milindl Oct 3, 2024
73ca334
Update version to 0.2.1 for EA release (#72) (#117)
Claimundefine Oct 10, 2024
4c7c8df
Add Kafka Oauth implementation (#74) (#119)
Claimundefine Oct 11, 2024
fad64ce
Upgrade librdkafka to v2.6.0 (#120)
emasab Oct 14, 2024
a86c3b4
Bump version to 0.3.0-RC1 and: (#122)
emasab Oct 17, 2024
b3712ba
v0.3.0 (#126)
emasab Oct 17, 2024
1501a64
Minor optimization to reduce schema ID lookups (#123)
rayokota Oct 17, 2024
29bc526
v0.3.0-RC2 (#127)
emasab Oct 18, 2024
4e42726
v0.3.0 final release (#128)
emasab Oct 18, 2024
e3de7e4
Fix header conversion in eachBatch (#130)
milindl Oct 21, 2024
0f3a167
1st commit
PratRanj07 May 20, 2024
1bde73a
2nd commit
PratRanj07 May 20, 2024
ce5a4e9
3rd commit
PratRanj07 May 20, 2024
beafa7c
changes requested
PratRanj07 May 30, 2024
a7c5aca
requested changes
PratRanj07 May 30, 2024
99b0252
required Changes
PratRanj07 Oct 25, 2024
f6f5b54
Merge master
PratRanj07 Oct 25, 2024
603ca2e
remove unnecessary changes
PratRanj07 Oct 25, 2024
2f86c63
indentation and unnecessary changes
PratRanj07 Oct 25, 2024
a758b90
indentation
PratRanj07 Oct 25, 2024
7085111
comment removed
PratRanj07 Oct 25, 2024
b2e28fa
comment added
PratRanj07 Oct 25, 2024
92f262d
changelog entry
PratRanj07 Oct 25, 2024
2d90d5b
Changed topic partition js to c conversion structure
PratRanj07 Oct 28, 2024
c435397
refactoring
PratRanj07 Oct 30, 2024
7f6dd40
Requested changes
PratRanj07 Oct 30, 2024
1c1cfe8
final changes
PratRanj07 Nov 4, 2024
3c494e4
Merge master
PratRanj07 Nov 4, 2024
120 changes: 120 additions & 0 deletions examples/kafkajs/fetchOffsets.js
@@ -0,0 +1,120 @@
// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS.
Reviewer comment (Contributor):
This file still needs to be moved to examples/kafkajs/admin/fetch-offsets.js, and use parseArgs appropriately for argument parsing. You can follow this file for an idea of how to do it: examples/kafkajs/admin/describe-groups.js

Reviewer comment (Contributor):
Use arguments for bootstrap server, requireStableOffsets and timeout. Use positionals for the group name and the topic/partitions as you are currently doing; it will be something like:

  const args = parseArgs({
    allowPositionals: true,
    options: {
      'bootstrap-servers': {
        type: 'string',
        short: 'b',
        default: 'localhost:9092',
      },
      'timeout': {
        type: 'string',
        short: 'm',
        default: '5000',
      },
...
    },
  });

and then args.values will contain the option values, and args.positionals will contain the rest of the arguments, which you can parse with the function you've already written.
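For illustration, a rough sketch of how the parsed values and positionals might then be consumed (parseTopicsAndPartitions is a hypothetical name standing in for the parsing function the author has already written; it is not part of this PR):

  // Sketch only: option values come from args.values, everything else from args.positionals.
  const bootstrapServers = args.values['bootstrap-servers'];
  const timeout = Number(args.values['timeout']);

  // e.g. node fetch-offsets.js -b localhost:9092 my-group my-topic 0 1
  const [groupId, ...topicAndPartitionArgs] = args.positionals;
  const topicsToFetch = parseTopicsAndPartitions(topicAndPartitionArgs);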

const { Kafka } = require("@confluentinc/kafka-javascript").KafkaJS;
Reviewer comment (Contributor):
I think this is best restructured to be similar to the examples in https://github.com/confluentinc/confluent-kafka-javascript/tree/master/examples/kafkajs/admin. Without a consumer/producer, just let people list consumer group offsets (see the sketch after this example file).


let producer, consumer, admin;
let Id = "newGroup";
let topicName = "newTopic";

const kafka = new Kafka({
  kafkaJS: {
    brokers: ["localhost:9092"],
  },
});

async function waitFor(check, resolveValue, { delay = 50 } = {}) {
  return new Promise((resolve) => {
    const interval = setInterval(() => {
      if (check()) {
        clearInterval(interval);
        resolve(resolveValue());
      }
    }, delay);
  });
}

async function waitForMessages(messagesConsumed, { number = 1, delay } = {}) {
  return waitFor(
    () => messagesConsumed.length >= number,
    () => messagesConsumed,
    { delay }
  );
}

async function adminStart() {
  admin = kafka.admin();
  await admin.connect();

  producer = kafka.producer();
  consumer = kafka.consumer({
    kafkaJS: {
      groupId: Id,
      fromBeginning: true,
      autoCommit: false,
    },
  });

  await admin.createTopics({
    topics: [{ topic: topicName, numPartitions: 1 }],
  });
  console.log("Topic created successfully");

  await producer.connect();
  await consumer.connect();

  console.log("Consumer Connected successfully");

  await consumer.subscribe({
    topics: [topicName],
  });
  console.log("Consumer subscribed to topic");

  const messages = Array.from({ length: 5 }, (_, i) => ({
    value: `message${i}`,
  }));

  await producer.send({ topic: topicName, messages });
  console.log("Messages sent till offset 4");

  let messagesConsumed = []; // Define messagesConsumed

  await consumer.run({
    eachMessage: async ({ topic, partition, message }) => {
      try {
        messagesConsumed.push(message); // Populate messagesConsumed
        if (messagesConsumed.length === 5) {
          await consumer.commitOffsets([
            {
              topic,
              partition,
              offset: (parseInt(message.offset, 10) + 1).toString(),
            },
          ]);
          await consumer.stop();
        }
      } catch (error) {
        if (error.message.includes("Offset out of range")) {
          await consumer.stop();
        } else {
          throw error; // Re-throw the error if it's not an "Offset out of range" error
        }
      }
    },
  });

  await waitForMessages(messagesConsumed, { number: 5 });
  console.log("Messages consumed successfully");
  await producer.disconnect();
  await consumer.disconnect();
  // Fetch offsets after all messages have been consumed
  const offsets = await admin.fetchOffsets({
    groupId: Id,
    topics: [
      {
        topic: topicName,
        partitions: [0], // replace with actual partition numbers
      },
    ],
  });

  console.log("Consumer group offsets: ", JSON.stringify(offsets, null, 2));

  await admin.deleteGroups([Id]);
  console.log("Consumer group deleted successfully");
  await admin.deleteTopics({
    topics: [topicName],
  });

  await admin.disconnect();
}

adminStart();
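Per the reviewer suggestion above, a minimal admin-only sketch of the restructured example (an editor's illustration under those suggestions, not code from this PR; group and topic names are placeholders and the parseArgs wiring is omitted):

const { Kafka } = require("@confluentinc/kafka-javascript").KafkaJS;

async function fetchOffsetsExample() {
  // Sketch only: list consumer group offsets without creating a producer or consumer.
  const kafka = new Kafka({ kafkaJS: { brokers: ["localhost:9092"] } });
  const admin = kafka.admin();
  await admin.connect();

  try {
    const offsets = await admin.fetchOffsets({
      groupId: "my-group",
      topics: [{ topic: "my-topic", partitions: [0] }],
      timeout: 5000,
      requireStableOffsets: false,
    });
    console.log(JSON.stringify(offsets, null, 2));
  } finally {
    await admin.disconnect();
  }
}

fetchOffsetsExample();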
70 changes: 70 additions & 0 deletions lib/admin.js
@@ -437,3 +437,73 @@ AdminClient.prototype.listTopics = function (options, cb) {
    }
  });
}
/**
* Fetch Offsets
*
* @param {string} options.groupId - The group ID to fetch offsets for.
* @param {import('../types/rdkafka').TopicInput} options.topics - The topics to fetch offsets for.
Reviewer comment (Contributor):
Use instead

import("../../types/kafkajs").TopicInput

* @param {number?} options.timeout - The request timeout in milliseconds.
* May be unset (default: 5000)
* @param {boolean?} options.requireStableOffsets - Whether broker should return stable offsets
* (transaction-committed). (default: false)
Reviewer comment (Contributor):
indentation for the * is off

*
* @param {function} cb - The callback to be executed when finished.
*/
AdminClient.prototype.fetchOffsets = function (options, cb) {

  if (!this._isConnected) {
    throw new Error('Client is disconnected');
  }

  if (typeof options === 'function' || !options) {
    throw new Error('Options with groupId must be provided');
  }

  if (!options.groupId) {
    throw new Error('groupId must be provided');
  }


  if (!Object.hasOwn(options, 'timeout')) {
    options.timeout = 5000;
  }

  if(!Object.hasOwn(options, 'requireStableOffsets')){
    options.requireStableOffsets = false;
  }

  if(!Object.hasOwn(options, 'topics')){
    options.topics = null;
  }

  /*
    If the topics array consists of strings, we will set it to NULL.
    Consequently, the function will return results for all partitions
    across all topics associated with the given group ID. Subsequently,
    we will filter these results based on the original topics array,
    thereby displaying only the relevant results.
  */
  let originalTopics = null;
Reviewer comment (Contributor):
Add a comment explaining why this is needed, and when it is needed (i.e., the condition), so future developers aren't confused.

Reviewer comment (Contributor):
About the comment:
I see that you have added a comment which explains what you are doing, but a better comment should explain why you are doing it rather than the 'what' (which a developer can also figure out by reading the code). The 'why', however, is something another developer can't figure out on their own.

So for instance, you could say: 'If the input is a list of topic strings, the user expects us to fetch offsets for all partitions of all the input topics. In librdkafka, we can only fetch offsets by topic partitions, or else we can fetch all of them. Thus, we must fetch offsets for all topic partitions (by setting topics to null) and filter by the topic strings later.'

^ something like this. You can add this to your comment to annotate both the 'what' and the 'why'.

  if (Array.isArray(options.topics) && options.topics.length > 0 && typeof options.topics[0] === 'string') {
    originalTopics = options.topics;
    options.topics = null;
  }

  this._client.fetchOffsets(options, function (err, offsets) {
    if (err) {
      if (cb) {
        cb(LibrdKafkaError.create(err));
      }
      return;
    }

    if (originalTopics !== null) {
      offsets = offsets.filter(offset => originalTopics.includes(offset.topic));
    }

    if (cb) {
      cb(null, offsets);
    }
  });
}
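A usage sketch for this callback-based API (assuming a connected AdminClient named client; group and topic names are placeholders):

client.fetchOffsets({
  groupId: 'newGroup',
  // Plain topic strings are allowed: offsets are fetched for all partitions
  // of all topics in the group and then filtered to these topics.
  topics: ['newTopic'],
  timeout: 5000,
  requireStableOffsets: false,
}, function (err, offsets) {
  if (err) {
    console.error(err);
    return;
  }
  console.log(JSON.stringify(offsets, null, 2));
});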

33 changes: 33 additions & 0 deletions lib/kafkajs/_admin.js
@@ -366,6 +366,39 @@ class Admin {
      });
    });
  }

/**
* Fetch Offsets
Reviewer comment (Contributor):
We can do slightly better with this comment I think

"Fetch the offsets for topic partition(s) for consumer group(s)."

Here and in the rdkafka file.

*
* @param {string} options.groupId - The group ID to fetch offsets for.
* @param {import('../../types/rdkafka').TopicInput} options.topics - The topics to fetch offsets for.
Reviewer comment (Contributor):
Use instead

import("../../types/kafkajs").TopicInput

* @param {boolean} options.resolveOffsets - not yet implemented
* @param {number?} options.timeout - The request timeout in milliseconds.
* May be unset (default: 5000)
* @param {boolean?} options.requireStableOffsets - Whether broker should return stable offsets
* (transaction-committed). (default: false)
Reviewer comment (Contributor):
again the indentation is off for this *

Reviewer comment (Contributor):
nit: "(transaction-committed)" should be aligned to "Whether"

*
* @returns {Promise<Array<topic: string, partitions: import('../../types/kafkajs').FetchOffsetsPartition>>}
*/
  async fetchOffsets(options = {}) {
    if (this.#state !== AdminState.CONNECTED) {
      throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE });
    }

    if (Object.hasOwn(options, "resolveOffsets")) {
      throw new error.KafkaJSError("resolveOffsets is not yet implemented.", { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED });
    }

    return new Promise((resolve, reject) => {
      this.#internalClient.fetchOffsets(options, (err, offsets) => {
        if (err) {
          reject(createKafkaJsErrorFromLibRdKafkaError(err));
        } else {
          resolve(offsets);
        }
      });
    });
  }
}

module.exports = {
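A usage sketch for the promisified method above (assuming admin is a connected admin client from this library, inside an async function):

// Sketch only: fetch committed offsets for one consumer group.
const offsets = await admin.fetchOffsets({
  groupId: "newGroup",
  topics: [{ topic: "newTopic", partitions: [0] }],
  requireStableOffsets: false,
});
// offsets resolves to an array of { topic, partitions } entries,
// as described by the @returns annotation above.
console.log(JSON.stringify(offsets, null, 2));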
145 changes: 145 additions & 0 deletions src/admin.cc
@@ -116,6 +116,7 @@ void AdminClient::Init(v8::Local<v8::Object> exports) {
  Nan::SetPrototypeMethod(tpl, "listGroups", NodeListGroups);
  Nan::SetPrototypeMethod(tpl, "describeGroups", NodeDescribeGroups);
  Nan::SetPrototypeMethod(tpl, "deleteGroups", NodeDeleteGroups);
  Nan::SetPrototypeMethod(tpl, "fetchOffsets", NodeFetchOffsets);

  Nan::SetPrototypeMethod(tpl, "connect", NodeConnect);
  Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect);
@@ -666,6 +667,91 @@ Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list,
  }
}

Baton AdminClient::FetchOffsets(rd_kafka_ListConsumerGroupOffsets_t **req,
                                size_t req_cnt, bool require_stable_offsets,
                                int timeout_ms,
                                rd_kafka_event_t **event_response) {
  if (!IsConnected()) {
    return Baton(RdKafka::ERR__STATE);
  }

  {
    scoped_shared_write_lock lock(m_connection_lock);
    if (!IsConnected()) {
      return Baton(RdKafka::ERR__STATE);
    }

    // Make admin options to establish that we are fetching offsets
    rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
        m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);

    char errstr[512];
    rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout(
        options, timeout_ms, errstr, sizeof(errstr));
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
      return Baton(static_cast<RdKafka::ErrorCode>(err), errstr);
    }

    if (require_stable_offsets) {
      rd_kafka_error_t *error =
          rd_kafka_AdminOptions_set_require_stable_offsets(
              options, require_stable_offsets);
      if (error) {
        return Baton::BatonFromErrorAndDestroy(error);
      }
    }

    // Create queue just for this operation.
    rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr());

    rd_kafka_ListConsumerGroupOffsets(m_client->c_ptr(), req, req_cnt, options,
                                      rkqu);

    // Poll for an event by type in that queue
    // DON'T destroy the event. It is the out parameter, and ownership is
    // the caller's.
    *event_response = PollForEvent(
        rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, timeout_ms);

    // Destroy the queue since we are done with it.
    rd_kafka_queue_destroy(rkqu);

    // Destroy the options we just made because we polled already
    rd_kafka_AdminOptions_destroy(options);

    // If we got no response from that operation, this is a failure
    // likely due to time out
    if (*event_response == NULL) {
      return Baton(RdKafka::ERR__TIMED_OUT);
    }

    // Now we can get the error code from the event
    if (rd_kafka_event_error(*event_response)) {
      // If we had a special error code, get out of here with it
      const rd_kafka_resp_err_t errcode = rd_kafka_event_error(*event_response);
      return Baton(static_cast<RdKafka::ErrorCode>(errcode));
    }

    const rd_kafka_ListConsumerGroupOffsets_result_t *result =
        rd_kafka_event_ListConsumerGroupOffsets_result(*event_response);

    size_t result_cnt;
    const rd_kafka_group_result_t **results =
        rd_kafka_ListConsumerGroupOffsets_result_groups(result, &result_cnt);

    // Change the type of the 'error' pointer to 'const rd_kafka_error_t *'
Reviewer comment (Contributor):
Let's move this processing outside this function and let the caller process this. This sort of works for now, but when/if we add the capacity for multiple groups, it won't.

Let's handle only top-level errors here, and move these group-specific errors out.

Reviewer comment (Contributor):
So with the change that Emanuele suggested, you will need to change the FromFetchOffsetsResult function as well, since the return type signature will be different for listConsumerGroupOffsets.

However, the kafkajs-compatible signature remains the same, so you'll need to convert it in fetchOffsets in _admin.js
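For illustration only, a rough sketch of the kind of conversion the reviewer describes, as it might look in fetchOffsets in _admin.js; the per-group result shape assumed here is hypothetical and not taken from this PR:

// Sketch: convert a hypothetical per-group result from the native call into the
// KafkaJS-compatible [{ topic, partitions: [...] }] shape.
function toFetchOffsetsShape(groupResult) {
  const byTopic = new Map();
  for (const tp of groupResult.partitions) {
    if (!byTopic.has(tp.topic)) {
      byTopic.set(tp.topic, []);
    }
    byTopic.get(tp.topic).push({
      partition: tp.partition,
      offset: String(tp.offset),
      metadata: tp.metadata ?? null,
      error: tp.error ?? null,
    });
  }
  return Array.from(byTopic, ([topic, partitions]) => ({ topic, partitions }));
}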

    const rd_kafka_error_t *error = rd_kafka_group_result_error(results[0]);
    if (error) {
      // Use the 'rd_kafka_error_code' function to get the error code
      return Baton(static_cast<RdKafka::ErrorCode>(rd_kafka_error_code(error)));
    }

    // At this point, event_response contains the result, which needs
    // to be parsed/converted by the caller.
    return Baton(RdKafka::ERR_NO_ERROR);
  }
}

void AdminClient::ActivateDispatchers() {
  // Listen to global config
  m_gconfig->listen();
@@ -986,4 +1072,63 @@ NAN_METHOD(AdminClient::NodeDeleteGroups) {
      callback, client, group_list, group_names_vector.size(), timeout_ms));
}

NAN_METHOD(AdminClient::NodeFetchOffsets) {
  Nan::HandleScope scope;
  if (info.Length() < 2 || !info[1]->IsFunction()) {
    return Nan::ThrowError("Need to specify a callback");
  }
  if (!info[0]->IsObject()) {
    return Nan::ThrowError("Must provide an options object");
  }

  v8::Local<v8::Object> options = info[0].As<v8::Object>();

  v8::Local<v8::Value> groupIdValue;
  if (!Nan::Get(options, Nan::New("groupId").ToLocalChecked())
           .ToLocal(&groupIdValue)) {
    return Nan::ThrowError("Must provide 'groupId'");
  }

  Nan::MaybeLocal<v8::String> groupIdMaybe = Nan::To<v8::String>(groupIdValue);
  if (groupIdMaybe.IsEmpty()) {
    return Nan::ThrowError("'groupId' must be a string");
  }
  Nan::Utf8String groupIdUtf8(groupIdMaybe.ToLocalChecked());
  std::string groupIdStr = *groupIdUtf8;

  v8::Local<v8::Array> topics = GetParameter<v8::Local<v8::Array>>(
      options, "topics", Nan::New<v8::Array>());

  rd_kafka_topic_partition_list_t *partitions = NULL;

  if (!topics->IsNull() && !topics->IsUndefined() && topics->Length() > 0) {
    partitions = Conversion::TopicPartition::
        GroupedTopicPartitionv8ArrayToTopicPartitionList(topics);
  }

  rd_kafka_ListConsumerGroupOffsets_t **request =
      static_cast<rd_kafka_ListConsumerGroupOffsets_t **>(
          malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) * 1));
  request[0] =
      rd_kafka_ListConsumerGroupOffsets_new(groupIdStr.c_str(), partitions);
Reviewer comment (Contributor):
partitions list is copied by the new function, so it needs to be freed at this point if not NULL


  if (partitions != NULL) {
    rd_kafka_topic_partition_list_destroy(partitions);
  }

  // Get the timeout - default 5000 and require_stable_offsets parameter.

  bool require_stable_offsets =
      GetParameter<bool>(options, "requireStableOffsets", false);
  int timeout_ms = GetParameter<int64_t>(options, "timeout", 5000);

  // Create the final callback object
  v8::Local<v8::Function> cb = info[1].As<v8::Function>();
  Nan::Callback *callback = new Nan::Callback(cb);
  AdminClient *client = ObjectWrap::Unwrap<AdminClient>(info.This());

  Nan::AsyncQueueWorker(new Workers::AdminClientFetchOffsets(
      callback, client, request, 1, require_stable_offsets, timeout_ms));
}

} // namespace NodeKafka