@@ -1,8 +1,9 @@
 import { Callback, createRedisClient, Redis, Result, type RedisOptions } from "@internal/redis";
-import { Tracer } from "@internal/tracing";
+import { startSpan, Tracer } from "@internal/tracing";
 import { Logger } from "@trigger.dev/core/logger";
-import { setInterval } from "node:timers/promises";
 import { z } from "zod";
+import { setInterval } from "node:timers/promises";
+import { flattenAttributes } from "@trigger.dev/core/v3";
 
 export type ReleaseConcurrencyQueueRetryOptions = {
   maxRetries?: number;
@@ -15,7 +16,10 @@ export type ReleaseConcurrencyQueueRetryOptions = {
 
 export type ReleaseConcurrencyQueueOptions<T> = {
   redis: RedisOptions;
-  executor: (releaseQueue: T, releaserId: string) => Promise<void>;
+  /**
+   * @returns true if the run was successful, false if the token should be returned to the bucket
+   */
+  executor: (releaseQueue: T, releaserId: string) => Promise<boolean>;
   keys: {
     fromDescriptor: (releaseQueue: T) => string;
     toDescriptor: (releaseQueue: string) => T;
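The executor contract change above is the core of this commit: instead of `Promise<void>`, the executor now reports whether it actually released concurrency. A minimal sketch of a conforming executor, assuming a hypothetical application-level helper `tryReleaseConcurrency` and descriptor shape that are not part of this diff:

```ts
// Sketch only: `tryReleaseConcurrency` and the descriptor shape are assumptions
// for illustration, not part of this commit.
type ReleaseQueueDescriptor = { orgId: string; runId: string };

async function tryReleaseConcurrency(
  descriptor: ReleaseQueueDescriptor,
  releaserId: string
): Promise<boolean> {
  // Placeholder for the caller's real release logic.
  return releaserId.length > 0;
}

// Resolving to `true` marks the item as done; resolving to `false` hands the
// token back to the bucket without requeueing the releaser.
const executor = async (
  releaseQueue: ReleaseQueueDescriptor,
  releaserId: string
): Promise<boolean> => {
  return tryReleaseConcurrency(releaseQueue, releaserId);
};
```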
@@ -78,6 +82,7 @@ export class ReleaseConcurrencyTokenBucketQueue<T> {
 
     if (!options.disableConsumers) {
       this.#startConsumers();
+      this.#startMetricsProducer();
     }
   }
 
@@ -119,7 +124,7 @@ export class ReleaseConcurrencyTokenBucketQueue<T> {
       String(Date.now())
     );
 
-    this.logger.debug("Consumed token in attemptToRelease", {
+    this.logger.info("Consumed token in attemptToRelease", {
       releaseQueueDescriptor,
       releaserId,
       maxTokens,
@@ -270,7 +275,7 @@ export class ReleaseConcurrencyTokenBucketQueue<T> {
       return false;
     }
 
-    await Promise.all(
+    await Promise.allSettled(
       result.map(([queue, releaserId, metadata]) => {
         const itemMetadata = QueueItemMetadata.parse(JSON.parse(metadata));
         const releaseQueueDescriptor = this.keys.toDescriptor(queue);
@@ -283,9 +288,29 @@ export class ReleaseConcurrencyTokenBucketQueue<T> {
 
   async #callExecutor(releaseQueueDescriptor: T, releaserId: string, metadata: QueueItemMetadata) {
     try {
-      this.logger.info("Executing run:", { releaseQueueDescriptor, releaserId });
+      this.logger.info("Calling executor for release", { releaseQueueDescriptor, releaserId });
+
+      const released = await this.options.executor(releaseQueueDescriptor, releaserId);
 
-      await this.options.executor(releaseQueueDescriptor, releaserId);
+      if (released) {
+        this.logger.info("Executor released concurrency", { releaseQueueDescriptor, releaserId });
+      } else {
+        this.logger.info("Executor did not release concurrency", {
+          releaseQueueDescriptor,
+          releaserId,
+        });
+
+        // Return the token but don't requeue
+        const releaseQueue = this.keys.fromDescriptor(releaseQueueDescriptor);
+        await this.redis.returnTokenOnly(
+          this.masterQueuesKey,
+          this.#bucketKey(releaseQueue),
+          this.#queueKey(releaseQueue),
+          this.#metadataKey(releaseQueue),
+          releaseQueue,
+          releaserId
+        );
+      }
     } catch (error) {
       this.logger.error("Error executing run:", { error });
 
@@ -374,6 +399,30 @@ export class ReleaseConcurrencyTokenBucketQueue<T> {
     }
   }
 
+  async #startMetricsProducer() {
+    try {
+      // Produce metrics every 60 seconds, using a tracer span
+      for await (const _ of setInterval(60_000)) {
+        const metrics = await this.getQueueMetrics();
+        this.logger.info("Queue metrics:", { metrics });
+
+        await startSpan(
+          this.options.tracer,
+          "ReleaseConcurrencyTokenBucketQueue.metrics",
+          async (span) => {},
+          {
+            attributes: {
+              ...flattenAttributes(metrics, "queues"),
+              forceRecording: true,
+            },
+          }
+        );
+      }
+    } catch (error) {
+      this.logger.error("Error starting metrics producer:", { error });
+    }
+  }
+
   #calculateBackoffScore(item: QueueItemMetadata): string {
     const delay = Math.min(
       this.backoff.maxDelay,
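The producer above uses the async-iterator form of `setInterval` from `node:timers/promises`: each `for await` iteration wakes up once per tick. A standalone sketch of that timer pattern, with the interval length and logging chosen purely for illustration:

```ts
import { setInterval } from "node:timers/promises";

// setInterval() from node:timers/promises returns an async iterable that
// yields on a timer, so this loop runs its body roughly once per interval
// until the iterator is abandoned or the process exits.
async function produceMetrics(collect: () => Promise<unknown>) {
  for await (const _ of setInterval(5_000)) {
    const snapshot = await collect();
    console.log("metrics snapshot", snapshot);
  }
}
```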
@@ -382,6 +431,137 @@ export class ReleaseConcurrencyTokenBucketQueue<T> {
     return String(Date.now() + delay);
   }
 
+  async getQueueMetrics(): Promise<
+    Array<{ releaseQueue: string; currentTokens: number; queueLength: number }>
+  > {
+    const streamRedis = this.redis.duplicate();
+    const queuePattern = `${this.keyPrefix}*:queue`;
+    const stream = streamRedis.scanStream({
+      match: queuePattern,
+      type: "zset",
+      count: 100,
+    });
+
+    let resolvePromise: (
+      value: Array<{ releaseQueue: string; currentTokens: number; queueLength: number }>
+    ) => void;
+    let rejectPromise: (reason?: any) => void;
+
+    const promise = new Promise<
+      Array<{ releaseQueue: string; currentTokens: number; queueLength: number }>
+    >((resolve, reject) => {
+      resolvePromise = resolve;
+      rejectPromise = reject;
+    });
+
+    const metrics: Map<
+      string,
+      { releaseQueue: string; currentTokens: number; queueLength: number }
+    > = new Map();
+
+    async function getMetricsForKeys(queueKeys: string[]) {
+      if (queueKeys.length === 0) {
+        return [];
+      }
+
+      const pipeline = streamRedis.pipeline();
+
+      queueKeys.forEach((queueKey) => {
+        const releaseQueue = queueKey
+          .replace(":queue", "")
+          .replace(streamRedis.options.keyPrefix ?? "", "");
+        const bucketKey = `${releaseQueue}:bucket`;
+
+        pipeline.get(bucketKey);
+        pipeline.zcard(`${releaseQueue}:queue`);
+      });
+
+      const result = await pipeline.exec();
+
+      if (!result) {
+        return [];
+      }
+
+      const results = result.map(([resultError, queueLengthOrCurrentTokens]) => {
+        if (resultError) {
+          return null;
+        }
+
+        return queueLengthOrCurrentTokens ? Number(queueLengthOrCurrentTokens) : 0;
+      });
+
+      // Now zip the results with the queue keys
+      const zippedResults = queueKeys.map((queueKey, index) => {
+        const releaseQueue = queueKey
+          .replace(":queue", "")
+          .replace(streamRedis.options.keyPrefix ?? "", "");
+
+        // Current tokens are at indexes 0, 2, 4, 6, etc.
+        // Queue length are at indexes 1, 3, 5, 7, etc.
+
+        const currentTokens = results[index * 2];
+        const queueLength = results[index * 2 + 1];
+
+        if (typeof currentTokens !== "number" || typeof queueLength !== "number") {
+          return null;
+        }
+
+        return {
+          releaseQueue,
+          currentTokens: currentTokens,
+          queueLength: queueLength,
+        };
+      });
+
+      return zippedResults.filter((result) => result !== null);
+    }
+
+    stream.on("end", () => {
+      streamRedis.quit();
+      resolvePromise(Array.from(metrics.values()));
+    });
+
+    stream.on("error", (error) => {
+      this.logger.error("Error getting queue metrics:", { error });
+
+      stream.pause();
+      streamRedis.quit();
+      rejectPromise(error);
+    });
+
+    stream.on("data", async (keys) => {
+      stream.pause();
+
+      const uniqueKeys = Array.from(new Set<string>(keys));
+
+      if (uniqueKeys.length === 0) {
+        stream.resume();
+        return;
+      }
+
+      const unresolvedKeys = uniqueKeys.filter((key) => !metrics.has(key));
+
+      if (unresolvedKeys.length === 0) {
+        stream.resume();
+        return;
+      }
+
+      this.logger.debug("Fetching queue metrics for keys", { keys: uniqueKeys });
+
+      await getMetricsForKeys(unresolvedKeys).then((results) => {
+        results.forEach((result) => {
+          if (result) {
+            metrics.set(result.releaseQueue, result);
+          }
+        });
+
+        stream.resume();
+      });
+    });
+
+    return promise;
+  }
+
   #registerCommands() {
     this.redis.defineCommand("consumeToken", {
       numberOfKeys: 4,
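`getQueueMetrics()` wraps an ioredis `scanStream` in a manually resolved Promise, pausing the stream while each batch of keys is looked up. A stripped-down sketch of that scan-and-collect pattern, assuming an ioredis client; the key pattern and helper name are illustrative:

```ts
import Redis from "ioredis";

// Collect all keys matching a pattern with SCAN, pausing the stream while
// each batch is handled and settling the Promise on "end"/"error".
function collectKeys(redis: Redis, pattern: string): Promise<string[]> {
  return new Promise((resolve, reject) => {
    const found = new Set<string>();
    const stream = redis.scanStream({ match: pattern, count: 100 });

    stream.on("data", (batch: string[]) => {
      stream.pause(); // stop scanning while this batch is processed
      for (const key of batch) found.add(key);
      stream.resume();
    });

    stream.on("end", () => resolve(Array.from(found)));
    stream.on("error", (error) => reject(error));
  });
}
```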
@@ -401,7 +581,9 @@ local currentTokens = tonumber(redis.call("GET", bucketKey) or maxTokens)
 
 -- If we have enough tokens, then consume them
 if currentTokens >= 1 then
-  redis.call("SET", bucketKey, currentTokens - 1)
+  local newCurrentTokens = currentTokens - 1
+
+  redis.call("SET", bucketKey, newCurrentTokens)
   redis.call("ZREM", queueKey, releaserId)
 
   -- Clean up metadata when successfully consuming
@@ -411,8 +593,8 @@ if currentTokens >= 1 then
   local queueLength = redis.call("ZCARD", queueKey)
 
   -- If we still have tokens and items in queue, update available queues
-  if currentTokens > 0 and queueLength > 0 then
-    redis.call("ZADD", masterQueuesKey, currentTokens, releaseQueue)
+  if newCurrentTokens > 0 and queueLength > 0 then
+    redis.call("ZADD", masterQueuesKey, newCurrentTokens, releaseQueue)
   else
     redis.call("ZREM", masterQueuesKey, releaseQueue)
   end
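The Lua change fixes the score written back to the master queue after a token is consumed: previously the pre-decrement `currentTokens` was reused, so a queue that had just spent its last token could stay advertised with a stale score of 1. A hypothetical in-memory model of the corrected branch, for illustration only:

```ts
// Models the post-fix consumeToken branch: the master-queue score reflects the
// token count *after* consumption, and the queue is dropped from the master
// set once no tokens (or no queued items) remain.
function consumeToken(currentTokens: number, queueLength: number) {
  if (currentTokens < 1) {
    return { consumed: false as const };
  }

  const newCurrentTokens = currentTokens - 1;
  const masterScore = newCurrentTokens > 0 && queueLength > 0 ? newCurrentTokens : null;

  return { consumed: true as const, masterScore };
}

console.log(consumeToken(3, 5)); // { consumed: true, masterScore: 2 }
console.log(consumeToken(1, 5)); // { consumed: true, masterScore: null } -> removed from master set
```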