{run.logsDeletedAt === null ? (
;
+ stop: () => void;
+ failWithError: (error: TaskRunError) => void;
};
export type EventRepoConfig = {
@@ -916,6 +918,9 @@ export class EventRepository {
]
: [];
+ let isStopped = false;
+ let failedWithError: TaskRunError | undefined;
+
const eventBuilder = {
traceId,
spanId,
@@ -933,10 +938,20 @@ export class EventRepository {
}
}
},
+ stop: () => {
+ isStopped = true;
+ },
+ failWithError: (error: TaskRunError) => {
+ failedWithError = error;
+ },
};
const result = await callback(eventBuilder, traceContext, propagatedContext?.traceparent);
+ if (isStopped) {
+ return result;
+ }
+
const duration = process.hrtime.bigint() - start;
const metadata = {
@@ -970,13 +985,14 @@ export class EventRepository {
parentId,
tracestate,
duration: options.incomplete ? 0 : duration,
- isPartial: options.incomplete,
+ isPartial: failedWithError ? false : options.incomplete,
+ isError: !!failedWithError,
message: message,
serviceName: "api server",
serviceNamespace: "trigger.dev",
level: "TRACE",
kind: options.kind,
- status: "OK",
+ status: failedWithError ? "ERROR" : "OK",
startTime,
environmentId: options.environment.id,
environmentType: options.environment.type,
@@ -1004,6 +1020,17 @@ export class EventRepository {
payload: options.attributes.payload,
payloadType: options.attributes.payloadType,
idempotencyKey: options.attributes.idempotencyKey,
+ events: failedWithError
+ ? [
+ {
+ name: "exception",
+ time: new Date(),
+ properties: {
+ exception: createExceptionPropertiesFromError(failedWithError),
+ },
+ },
+ ]
+ : undefined,
};
if (options.immediate) {
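The two new hooks give the traced callback control over how the span is finalized: `stop()` suppresses the completion event entirely (the early `return result` above), while `failWithError(error)` records the span with status `ERROR`, marks it non-partial, and attaches an "exception" span event built from the error. A minimal sketch of that contract, using assumed shapes rather than the repository's real types:

// Minimal sketch; the TaskRunError shape and doWork() are assumptions for illustration.
type TaskRunError = { type: string; message?: string };

interface TraceEventBuilder {
  traceId: string;
  spanId: string;
  stop: () => void; // skip writing the completion event
  failWithError: (error: TaskRunError) => void; // finalize as ERROR + "exception" event
}

declare function doWork(): Promise<string>;

async function exampleCallback(event: TraceEventBuilder): Promise<string> {
  try {
    return await doWork();
  } catch (error) {
    // Record the failure on the span before rethrowing.
    event.failWithError({ type: "INTERNAL_ERROR", message: String(error) });
    throw error;
  }
}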
diff --git a/apps/webapp/app/v3/marqs/concurrencyMonitor.server.ts b/apps/webapp/app/v3/marqs/concurrencyMonitor.server.ts
index cc091a6a08..f68d342d31 100644
--- a/apps/webapp/app/v3/marqs/concurrencyMonitor.server.ts
+++ b/apps/webapp/app/v3/marqs/concurrencyMonitor.server.ts
@@ -103,18 +103,16 @@ export class MarqsConcurrencyMonitor {
async #processKey(key: string, redis: Redis) {
key = this.keys.stripKeyPrefix(key);
- const orgKey = this.keys.orgCurrentConcurrencyKeyFromQueue(key);
const envKey = this.keys.envCurrentConcurrencyKeyFromQueue(key);
let runIds: string[] = [];
try {
// Next, we need to get all the items from the key, and any parent keys (org, env, queue) using sunion.
- runIds = await redis.sunion(orgKey, envKey, key);
+ runIds = await redis.sunion(envKey, key);
} catch (e) {
this._logger.error("[MarqsConcurrencyMonitor] error during sunion", {
key,
- orgKey,
envKey,
runIds,
error: e,
@@ -136,7 +134,6 @@ export class MarqsConcurrencyMonitor {
if (completedRunIds.length === 0) {
this._logger.debug("[MarqsConcurrencyMonitor] no completed runs found", {
key,
- orgKey,
envKey,
runIds,
durationMs,
@@ -147,7 +144,6 @@ export class MarqsConcurrencyMonitor {
this._logger.debug("[MarqsConcurrencyMonitor] removing completed runs from queue", {
key,
- orgKey,
envKey,
completedRunIds,
durationMs,
@@ -160,7 +156,6 @@ export class MarqsConcurrencyMonitor {
const pipeline = redis.pipeline();
pipeline.srem(key, ...completedRunIds);
- pipeline.srem(orgKey, ...completedRunIds);
pipeline.srem(envKey, ...completedRunIds);
try {
@@ -168,7 +163,6 @@ export class MarqsConcurrencyMonitor {
} catch (e) {
this._logger.error("[MarqsConcurrencyMonitor] error removing completed runs from queue", {
key,
- orgKey,
envKey,
completedRunIds,
error: e,
diff --git a/apps/webapp/app/v3/marqs/envPriorityDequeuingStrategy.server.ts b/apps/webapp/app/v3/marqs/envPriorityDequeuingStrategy.server.ts
new file mode 100644
index 0000000000..7acaf48818
--- /dev/null
+++ b/apps/webapp/app/v3/marqs/envPriorityDequeuingStrategy.server.ts
@@ -0,0 +1,95 @@
+import { EnvQueues, MarQSFairDequeueStrategy, MarQSKeyProducer } from "./types";
+
+export type EnvPriorityDequeuingStrategyOptions = {
+ keys: MarQSKeyProducer;
+ delegate: MarQSFairDequeueStrategy;
+};
+
+export class EnvPriorityDequeuingStrategy implements MarQSFairDequeueStrategy {
+ private _delegate: MarQSFairDequeueStrategy;
+
+ constructor(private options: EnvPriorityDequeuingStrategyOptions) {
+ this._delegate = options.delegate;
+ }
+
+ async distributeFairQueuesFromParentQueue(
+ parentQueue: string,
+ consumerId: string
+ ): Promise<Array<EnvQueues>> {
+ const envQueues = await this._delegate.distributeFairQueuesFromParentQueue(
+ parentQueue,
+ consumerId
+ );
+
+ return this.#sortQueuesInEnvironmentsByPriority(envQueues);
+ }
+
+ #sortQueuesInEnvironmentsByPriority(envs: EnvQueues[]): EnvQueues[] {
+ return envs.map((env) => {
+ return this.#sortQueuesInEnvironmentByPriority(env);
+ });
+ }
+
+ // Sorts the queues by priority. A higher priority means the queue should be dequeued first.
+ // All the queues with the same priority should keep the order they were in the original list.
+ // So that means if all the queues have the same priority, the order should be preserved.
+ #sortQueuesInEnvironmentByPriority(env: EnvQueues): EnvQueues {
+ const queues = env.queues;
+
+ // Group queues by their base name (without priority)
+ const queueGroups = new Map<string, string[]>();
+
+ queues.forEach((queue) => {
+ const descriptor = this.options.keys.queueDescriptorFromQueue(queue);
+ const baseQueueName = this.options.keys.queueKey(
+ descriptor.organization,
+ descriptor.environment,
+ descriptor.name,
+ descriptor.concurrencyKey
+ );
+
+ if (!queueGroups.has(baseQueueName)) {
+ queueGroups.set(baseQueueName, []);
+ }
+
+ queueGroups.get(baseQueueName)!.push(queue);
+ });
+
+ // For each group, keep only the highest priority queue
+ const resultQueues: string[] = [];
+ queueGroups.forEach((groupQueues) => {
+ const sortedGroupQueues = [...groupQueues].sort((a, b) => {
+ const aPriority = this.#getQueuePriority(a);
+ const bPriority = this.#getQueuePriority(b);
+
+ if (aPriority === bPriority) {
+ return 0;
+ }
+
+ return bPriority - aPriority;
+ });
+
+ resultQueues.push(sortedGroupQueues[0]);
+ });
+
+ // Sort the final result by priority
+ const sortedQueues = resultQueues.sort((a, b) => {
+ const aPriority = this.#getQueuePriority(a);
+ const bPriority = this.#getQueuePriority(b);
+
+ if (aPriority === bPriority) {
+ return 0;
+ }
+
+ return bPriority - aPriority;
+ });
+
+ return { envId: env.envId, queues: sortedQueues };
+ }
+
+ #getQueuePriority(queue: string): number {
+ const queueRecord = this.options.keys.queueDescriptorFromQueue(queue);
+
+ return queueRecord.priority ?? 0;
+ }
+}
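For reference, the strategy above works in two passes: it first collapses queues that share a base name down to their highest-priority variant, then orders the survivors by descending priority, preserving the delegate's order for ties. A standalone sketch of that behaviour, using a plain `{ key, baseName, priority }` shape instead of the real MarQSKeyProducer descriptors:

// Illustration only; mirrors #sortQueuesInEnvironmentByPriority above.
type QueueEntry = { key: string; baseName: string; priority: number };

function orderByPriority(entries: QueueEntry[]): string[] {
  const byBase = new Map<string, QueueEntry>();
  for (const entry of entries) {
    const existing = byBase.get(entry.baseName);
    // Keep only the highest-priority variant of each base queue (first wins on ties).
    if (!existing || entry.priority > existing.priority) {
      byBase.set(entry.baseName, entry);
    }
  }
  // Higher priority dequeues first; Array.prototype.sort is stable, so equal
  // priorities keep the order the delegate produced.
  return [...byBase.values()]
    .sort((a, b) => b.priority - a.priority)
    .map((entry) => entry.key);
}

// orderByPriority([
//   { key: "queueA-high", baseName: "queueA", priority: 10 },
//   { key: "queueA", baseName: "queueA", priority: 0 },
//   { key: "queueB", baseName: "queueB", priority: 0 },
// ]) // => ["queueA-high", "queueB"]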
diff --git a/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts b/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts
index 6d971ce3c1..986f75c525 100644
--- a/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts
+++ b/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts
@@ -3,7 +3,7 @@ import { createCache, DefaultStatefulContext, Namespace, Cache as UnkeyCache } f
import { MemoryStore } from "@unkey/cache/stores";
import { randomUUID } from "crypto";
import { Redis } from "ioredis";
-import { MarQSFairDequeueStrategy, MarQSKeyProducer } from "./types";
+import { EnvQueues, MarQSFairDequeueStrategy, MarQSKeyProducer } from "./types";
import seedrandom from "seedrandom";
import { Tracer } from "@opentelemetry/api";
import { startSpan } from "../tracing.server";
@@ -33,7 +33,6 @@ export type FairDequeuingStrategyBiases = {
export type FairDequeuingStrategyOptions = {
redis: Redis;
keys: MarQSKeyProducer;
- defaultOrgConcurrency: number;
defaultEnvConcurrency: number;
parentQueueLimit: number;
tracer: Tracer;
@@ -44,19 +43,19 @@ export type FairDequeuingStrategyOptions = {
*/
biases?: FairDequeuingStrategyBiases;
reuseSnapshotCount?: number;
- maximumOrgCount?: number;
+ maximumEnvCount?: number;
};
type FairQueueConcurrency = {
current: number;
limit: number;
+ reserve: number;
};
type FairQueue = { id: string; age: number; org: string; env: string };
type FairQueueSnapshot = {
id: string;
- orgs: Record<string, { id: string; concurrency: FairQueueConcurrency }>;
envs: Record<string, { id: string; concurrency: FairQueueConcurrency }>;
queues: Array<FairQueue>;
};
@@ -73,7 +72,6 @@ type WeightedQueue = {
const emptyFairQueueSnapshot: FairQueueSnapshot = {
id: "empty",
- orgs: {},
envs: {},
queues: [],
};
@@ -113,7 +111,7 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
async distributeFairQueuesFromParentQueue(
parentQueue: string,
consumerId: string
- ): Promise<Array<string>> {
+ ): Promise<Array<EnvQueues>> {
return await startSpan(
this.options.tracer,
"distributeFairQueuesFromParentQueue",
@@ -124,7 +122,6 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
const snapshot = await this.#createQueueSnapshot(parentQueue, consumerId);
span.setAttributes({
- snapshot_org_count: Object.keys(snapshot.orgs).length,
snapshot_env_count: Object.keys(snapshot.envs).length,
snapshot_queue_count: snapshot.queues.length,
});
@@ -135,21 +132,27 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
return [];
}
- const shuffledQueues = this.#shuffleQueuesByEnv(snapshot);
+ const envQueues = this.#shuffleQueuesByEnv(snapshot);
- span.setAttribute("shuffled_queue_count", shuffledQueues.length);
+ span.setAttribute(
+ "shuffled_queue_count",
+ envQueues.reduce((sum, env) => sum + env.queues.length, 0)
+ );
- if (shuffledQueues[0]) {
- span.setAttribute("winning_env", this.options.keys.envIdFromQueue(shuffledQueues[0]));
- span.setAttribute("winning_org", this.options.keys.orgIdFromQueue(shuffledQueues[0]));
+ if (envQueues[0]?.queues[0]) {
+ span.setAttribute("winning_env", envQueues[0].envId);
+ span.setAttribute(
+ "winning_org",
+ this.options.keys.orgIdFromQueue(envQueues[0].queues[0])
+ );
}
- return shuffledQueues;
+ return envQueues;
}
);
}
- #shuffleQueuesByEnv(snapshot: FairQueueSnapshot): Array<string> {
+ #shuffleQueuesByEnv(snapshot: FairQueueSnapshot): Array<EnvQueues> {
const envs = Object.keys(snapshot.envs);
const biases = this.options.biases ?? defaultBiases;
@@ -215,7 +218,8 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
}
// Helper method to maintain DRY principle
- #orderQueuesByEnvs(envs: string[], snapshot: FairQueueSnapshot): Array<string> {
+ // Update return type
+ #orderQueuesByEnvs(envs: string[], snapshot: FairQueueSnapshot): Array<EnvQueues> {
const queuesByEnv = snapshot.queues.reduce((acc, queue) => {
if (!acc[queue.env]) {
acc[queue.env] = [];
@@ -224,15 +228,20 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
return acc;
}, {} as Record<string, Array<FairQueue>>);
- const queues = envs.reduce((acc, envId) => {
+ return envs.reduce((acc, envId) => {
if (queuesByEnv[envId]) {
- // Instead of sorting by age, use weighted random selection
- acc.push(...this.#weightedRandomQueueOrder(queuesByEnv[envId]));
+ // Get ordered queues for this env
+ const orderedQueues = this.#weightedRandomQueueOrder(queuesByEnv[envId]);
+ // Only add the env if it has queues
+ if (orderedQueues.length > 0) {
+ acc.push({
+ envId,
+ queues: orderedQueues.map((queue) => queue.id),
+ });
+ }
}
return acc;
- }, [] as Array<FairQueue>);
-
- return queues.map((queue) => queue.id);
+ }, [] as Array<EnvQueues>);
}
#weightedRandomQueueOrder(queues: FairQueue[]): FairQueue[] {
@@ -344,74 +353,32 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
return emptyFairQueueSnapshot;
}
- // Apply org selection if maximumOrgCount is specified
- let selectedOrgIds: Set<string>;
- if (this.options.maximumOrgCount && this.options.maximumOrgCount > 0) {
- selectedOrgIds = this.#selectTopOrgs(queues, this.options.maximumOrgCount);
- // Filter queues to only include selected orgs
- queues = queues.filter((queue) => selectedOrgIds.has(queue.org));
+ // Apply env selection if maximumEnvCount is specified
+ let selectedEnvIds: Set<string>;
+ if (this.options.maximumEnvCount && this.options.maximumEnvCount > 0) {
+ selectedEnvIds = this.#selectTopEnvs(queues, this.options.maximumEnvCount);
+ // Filter queues to only include selected envs
+ queues = queues.filter((queue) => selectedEnvIds.has(queue.env));
- span.setAttribute("selected_org_count", selectedOrgIds.size);
+ span.setAttribute("selected_env_count", selectedEnvIds.size);
}
span.setAttribute("selected_queue_count", queues.length);
- const orgIds = new Set<string>();
const envIds = new Set<string>();
- const envIdToOrgId = new Map<string, string>();
for (const queue of queues) {
- orgIds.add(queue.org);
envIds.add(queue.env);
-
- envIdToOrgId.set(queue.env, queue.org);
}
- const orgs = await Promise.all(
- Array.from(orgIds).map(async (orgId) => {
- return { id: orgId, concurrency: await this.#getOrgConcurrency(orgId) };
- })
- );
-
- const orgsAtFullConcurrency = orgs.filter(
- (org) => org.concurrency.current >= org.concurrency.limit
- );
-
- const orgIdsAtFullConcurrency = new Set(orgsAtFullConcurrency.map((org) => org.id));
-
- const orgsSnapshot = orgs.reduce((acc, org) => {
- if (!orgIdsAtFullConcurrency.has(org.id)) {
- acc[org.id] = org;
- }
-
- return acc;
- }, {} as Record<string, { id: string; concurrency: FairQueueConcurrency }>);
-
- span.setAttributes({
- org_count: orgs.length,
- orgs_at_full_concurrency_count: orgsAtFullConcurrency.length,
- orgs_snapshot_count: Object.keys(orgsSnapshot).length,
- });
-
- if (Object.keys(orgsSnapshot).length === 0) {
- return emptyFairQueueSnapshot;
- }
-
- const envsWithoutFullOrgs = Array.from(envIds).filter(
- (envId) => !orgIdsAtFullConcurrency.has(envIdToOrgId.get(envId)!)
- );
-
const envs = await Promise.all(
- envsWithoutFullOrgs.map(async (envId) => {
- return {
- id: envId,
- concurrency: await this.#getEnvConcurrency(envId, envIdToOrgId.get(envId)!),
- };
+ Array.from(envIds).map(async (envId) => {
+ return { id: envId, concurrency: await this.#getEnvConcurrency(envId) };
})
);
const envsAtFullConcurrency = envs.filter(
- (env) => env.concurrency.current >= env.concurrency.limit
+ (env) => env.concurrency.current >= env.concurrency.limit + env.concurrency.reserve
);
const envIdsAtFullConcurrency = new Set(envsAtFullConcurrency.map((env) => env.id));
@@ -420,7 +387,6 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
if (!envIdsAtFullConcurrency.has(env.id)) {
acc[env.id] = env;
}
-
return acc;
}, {} as Record<string, { id: string; concurrency: FairQueueConcurrency }>);
@@ -429,14 +395,10 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
envs_at_full_concurrency_count: envsAtFullConcurrency.length,
});
- const queuesSnapshot = queues.filter(
- (queue) =>
- !orgIdsAtFullConcurrency.has(queue.org) && !envIdsAtFullConcurrency.has(queue.env)
- );
+ const queuesSnapshot = queues.filter((queue) => !envIdsAtFullConcurrency.has(queue.env));
const snapshot = {
id: randomUUID(),
- orgs: orgsSnapshot,
envs: envsSnapshot,
queues: queuesSnapshot,
};
@@ -455,82 +417,67 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
});
}
- #selectTopOrgs(queues: FairQueue[], maximumOrgCount: number): Set<string> {
- // Group queues by org
- const queuesByOrg = queues.reduce((acc, queue) => {
- if (!acc[queue.org]) {
- acc[queue.org] = [];
+ #selectTopEnvs(queues: FairQueue[], maximumEnvCount: number): Set<string> {
+ // Group queues by env
+ const queuesByEnv = queues.reduce((acc, queue) => {
+ if (!acc[queue.env]) {
+ acc[queue.env] = [];
}
- acc[queue.org].push(queue);
+ acc[queue.env].push(queue);
return acc;
}, {} as Record<string, FairQueue[]>);
- // Calculate average age for each org
- const orgAverageAges = Object.entries(queuesByOrg).map(([orgId, orgQueues]) => {
- const averageAge = orgQueues.reduce((sum, q) => sum + q.age, 0) / orgQueues.length;
- return { orgId, averageAge };
+ // Calculate average age for each env
+ const envAverageAges = Object.entries(queuesByEnv).map(([envId, envQueues]) => {
+ const averageAge = envQueues.reduce((sum, q) => sum + q.age, 0) / envQueues.length;
+ return { envId, averageAge };
});
// Perform weighted shuffle based on average ages
- const maxAge = Math.max(...orgAverageAges.map((o) => o.averageAge));
- const weightedOrgs = orgAverageAges.map((org) => ({
- orgId: org.orgId,
- weight: org.averageAge / maxAge, // Normalize weights
+ const maxAge = Math.max(...envAverageAges.map((e) => e.averageAge));
+ const weightedEnvs = envAverageAges.map((env) => ({
+ envId: env.envId,
+ weight: env.averageAge / maxAge, // Normalize weights
}));
- // Select top N orgs using weighted shuffle
- const selectedOrgs = new Set<string>();
- let remainingOrgs = [...weightedOrgs];
- let totalWeight = remainingOrgs.reduce((sum, org) => sum + org.weight, 0);
+ // Select top N envs using weighted shuffle
+ const selectedEnvs = new Set<string>();
+ let remainingEnvs = [...weightedEnvs];
+ let totalWeight = remainingEnvs.reduce((sum, env) => sum + env.weight, 0);
- while (selectedOrgs.size < maximumOrgCount && remainingOrgs.length > 0) {
+ while (selectedEnvs.size < maximumEnvCount && remainingEnvs.length > 0) {
let random = this._rng() * totalWeight;
let index = 0;
- while (random > 0 && index < remainingOrgs.length) {
- random -= remainingOrgs[index].weight;
+ while (random > 0 && index < remainingEnvs.length) {
+ random -= remainingEnvs[index].weight;
index++;
}
index = Math.max(0, index - 1);
- selectedOrgs.add(remainingOrgs[index].orgId);
- totalWeight -= remainingOrgs[index].weight;
- remainingOrgs.splice(index, 1);
+ selectedEnvs.add(remainingEnvs[index].envId);
+ totalWeight -= remainingEnvs[index].weight;
+ remainingEnvs.splice(index, 1);
}
- return selectedOrgs;
+ return selectedEnvs;
}
- async #getOrgConcurrency(orgId: string): Promise<FairQueueConcurrency> {
- return await startSpan(this.options.tracer, "getOrgConcurrency", async (span) => {
- span.setAttribute("org_id", orgId);
-
- const [currentValue, limitValue] = await Promise.all([
- this.#getOrgCurrentConcurrency(orgId),
- this.#getOrgConcurrencyLimit(orgId),
- ]);
-
- span.setAttribute("current_value", currentValue);
- span.setAttribute("limit_value", limitValue);
-
- return { current: currentValue, limit: limitValue };
- });
- }
-
- async #getEnvConcurrency(envId: string, orgId: string): Promise<FairQueueConcurrency> {
+ async #getEnvConcurrency(envId: string): Promise<FairQueueConcurrency> {
return await startSpan(this.options.tracer, "getEnvConcurrency", async (span) => {
- span.setAttribute("org_id", orgId);
span.setAttribute("env_id", envId);
- const [currentValue, limitValue] = await Promise.all([
+ const [currentValue, limitValue, reserveValue] = await Promise.all([
this.#getEnvCurrentConcurrency(envId),
this.#getEnvConcurrencyLimit(envId),
+ this.#getEnvReserveConcurrency(envId),
]);
span.setAttribute("current_value", currentValue);
span.setAttribute("limit_value", limitValue);
+ span.setAttribute("reserve_value", reserveValue);
- return { current: currentValue, limit: limitValue };
+ return { current: currentValue, limit: limitValue, reserve: reserveValue };
});
}
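The `#selectTopEnvs` loop above is a weighted selection without replacement: environments whose queues have an older average age get a proportionally larger chance of being picked, and each pick removes the winner before the next draw. A standalone sketch of the same loop, with `Math.random` standing in for the class's seeded `this._rng`:

// Sketch of weighted selection without replacement, as used by #selectTopEnvs above.
function selectTopN(items: Array<{ id: string; weight: number }>, n: number): Set<string> {
  const selected = new Set<string>();
  const remaining = [...items];
  let totalWeight = remaining.reduce((sum, item) => sum + item.weight, 0);

  while (selected.size < n && remaining.length > 0) {
    // Draw a point in [0, totalWeight) and walk the list until the weights cover it.
    let random = Math.random() * totalWeight;
    let index = 0;
    while (random > 0 && index < remaining.length) {
      random -= remaining[index].weight;
      index++;
    }
    index = Math.max(0, index - 1);

    // Record the winner and remove it so it cannot be drawn again.
    selected.add(remaining[index].id);
    totalWeight -= remaining[index].weight;
    remaining.splice(index, 1);
  }

  return selected;
}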
@@ -570,31 +517,31 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
});
}
- async #getOrgConcurrencyLimit(orgId: string) {
- return await startSpan(this.options.tracer, "getOrgConcurrencyLimit", async (span) => {
- span.setAttribute("org_id", orgId);
+ async #getEnvConcurrencyLimit(envId: string) {
+ return await startSpan(this.options.tracer, "getEnvConcurrencyLimit", async (span) => {
+ span.setAttribute("env_id", envId);
- const key = this.options.keys.orgConcurrencyLimitKey(orgId);
+ const key = this.options.keys.envConcurrencyLimitKey(envId);
const result = await this._cache.concurrencyLimit.swr(key, async () => {
const value = await this.options.redis.get(key);
if (!value) {
- return this.options.defaultOrgConcurrency;
+ return this.options.defaultEnvConcurrency;
}
return Number(value);
});
- return result.val ?? this.options.defaultOrgConcurrency;
+ return result.val ?? this.options.defaultEnvConcurrency;
});
}
- async #getOrgCurrentConcurrency(orgId: string) {
- return await startSpan(this.options.tracer, "getOrgCurrentConcurrency", async (span) => {
- span.setAttribute("org_id", orgId);
+ async #getEnvCurrentConcurrency(envId: string) {
+ return await startSpan(this.options.tracer, "getEnvCurrentConcurrency", async (span) => {
+ span.setAttribute("env_id", envId);
- const key = this.options.keys.orgCurrentConcurrencyKey(orgId);
+ const key = this.options.keys.envCurrentConcurrencyKey(envId);
const result = await this.options.redis.scard(key);
@@ -604,31 +551,11 @@ export class FairDequeuingStrategy implements MarQSFairDequeueStrategy {
});
}
- async #getEnvConcurrencyLimit(envId: string) {
- return await startSpan(this.options.tracer, "getEnvConcurrencyLimit", async (span) => {
- span.setAttribute("env_id", envId);
-
- const key = this.options.keys.envConcurrencyLimitKey(envId);
-
- const result = await this._cache.concurrencyLimit.swr(key, async () => {
- const value = await this.options.redis.get(key);
-
- if (!value) {
- return this.options.defaultEnvConcurrency;
- }
-
- return Number(value);
- });
-
- return result.val ?? this.options.defaultEnvConcurrency;
- });
- }
-
- async #getEnvCurrentConcurrency(envId: string) {
- return await startSpan(this.options.tracer, "getEnvCurrentConcurrency", async (span) => {
+ async #getEnvReserveConcurrency(envId: string) {
+ return await startSpan(this.options.tracer, "getEnvReserveConcurrency", async (span) => {
span.setAttribute("env_id", envId);
- const key = this.options.keys.envCurrentConcurrencyKey(envId);
+ const key = this.options.keys.envReserveConcurrencyKey(envId);
const result = await this.options.redis.scard(key);
@@ -643,7 +570,7 @@ export class NoopFairDequeuingStrategy implements MarQSFairDequeueStrategy {
async distributeFairQueuesFromParentQueue(
parentQueue: string,
consumerId: string
- ): Promise<Array<string>> {
+ ): Promise<Array<EnvQueues>> {
return [];
}
}
diff --git a/apps/webapp/app/v3/marqs/index.server.ts b/apps/webapp/app/v3/marqs/index.server.ts
index 341f003464..cef0e95024 100644
--- a/apps/webapp/app/v3/marqs/index.server.ts
+++ b/apps/webapp/app/v3/marqs/index.server.ts
@@ -22,15 +22,19 @@ import { concurrencyTracker } from "../services/taskRunConcurrencyTracker.server
import { attributesFromAuthenticatedEnv, tracer } from "../tracer.server";
import { AsyncWorker } from "./asyncWorker.server";
import { FairDequeuingStrategy } from "./fairDequeuingStrategy.server";
-import { MarQSShortKeyProducer } from "./marqsKeyProducer.server";
+import { MarQSShortKeyProducer } from "./marqsKeyProducer";
import {
+ EnqueueMessageReserveConcurrencyOptions,
MarQSFairDequeueStrategy,
MarQSKeyProducer,
+ MarQSKeyProducerEnv,
MessagePayload,
MessageQueueSubscriber,
VisibilityTimeoutStrategy,
} from "./types";
import { V3LegacyRunEngineWorkerVisibilityTimeout } from "./v3VisibilityTimeout.server";
+import { flattenAttributes } from "@trigger.dev/core/v3";
+import { EnvPriorityDequeuingStrategy } from "./envPriorityDequeuingStrategy.server";
const KEY_PREFIX = "marqs:";
@@ -61,6 +65,11 @@ export type MarQSOptions = {
subscriber?: MessageQueueSubscriber;
};
+export const MarQSPriorityLevel = {
+ resume: 100,
+ retry: 10,
+} as const;
+
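As an aside on how these constants play with the rest of this diff: priorities end up on the queue key, and `EnvPriorityDequeuingStrategy` treats a missing priority as 0 (`queueDescriptorFromQueue(...).priority ?? 0`), so within an environment the resume variant (100) is tried before the retry variant (10), which is tried before unprioritized queues. A self-contained illustration of that ordering (the constants are restated locally so the snippet runs on its own):

// Illustration only; mirrors MarQSPriorityLevel above and the `priority ?? 0` default.
const PriorityLevel = { resume: 100, retry: 10 } as const;

const enqueued: Array<{ queue: string; priority?: number }> = [
  { queue: "task/a" }, // normal enqueue, defaults to 0
  { queue: "task/b", priority: PriorityLevel.retry },
  { queue: "task/c", priority: PriorityLevel.resume },
];

const dequeueOrder = [...enqueued]
  .sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0))
  .map((entry) => entry.queue);
// => ["task/c", "task/b", "task/a"]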
/**
* MarQS - Multitenant Asynchronous Reliable Queueing System (pronounced "markus")
*/
@@ -99,32 +108,31 @@ export class MarQS {
}
public async updateEnvConcurrencyLimits(env: AuthenticatedEnvironment) {
+ const envConcurrencyLimitKey = this.keys.envConcurrencyLimitKey(env);
+
+ logger.debug("Updating env concurrency limits", {
+ envConcurrencyLimitKey,
+ service: this.name,
+ });
+
await this.#callUpdateGlobalConcurrencyLimits({
- envConcurrencyLimitKey: this.keys.envConcurrencyLimitKey(env),
- orgConcurrencyLimitKey: this.keys.orgConcurrencyLimitKey(env),
+ envConcurrencyLimitKey,
envConcurrencyLimit: env.maximumConcurrencyLimit,
- orgConcurrencyLimit: env.organization.maximumConcurrencyLimit,
});
}
- public async getQueueConcurrencyLimit(env: AuthenticatedEnvironment, queue: string) {
+ public async getQueueConcurrencyLimit(env: MarQSKeyProducerEnv, queue: string) {
const result = await this.redis.get(this.keys.queueConcurrencyLimitKey(env, queue));
return result ? Number(result) : undefined;
}
- public async getEnvConcurrencyLimit(env: AuthenticatedEnvironment) {
+ public async getEnvConcurrencyLimit(env: MarQSKeyProducerEnv) {
const result = await this.redis.get(this.keys.envConcurrencyLimitKey(env));
return result ? Number(result) : this.options.defaultEnvConcurrency;
}
- public async getOrgConcurrencyLimit(env: AuthenticatedEnvironment) {
- const result = await this.redis.get(this.keys.orgConcurrencyLimitKey(env));
-
- return result ? Number(result) : this.options.defaultOrgConcurrency;
- }
-
public async lengthOfQueue(
env: AuthenticatedEnvironment,
queue: string,
@@ -133,7 +141,7 @@ export class MarQS {
return this.redis.zcard(this.keys.queueKey(env, queue, concurrencyKey));
}
- public async lengthOfEnvQueue(env: AuthenticatedEnvironment) {
+ public async lengthOfEnvQueue(env: MarQSKeyProducerEnv) {
return this.redis.zcard(this.keys.envQueueKey(env));
}
@@ -158,19 +166,29 @@ export class MarQS {
}
public async currentConcurrencyOfQueue(
- env: AuthenticatedEnvironment,
+ env: MarQSKeyProducerEnv,
+ queue: string,
+ concurrencyKey?: string
+ ) {
+ return this.redis.scard(this.keys.queueCurrentConcurrencyKey(env, queue, concurrencyKey));
+ }
+
+ public async reserveConcurrencyOfQueue(
+ env: MarQSKeyProducerEnv,
queue: string,
concurrencyKey?: string
) {
- return this.redis.scard(this.keys.currentConcurrencyKey(env, queue, concurrencyKey));
+ return this.redis.scard(
+ this.keys.queueReserveConcurrencyKeyFromQueue(this.keys.queueKey(env, queue, concurrencyKey))
+ );
}
- public async currentConcurrencyOfEnvironment(env: AuthenticatedEnvironment) {
+ public async currentConcurrencyOfEnvironment(env: MarQSKeyProducerEnv) {
return this.redis.scard(this.keys.envCurrentConcurrencyKey(env));
}
- public async currentConcurrencyOfOrg(env: AuthenticatedEnvironment) {
- return this.redis.scard(this.keys.orgCurrentConcurrencyKey(env));
+ public async reserveConcurrencyOfEnvironment(env: MarQSKeyProducerEnv) {
+ return this.redis.scard(this.keys.envReserveConcurrencyKey(env.id));
}
public async enqueueMessage(
@@ -179,12 +197,14 @@ export class MarQS {
messageId: string,
messageData: Record<string, unknown>,
concurrencyKey?: string,
- timestamp?: number
+ timestamp?: number | Date,
+ reserve?: EnqueueMessageReserveConcurrencyOptions,
+ priority?: number
) {
return await this.#trace(
"enqueueMessage",
async (span) => {
- const messageQueue = this.keys.queueKey(env, queue, concurrencyKey);
+ const messageQueue = this.keys.queueKey(env, queue, concurrencyKey, priority);
const parentQueue = this.keys.envSharedQueueKey(env);
@@ -195,7 +215,12 @@ export class MarQS {
data: messageData,
queue: messageQueue,
concurrencyKey,
- timestamp: timestamp ?? Date.now(),
+ timestamp:
+ typeof timestamp === "undefined"
+ ? Date.now()
+ : typeof timestamp === "number"
+ ? timestamp
+ : timestamp.getTime(),
messageId,
parentQueue,
};
@@ -207,9 +232,18 @@ export class MarQS {
[SemanticAttributes.PARENT_QUEUE]: parentQueue,
});
- await this.#callEnqueueMessage(messagePayload);
+ if (reserve) {
+ span.setAttribute("reserve_message_id", reserve.messageId);
+ span.setAttribute("reserve_recursive_queue", reserve.recursiveQueue);
+ }
+
+ const result = await this.#callEnqueueMessage(messagePayload, reserve);
- await this.options.subscriber?.messageEnqueued(messagePayload);
+ if (result) {
+ await this.options.subscriber?.messageEnqueued(messagePayload);
+ }
+
+ return result;
},
{
kind: SpanKind.PRODUCER,
@@ -223,6 +257,75 @@ export class MarQS {
);
}
+ public async replaceMessage(
+ messageId: string,
+ messageData: Record<string, unknown>,
+ timestamp?: number,
+ priority?: number,
+ inplace?: boolean
+ ) {
+ return this.#trace(
+ "replaceMessage",
+ async (span) => {
+ const oldMessage = await this.readMessage(messageId);
+
+ if (!oldMessage) {
+ return;
+ }
+
+ const queue = this.keys.queueKeyFromQueue(oldMessage.queue, priority);
+
+ span.setAttributes({
+ [SemanticAttributes.QUEUE]: queue,
+ [SemanticAttributes.MESSAGE_ID]: oldMessage.messageId,
+ [SemanticAttributes.CONCURRENCY_KEY]: oldMessage.concurrencyKey,
+ [SemanticAttributes.PARENT_QUEUE]: oldMessage.parentQueue,
+ });
+
+ const traceContext = {
+ traceparent: oldMessage.data.traceparent,
+ tracestate: oldMessage.data.tracestate,
+ };
+
+ const newMessage: MessagePayload = {
+ version: "1",
+ // preserve original trace context
+ data: { ...oldMessage.data, ...messageData, ...traceContext, queue },
+ queue,
+ concurrencyKey: oldMessage.concurrencyKey,
+ timestamp: timestamp ?? Date.now(),
+ messageId,
+ parentQueue: oldMessage.parentQueue,
+ };
+
+ if (inplace) {
+ await this.#callReplaceMessage(newMessage);
+ return;
+ }
+
+ await this.options.visibilityTimeoutStrategy.cancelHeartbeat(messageId);
+
+ await this.#callAcknowledgeMessage({
+ parentQueue: oldMessage.parentQueue,
+ messageQueue: oldMessage.queue,
+ messageId,
+ });
+
+ await this.#callEnqueueMessage(newMessage);
+
+ await this.options.subscriber?.messageReplaced(newMessage);
+ },
+ {
+ kind: SpanKind.CONSUMER,
+ attributes: {
+ [SEMATTRS_MESSAGING_OPERATION]: "replace",
+ [SEMATTRS_MESSAGE_ID]: messageId,
+ [SEMATTRS_MESSAGING_SYSTEM]: "marqs",
+ },
+ }
+ );
+ }
+
public async dequeueMessageInEnv(env: AuthenticatedEnvironment) {
return this.#trace(
"dequeueMessageInEnv",
@@ -233,24 +336,21 @@ export class MarQS {
span.setAttribute(SemanticAttributes.CONSUMER_ID, env.id);
// Get prioritized list of queues to try
- const queues =
+ const environments =
await this.options.envQueuePriorityStrategy.distributeFairQueuesFromParentQueue(
parentQueue,
env.id
);
+ const queues = environments.flatMap((e) => e.queues);
+
+ span.setAttribute("env_count", environments.length);
span.setAttribute("queue_count", queues.length);
for (const messageQueue of queues) {
const messageData = await this.#callDequeueMessage({
messageQueue,
parentQueue,
- concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(messageQueue),
- currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(messageQueue),
- envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(messageQueue),
- envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue),
- orgConcurrencyLimitKey: this.keys.orgConcurrencyLimitKeyFromQueue(messageQueue),
- orgCurrentConcurrencyKey: this.keys.orgCurrentConcurrencyKeyFromQueue(messageQueue),
});
if (!messageData) {
@@ -280,11 +380,7 @@ export class MarQS {
await this.#callAcknowledgeMessage({
parentQueue,
- messageKey: this.keys.messageKey(messageData.messageId),
messageQueue: messageQueue,
- concurrencyKey: this.keys.currentConcurrencyKeyFromQueue(messageQueue),
- envConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue),
- orgConcurrencyKey: this.keys.orgCurrentConcurrencyKeyFromQueue(messageQueue),
messageId: messageData.messageId,
});
@@ -327,70 +423,78 @@ export class MarQS {
span.setAttribute(SemanticAttributes.PARENT_QUEUE, parentQueue);
// Get prioritized list of queues to try
- const queues = await this.options.queuePriorityStrategy.distributeFairQueuesFromParentQueue(
- parentQueue,
- consumerId
- );
+ const envQueues =
+ await this.options.queuePriorityStrategy.distributeFairQueuesFromParentQueue(
+ parentQueue,
+ consumerId
+ );
- span.setAttribute("queue_count", queues.length);
+ span.setAttribute("environment_count", envQueues.length);
- if (queues.length === 0) {
+ if (envQueues.length === 0) {
return;
}
+ let attemptedEnvs = 0;
+ let attemptedQueues = 0;
+
// Try each queue in order until we successfully dequeue a message
- for (const messageQueue of queues) {
- try {
- const messageData = await this.#callDequeueMessage({
- messageQueue,
- parentQueue,
- concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(messageQueue),
- currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(messageQueue),
- envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(messageQueue),
- envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue),
- orgConcurrencyLimitKey: this.keys.orgConcurrencyLimitKeyFromQueue(messageQueue),
- orgCurrentConcurrencyKey: this.keys.orgCurrentConcurrencyKeyFromQueue(messageQueue),
- });
+ for (const env of envQueues) {
+ attemptedEnvs++;
- if (!messageData) {
- continue; // Try next queue if no message was dequeued
- }
+ for (const messageQueue of env.queues) {
+ attemptedQueues++;
- const message = await this.readMessage(messageData.messageId);
-
- if (message) {
- const ageOfMessageInMs = Date.now() - message.timestamp;
-
- span.setAttributes({
- [SEMATTRS_MESSAGE_ID]: message.messageId,
- [SemanticAttributes.QUEUE]: message.queue,
- [SemanticAttributes.MESSAGE_ID]: message.messageId,
- [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey,
- [SemanticAttributes.PARENT_QUEUE]: message.parentQueue,
- age_in_seconds: ageOfMessageInMs / 1000,
- attempted_queues: queues.indexOf(messageQueue) + 1, // How many queues we tried before success
- message_timestamp: message.timestamp,
- message_age: Date.now() - message.timestamp,
+ try {
+ const messageData = await this.#callDequeueMessage({
+ messageQueue,
+ parentQueue,
});
- await this.options.subscriber?.messageDequeued(message);
-
- await this.options.visibilityTimeoutStrategy.heartbeat(
- messageData.messageId,
- this.visibilityTimeoutInMs
- );
-
- return message;
+ if (!messageData) {
+ continue; // Try next queue if no message was dequeued
+ }
+
+ const message = await this.readMessage(messageData.messageId);
+
+ if (message) {
+ const ageOfMessageInMs = Date.now() - message.timestamp;
+
+ span.setAttributes({
+ [SEMATTRS_MESSAGE_ID]: message.messageId,
+ [SemanticAttributes.QUEUE]: message.queue,
+ [SemanticAttributes.MESSAGE_ID]: message.messageId,
+ [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey,
+ [SemanticAttributes.PARENT_QUEUE]: message.parentQueue,
+ age_in_seconds: ageOfMessageInMs / 1000,
+ attempted_queues: attemptedQueues, // How many queues we tried before success
+ attempted_envs: attemptedEnvs, // How many environments we tried before success
+ message_timestamp: message.timestamp,
+ message_age: Date.now() - message.timestamp,
+ ...flattenAttributes(message.data, "message"),
+ });
+
+ await this.options.subscriber?.messageDequeued(message);
+
+ await this.options.visibilityTimeoutStrategy.heartbeat(
+ messageData.messageId,
+ this.visibilityTimeoutInMs
+ );
+
+ return message;
+ }
+ } catch (error) {
+ // Log error but continue trying other queues
+ logger.warn(`[${this.name}] Failed to dequeue from queue ${messageQueue}`, { error });
+ continue;
}
- } catch (error) {
- // Log error but continue trying other queues
- logger.warn(`[${this.name}] Failed to dequeue from queue ${messageQueue}`, { error });
- continue;
}
}
// If we get here, we tried all queues but couldn't dequeue a message
- span.setAttribute("attempted_queues", queues.length);
+ span.setAttribute("attempted_queues", attemptedQueues);
+ span.setAttribute("attempted_envs", attemptedEnvs);
+
return;
},
{
@@ -430,11 +534,7 @@ export class MarQS {
await this.#callAcknowledgeMessage({
parentQueue: message.parentQueue,
- messageKey: this.keys.messageKey(messageId),
messageQueue: message.queue,
- concurrencyKey: this.keys.currentConcurrencyKeyFromQueue(message.queue),
- envConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(message.queue),
- orgConcurrencyKey: this.keys.orgCurrentConcurrencyKeyFromQueue(message.queue),
messageId,
});
@@ -451,69 +551,72 @@ export class MarQS {
);
}
- public async replaceMessage(
+ /**
+ * Negative acknowledge a message, which will requeue the message.
+ * Returns whether it went back into the queue or not.
+ */
+ public async nackMessage(
messageId: string,
- messageData: Record<string, unknown>,
- timestamp?: number,
- inplace?: boolean
+ retryAt: number = Date.now(),
+ updates?: Record<string, unknown>
) {
return this.#trace(
- "replaceMessage",
+ "nackMessage",
async (span) => {
- const oldMessage = await this.readMessage(messageId);
+ const message = await this.readMessage(messageId);
- if (!oldMessage) {
- return;
+ if (!message) {
+ logger.debug(`[${this.name}].nackMessage() message not found`, {
+ messageId,
+ retryAt,
+ updates,
+ service: this.name,
+ });
+ return false;
}
- span.setAttributes({
- [SemanticAttributes.QUEUE]: oldMessage.queue,
- [SemanticAttributes.MESSAGE_ID]: oldMessage.messageId,
- [SemanticAttributes.CONCURRENCY_KEY]: oldMessage.concurrencyKey,
- [SemanticAttributes.PARENT_QUEUE]: oldMessage.parentQueue,
- });
+ const nackCount = await this.#getNackCount(messageId);
- const traceContext = {
- traceparent: oldMessage.data.traceparent,
- tracestate: oldMessage.data.tracestate,
- };
+ span.setAttribute("nack_count", nackCount);
- const newMessage: MessagePayload = {
- version: "1",
- // preserve original trace context
- data: { ...oldMessage.data, ...messageData, ...traceContext },
- queue: oldMessage.queue,
- concurrencyKey: oldMessage.concurrencyKey,
- timestamp: timestamp ?? Date.now(),
- messageId,
- parentQueue: oldMessage.parentQueue,
- };
+ if (nackCount >= this.options.maximumNackCount) {
+ logger.debug(`[${this.name}].nackMessage() maximum nack count reached`, {
+ messageId,
+ retryAt,
+ updates,
+ service: this.name,
+ });
- if (inplace) {
- await this.#callReplaceMessage(newMessage);
- return;
+ span.setAttribute("maximum_nack_count_reached", true);
+
+ // If we have reached the maximum nack count, we will ack the message
+ await this.acknowledgeMessage(messageId, "maximum nack count reached");
+ return false;
+ }
+
+ span.setAttributes({
+ [SemanticAttributes.QUEUE]: message.queue,
+ [SemanticAttributes.MESSAGE_ID]: message.messageId,
+ [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey,
+ [SemanticAttributes.PARENT_QUEUE]: message.parentQueue,
+ });
+
+ if (updates) {
+ await this.replaceMessage(messageId, updates, retryAt, undefined, true);
}
await this.options.visibilityTimeoutStrategy.cancelHeartbeat(messageId);
- await this.#callAcknowledgeMessage({
- parentQueue: oldMessage.parentQueue,
- messageKey: this.keys.messageKey(messageId),
- messageQueue: oldMessage.queue,
- concurrencyKey: this.keys.currentConcurrencyKeyFromQueue(oldMessage.queue),
- envConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(oldMessage.queue),
- orgConcurrencyKey: this.keys.orgCurrentConcurrencyKeyFromQueue(oldMessage.queue),
- messageId,
- });
+ await this.#callNackMessage(messageId, message, retryAt);
- await this.#callEnqueueMessage(newMessage);
+ await this.options.subscriber?.messageNacked(message);
- await this.options.subscriber?.messageReplaced(newMessage);
+ return true;
},
{
kind: SpanKind.CONSUMER,
attributes: {
- [SEMATTRS_MESSAGING_OPERATION]: "replace",
+ [SEMATTRS_MESSAGING_OPERATION]: "nack",
[SEMATTRS_MESSAGE_ID]: messageId,
[SEMATTRS_MESSAGING_SYSTEM]: "marqs",
},
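The rewritten `nackMessage` now caps redeliveries: it reads the per-message nack counter and, once `maximumNackCount` is reached, acknowledges the message (removing it for good) instead of requeueing, returning `false` to the caller. A simplified standalone sketch of that gate, with the Redis-backed counter and queue operations abstracted behind plain callbacks:

// Simplified stand-in for the nack gate above; not the real MarQS wiring.
async function nackWithCap(
  messageId: string,
  getNackCount: (id: string) => Promise<number>,
  ack: (id: string, reason: string) => Promise<void>,
  requeue: (id: string, retryAt: number) => Promise<void>,
  maximumNackCount: number,
  retryAt: number = Date.now()
): Promise<boolean> {
  const nackCount = await getNackCount(messageId);

  if (nackCount >= maximumNackCount) {
    // Too many redeliveries: drop the message instead of putting it back.
    await ack(messageId, "maximum nack count reached");
    return false;
  }

  // Otherwise requeue it to be retried at the requested time.
  await requeue(messageId, retryAt);
  return true;
}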
@@ -542,65 +645,6 @@ export class MarQS {
);
}
- public async releaseConcurrency(messageId: string, releaseForRun: boolean = false) {
- return this.#trace(
- "releaseConcurrency",
- async (span) => {
- span.setAttributes({
- [SemanticAttributes.MESSAGE_ID]: messageId,
- });
-
- const message = await this.readMessage(messageId);
-
- if (!message) {
- logger.log(`[${this.name}].releaseConcurrency() message not found`, {
- messageId,
- releaseForRun,
- service: this.name,
- });
- return;
- }
-
- span.setAttributes({
- [SemanticAttributes.QUEUE]: message.queue,
- [SemanticAttributes.MESSAGE_ID]: message.messageId,
- [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey,
- [SemanticAttributes.PARENT_QUEUE]: message.parentQueue,
- });
-
- const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue);
- const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue);
- const orgConcurrencyKey = this.keys.orgCurrentConcurrencyKeyFromQueue(message.queue);
-
- logger.debug("Calling releaseConcurrency", {
- messageId,
- queue: message.queue,
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
- service: this.name,
- releaseForRun,
- });
-
- return this.redis.releaseConcurrency(
- //don't release the for the run, it breaks concurrencyLimits
- releaseForRun ? concurrencyKey : "",
- envConcurrencyKey,
- orgConcurrencyKey,
- message.messageId
- );
- },
- {
- kind: SpanKind.CONSUMER,
- attributes: {
- [SEMATTRS_MESSAGING_OPERATION]: "releaseConcurrency",
- [SEMATTRS_MESSAGE_ID]: messageId,
- [SEMATTRS_MESSAGING_SYSTEM]: "marqs",
- },
- }
- );
- }
-
async #trace<T>(
name: string,
fn: (span: Span) => Promise<T>,
@@ -637,103 +681,20 @@ export class MarQS {
);
}
- /**
- * Negative acknowledge a message, which will requeue the message.
- * Returns whether it went back into the queue or not.
- */
- public async nackMessage(
- messageId: string,
- retryAt: number = Date.now(),
- updates?: Record<string, unknown>
- ) {
- return this.#trace(
- "nackMessage",
- async (span) => {
- const message = await this.readMessage(messageId);
+ async #getNackCount(messageId: string): Promise<number> {
+ const result = await this.redis.get(this.keys.nackCounterKey(messageId));
- if (!message) {
- logger.debug(`[${this.name}].nackMessage() message not found`, {
- messageId,
- retryAt,
- updates,
- service: this.name,
- });
- return false;
- }
+ return result ? Number(result) : 0;
+ }
- const nackCount = await this.#getNackCount(messageId);
+ // This should increment by the number of seconds, but with a max value of Date.now() + visibilityTimeoutInMs
+ public async heartbeatMessage(messageId: string) {
+ await this.options.visibilityTimeoutStrategy.heartbeat(messageId, this.visibilityTimeoutInMs);
+ }
- span.setAttribute("nack_count", nackCount);
-
- if (nackCount >= this.options.maximumNackCount) {
- logger.debug(`[${this.name}].nackMessage() maximum nack count reached`, {
- messageId,
- retryAt,
- updates,
- service: this.name,
- });
-
- span.setAttribute("maximum_nack_count_reached", true);
-
- // If we have reached the maximum nack count, we will ack the message
- await this.acknowledgeMessage(messageId, "maximum nack count reached");
- return false;
- }
-
- span.setAttributes({
- [SemanticAttributes.QUEUE]: message.queue,
- [SemanticAttributes.MESSAGE_ID]: message.messageId,
- [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey,
- [SemanticAttributes.PARENT_QUEUE]: message.parentQueue,
- });
-
- if (updates) {
- await this.replaceMessage(messageId, updates, retryAt, true);
- }
-
- await this.options.visibilityTimeoutStrategy.cancelHeartbeat(messageId);
-
- await this.#callNackMessage({
- messageKey: this.keys.messageKey(messageId),
- messageQueue: message.queue,
- parentQueue: message.parentQueue,
- concurrencyKey: this.keys.currentConcurrencyKeyFromQueue(message.queue),
- envConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(message.queue),
- orgConcurrencyKey: this.keys.orgCurrentConcurrencyKeyFromQueue(message.queue),
- nackCounterKey: this.keys.nackCounterKey(messageId),
- messageId,
- messageScore: retryAt,
- });
-
- await this.options.subscriber?.messageNacked(message);
-
- return true;
- },
- {
- kind: SpanKind.CONSUMER,
- attributes: {
- [SEMATTRS_MESSAGING_OPERATION]: "nack",
- [SEMATTRS_MESSAGE_ID]: messageId,
- [SEMATTRS_MESSAGING_SYSTEM]: "marqs",
- },
- }
- );
- }
-
- async #getNackCount(messageId: string): Promise<number> {
- const result = await this.redis.get(this.keys.nackCounterKey(messageId));
-
- return result ? Number(result) : 0;
- }
-
- // This should increment by the number of seconds, but with a max value of Date.now() + visibilityTimeoutInMs
- public async heartbeatMessage(messageId: string) {
- await this.options.visibilityTimeoutStrategy.heartbeat(messageId, this.visibilityTimeoutInMs);
- }
-
- get visibilityTimeoutInMs() {
- return this.options.visibilityTimeoutInMs ?? 300000; // 5 minutes
- }
+ get visibilityTimeoutInMs() {
+ return this.options.visibilityTimeoutInMs ?? 300000; // 5 minutes
+ }
async readMessage(messageId: string) {
return this.#trace(
@@ -928,67 +889,247 @@ export class MarQS {
});
}
- async #callEnqueueMessage(message: MessagePayload) {
- const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue);
- const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue);
- const orgConcurrencyKey = this.keys.orgCurrentConcurrencyKeyFromQueue(message.queue);
+ async #callEnqueueMessage(
+ message: MessagePayload,
+ reserve?: EnqueueMessageReserveConcurrencyOptions
+ ) {
+ const queueKey = message.queue;
+ const parentQueueKey = message.parentQueue;
+ const messageKey = this.keys.messageKey(message.messageId);
+ const queueCurrentConcurrencyKey = this.keys.queueCurrentConcurrencyKeyFromQueue(message.queue);
+ const queueReserveConcurrencyKey = this.keys.queueReserveConcurrencyKeyFromQueue(message.queue);
+ const envCurrentConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue);
+ const envReserveConcurrencyKey = this.keys.envReserveConcurrencyKeyFromQueue(message.queue);
+ const envQueueKey = this.keys.envQueueKeyFromQueue(message.queue);
+
+ const queueName = message.queue;
+ const messageId = message.messageId;
+ const messageData = JSON.stringify(message);
+ const messageScore = String(message.timestamp);
+
+ if (!reserve) {
+ logger.debug("Calling enqueueMessage", {
+ service: this.name,
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ });
- logger.debug("Calling enqueueMessage", {
- messagePayload: message,
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
- service: this.name,
- });
+ const result = await this.redis.enqueueMessage(
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore
+ );
- return this.redis.enqueueMessage(
- message.queue,
- message.parentQueue,
- this.keys.messageKey(message.messageId),
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
- this.keys.envQueueKeyFromQueue(message.queue),
- message.queue,
- message.messageId,
- JSON.stringify(message),
- String(message.timestamp)
- );
+ logger.debug("enqueueMessage result", {
+ service: this.name,
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ result,
+ });
+
+ return true;
+ }
+
+ const envConcurrencyLimitKey = this.keys.envConcurrencyLimitKeyFromQueue(message.queue);
+ const reserveMessageId = reserve.messageId;
+ const defaultEnvConcurrencyLimit = String(this.options.defaultEnvConcurrency);
+
+ if (!reserve.recursiveQueue) {
+ logger.debug("Calling enqueueMessageWithReservingConcurrency", {
+ service: this.name,
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envConcurrencyLimitKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ reserveMessageId,
+ defaultEnvConcurrencyLimit,
+ });
+
+ const result = await this.redis.enqueueMessageWithReservingConcurrency(
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envConcurrencyLimitKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ reserveMessageId,
+ defaultEnvConcurrencyLimit
+ );
+
+ logger.debug("enqueueMessageWithReservingConcurrency result", {
+ service: this.name,
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envConcurrencyLimitKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ reserveMessageId,
+ defaultEnvConcurrencyLimit,
+ result,
+ });
+
+ return true;
+ } else {
+ const queueConcurrencyLimitKey = this.keys.queueConcurrencyLimitKeyFromQueue(message.queue);
+
+ logger.debug("Calling enqueueMessageWithReservingConcurrencyForRecursiveQueue", {
+ service: this.name,
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ queueConcurrencyLimitKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envConcurrencyLimitKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ reserveMessageId,
+ defaultEnvConcurrencyLimit,
+ });
+
+ const result = await this.redis.enqueueMessageWithReservingConcurrencyOnRecursiveQueue(
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ queueConcurrencyLimitKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envConcurrencyLimitKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ reserveMessageId,
+ defaultEnvConcurrencyLimit
+ );
+
+ logger.debug("enqueueMessageWithReservingConcurrencyOnRecursiveQueue result", {
+ service: this.name,
+ queueKey,
+ parentQueueKey,
+ messageKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ queueConcurrencyLimitKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envConcurrencyLimitKey,
+ envQueueKey,
+ queueName,
+ messageId,
+ messageData,
+ messageScore,
+ reserveMessageId,
+ defaultEnvConcurrencyLimit,
+ result,
+ });
+
+ return !!result;
+ }
}
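Taken together, `#callEnqueueMessage` now picks one of three Lua commands and only reports success (so the subscriber is notified) when the chosen command accepted the message. A small sketch of the branching, with names mirroring the code above:

// Sketch of the branching in #callEnqueueMessage:
//   no reserve                        -> enqueueMessage, always returns true
//   reserve on another queue          -> enqueueMessageWithReservingConcurrency, returns true
//   reserve on the same (recursive)   -> ...OnRecursiveQueue, may return false when the
//                                        queue's reserve set is already at its limit
type ReserveOptions = { messageId: string; recursiveQueue: boolean };

function pickEnqueueCommand(reserve?: ReserveOptions): string {
  if (!reserve) return "enqueueMessage";
  return reserve.recursiveQueue
    ? "enqueueMessageWithReservingConcurrencyOnRecursiveQueue"
    : "enqueueMessageWithReservingConcurrency";
}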
async #callDequeueMessage({
messageQueue,
parentQueue,
- concurrencyLimitKey,
- envConcurrencyLimitKey,
- orgConcurrencyLimitKey,
- currentConcurrencyKey,
- envCurrentConcurrencyKey,
- orgCurrentConcurrencyKey,
}: {
messageQueue: string;
parentQueue: string;
- concurrencyLimitKey: string;
- envConcurrencyLimitKey: string;
- orgConcurrencyLimitKey: string;
- currentConcurrencyKey: string;
- envCurrentConcurrencyKey: string;
- orgCurrentConcurrencyKey: string;
}) {
+ const queueConcurrencyLimitKey = this.keys.queueConcurrencyLimitKeyFromQueue(messageQueue);
+ const queueCurrentConcurrencyKey = this.keys.queueCurrentConcurrencyKeyFromQueue(messageQueue);
+ const envConcurrencyLimitKey = this.keys.envConcurrencyLimitKeyFromQueue(messageQueue);
+ const envCurrentConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue);
+ const envReserveConcurrencyKey = this.keys.envReserveConcurrencyKeyFromQueue(messageQueue);
+ const queueReserveConcurrencyKey = this.keys.queueReserveConcurrencyKeyFromQueue(messageQueue);
+ const envQueueKey = this.keys.envQueueKeyFromQueue(messageQueue);
+
+ logger.debug("Calling dequeueMessage", {
+ messageQueue,
+ parentQueue,
+ queueConcurrencyLimitKey,
+ envConcurrencyLimitKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envReserveConcurrencyKey,
+ envQueueKey,
+ service: this.name,
+ });
+
const result = await this.redis.dequeueMessage(
messageQueue,
parentQueue,
- concurrencyLimitKey,
+ queueConcurrencyLimitKey,
envConcurrencyLimitKey,
- orgConcurrencyLimitKey,
- currentConcurrencyKey,
+ queueCurrentConcurrencyKey,
+ queueReserveConcurrencyKey,
envCurrentConcurrencyKey,
- orgCurrentConcurrencyKey,
- this.keys.envQueueKeyFromQueue(messageQueue),
+ envReserveConcurrencyKey,
+ envQueueKey,
messageQueue,
String(Date.now()),
- String(this.options.defaultEnvConcurrency),
- String(this.options.defaultOrgConcurrency)
+ String(this.options.defaultEnvConcurrency)
);
if (!result) {
@@ -1024,29 +1165,28 @@ export class MarQS {
async #callAcknowledgeMessage({
parentQueue,
- messageKey,
messageQueue,
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
messageId,
}: {
parentQueue: string;
- messageKey: string;
messageQueue: string;
- concurrencyKey: string;
- envConcurrencyKey: string;
- orgConcurrencyKey: string;
messageId: string;
}) {
+ const messageKey = this.keys.messageKey(messageId);
+ const concurrencyKey = this.keys.queueCurrentConcurrencyKeyFromQueue(messageQueue);
+ const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue);
+ const envReserveConcurrencyKey = this.keys.envReserveConcurrencyKeyFromQueue(messageQueue);
+ const queueReserveConcurrencyKey = this.keys.queueReserveConcurrencyKeyFromQueue(messageQueue);
+ const envQueueKey = this.keys.envQueueKeyFromQueue(messageQueue);
+
logger.debug("Calling acknowledgeMessage", {
messageKey,
messageQueue,
concurrencyKey,
envConcurrencyKey,
- orgConcurrencyKey,
messageId,
parentQueue,
+ envQueueKey,
service: this.name,
});
@@ -1055,106 +1195,63 @@ export class MarQS {
messageKey,
messageQueue,
concurrencyKey,
+ queueReserveConcurrencyKey,
envConcurrencyKey,
- orgConcurrencyKey,
- this.keys.envQueueKeyFromQueue(messageQueue),
+ envReserveConcurrencyKey,
+ envQueueKey,
messageId,
messageQueue
);
}
- async #callNackMessage({
- messageKey,
- messageQueue,
- parentQueue,
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
- nackCounterKey,
- messageId,
- messageScore,
- }: {
- messageKey: string;
- messageQueue: string;
- parentQueue: string;
- concurrencyKey: string;
- envConcurrencyKey: string;
- orgConcurrencyKey: string;
- nackCounterKey: string;
- messageId: string;
- messageScore: number;
- }) {
+ async #callNackMessage(messageId: string, message: MessagePayload, messageScore: number) {
+ const messageKey = this.keys.messageKey(message.messageId);
+ const queueKey = message.queue;
+ const parentQueueKey = message.parentQueue;
+ const queueCurrentConcurrencyKey = this.keys.queueCurrentConcurrencyKeyFromQueue(message.queue);
+ const envCurrentConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue);
+ const nackCounterKey = this.keys.nackCounterKey(message.messageId);
+ const envQueueKey = this.keys.envQueueKeyFromQueue(message.queue);
+ const queueName = message.queue;
+
logger.debug("Calling nackMessage", {
messageKey,
- messageQueue,
- parentQueue,
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
+ queueKey,
+ parentQueueKey,
+ queueCurrentConcurrencyKey,
+ envCurrentConcurrencyKey,
nackCounterKey,
messageId,
messageScore,
+ envQueueKey,
service: this.name,
});
return this.redis.nackMessage(
messageKey,
- messageQueue,
- parentQueue,
- concurrencyKey,
- envConcurrencyKey,
- orgConcurrencyKey,
- this.keys.envQueueKeyFromQueue(messageQueue),
+ queueKey,
+ parentQueueKey,
+ queueCurrentConcurrencyKey,
+ envCurrentConcurrencyKey,
+ envQueueKey,
nackCounterKey,
- messageQueue,
+ queueName,
messageId,
String(Date.now()),
String(messageScore)
);
}
- async #callCalculateQueueCurrentConcurrencies({
- currentConcurrencyKey,
- currentEnvConcurrencyKey,
- currentOrgConcurrencyKey,
- }: {
- currentConcurrencyKey: string;
- currentEnvConcurrencyKey: string;
- currentOrgConcurrencyKey: string;
- }) {
- const currentConcurrencies = await this.redis.calculateQueueCurrentConcurrencies(
- currentConcurrencyKey,
- currentEnvConcurrencyKey,
- currentOrgConcurrencyKey
- );
-
- const orgCurrent = Number(currentConcurrencies[0]);
- const envCurrent = Number(currentConcurrencies[1]);
- const queueCurrent = Number(currentConcurrencies[2]);
-
- return {
- queue: queueCurrent,
- env: envCurrent,
- org: orgCurrent,
- };
- }
-
#callUpdateGlobalConcurrencyLimits({
envConcurrencyLimitKey,
- orgConcurrencyLimitKey,
envConcurrencyLimit,
- orgConcurrencyLimit,
}: {
envConcurrencyLimitKey: string;
- orgConcurrencyLimitKey: string;
envConcurrencyLimit: number;
- orgConcurrencyLimit: number;
}) {
return this.redis.updateGlobalConcurrencyLimits(
envConcurrencyLimitKey,
- orgConcurrencyLimitKey,
- String(envConcurrencyLimit),
- String(orgConcurrencyLimit)
+ String(envConcurrencyLimit)
);
}
@@ -1190,15 +1287,16 @@ export class MarQS {
#registerCommands() {
this.redis.defineCommand("enqueueMessage", {
- numberOfKeys: 7,
+ numberOfKeys: 8,
lua: `
-local queue = KEYS[1]
-local parentQueue = KEYS[2]
+local queueKey = KEYS[1]
+local parentQueueKey = KEYS[2]
local messageKey = KEYS[3]
-local concurrencyKey = KEYS[4]
-local envCurrentConcurrencyKey = KEYS[5]
-local orgCurrentConcurrencyKey = KEYS[6]
-local envQueue = KEYS[7]
+local queueCurrentConcurrencyKey = KEYS[4]
+local queueReserveConcurrencyKey = KEYS[5]
+local envCurrentConcurrencyKey = KEYS[6]
+local envReserveConcurrencyKey = KEYS[7]
+local envQueueKey = KEYS[8]
local queueName = ARGV[1]
local messageId = ARGV[2]
@@ -1209,73 +1307,199 @@ local messageScore = ARGV[4]
redis.call('SET', messageKey, messageData)
-- Add the message to the queue
-redis.call('ZADD', queue, messageScore, messageId)
+redis.call('ZADD', queueKey, messageScore, messageId)
-- Add the message to the env queue
-redis.call('ZADD', envQueue, messageScore, messageId)
+redis.call('ZADD', envQueueKey, messageScore, messageId)
-- Rebalance the parent queue
-local earliestMessage = redis.call('ZRANGE', queue, 0, 0, 'WITHSCORES')
+local earliestMessage = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
if #earliestMessage == 0 then
- redis.call('ZREM', parentQueue, queueName)
+ redis.call('ZREM', parentQueueKey, queueName)
else
- redis.call('ZADD', parentQueue, earliestMessage[2], queueName)
+ redis.call('ZADD', parentQueueKey, earliestMessage[2], queueName)
end
-- Update the concurrency keys
-redis.call('SREM', concurrencyKey, messageId)
+redis.call('SREM', queueCurrentConcurrencyKey, messageId)
redis.call('SREM', envCurrentConcurrencyKey, messageId)
-redis.call('SREM', orgCurrentConcurrencyKey, messageId)
+redis.call('SREM', envReserveConcurrencyKey, messageId)
+redis.call('SREM', queueReserveConcurrencyKey, messageId)
+
+return true
+ `,
+ });
+
+ this.redis.defineCommand("enqueueMessageWithReservingConcurrency", {
+ numberOfKeys: 9,
+ lua: `
+local queueKey = KEYS[1]
+local parentQueueKey = KEYS[2]
+local messageKey = KEYS[3]
+local queueCurrentConcurrencyKey = KEYS[4]
+local queueReserveConcurrencyKey = KEYS[5]
+local envCurrentConcurrencyKey = KEYS[6]
+local envReserveConcurrencyKey = KEYS[7]
+local envConcurrencyLimitKey = KEYS[8]
+local envQueueKey = KEYS[9]
+
+local queueName = ARGV[1]
+local messageId = ARGV[2]
+local messageData = ARGV[3]
+local messageScore = ARGV[4]
+local reserveMessageId = ARGV[5]
+local defaultEnvConcurrencyLimit = ARGV[6]
+
+-- Write the message to the message key
+redis.call('SET', messageKey, messageData)
+
+-- Add the message to the queue
+redis.call('ZADD', queueKey, messageScore, messageId)
+
+-- Add the message to the env queue
+redis.call('ZADD', envQueueKey, messageScore, messageId)
+
+-- Rebalance the parent queue
+local earliestMessage = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+if #earliestMessage == 0 then
+ redis.call('ZREM', parentQueueKey, queueName)
+else
+ redis.call('ZADD', parentQueueKey, earliestMessage[2], queueName)
+end
+
+-- Update the concurrency keys
+redis.call('SREM', queueCurrentConcurrencyKey, messageId)
+redis.call('SREM', envCurrentConcurrencyKey, messageId)
+redis.call('SREM', envReserveConcurrencyKey, messageId)
+redis.call('SREM', queueReserveConcurrencyKey, messageId)
+
+-- Reserve the concurrency for the message
+local envReserveConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit)
+-- Count the number of messages in the env reserve concurrency set
+local envReserveConcurrency = tonumber(redis.call('SCARD', envReserveConcurrencyKey) or '0')
+
+-- If there is space, add the reserveMessageId to the env reserve concurrency set
+if envReserveConcurrency < envReserveConcurrencyLimit then
+ redis.call('SADD', envReserveConcurrencyKey, reserveMessageId)
+end
+
+return true
+ `,
+ });
+
+ this.redis.defineCommand("enqueueMessageWithReservingConcurrencyOnRecursiveQueue", {
+ numberOfKeys: 10,
+ lua: `
+local queueKey = KEYS[1]
+local parentQueueKey = KEYS[2]
+local messageKey = KEYS[3]
+local queueCurrentConcurrencyKey = KEYS[4]
+local queueReserveConcurrencyKey = KEYS[5]
+local queueConcurrencyLimitKey = KEYS[6]
+local envCurrentConcurrencyKey = KEYS[7]
+local envReserveConcurrencyKey = KEYS[8]
+local envConcurrencyLimitKey = KEYS[9]
+local envQueueKey = KEYS[10]
+
+local queueName = ARGV[1]
+local messageId = ARGV[2]
+local messageData = ARGV[3]
+local messageScore = ARGV[4]
+local reserveMessageId = ARGV[5]
+local defaultEnvConcurrencyLimit = ARGV[6]
+
+-- Get the env reserve concurrency limit because we need it to calculate the max reserve concurrency
+-- for the specific queue
+local envReserveConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit)
+
+-- Count the number of messages in the queue reserve concurrency set
+local queueReserveConcurrency = tonumber(redis.call('SCARD', queueReserveConcurrencyKey) or '0')
+local queueConcurrencyLimit = tonumber(redis.call('GET', queueConcurrencyLimitKey) or '1000000')
+
+local queueReserveConcurrencyLimit = math.min(queueConcurrencyLimit, envReserveConcurrencyLimit)
+
+-- If we cannot add the reserve concurrency, then we have to return false
+if queueReserveConcurrency >= queueReserveConcurrencyLimit then
+ return false
+end
+
+-- Write the message to the message key
+redis.call('SET', messageKey, messageData)
+
+-- Add the message to the queue
+redis.call('ZADD', queueKey, messageScore, messageId)
+
+-- Add the message to the env queue
+redis.call('ZADD', envQueueKey, messageScore, messageId)
+
+-- Rebalance the parent queue
+local earliestMessage = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
+if #earliestMessage == 0 then
+ redis.call('ZREM', parentQueueKey, queueName)
+else
+ redis.call('ZADD', parentQueueKey, earliestMessage[2], queueName)
+end
+
+-- Update the concurrency keys
+redis.call('SREM', queueCurrentConcurrencyKey, messageId)
+redis.call('SREM', envCurrentConcurrencyKey, messageId)
+redis.call('SREM', envReserveConcurrencyKey, messageId)
+redis.call('SREM', queueReserveConcurrencyKey, messageId)
+
+-- Count the number of messages in the env reserve concurrency set
+local envReserveConcurrency = tonumber(redis.call('SCARD', envReserveConcurrencyKey) or '0')
+
+-- If there is space, add the reserveMessageId to the env reserve concurrency set
+if envReserveConcurrency < envReserveConcurrencyLimit then
+ redis.call('SADD', envReserveConcurrencyKey, reserveMessageId)
+end
+
+redis.call('SADD', queueReserveConcurrencyKey, reserveMessageId)
+
+return true
`,
});
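The gate in the recursive-queue variant is the part that can make an enqueue fail. As a rough TypeScript restatement (illustrative only, not part of the patch; names mirror the Lua locals above, and 1_000_000 is the same "unlimited" sentinel the script uses):

```ts
// Sketch of the reserve gate in enqueueMessageWithReservingConcurrencyOnRecursiveQueue.
function canReserveOnRecursiveQueue(opts: {
  queueReserveConcurrency: number; // SCARD queueReserveConcurrencyKey
  queueConcurrencyLimit?: number; // GET queueConcurrencyLimitKey, if set
  envConcurrencyLimit?: number; // GET envConcurrencyLimitKey, if set
  defaultEnvConcurrencyLimit: number;
}): boolean {
  const envReserveConcurrencyLimit = opts.envConcurrencyLimit ?? opts.defaultEnvConcurrencyLimit;
  const queueConcurrencyLimit = opts.queueConcurrencyLimit ?? 1_000_000;
  const queueReserveConcurrencyLimit = Math.min(queueConcurrencyLimit, envReserveConcurrencyLimit);

  // When the queue's reserve set is already full, the script returns false instead of enqueueing.
  return opts.queueReserveConcurrency < queueReserveConcurrencyLimit;
}
```

Returning `false` here is what `enqueueRun` (added later in this patch) turns into a recursive-wait deadlock error.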
this.redis.defineCommand("dequeueMessage", {
numberOfKeys: 9,
lua: `
--- Keys: childQueue, parentQueue, concurrencyLimitKey, envConcurrencyLimitKey, orgConcurrencyLimitKey, currentConcurrencyKey, envCurrentConcurrencyKey, orgCurrentConcurrencyKey
-local childQueue = KEYS[1]
-local parentQueue = KEYS[2]
-local concurrencyLimitKey = KEYS[3]
+local queueKey = KEYS[1]
+local parentQueueKey = KEYS[2]
+local queueConcurrencyLimitKey = KEYS[3]
local envConcurrencyLimitKey = KEYS[4]
-local orgConcurrencyLimitKey = KEYS[5]
-local currentConcurrencyKey = KEYS[6]
+local queueCurrentConcurrencyKey = KEYS[5]
+local queueReserveConcurrencyKey = KEYS[6]
local envCurrentConcurrencyKey = KEYS[7]
-local orgCurrentConcurrencyKey = KEYS[8]
+local envReserveConcurrencyKey = KEYS[8]
local envQueueKey = KEYS[9]
--- Args: childQueueName, currentTime, defaultEnvConcurrencyLimit, defaultOrgConcurrencyLimit
-local childQueueName = ARGV[1]
+local queueName = ARGV[1]
local currentTime = tonumber(ARGV[2])
local defaultEnvConcurrencyLimit = ARGV[3]
-local defaultOrgConcurrencyLimit = ARGV[4]
-
--- Check current org concurrency against the limit
-local orgCurrentConcurrency = tonumber(redis.call('SCARD', orgCurrentConcurrencyKey) or '0')
-local orgConcurrencyLimit = tonumber(redis.call('GET', orgConcurrencyLimitKey) or defaultOrgConcurrencyLimit)
-
-if orgCurrentConcurrency >= orgConcurrencyLimit then
- return nil
-end
-- Check current env concurrency against the limit
local envCurrentConcurrency = tonumber(redis.call('SCARD', envCurrentConcurrencyKey) or '0')
local envConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit)
+local envReserveConcurrency = tonumber(redis.call('SCARD', envReserveConcurrencyKey) or '0')
+local totalEnvConcurrencyLimit = envConcurrencyLimit + envReserveConcurrency
-if envCurrentConcurrency >= envConcurrencyLimit then
+if envCurrentConcurrency >= totalEnvConcurrencyLimit then
return nil
end
-- Check current queue concurrency against the limit
-local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0')
-local concurrencyLimit = tonumber(redis.call('GET', concurrencyLimitKey) or envConcurrencyLimit)
+local queueCurrentConcurrency = tonumber(redis.call('SCARD', queueCurrentConcurrencyKey) or '0')
+local queueConcurrencyLimit = math.min(tonumber(redis.call('GET', queueConcurrencyLimitKey) or '1000000'), envConcurrencyLimit)
+local queueReserveConcurrency = tonumber(redis.call('SCARD', queueReserveConcurrencyKey) or '0')
+local totalQueueConcurrencyLimit = queueConcurrencyLimit + queueReserveConcurrency
-- Check condition only if concurrencyLimit exists
-if currentConcurrency >= concurrencyLimit then
+if queueCurrentConcurrency >= totalQueueConcurrencyLimit then
return nil
end
-- Attempt to dequeue the next message
-local messages = redis.call('ZRANGEBYSCORE', childQueue, '-inf', currentTime, 'WITHSCORES', 'LIMIT', 0, 1)
+local messages = redis.call('ZRANGEBYSCORE', queueKey, '-inf', currentTime, 'WITHSCORES', 'LIMIT', 0, 1)
if #messages == 0 then
return nil
@@ -1284,19 +1508,24 @@ end
local messageId = messages[1]
local messageScore = tonumber(messages[2])
--- Move message to timeout queue and update concurrency
-redis.call('ZREM', childQueue, messageId)
+-- Remove the message from the queue and update concurrency
+redis.call('ZREM', queueKey, messageId)
redis.call('ZREM', envQueueKey, messageId)
-redis.call('SADD', currentConcurrencyKey, messageId)
+redis.call('SADD', queueCurrentConcurrencyKey, messageId)
redis.call('SADD', envCurrentConcurrencyKey, messageId)
-redis.call('SADD', orgCurrentConcurrencyKey, messageId)
+
+-- Remove the message from the env reserve concurrency set
+redis.call('SREM', envReserveConcurrencyKey, messageId)
+
+-- Remove the message from the queue reserve concurrency set
+redis.call('SREM', queueReserveConcurrencyKey, messageId)
-- Rebalance the parent queue
-local earliestMessage = redis.call('ZRANGE', childQueue, 0, 0, 'WITHSCORES')
+local earliestMessage = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
if #earliestMessage == 0 then
- redis.call('ZREM', parentQueue, childQueueName)
+ redis.call('ZREM', parentQueueKey, queueName)
else
- redis.call('ZADD', parentQueue, earliestMessage[2], childQueueName)
+ redis.call('ZADD', parentQueueKey, earliestMessage[2], queueName)
end
return {messageId, messageScore} -- Return message details
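The dequeue gate no longer consults org-level keys, and both remaining checks add the size of the matching reserve set on top of the configured limit, giving environments and queues with reservations extra headroom. A rough TypeScript restatement of the admission check (illustrative only, not part of the patch):

```ts
// Sketch of the dequeueMessage admission check; names mirror the Lua locals above.
function canDequeue(opts: {
  envCurrentConcurrency: number; // SCARD envCurrentConcurrencyKey
  envConcurrencyLimit: number; // GET envConcurrencyLimitKey, falling back to the default
  envReserveConcurrency: number; // SCARD envReserveConcurrencyKey
  queueCurrentConcurrency: number; // SCARD queueCurrentConcurrencyKey
  queueConcurrencyLimit?: number; // GET queueConcurrencyLimitKey, if set
  queueReserveConcurrency: number; // SCARD queueReserveConcurrencyKey
}): boolean {
  const totalEnvConcurrencyLimit = opts.envConcurrencyLimit + opts.envReserveConcurrency;
  if (opts.envCurrentConcurrency >= totalEnvConcurrencyLimit) return false;

  // The queue limit is capped by the env limit, then widened by the queue's reserve set.
  const queueConcurrencyLimit = Math.min(
    opts.queueConcurrencyLimit ?? 1_000_000,
    opts.envConcurrencyLimit
  );
  const totalQueueConcurrencyLimit = queueConcurrencyLimit + opts.queueReserveConcurrency;
  return opts.queueCurrentConcurrency < totalQueueConcurrencyLimit;
}
```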
@@ -1323,70 +1552,63 @@ redis.call('SET', messageKey, messageData, 'GET')
});
this.redis.defineCommand("acknowledgeMessage", {
- numberOfKeys: 7,
+ numberOfKeys: 8,
lua: `
--- Keys: parentQueue, messageKey, messageQueue, concurrencyKey, envCurrentConcurrencyKey, orgCurrentConcurrencyKey
-local parentQueue = KEYS[1]
+local parentQueueKey = KEYS[1]
local messageKey = KEYS[2]
-local messageQueue = KEYS[3]
-local concurrencyKey = KEYS[4]
-local envCurrentConcurrencyKey = KEYS[5]
-local orgCurrentConcurrencyKey = KEYS[6]
-local envQueueKey = KEYS[7]
+local queueKey = KEYS[3]
+local queueConcurrencyKey = KEYS[4]
+local queueReserveConcurrencyKey = KEYS[5]
+local envCurrentConcurrencyKey = KEYS[6]
+local envReserveConcurrencyKey = KEYS[7]
+local envQueueKey = KEYS[8]
--- Args: messageId, messageQueueName
local messageId = ARGV[1]
-local messageQueueName = ARGV[2]
-
--- Remove the message from the message key
-redis.call('DEL', messageKey)
+local queueName = ARGV[2]
-- Remove the message from the queue
-redis.call('ZREM', messageQueue, messageId)
-
--- Remove the message from the env queue
-redis.call('ZREM', envQueueKey, messageId)
+redis.call('ZREM', queueKey, messageId)
-- Rebalance the parent queue
-local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES')
+local earliestMessage = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
if #earliestMessage == 0 then
- redis.call('ZREM', parentQueue, messageQueueName)
+ redis.call('ZREM', parentQueueKey, queueName)
else
- redis.call('ZADD', parentQueue, earliestMessage[2], messageQueueName)
+ redis.call('ZADD', parentQueueKey, earliestMessage[2], queueName)
end
-- Update the concurrency keys
-redis.call('SREM', concurrencyKey, messageId)
+redis.call('SREM', queueConcurrencyKey, messageId)
redis.call('SREM', envCurrentConcurrencyKey, messageId)
-redis.call('SREM', orgCurrentConcurrencyKey, messageId)
+redis.call('SREM', envReserveConcurrencyKey, messageId)
+redis.call('SREM', queueReserveConcurrencyKey, messageId)
+redis.call('ZREM', envQueueKey, messageId)
+redis.call('DEL', messageKey)
`,
});
this.redis.defineCommand("nackMessage", {
- numberOfKeys: 8,
+ numberOfKeys: 7,
lua: `
local messageKey = KEYS[1]
-local childQueueKey = KEYS[2]
+local queueKey = KEYS[2]
local parentQueueKey = KEYS[3]
-local concurrencyKey = KEYS[4]
-local envConcurrencyKey = KEYS[5]
-local orgConcurrencyKey = KEYS[6]
-local envQueueKey = KEYS[7]
-local nackCounterKey = KEYS[8]
+local queueCurrentConcurrencyKey = KEYS[4]
+local envCurrentConcurrencyKey = KEYS[5]
+local envQueueKey = KEYS[6]
+local nackCounterKey = KEYS[7]
--- Args: childQueueName, messageId, currentTime, messageScore
-local childQueueName = ARGV[1]
+local queueName = ARGV[1]
local messageId = ARGV[2]
local currentTime = tonumber(ARGV[3])
local messageScore = tonumber(ARGV[4])
--- Update the concurrency keys
-redis.call('SREM', concurrencyKey, messageId)
-redis.call('SREM', envConcurrencyKey, messageId)
-redis.call('SREM', orgConcurrencyKey, messageId)
+-- Update the current concurrency keys
+redis.call('SREM', queueCurrentConcurrencyKey, messageId)
+redis.call('SREM', envCurrentConcurrencyKey, messageId)
-- Enqueue the message into the queue
-redis.call('ZADD', childQueueKey, messageScore, messageId)
+redis.call('ZADD', queueKey, messageScore, messageId)
-- Enqueue the message into the env queue
redis.call('ZADD', envQueueKey, messageScore, messageId)
@@ -1396,64 +1618,22 @@ redis.call('INCR', nackCounterKey)
redis.call('EXPIRE', nackCounterKey, 2592000)
-- Rebalance the parent queue
-local earliestMessage = redis.call('ZRANGE', childQueueKey, 0, 0, 'WITHSCORES')
+local earliestMessage = redis.call('ZRANGE', queueKey, 0, 0, 'WITHSCORES')
if #earliestMessage == 0 then
- redis.call('ZREM', parentQueueKey, childQueueName)
+ redis.call('ZREM', parentQueueKey, queueName)
else
- redis.call('ZADD', parentQueueKey, earliestMessage[2], childQueueName)
+ redis.call('ZADD', parentQueueKey, earliestMessage[2], queueName)
end
`,
});
- this.redis.defineCommand("releaseConcurrency", {
- numberOfKeys: 3,
- lua: `
-local concurrencyKey = KEYS[1]
-local envCurrentConcurrencyKey = KEYS[2]
-local orgCurrentConcurrencyKey = KEYS[3]
-
-local messageId = ARGV[1]
-
--- Update the concurrency keys
-if concurrencyKey ~= "" then
- redis.call('SREM', concurrencyKey, messageId)
-end
-redis.call('SREM', envCurrentConcurrencyKey, messageId)
-redis.call('SREM', orgCurrentConcurrencyKey, messageId)
-`,
- });
-
- this.redis.defineCommand("calculateQueueCurrentConcurrencies", {
- numberOfKeys: 3,
- lua: `
--- Keys: currentConcurrencyKey, currentEnvConcurrencyKey, currentOrgConcurrencyKey
-local currentConcurrencyKey = KEYS[1]
-local currentEnvConcurrencyKey = KEYS[2]
-local currentOrgConcurrencyKey = KEYS[3]
-
-local currentOrgConcurrency = tonumber(redis.call('SCARD', currentOrgConcurrencyKey) or '0')
-
-local currentEnvConcurrency = tonumber(redis.call('SCARD', currentEnvConcurrencyKey) or '0')
-
-local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0')
-
-return { currentOrgConcurrency, currentEnvConcurrency, currentConcurrency }
- `,
- });
-
this.redis.defineCommand("updateGlobalConcurrencyLimits", {
- numberOfKeys: 2,
+ numberOfKeys: 1,
lua: `
--- Keys: envConcurrencyLimitKey, orgConcurrencyLimitKey
local envConcurrencyLimitKey = KEYS[1]
-local orgConcurrencyLimitKey = KEYS[2]
-
--- Args: envConcurrencyLimit, orgConcurrencyLimit
local envConcurrencyLimit = ARGV[1]
-local orgConcurrencyLimit = ARGV[2]
redis.call('SET', envConcurrencyLimitKey, envConcurrencyLimit)
-redis.call('SET', orgConcurrencyLimitKey, orgConcurrencyLimit)
`,
});
@@ -1493,34 +1673,73 @@ end
declare module "ioredis" {
interface RedisCommander {
enqueueMessage(
- queue: string,
- parentQueue: string,
+ queueKey: string,
+ parentQueueKey: string,
messageKey: string,
- concurrencyKey: string,
- envConcurrencyKey: string,
- orgConcurrencyKey: string,
- envQueue: string,
+ queueCurrentConcurrencyKey: string,
+ queueReserveConcurrencyKey: string,
+ envCurrentConcurrencyKey: string,
+ envReserveConcurrencyKey: string,
+ envQueueKey: string,
queueName: string,
messageId: string,
messageData: string,
messageScore: string,
- callback?: Callback
- ): Result;
+ callback?: Callback
+ ): Result;
+
+ enqueueMessageWithReservingConcurrency(
+ queueKey: string,
+ parentQueueKey: string,
+ messageKey: string,
+ queueCurrentConcurrencyKey: string,
+ queueReserveConcurrencyKey: string,
+ envCurrentConcurrencyKey: string,
+ envReserveConcurrencyKey: string,
+ envConcurrencyLimitKey: string,
+ envQueueKey: string,
+ queueName: string,
+ messageId: string,
+ messageData: string,
+ messageScore: string,
+ reserveMessageId: string,
+ defaultEnvConcurrencyLimit: string,
+ callback?: Callback
+ ): Result;
+
+ enqueueMessageWithReservingConcurrencyOnRecursiveQueue(
+ queueKey: string,
+ parentQueueKey: string,
+ messageKey: string,
+ queueCurrentConcurrencyKey: string,
+ queueReserveConcurrencyKey: string,
+ queueConcurrencyLimitKey: string,
+ envCurrentConcurrencyKey: string,
+ envReserveConcurrencyKey: string,
+ envConcurrencyLimitKey: string,
+ envQueueKey: string,
+ queueName: string,
+ messageId: string,
+ messageData: string,
+ messageScore: string,
+ reserveMessageId: string,
+ defaultEnvConcurrencyLimit: string,
+ callback?: Callback
+ ): Result;
dequeueMessage(
- childQueue: string,
- parentQueue: string,
- concurrencyLimitKey: string,
+ queueKey: string,
+ parentQueueKey: string,
+ queueConcurrencyLimitKey: string,
envConcurrencyLimitKey: string,
- orgConcurrencyLimitKey: string,
- currentConcurrencyKey: string,
+ queueCurrentConcurrencyKey: string,
+ queueReserveConcurrencyKey: string,
envCurrentConcurrencyKey: string,
- orgCurrentConcurrencyKey: string,
+ envReserveConcurrencyKey: string,
envQueueKey: string,
- childQueueName: string,
+ queueName: string,
currentTime: string,
defaultEnvConcurrencyLimit: string,
- defaultOrgConcurrencyLimit: string,
callback?: Callback<[string, string]>
): Result<[string, string] | null, Context>;
@@ -1535,8 +1754,9 @@ declare module "ioredis" {
messageKey: string,
messageQueue: string,
concurrencyKey: string,
+ queueReserveConcurrencyKey: string,
envConcurrencyKey: string,
- orgConcurrencyKey: string,
+ envReserveConcurrencyKey: string,
envQueueKey: string,
messageId: string,
messageQueueName: string,
@@ -1545,33 +1765,22 @@ declare module "ioredis" {
nackMessage(
messageKey: string,
- childQueueKey: string,
+ queueKey: string,
parentQueueKey: string,
- concurrencyKey: string,
- envConcurrencyKey: string,
- orgConcurrencyKey: string,
+ queueCurrentConcurrencyKey: string,
+ envCurrentConcurrencyKey: string,
envQueueKey: string,
nackCounterKey: string,
- childQueueName: string,
+ queueName: string,
messageId: string,
currentTime: string,
messageScore: string,
callback?: Callback
): Result;
- releaseConcurrency(
- concurrencyKey: string,
- envConcurrencyKey: string,
- orgConcurrencyKey: string,
- messageId: string,
- callback?: Callback
- ): Result;
-
updateGlobalConcurrencyLimits(
envConcurrencyLimitKey: string,
- orgConcurrencyLimitKey: string,
envConcurrencyLimit: string,
- orgConcurrencyLimit: string,
callback?: Callback
): Result;
@@ -1582,80 +1791,75 @@ declare module "ioredis" {
currentScore: string,
callback?: Callback
): Result;
-
- calculateQueueCurrentConcurrencies(
- currentConcurrencyKey: string,
- currentEnvConcurrencyKey: string,
- currentOrgConcurrencyKey: string,
- callback?: Callback
- ): Result;
}
}
export const marqs = singleton("marqs", getMarQSClient);
function getMarQSClient() {
- if (env.V3_ENABLED) {
- if (env.REDIS_HOST && env.REDIS_PORT) {
- const redisOptions = {
- keyPrefix: KEY_PREFIX,
- port: env.REDIS_PORT,
- host: env.REDIS_HOST,
- username: env.REDIS_USERNAME,
- password: env.REDIS_PASSWORD,
- enableAutoPipelining: true,
- ...(env.REDIS_TLS_DISABLED === "true" ? {} : { tls: {} }),
- };
-
- const redis = new Redis(redisOptions);
- const keysProducer = new MarQSShortKeyProducer(KEY_PREFIX);
-
- return new MarQS({
- name: "marqs",
- tracer: trace.getTracer("marqs"),
- keysProducer,
- visibilityTimeoutStrategy: new V3LegacyRunEngineWorkerVisibilityTimeout(),
- queuePriorityStrategy: new FairDequeuingStrategy({
- tracer: tracer,
- redis,
- parentQueueLimit: env.MARQS_SHARED_QUEUE_LIMIT,
- keys: keysProducer,
- defaultEnvConcurrency: env.DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT,
- defaultOrgConcurrency: env.DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT,
- biases: {
- concurrencyLimitBias: env.MARQS_CONCURRENCY_LIMIT_BIAS,
- availableCapacityBias: env.MARQS_AVAILABLE_CAPACITY_BIAS,
- queueAgeRandomization: env.MARQS_QUEUE_AGE_RANDOMIZATION_BIAS,
- },
- reuseSnapshotCount: env.MARQS_REUSE_SNAPSHOT_COUNT,
- maximumOrgCount: env.MARQS_MAXIMUM_ORG_COUNT,
- }),
- envQueuePriorityStrategy: new FairDequeuingStrategy({
- tracer: tracer,
- redis,
- parentQueueLimit: env.MARQS_DEV_QUEUE_LIMIT,
- keys: keysProducer,
- defaultEnvConcurrency: env.DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT,
- defaultOrgConcurrency: env.DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT,
- biases: {
- concurrencyLimitBias: 0.0,
- availableCapacityBias: 0.0,
- queueAgeRandomization: 0.1,
- },
- }),
- workers: 1,
+ if (!env.REDIS_HOST || !env.REDIS_PORT) {
+ throw new Error(
+ "Could not initialize Trigger.dev because process.env.REDIS_HOST and process.env.REDIS_PORT are required to be set."
+ );
+ }
+
+ const redisOptions = {
+ keyPrefix: KEY_PREFIX,
+ port: env.REDIS_PORT,
+ host: env.REDIS_HOST,
+ username: env.REDIS_USERNAME,
+ password: env.REDIS_PASSWORD,
+ enableAutoPipelining: true,
+ ...(env.REDIS_TLS_DISABLED === "true" ? {} : { tls: {} }),
+ };
+
+ const redis = new Redis(redisOptions);
+ const keysProducer = new MarQSShortKeyProducer(KEY_PREFIX);
+
+ return new MarQS({
+ name: "marqs",
+ tracer: trace.getTracer("marqs"),
+ keysProducer,
+ visibilityTimeoutStrategy: new V3LegacyRunEngineWorkerVisibilityTimeout(),
+ queuePriorityStrategy: new EnvPriorityDequeuingStrategy({
+ keys: keysProducer,
+ delegate: new FairDequeuingStrategy({
+ tracer: tracer,
redis,
+ parentQueueLimit: env.MARQS_SHARED_QUEUE_LIMIT,
+ keys: keysProducer,
defaultEnvConcurrency: env.DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT,
- defaultOrgConcurrency: env.DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT,
- visibilityTimeoutInMs: env.MARQS_VISIBILITY_TIMEOUT_MS,
- enableRebalancing: !env.MARQS_DISABLE_REBALANCING,
- maximumNackCount: env.MARQS_MAXIMUM_NACK_COUNT,
- subscriber: concurrencyTracker,
- });
- } else {
- console.warn(
- "Could not initialize MarQS because process.env.REDIS_HOST and process.env.REDIS_PORT are required to be set. Trigger.dev v3 will not work without this."
- );
- }
- }
+ biases: {
+ concurrencyLimitBias: env.MARQS_CONCURRENCY_LIMIT_BIAS,
+ availableCapacityBias: env.MARQS_AVAILABLE_CAPACITY_BIAS,
+ queueAgeRandomization: env.MARQS_QUEUE_AGE_RANDOMIZATION_BIAS,
+ },
+ reuseSnapshotCount: env.MARQS_REUSE_SNAPSHOT_COUNT,
+ maximumEnvCount: env.MARQS_MAXIMUM_ENV_COUNT,
+ }),
+ }),
+ envQueuePriorityStrategy: new EnvPriorityDequeuingStrategy({
+ keys: keysProducer,
+ delegate: new FairDequeuingStrategy({
+ tracer: tracer,
+ redis,
+ parentQueueLimit: env.MARQS_DEV_QUEUE_LIMIT,
+ keys: keysProducer,
+ defaultEnvConcurrency: env.DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT,
+ biases: {
+ concurrencyLimitBias: 0.0,
+ availableCapacityBias: 0.0,
+ queueAgeRandomization: 0.1,
+ },
+ }),
+ }),
+ workers: 1,
+ redis,
+ defaultEnvConcurrency: env.DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT,
+ defaultOrgConcurrency: env.DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT,
+ visibilityTimeoutInMs: env.MARQS_VISIBILITY_TIMEOUT_MS,
+ enableRebalancing: !env.MARQS_DISABLE_REBALANCING,
+ maximumNackCount: env.MARQS_MAXIMUM_NACK_COUNT,
+ subscriber: concurrencyTracker,
+ });
}
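Both priority strategies above are now wrapped in `EnvPriorityDequeuingStrategy` (added by this patch as `envPriorityDequeuingStrategy.server.ts`, not reproduced here). The following is a hypothetical sketch of a decorator with that `{ keys, delegate }` shape, shown only to illustrate the wiring; this version just reorders each environment's queues so that higher `priority:` sections come first, and the real implementation may differ:

```ts
// Hypothetical decorator sketch (not from the patch).
import type { EnvQueues, MarQSFairDequeueStrategy, MarQSKeyProducer } from "./types";

export class PriorityOrderingStrategySketch implements MarQSFairDequeueStrategy {
  constructor(private options: { keys: MarQSKeyProducer; delegate: MarQSFairDequeueStrategy }) {}

  async distributeFairQueuesFromParentQueue(
    parentQueue: string,
    consumerId: string
  ): Promise<Array<EnvQueues>> {
    const envs = await this.options.delegate.distributeFairQueuesFromParentQueue(
      parentQueue,
      consumerId
    );

    // Fairness across environments stays in the delegate; only per-env ordering changes here.
    return envs.map((env) => ({
      envId: env.envId,
      queues: [...env.queues].sort(
        (a, b) =>
          (this.options.keys.queueDescriptorFromQueue(b).priority ?? 0) -
          (this.options.keys.queueDescriptorFromQueue(a).priority ?? 0)
      ),
    }));
  }
}
```

The decorator shape keeps fairness across environments in `FairDequeuingStrategy` while priority ordering stays purely key-driven.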
diff --git a/apps/webapp/app/v3/marqs/marqsKeyProducer.server.ts b/apps/webapp/app/v3/marqs/marqsKeyProducer.server.ts
deleted file mode 100644
index 227a0a9dd2..0000000000
--- a/apps/webapp/app/v3/marqs/marqsKeyProducer.server.ts
+++ /dev/null
@@ -1,227 +0,0 @@
-import { AuthenticatedEnvironment } from "~/services/apiAuth.server";
-import { MarQSKeyProducer } from "./types";
-
-const constants = {
- SHARED_QUEUE: "sharedQueue",
- CURRENT_CONCURRENCY_PART: "currentConcurrency",
- CONCURRENCY_LIMIT_PART: "concurrency",
- DISABLED_CONCURRENCY_LIMIT_PART: "disabledConcurrency",
- ENV_PART: "env",
- ORG_PART: "org",
- QUEUE_PART: "queue",
- CONCURRENCY_KEY_PART: "ck",
- MESSAGE_PART: "message",
-} as const;
-
-export class MarQSShortKeyProducer implements MarQSKeyProducer {
- constructor(private _prefix: string) {}
-
- sharedQueueScanPattern() {
- return `${this._prefix}*${constants.SHARED_QUEUE}`;
- }
-
- queueCurrentConcurrencyScanPattern() {
- return `${this._prefix}${constants.ORG_PART}:*:${constants.ENV_PART}:*:queue:*:${constants.CURRENT_CONCURRENCY_PART}`;
- }
-
- stripKeyPrefix(key: string): string {
- if (key.startsWith(this._prefix)) {
- return key.slice(this._prefix.length);
- }
-
- return key;
- }
-
- queueConcurrencyLimitKey(env: AuthenticatedEnvironment, queue: string) {
- return [this.queueKey(env, queue), constants.CONCURRENCY_LIMIT_PART].join(":");
- }
-
- envConcurrencyLimitKey(envId: string): string;
- envConcurrencyLimitKey(env: AuthenticatedEnvironment): string;
- envConcurrencyLimitKey(envOrId: AuthenticatedEnvironment | string): string {
- return [
- this.envKeySection(typeof envOrId === "string" ? envOrId : envOrId.id),
- constants.CONCURRENCY_LIMIT_PART,
- ].join(":");
- }
-
- orgConcurrencyLimitKey(orgId: string): string;
- orgConcurrencyLimitKey(env: AuthenticatedEnvironment): string;
- orgConcurrencyLimitKey(envOrOrgId: AuthenticatedEnvironment | string) {
- return [
- this.orgKeySection(typeof envOrOrgId === "string" ? envOrOrgId : envOrOrgId.organizationId),
- constants.CONCURRENCY_LIMIT_PART,
- ].join(":");
- }
-
- queueKey(orgId: string, envId: string, queue: string, concurrencyKey?: string): string;
- queueKey(env: AuthenticatedEnvironment, queue: string, concurrencyKey?: string): string;
- queueKey(
- envOrOrgId: AuthenticatedEnvironment | string,
- queueOrEnvId: string,
- queueOrConcurrencyKey: string,
- concurrencyKey?: string
- ): string {
- if (typeof envOrOrgId === "string") {
- return [
- this.orgKeySection(envOrOrgId),
- this.envKeySection(queueOrEnvId),
- this.queueSection(queueOrConcurrencyKey),
- ]
- .concat(concurrencyKey ? this.concurrencyKeySection(concurrencyKey) : [])
- .join(":");
- } else {
- return [
- this.orgKeySection(envOrOrgId.organizationId),
- this.envKeySection(envOrOrgId.id),
- this.queueSection(queueOrEnvId),
- ]
- .concat(queueOrConcurrencyKey ? this.concurrencyKeySection(queueOrConcurrencyKey) : [])
- .join(":");
- }
- }
-
- envSharedQueueKey(env: AuthenticatedEnvironment) {
- if (env.type === "DEVELOPMENT") {
- return [
- this.orgKeySection(env.organizationId),
- this.envKeySection(env.id),
- constants.SHARED_QUEUE,
- ].join(":");
- }
-
- return this.sharedQueueKey();
- }
-
- sharedQueueKey(): string {
- return constants.SHARED_QUEUE;
- }
-
- concurrencyLimitKeyFromQueue(queue: string) {
- const concurrencyQueueName = queue.replace(/:ck:.+$/, "");
-
- return `${concurrencyQueueName}:${constants.CONCURRENCY_LIMIT_PART}`;
- }
-
- currentConcurrencyKeyFromQueue(queue: string) {
- return `${queue}:${constants.CURRENT_CONCURRENCY_PART}`;
- }
-
- currentConcurrencyKey(
- env: AuthenticatedEnvironment,
- queue: string,
- concurrencyKey?: string
- ): string {
- return [this.queueKey(env, queue, concurrencyKey), constants.CURRENT_CONCURRENCY_PART].join(
- ":"
- );
- }
-
- disabledConcurrencyLimitKeyFromQueue(queue: string) {
- const orgId = this.normalizeQueue(queue).split(":")[1];
-
- return this.disabledConcurrencyLimitKey(orgId);
- }
-
- disabledConcurrencyLimitKey(orgId: string) {
- return `${constants.ORG_PART}:${orgId}:${constants.DISABLED_CONCURRENCY_LIMIT_PART}`;
- }
-
- orgConcurrencyLimitKeyFromQueue(queue: string) {
- const orgId = this.normalizeQueue(queue).split(":")[1];
-
- return `${constants.ORG_PART}:${orgId}:${constants.CONCURRENCY_LIMIT_PART}`;
- }
-
- orgCurrentConcurrencyKeyFromQueue(queue: string) {
- const orgId = this.normalizeQueue(queue).split(":")[1];
-
- return `${constants.ORG_PART}:${orgId}:${constants.CURRENT_CONCURRENCY_PART}`;
- }
-
- envConcurrencyLimitKeyFromQueue(queue: string) {
- const envId = this.normalizeQueue(queue).split(":")[3];
-
- return `${constants.ENV_PART}:${envId}:${constants.CONCURRENCY_LIMIT_PART}`;
- }
-
- envCurrentConcurrencyKeyFromQueue(queue: string) {
- const envId = this.normalizeQueue(queue).split(":")[3];
-
- return `${constants.ENV_PART}:${envId}:${constants.CURRENT_CONCURRENCY_PART}`;
- }
-
- orgCurrentConcurrencyKey(orgId: string): string;
- orgCurrentConcurrencyKey(env: AuthenticatedEnvironment): string;
- orgCurrentConcurrencyKey(envOrOrgId: AuthenticatedEnvironment | string): string {
- return [
- this.orgKeySection(typeof envOrOrgId === "string" ? envOrOrgId : envOrOrgId.organizationId),
- constants.CURRENT_CONCURRENCY_PART,
- ].join(":");
- }
-
- envCurrentConcurrencyKey(envId: string): string;
- envCurrentConcurrencyKey(env: AuthenticatedEnvironment): string;
- envCurrentConcurrencyKey(envOrId: AuthenticatedEnvironment | string): string {
- return [
- this.envKeySection(typeof envOrId === "string" ? envOrId : envOrId.id),
- constants.CURRENT_CONCURRENCY_PART,
- ].join(":");
- }
-
- envQueueKeyFromQueue(queue: string) {
- const envId = this.normalizeQueue(queue).split(":")[3];
-
- return `${constants.ENV_PART}:${envId}:${constants.QUEUE_PART}`;
- }
-
- envQueueKey(env: AuthenticatedEnvironment): string {
- return [constants.ENV_PART, this.shortId(env.id), constants.QUEUE_PART].join(":");
- }
-
- messageKey(messageId: string) {
- return `${constants.MESSAGE_PART}:${messageId}`;
- }
-
- nackCounterKey(messageId: string): string {
- return `${constants.MESSAGE_PART}:${messageId}:nacks`;
- }
-
- orgIdFromQueue(queue: string) {
- return this.normalizeQueue(queue).split(":")[1];
- }
-
- envIdFromQueue(queue: string) {
- return this.normalizeQueue(queue).split(":")[3];
- }
-
- private shortId(id: string) {
- // Return the last 12 characters of the id
- return id.slice(-12);
- }
-
- private envKeySection(envId: string) {
- return `${constants.ENV_PART}:${this.shortId(envId)}`;
- }
-
- private orgKeySection(orgId: string) {
- return `${constants.ORG_PART}:${this.shortId(orgId)}`;
- }
-
- private queueSection(queue: string) {
- return `${constants.QUEUE_PART}:${queue}`;
- }
-
- private concurrencyKeySection(concurrencyKey: string) {
- return `${constants.CONCURRENCY_KEY_PART}:${concurrencyKey}`;
- }
-
- // This removes the leading prefix from the queue name if it exists
- private normalizeQueue(queue: string) {
- if (queue.startsWith(this._prefix)) {
- return queue.slice(this._prefix.length);
- }
-
- return queue;
- }
-}
diff --git a/apps/webapp/app/v3/marqs/marqsKeyProducer.ts b/apps/webapp/app/v3/marqs/marqsKeyProducer.ts
new file mode 100644
index 0000000000..a4fbfb6a62
--- /dev/null
+++ b/apps/webapp/app/v3/marqs/marqsKeyProducer.ts
@@ -0,0 +1,312 @@
+import { MarQSKeyProducer, MarQSKeyProducerEnv, QueueDescriptor } from "./types";
+
+const constants = {
+ SHARED_QUEUE: "sharedQueue",
+ CURRENT_CONCURRENCY_PART: "currentConcurrency",
+ CONCURRENCY_LIMIT_PART: "concurrency",
+ DISABLED_CONCURRENCY_LIMIT_PART: "disabledConcurrency",
+ ENV_PART: "env",
+ ORG_PART: "org",
+ QUEUE_PART: "queue",
+ CONCURRENCY_KEY_PART: "ck",
+ MESSAGE_PART: "message",
+ RESERVE_CONCURRENCY_PART: "reserveConcurrency",
+ PRIORITY_PART: "priority",
+} as const;
+
+const ORG_REGEX = /org:([^:]+):/;
+const ENV_REGEX = /env:([^:]+):/;
+const QUEUE_REGEX = /queue:([^:]+)(?::|$)/;
+const CONCURRENCY_KEY_REGEX = /ck:([^:]+)(?::|$)/;
+const PRIORITY_REGEX = /priority:(\d+)(?::|$)/;
+
+export class MarQSShortKeyProducer implements MarQSKeyProducer {
+ constructor(private _prefix: string) {}
+
+ sharedQueueScanPattern() {
+ return `${this._prefix}*${constants.SHARED_QUEUE}`;
+ }
+
+ queueCurrentConcurrencyScanPattern() {
+ return `${this._prefix}${constants.ORG_PART}:*:${constants.ENV_PART}:*:queue:*:${constants.CURRENT_CONCURRENCY_PART}`;
+ }
+
+ stripKeyPrefix(key: string): string {
+ if (key.startsWith(this._prefix)) {
+ return key.slice(this._prefix.length);
+ }
+
+ return key;
+ }
+
+ queueConcurrencyLimitKey(env: MarQSKeyProducerEnv, queue: string) {
+ return [this.queueKey(env, queue), constants.CONCURRENCY_LIMIT_PART].join(":");
+ }
+
+ envConcurrencyLimitKey(envId: string): string;
+ envConcurrencyLimitKey(env: MarQSKeyProducerEnv): string;
+ envConcurrencyLimitKey(envOrId: MarQSKeyProducerEnv | string): string {
+ return [
+ this.envKeySection(typeof envOrId === "string" ? envOrId : envOrId.id),
+ constants.CONCURRENCY_LIMIT_PART,
+ ].join(":");
+ }
+
+ queueKey(
+ orgId: string,
+ envId: string,
+ queue: string,
+ concurrencyKey?: string,
+ priority?: number
+ ): string;
+ queueKey(
+ env: MarQSKeyProducerEnv,
+ queue: string,
+ concurrencyKey?: string,
+ priority?: number
+ ): string;
+ queueKey(
+ envOrOrgId: MarQSKeyProducerEnv | string,
+ queueOrEnvId: string,
+ queueOrConcurrencyKey: string,
+ concurrencyKeyOrPriority?: string | number,
+ priority?: number
+ ): string {
+ if (typeof envOrOrgId === "string") {
+ return [
+ this.orgKeySection(envOrOrgId),
+ this.envKeySection(queueOrEnvId),
+ this.queueSection(queueOrConcurrencyKey),
+ ]
+ .concat(
+ typeof concurrencyKeyOrPriority === "string"
+ ? this.concurrencyKeySection(concurrencyKeyOrPriority)
+ : []
+ )
+ .concat(typeof priority === "number" && priority ? this.prioritySection(priority) : [])
+ .join(":");
+ } else {
+ return [
+ this.orgKeySection(envOrOrgId.organizationId),
+ this.envKeySection(envOrOrgId.id),
+ this.queueSection(queueOrEnvId),
+ ]
+ .concat(queueOrConcurrencyKey ? this.concurrencyKeySection(queueOrConcurrencyKey) : [])
+ .concat(
+ typeof concurrencyKeyOrPriority === "number" && concurrencyKeyOrPriority
+ ? this.prioritySection(concurrencyKeyOrPriority)
+ : []
+ )
+ .join(":");
+ }
+ }
+
+ queueKeyFromQueue(queue: string, priority?: number): string {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return this.queueKey(
+ descriptor.organization,
+ descriptor.environment,
+ descriptor.name,
+ descriptor.concurrencyKey,
+ descriptor.priority ?? priority
+ );
+ }
+
+ envSharedQueueKey(env: MarQSKeyProducerEnv) {
+ if (env.type === "DEVELOPMENT") {
+ return [
+ this.orgKeySection(env.organizationId),
+ this.envKeySection(env.id),
+ constants.SHARED_QUEUE,
+ ].join(":");
+ }
+
+ return this.sharedQueueKey();
+ }
+
+ sharedQueueKey(): string {
+ return constants.SHARED_QUEUE;
+ }
+
+ queueConcurrencyLimitKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return this.queueConcurrencyLimitKeyFromDescriptor(descriptor);
+ }
+
+ queueCurrentConcurrencyKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+ return this.currentConcurrencyKeyFromDescriptor(descriptor);
+ }
+
+ queueReserveConcurrencyKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return this.queueReserveConcurrencyKeyFromDescriptor(descriptor);
+ }
+
+ queueCurrentConcurrencyKey(
+ env: MarQSKeyProducerEnv,
+ queue: string,
+ concurrencyKey?: string
+ ): string {
+ return [this.queueKey(env, queue, concurrencyKey), constants.CURRENT_CONCURRENCY_PART].join(
+ ":"
+ );
+ }
+
+ envConcurrencyLimitKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return `${constants.ENV_PART}:${descriptor.environment}:${constants.CONCURRENCY_LIMIT_PART}`;
+ }
+
+ envCurrentConcurrencyKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return `${constants.ENV_PART}:${descriptor.environment}:${constants.CURRENT_CONCURRENCY_PART}`;
+ }
+
+ envReserveConcurrencyKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return this.envReserveConcurrencyKey(descriptor.environment);
+ }
+
+ envReserveConcurrencyKey(envId: string): string {
+ return `${constants.ENV_PART}:${this.shortId(envId)}:${constants.RESERVE_CONCURRENCY_PART}`;
+ }
+
+ envCurrentConcurrencyKey(envId: string): string;
+ envCurrentConcurrencyKey(env: MarQSKeyProducerEnv): string;
+ envCurrentConcurrencyKey(envOrId: MarQSKeyProducerEnv | string): string {
+ return [
+ this.envKeySection(typeof envOrId === "string" ? envOrId : envOrId.id),
+ constants.CURRENT_CONCURRENCY_PART,
+ ].join(":");
+ }
+
+ envQueueKeyFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return `${constants.ENV_PART}:${descriptor.environment}:${constants.QUEUE_PART}`;
+ }
+
+ envQueueKey(env: MarQSKeyProducerEnv): string {
+ return [constants.ENV_PART, this.shortId(env.id), constants.QUEUE_PART].join(":");
+ }
+
+ messageKey(messageId: string) {
+ return `${constants.MESSAGE_PART}:${messageId}`;
+ }
+
+ nackCounterKey(messageId: string): string {
+ return `${constants.MESSAGE_PART}:${messageId}:nacks`;
+ }
+
+ orgIdFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return descriptor.organization;
+ }
+
+ envIdFromQueue(queue: string) {
+ const descriptor = this.queueDescriptorFromQueue(queue);
+
+ return descriptor.environment;
+ }
+
+ queueDescriptorFromQueue(queue: string): QueueDescriptor {
+ const match = queue.match(QUEUE_REGEX);
+
+ if (!match) {
+ throw new Error(`Invalid queue: ${queue}, no queue name found`);
+ }
+
+ const [, queueName] = match;
+
+ const envMatch = queue.match(ENV_REGEX);
+
+ if (!envMatch) {
+ throw new Error(`Invalid queue: ${queue}, no environment found`);
+ }
+
+ const [, envId] = envMatch;
+
+ const orgMatch = queue.match(ORG_REGEX);
+
+ if (!orgMatch) {
+ throw new Error(`Invalid queue: ${queue}, no organization found`);
+ }
+
+ const [, orgId] = orgMatch;
+
+ const concurrencyKeyMatch = queue.match(CONCURRENCY_KEY_REGEX);
+
+ const concurrencyKey = concurrencyKeyMatch ? concurrencyKeyMatch[1] : undefined;
+
+ const priorityMatch = queue.match(PRIORITY_REGEX);
+
+ const priority = priorityMatch ? parseInt(priorityMatch[1], 10) : undefined;
+
+ return {
+ name: queueName,
+ environment: envId,
+ organization: orgId,
+ concurrencyKey,
+ priority,
+ };
+ }
+
+ private shortId(id: string) {
+ // Return the last 12 characters of the id
+ return id.slice(-12);
+ }
+
+ private envKeySection(envId: string) {
+ return `${constants.ENV_PART}:${this.shortId(envId)}`;
+ }
+
+ private orgKeySection(orgId: string) {
+ return `${constants.ORG_PART}:${this.shortId(orgId)}`;
+ }
+
+ private queueSection(queue: string) {
+ return `${constants.QUEUE_PART}:${queue}`;
+ }
+
+ private concurrencyKeySection(concurrencyKey: string) {
+ return `${constants.CONCURRENCY_KEY_PART}:${concurrencyKey}`;
+ }
+
+ private prioritySection(priority: number) {
+ return `${constants.PRIORITY_PART}:${priority}`;
+ }
+
+ private currentConcurrencyKeyFromDescriptor(descriptor: QueueDescriptor) {
+ return [
+ this.queueKey(
+ descriptor.organization,
+ descriptor.environment,
+ descriptor.name,
+ descriptor.concurrencyKey
+ ),
+ constants.CURRENT_CONCURRENCY_PART,
+ ].join(":");
+ }
+
+ private queueReserveConcurrencyKeyFromDescriptor(descriptor: QueueDescriptor) {
+ return [
+ this.queueKey(descriptor.organization, descriptor.environment, descriptor.name),
+ constants.RESERVE_CONCURRENCY_PART,
+ ].join(":");
+ }
+
+ private queueConcurrencyLimitKeyFromDescriptor(descriptor: QueueDescriptor) {
+ return [
+ this.queueKey(descriptor.organization, descriptor.environment, descriptor.name),
+ constants.CONCURRENCY_LIMIT_PART,
+ ].join(":");
+ }
+}
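For illustration only (not part of the patch), here is what `queueDescriptorFromQueue` extracts from a fully-qualified queue key. The ids below are made up and stand in for the 12-character short ids the producer emits:

```ts
// Parsing a fully-qualified queue key with the new descriptor-based key producer.
const keys = new MarQSShortKeyProducer("marqs:");

const descriptor = keys.queueDescriptorFromQueue(
  "org:abc123456789:env:def123456789:queue:my-queue:ck:user-1:priority:2"
);
// descriptor === {
//   name: "my-queue",
//   environment: "def123456789",
//   organization: "abc123456789",
//   concurrencyKey: "user-1",
//   priority: 2,
// }
```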
diff --git a/apps/webapp/app/v3/marqs/types.ts b/apps/webapp/app/v3/marqs/types.ts
index e769a71206..339f1bded9 100644
--- a/apps/webapp/app/v3/marqs/types.ts
+++ b/apps/webapp/app/v3/marqs/types.ts
@@ -1,57 +1,85 @@
+import type { RuntimeEnvironmentType } from "@trigger.dev/database";
import { z } from "zod";
-import { type AuthenticatedEnvironment } from "~/services/apiAuth.server";
export type QueueRange = { offset: number; count: number };
+export type QueueDescriptor = {
+ organization: string;
+ environment: string;
+ name: string;
+ concurrencyKey?: string;
+ priority?: number;
+};
+
+export type MarQSKeyProducerEnv = {
+ id: string;
+ organizationId: string;
+ type: RuntimeEnvironmentType;
+};
+
export interface MarQSKeyProducer {
- queueConcurrencyLimitKey(env: AuthenticatedEnvironment, queue: string): string;
+ queueConcurrencyLimitKey(env: MarQSKeyProducerEnv, queue: string): string;
envConcurrencyLimitKey(envId: string): string;
- envConcurrencyLimitKey(env: AuthenticatedEnvironment): string;
+ envConcurrencyLimitKey(env: MarQSKeyProducerEnv): string;
- orgConcurrencyLimitKey(orgId: string): string;
- orgConcurrencyLimitKey(env: AuthenticatedEnvironment): string;
+ envCurrentConcurrencyKey(envId: string): string;
+ envCurrentConcurrencyKey(env: MarQSKeyProducerEnv): string;
- orgCurrentConcurrencyKey(orgId: string): string;
- orgCurrentConcurrencyKey(env: AuthenticatedEnvironment): string;
+ envReserveConcurrencyKey(envId: string): string;
- envCurrentConcurrencyKey(envId: string): string;
- envCurrentConcurrencyKey(env: AuthenticatedEnvironment): string;
+ queueKey(
+ orgId: string,
+ envId: string,
+ queue: string,
+ concurrencyKey?: string,
+ priority?: number
+ ): string;
+ queueKey(
+ env: MarQSKeyProducerEnv,
+ queue: string,
+ concurrencyKey?: string,
+ priority?: number
+ ): string;
- queueKey(orgId: string, envId: string, queue: string, concurrencyKey?: string): string;
- queueKey(env: AuthenticatedEnvironment, queue: string, concurrencyKey?: string): string;
+ queueKeyFromQueue(queue: string, priority?: number): string;
- envQueueKey(env: AuthenticatedEnvironment): string;
- envSharedQueueKey(env: AuthenticatedEnvironment): string;
+ envQueueKey(env: MarQSKeyProducerEnv): string;
+ envSharedQueueKey(env: MarQSKeyProducerEnv): string;
sharedQueueKey(): string;
sharedQueueScanPattern(): string;
queueCurrentConcurrencyScanPattern(): string;
- concurrencyLimitKeyFromQueue(queue: string): string;
- currentConcurrencyKeyFromQueue(queue: string): string;
- currentConcurrencyKey(
- env: AuthenticatedEnvironment,
+ queueConcurrencyLimitKeyFromQueue(queue: string): string;
+ queueCurrentConcurrencyKeyFromQueue(queue: string): string;
+ queueCurrentConcurrencyKey(
+ env: MarQSKeyProducerEnv,
queue: string,
concurrencyKey?: string
): string;
- disabledConcurrencyLimitKey(orgId: string): string;
- disabledConcurrencyLimitKeyFromQueue(queue: string): string;
- orgConcurrencyLimitKeyFromQueue(queue: string): string;
- orgCurrentConcurrencyKeyFromQueue(queue: string): string;
envConcurrencyLimitKeyFromQueue(queue: string): string;
envCurrentConcurrencyKeyFromQueue(queue: string): string;
+ envReserveConcurrencyKeyFromQueue(queue: string): string;
envQueueKeyFromQueue(queue: string): string;
messageKey(messageId: string): string;
nackCounterKey(messageId: string): string;
stripKeyPrefix(key: string): string;
orgIdFromQueue(queue: string): string;
envIdFromQueue(queue: string): string;
+
+ queueReserveConcurrencyKeyFromQueue(queue: string): string;
+ queueDescriptorFromQueue(queue: string): QueueDescriptor;
}
+export type EnvQueues = {
+ envId: string;
+ queues: string[];
+};
+
export interface MarQSFairDequeueStrategy {
distributeFairQueuesFromParentQueue(
parentQueue: string,
consumerId: string
-  ): Promise<Array<string>>;
+  ): Promise<Array<EnvQueues>>;
}
export const MessagePayload = z.object({
@@ -78,3 +106,8 @@ export interface VisibilityTimeoutStrategy {
heartbeat(messageId: string, timeoutInMs: number): Promise<void>;
cancelHeartbeat(messageId: string): Promise<void>;
}
+
+export type EnqueueMessageReserveConcurrencyOptions = {
+ messageId: string;
+ recursiveQueue: boolean;
+};
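A hypothetical sketch (not from the patch) of how these options would map onto the three enqueue commands registered in `#registerCommands`; the helper name is illustrative:

```ts
// Illustrative dispatch only; the actual selection logic lives inside MarQS.
import type { EnqueueMessageReserveConcurrencyOptions } from "./types";

type EnqueueCommand =
  | "enqueueMessage"
  | "enqueueMessageWithReservingConcurrency"
  | "enqueueMessageWithReservingConcurrencyOnRecursiveQueue";

function pickEnqueueCommand(reserve?: EnqueueMessageReserveConcurrencyOptions): EnqueueCommand {
  if (!reserve) {
    // Nothing is waiting on this message: plain enqueue.
    return "enqueueMessage";
  }

  if (reserve.recursiveQueue) {
    // The waiting parent shares the child's queue, so reserving can fail (deadlock)
    // and the command must be able to return false.
    return "enqueueMessageWithReservingConcurrencyOnRecursiveQueue";
  }

  // The waiting parent is on a different queue: reserve env concurrency for reserve.messageId.
  return "enqueueMessageWithReservingConcurrency";
}
```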
diff --git a/apps/webapp/app/v3/marqs/v2.server.ts b/apps/webapp/app/v3/marqs/v2.server.ts
index fee98846b4..3492f42c57 100644
--- a/apps/webapp/app/v3/marqs/v2.server.ts
+++ b/apps/webapp/app/v3/marqs/v2.server.ts
@@ -9,7 +9,7 @@ import { PerformRunExecutionV3Service } from "~/services/runs/performRunExecutio
import { singleton } from "~/utils/singleton";
import { generateFriendlyId } from "../friendlyIdentifiers";
import { MarQS } from "./index.server";
-import { MarQSShortKeyProducer } from "./marqsKeyProducer.server";
+import { MarQSShortKeyProducer } from "./marqsKeyProducer";
import { RequeueV2Message } from "./requeueV2Message.server";
import { VisibilityTimeoutStrategy } from "./types";
import Redis from "ioredis";
@@ -80,7 +80,6 @@ function getMarQSClient() {
parentQueueLimit: 100,
keys: new MarQSV2KeyProducer(KEY_PREFIX),
defaultEnvConcurrency: env.V2_MARQS_DEFAULT_ENV_CONCURRENCY,
- defaultOrgConcurrency: env.DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT,
}),
envQueuePriorityStrategy: new NoopFairDequeuingStrategy(), // We don't use this in v2, since all queues go through the shared queue
workers: 0,
diff --git a/apps/webapp/app/v3/services/batchTriggerV3.server.ts b/apps/webapp/app/v3/services/batchTriggerV3.server.ts
index fdbc8d9c64..4ffd59aacc 100644
--- a/apps/webapp/app/v3/services/batchTriggerV3.server.ts
+++ b/apps/webapp/app/v3/services/batchTriggerV3.server.ts
@@ -456,18 +456,17 @@ export class BatchTriggerV3Service extends BaseService {
error: result.error,
});
- await this.#enqueueBatchTaskRun({
- batchId: batch.id,
- processingId: "0",
- range: {
- start: result.workingIndex,
- count: PROCESSING_BATCH_SIZE,
+ await this._prisma.batchTaskRun.update({
+ where: {
+ id: batch.id,
+ },
+ data: {
+ status: "ABORTED",
+ completedAt: new Date(),
},
- attemptCount: 0,
- strategy: "sequential",
});
- return batch;
+ throw result.error;
}
// Update the batch to be sealed
diff --git a/apps/webapp/app/v3/services/completeAttempt.server.ts b/apps/webapp/app/v3/services/completeAttempt.server.ts
index 235ae87352..ae5beb5bbc 100644
--- a/apps/webapp/app/v3/services/completeAttempt.server.ts
+++ b/apps/webapp/app/v3/services/completeAttempt.server.ts
@@ -22,7 +22,7 @@ import { env } from "~/env.server";
import { AuthenticatedEnvironment } from "~/services/apiAuth.server";
import { logger } from "~/services/logger.server";
import { safeJsonParse } from "~/utils/json";
-import { marqs } from "~/v3/marqs/index.server";
+import { marqs, MarQSPriorityLevel } from "~/v3/marqs/index.server";
import { createExceptionPropertiesFromError, eventRepository } from "../eventRepository.server";
import { FailedTaskRunRetryHelper } from "../failedTaskRun.server";
import { FAILED_RUN_STATUSES, isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus";
@@ -476,7 +476,8 @@ export class CompleteAttemptService extends BaseService {
checkpointEventId: this.opts.supportsRetryCheckpoints ? checkpointEventId : undefined,
retryCheckpointsDisabled: !this.opts.supportsRetryCheckpoints,
},
- executionRetry.timestamp
+ executionRetry.timestamp,
+ MarQSPriorityLevel.retry
);
};
@@ -614,8 +615,13 @@ export class CompleteAttemptService extends BaseService {
});
if (environment.type === "DEVELOPMENT") {
- // This is already an EXECUTE message so we can just NACK
- await marqs?.nackMessage(taskRunAttempt.taskRunId, executionRetry.timestamp);
+ marqs.replaceMessage(
+ taskRunAttempt.taskRunId,
+ {},
+ executionRetry.timestamp,
+ MarQSPriorityLevel.retry
+ );
+
return "RETRIED";
}
diff --git a/apps/webapp/app/v3/services/createCheckpoint.server.ts b/apps/webapp/app/v3/services/createCheckpoint.server.ts
index bbd8618898..15cdf697d9 100644
--- a/apps/webapp/app/v3/services/createCheckpoint.server.ts
+++ b/apps/webapp/app/v3/services/createCheckpoint.server.ts
@@ -2,7 +2,7 @@ import { CoordinatorToPlatformMessages, ManualCheckpointMetadata } from "@trigge
import type { InferSocketMessageSchema } from "@trigger.dev/core/v3/zodSocket";
import type { Checkpoint, CheckpointRestoreEvent } from "@trigger.dev/database";
import { logger } from "~/services/logger.server";
-import { marqs } from "~/v3/marqs/index.server";
+import { marqs, MarQSPriorityLevel } from "~/v3/marqs/index.server";
import { generateFriendlyId } from "../friendlyIdentifiers";
import { isFreezableAttemptStatus, isFreezableRunStatus } from "../taskStatus";
import { BaseService } from "./baseService.server";
@@ -174,7 +174,8 @@ export class CreateCheckpointService extends BaseService {
resumableAttemptId: attempt.id,
checkpointEventId: checkpointEvent.id,
},
- restoreAtUnixTimeMs
+ restoreAtUnixTimeMs,
+ MarQSPriorityLevel.resume
);
return {
@@ -302,6 +303,7 @@ export class CreateCheckpointService extends BaseService {
checkpointEventId: checkpointEvent.id,
},
undefined,
+ undefined,
true
);
diff --git a/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts b/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts
index 6306df4765..fd0f22f3c2 100644
--- a/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts
+++ b/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts
@@ -6,6 +6,7 @@ import { BaseService } from "./baseService.server";
import { ExpireEnqueuedRunService } from "./expireEnqueuedRun.server";
import { commonWorker } from "../commonWorker.server";
import { workerQueue } from "~/services/worker.server";
+import { enqueueRun } from "./enqueueRun.server";
export class EnqueueDelayedRunService extends BaseService {
public static async enqueue(runId: string, runAt?: Date) {
@@ -44,6 +45,24 @@ export class EnqueueDelayedRunService extends BaseService {
project: true,
},
},
+ dependency: {
+ include: {
+ dependentBatchRun: {
+ include: {
+ dependentTaskAttempt: {
+ include: {
+ taskRun: true,
+ },
+ },
+ },
+ },
+ dependentAttempt: {
+ include: {
+ taskRun: true,
+ },
+ },
+ },
+ },
},
});
@@ -83,18 +102,12 @@ export class EnqueueDelayedRunService extends BaseService {
}
});
- await marqs?.enqueueMessage(
- run.runtimeEnvironment,
- run.queue,
- run.id,
- {
- type: "EXECUTE",
- taskIdentifier: run.taskIdentifier,
- projectId: run.runtimeEnvironment.projectId,
- environmentId: run.runtimeEnvironment.id,
- environmentType: run.runtimeEnvironment.type,
- },
- run.concurrencyKey ?? undefined
- );
+ await enqueueRun({
+ env: run.runtimeEnvironment,
+ run: run,
+ dependentRun:
+ run.dependency?.dependentAttempt?.taskRun ??
+ run.dependency?.dependentBatchRun?.dependentTaskAttempt?.taskRun,
+ });
}
}
diff --git a/apps/webapp/app/v3/services/enqueueRun.server.ts b/apps/webapp/app/v3/services/enqueueRun.server.ts
new file mode 100644
index 0000000000..cb091d70d9
--- /dev/null
+++ b/apps/webapp/app/v3/services/enqueueRun.server.ts
@@ -0,0 +1,66 @@
+import { TaskRunError, TaskRunErrorCodes } from "@trigger.dev/core/v3/schemas";
+import { TaskRun } from "@trigger.dev/database";
+import { AuthenticatedEnvironment } from "~/services/apiAuth.server";
+import { marqs } from "../marqs/index.server";
+
+export type EnqueueRunOptions = {
+ env: AuthenticatedEnvironment;
+ run: TaskRun;
+ dependentRun?: { queue: string; id: string };
+};
+
+export type EnqueueRunResult =
+ | {
+ ok: true;
+ }
+ | {
+ ok: false;
+ error: TaskRunError;
+ };
+
+export async function enqueueRun({
+ env,
+ run,
+ dependentRun,
+}: EnqueueRunOptions): Promise<EnqueueRunResult> {
+ // If this is a triggerAndWait or batchTriggerAndWait,
+ // we need to add the parent run to the reserve concurrency set
+  // to free up concurrency for the children to run.
+  // In the case of a recursive queue, reserving concurrency can fail, which means there is a deadlock and we need to fail the run.
+
+  // TODO: reserveConcurrency can fail because of a deadlock; we need to handle that case
+ const wasEnqueued = await marqs.enqueueMessage(
+ env,
+ run.queue,
+ run.id,
+ {
+ type: "EXECUTE",
+ taskIdentifier: run.taskIdentifier,
+ projectId: env.projectId,
+ environmentId: env.id,
+ environmentType: env.type,
+ },
+ run.concurrencyKey ?? undefined,
+ run.queueTimestamp ?? undefined,
+ dependentRun
+ ? { messageId: dependentRun.id, recursiveQueue: dependentRun.queue === run.queue }
+ : undefined
+ );
+
+ if (!wasEnqueued) {
+ const error = {
+ type: "INTERNAL_ERROR",
+ code: TaskRunErrorCodes.RECURSIVE_WAIT_DEADLOCK,
+ message: `This run will never execute because it was triggered recursively and the task has no remaining concurrency available`,
+ } satisfies TaskRunError;
+
+ return {
+ ok: false,
+ error,
+ };
+ }
+
+ return {
+ ok: true,
+ };
+}
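A usage sketch for the new helper (illustrative; `env`, `run`, `parentRun`, and `logger` are assumed to be in scope), matching the `dependentRun` shape that `EnqueueDelayedRunService` passes elsewhere in this patch:

```ts
// Illustrative caller of enqueueRun; the caller decides how to fail the run on a deadlock.
import { enqueueRun } from "./enqueueRun.server";

const result = await enqueueRun({
  env,
  run,
  // Pass the waiting parent run (if any) so its concurrency can be reserved.
  dependentRun: parentRun ? { id: parentRun.id, queue: parentRun.queue } : undefined,
});

if (!result.ok) {
  // result.error is a RECURSIVE_WAIT_DEADLOCK TaskRunError.
  logger.error("Failed to enqueue run", { runId: run.id, error: result.error });
}
```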
diff --git a/apps/webapp/app/v3/services/resumeBatchRun.server.ts b/apps/webapp/app/v3/services/resumeBatchRun.server.ts
index 6a794e238c..68f5a59cf7 100644
--- a/apps/webapp/app/v3/services/resumeBatchRun.server.ts
+++ b/apps/webapp/app/v3/services/resumeBatchRun.server.ts
@@ -1,6 +1,6 @@
import { PrismaClientOrTransaction } from "~/db.server";
import { workerQueue } from "~/services/worker.server";
-import { marqs } from "~/v3/marqs/index.server";
+import { marqs, MarQSPriorityLevel } from "~/v3/marqs/index.server";
import { BaseService } from "./baseService.server";
import { logger } from "~/services/logger.server";
import { BatchTaskRun } from "@trigger.dev/database";
@@ -152,6 +152,8 @@ export class ResumeBatchRunService extends BaseService {
queue: true,
taskIdentifier: true,
concurrencyKey: true,
+ createdAt: true,
+ queueTimestamp: true,
},
},
},
@@ -186,6 +188,7 @@ export class ResumeBatchRunService extends BaseService {
dependentTaskAttemptId: dependentTaskAttempt.id,
});
+    // TODO: use the new priority queue support
await marqs?.enqueueMessage(
environment,
dependentRun.queue,
@@ -200,7 +203,10 @@ export class ResumeBatchRunService extends BaseService {
environmentId: environment.id,
environmentType: environment.type,
},
- dependentRun.concurrencyKey ?? undefined
+ dependentRun.concurrencyKey ?? undefined,
+ dependentRun.queueTimestamp ?? dependentRun.createdAt,
+ undefined,
+ MarQSPriorityLevel.resume
);
return "COMPLETED";
@@ -246,16 +252,25 @@ export class ResumeBatchRunService extends BaseService {
hasCheckpointEvent: !!batchRun.checkpointEventId,
});
- await marqs?.replaceMessage(dependentRun.id, {
- type: "RESUME",
- completedAttemptIds: batchRun.items.map((item) => item.taskRunAttemptId).filter(Boolean),
- resumableAttemptId: dependentTaskAttempt.id,
- checkpointEventId: batchRun.checkpointEventId ?? undefined,
- taskIdentifier: dependentTaskAttempt.taskRun.taskIdentifier,
- projectId: environment.projectId,
- environmentId: environment.id,
- environmentType: environment.type,
- });
+ await marqs?.replaceMessage(
+ dependentRun.id,
+ {
+ type: "RESUME",
+ completedAttemptIds: batchRun.items
+ .map((item) => item.taskRunAttemptId)
+ .filter(Boolean),
+ resumableAttemptId: dependentTaskAttempt.id,
+ checkpointEventId: batchRun.checkpointEventId ?? undefined,
+ taskIdentifier: dependentTaskAttempt.taskRun.taskIdentifier,
+ projectId: environment.projectId,
+ environmentId: environment.id,
+ environmentType: environment.type,
+ },
+ (
+ dependentTaskAttempt.taskRun.queueTimestamp ?? dependentTaskAttempt.taskRun.createdAt
+ ).getTime(),
+ MarQSPriorityLevel.resume
+ );
return "COMPLETED";
} else {
diff --git a/apps/webapp/app/v3/services/resumeTaskDependency.server.ts b/apps/webapp/app/v3/services/resumeTaskDependency.server.ts
index bdbece9778..3151de18a0 100644
--- a/apps/webapp/app/v3/services/resumeTaskDependency.server.ts
+++ b/apps/webapp/app/v3/services/resumeTaskDependency.server.ts
@@ -1,6 +1,6 @@
import { PrismaClientOrTransaction } from "~/db.server";
import { workerQueue } from "~/services/worker.server";
-import { marqs } from "~/v3/marqs/index.server";
+import { MarQS, marqs, MarQSPriorityLevel } from "~/v3/marqs/index.server";
import { BaseService } from "./baseService.server";
import { logger } from "~/services/logger.server";
@@ -49,6 +49,8 @@ export class ResumeTaskDependencyService extends BaseService {
runId: dependentRun.id,
}
);
+
+    // TODO: use the new priority queue support
await marqs?.enqueueMessage(
dependency.taskRun.runtimeEnvironment,
dependentRun.queue,
@@ -64,7 +66,9 @@ export class ResumeTaskDependencyService extends BaseService {
environmentType: dependency.taskRun.runtimeEnvironment.type,
},
dependentRun.concurrencyKey ?? undefined,
- dependentRun.createdAt.getTime()
+ dependentRun.queueTimestamp ?? dependentRun.createdAt,
+ undefined,
+ MarQSPriorityLevel.resume
);
} else {
logger.debug("Task dependency resume: Attempt is not paused or there's no checkpoint event", {
@@ -97,7 +101,8 @@ export class ResumeTaskDependencyService extends BaseService {
environmentId: dependency.taskRun.runtimeEnvironment.id,
environmentType: dependency.taskRun.runtimeEnvironment.type,
},
- dependentRun.createdAt.getTime()
+ (dependentRun.queueTimestamp ?? dependentRun.createdAt).getTime(),
+ MarQSPriorityLevel.resume
);
}
}
diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts
index b7df21cde5..91a201e9cf 100644
--- a/apps/webapp/app/v3/services/triggerTask.server.ts
+++ b/apps/webapp/app/v3/services/triggerTask.server.ts
@@ -1,34 +1,36 @@
import {
IOPacket,
+ packetRequiresOffloading,
QueueOptions,
SemanticInternalAttributes,
+ taskRunErrorEnhancer,
+ taskRunErrorToString,
TriggerTaskRequestBody,
- packetRequiresOffloading,
} from "@trigger.dev/core/v3";
+import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps";
+import { Prisma, TaskRun } from "@trigger.dev/database";
import { env } from "~/env.server";
+import { sanitizeQueueName } from "~/models/taskQueue.server";
+import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server";
import { AuthenticatedEnvironment } from "~/services/apiAuth.server";
import { autoIncrementCounter } from "~/services/autoIncrementCounter.server";
-import { workerQueue } from "~/services/worker.server";
+import { logger } from "~/services/logger.server";
+import { getEntitlement } from "~/services/platform.v3.server";
+import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server";
+import { handleMetadataPacket } from "~/utils/packets";
import { marqs } from "~/v3/marqs/index.server";
import { eventRepository } from "../eventRepository.server";
import { generateFriendlyId } from "../friendlyIdentifiers";
-import { uploadPacketToObjectStore } from "../r2.server";
-import { startActiveSpan } from "../tracer.server";
-import { getEntitlement } from "~/services/platform.v3.server";
-import { BaseService, ServiceValidationError } from "./baseService.server";
-import { logger } from "~/services/logger.server";
-import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus";
-import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server";
import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server";
-import { handleMetadataPacket } from "~/utils/packets";
-import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps";
-import { ExpireEnqueuedRunService } from "./expireEnqueuedRun.server";
import { guardQueueSizeLimitsForEnv } from "../queueSizeLimits.server";
+import { uploadPacketToObjectStore } from "../r2.server";
+import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus";
+import { startActiveSpan } from "../tracer.server";
import { clampMaxDuration } from "../utils/maxDuration";
-import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server";
-import { Prisma, TaskRun } from "@trigger.dev/database";
-import { sanitizeQueueName } from "~/models/taskQueue.server";
+import { BaseService, ServiceValidationError } from "./baseService.server";
import { EnqueueDelayedRunService } from "./enqueueDelayedRun.server";
+import { enqueueRun } from "./enqueueRun.server";
+import { ExpireEnqueuedRunService } from "./expireEnqueuedRun.server";
import { getTaskEventStore } from "../taskEventStore.server";
export type TriggerTaskServiceOptions = {
@@ -186,6 +188,8 @@ export class TriggerTaskService extends BaseService {
taskIdentifier: true,
rootTaskRunId: true,
depth: true,
+ queueTimestamp: true,
+ queue: true,
},
},
},
@@ -242,6 +246,8 @@ export class TriggerTaskService extends BaseService {
taskIdentifier: true,
rootTaskRunId: true,
depth: true,
+ queueTimestamp: true,
+ queue: true,
},
},
},
@@ -294,7 +300,7 @@ export class TriggerTaskService extends BaseService {
: undefined;
try {
- return await eventRepository.traceEvent(
+ const result = await eventRepository.traceEvent(
taskId,
{
context: options.traceContext,
@@ -367,6 +373,12 @@ export class TriggerTaskService extends BaseService {
? dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1
: 0;
+ const queueTimestamp =
+ dependentAttempt?.taskRun.queueTimestamp ??
+ dependentBatchRun?.dependentTaskAttempt?.taskRun.queueTimestamp ??
+ delayUntil ??
+ new Date();
+
const taskRun = await tx.taskRun.create({
data: {
status: delayUntil ? "DELAYED" : "PENDING",
@@ -394,6 +406,7 @@ export class TriggerTaskService extends BaseService {
isTest: body.options?.test ?? false,
delayUntil,
queuedAt: delayUntil ? undefined : new Date(),
+ queueTimestamp,
maxAttempts: body.options?.maxAttempts,
taskEventStore: getTaskEventStore(),
ttl,
@@ -547,44 +560,61 @@ export class TriggerTaskService extends BaseService {
this._prisma
);
- //release the concurrency for the env and org, if part of a (batch)triggerAndWait
- if (dependentAttempt) {
- const isSameTask = dependentAttempt.taskRun.taskIdentifier === taskId;
- await marqs?.releaseConcurrency(dependentAttempt.taskRun.id, isSameTask);
- }
- if (dependentBatchRun?.dependentTaskAttempt) {
- const isSameTask =
- dependentBatchRun.dependentTaskAttempt.taskRun.taskIdentifier === taskId;
- await marqs?.releaseConcurrency(
- dependentBatchRun.dependentTaskAttempt.taskRun.id,
- isSameTask
- );
- }
-
if (!run) {
return;
}
- // We need to enqueue the task run into the appropriate queue. This is done after the tx completes to prevent a race condition where the task run hasn't been created yet by the time we dequeue.
+ // Now enqueue the run if it's not delayed
if (run.status === "PENDING") {
- await marqs?.enqueueMessage(
- environment,
- run.queue,
- run.id,
- {
- type: "EXECUTE",
- taskIdentifier: taskId,
- projectId: environment.projectId,
- environmentId: environment.id,
- environmentType: environment.type,
- },
- body.options?.concurrencyKey
- );
+ const enqueueResult = await enqueueRun({
+ env: environment,
+ run,
+ dependentRun:
+ dependentAttempt?.taskRun ?? dependentBatchRun?.dependentTaskAttempt?.taskRun,
+ });
+
+ if (!enqueueResult.ok) {
+ // Now we need to fail the run with enqueueResult.error and make sure to
+ // set the traced event to failed as well
+ await this._prisma.taskRun.update({
+ where: { id: run.id },
+ data: {
+ status: "SYSTEM_FAILURE",
+ completedAt: new Date(),
+ error: enqueueResult.error,
+ },
+ });
+
+ event.failWithError(enqueueResult.error);
+
+ return {
+ run,
+ isCached: false,
+ error: enqueueResult.error,
+ };
+ }
}
return { run, isCached: false };
}
);
+
+ if (result?.error) {
+ throw new ServiceValidationError(
+ taskRunErrorToString(taskRunErrorEnhancer(result.error))
+ );
+ }
+
+ const run = result?.run;
+
+ if (!run) {
+ return;
+ }
+
+ return {
+ run,
+ isCached: result?.isCached,
+ };
} catch (error) {
// Detect a prisma transaction Unique constraint violation
if (error instanceof Prisma.PrismaClientKnownRequestError) {
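
The new `enqueueRun` helper imported above is not part of this excerpt, but the way its result is consumed (`enqueueResult.ok`, with `enqueueResult.error` fed into `failWithError` and `taskRunErrorEnhancer`) implies a Result-style contract roughly like this sketch (an assumed shape, not the actual `enqueueRun.server.ts`):

```ts
import type { TaskRunError } from "@trigger.dev/core/v3";

// Assumed from usage in this hunk: a discriminated union so callers must
// check `ok` before reading `error`.
type EnqueueRunResult = { ok: true } | { ok: false; error: TaskRunError };
```
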
diff --git a/apps/webapp/test/envPriorityDequeueingStrategy.test.ts b/apps/webapp/test/envPriorityDequeueingStrategy.test.ts
new file mode 100644
index 0000000000..8335136923
--- /dev/null
+++ b/apps/webapp/test/envPriorityDequeueingStrategy.test.ts
@@ -0,0 +1,388 @@
+import { describe, expect, it } from "vitest";
+import type { EnvQueues, MarQSFairDequeueStrategy } from "~/v3/marqs/types.js";
+import { EnvPriorityDequeuingStrategy } from "../app/v3/marqs/envPriorityDequeuingStrategy.server.js";
+import { createKeyProducer } from "./utils/marqs.js";
+
+const keyProducer = createKeyProducer("test");
+
+describe("EnvPriorityDequeuingStrategy", () => {
+ class TestDelegate implements MarQSFairDequeueStrategy {
+ constructor(private queues: EnvQueues[]) {}
+
+ async distributeFairQueuesFromParentQueue(): Promise<Array<EnvQueues>> {
+ return this.queues;
+ }
+ }
+
+ describe("distributeFairQueuesFromParentQueue", () => {
+ it("should preserve order when all queues have the same priority", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue2:priority:1",
+ "org:org1:env:env1:queue:queue3:priority:1",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result).toEqual(inputQueues);
+ expect(result[0].queues).toEqual(inputQueues[0].queues);
+ });
+
+ it("should sort queues by priority in descending order", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue2:priority:3",
+ "org:org1:env:env1:queue:queue3:priority:2",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:3",
+ "org:org1:env:env1:queue:queue3:priority:2",
+ "org:org1:env:env1:queue:queue1:priority:1",
+ ]);
+ });
+
+ it("should handle queues without priority by treating them as priority 0", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1",
+ "org:org1:env:env1:queue:queue2:priority:2",
+ "org:org1:env:env1:queue:queue3",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:2",
+ "org:org1:env:env1:queue:queue1",
+ "org:org1:env:env1:queue:queue3",
+ ]);
+ });
+
+ it("should handle multiple environments", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue2:priority:2",
+ ],
+ },
+ {
+ envId: "env2",
+ queues: [
+ "org:org1:env:env2:queue:queue3:priority:3",
+ "org:org1:env:env2:queue:queue4:priority:1",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result).toHaveLength(2);
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:2",
+ "org:org1:env:env1:queue:queue1:priority:1",
+ ]);
+ expect(result[1].queues).toEqual([
+ "org:org1:env:env2:queue:queue3:priority:3",
+ "org:org1:env:env2:queue:queue4:priority:1",
+ ]);
+ });
+
+ it("should handle negative priorities correctly", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:priority:-1",
+ "org:org1:env:env1:queue:queue2:priority:1",
+ "org:org1:env:env1:queue:queue3:priority:-2",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:1",
+ "org:org1:env:env1:queue:queue1:priority:-1",
+ "org:org1:env:env1:queue:queue3:priority:-2",
+ ]);
+ });
+
+ it("should maintain stable sort for mixed priority and non-priority queues", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1",
+ "org:org1:env:env1:queue:queue2:priority:1",
+ "org:org1:env:env1:queue:queue3",
+ "org:org1:env:env1:queue:queue4:priority:1",
+ "org:org1:env:env1:queue:queue5",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ // Check that queue2 and queue4 (priority 1) maintain their relative order
+ // and queue1, queue3, and queue5 (priority 0) maintain their relative order
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:1",
+ "org:org1:env:env1:queue:queue4:priority:1",
+ "org:org1:env:env1:queue:queue1",
+ "org:org1:env:env1:queue:queue3",
+ "org:org1:env:env1:queue:queue5",
+ ]);
+ });
+
+ it("should handle empty queue arrays", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result).toEqual(inputQueues);
+ expect(result[0].queues).toEqual([]);
+ });
+
+ it("should handle empty environments array", async () => {
+ const inputQueues: EnvQueues[] = [];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result).toEqual([]);
+ });
+
+ it("should handle large priority differences", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue2:priority:1000",
+ "org:org1:env:env1:queue:queue3:priority:500",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:1000",
+ "org:org1:env:env1:queue:queue3:priority:500",
+ "org:org1:env:env1:queue:queue1:priority:1",
+ ]);
+ });
+
+ it("should handle multiple environments with mixed priority patterns", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1", // priority 0
+ "org:org1:env:env1:queue:queue2:priority:2",
+ ],
+ },
+ {
+ envId: "env2",
+ queues: [
+ "org:org1:env:env2:queue:queue3:priority:1",
+ "org:org1:env:env2:queue:queue4", // priority 0
+ ],
+ },
+ {
+ envId: "env3",
+ queues: [
+ "org:org1:env:env3:queue:queue5:priority:1",
+ "org:org1:env:env3:queue:queue6:priority:1",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result).toHaveLength(3);
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:priority:2",
+ "org:org1:env:env1:queue:queue1",
+ ]);
+ expect(result[1].queues).toEqual([
+ "org:org1:env:env2:queue:queue3:priority:1",
+ "org:org1:env:env2:queue:queue4",
+ ]);
+ expect(result[2].queues).toEqual([
+ "org:org1:env:env3:queue:queue5:priority:1",
+ "org:org1:env:env3:queue:queue6:priority:1",
+ ]);
+ });
+
+ it("should sort queues with concurrency keys while maintaining priority order", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:ck:key1:priority:1",
+ "org:org1:env:env1:queue:queue2:ck:key1:priority:3",
+ "org:org1:env:env1:queue:queue3:ck:key2:priority:2",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue2:ck:key1:priority:3",
+ "org:org1:env:env1:queue:queue3:ck:key2:priority:2",
+ "org:org1:env:env1:queue:queue1:ck:key1:priority:1",
+ ]);
+ });
+
+ it("should handle mixed queues with and without concurrency keys", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue2:ck:shared-key:priority:2",
+ "org:org1:env:env1:queue:queue3:ck:shared-key:priority:1",
+ "org:org1:env:env1:queue:queue4:priority:3",
+ "org:org1:env:env1:queue:queue5:ck:other-key:priority:2",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue4:priority:3",
+ "org:org1:env:env1:queue:queue2:ck:shared-key:priority:2",
+ "org:org1:env:env1:queue:queue5:ck:other-key:priority:2",
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue3:ck:shared-key:priority:1",
+ ]);
+ });
+
+ it("should only return the highest priority queue of the same queue", async () => {
+ const inputQueues: EnvQueues[] = [
+ {
+ envId: "env1",
+ queues: [
+ "org:org1:env:env1:queue:queue1",
+ "org:org1:env:env1:queue:queue1:priority:1",
+ "org:org1:env:env1:queue:queue1:priority:2",
+ "org:org1:env:env1:queue:queue1:priority:3",
+ "org:org1:env:env1:queue:queue2",
+ ],
+ },
+ ];
+
+ const delegate = new TestDelegate(inputQueues);
+ const strategy = new EnvPriorityDequeuingStrategy({
+ delegate,
+ keys: keyProducer,
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue("parentQueue", "consumer1");
+
+ expect(result[0].queues).toEqual([
+ "org:org1:env:env1:queue:queue1:priority:3",
+ "org:org1:env:env1:queue:queue2",
+ ]);
+ });
+ });
+});
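
Read together, these tests pin down the strategy's contract: keep only the highest-priority variant of the same underlying queue, sort each environment's queues by priority in descending order with missing priorities treated as 0, and keep the sort stable. A minimal sketch of that contract, for illustration only (the real implementation delegates key parsing to the injected `keys` producer rather than ad-hoc string handling):

```ts
type EnvQueues = { envId: string; queues: string[] };

// Extracts the numeric ":priority:N" suffix, defaulting to 0 when absent.
function priorityOf(queue: string): number {
  const match = queue.match(/:priority:(-?\d+)$/);
  return match ? Number(match[1]) : 0;
}

// The queue identity without its priority suffix, used for deduplication.
function withoutPriority(queue: string): string {
  return queue.replace(/:priority:-?\d+$/, "");
}

function sortQueuesByPriority(envs: EnvQueues[]): EnvQueues[] {
  return envs.map(({ envId, queues }) => {
    // Keep only the highest-priority variant of each underlying queue.
    const best = new Map<string, string>();
    for (const queue of queues) {
      const key = withoutPriority(queue);
      const current = best.get(key);
      if (!current || priorityOf(queue) > priorityOf(current)) {
        best.set(key, queue);
      }
    }
    // Stable sort, descending by priority; equal priorities keep input order.
    const sorted = [...best.values()].sort((a, b) => priorityOf(b) - priorityOf(a));
    return { envId, queues: sorted };
  });
}
```
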
diff --git a/apps/webapp/test/fairDequeuingStrategy.test.ts b/apps/webapp/test/fairDequeuingStrategy.test.ts
index fa96d11bc4..5d0b8e4af8 100644
--- a/apps/webapp/test/fairDequeuingStrategy.test.ts
+++ b/apps/webapp/test/fairDequeuingStrategy.test.ts
@@ -8,25 +8,24 @@ import {
setupQueue,
} from "./utils/marqs.js";
import { trace } from "@opentelemetry/api";
+import { EnvQueues } from "~/v3/marqs/types.js";
const tracer = trace.getTracer("test");
vi.setConfig({ testTimeout: 30_000 }); // 30 seconds timeout
describe("FairDequeuingStrategy", () => {
- redisTest("should distribute a single queue from a single org/env", async ({ redis }) => {
+ redisTest("should distribute a single queue from a single env", async ({ redis }) => {
const keyProducer = createKeyProducer("test");
const strategy = new FairDequeuingStrategy({
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 100,
seed: "test-seed-1", // for deterministic shuffling
});
- // Setup a single queue
await setupQueue({
redis,
keyProducer,
@@ -40,42 +39,10 @@ describe("FairDequeuingStrategy", () => {
const result = await strategy.distributeFairQueuesFromParentQueue("parent-queue", "consumer-1");
expect(result).toHaveLength(1);
- expect(result[0]).toBe("org:org-1:env:env-1:queue:queue-1");
- });
-
- redisTest("should respect org concurrency limits", async ({ redis }) => {
- const keyProducer = createKeyProducer("test");
- const strategy = new FairDequeuingStrategy({
- tracer,
- redis,
- keys: keyProducer,
- defaultOrgConcurrency: 2,
- defaultEnvConcurrency: 5,
- parentQueueLimit: 100,
- seed: "test-seed-2",
- });
-
- // Setup queue
- await setupQueue({
- redis,
- keyProducer,
- parentQueue: "parent-queue",
- score: Date.now() - 1000,
- queueId: "queue-1",
- orgId: "org-1",
+ expect(result[0]).toEqual({
envId: "env-1",
+ queues: ["org:org-1:env:env-1:queue:queue-1"],
});
-
- // Set org-1 to be at its concurrency limit
- await setupConcurrency({
- redis,
- keyProducer,
- org: { id: "org-1", currentConcurrency: 2, limit: 2 },
- env: { id: "env-1", currentConcurrency: 0 },
- });
-
- const result = await strategy.distributeFairQueuesFromParentQueue("parent-queue", "consumer-1");
- expect(result).toHaveLength(0);
});
redisTest("should respect env concurrency limits", async ({ redis }) => {
@@ -84,7 +51,6 @@ describe("FairDequeuingStrategy", () => {
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 2,
parentQueueLimit: 100,
seed: "test-seed-3",
@@ -103,7 +69,6 @@ describe("FairDequeuingStrategy", () => {
await setupConcurrency({
redis,
keyProducer,
- org: { id: "org-1", currentConcurrency: 0 },
env: { id: "env-1", currentConcurrency: 2, limit: 2 },
});
@@ -111,13 +76,53 @@ describe("FairDequeuingStrategy", () => {
expect(result).toHaveLength(0);
});
+ redisTest(
+ "should give extra concurrency when the env has reserve concurrency",
+ async ({ redis }) => {
+ const keyProducer = createKeyProducer("test");
+ const strategy = new FairDequeuingStrategy({
+ tracer,
+ redis,
+ keys: keyProducer,
+ defaultEnvConcurrency: 2,
+ parentQueueLimit: 100,
+ seed: "test-seed-3",
+ });
+
+ await setupQueue({
+ redis,
+ keyProducer,
+ parentQueue: "parent-queue",
+ score: Date.now() - 1000,
+ queueId: "queue-1",
+ orgId: "org-1",
+ envId: "env-1",
+ });
+
+ await setupConcurrency({
+ redis,
+ keyProducer,
+ env: { id: "env-1", currentConcurrency: 2, limit: 2, reserveConcurrency: 1 },
+ });
+
+ const result = await strategy.distributeFairQueuesFromParentQueue(
+ "parent-queue",
+ "consumer-1"
+ );
+ expect(result).toHaveLength(1);
+ expect(result[0]).toEqual({
+ envId: "env-1",
+ queues: ["org:org-1:env:env-1:queue:queue-1"],
+ });
+ }
+ );
+
redisTest("should respect parentQueueLimit", async ({ redis }) => {
const keyProducer = createKeyProducer("test");
const strategy = new FairDequeuingStrategy({
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 2, // Only take 2 queues
seed: "test-seed-6",
@@ -158,11 +163,13 @@ describe("FairDequeuingStrategy", () => {
const result = await strategy.distributeFairQueuesFromParentQueue("parent-queue", "consumer-1");
- expect(result).toHaveLength(2);
- // Should only get the two oldest queues
+ expect(result).toHaveLength(1);
const queue1 = keyProducer.queueKey("org-1", "env-1", "queue-1");
const queue2 = keyProducer.queueKey("org-1", "env-1", "queue-2");
- expect(result).toEqual([queue1, queue2]);
+ expect(result[0]).toEqual({
+ envId: "env-1",
+ queues: [queue1, queue2],
+ });
});
redisTest("should reuse snapshots across calls for the same consumer", async ({ redis }) => {
@@ -171,7 +178,6 @@ describe("FairDequeuingStrategy", () => {
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 10,
seed: "test-seed-reuse-1",
@@ -212,7 +218,11 @@ describe("FairDequeuingStrategy", () => {
const startDistribute1 = performance.now();
- const result = await strategy.distributeFairQueuesFromParentQueue("parent-queue", "consumer-1");
+ const envResult = await strategy.distributeFairQueuesFromParentQueue(
+ "parent-queue",
+ "consumer-1"
+ );
+ const result = flattenResults(envResult);
const distribute1Duration = performance.now() - startDistribute1;
@@ -236,8 +246,8 @@ describe("FairDequeuingStrategy", () => {
console.log("Second distribution took", distribute2Duration, "ms");
- // Make sure the second call is more than 10 times faster than the first
- expect(distribute2Duration).toBeLessThan(distribute1Duration / 10);
+ // Make sure the second call is more than 9 times faster than the first
+ expect(distribute2Duration).toBeLessThan(distribute1Duration / 9);
const startDistribute3 = performance.now();
@@ -260,7 +270,6 @@ describe("FairDequeuingStrategy", () => {
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 100,
seed: "test-seed-5",
@@ -296,7 +305,6 @@ describe("FairDequeuingStrategy", () => {
await setupConcurrency({
redis,
keyProducer,
- org: { id: orgId, currentConcurrency: 2, limit: 10 },
env: { id: envId, currentConcurrency: 1, limit: 5 },
});
}
@@ -323,10 +331,11 @@ describe("FairDequeuingStrategy", () => {
// Run multiple iterations
for (let i = 0; i < iterations; i++) {
- const result = await strategy.distributeFairQueuesFromParentQueue(
+ const envResult = await strategy.distributeFairQueuesFromParentQueue(
"parent-queue",
`consumer-${i % 3}` // Simulate 3 different consumers
);
+ const result = flattenResults(envResult);
// Track positions of queues
result.forEach((queueId, position) => {
@@ -417,7 +426,6 @@ describe("FairDequeuingStrategy", () => {
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 100,
seed: "fixed-seed",
@@ -472,20 +480,19 @@ describe("FairDequeuingStrategy", () => {
await setupConcurrency({
redis,
keyProducer,
- org: { id: "org-1", currentConcurrency: 0, limit: 10 },
env: { id: "env-1", currentConcurrency: 0, limit: 5 },
});
await setupConcurrency({
redis,
keyProducer,
- org: { id: "org-1", currentConcurrency: 0, limit: 10 },
env: { id: "env-2", currentConcurrency: 0, limit: 5 },
});
- const result = await strategy.distributeFairQueuesFromParentQueue(
+ const envResult = await strategy.distributeFairQueuesFromParentQueue(
"parent-queue",
"consumer-1"
);
+ const result = flattenResults(envResult);
// Group queues by environment
const queuesByEnv = result.reduce((acc, queueId) => {
@@ -546,7 +553,6 @@ describe("FairDequeuingStrategy", () => {
await setupConcurrency({
redis,
keyProducer,
- org: { id: "org-1", currentConcurrency: 0, limit: 200 },
env: {
id: setup.envId,
currentConcurrency: setup.current,
@@ -576,7 +582,6 @@ describe("FairDequeuingStrategy", () => {
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 100,
seed: `test-seed-${i}`,
@@ -596,10 +601,11 @@ describe("FairDequeuingStrategy", () => {
const firstPositionCounts: Record<string, number> = {};
for (let i = 0; i < iterationsPerStrategy; i++) {
- const result = await strategy.distributeFairQueuesFromParentQueue(
+ const envResult = await strategy.distributeFairQueuesFromParentQueue(
"parent-queue",
`consumer-${i % 3}`
);
+ const result = flattenResults(envResult);
expect(result.length).toBeGreaterThan(0);
@@ -660,7 +666,6 @@ describe("FairDequeuingStrategy", () => {
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 100,
seed: "fixed-seed",
@@ -679,10 +684,11 @@ describe("FairDequeuingStrategy", () => {
const iterations = 1000;
for (let i = 0; i < iterations; i++) {
- const result = await strategy.distributeFairQueuesFromParentQueue(
+ const envResult = await strategy.distributeFairQueuesFromParentQueue(
"parent-queue",
"consumer-1"
);
+ const result = flattenResults(envResult);
result.forEach((queueId, position) => {
const baseQueueId = queueId.split(":").pop()!;
@@ -709,7 +715,6 @@ describe("FairDequeuingStrategy", () => {
await setupConcurrency({
redis,
keyProducer,
- org: { id: "org-1", currentConcurrency: 0, limit: 10 },
env: { id: "env-1", currentConcurrency: 0, limit: 5 },
});
@@ -738,46 +743,45 @@ describe("FairDequeuingStrategy", () => {
});
redisTest(
- "should respect maximumOrgCount and select orgs based on queue ages",
+ "should respect maximumEnvCount and select envs based on queue ages",
async ({ redis }) => {
const keyProducer = createKeyProducer("test");
const strategy = new FairDequeuingStrategy({
tracer,
redis,
keys: keyProducer,
- defaultOrgConcurrency: 10,
defaultEnvConcurrency: 5,
parentQueueLimit: 100,
seed: "test-seed-max-orgs",
- maximumOrgCount: 2, // Only select top 2 orgs
+ maximumEnvCount: 2, // Only select top 2 envs
});
const now = Date.now();
- // Setup 4 orgs with different queue age profiles
- const orgSetups = [
+ // Setup 4 envs with different queue age profiles
+ const envSetups = [
{
- orgId: "org-1",
+ envId: "env-1",
queues: [
{ age: 1000 }, // Average age: 1000
],
},
{
- orgId: "org-2",
+ envId: "env-2",
queues: [
{ age: 5000 }, // Average age: 5000
{ age: 5000 },
],
},
{
- orgId: "org-3",
+ envId: "env-3",
queues: [
{ age: 2000 }, // Average age: 2000
{ age: 2000 },
],
},
{
- orgId: "org-4",
+ envId: "env-4",
queues: [
{ age: 500 }, // Average age: 500
{ age: 500 },
@@ -786,12 +790,11 @@ describe("FairDequeuingStrategy", () => {
];
// Setup queues and concurrency for each org
- for (const setup of orgSetups) {
+ for (const setup of envSetups) {
await setupConcurrency({
redis,
keyProducer,
- org: { id: setup.orgId, currentConcurrency: 0, limit: 10 },
- env: { id: "env-1", currentConcurrency: 0, limit: 5 },
+ env: { id: setup.envId, currentConcurrency: 0, limit: 5 },
});
for (let i = 0; i < setup.queues.length; i++) {
@@ -800,56 +803,57 @@ describe("FairDequeuingStrategy", () => {
keyProducer,
parentQueue: "parent-queue",
score: now - setup.queues[i].age,
- queueId: `queue-${setup.orgId}-${i}`,
- orgId: setup.orgId,
- envId: "env-1",
+ queueId: `queue-${setup.envId}-${i}`,
+ orgId: `org-${setup.envId}`,
+ envId: setup.envId,
});
}
}
// Run multiple iterations to verify consistent behavior
const iterations = 100;
- const selectedOrgCounts: Record<string, number> = {};
+ const selectedEnvCounts: Record<string, number> = {};
for (let i = 0; i < iterations; i++) {
- const result = await strategy.distributeFairQueuesFromParentQueue(
+ const envResult = await strategy.distributeFairQueuesFromParentQueue(
"parent-queue",
`consumer-${i}`
);
+ const result = flattenResults(envResult);
// Track which orgs were included in the result
- const selectedOrgs = new Set(result.map((queueId) => keyProducer.orgIdFromQueue(queueId)));
+ const selectedEnvs = new Set(result.map((queueId) => keyProducer.envIdFromQueue(queueId)));
// Verify we never get more than maximumOrgCount orgs
- expect(selectedOrgs.size).toBeLessThanOrEqual(2);
+ expect(selectedEnvs.size).toBeLessThanOrEqual(2);
- for (const orgId of selectedOrgs) {
- selectedOrgCounts[orgId] = (selectedOrgCounts[orgId] || 0) + 1;
+ for (const envId of selectedEnvs) {
+ selectedEnvCounts[envId] = (selectedEnvCounts[envId] || 0) + 1;
}
}
- console.log("Organization selection counts:", selectedOrgCounts);
+ console.log("Environment selection counts:", selectedEnvCounts);
// org-2 should be selected most often (highest average age)
- expect(selectedOrgCounts["org-2"]).toBeGreaterThan(selectedOrgCounts["org-4"] || 0);
+ expect(selectedEnvCounts["env-2"]).toBeGreaterThan(selectedEnvCounts["env-4"] || 0);
// org-4 should be selected least often (lowest average age)
- const org4Count = selectedOrgCounts["org-4"] || 0;
- expect(org4Count).toBeLessThan(selectedOrgCounts["org-2"]);
+ const env4Count = selectedEnvCounts["env-4"] || 0;
+ expect(env4Count).toBeLessThan(selectedEnvCounts["env-2"]);
- // Verify that orgs with higher average queue age are selected more frequently
- const sortedOrgs = Object.entries(selectedOrgCounts).sort((a, b) => b[1] - a[1]);
- console.log("Sorted organization frequencies:", sortedOrgs);
+ // Verify that envs with higher average queue age are selected more frequently
+ const sortedEnvs = Object.entries(selectedEnvCounts).sort((a, b) => b[1] - a[1]);
+ console.log("Sorted environment frequencies:", sortedEnvs);
- // The top 2 most frequently selected orgs should be org-2 and org-3
+ // The top 2 most frequently selected envs should be env-2 and env-3
// as they have the highest average queue ages
- const topTwoOrgs = new Set([sortedOrgs[0][0], sortedOrgs[1][0]]);
- expect(topTwoOrgs).toContain("org-2"); // Highest average age
- expect(topTwoOrgs).toContain("org-3"); // Second highest average age
+ const topTwoEnvs = new Set([sortedEnvs[0][0], sortedEnvs[1][0]]);
+ expect(topTwoEnvs).toContain("env-2"); // Highest average age
+ expect(topTwoEnvs).toContain("env-3"); // Second highest average age
// Calculate selection percentages
- const totalSelections = Object.values(selectedOrgCounts).reduce((a, b) => a + b, 0);
- const selectionPercentages = Object.entries(selectedOrgCounts).reduce(
+ const totalSelections = Object.values(selectedEnvCounts).reduce((a, b) => a + b, 0);
+ const selectionPercentages = Object.entries(selectedEnvCounts).reduce(
(acc, [orgId, count]) => {
acc[orgId] = (count / totalSelections) * 100;
return acc;
@@ -857,13 +861,18 @@ describe("FairDequeuingStrategy", () => {
{} as Record<string, number>
);
- console.log("Organization selection percentages:", selectionPercentages);
+ console.log("Environment selection percentages:", selectionPercentages);
- // Verify that org-2 (highest average age) gets selected in at least 40% of iterations
- expect(selectionPercentages["org-2"]).toBeGreaterThan(40);
+ // Verify that env-2 (highest average age) gets selected in at least 40% of iterations
+ expect(selectionPercentages["env-2"]).toBeGreaterThan(40);
- // Verify that org-4 (lowest average age) gets selected in less than 20% of iterations
- expect(selectionPercentages["org-4"] || 0).toBeLessThan(20);
+ // Verify that env-4 (lowest average age) gets selected in less than 20% of iterations
+ expect(selectionPercentages["env-4"] || 0).toBeLessThan(20);
}
);
});
+
+// Helper function to flatten results for counting
+function flattenResults(results: Array<EnvQueues>): string[] {
+ return results.flatMap((envQueue) => envQueue.queues);
+}
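
The new reserve-concurrency test is the behavioral spec here: an environment already at its limit (`currentConcurrency: 2`, `limit: 2`) is still dequeued from when `reserveConcurrency: 1` is set. One way to read that expectation is an effective-capacity check like the sketch below; this is an assumption about intent, not the actual `FairDequeuingStrategy` code:

```ts
// Assumed reading of the test above: reserve concurrency extends the
// environment's effective capacity. Illustration only.
function envHasCapacity(current: number, limit: number, reserve: number): boolean {
  return current < limit + reserve;
}

envHasCapacity(2, 2, 0); // false: already at the limit
envHasCapacity(2, 2, 1); // true: the reserve grants one extra slot
```
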
diff --git a/apps/webapp/test/marqsKeyProducer.test.ts b/apps/webapp/test/marqsKeyProducer.test.ts
new file mode 100644
index 0000000000..1f6af24545
--- /dev/null
+++ b/apps/webapp/test/marqsKeyProducer.test.ts
@@ -0,0 +1,315 @@
+import { describe, it, expect } from "vitest";
+import { MarQSShortKeyProducer } from "../app/v3/marqs/marqsKeyProducer.js";
+import { MarQSKeyProducerEnv } from "~/v3/marqs/types.js";
+
+describe("MarQSShortKeyProducer", () => {
+ const prefix = "test:";
+ const producer = new MarQSShortKeyProducer(prefix);
+
+ // Sample test data
+ const sampleEnv: MarQSKeyProducerEnv = {
+ id: "123456789012345678901234",
+ organizationId: "987654321098765432109876",
+ type: "PRODUCTION",
+ };
+
+ const devEnv: MarQSKeyProducerEnv = {
+ id: "123456789012345678901234",
+ organizationId: "987654321098765432109876",
+ type: "DEVELOPMENT",
+ };
+
+ describe("sharedQueueScanPattern", () => {
+ it("should return correct shared queue scan pattern", () => {
+ expect(producer.sharedQueueScanPattern()).toBe("test:*sharedQueue");
+ });
+ });
+
+ describe("queueCurrentConcurrencyScanPattern", () => {
+ it("should return correct queue current concurrency scan pattern", () => {
+ expect(producer.queueCurrentConcurrencyScanPattern()).toBe(
+ "test:org:*:env:*:queue:*:currentConcurrency"
+ );
+ });
+ });
+
+ describe("stripKeyPrefix", () => {
+ it("should strip prefix from key if present", () => {
+ expect(producer.stripKeyPrefix("test:someKey")).toBe("someKey");
+ });
+
+ it("should return original key if prefix not present", () => {
+ expect(producer.stripKeyPrefix("someKey")).toBe("someKey");
+ });
+ });
+
+ describe("queueKey", () => {
+ it("should generate queue key with environment object", () => {
+ expect(producer.queueKey(sampleEnv, "testQueue")).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue"
+ );
+ });
+
+ it("should generate queue key with separate parameters", () => {
+ expect(producer.queueKey("org123", "env456", "testQueue")).toBe(
+ "org:org123:env:env456:queue:testQueue"
+ );
+ });
+
+ it("should include concurrency key when provided", () => {
+ expect(producer.queueKey(sampleEnv, "testQueue", "concKey")).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey"
+ );
+ });
+
+ it("should include priority when provided", () => {
+ expect(producer.queueKey(sampleEnv, "testQueue", undefined, 1)).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue:priority:1"
+ );
+ });
+
+ it("should NOT include priority when provided with 0", () => {
+ expect(producer.queueKey(sampleEnv, "testQueue", undefined, 0)).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue"
+ );
+ });
+
+ it("should include priority when provided with overloaded call", () => {
+ expect(
+ producer.queueKey(sampleEnv.organizationId, sampleEnv.id, "testQueue", undefined, 1)
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:priority:1");
+ });
+ });
+
+ describe("queueKeyFromQueue", () => {
+ it("should generate queue key", () => {
+ expect(producer.queueKeyFromQueue("org:765432109876:env:345678901234:queue:testQueue")).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue"
+ );
+ });
+
+ it("should include concurrency key when provided", () => {
+ expect(
+ producer.queueKeyFromQueue("org:765432109876:env:345678901234:queue:testQueue:ck:concKey")
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:ck:concKey");
+ });
+
+ it("should include priority when provided", () => {
+ expect(
+ producer.queueKeyFromQueue("org:765432109876:env:345678901234:queue:testQueue", 1)
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:priority:1");
+ });
+
+ it("should NOT include priority when provided with 0", () => {
+ expect(
+ producer.queueKeyFromQueue("org:765432109876:env:345678901234:queue:testQueue", 0)
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue");
+ });
+
+ it("should NOT change the priority when provided", () => {
+ expect(
+ producer.queueKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:priority:1",
+ 10
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:priority:1");
+ });
+ });
+
+ describe("envSharedQueueKey", () => {
+ it("should return organization-specific shared queue for development environment", () => {
+ expect(producer.envSharedQueueKey(devEnv)).toBe(
+ "org:765432109876:env:345678901234:sharedQueue"
+ );
+ });
+
+ it("should return global shared queue for production environment", () => {
+ expect(producer.envSharedQueueKey(sampleEnv)).toBe("sharedQueue");
+ });
+ });
+
+ describe("queueDescriptorFromQueue", () => {
+ it("should parse queue string into descriptor", () => {
+ const queueString = "org:123:env:456:queue:testQueue:ck:concKey:priority:5";
+ const descriptor = producer.queueDescriptorFromQueue(queueString);
+
+ expect(descriptor).toEqual({
+ name: "testQueue",
+ environment: "456",
+ organization: "123",
+ concurrencyKey: "concKey",
+ priority: 5,
+ });
+ });
+
+ it("should parse queue string without optional parameters", () => {
+ const queueString = "org:123:env:456:queue:testQueue";
+ const descriptor = producer.queueDescriptorFromQueue(queueString);
+
+ expect(descriptor).toEqual({
+ name: "testQueue",
+ environment: "456",
+ organization: "123",
+ concurrencyKey: undefined,
+ priority: undefined,
+ });
+ });
+
+ it("should throw error for invalid queue string", () => {
+ const invalidQueue = "invalid:queue:string";
+ expect(() => producer.queueDescriptorFromQueue(invalidQueue)).toThrow("Invalid queue");
+ });
+ });
+
+ describe("messageKey", () => {
+ it("should generate correct message key", () => {
+ expect(producer.messageKey("msg123")).toBe("message:msg123");
+ });
+ });
+
+ describe("nackCounterKey", () => {
+ it("should generate correct nack counter key", () => {
+ expect(producer.nackCounterKey("msg123")).toBe("message:msg123:nacks");
+ });
+ });
+
+ describe("currentConcurrencyKey", () => {
+ it("should generate correct current concurrency key", () => {
+ expect(producer.queueCurrentConcurrencyKey(sampleEnv, "testQueue")).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue:currentConcurrency"
+ );
+ });
+
+ it("should include concurrency key when provided", () => {
+ expect(producer.queueCurrentConcurrencyKey(sampleEnv, "testQueue", "concKey")).toBe(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey:currentConcurrency"
+ );
+ });
+ });
+
+ describe("currentConcurrencyKeyFromQueue", () => {
+ it("should generate correct current concurrency key", () => {
+ expect(
+ producer.queueCurrentConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:currentConcurrency");
+ });
+
+ it("should include concurrency key when provided", () => {
+ expect(
+ producer.queueCurrentConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:ck:concKey:currentConcurrency");
+ });
+
+ it("should remove the priority bit when provided", () => {
+ expect(
+ producer.queueCurrentConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:priority:1"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:currentConcurrency");
+ });
+
+ it("should remove the priority bit when provided, but keep the concurrency key", () => {
+ expect(
+ producer.queueCurrentConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey:priority:1"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:ck:concKey:currentConcurrency");
+ });
+ });
+
+ describe("queueReserveConcurrencyKeyFromQueue", () => {
+ it("should generate correct queue reserve concurrency key", () => {
+ expect(
+ producer.queueReserveConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:reserveConcurrency");
+ });
+
+ it("should NOT include the concurrency key when provided", () => {
+ expect(
+ producer.queueReserveConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:reserveConcurrency");
+ });
+
+ it("should remove the priority bit when provided", () => {
+ expect(
+ producer.queueReserveConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:priority:1"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:reserveConcurrency");
+ });
+
+ it("should remove the priority bit when provided, AND remove the concurrency key", () => {
+ expect(
+ producer.queueReserveConcurrencyKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey:priority:1"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:reserveConcurrency");
+ });
+ });
+
+ describe("queueConcurrencyLimitKeyFromQueue", () => {
+ it("should generate correct queue concurrency limit key", () => {
+ expect(
+ producer.queueConcurrencyLimitKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:concurrency");
+ });
+
+ it("should NOT include the concurrency key when provided", () => {
+ expect(
+ producer.queueConcurrencyLimitKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:concurrency");
+ });
+
+ it("should remove the priority bit when provided", () => {
+ expect(
+ producer.queueConcurrencyLimitKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:priority:1"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:concurrency");
+ });
+
+ it("should remove the priority bit when provided, AND remove the concurrency key", () => {
+ expect(
+ producer.queueConcurrencyLimitKeyFromQueue(
+ "org:765432109876:env:345678901234:queue:testQueue:ck:concKey:priority:1"
+ )
+ ).toBe("org:765432109876:env:345678901234:queue:testQueue:concurrency");
+ });
+ });
+
+ describe("envCurrentConcurrencyKey", () => {
+ it("should generate correct env current concurrency key with environment object", () => {
+ expect(producer.envCurrentConcurrencyKey(sampleEnv)).toBe(
+ "env:345678901234:currentConcurrency"
+ );
+ });
+
+ it("should generate correct env current concurrency key with env id", () => {
+ expect(producer.envCurrentConcurrencyKey("env456")).toBe("env:env456:currentConcurrency");
+ });
+ });
+
+ describe("orgIdFromQueue and envIdFromQueue", () => {
+ it("should extract org id from queue string", () => {
+ const queue = "org:123:env:456:queue:testQueue";
+ expect(producer.orgIdFromQueue(queue)).toBe("123");
+ });
+
+ it("should extract env id from queue string", () => {
+ const queue = "org:123:env:456:queue:testQueue";
+ expect(producer.envIdFromQueue(queue)).toBe("456");
+ });
+ });
+});
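
These tests also document the queue key grammar: `org:<orgId>:env:<envId>:queue:<name>`, optionally followed by `:ck:<concurrencyKey>` and `:priority:<n>`. For illustration, here is a standalone parser that satisfies the descriptor assertions above (the real `MarQSShortKeyProducer` may be implemented differently):

```ts
type QueueDescriptor = {
  organization: string;
  environment: string;
  name: string;
  concurrencyKey?: string;
  priority?: number;
};

// Illustrative parser for the queue key format exercised by these tests.
function parseQueueDescriptor(queue: string): QueueDescriptor {
  const match = queue.match(
    /^org:([^:]+):env:([^:]+):queue:([^:]+)(?::ck:([^:]+))?(?::priority:(-?\d+))?$/
  );
  if (!match) throw new Error(`Invalid queue: ${queue}`);
  const [, organization, environment, name, concurrencyKey, priority] = match;
  return {
    organization,
    environment,
    name,
    concurrencyKey: concurrencyKey ?? undefined,
    priority: priority ? Number(priority) : undefined,
  };
}
```
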
diff --git a/apps/webapp/test/utils/marqs.ts b/apps/webapp/test/utils/marqs.ts
index 246ce413d3..4fc3884926 100644
--- a/apps/webapp/test/utils/marqs.ts
+++ b/apps/webapp/test/utils/marqs.ts
@@ -1,5 +1,5 @@
import { MarQSKeyProducer } from "~/v3/marqs/types";
-import { MarQSShortKeyProducer } from "~/v3/marqs/marqsKeyProducer.server.js";
+import { MarQSShortKeyProducer } from "~/v3/marqs/marqsKeyProducer.js";
import Redis from "ioredis";
export function createKeyProducer(prefix: string): MarQSKeyProducer {
@@ -48,36 +48,13 @@ export async function setupQueue({
type SetupConcurrencyOptions = {
redis: Redis;
keyProducer: MarQSKeyProducer;
- org: { id: string; currentConcurrency: number; limit?: number; isDisabled?: boolean };
- env: { id: string; currentConcurrency: number; limit?: number };
+ env: { id: string; currentConcurrency: number; limit?: number; reserveConcurrency?: number };
};
/**
* Sets up concurrency-related Redis keys for orgs and envs
*/
-export async function setupConcurrency({ redis, keyProducer, org, env }: SetupConcurrencyOptions) {
- // Set org concurrency limit if provided
- if (typeof org.limit === "number") {
- await redis.set(keyProducer.orgConcurrencyLimitKey(org.id), org.limit.toString());
- }
-
- if (org.currentConcurrency > 0) {
- // Set current concurrency by adding dummy members to the set
- const orgCurrentKey = keyProducer.orgCurrentConcurrencyKey(org.id);
-
- // Add dummy running job IDs to simulate current concurrency
- const dummyJobs = Array.from(
- { length: org.currentConcurrency },
- (_, i) => `dummy-job-${i}-${Date.now()}`
- );
-
- await redis.sadd(orgCurrentKey, ...dummyJobs);
- }
-
- if (org.isDisabled) {
- await redis.set(keyProducer.disabledConcurrencyLimitKey(org.id), "1");
- }
-
+export async function setupConcurrency({ redis, keyProducer, env }: SetupConcurrencyOptions) {
// Set env concurrency limit
if (typeof env.limit === "number") {
await redis.set(keyProducer.envConcurrencyLimitKey(env.id), env.limit.toString());
@@ -95,6 +72,19 @@ export async function setupConcurrency({ redis, keyProducer, org, env }: SetupCo
await redis.sadd(envCurrentKey, ...dummyJobs);
}
+
+ if (env.reserveConcurrency && env.reserveConcurrency > 0) {
+ // Set reserved concurrency by adding dummy members to the set
+ const envReservedKey = keyProducer.envReserveConcurrencyKey(env.id);
+
+ // Add dummy reserved job IDs to simulate reserved concurrency
+ const dummyJobs = Array.from(
+ { length: env.reserveConcurrency },
+ (_, i) => `dummy-reserved-job-${i}-${Date.now()}`
+ );
+
+ await redis.sadd(envReservedKey, ...dummyJobs);
+ }
}
/**
diff --git a/docs/images/recursive-task-deadlock-min.png b/docs/images/recursive-task-deadlock-min.png
new file mode 100644
index 0000000000..423a539928
Binary files /dev/null and b/docs/images/recursive-task-deadlock-min.png differ
diff --git a/docs/queue-concurrency.mdx b/docs/queue-concurrency.mdx
index b629834eda..3529eaa44c 100644
--- a/docs/queue-concurrency.mdx
+++ b/docs/queue-concurrency.mdx
@@ -3,13 +3,26 @@ title: "Concurrency & Queues"
description: "Configure what you want to happen when there is more than one run at a time."
---
+When you trigger a task, it isn't executed immediately. Instead, the task [run](/runs) is placed into a queue for execution. By default, each task gets its own queue with unbounded concurrency—meaning the task runs as soon as resources are available, subject only to the overall concurrency limits of your environment. If you need more control (for example, to limit concurrency or share limits across multiple tasks), you can define a custom queue as described later in this document.
+
Controlling concurrency is useful when you have a task that can't be run concurrently, or when you want to limit the number of runs to avoid overloading a resource.
-## One at a time
+## Default concurrency
+
+By default, all tasks have an unbounded concurrency limit, limited only by the overall concurrency limits of your environment. This means that a single task could "fill up" the entire
+concurrency limit of your environment.
+
+<Note>
+ Your environment has a maximum concurrency limit which depends on your plan. If you're a paying
+ customer you can request a higher limit by [contacting us](https://www.trigger.dev/contact).
+</Note>
+
+## Setting task concurrency
-This task will only ever have a single run executing at a time. All other runs will be queued until the current run is complete.
+You can set the concurrency limit for a task by setting the `concurrencyLimit` property on the task's queue. This limits the number of runs that can be executing at any one time:
```ts /trigger/one-at-a-time.ts
+// This task will only run one at a time
export const oneAtATime = task({
id: "one-at-a-time",
queue: {
@@ -21,38 +34,14 @@ export const oneAtATime = task({
});
```
-## Parallelism
+This is useful if you need to control access to a shared resource, like a database or an API that has rate limits.
-You can execute lots of tasks at once by combining high concurrency with [batch triggering](/triggering) (or just triggering in a loop).
-
-```ts /trigger/parallelism.ts
-export const parallelism = task({
- id: "parallelism",
- queue: {
- concurrencyLimit: 100,
- },
- run: async (payload) => {
- //...
- },
-});
-```
-
-<Warning>
- Be careful with high concurrency. If you're doing API requests you might hit rate limits. If
- you're hitting your database you might overload it.
-</Warning>
-
-<Note>
- Your organization has a maximum concurrency limit which depends on your plan. If you're a paying
- customer you can request a higher limit by [contacting us](https://www.trigger.dev/contact).
-</Note>
-
-## Defining a queue
+## Sharing concurrency between tasks
As well as putting queue settings directly on a task, you can define a queue and reuse it across multiple tasks. This allows you to share the same concurrency limit:
```ts /trigger/queue.ts
-const myQueue = queue({
+export const myQueue = queue({
name: "my-queue",
concurrencyLimit: 1,
});
@@ -74,6 +63,8 @@ export const task2 = task({
});
```
+In this example, `task1` and `task2` share the same queue, so only one of them can run at a time.
+
## Setting the concurrency when you trigger a run
When you trigger a task you can override the concurrency limit. This is really useful if you sometimes have high priority runs.
@@ -81,7 +72,7 @@ When you trigger a task you can override the concurrency limit. This is really u
The task:
```ts /trigger/override-concurrency.ts
-const generatePullRequest = task({
+export const generatePullRequest = task({
id: "generate-pull-request",
queue: {
//normally when triggering this task it will be limited to 1 run at a time
@@ -107,7 +98,7 @@ export async function POST(request: Request) {
queue: {
//the "main-branch" queue will have a concurrency limit of 10
//this triggered run will use that queue
- name: "main-branch",
+ name: "main-branch", // Make sure to change the queue name or the task concurrency limit will be updated
concurrencyLimit: 10,
},
});
@@ -123,7 +114,7 @@ export async function POST(request: Request) {
## Concurrency keys and per-tenant queuing
-If you're building an application where you want to run tasks for your users, you might want a separate queue for each of your users. (It doesn't have to be users, it can be any entity you want to separately limit the concurrency for.)
+If you're building an application where you want to run tasks for your users, you might want a separate queue for each of your users (or orgs, projects, etc.).
You can do this by using `concurrencyKey`. It creates a separate queue for each value of the key.
@@ -164,3 +155,146 @@ export async function POST(request: Request) {
}
}
```
+
+## Concurrency and subtasks
+
+When you trigger a task that has subtasks, the subtasks will not inherit the concurrency settings of the parent task. Unless otherwise specified, subtasks will run on their own queue.
+
+```ts /trigger/subtasks.ts
+export const parentTask = task({
+ id: "parent-task",
+ run: async (payload) => {
+ //trigger a subtask
+ await subtask.triggerAndWait(payload);
+ },
+});
+
+// This subtask will run on its own queue
+export const subtask = task({
+ id: "subtask",
+ run: async (payload) => {
+ //...
+ },
+});
+```
+
+## Waits and concurrency
+
+With our [task checkpoint system](/how-it-works#the-checkpoint-resume-system), a parent task can trigger and wait for a subtask to complete. The way this system interacts with the concurrency system is a little complicated but important to understand. There are two main scenarios that we handle slightly differently:
+
+- When a parent task waits for a subtask on a different queue.
+- When a parent task waits for a subtask on the same queue.
+
+These scenarios are discussed in more detail below:
+
+<Note>
+ We sometimes refer to the parent task as the "parent" and the subtask as the "child". Subtask and
+ child task are used interchangeably. We apologize for the confusion.
+</Note>
+
+### Waiting for a subtask on a different queue
+
+During the time when a parent task is waiting on a subtask, the "concurrency" slot of the parent task is still considered occupied on the parent task queue, but is temporarily "released" to the environment. An example will help illustrate this:
+
+```ts /trigger/waiting.ts
+export const parentTask = task({
+ id: "parent-task",
+ queue: {
+ concurrencyLimit: 1,
+ },
+ run: async (payload) => {
+ //trigger a subtask
+ await subtask.triggerAndWait(payload);
+ },
+});
+
+export const subtask = task({
+ id: "subtask",
+ run: async (payload) => {
+ //...
+ },
+});
+```
+
+For example, let's say the environment concurrency limit is 1. When the parent task is triggered, it will occupy the only slot in the environment. When the parent task triggers the subtask, the subtask will be placed in its own queue. The parent task will then wait for the subtask to complete. During this time, the parent task slot is temporarily released to the environment, allowing another task to run. Once the subtask completes, the parent task slot is reoccupied.
+
+This system prevents "stuck" tasks. If the parent task held its environment slot while waiting, the subtask could never start and the environment would be deadlocked.
+
+And because only the environment slot is released, the parent task queue slot is still occupied. This means that if another task is triggered on the parent task queue, it will be placed in the queue and wait for the parent task to complete, respecting the concurrency limit.
+
+### Waiting for a subtask on the same queue
+
+Because tasks can trigger and wait recursively, or share the same queue, we've added special handling for when a parent task waits for a subtask on the same queue.
+
+Recall above that when waiting for a subtask on a different queue, the parent task slot is temporarily released to the environment. When the parent task and the subtask share a queue, we also release the parent task slot to the queue. Again, an example will help illustrate this:
+
+```ts /trigger/waiting-same-queue.ts
+export const myQueue = queue({
+ name: "my-queue",
+ concurrencyLimit: 1,
+});
+
+export const parentTask = task({
+ id: "parent-task",
+ queue: myQueue,
+ run: async (payload) => {
+ //trigger a subtask
+ await subtask.triggerAndWait(payload);
+ },
+});
+
+export const subtask = task({
+ id: "subtask",
+ queue: myQueue,
+ run: async (payload) => {
+ //...
+ },
+});
+```
+
+In this example, the parent task and the subtask share the same queue with a concurrency limit of 1. When the parent task triggers the subtask, the parent task slot is released to the queue, giving the subtask the opportunity to run. Once the subtask completes, the parent task slot is reoccupied.
+
+It's very important to note that we only release at most X slots to the queue, where X is the concurrency limit of the queue. This means that you can only trigger and wait for X subtasks on the same queue. If you try to trigger and wait for more than X subtasks, you will receive a `RECURSIVE_WAIT_DEADLOCK` error. The following example will result in a deadlock:
+
+```ts /trigger/deadlock.ts
+export const myQueue = queue({
+ name: "my-queue",
+ concurrencyLimit: 1,
+});
+
+export const parentTask = task({
+ id: "parent-task",
+ queue: myQueue,
+ run: async (payload) => {
+ //trigger a subtask
+ await subtask.triggerAndWait(payload);
+ },
+});
+
+export const subtask = task({
+ id: "subtask",
+ queue: myQueue,
+ run: async (payload) => {
+ //trigger a subtask
+ await subsubtask.triggerAndWait(payload);
+ },
+});
+
+export const subsubtask = task({
+ id: "subsubtask",
+ queue: myQueue,
+ run: async (payload) => {
+ //...
+ },
+});
+```
+
+Now this will result in a `RECURSIVE_WAIT_DEADLOCK` error because the parent task is waiting for the subtask, and the subtask is waiting for the subsubtask, but there is no more concurrency available in the queue. It will look a bit like this in the logs:
+
+![Recursive task deadlock](/images/recursive-task-deadlock-min.png)
+
+### Mitigating recursive wait deadlocks
+
+If you are recursively triggering and waiting for tasks on the same queue, you can mitigate the risk of a deadlock by increasing the concurrency limit of the queue. This will allow you to trigger and wait for more subtasks.
+
+You can also use different queues for the parent task and the subtask. This will allow you to trigger and wait for more subtasks without the risk of a deadlock.
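
To make the second mitigation concrete, here is a sketch in the style of the examples above: giving the parent and the subtask separate queues means the child never competes for the parent's queue slot (identifiers are illustrative):

```ts
export const parentTask = task({
  id: "parent-task",
  queue: { concurrencyLimit: 1 }, // parent has its own queue
  run: async (payload) => {
    await subtask.triggerAndWait(payload);
  },
});

export const subtask = task({
  id: "subtask",
  queue: { concurrencyLimit: 1 }, // separate queue, so no recursive wait deadlock
  run: async (payload) => {
    //...
  },
});
```
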
diff --git a/internal-packages/database/prisma/migrations/20250210164232_add_queue_timestamp_to_run/migration.sql b/internal-packages/database/prisma/migrations/20250210164232_add_queue_timestamp_to_run/migration.sql
new file mode 100644
index 0000000000..ef3add8f6d
--- /dev/null
+++ b/internal-packages/database/prisma/migrations/20250210164232_add_queue_timestamp_to_run/migration.sql
@@ -0,0 +1,5 @@
+-- AlterTable
+ALTER TABLE
+ "TaskRun"
+ADD
+ COLUMN "queueTimestamp" TIMESTAMP(3);
\ No newline at end of file
diff --git a/internal-packages/database/prisma/migrations/20250211150836_add_aborted_batch_task_run_status/migration.sql b/internal-packages/database/prisma/migrations/20250211150836_add_aborted_batch_task_run_status/migration.sql
new file mode 100644
index 0000000000..4fcc2da707
--- /dev/null
+++ b/internal-packages/database/prisma/migrations/20250211150836_add_aborted_batch_task_run_status/migration.sql
@@ -0,0 +1,4 @@
+-- AlterEnum
+ALTER TYPE "BatchTaskRunStatus"
+ADD
+ VALUE 'ABORTED';
\ No newline at end of file
diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma
index d676484d9b..7ff86d95e1 100644
--- a/internal-packages/database/prisma/schema.prisma
+++ b/internal-packages/database/prisma/schema.prisma
@@ -1731,6 +1731,8 @@ model TaskRun {
taskEventStore String @default("taskEvent")
+ queueTimestamp DateTime?
+
batchItems BatchTaskRunItem[]
dependency TaskRunDependency?
CheckpointRestoreEvent CheckpointRestoreEvent[]
@@ -2208,6 +2210,7 @@ model BatchTaskRun {
enum BatchTaskRunStatus {
PENDING
COMPLETED
+ ABORTED
}
model BatchTaskRunItem {
diff --git a/packages/core/src/v3/errors.ts b/packages/core/src/v3/errors.ts
index a079d0d71c..46d1f1a3df 100644
--- a/packages/core/src/v3/errors.ts
+++ b/packages/core/src/v3/errors.ts
@@ -235,6 +235,7 @@ export function shouldRetryError(error: TaskRunError): boolean {
case "TASK_RUN_HEARTBEAT_TIMEOUT":
case "OUTDATED_SDK_VERSION":
case "TASK_DID_CONCURRENT_WAIT":
+ case "RECURSIVE_WAIT_DEADLOCK":
return false;
case "GRACEFUL_EXIT_TIMEOUT":
@@ -512,6 +513,14 @@ const prettyInternalErrors: Partial<
href: links.docs.troubleshooting.concurrentWaits,
},
},
+ RECURSIVE_WAIT_DEADLOCK: {
+ message:
+ "This run will never execute because it was triggered recursively and the task has no remaining concurrency available.",
+ link: {
+ name: "See docs for help",
+ href: links.docs.concurrency.recursiveDeadlock,
+ },
+ },
};
const getPrettyTaskRunError = (code: TaskRunInternalError["code"]): TaskRunInternalError => {
@@ -672,6 +681,11 @@ export function exceptionEventEnhancer(
default:
return exception;
}
+ } else if (exception.message?.includes(TaskRunErrorCodes.RECURSIVE_WAIT_DEADLOCK)) {
+ return {
+ ...exception,
+ ...prettyInternalErrors.RECURSIVE_WAIT_DEADLOCK,
+ };
}
break;
}
@@ -898,3 +912,20 @@ function tryJsonParse(data: string | undefined): any {
return;
}
}
+
+export function taskRunErrorToString(error: TaskRunError): string {
+ switch (error.type) {
+ case "INTERNAL_ERROR": {
+ return `Internal error [${error.code}]${error.message ? `: ${error.message}` : ""}`;
+ }
+ case "BUILT_IN_ERROR": {
+ return `${error.name}: ${error.message}`;
+ }
+ case "STRING_ERROR": {
+ return error.raw;
+ }
+ case "CUSTOM_ERROR": {
+ return error.raw;
+ }
+ }
+}
diff --git a/packages/core/src/v3/links.ts b/packages/core/src/v3/links.ts
index 11a45fe023..b7936a5bc3 100644
--- a/packages/core/src/v3/links.ts
+++ b/packages/core/src/v3/links.ts
@@ -15,6 +15,10 @@ export const links = {
troubleshooting: {
concurrentWaits: "https://trigger.dev/docs/troubleshooting#parallel-waits-are-not-supported",
},
+ concurrency: {
+ recursiveDeadlock:
+ "https://trigger.dev/docs/queue-concurrency#waiting-for-a-subtask-on-the-same-queue",
+ },
},
site: {
home: "https://trigger.dev",
diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts
index 5b47d99c20..a8f9faaeed 100644
--- a/packages/core/src/v3/schemas/common.ts
+++ b/packages/core/src/v3/schemas/common.ts
@@ -172,6 +172,7 @@ export const TaskRunInternalError = z.object({
"POD_UNKNOWN_ERROR",
"OUTDATED_SDK_VERSION",
"TASK_DID_CONCURRENT_WAIT",
+ "RECURSIVE_WAIT_DEADLOCK",
]),
message: z.string().optional(),
stackTrace: z.string().optional(),
diff --git a/packages/core/src/v3/utils/flattenAttributes.ts b/packages/core/src/v3/utils/flattenAttributes.ts
index 545b0184e6..e2791f21ac 100644
--- a/packages/core/src/v3/utils/flattenAttributes.ts
+++ b/packages/core/src/v3/utils/flattenAttributes.ts
@@ -5,7 +5,7 @@ export const CIRCULAR_REFERENCE_SENTINEL = "$@circular((";
export function flattenAttributes(
  obj: Record<string, unknown> | Array<Record<string, unknown>> | string | boolean | number | null | undefined,
- prefix?: string ,
+ prefix?: string,
seen: WeakSet