
Commit 352643e

chore: Additional unit test coverage and better error handling (#984)
1 parent: 66bfa3f

File tree

2 files changed: +403 -5 lines

packages/ai-providers/server-ai-vercel/__tests__/VercelProvider.test.ts

Lines changed: 391 additions & 0 deletions
@@ -388,4 +388,395 @@ describe('VercelProvider', () => {
      expect(result.getModel()).toBeDefined();
    });
  });

  describe('toVercelAISDK', () => {
    const mockToVercelModel = { name: 'mockModel' };
    const mockMessages = [
      { role: 'user' as const, content: 'test prompt' },
      { role: 'system' as const, content: 'test instruction' },
    ];
    const mockOptions = {
      nonInterpolatedMessages: [
        { role: 'assistant' as const, content: 'test assistant instruction' },
      ],
    };
    const mockProvider = jest.fn().mockReturnValue(mockToVercelModel);

    beforeEach(() => {
      jest.clearAllMocks();
    });

    it('handles undefined model and messages', () => {
      const aiConfig = {
        enabled: true,
      };

      const result = VercelProvider.toVercelAISDK(aiConfig, mockProvider);

      expect(mockProvider).toHaveBeenCalledWith('');
      expect(result).toEqual(
        expect.objectContaining({
          model: mockToVercelModel,
          messages: undefined,
        }),
      );
    });

    it('uses additional messages', () => {
      const aiConfig = {
        model: { name: 'test-ai-model' },
        enabled: true,
      };

      const result = VercelProvider.toVercelAISDK(aiConfig, mockProvider, mockOptions);

      expect(mockProvider).toHaveBeenCalledWith('test-ai-model');
      expect(result).toEqual(
        expect.objectContaining({
          model: mockToVercelModel,
          messages: mockOptions.nonInterpolatedMessages,
        }),
      );
    });

    it('combines config messages and additional messages', () => {
      const aiConfig = {
        model: { name: 'test-ai-model' },
        messages: mockMessages,
        enabled: true,
      };

      const result = VercelProvider.toVercelAISDK(aiConfig, mockProvider, mockOptions);

      expect(mockProvider).toHaveBeenCalledWith('test-ai-model');
      expect(result).toEqual(
        expect.objectContaining({
          model: mockToVercelModel,
          messages: [...mockMessages, ...(mockOptions.nonInterpolatedMessages ?? [])],
        }),
      );
    });
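
    // The config's snake_case model parameters are mapped to the Vercel AI
    // SDK's camelCase option names (max_tokens -> maxTokens,
    // stop -> stopSequences, and so on).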
    it('maps parameters correctly', () => {
      const aiConfig = {
        model: {
          name: 'test-ai-model',
          parameters: {
            max_tokens: 100,
            temperature: 0.7,
            top_p: 0.9,
            top_k: 50,
            presence_penalty: 0.1,
            frequency_penalty: 0.2,
            stop: ['stop1', 'stop2'],
            seed: 42,
          },
        },
        messages: mockMessages,
        enabled: true,
      };

      const result = VercelProvider.toVercelAISDK(aiConfig, mockProvider);

      expect(mockProvider).toHaveBeenCalledWith('test-ai-model');
      expect(result).toEqual({
        model: mockToVercelModel,
        messages: mockMessages,
        maxTokens: 100,
        temperature: 0.7,
        topP: 0.9,
        topK: 50,
        presencePenalty: 0.1,
        frequencyPenalty: 0.2,
        stopSequences: ['stop1', 'stop2'],
        seed: 42,
      });
    });
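
    // Instead of a single factory function, a map keyed by provider name can
    // be supplied; only the entry matching aiConfig.provider.name is invoked.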
    it('handles provider map with provider name', () => {
      const providerMap = {
        openai: jest.fn().mockReturnValue(mockToVercelModel),
        anthropic: jest.fn().mockReturnValue({ name: 'other-model' }),
      };

      const aiConfig = {
        model: { name: 'test-ai-model' },
        provider: { name: 'openai' },
        enabled: true,
      };

      const result = VercelProvider.toVercelAISDK(aiConfig, providerMap);

      expect(providerMap.openai).toHaveBeenCalledWith('test-ai-model');
      expect(providerMap.anthropic).not.toHaveBeenCalled();
      expect(result.model).toBe(mockToVercelModel);
    });

    it('throws error when model cannot be determined', () => {
      const aiConfig = {
        model: { name: 'test-ai-model' },
        provider: { name: 'unknown' },
        enabled: true,
      };

      const providerMap = {
        openai: jest.fn().mockReturnValue(mockToVercelModel),
      };

      expect(() => VercelProvider.toVercelAISDK(aiConfig, providerMap)).toThrow(
        'Vercel AI SDK model cannot be determined from the supplied provider parameter.',
      );
    });

    it('throws error when function provider returns undefined', () => {
      const aiConfig = {
        model: { name: 'test-ai-model' },
        enabled: true,
      };

      const undefinedProvider = jest.fn().mockReturnValue(undefined);

      expect(() => VercelProvider.toVercelAISDK(aiConfig, undefinedProvider)).toThrow(
        'Vercel AI SDK model cannot be determined from the supplied provider parameter.',
      );
    });
  });

  describe('getAIMetricsFromStream', () => {
    it('extracts metrics from successful stream with usage', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        usage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 49,
          completionTokens: 51,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: {
          total: 100,
          input: 49,
          output: 51,
        },
      });
    });
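
    // When both are present, totalUsage (AI SDK v5) takes precedence over
    // usage (v4).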
    it('extracts metrics using totalUsage when available', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        usage: Promise.resolve({
          totalTokens: 50,
          promptTokens: 20,
          completionTokens: 30,
        }),
        totalUsage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 49,
          completionTokens: 51,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: {
          total: 100,
          input: 49,
          output: 51,
        },
      });
    });

    it('handles stream without usage data', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: undefined,
      });
    });
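
    // A finishReason of 'error' (or a finishReason promise that rejects)
    // yields success: false; any resolved usage is still reported.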
    it('handles error finishReason', async () => {
      const mockStream = {
        finishReason: Promise.resolve('error'),
        usage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 49,
          completionTokens: 51,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: false,
        usage: {
          total: 100,
          input: 49,
          output: 51,
        },
      });
    });

    it('handles rejected finishReason promise', async () => {
      const mockStream = {
        finishReason: Promise.reject(new Error('API error')),
        usage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 49,
          completionTokens: 51,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: false,
        usage: {
          total: 100,
          input: 49,
          output: 51,
        },
      });
    });

    it('handles missing finishReason', async () => {
      const mockStream = {
        usage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 49,
          completionTokens: 51,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      // When finishReason is missing, it defaults to 'unknown' which is !== 'error', so success is true
      expect(result).toEqual({
        success: true,
        usage: {
          total: 100,
          input: 49,
          output: 51,
        },
      });
    });

    it('handles missing finishReason and usage', async () => {
      const mockStream = {};

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      // When finishReason is missing, it defaults to 'unknown' which is !== 'error', so success is true
      expect(result).toEqual({
        success: true,
        usage: undefined,
      });
    });
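
    // Usage extraction is best-effort: rejected usage/totalUsage promises are
    // swallowed, falling back from totalUsage to usage, or to undefined,
    // rather than throwing.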
    it('handles rejected usage promise gracefully', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        usage: Promise.reject(new Error('Usage API error')),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: undefined,
      });
    });

    it('handles rejected totalUsage promise and falls back to usage', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        totalUsage: Promise.reject(new Error('TotalUsage API error')),
        usage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 49,
          completionTokens: 51,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: {
          total: 100,
          input: 49,
          output: 51,
        },
      });
    });

    it('handles rejected totalUsage and usage promises gracefully', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        totalUsage: Promise.reject(new Error('TotalUsage API error')),
        usage: Promise.reject(new Error('Usage API error')),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: undefined,
      });
    });
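
    // Both AI SDK v4 (promptTokens/completionTokens) and v5
    // (inputTokens/outputTokens) usage field names normalize to the same
    // { total, input, output } shape.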
    it('supports v4 field names (promptTokens, completionTokens)', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        usage: Promise.resolve({
          totalTokens: 100,
          promptTokens: 40,
          completionTokens: 60,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: {
          total: 100,
          input: 40,
          output: 60,
        },
      });
    });

    it('supports v5 field names (inputTokens, outputTokens)', async () => {
      const mockStream = {
        finishReason: Promise.resolve('stop'),
        usage: Promise.resolve({
          totalTokens: 100,
          inputTokens: 40,
          outputTokens: 60,
        }),
      };

      const result = await VercelProvider.getAIMetricsFromStream(mockStream);

      expect(result).toEqual({
        success: true,
        usage: {
          total: 100,
          input: 40,
          output: 60,
        },
      });
    });
  });
});
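
Taken together, the new tests pin down the call pattern for the two helpers: toVercelAISDK maps a LaunchDarkly AI config onto Vercel AI SDK call options, and getAIMetricsFromStream turns a finished stream into { success, usage } metrics. A minimal end-to-end sketch of that flow follows; the @ai-sdk/openai provider, the model name, and the import path for VercelProvider are illustrative assumptions, not part of this commit:

import { openai } from '@ai-sdk/openai'; // illustrative provider choice
import { streamText } from 'ai';
// Package name assumed from the directory packages/ai-providers/server-ai-vercel.
import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel';

async function completeWithMetrics() {
  // Shaped like the aiConfig objects in the tests above; in an application
  // this would typically come from a LaunchDarkly AI Config evaluation.
  const aiConfig = {
    model: {
      name: 'gpt-4o-mini', // illustrative model name
      parameters: { max_tokens: 100, temperature: 0.7 },
    },
    messages: [{ role: 'system' as const, content: 'You are a helpful assistant.' }],
    enabled: true,
  };

  // Snake_case parameters come back as the SDK's camelCase options
  // (maxTokens, temperature, ...), ready to pass straight to the SDK.
  const sdkOptions = VercelProvider.toVercelAISDK(aiConfig, openai);
  const stream = streamText(sdkOptions);

  for await (const chunk of stream.textStream) {
    process.stdout.write(chunk);
  }

  // Resolves to { success, usage? }; success is false when the stream's
  // finishReason is 'error' or its promise rejects.
  return VercelProvider.getAIMetricsFromStream(stream);
}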
