Description
I am getting the error below:
⨯ Error: failed to pipe response
at pipeToNodeResponse (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/pipe-readable.js:126:15)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async sendResponse (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/send-response.js:40:13)
at async doRender (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:1382:25)
at async cacheEntry.responseCache.get.routeKind (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:1574:28)
at async DevServer.renderToResponseWithComponentsImpl (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:1482:28)
at async DevServer.renderPageComponent (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:1908:24)
at async DevServer.renderToResponseImpl (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:1946:32)
at async DevServer.pipeImpl (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:921:25)
at async NextNodeServer.handleCatchallRenderRequest (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/next-server.js:272:17)
at async DevServer.handleRequestImpl (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/base-server.js:817:17)
at async /Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/dev/next-dev-server.js:339:20
at async Span.traceAsyncFn (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/trace/trace.js:154:20)
at async DevServer.handleRequest (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/dev/next-dev-server.js:336:24)
at async invokeRender (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/lib/router-server.js:173:21)
at async handleRequest (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/lib/router-server.js:350:24)
at async requestHandlerImpl (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/lib/router-server.js:374:13)
at async Server.requestListener (/Users/aashwathanarayanan/Desktop/learning/analytics/genui/genui-hello-world/node_modules/next/dist/server/lib/start-server.js:141:13) {
[cause]: RetryError [AI_RetryError]: Failed after 3 attempts. Last error: Requests to the ChatCompletions_Create Operation under Azure OpenAI API version 2024-08-01-preview have exceeded token rate limit of your current OpenAI S0 pricing tier. Please retry after 31 seconds. Please go here: https://aka.ms/oai/quotaincrease if you would like to further increase the default rate limit.
at _retryWithExponentialBackoff (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:176:13)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async startStep (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:3949:13)
at async Object.flush (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:4355:21) {
cause: undefined,
reason: 'maxRetriesExceeded',
errors: [ [APICallError], [APICallError], [APICallError] ],
lastError: APICallError [AI_APICallError]: Requests to the ChatCompletions_Create Operation under Azure OpenAI API version 2024-08-01-preview have exceeded token rate limit of your current OpenAI S0 pricing tier. Please retry after 31 seconds. Please go here: https://aka.ms/oai/quotaincrease if you would like to further increase the default rate limit.
at eval (webpack-internal:///(rsc)/./node_modules/@ai-sdk/azure/node_modules/@ai-sdk/provider-utils/dist/index.mjs:474:14)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async postToApi (webpack-internal:///(rsc)/./node_modules/@ai-sdk/azure/node_modules/@ai-sdk/provider-utils/dist/index.mjs:381:28)
at async OpenAIChatLanguageModel.doStream (webpack-internal:///(rsc)/./node_modules/@ai-sdk/azure/node_modules/@ai-sdk/openai/internal/dist/index.mjs:600:50)
at async fn (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:3984:23)
at async eval (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:336:22)
at async _retryWithExponentialBackoff (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:164:12)
at async startStep (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:3949:13)
at async Object.flush (webpack-internal:///(rsc)/./node_modules/ai/dist/index.mjs:4355:21) {
cause: undefined,
url: 'https://analytics-sandbox-eastus-ai-stage01.openai.azure.com/openai/deployments/gpt-4o-2024-08-06/chat/completions?api-version=2024-08-01-preview',
requestBodyValues: [Object],
statusCode: 429,
responseHeaders: [Object],
responseBody: '{"error":{"code":"429","message": "Requests to the ChatCompletions_Create Operation under Azure OpenAI API version 2024-08-01-preview have exceeded token rate limit of your current OpenAI S0 pricing tier. Please retry after 31 seconds. Please go here: https://aka.ms/oai/quotaincrease if you would like to further increase the default rate limit."}}',
isRetryable: true,
data: [Object],
[Symbol(vercel.ai.error)]: true,
[Symbol(vercel.ai.error.AI_APICallError)]: true
},
[Symbol(vercel.ai.error)]: true,
[Symbol(vercel.ai.error.AI_RetryError)]: true
}
}
Code example
const result = await streamText({});
I tried using try/catch but was unable to catch the error. How can I handle manual retries?
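One way to do this manually is sketched below, assuming the SDK's built-in retries are turned off so the rate-limit error can be handled in application code. The azure(...) model call, the deployment name (copied from the error URL), and the runWithManualRetries helper are illustrative, and the exact stream part shapes and error exports can differ between ai versions, so treat this as a sketch rather than a drop-in fix. The key point is that in recent ai versions streamText resolves before the request finishes, so a try/catch around the call alone never sees errors raised while the stream is being produced; the stream has to be consumed inside the try block.

import { streamText, APICallError, RetryError } from 'ai';
import { azure } from '@ai-sdk/azure'; // default provider reads AZURE_RESOURCE_NAME / AZURE_API_KEY from the environment

const MAX_ATTEMPTS = 3;

export async function runWithManualRetries(prompt: string): Promise<string> {
  for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
    try {
      const result = await streamText({
        model: azure('gpt-4o-2024-08-06'), // deployment name taken from the error URL
        prompt,
        maxRetries: 0, // disable the SDK's built-in exponential backoff
      });

      // Errors raised while streaming only become visible when the stream is
      // consumed, so consume it here and rethrow error parts.
      let text = '';
      for await (const part of result.fullStream) {
        if (part.type === 'error') throw part.error;
        if (part.type === 'text-delta') text += part.textDelta;
      }
      return text;
    } catch (error) {
      // With retries disabled the 429 arrives as an APICallError; with the
      // default retries it is wrapped in a RetryError, as in the log above.
      const cause = RetryError.isInstance(error) ? error.lastError : error;
      const isRateLimit = APICallError.isInstance(cause) && cause.statusCode === 429;
      if (!isRateLimit || attempt === MAX_ATTEMPTS) throw error;

      // The 429 response asks to retry after ~31 seconds.
      await new Promise((resolve) => setTimeout(resolve, 31_000));
    }
  }
  throw new Error('unreachable');
}

Note that this buffers the whole completion before returning it; if the response has to stream to the client, the same error-part check would need to run while the stream is being forwarded.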
AI provider
@ai-sdk/azure - 0.0.52
Additional context
No response