@@ -4,7 +4,7 @@
 from collections.abc import AsyncGenerator
 import time
 from math import ceil
-from typing import AsyncGenerator, Final, Optional, Union, cast
+from typing import AsyncGenerator, Final, Optional, Tuple, Union, cast

 from fastapi import Request

@@ -14,7 +14,7 @@
 from vllm.entrypoints.openai.protocol import (
     DeltaMessage, ErrorResponse, RequestResponseMetadata, TranscriptionRequest,
     TranscriptionResponse, TranscriptionResponseStreamChoice,
-    TranscriptionResponseVerbose, TranscriptionStreamResponse, UsageInfo)
+    TranscriptionStreamResponse, UsageInfo)
 from vllm.entrypoints.openai.serving_engine import OpenAIServing
 from vllm.entrypoints.openai.serving_models import OpenAIServingModels
 from vllm.inputs.data import PromptType
@@ -177,7 +177,7 @@ async def _preprocess_transcription(
         self,
         request: TranscriptionRequest,
         audio_data: bytes,
-    ) -> PromptType:
+    ) -> Tuple[PromptType, float]:
         # Validate request
         # TODO language should be optional and can be guessed.
         # For now we default to en. See
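For context on this hunk: the preprocessing step now returns a float alongside the prompt. Only the signature is visible here, but the float is presumably the decoded audio duration in seconds, which would let the caller derive token usage from audio length (the `ceil` and `UsageInfo` imports point the same way). A minimal, self-contained sketch of that idea, with the helper name and the per-second rate both made up for illustration:

from math import ceil

# Hypothetical helper, not vLLM code: turn an audio duration (the float assumed
# to be returned by _preprocess_transcription) into a rough prompt-token
# estimate. The tokens-per-second rate is a placeholder, not taken from this diff.
def estimate_prompt_tokens(duration_s: float, tokens_per_second: int = 50) -> int:
    return ceil(duration_s * tokens_per_second)

print(estimate_prompt_tokens(12.3))  # -> 615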
@@ -226,7 +226,7 @@ async def _preprocess_transcription(
     async def create_transcription(
         self, audio_data: bytes, request: TranscriptionRequest,
         raw_request: Request
-    ) -> Union[TranscriptionResponse, TranscriptionResponseVerbose,
+    ) -> Union[TranscriptionResponse, AsyncGenerator[str, None],
                ErrorResponse]:
         """Transcription API similar to OpenAI's API.

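The widened return type on create_transcription reads like a streaming path: besides a full TranscriptionResponse or an ErrorResponse, a call can now hand back an async generator of strings (consistent with the TranscriptionStreamResponse import above). A hedged sketch of how a caller might branch on the two shapes; the helper and the assumption that streamed items are SSE "data: ..." chunks are mine, not taken from the diff:

import inspect

async def consume_transcription(result) -> None:
    # Hypothetical consumer, not vLLM code: branch on whether the call streamed.
    if inspect.isasyncgen(result):
        # Streaming path: items are assumed to be SSE-formatted string chunks.
        async for chunk in result:
            print(chunk, end="")
    else:
        # Non-streaming path: a TranscriptionResponse or an ErrorResponse object.
        print(result)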