From a5ad0a78d9e1541a7bd9d80897f0ae90089c6530 Mon Sep 17 00:00:00 2001
From: Aurish Hammad Hafeez
Date: Wed, 17 Jul 2024 11:19:41 +0500
Subject: [PATCH 1/2] Fix handling of --response-format in audio transcriptions
 create command

---
 src/openai/cli/_utils.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/openai/cli/_utils.py b/src/openai/cli/_utils.py
index 673eed613c..d53e6acc6c 100644
--- a/src/openai/cli/_utils.py
+++ b/src/openai/cli/_utils.py
@@ -33,7 +33,10 @@ def organization_info() -> str:
 
 
 def print_model(model: BaseModel) -> None:
-    sys.stdout.write(model_json(model, indent=2) + "\n")
+    if isinstance(model, BaseModel):
+        sys.stdout.write(model_json(model, indent=2) + "\n")
+    elif isinstance(model, str):
+        sys.stdout.write(model)
 
 
 def can_use_http2() -> bool:

From 6f384559c3eb73405bb07d8258c06a10ebdcd19a Mon Sep 17 00:00:00 2001
From: Robert Craigie
Date: Mon, 22 Jul 2024 11:04:28 +0100
Subject: [PATCH 2/2] handle the string case in audio directly

---
 src/openai/cli/_api/audio.py | 52 +++++++++++++++++++++++-------------
 src/openai/cli/_utils.py     |  5 +---
 2 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/src/openai/cli/_api/audio.py b/src/openai/cli/_api/audio.py
index 90d21b9932..269c67df28 100644
--- a/src/openai/cli/_api/audio.py
+++ b/src/openai/cli/_api/audio.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import sys
 from typing import TYPE_CHECKING, Any, Optional, cast
 from argparse import ArgumentParser
 
@@ -7,6 +8,7 @@
 from ..._types import NOT_GIVEN
 from .._models import BaseModel
 from .._progress import BufferReader
+from ...types.audio import Transcription
 
 if TYPE_CHECKING:
     from argparse import _SubParsersAction
@@ -65,30 +67,42 @@ def transcribe(args: CLITranscribeArgs) -> None:
         with open(args.file, "rb") as file_reader:
             buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
 
-        model = get_client().audio.transcriptions.create(
-            file=(args.file, buffer_reader),
-            model=args.model,
-            language=args.language or NOT_GIVEN,
-            temperature=args.temperature or NOT_GIVEN,
-            prompt=args.prompt or NOT_GIVEN,
-            # casts required because the API is typed for enums
-            # but we don't want to validate that here for forwards-compat
-            response_format=cast(Any, args.response_format),
+        model = cast(
+            "Transcription | str",
+            get_client().audio.transcriptions.create(
+                file=(args.file, buffer_reader),
+                model=args.model,
+                language=args.language or NOT_GIVEN,
+                temperature=args.temperature or NOT_GIVEN,
+                prompt=args.prompt or NOT_GIVEN,
+                # casts required because the API is typed for enums
+                # but we don't want to validate that here for forwards-compat
+                response_format=cast(Any, args.response_format),
+            ),
         )
-        print_model(model)
+        if isinstance(model, str):
+            sys.stdout.write(model + "\n")
+        else:
+            print_model(model)
 
     @staticmethod
     def translate(args: CLITranslationArgs) -> None:
         with open(args.file, "rb") as file_reader:
             buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
 
-        model = get_client().audio.translations.create(
-            file=(args.file, buffer_reader),
-            model=args.model,
-            temperature=args.temperature or NOT_GIVEN,
-            prompt=args.prompt or NOT_GIVEN,
-            # casts required because the API is typed for enums
-            # but we don't want to validate that here for forwards-compat
-            response_format=cast(Any, args.response_format),
+        model = cast(
+            "Transcription | str",
+            get_client().audio.translations.create(
+                file=(args.file, buffer_reader),
+                model=args.model,
+                temperature=args.temperature or NOT_GIVEN,
+                prompt=args.prompt or NOT_GIVEN,
+                # casts required because the API is typed for enums
+                # but we don't want to validate that here for forwards-compat
+                response_format=cast(Any, args.response_format),
+            ),
         )
-        print_model(model)
+        if isinstance(model, str):
+            sys.stdout.write(model + "\n")
+        else:
+            print_model(model)
diff --git a/src/openai/cli/_utils.py b/src/openai/cli/_utils.py
index d53e6acc6c..673eed613c 100644
--- a/src/openai/cli/_utils.py
+++ b/src/openai/cli/_utils.py
@@ -33,10 +33,7 @@ def organization_info() -> str:
 
 
 def print_model(model: BaseModel) -> None:
-    if isinstance(model, BaseModel):
-        sys.stdout.write(model_json(model, indent=2) + "\n")
-    elif isinstance(model, str):
-        sys.stdout.write(model)
+    sys.stdout.write(model_json(model, indent=2) + "\n")
 
 
 def can_use_http2() -> bool:
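
For reference, a rough way to exercise this change from the CLI (a sketch only; the exact subcommand and flag spellings are assumptions inferred from how _api/audio.py registers its argparse arguments, and the sample file path is made up):

    openai api audio.transcriptions.create -m whisper-1 -f ./speech.mp3 --response-format text

With a non-JSON response format such as text or srt, the API returns a plain string rather than a Transcription model, so the new isinstance(model, str) branch writes the string straight to stdout instead of handing it to print_model, which expects a BaseModel.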