From aa92cfe1d61691b5fb73f65544518ed0bf6fe9a1 Mon Sep 17 00:00:00 2001 From: Arundas666 Date: Wed, 4 Sep 2024 16:54:45 +0530 Subject: [PATCH 1/2] deprecated model GPT3Ada changed to GPT3Babbage002 --- README.md | 4 ++-- example_test.go | 4 ++-- examples/completion/main.go | 2 +- stream_test.go | 2 +- test.mp3 | 1 + 5 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 test.mp3 diff --git a/README.md b/README.md index 0d6aafa40..b3ebc1471 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,7 @@ func main() { ctx := context.Background() req := openai.CompletionRequest{ - Model: openai.GPT3Ada, + Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", } @@ -174,7 +174,7 @@ func main() { ctx := context.Background() req := openai.CompletionRequest{ - Model: openai.GPT3Ada, + Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", Stream: true, diff --git a/example_test.go b/example_test.go index e5dbf44bf..5910ffb84 100644 --- a/example_test.go +++ b/example_test.go @@ -82,7 +82,7 @@ func ExampleClient_CreateCompletion() { resp, err := client.CreateCompletion( context.Background(), openai.CompletionRequest{ - Model: openai.GPT3Ada, + Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", }, @@ -99,7 +99,7 @@ func ExampleClient_CreateCompletionStream() { stream, err := client.CreateCompletionStream( context.Background(), openai.CompletionRequest{ - Model: openai.GPT3Ada, + Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", Stream: true, diff --git a/examples/completion/main.go b/examples/completion/main.go index 22af1fd82..8c5cbd5ca 100644 --- a/examples/completion/main.go +++ b/examples/completion/main.go @@ -13,7 +13,7 @@ func main() { resp, err := client.CreateCompletion( context.Background(), openai.CompletionRequest{ - Model: openai.GPT3Ada, + Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", }, diff --git a/stream_test.go b/stream_test.go index 2822a3535..9dd95bb5f 100644 --- 
a/stream_test.go +++ b/stream_test.go @@ -169,7 +169,7 @@ func TestCreateCompletionStreamRateLimitError(t *testing.T) { var apiErr *openai.APIError _, err := client.CreateCompletionStream(context.Background(), openai.CompletionRequest{ MaxTokens: 5, - Model: openai.GPT3Ada, + Model: openai.GPT3Babbage002, Prompt: "Hello!", Stream: true, }) diff --git a/test.mp3 b/test.mp3 new file mode 100644 index 000000000..b6fc4c620 --- /dev/null +++ b/test.mp3 @@ -0,0 +1 @@ +hello \ No newline at end of file From 6fdc73b5e783545ab601f2563588a9ec9bb4c2bc Mon Sep 17 00:00:00 2001 From: Arun Das <89579096+Arundas666@users.noreply.github.com> Date: Wed, 4 Sep 2024 17:15:33 +0530 Subject: [PATCH 2/2] Delete test.mp3 --- test.mp3 | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test.mp3 diff --git a/test.mp3 b/test.mp3 deleted file mode 100644 index b6fc4c620..000000000 --- a/test.mp3 +++ /dev/null @@ -1 +0,0 @@ -hello \ No newline at end of file