Skip to content

Commit dee4ef0

Browse files
dgellow authored and stainless-app[bot] committed
feat: dynamically build domain for agents.chat.completions.create()
1 parent 61bbd9e commit dee4ef0

File tree

3 files changed

+40
-6
lines changed

3 files changed

+40
-6
lines changed

src/do_gradientai/resources/agents/chat/completions.py

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
5050
def create(
5151
self,
5252
*,
53+
agent_domain: str,
5354
messages: Iterable[completion_create_params.Message],
5455
model: str,
5556
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -80,6 +81,8 @@ def create(
8081
Creates a model response for the given chat conversation.
8182
8283
Args:
84+
agent_domain: The agent domain to use for the request.
85+
8386
messages: A list of messages comprising the conversation so far.
8487
8588
model: Model ID used to generate the response.
@@ -178,6 +181,7 @@ def create(
178181
def create(
179182
self,
180183
*,
184+
agent_domain: str,
181185
messages: Iterable[completion_create_params.Message],
182186
model: str,
183187
stream: Literal[True],
@@ -208,6 +212,8 @@ def create(
208212
Creates a model response for the given chat conversation.
209213
210214
Args:
215+
agent_domain: The agent domain to use for the request.
216+
211217
messages: A list of messages comprising the conversation so far.
212218
213219
model: Model ID used to generate the response.
@@ -306,6 +312,7 @@ def create(
306312
def create(
307313
self,
308314
*,
315+
agent_domain: str,
309316
messages: Iterable[completion_create_params.Message],
310317
model: str,
311318
stream: bool,
@@ -336,6 +343,8 @@ def create(
336343
Creates a model response for the given chat conversation.
337344
338345
Args:
346+
agent_domain: The agent domain to use for the request.
347+
339348
messages: A list of messages comprising the conversation so far.
340349
341350
model: Model ID used to generate the response.
@@ -430,10 +439,11 @@ def create(
430439
"""
431440
...
432441

433-
@required_args(["messages", "model"], ["messages", "model", "stream"])
442+
@required_args(["agent_domain", "messages", "model"], ["agent_domain", "messages", "model", "stream"])
434443
def create(
435444
self,
436445
*,
446+
agent_domain: str,
437447
messages: Iterable[completion_create_params.Message],
438448
model: str,
439449
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -463,7 +473,7 @@ def create(
463473
return self._post(
464474
"/chat/completions?agent=true"
465475
if self._client._base_url_overridden
466-
else "https://inference.do-ai.run/v1/chat/completions?agent=true",
476+
else f"https://{agent_domain}/v1/chat/completions?agent=true",
467477
body=maybe_transform(
468478
{
469479
"messages": messages,
@@ -523,6 +533,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon
523533
async def create(
524534
self,
525535
*,
536+
agent_domain: str,
526537
messages: Iterable[completion_create_params.Message],
527538
model: str,
528539
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -553,6 +564,8 @@ async def create(
553564
Creates a model response for the given chat conversation.
554565
555566
Args:
567+
agent_domain: The agent domain to use for the request.
568+
556569
messages: A list of messages comprising the conversation so far.
557570
558571
model: Model ID used to generate the response.
@@ -651,6 +664,7 @@ async def create(
651664
async def create(
652665
self,
653666
*,
667+
agent_domain: str,
654668
messages: Iterable[completion_create_params.Message],
655669
model: str,
656670
stream: Literal[True],
@@ -681,6 +695,8 @@ async def create(
681695
Creates a model response for the given chat conversation.
682696
683697
Args:
698+
agent_domain: The agent domain to use for the request.
699+
684700
messages: A list of messages comprising the conversation so far.
685701
686702
model: Model ID used to generate the response.
@@ -779,6 +795,7 @@ async def create(
779795
async def create(
780796
self,
781797
*,
798+
agent_domain: str,
782799
messages: Iterable[completion_create_params.Message],
783800
model: str,
784801
stream: bool,
@@ -809,6 +826,8 @@ async def create(
809826
Creates a model response for the given chat conversation.
810827
811828
Args:
829+
agent_domain: The agent domain to use for the request.
830+
812831
messages: A list of messages comprising the conversation so far.
813832
814833
model: Model ID used to generate the response.
@@ -903,10 +922,11 @@ async def create(
903922
"""
904923
...
905924

906-
@required_args(["messages", "model"], ["messages", "model", "stream"])
925+
@required_args(["agent_domain", "messages", "model"], ["agent_domain", "messages", "model", "stream"])
907926
async def create(
908927
self,
909928
*,
929+
agent_domain: str,
910930
messages: Iterable[completion_create_params.Message],
911931
model: str,
912932
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -936,7 +956,7 @@ async def create(
936956
return await self._post(
937957
"/chat/completions?agent=true"
938958
if self._client._base_url_overridden
939-
else "https://inference.do-ai.run/v1/chat/completions?agent=true",
959+
else f"https://{agent_domain}/v1/chat/completions?agent=true",
940960
body=await async_maybe_transform(
941961
{
942962
"messages": messages,

src/do_gradientai/resources/chat/completions.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -461,7 +461,6 @@ def create(
461461
extra_body: Body | None = None,
462462
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
463463
) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
464-
465464
# This method requires an inference_key to be set via client argument or environment variable
466465
if not self._client.inference_key:
467466
raise TypeError(
@@ -944,7 +943,6 @@ async def create(
944943
extra_body: Body | None = None,
945944
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
946945
) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
947-
948946
# This method requires an inference_key to be set via client argument or environment variable
949947
if not hasattr(self._client, "inference_key") or not self._client.inference_key:
950948
raise TypeError(

tests/api_resources/agents/chat/test_completions.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ class TestCompletions:
2121
@parametrize
2222
def test_method_create_overload_1(self, client: GradientAI) -> None:
2323
completion = client.agents.chat.completions.create(
24+
agent_domain="inference.do-ai.run",
2425
messages=[
2526
{
2627
"content": "string",
@@ -35,6 +36,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None:
3536
@parametrize
3637
def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None:
3738
completion = client.agents.chat.completions.create(
39+
agent_domain="inference.do-ai.run",
3840
messages=[
3941
{
4042
"content": "string",
@@ -75,6 +77,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N
7577
@parametrize
7678
def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
7779
response = client.agents.chat.completions.with_raw_response.create(
80+
agent_domain="inference.do-ai.run",
7881
messages=[
7982
{
8083
"content": "string",
@@ -93,6 +96,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
9396
@parametrize
9497
def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
9598
with client.agents.chat.completions.with_streaming_response.create(
99+
agent_domain="inference.do-ai.run",
96100
messages=[
97101
{
98102
"content": "string",
@@ -113,6 +117,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
113117
@parametrize
114118
def test_method_create_overload_2(self, client: GradientAI) -> None:
115119
completion_stream = client.agents.chat.completions.create(
120+
agent_domain="inference.do-ai.run",
116121
messages=[
117122
{
118123
"content": "string",
@@ -128,6 +133,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None:
128133
@parametrize
129134
def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None:
130135
completion_stream = client.agents.chat.completions.create(
136+
agent_domain="inference.do-ai.run",
131137
messages=[
132138
{
133139
"content": "string",
@@ -168,6 +174,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N
168174
@parametrize
169175
def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
170176
response = client.agents.chat.completions.with_raw_response.create(
177+
agent_domain="inference.do-ai.run",
171178
messages=[
172179
{
173180
"content": "string",
@@ -186,6 +193,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
186193
@parametrize
187194
def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
188195
with client.agents.chat.completions.with_streaming_response.create(
196+
agent_domain="inference.do-ai.run",
189197
messages=[
190198
{
191199
"content": "string",
@@ -213,6 +221,7 @@ class TestAsyncCompletions:
213221
@parametrize
214222
async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
215223
completion = await async_client.agents.chat.completions.create(
224+
agent_domain="inference.do-ai.run",
216225
messages=[
217226
{
218227
"content": "string",
@@ -227,6 +236,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->
227236
@parametrize
228237
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
229238
completion = await async_client.agents.chat.completions.create(
239+
agent_domain="inference.do-ai.run",
230240
messages=[
231241
{
232242
"content": "string",
@@ -267,6 +277,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
267277
@parametrize
268278
async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
269279
response = await async_client.agents.chat.completions.with_raw_response.create(
280+
agent_domain="inference.do-ai.run",
270281
messages=[
271282
{
272283
"content": "string",
@@ -285,6 +296,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA
285296
@parametrize
286297
async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
287298
async with async_client.agents.chat.completions.with_streaming_response.create(
299+
agent_domain="inference.do-ai.run",
288300
messages=[
289301
{
290302
"content": "string",
@@ -305,6 +317,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra
305317
@parametrize
306318
async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None:
307319
completion_stream = await async_client.agents.chat.completions.create(
320+
agent_domain="inference.do-ai.run",
308321
messages=[
309322
{
310323
"content": "string",
@@ -320,6 +333,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) ->
320333
@parametrize
321334
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
322335
completion_stream = await async_client.agents.chat.completions.create(
336+
agent_domain="inference.do-ai.run",
323337
messages=[
324338
{
325339
"content": "string",
@@ -360,6 +374,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
360374
@parametrize
361375
async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
362376
response = await async_client.agents.chat.completions.with_raw_response.create(
377+
agent_domain="inference.do-ai.run",
363378
messages=[
364379
{
365380
"content": "string",
@@ -378,6 +393,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA
378393
@parametrize
379394
async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
380395
async with async_client.agents.chat.completions.with_streaming_response.create(
396+
agent_domain="inference.do-ai.run",
381397
messages=[
382398
{
383399
"content": "string",

0 commit comments

Comments (0)