Commit d0e46d9

release: 0.1.0-alpha.10 (#16)
1 parent da40480 commit d0e46d9

File tree

10 files changed: +257 -122 lines


.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 {
-  ".": "0.1.0-alpha.9"
+  ".": "0.1.0-alpha.10"
 }

.stats.yml

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 configured_endpoints: 76
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
 openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 0bc3af28d4abd9be8bcc81f615bc832d
+config_hash: 9b44ce3fd39c43f2001bc11934e6b1b0

CHANGELOG.md

Lines changed: 10 additions & 0 deletions
@@ -1,5 +1,15 @@
 # Changelog
 
+## 0.1.0-alpha.10 (2025-06-28)
+
+Full Changelog: [v0.1.0-alpha.9...v0.1.0-alpha.10](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.9...v0.1.0-alpha.10)
+
+### Features
+
+* **api:** manual updates ([0e5effc](https://github.com/digitalocean/gradientai-python/commit/0e5effc727cebe88ea38f0ec4c3fcb45ffeb4924))
+* **api:** manual updates ([d510ae0](https://github.com/digitalocean/gradientai-python/commit/d510ae03f13669af7f47093af06a00609e9b7c07))
+* **api:** manual updates ([c5bc3ca](https://github.com/digitalocean/gradientai-python/commit/c5bc3caa477945dc19bbf90661ffeea86370189d))
+
 ## 0.1.0-alpha.9 (2025-06-28)
 
 Full Changelog: [v0.1.0-alpha.8...v0.1.0-alpha.9](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.8...v0.1.0-alpha.9)

README.md

Lines changed: 86 additions & 37 deletions
@@ -39,12 +39,13 @@ print(api_client.agents.list())
 completion = inference_client.chat.completions.create(
     messages=[
         {
-            "content": "string",
-            "role": "system",
+            "role": "user",
+            "content": "What is the capital of France?",
        }
     ],
-    model="llama3-8b-instruct",
+    model="llama3.3-70b-instruct",
 )
+
 print(completion.choices[0].message)
 
 ```
@@ -72,13 +73,13 @@ async def main() -> None:
     completion = await client.agents.chat.completions.create(
         messages=[
             {
-                "content": "string",
-                "role": "system",
+                "role": "user",
+                "content": "What is the capital of France?",
             }
         ],
-        model="llama3-8b-instruct",
+        model="llama3.3-70b-instruct",
     )
-    print(completion.id)
+    print(completion.choices)
 
 
 asyncio.run(main())
@@ -114,41 +115,61 @@ async def main() -> None:
     completion = await client.agents.chat.completions.create(
         messages=[
             {
-                "content": "string",
-                "role": "system",
+                "role": "user",
+                "content": "What is the capital of France?",
             }
         ],
-        model="llama3-8b-instruct",
+        model="llama3.3-70b-instruct",
     )
-    print(completion.id)
+    print(completion.choices)
 
 
 asyncio.run(main())
 ```
 
-## Streaming
-Support for streaming responses are available by Server Side Events (SSE) for Serverless Inference and Agents.
-```
-import os
+## Streaming responses
+
+We provide support for streaming responses using Server Side Events (SSE).
+
+```python
 from gradientai import GradientAI
 
-client = GradientAI(
-    inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY")
-)
+client = GradientAI()
 
-response = client.chat.completions.create(
+stream = client.agents.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
     model="llama3.3-70b-instruct",
-    messages=[{ "role": "user", "content": "Write a story about a brave squirrel."}],
     stream=True,
 )
+for completion in stream:
+    print(completion.choices)
+```
 
-for chunk in response:
-    if len(chunk.choices) > 0:
-        if chunk.choices[0].delta.content:
-            print(chunk.choices[0].delta.content, end="", flush=True)
+The async client uses the exact same interface.
 
-```
+```python
+from gradientai import AsyncGradientAI
+
+client = AsyncGradientAI()
 
+stream = await client.agents.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
+    stream=True,
+)
+async for completion in stream:
+    print(completion.choices)
+```
 
 ## Using types
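Aside: the updated streaming examples above print each raw chunk's `choices`. A minimal sketch of stitching the streamed text back into one string; it assumes the chunks keep the OpenAI-style `choices[0].delta.content` shape used by the pre-change README snippet this commit removes, which this diff does not itself confirm for the agents path:

```python
from gradientai import GradientAI

client = GradientAI()

# Stream a chat completion and accumulate the delta fragments into one string.
# The `choices[0].delta.content` access pattern is borrowed from the removed
# README example; treat it as an assumption, not documented API.
stream = client.agents.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="llama3.3-70b-instruct",
    stream=True,
)

text = ""
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        text += chunk.choices[0].delta.content

print(text)
```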

@@ -197,8 +218,14 @@ from gradientai import GradientAI
 client = GradientAI()
 
 try:
-    client.agents.versions.list(
-        uuid="REPLACE_ME",
+    client.agents.chat.completions.create(
+        messages=[
+            {
+                "role": "user",
+                "content": "What is the capital of France?",
+            }
+        ],
+        model="llama3.3-70b-instruct",
     )
 except gradientai.APIConnectionError as e:
     print("The server could not be reached")
@@ -242,8 +269,14 @@ client = GradientAI(
 )
 
 # Or, configure per-request:
-client.with_options(max_retries=5).agents.versions.list(
-    uuid="REPLACE_ME",
+client.with_options(max_retries=5).agents.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
 )
 ```

@@ -267,8 +300,14 @@ client = GradientAI(
 )
 
 # Override per-request:
-client.with_options(timeout=5.0).agents.versions.list(
-    uuid="REPLACE_ME",
+client.with_options(timeout=5.0).agents.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
 )
 ```

@@ -310,13 +349,17 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
 from gradientai import GradientAI
 
 client = GradientAI()
-response = client.agents.versions.with_raw_response.list(
-    uuid="REPLACE_ME",
+response = client.agents.chat.completions.with_raw_response.create(
+    messages=[{
+        "role": "user",
+        "content": "What is the capital of France?",
+    }],
+    model="llama3.3-70b-instruct",
 )
 print(response.headers.get('X-My-Header'))
 
-version = response.parse()  # get the object that `agents.versions.list()` would have returned
-print(version.agent_versions)
+completion = response.parse()  # get the object that `agents.chat.completions.create()` would have returned
+print(completion.choices)
 ```
 
 These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
@@ -330,8 +373,14 @@ The above interface eagerly reads the full response body when you make the reque
 To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
 
 ```python
-with client.agents.versions.with_streaming_response.list(
-    uuid="REPLACE_ME",
+with client.agents.chat.completions.with_streaming_response.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of France?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
 ) as response:
     print(response.headers.get("X-My-Header"))
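The diff view cuts this hunk off here. A short sketch of how the updated call might be used end to end; the `iter_lines()` loop is an illustration built from the method list in the paragraph above, not something this diff shows:

```python
from gradientai import GradientAI

client = GradientAI()

# Nothing is read from the connection until one of the read methods is called.
with client.agents.chat.completions.with_streaming_response.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="llama3.3-70b-instruct",
) as response:
    print(response.headers.get("X-My-Header"))
    # Consume the body incrementally, line by line.
    for line in response.iter_lines():
        print(line)
```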

api.md

Lines changed: 14 additions & 14 deletions
@@ -253,6 +253,20 @@ Methods:
 - <code title="post /v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}">client.agents.routes.<a href="./src/gradientai/resources/agents/routes.py">add</a>(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*<a href="src/gradientai/types/agents/route_add_params.py">params</a>) -> <a href="./src/gradientai/types/agents/route_add_response.py">RouteAddResponse</a></code>
 - <code title="get /v2/gen-ai/agents/{uuid}/child_agents">client.agents.routes.<a href="./src/gradientai/resources/agents/routes.py">view</a>(uuid) -> <a href="./src/gradientai/types/agents/route_view_response.py">RouteViewResponse</a></code>
 
+# Chat
+
+## Completions
+
+Types:
+
+```python
+from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse
+```
+
+Methods:
+
+- <code title="post /chat/completions">client.chat.completions.<a href="./src/gradientai/resources/chat/completions.py">create</a>(\*\*<a href="src/gradientai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/gradientai/types/chat/completion_create_response.py">CompletionCreateResponse</a></code>
+
 # ModelProviders
 
 ## Anthropic
@@ -389,20 +403,6 @@ Methods:
 - <code title="get /v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources">client.knowledge_bases.indexing_jobs.<a href="./src/gradientai/resources/knowledge_bases/indexing_jobs.py">retrieve_data_sources</a>(indexing_job_uuid) -> <a href="./src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py">IndexingJobRetrieveDataSourcesResponse</a></code>
 - <code title="put /v2/gen-ai/indexing_jobs/{uuid}/cancel">client.knowledge_bases.indexing_jobs.<a href="./src/gradientai/resources/knowledge_bases/indexing_jobs.py">update_cancel</a>(path_uuid, \*\*<a href="src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py">params</a>) -> <a href="./src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py">IndexingJobUpdateCancelResponse</a></code>
 
-# Chat
-
-## Completions
-
-Types:
-
-```python
-from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse
-```
-
-Methods:
-
-- <code title="post /chat/completions">client.chat.completions.<a href="./src/gradientai/resources/chat/completions.py">create</a>(\*\*<a href="src/gradientai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/gradientai/types/chat/completion_create_response.py">CompletionCreateResponse</a></code>
-
 # Inference
 
 ## APIKeys
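The `# Chat` section itself is unchanged by this commit; it only moves earlier in api.md. For orientation, a minimal sketch of the one method it documents, with the message and model values borrowed from this commit's README examples (so treat them as illustrative):

```python
from gradientai import GradientAI
from gradientai.types.chat import CompletionCreateResponse

client = GradientAI()

# POST /chat/completions via the top-level chat resource listed in api.md.
completion: CompletionCreateResponse = client.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="llama3.3-70b-instruct",
)
print(completion.choices)
```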

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python"
-version = "0.1.0-alpha.9"
+version = "0.1.0-alpha.10"
 description = "The official Python library for GradientAI"
 dynamic = ["readme"]
 license = "Apache-2.0"
