Commit 691923d

feat(api): update via SDK Studio

1 parent 5a6d480 · commit 691923d

342 files changed: +342 −47572 lines changed
.stats.yml
Lines changed: 3 additions & 3 deletions

@@ -1,4 +1,4 @@
-configured_endpoints: 126
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-bdf24159c6ebb5402d6c05a5165cb1501dc37cf6c664baa9eb318efb0f89dddd.yml
-openapi_spec_hash: 686329a97002025d118dc2367755c18d
+configured_endpoints: 4
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml
+openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a
 config_hash: 2da74b81015f4ef6cad3a0bcb9025834

README.md
Lines changed: 75 additions & 33 deletions
@@ -33,8 +33,16 @@ client = DigitaloceanGenaiSDK(
     ),  # This is the default and can be omitted
 )
 
-assistants = client.assistants.list()
-print(assistants.first_id)
+create_response = client.chat.completions.create(
+    messages=[
+        {
+            "content": "string",
+            "role": "system",
+        }
+    ],
+    model="llama3-8b-instruct",
+)
+print(create_response.id)
 ```
 
 While you can provide an `api_key` keyword argument,
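Assembled with its surrounding README context, the new synchronous quick-start reads roughly as follows. This is a sketch: the import and client construction follow the unchanged context lines above, and the environment variable name in the fallback is an assumption, since the hunk truncates before showing it.

```python
import os

from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK(
    # The hunk's context notes this argument "is the default and can be
    # omitted"; the exact environment variable name is assumed here.
    api_key=os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY"),
)

create_response = client.chat.completions.create(
    messages=[
        {
            "content": "string",
            "role": "system",
        }
    ],
    model="llama3-8b-instruct",
)
print(create_response.id)
```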
@@ -59,8 +67,16 @@ client = AsyncDigitaloceanGenaiSDK(
 
 
 async def main() -> None:
-    assistants = await client.assistants.list()
-    print(assistants.first_id)
+    create_response = await client.chat.completions.create(
+        messages=[
+            {
+                "content": "string",
+                "role": "system",
+            }
+        ],
+        model="llama3-8b-instruct",
+    )
+    print(create_response.id)
 
 
 asyncio.run(main())
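Likewise, the async variant assembles into a self-contained script. A sketch: the `AsyncDigitaloceanGenaiSDK` class name comes from the hunk header, while the imports are filled in by convention.

```python
import asyncio

from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK

client = AsyncDigitaloceanGenaiSDK()  # credentials resolve as in the sync example


async def main() -> None:
    create_response = await client.chat.completions.create(
        messages=[
            {
                "content": "string",
                "role": "system",
            }
        ],
        model="llama3-8b-instruct",
    )
    print(create_response.id)


asyncio.run(main())
```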
@@ -86,31 +102,19 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK
 
 client = DigitaloceanGenaiSDK()
 
-assistant_object = client.assistants.create(
-    model="gpt-4o",
-    tool_resources={},
+create_response = client.chat.completions.create(
+    messages=[
+        {
+            "content": "string",
+            "role": "system",
+        }
+    ],
+    model="llama3-8b-instruct",
+    stream_options={},
 )
-print(assistant_object.tool_resources)
+print(create_response.stream_options)
 ```
 
-## File uploads
-
-Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`.
-
-```python
-from pathlib import Path
-from digitalocean_genai_sdk import DigitaloceanGenaiSDK
-
-client = DigitaloceanGenaiSDK()
-
-client.audio.transcribe_audio(
-    file=Path("/path/to/file"),
-    model="gpt-4o-transcribe",
-)
-```
-
-The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
-
 ## Handling errors
 
 When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised.
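The nested-params example above passes an empty `stream_options={}`. In Stainless-generated SDKs such nested parameters are plain dicts (TypedDicts), so a populated call would look roughly like this; `include_usage` is the OpenAI-compatible field name and is an assumption, not something this diff confirms:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

create_response = client.chat.completions.create(
    messages=[
        {
            "content": "string",
            "role": "system",
        }
    ],
    model="llama3-8b-instruct",
    # Nested parameters are passed as plain dicts; the `include_usage`
    # field is assumed for illustration.
    stream_options={"include_usage": True},
)
print(create_response.stream_options)
```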
@@ -127,7 +131,15 @@ from digitalocean_genai_sdk import DigitaloceanGenaiSDK
 client = DigitaloceanGenaiSDK()
 
 try:
-    client.assistants.list()
+    client.chat.completions.create(
+        messages=[
+            {
+                "content": "string",
+                "role": "system",
+            }
+        ],
+        model="llama3-8b-instruct",
+    )
 except digitalocean_genai_sdk.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
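The hunk shows only the `APIConnectionError` branch. Stainless-generated clients conventionally also raise `digitalocean_genai_sdk.APIStatusError` for non-success HTTP responses, with `status_code` and `response` attributes; a sketch under that assumption:

```python
import digitalocean_genai_sdk
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

try:
    client.chat.completions.create(
        messages=[{"content": "string", "role": "system"}],
        model="llama3-8b-instruct",
    )
except digitalocean_genai_sdk.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
except digitalocean_genai_sdk.APIStatusError as e:
    # Assumed error class, following the usual Stainless SDK convention.
    print("A non-success status code was received")
    print(e.status_code)
    print(e.response)
```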
@@ -170,7 +182,15 @@ client = DigitaloceanGenaiSDK(
 )
 
 # Or, configure per-request:
-client.with_options(max_retries=5).assistants.list()
+client.with_options(max_retries=5).chat.completions.create(
+    messages=[
+        {
+            "content": "string",
+            "role": "system",
+        }
+    ],
+    model="llama3-8b-instruct",
+)
 ```
 
 ### Timeouts
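For context on the per-request override in the retries hunk above: the hunk header (`client = DigitaloceanGenaiSDK(`) points at a client-level default that the diff does not show. A sketch of both levels, assuming the usual Stainless `max_retries` constructor argument:

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

# Client-level default for all requests (constructor argument assumed,
# per the usual Stainless convention):
client = DigitaloceanGenaiSDK(
    max_retries=0,  # disable retries entirely for this client
)

# Per-request override, as in the updated README example:
client.with_options(max_retries=5).chat.completions.create(
    messages=[{"content": "string", "role": "system"}],
    model="llama3-8b-instruct",
)
```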
@@ -193,7 +213,15 @@ client = DigitaloceanGenaiSDK(
 )
 
 # Override per-request:
-client.with_options(timeout=5.0).assistants.list()
+client.with_options(timeout=5.0).chat.completions.create(
+    messages=[
+        {
+            "content": "string",
+            "role": "system",
+        }
+    ],
+    model="llama3-8b-instruct",
+)
 ```
 
 On timeout, an `APITimeoutError` is thrown.
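Beyond the scalar `timeout=5.0` override shown in the timeouts hunk, Stainless-generated clients generally accept an `httpx.Timeout` object for granular control; a sketch under that assumption:

```python
import httpx
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

# Granular control (assumed supported, as in other Stainless SDKs):
# 20s for each phase unless overridden, 5s to establish a connection.
client = DigitaloceanGenaiSDK(
    timeout=httpx.Timeout(20.0, connect=5.0),
)

# Per-request scalar override, as in the README example above:
client.with_options(timeout=5.0).chat.completions.create(
    messages=[{"content": "string", "role": "system"}],
    model="llama3-8b-instruct",
)
```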
@@ -234,11 +262,17 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
 from digitalocean_genai_sdk import DigitaloceanGenaiSDK
 
 client = DigitaloceanGenaiSDK()
-response = client.assistants.with_raw_response.list()
+response = client.chat.completions.with_raw_response.create(
+    messages=[{
+        "content": "string",
+        "role": "system",
+    }],
+    model="llama3-8b-instruct",
+)
 print(response.headers.get('X-My-Header'))
 
-assistant = response.parse()  # get the object that `assistants.list()` would have returned
-print(assistant.first_id)
+completion = response.parse()  # get the object that `chat.completions.create()` would have returned
+print(completion.id)
 ```
 
 These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object.
@@ -252,7 +286,15 @@ The above interface eagerly reads the full response body when you make the reque
 To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
 
 ```python
-with client.assistants.with_streaming_response.list() as response:
+with client.chat.completions.with_streaming_response.create(
+    messages=[
+        {
+            "content": "string",
+            "role": "system",
+        }
+    ],
+    model="llama3-8b-instruct",
+) as response:
     print(response.headers.get("X-My-Header"))
 
     for line in response.iter_lines():
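The streaming hunk's trailing context stops at the `for` line. Assembled as a complete snippet it would read roughly as below; the loop body is assumed for illustration, since the diff does not include it.

```python
from digitalocean_genai_sdk import DigitaloceanGenaiSDK

client = DigitaloceanGenaiSDK()

with client.chat.completions.with_streaming_response.create(
    messages=[
        {
            "content": "string",
            "role": "system",
        }
    ],
    model="llama3-8b-instruct",
) as response:
    print(response.headers.get("X-My-Header"))

    # The diff's context ends at the `for` line; printing each streamed
    # line is an assumed, illustrative body.
    for line in response.iter_lines():
        print(line)
```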
