Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "3.0.0"
".": "3.0.1"
}
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
# Changelog

## 3.0.1 (2025-09-24)

Full Changelog: [v3.0.0...v3.0.1](https://github.com/digitalocean/gradient-python/compare/v3.0.0...v3.0.1)

### Bug Fixes

* add proto to default inference url ([#52](https://github.com/digitalocean/gradient-python/issues/52)) ([108d7cb](https://github.com/digitalocean/gradient-python/commit/108d7cb79f4d9046136cbc03cf92056575d04f7a))

## 3.0.0 (2025-09-18)

Full Changelog: [v3.0.0-beta.6...v3.0.0](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.6...v3.0.0)
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "gradient"
version = "3.0.0"
version = "3.0.1"
description = "The official Python library for the Gradient API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down
70 changes: 53 additions & 17 deletions src/gradient/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,10 @@ def __init__(
self._agent_endpoint = agent_endpoint

if inference_endpoint is None:
inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "inference.do-ai.run"
inference_endpoint = (
os.environ.get("GRADIENT_INFERENCE_ENDPOINT")
or "https://inference.do-ai.run"
)
self.inference_endpoint = inference_endpoint

if base_url is None:
Expand Down Expand Up @@ -250,7 +253,9 @@ def default_headers(self) -> dict[str, str | Omit]:

@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"):
if (
self.access_token or self.agent_access_key or self.model_access_key
) and headers.get("Authorization"):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
Expand Down Expand Up @@ -283,10 +288,14 @@ def copy(
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
raise ValueError(
"The `default_headers` and `set_default_headers` arguments are mutually exclusive"
)

if default_query is not None and set_default_query is not None:
raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
raise ValueError(
"The `default_query` and `set_default_query` arguments are mutually exclusive"
)

headers = self._custom_headers
if default_headers is not None:
Expand Down Expand Up @@ -336,10 +345,14 @@ def _make_status_error(
return _exceptions.BadRequestError(err_msg, response=response, body=body)

if response.status_code == 401:
return _exceptions.AuthenticationError(err_msg, response=response, body=body)
return _exceptions.AuthenticationError(
err_msg, response=response, body=body
)

if response.status_code == 403:
return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
return _exceptions.PermissionDeniedError(
err_msg, response=response, body=body
)

if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
Expand All @@ -348,13 +361,17 @@ def _make_status_error(
return _exceptions.ConflictError(err_msg, response=response, body=body)

if response.status_code == 422:
return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
return _exceptions.UnprocessableEntityError(
err_msg, response=response, body=body
)

if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)

if response.status_code >= 500:
return _exceptions.InternalServerError(err_msg, response=response, body=body)
return _exceptions.InternalServerError(
err_msg, response=response, body=body
)
return APIStatusError(err_msg, response=response, body=body)


Expand Down Expand Up @@ -422,7 +439,10 @@ def __init__(
self._agent_endpoint = agent_endpoint

if inference_endpoint is None:
inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "inference.do-ai.run"
inference_endpoint = (
os.environ.get("GRADIENT_INFERENCE_ENDPOINT")
or "https://inference.do-ai.run"
)
self.inference_endpoint = inference_endpoint

if base_url is None:
Expand Down Expand Up @@ -539,7 +559,9 @@ def default_headers(self) -> dict[str, str | Omit]:

@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"):
if (
self.access_token or self.agent_access_key or self.model_access_key
) and headers.get("Authorization"):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
Expand Down Expand Up @@ -572,10 +594,14 @@ def copy(
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
raise ValueError(
"The `default_headers` and `set_default_headers` arguments are mutually exclusive"
)

if default_query is not None and set_default_query is not None:
raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
raise ValueError(
"The `default_query` and `set_default_query` arguments are mutually exclusive"
)

headers = self._custom_headers
if default_headers is not None:
Expand Down Expand Up @@ -625,10 +651,14 @@ def _make_status_error(
return _exceptions.BadRequestError(err_msg, response=response, body=body)

if response.status_code == 401:
return _exceptions.AuthenticationError(err_msg, response=response, body=body)
return _exceptions.AuthenticationError(
err_msg, response=response, body=body
)

if response.status_code == 403:
return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
return _exceptions.PermissionDeniedError(
err_msg, response=response, body=body
)

if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
Expand All @@ -637,13 +667,17 @@ def _make_status_error(
return _exceptions.ConflictError(err_msg, response=response, body=body)

if response.status_code == 422:
return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
return _exceptions.UnprocessableEntityError(
err_msg, response=response, body=body
)

if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)

if response.status_code >= 500:
return _exceptions.InternalServerError(err_msg, response=response, body=body)
return _exceptions.InternalServerError(
err_msg, response=response, body=body
)
return APIStatusError(err_msg, response=response, body=body)


Expand Down Expand Up @@ -862,7 +896,9 @@ def knowledge_bases(
AsyncKnowledgeBasesResourceWithStreamingResponse,
)

return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
return AsyncKnowledgeBasesResourceWithStreamingResponse(
self._client.knowledge_bases
)

@cached_property
def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
Expand Down
2 changes: 1 addition & 1 deletion src/gradient/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "gradient"
__version__ = "3.0.0" # x-release-please-version
__version__ = "3.0.1" # x-release-please-version