diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b069996..08e82c4 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.1.0-alpha.14"
+  ".": "0.1.0-alpha.15"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 6701b4a..be8b2ba 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 22
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/opencode%2Fopencode-7270b9e4859010d6680bcc92afcd6f7c679d80a2645f65d7097d19ce2e8cdc5a.yml
 openapi_spec_hash: 5fcbfaedebfea62c17c74437a9728b04
-config_hash: 931828b5dd9393834a3c1703e02e02b0
+config_hash: 38a50dff50297c2f735b5e1c83fa0188
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 424664c..824965a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.1.0-alpha.15 (2025-07-15)
+
+Full Changelog: [v0.1.0-alpha.14...v0.1.0-alpha.15](https://github.com/sst/opencode-sdk-python/compare/v0.1.0-alpha.14...v0.1.0-alpha.15)
+
+### Features
+
+* **api:** api update ([88bbf66](https://github.com/sst/opencode-sdk-python/commit/88bbf66c1f6ec7266fccb7f8e3265bb074afd5e6))
+
 ## 0.1.0-alpha.14 (2025-07-15)
 
 Full Changelog: [v0.1.0-alpha.13...v0.1.0-alpha.14](https://github.com/sst/opencode-sdk-python/compare/v0.1.0-alpha.13...v0.1.0-alpha.14)
diff --git a/README.md b/README.md
index c6ee56a..13677b3 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ from opencode_ai import Opencode
 
 client = Opencode()
 
-events = client.event.list()
+sessions = client.session.list()
 ```
 
 ## Async usage
@@ -44,7 +44,7 @@ client = AsyncOpencode()
 
 
 async def main() -> None:
-    events = await client.event.list()
+    sessions = await client.session.list()
 
 
 asyncio.run(main())
@@ -75,38 +75,12 @@ async def main() -> None:
     async with AsyncOpencode(
         http_client=DefaultAioHttpClient(),
     ) as client:
-        events = await client.event.list()
+        sessions = await client.session.list()
 
 
 asyncio.run(main())
 ```
 
-## Streaming responses
-
-We provide support for streaming responses using Server Side Events (SSE).
-
-```python
-from opencode_ai import Opencode
-
-client = Opencode()
-
-stream = client.event.list()
-for events in stream:
-    print(events)
-```
-
-The async client uses the exact same interface.
-
-```python
-from opencode_ai import AsyncOpencode
-
-client = AsyncOpencode()
-
-stream = await client.event.list()
-async for events in stream:
-    print(events)
-```
-
 ## Using types
 
 Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
@@ -132,7 +106,7 @@ from opencode_ai import Opencode
 client = Opencode()
 
 try:
-    client.event.list()
+    client.session.list()
 except opencode_ai.APIConnectionError as e:
     print("The server could not be reached")
     print(e.__cause__)  # an underlying Exception, likely raised within httpx.
@@ -175,7 +149,7 @@ client = Opencode(
 )
 
 # Or, configure per-request:
-client.with_options(max_retries=5).event.list()
+client.with_options(max_retries=5).session.list()
 ```
 
 ### Timeouts
@@ -198,7 +172,7 @@ client = Opencode(
 )
 
 # Override per-request:
-client.with_options(timeout=5.0).event.list()
+client.with_options(timeout=5.0).session.list()
 ```
 
 On timeout, an `APITimeoutError` is thrown.
@@ -239,11 +213,11 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
 from opencode_ai import Opencode
 
 client = Opencode()
-response = client.event.with_raw_response.list()
+response = client.session.with_raw_response.list()
 print(response.headers.get('X-My-Header'))
 
-event = response.parse()  # get the object that `event.list()` would have returned
-print(event)
+session = response.parse()  # get the object that `session.list()` would have returned
+print(session)
 ```
 
 These methods return an [`APIResponse`](https://github.com/sst/opencode-sdk-python/tree/main/src/opencode_ai/_response.py) object.
@@ -257,7 +231,7 @@ The above interface eagerly reads the full response body when you make the reque
 To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
 
 ```python
-with client.event.with_streaming_response.list() as response:
+with client.session.with_streaming_response.list() as response:
     print(response.headers.get("X-My-Header"))
 
     for line in response.iter_lines():
diff --git a/pyproject.toml b/pyproject.toml
index d3cbd31..7ec2903 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "opencode-ai"
-version = "0.1.0-alpha.14"
+version = "0.1.0-alpha.15"
 description = "The official Python library for the opencode API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/opencode_ai/_version.py b/src/opencode_ai/_version.py
index 3a796bd..f33ec7c 100644
--- a/src/opencode_ai/_version.py
+++ b/src/opencode_ai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "opencode_ai"
-__version__ = "0.1.0-alpha.14"  # x-release-please-version
+__version__ = "0.1.0-alpha.15"  # x-release-please-version
diff --git a/tests/test_client.py b/tests/test_client.py
index 5fb4ca7..f8225c7 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -686,20 +686,20 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
     @mock.patch("opencode_ai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Opencode) -> None:
-        respx_mock.get("/event").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+        respx_mock.get("/session").mock(side_effect=httpx.TimeoutException("Test timeout error"))
 
         with pytest.raises(APITimeoutError):
-            client.event.with_streaming_response.list().__enter__()
+            client.session.with_streaming_response.list().__enter__()
 
         assert _get_open_connections(self.client) == 0
 
     @mock.patch("opencode_ai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Opencode) -> None:
-        respx_mock.get("/event").mock(return_value=httpx.Response(500))
+        respx_mock.get("/session").mock(return_value=httpx.Response(500))
 
         with pytest.raises(APIStatusError):
-            client.event.with_streaming_response.list().__enter__()
+            client.session.with_streaming_response.list().__enter__()
 
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -726,9 +726,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                 return httpx.Response(500)
             return httpx.Response(200)
 
-        respx_mock.get("/event").mock(side_effect=retry_handler)
+        respx_mock.get("/session").mock(side_effect=retry_handler)
 
-        response = client.event.with_raw_response.list()
+        response = client.session.with_raw_response.list()
 
         assert response.retries_taken == failures_before_success
         assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -750,9 +750,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                 return httpx.Response(500)
             return httpx.Response(200)
 
-        respx_mock.get("/event").mock(side_effect=retry_handler)
+        respx_mock.get("/session").mock(side_effect=retry_handler)
 
-        response = client.event.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
+        response = client.session.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
 
         assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
 
@@ -773,9 +773,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                 return httpx.Response(500)
             return httpx.Response(200)
 
-        respx_mock.get("/event").mock(side_effect=retry_handler)
+        respx_mock.get("/session").mock(side_effect=retry_handler)
 
-        response = client.event.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
+        response = client.session.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
 
         assert response.http_request.headers.get("x-stainless-retry-count") == "42"
 
@@ -1468,10 +1468,10 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
     async def test_retrying_timeout_errors_doesnt_leak(
         self, respx_mock: MockRouter, async_client: AsyncOpencode
     ) -> None:
-        respx_mock.get("/event").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+        respx_mock.get("/session").mock(side_effect=httpx.TimeoutException("Test timeout error"))
 
         with pytest.raises(APITimeoutError):
-            await async_client.event.with_streaming_response.list().__aenter__()
+            await async_client.session.with_streaming_response.list().__aenter__()
 
         assert _get_open_connections(self.client) == 0
 
@@ -1480,10 +1480,10 @@ async def test_retrying_timeout_errors_doesnt_leak(
     async def test_retrying_status_errors_doesnt_leak(
         self, respx_mock: MockRouter, async_client: AsyncOpencode
     ) -> None:
-        respx_mock.get("/event").mock(return_value=httpx.Response(500))
+        respx_mock.get("/session").mock(return_value=httpx.Response(500))
 
         with pytest.raises(APIStatusError):
-            await async_client.event.with_streaming_response.list().__aenter__()
+            await async_client.session.with_streaming_response.list().__aenter__()
 
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -1511,9 +1511,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                 return httpx.Response(500)
             return httpx.Response(200)
 
-        respx_mock.get("/event").mock(side_effect=retry_handler)
+        respx_mock.get("/session").mock(side_effect=retry_handler)
 
-        response = await client.event.with_raw_response.list()
+        response = await client.session.with_raw_response.list()
 
         assert response.retries_taken == failures_before_success
         assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@@ -1536,9 +1536,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                 return httpx.Response(500)
             return httpx.Response(200)
 
-        respx_mock.get("/event").mock(side_effect=retry_handler)
+        respx_mock.get("/session").mock(side_effect=retry_handler)
 
-        response = await client.event.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
+        response = await client.session.with_raw_response.list(extra_headers={"x-stainless-retry-count": Omit()})
 
         assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
 
@@ -1560,9 +1560,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                 return httpx.Response(500)
             return httpx.Response(200)
 
-        respx_mock.get("/event").mock(side_effect=retry_handler)
+        respx_mock.get("/session").mock(side_effect=retry_handler)
 
-        response = await client.event.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
+        response = await client.session.with_raw_response.list(extra_headers={"x-stainless-retry-count": "42"})
 
         assert response.http_request.headers.get("x-stainless-retry-count") == "42"