CHANGELOG.md (1 change: 1 addition & 0 deletions)

@@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
 
 ### Added
 
+- Add host param to OllamaLLM construction to properly connect the internal AsyncClient to it. (#125)
 - [Feature] Add id to ToolCall and tool_call_id to ToolCallResult (#119)
 
 ### Changed
src/llm_agents_from_scratch/llms/ollama/llm.py (11 changes: 9 additions & 2 deletions)

@@ -25,17 +25,24 @@ class OllamaLLM(BaseLLM):
     Integration to `ollama` library for running open source models locally.
     """
 
-    def __init__(self, model: str, *args: Any, **kwargs: Any) -> None:
+    def __init__(
+        self,
+        model: str,
+        host: str | None = None,
+        *args: Any,
+        **kwargs: Any,
+    ) -> None:
         """Create an OllamaLLM instance.
 
         Args:
             model (str): The name of the LLM model.
+            host (str | None): Host of the running Ollama service. Defaults to None.
             *args (Any): Additional positional arguments.
             **kwargs (Any): Additional keyword arguments.
         """
         super().__init__(*args, **kwargs)
         self.model = model
-        self._client = AsyncClient()
+        self._client = AsyncClient(host=host)
 
     async def complete(self, prompt: str, **kwargs: Any) -> CompleteResult:
         """Complete a prompt with an Ollama LLM.
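For context, a minimal usage sketch of the new `host` parameter. The import path is inferred from the file path in this diff, the model name and remote URL are placeholders, and the default-host behavior noted in the comments is that of the `ollama` Python client, which falls back to the `OLLAMA_HOST` environment variable and then to `http://127.0.0.1:11434`:

```python
from llm_agents_from_scratch.llms.ollama.llm import OllamaLLM

# Without host, the internal AsyncClient() resolves the endpoint from the
# OLLAMA_HOST environment variable, falling back to http://127.0.0.1:11434.
local_llm = OllamaLLM(model="llama3.2")

# With this change, host is forwarded to the internal AsyncClient, so the
# instance can talk to an Ollama service running on another machine.
remote_llm = OllamaLLM(model="llama3.2", host="http://ollama.internal:11434")
```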