|
1 | 1 | import json |
2 | 2 | from collections.abc import AsyncGenerator |
3 | 3 | from dataclasses import dataclass, asdict |
| 4 | +from typing import Any |
4 | 5 |
|
5 | 6 | import httpx |
6 | 7 |
|
7 | 8 | from . import Provider, ProviderError |
| 9 | +from ..config import Setting |
| 10 | + |
| 11 | + |
def dataclass_to_json(obj: Any) -> dict[str, Any]:
    """Serialize a dataclass instance into a JSON-ready dict.

    Fields whose value is ``None`` are omitted so they are not sent to
    the API at all (rather than as explicit nulls).

    :param obj: the dataclass instance to serialize
    :return: mapping of field names to their non-``None`` values
    :raises TypeError: if *obj* is not a dataclass instance
    """
    serialized: dict[str, Any] = {}
    for field_name, value in asdict(obj).items():
        if value is not None:
            serialized[field_name] = value
    return serialized
8 | 22 |
|
9 | 23 |
|
10 | 24 | @dataclass |
@@ -33,29 +47,35 @@ class GenerateRequest: |
33 | 47 | the raw parameter if you are specifying a full templated prompt in your request to the API, and are managing |
34 | 48 | history yourself. JSON mode""" |
35 | 49 |
|
36 | | - def to_json(self): |
37 | | - return json.dumps(asdict(self)) |
| 50 | + |
# System instruction sent with every generate request: it constrains the
# model to emit exactly one executable Bash command, with no code fences,
# explanation, or surrounding tags.
system_prompt = (
    "Based on the following user description, generate a corresponding Bash command. Focus solely on interpreting "
    "the requirements and translating them into a single, executable Bash command. Ensure accuracy and relevance "
    "to the user's description. The output should be a valid Bash command that directly aligns with the user's "
    "intent, ready for execution in a command-line environment. Output nothing except for the command. No code "
    "block, no English explanation, no start/end tags."
)
38 | 58 |
|
39 | 59 |
|
class Ollama(Provider):
    """Provider that generates Bash commands via a local Ollama server."""

    name = "Ollama"

    # Connection and model configuration. These are Setting descriptors,
    # presumably resolved per-instance with user-config overrides falling
    # back to the defaults below — confirm against ..config.Setting.
    host = Setting(default="localhost")
    port = Setting(default=11434)
    model = Setting(default="codellama:13b")
    system_prompt = Setting(default=system_prompt)
46 | 67 |
|
47 | | - model = "codellama:13b" |
48 | | - system_prompt = """Based on the following user description, generate a corresponding Bash command. Focus solely on |
49 | | - interpreting the requirements and translating them into a single, executable Bash command. Ensure accuracy and |
50 | | - relevance to the user's description. The output should be a valid Bash command that directly aligns with the user's |
51 | | - intent, ready for execution in a command-line environment. Output nothing except for the command. No code block, no |
52 | | - English explanation, no start/end tags.""" |
| 68 | + @property |
| 69 | + def endpoint(self): |
| 70 | + # computed property because python descriptors need to be bound to an instance before access |
| 71 | + return f"http://{self.host}:{self.port}/api/generate" |
53 | 72 |
|
54 | 73 | async def generate(self, prompt: str) -> AsyncGenerator[str, None, None]: |
| 74 | + request = GenerateRequest(self.model, prompt, system=system_prompt, stream=True) |
| 75 | + data = dataclass_to_json(request) |
55 | 76 | try: |
56 | | - request = GenerateRequest(self.model, prompt, system=self.system_prompt, stream=True).to_json() |
57 | 77 | async with httpx.AsyncClient() as client: |
58 | | - async with client.stream("POST", self.endpoint, content=request, timeout=20.0) as stream: |
| 78 | + async with client.stream("POST", self.endpoint, json=data, timeout=20.0) as stream: |
59 | 79 | async for line in stream.aiter_lines(): |
60 | 80 | yield json.loads(line)["response"] |
61 | 81 | except (httpx.HTTPError, httpx.StreamError) as e: |
|
0 commit comments