diff --git a/operate/config.py b/operate/config.py
index b97b20ac..bcadb45b 100644
--- a/operate/config.py
+++ b/operate/config.py
@@ -85,15 +85,15 @@ def initialize_google(self):
     def initialize_ollama(self):
         if self.ollama_host:
             if self.verbose:
-                print("[Config][initialize_ollama] using cached ollama host")
+                print("[Config][initialize_ollama] using ollama host", self.ollama_host)
         else:
             if self.verbose:
                 print(
                     "[Config][initialize_ollama] no cached ollama host. Assuming ollama running locally."
                 )
-        self.ollama_host = os.getenv("OLLAMA_HOST", None)
-        model = Client(host=self.ollama_host)
-        return model
+        self.ollama_host = os.getenv("OLLAMA_HOST", "http://localhost:11434")
+        client = Client(host=self.ollama_host)
+        return client
 
     def initialize_anthropic(self):
         if self.anthropic_api_key:
diff --git a/operate/models/apis.py b/operate/models/apis.py
index d0ccb0c4..96fc890f 100644
--- a/operate/models/apis.py
+++ b/operate/models/apis.py
@@ -563,7 +563,7 @@ def call_ollama_llava(messages):
         print("[call_ollama_llava]")
     time.sleep(1)
     try:
-        model = config.initialize_ollama()
+        client = config.initialize_ollama()
         screenshots_dir = "screenshots"
         if not os.path.exists(screenshots_dir):
             os.makedirs(screenshots_dir)
@@ -590,7 +590,7 @@ def call_ollama_llava(messages):
         }
         messages.append(vision_message)
 
-        response = model.chat(
+        response = client.chat(
             model="llava",
             messages=messages,
         )