From 0615af5714181ca3f173af6507698fe6f0484478 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 1 Jan 2025 10:16:28 -0800
Subject: [PATCH] add litellm

---
 python/helpers/perplexity_search.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/helpers/perplexity_search.py b/python/helpers/perplexity_search.py
index b090ced45..69078ec80 100644
--- a/python/helpers/perplexity_search.py
+++ b/python/helpers/perplexity_search.py
@@ -1,11 +1,9 @@
-
-from openai import OpenAI
+import litellm
 import models
 
 def perplexity_search(query:str, model_name="llama-3.1-sonar-large-128k-online",api_key=None,base_url="https://api.perplexity.ai"):
     api_key = api_key or models.get_api_key("perplexity")
-    client = OpenAI(api_key=api_key, base_url=base_url)
 
     messages = [
     #It is recommended to use only single-turn conversations and avoid system prompts for the online LLMs (sonar-small-online and sonar-medium-online).
 
@@ -25,9 +23,11 @@ def perplexity_search(query:str, model_name="llama-3.1-sonar-large-128k-online",
     },
     ]
 
-    response = client.chat.completions.create(
+    response = litellm.completion(
         model=model_name,
         messages=messages, # type: ignore
+        api_key=api_key,
+        base_url=base_url,
     )
     result = response.choices[0].message.content #only the text is returned
     return result
\ No newline at end of file