 import asyncio
 import http
 import json
+import typing

-import openai
 import pydantic_settings
-from openai.types.chat import (
-    ChatCompletion,
-    ChatCompletionAssistantMessageParam,
-    ChatCompletionMessageParam,
-    ChatCompletionSystemMessageParam,
-    ChatCompletionToolParam,
-    ChatCompletionUserMessageParam,
-)
+import requests

 import mcp
 from mcp.client.streamable_http import streamablehttp_client
@@ -24,11 +17,12 @@
 class Configurations(pydantic_settings.BaseSettings):
     """Define configurations for the sampling client."""

+    base_url: str = "https://api.openai.com/v1"
+    api_key: str = "your_api_key"
     chat_model: str = "gpt-4o-mini"
     max_tokens: int = 1024
     mcp_server_host: str = "localhost"
     mcp_server_port: int = 8000
-    openai_api_key: str = "your_openai_api_key"
     system_prompt: str = "You are a helpful assistant."

     model_config = pydantic_settings.SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")
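With `base_url` and `api_key` promoted to ordinary settings fields, the client can be pointed at any OpenAI-compatible endpoint through the environment or the `.env` file. A minimal sketch, assuming `Configurations` is importable from this module; the local-server URL is an illustrative assumption, not something the diff prescribes:

    import os

    # pydantic-settings matches field names to environment variables
    # case-insensitively, so BASE_URL fills Configurations.base_url.
    os.environ["BASE_URL"] = "http://localhost:11434/v1"  # hypothetical local server
    os.environ["API_KEY"] = "not-needed-locally"          # placeholder credential

    config = Configurations()
    print(config.base_url)  # -> http://localhost:11434/v1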
@@ -47,50 +41,58 @@ def __init__(self: "SamplingClient", config: Configurations) -> None:
         self.config = config

         self.server_url = f"http://{self.config.mcp_server_host}:{self.config.mcp_server_port}/mcp"
-        self.openai_client = openai.OpenAI(api_key=self.config.openai_api_key)
+        self.api_url = f"{self.config.base_url}/chat/completions"

-        self.conversation_history: list[ChatCompletionMessageParam] = []
+        self.conversation_history: list[dict[str, str]] = []

-    def get_openai_response(
+    def get_llm_response(
         self: "SamplingClient",
-        chat_history: list[ChatCompletionMessageParam],
+        chat_history: list[dict[str, str]],
         system_prompt: str,
         max_tokens: int,
-        tools: list[ChatCompletionToolParam] | None = None,
-    ) -> ChatCompletion:
-        """Get a non-streaming response from OpenAI API.
+        tools: list[dict[str, typing.Any]] | None = None,
+    ) -> dict[str, typing.Any]:
55+ """Get a non-streaming response from OpenAI compatible LLM API.

         Parameters
         ----------
-        chat_history : list[ChatCompletionMessageParam]
+        chat_history : list[dict[str, str]]
             The chat history to use for the chat completion.
         system_prompt : str
             The system prompt to use for the chat completion.
         max_tokens : int
             The maximum number of tokens to generate in the response.
-        tools : list[ChatCompletionToolParam] | None, optional
+        tools : list[dict[str, typing.Any]] | None, optional
             The tools to use for the chat completion, by default None.

         Returns
         -------
-        ChatCompletion
-            The response from the OpenAI API.
+        dict[str, typing.Any]
+            The response from the LLM API.
         """
         updated_chat_history = [
-            ChatCompletionSystemMessageParam(content=system_prompt, role="system"),
+            {"content": system_prompt, "role": "system"},
             *chat_history,
         ]

         extra_arguments = {} if tools is None else {"tool_choice": "auto", "tools": tools}

-        chat_completion = self.openai_client.chat.completions.create(
-            messages=updated_chat_history,
-            model=self.config.chat_model,
-            max_completion_tokens=max_tokens,
-            n=1,
-            stream=False,
-            **extra_arguments,
-        )
+        chat_completion = requests.post(
+            self.api_url,
+            json={
+                "messages": updated_chat_history,
+                "model": self.config.chat_model,
+                "max_completion_tokens": max_tokens,
+                "n": 1,
+                "stream": False,
+                **extra_arguments,
+            },
+            headers={
+                "Authorization": f"Bearer {self.config.api_key}",
+                "Content-Type": "application/json",
+            },
+            timeout=300,
+        ).json()

         return chat_completion

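Since the typed `ChatCompletion` object is gone, everything downstream now indexes into the raw JSON dict, so the wire shape matters. A rough, abbreviated sketch of an OpenAI-compatible chat-completions response (field values invented for illustration):

    {
        "choices": [
            {
                "finish_reason": "stop",  # "tool_calls" when the model requests a tool run
                "message": {"role": "assistant", "content": "Hello!", "tool_calls": None},
            }
        ],
        "model": "gpt-4o-mini",
    }

One trade-off of the raw `requests.post(...).json()` call: `requests` does not raise on HTTP error statuses, so a failed call would surface later as a `KeyError` on `"choices"`; calling `response.raise_for_status()` before `.json()` would fail faster.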
@@ -105,7 +107,7 @@ async def fetch_mcp_tools(self: "SamplingClient") -> list[Tool]:
         return server_tools.tools

     @staticmethod
-    def convert_to_openai_tools(mcp_tools: list[Tool]) -> list[ChatCompletionToolParam]:
+    def convert_to_openai_tools(mcp_tools: list[Tool]) -> list[dict[str, typing.Any]]:
         """Convert MCP tools to OpenAI tool call parameters.

         Parameters
@@ -115,29 +117,25 @@ def convert_to_openai_tools(mcp_tools: list[Tool]) -> list[ChatCompletionToolPar

         Returns
         -------
-        list[ChatCompletionToolParam]
+        list[dict[str, typing.Any]]
             List of OpenAI tool call parameters.
         """
         return [
-            ChatCompletionToolParam(
-                function={
-                    "name": tool.name,
-                    "description": tool.description or "",
-                    "parameters": tool.inputSchema,
-                },
-                type="function",
-            )
+            {
+                "function": {"name": tool.name, "description": tool.description or "", "parameters": tool.inputSchema},
+                "type": "function",
+            }
             for tool in mcp_tools
         ]

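To make the conversion concrete: a hypothetical MCP tool named `add` with a two-integer input schema (the schema below is invented for illustration) would come out of `convert_to_openai_tools` as:

    {
        "function": {
            "name": "add",
            "description": "Add two integers.",
            "parameters": {
                "type": "object",
                "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
                "required": ["a", "b"],
            },
        },
        "type": "function",
    }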
     async def sampling_handler(
-        self: "SamplingClient", context: RequestContext, parameters: CreateMessageRequestParams
+        self: "SamplingClient", context: RequestContext[typing.Any, typing.Any], parameters: CreateMessageRequestParams
     ) -> CreateMessageResult | ErrorData:
         """Handle sampling requests for OpenAI API calls with MCP tools.

         Parameters
         ----------
-        context : RequestContext
+        context : RequestContext[typing.Any, typing.Any]
             request context containing information about the sampling request
         parameters : CreateMessageRequestParams
             parameters for the sampling request, including messages and customisations
@@ -149,44 +147,44 @@ async def sampling_handler(
         """
         del context

-        openai_response = self.get_openai_response(
+        openai_response = self.get_llm_response(
             [
-                ChatCompletionUserMessageParam(
-                    content=(
-                        message.content.text if isinstance(message.content, TextContent) else str(message.content)
-                    ),
-                    role="user",
-                )
+                {
+                    "content": message.content.text
+                    if isinstance(message.content, TextContent)
+                    else str(message.content),
+                    "role": "user",
+                }
                 for message in parameters.messages
             ],
             parameters.systemPrompt or self.config.system_prompt,
             parameters.maxTokens,
         )

-        if not (choices := openai_response.choices):
+        if not (choices := openai_response["choices"]):
             return ErrorData(
                 code=http.HTTPStatus.INTERNAL_SERVER_ERROR,
                 message="No choices returned from OpenAI API.",
             )

         choice = choices[0]
-        sampling_response_message = choice.message.content or ""
+        sampling_response_message = choice["message"]["content"] or ""

         return CreateMessageResult(
             role="assistant",
             content=TextContent(type="text", text=sampling_response_message),
             model=self.config.chat_model,
-            stopReason=choice.finish_reason,
+            stopReason=choice["finish_reason"],
         )

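For context, this handler only runs once it is registered with an MCP session. A sketch of the wiring inside a `SamplingClient` method, assuming the streamable-HTTP connection pattern the imports suggest (the SDK's `sampling_callback` parameter is where the handler plugs in; this is not shown in the diff):

    async with streamablehttp_client(self.server_url) as (read_stream, write_stream, _):
        async with mcp.ClientSession(
            read_stream, write_stream, sampling_callback=self.sampling_handler
        ) as session:
            await session.initialize()
            # server-initiated sampling requests now route through
            # sampling_handler above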
-    async def execute_tool_call(self: "SamplingClient", tool_name: str, arguments: dict) -> str:
+    async def execute_tool_call(self: "SamplingClient", tool_name: str, arguments: dict[str, typing.Any]) -> str:
         """Execute a tool call on an MCP server.

         Parameters
         ----------
         tool_name : str
             name of the tool to call, formatted as "mcp-{server_name}-{tool_name}"
-        arguments : dict
+        arguments : dict[str, typing.Any]
             arguments to pass to the tool call

         Returns
@@ -212,69 +210,62 @@ async def execute_tool_call(self: "SamplingClient", tool_name: str, arguments: d

     async def orchestrate(self: "SamplingClient", user_message: str) -> None:
         """Orchestrate the sampling client to handle requests."""
-        self.conversation_history.append(ChatCompletionUserMessageParam(role="user", content=user_message))
+        self.conversation_history.append({"role": "user", "content": user_message})

         self.mcp_server_tools = await self.fetch_mcp_tools()
         self.openai_compatible_tools = self.convert_to_openai_tools(self.mcp_server_tools)

-        openai_response = self.get_openai_response(
+        openai_response = self.get_llm_response(
             self.conversation_history,
             self.config.system_prompt,
             self.config.max_tokens,
             tools=self.openai_compatible_tools,
         )

-        if not (choices := openai_response.choices):
+        if not (choices := openai_response["choices"]):
             error_message = "No choices returned from OpenAI API."
-            self.conversation_history.append(
-                ChatCompletionAssistantMessageParam(role="assistant", content=error_message)
-            )
+            self.conversation_history.append({"role": "assistant", "content": error_message})

             print(error_message)

             return

         choice = choices[0]

-        while choice.finish_reason == "tool_calls":
-            for tool_call in choice.message.tool_calls or []:
-                if tool_call.type != "function":
+        while choice["finish_reason"] == "tool_calls":
+            for tool_call in choice["message"]["tool_calls"] or []:
+                if tool_call["type"] != "function":
                     continue

                 tool_response = await self.execute_tool_call(
-                    tool_call.function.name, json.loads(tool_call.function.arguments)
+                    tool_call["function"]["name"], json.loads(tool_call["function"]["arguments"])
                 )

                 self.conversation_history.append(
-                    ChatCompletionAssistantMessageParam(
-                        role="assistant",
-                        content=f"Tool {tool_call.id} returned: {tool_response}",
-                    )
+                    {"role": "assistant", "content": f"Tool {tool_call['id']} returned: {tool_response}"}
                 )

-            openai_response = self.get_openai_response(
+            openai_response = self.get_llm_response(
                 self.conversation_history,
                 self.config.system_prompt,
                 self.config.max_tokens,
                 tools=self.openai_compatible_tools,
             )

-            if not (choices := openai_response.choices):
+            if not (choices := openai_response["choices"]):
                 error_message = "No choices returned from OpenAI API."
-                self.conversation_history.append(
-                    ChatCompletionAssistantMessageParam(role="assistant", content=error_message)
-                )
+                self.conversation_history.append({"role": "assistant", "content": error_message})

                 print(error_message)

                 return

             choice = choices[0]

-        assistant_message = choice.message.content or ""
+        assistant_message = choice["message"]["content"] or ""
         self.conversation_history.append({"role": "assistant", "content": assistant_message})

-        print(f"Assistant: {choice.message.content}")
+        print(f"Assistant: {assistant_message}")

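Putting it together, a minimal driver for the reworked client might look like the sketch below (hypothetical; the module's real entry point is the `main()` that follows):

    async def demo() -> None:
        client = SamplingClient(Configurations())
        await client.orchestrate("What is 2 + 2? Use a tool if one is available.")

    asyncio.run(demo())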
 def main():