You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
importinspectimporttracebackfromcopyimportdeepcopyfrompprintimportpformatfromtypesimportGenericAliasfromtypingimportget_origin, Annotated_TOOL_HOOKS= {}
_TOOL_DESCRIPTIONS= {}
defregister_tool(func: callable):
tool_name=func.__name__tool_description=inspect.getdoc(func).strip()
python_params=inspect.signature(func).parametersproperties= []
requiredes= []
forname, paraminpython_params.items():
annotation=param.annotationifannotationisinspect.Parameter.empty:
raiseTypeError(f"Parameter `{name}` missing type annotation")
ifget_origin(annotation) !=Annotated:
raiseTypeError(f"Annotation type for `{name}` must be typing.Annotated")
typ, (description, required) =annotation.__origin__, annotation.__metadata__typ: str=str(typ) ifisinstance(typ, GenericAlias) elsetyp.__name__ifnotisinstance(description, str):
raiseTypeError(f"Description for `{name}` must be a string")
ifnotisinstance(required, bool):
raiseTypeError(f"Required for `{name}` must be a bool")
ifrequired:
requiredes.append(name)
properties.append({
name: {"type": typ, "description": description}
})
tool_def= {
"type": "function",
"function": {
"name": tool_name,
"description": tool_description,
"parameters": {
"type": "object",
"properties": properties,
"required": requiredes
}
}
}
print("[registered tool] "+pformat(tool_def))
_TOOL_HOOKS[tool_name] =func_TOOL_DESCRIPTIONS[tool_name] =tool_defreturnfuncdefdispatch_tool(tool_name: str, tool_params: dict) ->str:
iftool_namenotin_TOOL_HOOKS:
returnf"Tool `{tool_name}` not found. Please use a provided tool."tool_call=_TOOL_HOOKS[tool_name]
try:
ret=tool_call(**tool_params)
except:
ret=traceback.format_exc()
returnstr(ret)
def get_tools() -> list:
    """Return deep copies of every registered tool schema.

    Copies are returned so callers cannot mutate the live registry.
    """
    return [deepcopy(tool_def) for tool_def in _TOOL_DESCRIPTIONS.values()]
# Tool Definitions

@register_tool
def random_number_generator(
    seed: Annotated[int, 'The random seed used by the generator', True],
    range: Annotated[tuple[int, int], 'The range of the generated numbers', True],
) -> int:
    """ Generates a random number x, s.t. range[0] <= x < range[1] """
    if not isinstance(seed, int):
        raise TypeError("Seed must be an integer")
    if not isinstance(range, tuple):
        raise TypeError("Range must be a tuple")
    if not isinstance(range[0], int) or not isinstance(range[1], int):
        raise TypeError("Range must be a tuple of integers")
    import random
    # BUG FIX: the docstring (which is sent to the model as the tool contract)
    # promises a half-open interval, but randint() is inclusive of BOTH
    # endpoints; randrange(start, stop) matches range[0] <= x < range[1].
    return random.Random(seed).randrange(*range)
@register_tool
def get_weather(
    city_name: Annotated[str, 'The name of the city to be queried', True],
) -> str:
    """ Get the current weather for `city_name` """
    if not isinstance(city_name, str):
        raise TypeError("City name must be a string")

    # Fields to extract from the wttr.in JSON payload.
    key_selection = {
        "current_condition": ["temp_C", "FeelsLikeC", "humidity", "weatherDesc", "observation_time"],
    }
    import requests
    try:
        # BUG FIX: requests.get() without a timeout can block forever; give the
        # tool a bounded wait so dispatch_tool always returns.
        resp = requests.get(f"https://wttr.in/{city_name}?format=j1", timeout=10)
        resp.raise_for_status()
        resp = resp.json()
        # wttr.in returns a one-element list per section; take element 0.
        ret = {k: {_v: resp[k][0][_v] for _v in v} for k, v in key_selection.items()}
    # BUG FIX: bare `except:` also caught KeyboardInterrupt/SystemExit;
    # deliberately best-effort, so report the error as text instead of raising.
    except Exception:
        import traceback
        ret = "Error encountered while fetching weather data!\n" + traceback.format_exc()
    return str(ret)
if __name__ == "__main__":
    # Demo: dispatch one registered tool call, then dump all tool schemas.
    weather = dispatch_tool("get_weather", {"city_name": "beijing"})
    print(weather)
    print(get_tools())
Your Python version.
python 3.10
The version of xinference you use.
0.72
Versions of crucial packages.
Full stack of the error.
Traceback (most recent call last):
File "/work/miniconda3/envs/chatglm3/lib/python3.10/site-packages/openai/_base_client.py", line 885, in _request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'detail': 'Invalid input. Please specify the prompt.'}
Minimized code to reproduce the error.
Expected behavior
A clear and concise description of what you expected to happen.
I encountered the error again.
xinference, version 0.16.1
i changed the source code and the error is gone
source file :**/site-packages/xinference/api/restful_api.py ,in function "async def create_chat_completion",there are some lines(start line number:1895) which are as follows:
if not messages or messages[-1].get("role") not in ["user", "system", "tool"]:
raise HTTPException(
status_code=400, detail="Invalid input. Please specify the prompt."
)
It requires the role of the last message to be one of `user`, `system`, or `tool`. Sometimes this condition can't be satisfied, and then the error occurred.
To change it, delete `or messages[-1].get("role") not in ["user", "system", "tool"]`. The new lines are:
if not messages :
raise HTTPException(
status_code=400, detail="Invalid input. Please specify the prompt."
)
after the modification,restart xinference
try prompt again,it may work well
Describe the bug
When I test the function call, xinference reports "Invalid input. Please specify the prompt".
To Reproduce
tool_register.py
python 3.10
0.72
Traceback (most recent call last):
File "/work/miniconda3/envs/chatglm3/lib/python3.10/site-packages/openai/_base_client.py", line 885, in _request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'detail': 'Invalid input. Please specify the prompt.'}
Expected behavior
A clear and concise description of what you expected to happen.
Additional context
按照OpenAI的function call规范,第二轮的messages包含 user、assistant、tool三个message,但是被xinference拦截了。
The text was updated successfully, but these errors were encountered: