Skip to content

Commit fe02e8d

Browse files
committed
Add basic function calling example using a llama-cli python wrapper
1 parent 70392f1 commit fe02e8d

File tree

4 files changed

+261
-0
lines changed

4 files changed

+261
-0
lines changed

examples/function-calling/README.md

+46
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
# llama.cpp/examples/function-calling
2+
3+
This example shows how to do basic function calling using llama-cli and a python wrapper to declare and call functions.
4+
5+
## Options
6+
7+
Important options for llama-cli-function-runner.py:
8+
9+
- `-m FNAME, --model FNAME`: Specify the path to the function calling model (e.g., `-m "$(huggingface-cli download meetkai/functionary-small-v3.2-GGUF functionary-small-v3.2.Q4_0.gguf)"`).
10+
- `--ctx-size N`: Set the size of the prompt context. The default is 1024.
11+
- `--special`: show special tokens and function calling details
12+
13+
## Example showing function call details
14+
15+
```
16+
./examples/function-calling/llama-cli-function-runner.py -m `huggingface-cli download meetkai/functionary-small-v3.2-GGUF functionary-small-v3.2.Q4_0.gguf` -i --special
17+
What is the weather in Phoenix?
18+
Sure, I'll look that up for you. Let me just check the current weather conditions in Phoenix.>>>get_weather
19+
{"location": "Phoenix"}<|eot_id|>
20+
{"temperature": "30C"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
21+
The current weather in Phoenix is 30C.<|eot_id|>
22+
What is 38484 + 323?
23+
Sure, let's calculate that.>>>calculate
24+
{"expression": "38484 + 323"}<|eot_id|>
25+
{"result": 38807}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
26+
The sum of 38484 and 323 is 38807.<|eot_id|>
27+
What is 67 feet in meters?
28+
To convert 67 feet into meters, we use the conversion factor: 1 foot is approximately 0.3048 meters. Let's calculate it.>>>calculate
29+
{"expression": "67 * 0.3048"}<|eot_id|>
30+
{"result": 20.4216}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
31+
67 feet is approximately 20.4216 meters.<|eot_id|>
32+
```
33+
34+
## Function calling example, hiding details
35+
```
36+
./examples/function-calling/llama-cli-function-runner.py -m `huggingface-cli download meetkai/functionary-small-v3.2-GGUF functionary-small-v3.2.Q4_0.gguf` -i
37+
What is the weather in Phoenix?
38+
To provide you with the current weather in Phoenix, Arizona, I will need to check the weather data for you. Let me get that information.
39+
The current weather in Phoenix, Arizona is 30°C. If you have any more questions about weather in other locations, feel free to ask!
40+
Is it colder in Vegas?
41+
To determine if the current temperature in Las Vegas is colder than in Phoenix, which is currently 30°C, I will need to check the weather data for Las Vegas. Let's find out.
42+
The current weather in Las Vegas, Nevada is also 30°C. Therefore, there is no difference in temperature between Phoenix and Las Vegas at the moment. If you have any more questions or need further assistance, please let me know!
43+
What is 37234 times 39?
44+
To calculate 37234 times 39, I'll perform the multiplication. Let's do that.
45+
The result of multiplying 37234 by 39 is 1,452,126. If you have any more calculations or questions, feel free to ask!
46+
```
+63
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
# Generate function-calling definitions and function schemas
2+
3+
import inspect
4+
import re
5+
6+
# Extract OpenAI function calling style definitions from functions
7+
#
8+
# Generated with: Create a python function to generate the OpenAI function calling definition from a given function, getting the description, parameter type and parameter description from the function documentation, assuming the function documentation contains Sphinx style parameter descriptions, marked with :param.
9+
def get_function_tool_json(func):
    """Build an OpenAI-style function tool definition from *func*.

    The description and the per-parameter descriptions are extracted from the
    function's docstring, which is expected to contain Sphinx-style
    ``:param name: description`` entries.

    :param func: the function to describe
    :return: dict with ``name``, ``description`` and ``parameters`` keys
    """
    # Map Python type names onto JSON-schema type names.
    typemap = {'str': 'string'}

    def get_type(type_name):
        return typemap.get(type_name, type_name)

    # Split the docstring into the leading description plus one chunk per
    # ":param name:" entry.  Guard against a missing docstring (None).
    doc_parts = re.split(r'\n\s*:param[^:]*\s+', (func.__doc__ or '').rstrip())

    function_description = doc_parts[0]
    params_doc = [re.split(r'\:\s*', param_doc, maxsplit=1) for param_doc in doc_parts[1:]]
    params_doc = {param: desc for param, desc in params_doc}

    function_def = {
        'name': func.__name__,
        'description': function_description,
        'parameters': {'type': 'object', 'properties': {}, 'required': []}
    }

    for param_name, param in inspect.signature(func).parameters.items():
        function_def['parameters']['properties'][param_name] = {
            'type': get_type(param.annotation.__name__) if param.annotation is not param.empty else '',
            'description': params_doc.get(param_name, '')
        }
        # Fix: only parameters WITHOUT a default value are required; the
        # original unconditionally appended every parameter.
        if param.default is param.empty:
            function_def['parameters']['required'].append(param_name)

    return function_def
35+
36+
# Generate function definition schema from function definitions
37+
#
38+
# This is from llama-cpp-python, llama_chat_format.py
39+
def generate_schema_from_functions(functions, namespace="functions") -> str:
    """Render a TypeScript-namespace-style schema describing *functions*.

    Each entry in *functions* is an OpenAI-style tool definition dict
    (``name``, ``description``, ``parameters``).  This routine originates
    from llama-cpp-python, llama_chat_format.py.
    """
    parts = [
        "// Supported function definitions that should be called when necessary.\n",
        f"namespace {namespace} {{\n\n",
    ]

    for function in functions:
        parameters = function.get("parameters", {})
        required_params = parameters.get("required", [])

        parts.append(f"// {function.get('description', '')}\n")
        parts.append(f"type {function['name']} = (_: {{\n")

        for param_name, param in parameters.get("properties", {}).items():
            parts.append(f"// {param.get('description', '')}\n")
            # A "?" suffix marks parameters that are not required.
            suffix = "" if param_name in required_params else "?"
            parts.append(f"{param_name}{suffix}: {param.get('type', 'any')},\n")

        parts.append("}) => any;\n\n")

    parts.append(f"}} // namespace {namespace}")
    return "".join(parts)
+30
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
def calculate(expression: str):
    """Evaluate a mathematical expression
    :param expression: The mathematical expression to evaluate
    """
    # NOTE(review): eval() executes arbitrary Python supplied by the model.
    # Acceptable for a local demo, but never expose this to untrusted input.
    try:
        result = eval(expression)
        return {"result": result}
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
    # are not silently swallowed.
    except Exception:
        return {"error": "Invalid expression"}
10+
11+
def get_weather(location: str):
    """get the weather of a location
    :param location: where to get weather.
    """
    # Stub implementation for the demo: every location reports the same reading.
    return dict(temperature="30C")
16+
17+
def _run_python(code):
18+
allowed_globals = { '__builtins__': None, '_': None }
19+
allowed_locals = {}
20+
21+
code = code.splitlines()
22+
code[-1] = f"_ = {code[-1]}"
23+
code = '\n'.join(code)
24+
25+
try:
26+
exec(code, allowed_globals, allowed_locals)
27+
except Exception as e:
28+
return None
29+
30+
return {'result': allowed_locals.get('_', None)}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
#!/usr/bin/env python3
2+
# function calling using llama-cli
3+
4+
import subprocess
5+
import sys
6+
import select
7+
import os
8+
import re
9+
10+
import json
11+
12+
import functions
13+
from function_tool import get_function_tool_json, generate_schema_from_functions
14+
15+
# Expose every public function from the functions module as a callable tool,
# and render their schema for the system prompt.
function_name_list = [name for name in dir(functions) if not name.startswith('_')]
function_lookup = {name: getattr(functions, name) for name in function_name_list}
tools = [get_function_tool_json(fn) for fn in function_lookup.values()]
function_schema = generate_schema_from_functions(tools)

# System prompt in the functionary-small-v3.2 chat format: instructions,
# the generated function schema, then the opening user header.
prompt = """<|start_header_id|>system<|end_header_id|>

You are capable of executing available function(s) if required.
Execute function(s) as needed.
The function calls are not shown in the conversation and should be called covertly to answer questions.
Ask for the required input to:recipient==all
Use JSON for function arguments.
Respond in this format:
>>>${recipient}
${content}
Available functions:
""" + function_schema + """<|eot_id|><|start_header_id|>system<|end_header_id|>

When you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 60.0 seconds. The drive at '/mnt/data' can be used to save and persist user files.<|eot_id|><|start_header_id|>user<|end_header_id|>

"""
35+
36+
def main():
    """Parse wrapper options, launch llama-cli, and hand control to run_loop()."""
    import argparse

    parser = argparse.ArgumentParser(epilog='For more options: llama-cli --help')
    parser.add_argument('--display-prompt', action=argparse.BooleanOptionalAction, default=False)
    parser.add_argument('--special', action=argparse.BooleanOptionalAction, default=False)
    parser.add_argument('--reverse-prompt', type=str, default='<|start_header_id|>user<|end_header_id|>\n')
    parser.add_argument('--ctx-size', type=int, default=1024)
    args, passthrough = parser.parse_known_args()

    if args.display_prompt:
        print(prompt)

    # Any option not recognised above is forwarded to llama-cli unchanged
    # (e.g. -m/--model).  --special is always passed so the wrapper can see
    # the function-call markers; run_loop() decides what the user sees.
    command = [
        './llama-cli', '-i', '-p', prompt,
        '--reverse-prompt', args.reverse_prompt,
        '--escape', '--special', '--no-display-prompt',
        '--log-disable', '--simple-io',
        '--ctx-size', str(args.ctx_size),
        *passthrough,
    ]

    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    # Non-blocking stdout lets run_loop() drain whatever is available
    # without stalling on a partial read.
    if proc.stdout is not None:
        os.set_blocking(proc.stdout.fileno(), False)

    try:
        run_loop(proc, args)
    except KeyboardInterrupt:
        print("\nInterrupted by user.")
    finally:
        proc.terminate()
        proc.wait()
66+
67+
def run_loop(process, args):
    """Relay between the user's terminal and llama-cli, intercepting function calls.

    Watches model output for the functionary call format
    ``>>>name\\n{json args}<|eot_id|>``, executes the named function, and
    feeds the JSON result back to the model.  With --special off, the
    call/result plumbing and special tokens are hidden from the user.
    """
    # Fix: stderr was select()ed on but never read, so one stderr write made
    # select() spin forever and could block the child on a full pipe.
    # Make it non-blocking and drain it below.
    if process.stderr is not None:
        os.set_blocking(process.stderr.fileno(), False)

    pbuffer = ''
    skip_output_until_result = False
    while True:
        readable, _, _ = select.select([process.stdout, process.stderr, sys.stdin], [], [])

        for stream in readable:
            if stream == process.stdout:
                pdata = process.stdout.read()
                if not pdata:
                    continue
                pbuffer += pdata

                if (match := re.search(r'>>>([^\n]*)\n(.*)<\|eot_id\|>', pbuffer, re.S)):
                    # A complete function call is in the buffer.
                    if not args.special:
                        # Fix: the original truncated with match.pos, which is
                        # the search start (always 0) and wiped the whole
                        # chunk.  Map the match start (relative to pbuffer)
                        # into pdata's coordinates instead.
                        cut = match.start() - (len(pbuffer) - len(pdata))
                        pdata = pdata[:max(cut, 0)]
                    pbuffer = ''
                    skip_output_until_result = False

                    tool_name = match.group(1)
                    tool_args = match.group(2)

                    if tool_name == 'python':
                        result = functions._run_python(tool_args)
                    else:
                        try:
                            tool_args = json.loads(tool_args)
                            result = function_lookup[tool_name](**tool_args)
                        # Fix: also catch KeyError (unknown tool name) and
                        # TypeError (arguments not matching the signature);
                        # ValueError covers malformed JSON.
                        except (KeyError, TypeError, ValueError):
                            result = {'error': 'unknown'}

                    # Feed the result back to the model in chat format.
                    result = json.dumps(result) + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>'
                    process.stdin.write(result + '\n')
                    process.stdin.flush()
                    if args.special:
                        pdata += '\n' + result
                elif (n := pdata.find('>>>')) >= 0:
                    # A call has started but is not yet complete; hide output
                    # until the result has been produced.
                    if not args.special:
                        pdata = pdata[:n]
                        skip_output_until_result = True
                elif skip_output_until_result:
                    pdata = ''

                if not args.special:
                    # Strip special tokens such as <|eot_id|> from the display.
                    pdata = re.sub(r'<\|[^\|>]*\|>', '', pdata)
                sys.stdout.write(pdata)
                sys.stdout.flush()

            elif stream == process.stderr:
                # Drain and discard stderr (llama-cli logging) so select()
                # doesn't busy-loop on a perpetually readable stream.
                process.stderr.read()

            elif stream == sys.stdin:
                user_input = sys.stdin.readline()
                if user_input:
                    user_input = user_input.rstrip()
                    process.stdin.write(user_input + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>' + '\n')
                    process.stdin.flush()
119+
120+
# Script entry point.
if __name__ == '__main__':
    main()
122+

0 commit comments

Comments
 (0)