Skip to content

Commit

Permalink
LLM inference examples (#105)
Browse files Browse the repository at this point in the history
Bodo is 6x faster in processing, even with only 3 prompts. This uses Gemini Flash, but you can swap other models and try too.
  • Loading branch information
strangeloopcanon authored Jan 3, 2025
1 parent 88d15d9 commit 63252df
Showing 1 changed file with 158 additions and 0 deletions.
158 changes: 158 additions & 0 deletions examples/llm_inference_speedtest.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processing time: 3.0881738662719727\n",
" prompt \\\n",
"0 What is the capital of France? \n",
"1 What is the meaning of life? \n",
"2 What is the smoothest part of the universe? \n",
"\n",
" response \n",
"0 Paris\\n \n",
"1 There's no single, universally accepted answer... \n",
"2 The smoothest part of the universe, on the lar... \n"
]
}
],
"source": [
"\"\"\"Preprocess and query LLMs:\n",
"- Install llm package using pip install llm\n",
"- Set keys for the model you'd like to use, using llm keys set [MODEL]\n",
"\"\"\"\n",
"\n",
"import pandas as pd\n",
"import time\n",
"from dotenv import load_dotenv\n",
"import llm\n",
"\n",
"load_dotenv()\n",
"MODEL = \"gemini-1.5-flash-8b-latest\"\n",
"model = llm.get_model(MODEL) \n",
"\n",
"def query_model(prompt):\n",
" \"\"\"\n",
" Sends a prompt to the LLM model and returns the response text.\n",
" \"\"\"\n",
" response = model.prompt(prompt) \n",
" return response.text()\n",
"\n",
"def query_model_all(df):\n",
" \"\"\"Clean up prompts and query the model for all prompts in the dataframe.\"\"\"\n",
" t0 = time.time()\n",
" cleaned_prompts = df[\"prompt\"].str.strip().str.lower()\n",
" df[\"response\"] = cleaned_prompts.map(query_model)\n",
" print(\"Processing time:\", time.time() - t0)\n",
" return df\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" raw_prompts = [\n",
" \" What is the capital of France? \",\n",
" \" What is the meaning of life? \",\n",
" \" What is the smoothest part of the universe?\",\n",
" ]\n",
"\n",
" # Three test prompts with stray whitespace, cleaned up before querying\n",
" df = pd.DataFrame({\"prompt\": raw_prompts})\n",
" out_df = query_model_all(df)\n",
" print(out_df)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processing time: 0.4763470000000325\n",
" prompt response\n",
"0 What is the capital of France? Paris\n",
"\n",
"1 What is the meaning of life in one word? Purpose\n",
"\n",
"2 What is the smoothest part of the universe, i... Cosmic\n",
"\n"
]
}
],
"source": [
"\"\"\"Preprocess and query LLMs, Bodo edition:\n",
"- Install llm package using pip install llm\n",
"- Set keys for the model you'd like to use, using llm keys set [MODEL]\n",
"\"\"\"\n",
"\n",
"import pandas as pd\n",
"import bodo\n",
"import time\n",
"from dotenv import load_dotenv\n",
"import llm\n",
"\n",
"load_dotenv()\n",
"MODEL = \"gemini-1.5-flash-8b-latest\"\n",
"model = llm.get_model(MODEL) \n",
"\n",
"@bodo.wrap_python(bodo.string_type)\n",
"def query_model(prompt):\n",
" \"\"\"\n",
" Sends a prompt to the LLM model and returns the response text.\n",
" \"\"\"\n",
" response = model.prompt(prompt) \n",
" return response.text()\n",
"\n",
"@bodo.jit\n",
"def query_model_all(df):\n",
" \"\"\"Clean up prompts and query the model for all prompts in the dataframe.\"\"\"\n",
" t0 = time.time()\n",
" cleaned_prompts = df[\"prompt\"].str.strip().str.lower()\n",
" df[\"response\"] = cleaned_prompts.map(query_model)\n",
" print(\"Processing time:\", time.time() - t0)\n",
" return df\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" raw_prompts = [\n",
" \" What is the capital of France? \",\n",
" \" What is the meaning of life? \",\n",
" \" What is the smoothest part of the universe?\",\n",
" ]\n",
"\n",
" # Three test prompts with stray whitespace, cleaned up before querying\n",
" df = pd.DataFrame({\"prompt\": raw_prompts})\n",
" out_df = query_model_all(df)\n",
" print(out_df)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "global_env",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

0 comments on commit 63252df

Please sign in to comment.