diff --git a/examples/prompty/basic/prompty-quickstart.ipynb b/examples/prompty/basic/prompty-quickstart.ipynb index d073c880033..59740d73b3b 100644 --- a/examples/prompty/basic/prompty-quickstart.ipynb +++ b/examples/prompty/basic/prompty-quickstart.ipynb @@ -79,10 +79,52 @@ "metadata": {}, "outputs": [], "source": [ - "from promptflow.core import Flow\n", + "from promptflow.core import Prompty\n", "\n", "# load prompty as a flow\n", - "f = Flow.load(\"basic.prompty\")\n", + "f = Prompty.load(source=\"basic.prompty\")\n", + "\n", + "# execute the flow as function\n", + "result = f(\n", + " first_name=\"John\", last_name=\"Doe\", question=\"What is the capital of France?\"\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can override configuration with `AzureOpenAIModelConfiguration` and `OpenAIModelConfiguration`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.core import AzureOpenAIModelConfiguration, OpenAIModelConfiguration\n", + "\n", + "# override configuration with AzureOpenAIModelConfiguration\n", + "configuration = AzureOpenAIModelConfiguration(\n", + " azure_endpoint=\"${env:AZURE_OPENAI_ENDPOINT}\", # Use ${env:} to surround the environment variable name.\n", + " api_key=\"${env:AZURE_OPENAI_API_KEY}\",\n", + " azure_deployment=\"gpt-35-turbo\",\n", + ")\n", + "\n", + "# override configuration with OpenAIModelConfiguration\n", + "# configuration = OpenAIModelConfiguration(\n", + "# base_url=\"${env:OPENAI_BASE_URL}\",\n", + "# api_key=\"${env:OPENAI_API_KEY}\",\n", + "# model=\"gpt-3.5-turbo\"\n", + "# )\n", + "\n", + "override_model = {\"configuration\": configuration, \"parameters\": {\"max_tokens\": 512}}\n", + "\n", + "# load prompty as a flow\n", + "f = Prompty.load(source=\"basic.prompty\", model=override_model)\n", + "\n", "# execute the flow as function\n", "result = f(\n", " first_name=\"John\", 
last_name=\"Doe\", question=\"What is the capital of France?\"\n", @@ -143,7 +185,7 @@ "outputs": [], "source": [ "# load prompty as a flow\n", - "eval_flow = Flow.load(\"../eval-basic/eval.prompty\")\n", + "eval_flow = Prompty.load(\"../eval-basic/eval.prompty\")\n", "# execute the flow as function\n", "result = eval_flow(\n", " question=question, ground_truth=ground_truth, answer=result[\"answer\"]\n", diff --git a/examples/prompty/chat-basic/chat-with-prompty.ipynb b/examples/prompty/chat-basic/chat-with-prompty.ipynb index 40fd9e4e498..17fe8e332dd 100644 --- a/examples/prompty/chat-basic/chat-with-prompty.ipynb +++ b/examples/prompty/chat-basic/chat-with-prompty.ipynb @@ -114,10 +114,49 @@ "metadata": {}, "outputs": [], "source": [ - "from promptflow.core import Flow\n", + "from promptflow.core import Prompty\n", "\n", "# load prompty as a flow\n", - "f = Flow.load(\"chat.prompty\")\n", + "f = Prompty.load(\"chat.prompty\")\n", + "# execute the flow as function\n", + "question = \"What is the capital of France?\"\n", + "result = f(first_name=\"John\", last_name=\"Doe\", question=question)\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can override connection with `AzureOpenAIModelConfiguration` and `OpenAIModelConfiguration`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.core import AzureOpenAIModelConfiguration, OpenAIModelConfiguration\n", + "\n", + "\n", + "# override configuration with created connection in AzureOpenAIModelConfiguration\n", + "configuration = AzureOpenAIModelConfiguration(\n", + " connection=connection, azure_deployment=\"gpt-35-turbo\"\n", + ")\n", + "\n", + "# override openai connection with OpenAIModelConfiguration\n", + "# configuration = OpenAIModelConfiguration(\n", + "# connection=connection,\n", + "# model=\"gpt-3.5-turbo\"\n", + "# )\n", + "\n", + "override_model = {\n", + " \"configuration\": configuration,\n", + "}\n", + "\n", + "# load prompty as a flow\n", + "f = Prompty.load(\"chat.prompty\", model=override_model)\n", "# execute the flow as function\n", "question = \"What is the capital of France?\"\n", "result = f(first_name=\"John\", last_name=\"Doe\", question=question)\n", @@ -189,7 +228,7 @@ "outputs": [], "source": [ "# load prompty as a flow\n", - "eval_flow = Flow.load(eval_prompty)\n", + "eval_flow = Prompty.load(eval_prompty)\n", "# execute the flow as function\n", "result = eval_flow(question=question, answer=result[\"answer\"], messages=[])\n", "result"