Commit
Merge branch 'main' into mengla/copilot_sample
melionel authored May 9, 2024
2 parents 466cd0a + 3d464e2 commit 93c9a14
Showing 17 changed files with 236 additions and 34 deletions.
16 changes: 12 additions & 4 deletions scripts/json_schema/gen_json_schema.py
@@ -147,6 +147,11 @@ def get_required(self, obj):
from promptflow._sdk.schemas._flow import FlowSchema, FlexFlowSchema


def dump_json(file_name, dct):
with open(file_name, "w") as f:
f.write(json.dumps(dct, indent=4))


if __name__ == "__main__":
example_text = """Example usage:
@@ -180,8 +185,9 @@ def get_required(self, obj):
"type": "object",
"oneOf": schema_list
}
with open((f"Flow.schema.json"), "w") as f:
f.write(json.dumps(schema, indent=4))
dump_json("Flow.schema.json", schema)
# Also dump a copy whose file name starts with a lowercase letter
dump_json("flow.schema.json", schema)
args.output_file.remove("Flow")

prepared_schemas = {}
@@ -197,5 +203,7 @@ def get_required(self, obj):
print(f"Schema not found for {item}")
else:
target_schema = PatchedJSONSchema().dump(item_cls(context={"base_path": "./"}))
with open((f"{item}.schema.json"), "w") as f:
f.write(json.dumps(target_schema, indent=4))
dump_json(f"{item}.schema.json", target_schema)
# Also dump a copy whose file name starts with a lowercase letter
item = item[0].lower() + item[1:]
dump_json(f"{item}.schema.json", target_schema)
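Taken together, the hunks above replace the repeated open/json.dumps boilerplate with a single helper and write every schema twice: once under its original name and once with the first letter lowercased (rebuilt by concatenation, since Python strings are immutable). A minimal standalone sketch of that flow; the "Run" schema name and the placeholder schema dict are made up for illustration:

import json

def dump_json(file_name, dct):
    # Serialize the schema dict with 4-space indentation, as in the helper above.
    with open(file_name, "w") as f:
        f.write(json.dumps(dct, indent=4))

schema_name = "Run"                                      # hypothetical schema name
schema = {"type": "object"}                              # placeholder schema body
dump_json(f"{schema_name}.schema.json", schema)          # writes Run.schema.json
schema_name = schema_name[0].lower() + schema_name[1:]   # lowercase the first letter
dump_json(f"{schema_name}.schema.json", schema)          # writes run.schema.json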
12 changes: 10 additions & 2 deletions src/promptflow-core/promptflow/_core/data/tool.schema.json
@@ -215,6 +215,8 @@
"FunctionList",
"FunctionStr",
"FormRecognizerConnection",
"ServerlessConnection",
"AzureAIServicesConnection",
"FilePath",
"Image",
"AssistantDefinition"
@@ -243,6 +245,8 @@
"function_list",
"function_str",
"FormRecognizerConnection",
"ServerlessConnection",
"AzureAIServicesConnection",
"file_path",
"image",
"assistant_definition"
@@ -354,7 +358,9 @@
"Pinecone",
"Qdrant",
"Weaviate",
"FormRecognizer"
"FormRecognizer",
"Serverless",
"AzureAIServices"
],
"enum": [
"OpenAI",
@@ -369,7 +375,9 @@
"Pinecone",
"Qdrant",
"Weaviate",
"FormRecognizer"
"FormRecognizer",
"Serverless",
"AzureAIServices"
]
},
"ToolState": {
4 changes: 3 additions & 1 deletion src/promptflow-core/promptflow/_utils/exception_utils.py
@@ -103,7 +103,9 @@ def get_additional_info(self, name):

def get_user_execution_error_info(self):
"""Get user tool execution error info from additional info."""
user_execution_error_info = self.get_additional_info(ADDITIONAL_INFO_USER_EXECUTION_ERROR)
user_execution_error_info = self.get_additional_info(
ADDITIONAL_INFO_USER_EXECUTION_ERROR
) or self.get_additional_info(ADDITIONAL_INFO_FLEX_FLOW_ERROR)
if not user_execution_error_info or not isinstance(user_execution_error_info, dict):
return {}
return user_execution_error_info
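Chaining the two lookups with "or" means the flex-flow error details are consulted only when the tool-execution details are absent. A minimal sketch of that fallback, using stand-in constant values and a plain dict in place of the real ErrorResponse internals:

ADDITIONAL_INFO_USER_EXECUTION_ERROR = "ToolExecutionErrorDetails"  # stand-in value
ADDITIONAL_INFO_FLEX_FLOW_ERROR = "FlexFlowExecutionErrorDetails"   # stand-in value

def get_user_execution_error_info(additional_info: dict) -> dict:
    # Prefer the tool-execution error details; fall back to the flex-flow details.
    info = additional_info.get(ADDITIONAL_INFO_USER_EXECUTION_ERROR) or additional_info.get(
        ADDITIONAL_INFO_FLEX_FLOW_ERROR
    )
    if not info or not isinstance(info, dict):
        return {}
    return info

# Only the flex-flow details are present, so the fallback returns them.
print(get_user_execution_error_info({"FlexFlowExecutionErrorDetails": {"type": "ZeroDivisionError"}}))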
60 changes: 48 additions & 12 deletions src/promptflow-core/promptflow/core/_flow.py
@@ -11,6 +11,7 @@
from promptflow._constants import DEFAULT_ENCODING, LANGUAGE_KEY, PROMPTY_EXTENSION, FlowLanguage
from promptflow._utils.flow_utils import is_flex_flow, is_prompty_flow, resolve_flow_path
from promptflow._utils.yaml_utils import load_yaml_string
from promptflow.contracts.tool import ValueType
from promptflow.core._errors import MissingRequiredInputError
from promptflow.core._model_configuration import PromptyModelConfiguration
from promptflow.core._prompty_utils import (
@@ -314,6 +315,7 @@ def __init__(

# TODO: support more templating engines
self._template_engine = configs.get("template", "jinja2")
self._input_signature, self._output_signature = None, None
super().__init__(code=path.parent, path=path, data=configs, content_hash=None, **kwargs)

@classmethod
@@ -362,10 +364,18 @@ def _parse_prompty(path):
configs = load_yaml_string(config_content)
return configs, prompt_template

def _validate_inputs(self, input_values):
def _resolve_inputs(self, input_values):
"""
Resolve prompty inputs. If input_values is not provided, the sample data will be used as the input values.
For inputs that are not provided, the default value from the input signature will be used.
"""
if not input_values and self._sample:
# Load inputs from sample
input_values = load_inputs_from_sample(self._sample)

resolved_inputs = {}
missing_inputs = []
for input_name, value in self._inputs.items():
for input_name, value in self._get_input_signature().items():
if input_name not in input_values and "default" not in value:
missing_inputs.append(input_name)
continue
@@ -374,6 +384,25 @@ def __call__(self, *args, **kwargs):
raise MissingRequiredInputError(f"Missing required inputs: {missing_inputs}")
return resolved_inputs

def _get_input_signature(self):
if not self._input_signature:
if self._inputs:
self._input_signature = self._inputs
elif self._sample:
sample_data = load_inputs_from_sample(self._sample)
self._input_signature = {k: {"type": ValueType.from_value(v).value} for k, v in sample_data.items()}
else:
self._input_signature = {}
return self._input_signature

def _get_output_signature(self, include_primitive_output=False):
if not self._output_signature:
self._output_signature = self._outputs
if not self._output_signature and include_primitive_output:
return {"output": {"type": "string"}}
else:
return self._output_signature

@trace
def __call__(self, *args, **kwargs):
"""Calling flow as a function, the inputs should be provided with key word arguments.
@@ -387,17 +416,13 @@ def __call__(self, *args, **kwargs):
"""
if args:
raise UserErrorException("Prompty can only be called with keyword arguments.")
inputs = kwargs
if not inputs and self._sample:
# Load inputs from sample
inputs = load_inputs_from_sample(self._sample)
inputs = self._resolve_inputs(kwargs)
enrich_prompt_template(self._template, variables=inputs)

# 1. Get connection
connection = convert_model_configuration_to_connection(self._model.configuration)

# 2. Deal with the prompt
inputs = self._validate_inputs(inputs)
traced_convert_prompt_template = _traced(func=convert_prompt_template, args_to_ignore=["api"])
template = traced_convert_prompt_template(self._template, inputs, self._model.api)

@@ -417,6 +442,21 @@ def __call__(self, *args, **kwargs):
outputs=self._outputs,
)

def render(self, *args, **kwargs):
"""Render the prompt content.
:param args: positional arguments are not supported.
:param kwargs: prompty inputs as keyword arguments.
:return: Prompt content
:rtype: str
"""
if args:
raise UserErrorException("Prompty can only be rendered with keyword arguments.")
inputs = self._resolve_inputs(kwargs)
prompt = convert_prompt_template(self._template, inputs, self._model.api)
# For chat mode, the generated message is a list. Convert it to a string before returning it to the user.
return str(prompt)


class AsyncPrompty(Prompty):
"""Async prompty is based on Prompty, which is used to invoke prompty in async mode.
@@ -445,17 +485,13 @@ async def __call__(self, *args, **kwargs) -> Mapping[str, Any]:
"""
if args:
raise UserErrorException("Prompty can only be called with keyword arguments.")
inputs = kwargs
if not inputs and self._sample:
# Load inputs from sample
inputs = load_inputs_from_sample(self._sample)
inputs = self._resolve_inputs(kwargs)
enrich_prompt_template(self._template, variables=inputs)

# 1. Get connection
connection = convert_model_configuration_to_connection(self._model.configuration)

# 2. Deal with the prompt
inputs = self._validate_inputs(inputs)
traced_convert_prompt_template = _traced(func=convert_prompt_template, args_to_ignore=["api"])
template = traced_convert_prompt_template(self._template, inputs, self._model.api)

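With these changes, a prompty's input signature comes from its explicit inputs section when present and is otherwise inferred from the types of its sample data, defaults fill in any inputs the caller omits, and the new render() returns the resolved prompt text without calling the model. A hedged usage sketch; the file path and the "question" input are illustrative, not taken from this diff:

from promptflow.core import Prompty

# Hypothetical prompty file; any prompty with a "question" input works the same way.
prompty = Prompty.load(source="path/to/example.prompty")

# Inspect the resolved input signature (explicit inputs, or types inferred from sample data).
print(prompty._get_input_signature())

# Render the resolved prompt without invoking the model. For chat mode the
# generated messages form a list, which render() converts to a string.
print(prompty.render(question="what is the result of 1+1?"))

# result = prompty()  # would call the model, resolving missing inputs from sample data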
4 changes: 2 additions & 2 deletions src/promptflow-devkit/promptflow/_cli/_pf/entry.py
@@ -57,7 +57,7 @@ def run_command(args):
handler.setLevel(logging.DEBUG)

if args.version:
print_pf_version()
print_pf_version(with_azure=True)
elif args.action == "flow":
dispatch_flow_commands(args)
elif args.action == "connection":
@@ -141,7 +141,7 @@ def main():
"""Entrance of pf CLI."""
command_args = sys.argv[1:]
if len(command_args) == 1 and command_args[0] == "version":
print_promptflow_version_dict_string()
print_promptflow_version_dict_string(with_azure=True)
return
if len(command_args) == 0:
# print privacy statement & welcome message like azure-cli
@@ -611,7 +611,11 @@ def _raise_error_when_test_failed(test_result, show_trace=False):
error_response = ErrorResponse.from_error_dict(error_dict)
user_execution_error = error_response.get_user_execution_error_info()
error_message = error_response.message
stack_trace = user_execution_error.get("traceback", "")
# The SDK wraps the exception here, so get the user-code stack trace, or recursively
# build one from the debug info of the inner exceptions
stack_trace = user_execution_error.get("traceback", "") or TestSubmitter._recursively_get_stacktrace(
error_dict.get("debugInfo", {})
)
error_type = user_execution_error.get("type", "Exception")
if show_trace:
print(stack_trace)
@@ -625,3 +629,12 @@ def _get_generator_outputs(outputs):
generator_outputs = {key: output for key, output in outputs.items() if isinstance(output, GeneratorType)}
if generator_outputs:
logger.info(f"Some streaming outputs in the result, {generator_outputs.keys()}")

@staticmethod
def _recursively_get_stacktrace(debug_info: dict):
if not debug_info:
return ""
stack_trace = debug_info.get("stackTrace", "") + debug_info.get("message", "")
inner_exception = debug_info.get("innerException", {})
stack_trace = TestSubmitter._recursively_get_stacktrace(inner_exception) + stack_trace
return stack_trace
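Because the recursive call's result is prepended, the innermost exception's trace comes first in the combined string. A small illustration with a made-up debugInfo payload shaped like the dicts this helper expects:

# Hypothetical nested debugInfo payload; the innermost exception sits deepest.
debug_info = {
    "stackTrace": "Traceback (most recent call last): ... in wrapper\n",
    "message": "ToolExecutionError\n",
    "innerException": {
        "stackTrace": "Traceback (most recent call last): ... in my_tool\n",
        "message": "ZeroDivisionError: division by zero\n",
        "innerException": {},
    },
}

def recursively_get_stacktrace(debug_info: dict) -> str:
    # Standalone version of TestSubmitter._recursively_get_stacktrace.
    if not debug_info:
        return ""
    stack_trace = debug_info.get("stackTrace", "") + debug_info.get("message", "")
    inner_exception = debug_info.get("innerException", {})
    return recursively_get_stacktrace(inner_exception) + stack_trace

# Prints the ZeroDivisionError trace first, then the outer wrapper's trace.
print(recursively_get_stacktrace(debug_info))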
@@ -11,6 +11,7 @@
import subprocess
import sys
import time
import traceback
from dataclasses import InitVar, dataclass, field
from datetime import datetime
from functools import wraps
@@ -278,7 +279,10 @@ def __post_init__(self, exception):
self.target = exception.target
self.module = exception.module
self.reference_code = exception.reference_code
self.inner_exception = str(exception.inner_exception)
# If there is no inner_exception, fall back to the traceback of the current exception
self.inner_exception = (
str(exception.inner_exception) if exception.inner_exception else traceback.format_exc()
)
self.additional_info = exception.additional_info
self.error_codes = exception.error_codes
else:
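traceback.format_exc() only returns a real traceback while an exception is being handled, which matches this call site: __post_init__ runs inside the handler that wraps the original error. A minimal sketch of the fallback with a stand-in exception type:

import traceback

class FakeSdkError(Exception):
    # Stand-in for the SDK exception type; only inner_exception matters here.
    def __init__(self, inner_exception=None):
        super().__init__("something went wrong")
        self.inner_exception = inner_exception

try:
    raise FakeSdkError()  # no inner exception attached
except FakeSdkError as exception:
    # Same pattern as above: prefer inner_exception, otherwise capture the
    # traceback of the exception currently being handled.
    inner_exception = str(exception.inner_exception) if exception.inner_exception else traceback.format_exc()
    print(inner_exception)  # prints the full FakeSdkError traceback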
@@ -1005,11 +1005,12 @@ def _infer_signature(entry: Union[Callable, FlexFlow, Flow, Prompty], include_pr
from promptflow.contracts.tool import ValueType
from promptflow.core._model_configuration import PromptyModelConfiguration

flow_meta = {"inputs": entry._data.get("inputs", {})}
if "outputs" in entry._data:
flow_meta["outputs"] = entry._data.get("outputs")
elif include_primitive_output:
flow_meta["outputs"] = {"output": {"type": "string"}}
flow_meta = {
"inputs": entry._core_prompty._get_input_signature(),
}
output_signature = entry._core_prompty._get_output_signature(include_primitive_output)
if output_signature:
flow_meta["outputs"] = output_signature
init_dict = {}
for field in fields(PromptyModelConfiguration):
init_dict[field.name] = {"type": ValueType.from_type(field.type).value}
@@ -630,6 +630,25 @@ def test_flow_infer_signature(self):
"response": {"type": "string", "default": "first"},
},
}

# sample as input signature
prompty = load_flow(source=Path(PROMPTY_DIR) / "sample_as_input_signature.prompty")
meta = pf.flows.infer_signature(entry=prompty, include_primitive_output=True)
assert meta == {
"inputs": {
"firstName": {"type": "string"},
"lastName": {"type": "string"},
"question": {"type": "string"},
},
"outputs": {"output": {"type": "string"}},
"init": {
"configuration": {"type": "object"},
"parameters": {"type": "object"},
"api": {"type": "string", "default": "chat"},
"response": {"type": "string", "default": "first"},
},
}

# Flex flow
flex_flow = load_flow(source=Path(EAGER_FLOWS_DIR) / "builtin_llm")
meta = pf.flows.infer_signature(entry=flex_flow, include_primitive_output=True)
@@ -302,6 +302,13 @@ def test_eager_flow_test_with_primitive_output(self):
result = _client._flows._test(flow=flow_path, inputs={"input_val": "val1"})
assert result.run_info.status.value == "Completed"

def test_eager_flow_test_with_user_code_error(self):
clear_module_cache("entry")
flow_path = Path(f"{EAGER_FLOWS_DIR}/exception_in_user_code/").absolute()
result = _client._flows._test(flow=flow_path)
assert result.run_info.status.value == "Failed"
assert "FlexFlowExecutionErrorDetails" in str(result.run_info.error)

def test_eager_flow_test_invalid_cases(self):
# wrong entry provided
flow_path = Path(f"{EAGER_FLOWS_DIR}/incorrect_entry/").absolute()
38 changes: 38 additions & 0 deletions src/promptflow-devkit/tests/sdk_cli_test/e2etests/test_prompty.py
@@ -355,10 +355,48 @@ def test_prompty_with_sample(self, pf: PFClient):
prompty()
assert "Only dict and json file are supported as sample in prompty" in ex.value.message

# Test sample field as input signature
prompty = Flow.load(source=f"{PROMPTY_DIR}/sample_as_input_signature.prompty")
result = prompty()
assert "2" in result

input_signature = prompty._get_input_signature()
assert input_signature == {
"firstName": {"type": "string"},
"lastName": {"type": "string"},
"question": {"type": "string"},
}

def test_prompty_with_default_connection(self, pf: PFClient):
connection = pf.connections.get(name="azure_open_ai_connection", with_secrets=True)
os.environ["AZURE_OPENAI_ENDPOINT"] = connection.api_base
os.environ["AZURE_OPENAI_API_KEY"] = connection.api_key
prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example_with_default_connection.prompty")
result = prompty(question="what is the result of 1+1?")
assert "2" in result

def test_render_prompty(self):
prompty = Prompty.load(source=f"{PROMPTY_DIR}/prompty_example.prompty")
result = prompty.render(question="what is the result of 1+1?")
expect = [
{
"role": "system",
"content": "You are an AI assistant who helps people find information.\nAs the assistant, "
"you answer questions briefly, succinctly,\nand in a personable manner using markdown "
"and even add some personal flair with appropriate emojis.\n\n# Safety\n- You **should "
"always** reference factual statements to search results based on [relevant documents]\n-"
" Search results based on [relevant documents] may be incomplete or irrelevant. You do not"
" make assumptions\n# Customer\nYou are helping John Doh to find answers to their "
"questions.\nUse their name to address them in your responses.",
},
{"role": "user", "content": "what is the result of 1+1?"},
]
assert result == str(expect)

with pytest.raises(UserErrorException) as ex:
prompty.render("mock_value")
assert "Prompty can only be rendered with keyword arguments." in ex.value.message

with pytest.raises(MissingRequiredInputError) as ex:
prompty.render(mock_key="mock_value")
assert "Missing required inputs" in ex.value.message