
chore: change variable names in run_relevance_eval for clarity #1570

Closed · wants to merge 1 commit
26 changes: 14 additions & 12 deletions src/phoenix/experimental/evals/functions/binary.py
```diff
@@ -67,8 +67,8 @@ def run_relevance_eval(
     template: Union[PromptTemplate, str] = RAG_RELEVANCY_PROMPT_TEMPLATE_STR,
     rails: List[str] = list(RAG_RELEVANCY_PROMPT_RAILS_MAP.values()),
     system_instruction: Optional[str] = None,
-    query_column_name: str = "query",
-    document_column_name: str = "reference",
+    query_variable_name: str = "query",
+    document_variable_name: str = "reference",
 ) -> List[List[str]]:
     """
     Given a pandas dataframe containing queries and retrieved documents, classifies the relevance of
```
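The new names make it explicit that these parameters refer to variables in the evaluation prompt template, not merely dataframe columns. A minimal call-site sketch under the renamed keywords (not part of this PR; the import paths and the OpenAIModel usage are assumptions based on the module layout shown in this diff):

```python
# Sketch only: illustrates the renamed keyword arguments. The import paths and
# the OpenAIModel class are assumptions, not taken from this PR.
import pandas as pd

from phoenix.experimental.evals import OpenAIModel, run_relevance_eval

# Column names match the default template variables {query} and {reference}.
df = pd.DataFrame(
    {
        "query": ["What is Arize Phoenix?"],
        # Each row holds the list of documents retrieved for that query.
        "reference": [["Phoenix is an open-source ML observability library."]],
    }
)

# The renamed parameters name prompt-template variables; each must also be a
# dataframe column unless the dataframe is in OpenInference trace format.
relevance_labels = run_relevance_eval(
    dataframe=df,
    model=OpenAIModel(model_name="gpt-4"),
    query_variable_name="query",
    document_variable_name="reference",
)
# relevance_labels is a List[List[str]]: one list of labels per input row.
```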
```diff
@@ -103,11 +103,13 @@ def run_relevance_eval(
         rails (List[str], optional): A list of strings representing the possible output classes of
             the model's predictions.
 
-        query_column_name (str, optional): The name of the query column in the dataframe, which
-            should also be a template variable.
+        query_variable_name (str, optional): The name of the query variable in the evaluation prompt
+            template. This must also be a column in the dataframe, unless the dataframe is in
+            OpenInference trace format.
 
-        reference_column_name (str, optional): The name of the document column in the dataframe,
-            which should also be a template variable.
+        reference_variable_name (str, optional): The name of the reference variable in the
+            evaluation prompt template. This must also be a column in the dataframe, unless the
+            dataframe is in OpenInference trace format.
 
         system_instruction (Optional[str], optional): An optional system message.
 
```
```diff
@@ -120,15 +122,15 @@ def run_relevance_eval(
             be parsed.
     """
 
-    query_column = dataframe.get(query_column_name)
-    document_column = dataframe.get(document_column_name)
+    query_column = dataframe.get(query_variable_name)
+    document_column = dataframe.get(document_variable_name)
     if query_column is None or document_column is None:
         openinference_query_column = dataframe.get(OPENINFERENCE_QUERY_COLUMN_NAME)
         openinference_document_column = dataframe.get(OPENINFERENCE_DOCUMENT_COLUMN_NAME)
         if openinference_query_column is None or openinference_document_column is None:
             raise ValueError(
-                f'Dataframe columns must include either "{query_column_name}" and '
-                f'"{document_column_name}", or "{OPENINFERENCE_QUERY_COLUMN_NAME}" and '
+                f'Dataframe columns must include either "{query_variable_name}" and '
+                f'"{document_variable_name}", or "{OPENINFERENCE_QUERY_COLUMN_NAME}" and '
                 f'"{OPENINFERENCE_DOCUMENT_COLUMN_NAME}".'
             )
         query_column = openinference_query_column
```
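For context, the resolution logic in this hunk falls back to OpenInference trace columns when the template-variable columns are absent. A standalone sketch of that pattern (the real OpenInference column names are constants defined elsewhere in phoenix and not shown in this diff; the values below are placeholders):

```python
# Minimal sketch of the column-resolution fallback. The two constant values
# are placeholders, not phoenix's actual OpenInference column names.
from typing import Optional, Tuple

import pandas as pd

OPENINFERENCE_QUERY_COLUMN_NAME = "attributes.input.value"  # placeholder
OPENINFERENCE_DOCUMENT_COLUMN_NAME = "attributes.retrieval.documents"  # placeholder


def resolve_columns(
    dataframe: pd.DataFrame,
    query_variable_name: str = "query",
    document_variable_name: str = "reference",
) -> Tuple[pd.Series, pd.Series]:
    """Return the query and document columns, falling back to OpenInference names."""
    query_column: Optional[pd.Series] = dataframe.get(query_variable_name)
    document_column: Optional[pd.Series] = dataframe.get(document_variable_name)
    if query_column is None or document_column is None:
        # Fall back to the OpenInference trace-format columns.
        query_column = dataframe.get(OPENINFERENCE_QUERY_COLUMN_NAME)
        document_column = dataframe.get(OPENINFERENCE_DOCUMENT_COLUMN_NAME)
        if query_column is None or document_column is None:
            raise ValueError(
                f'Dataframe columns must include either "{query_variable_name}" and '
                f'"{document_variable_name}", or "{OPENINFERENCE_QUERY_COLUMN_NAME}" and '
                f'"{OPENINFERENCE_DOCUMENT_COLUMN_NAME}".'
            )
    return query_column, document_column
```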
```diff
@@ -153,8 +155,8 @@ def run_relevance_eval(
     predictions = llm_eval_binary(
         dataframe=pd.DataFrame(
             {
-                query_column_name: expanded_queries,
-                document_column_name: expanded_documents,
+                query_variable_name: expanded_queries,
+                document_variable_name: expanded_documents,
             }
         ),
         model=model,
```
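The code that builds expanded_queries and expanded_documents is collapsed in this diff view, so the following is an assumption about its shape: each input row pairs one query with a list of retrieved documents, and every (query, document) pair becomes its own row so that llm_eval_binary can classify each pair independently. A sketch:

```python
# Sketch of the presumed expansion step (the actual construction is collapsed
# in this diff view): flatten one-query-to-many-documents rows into one row
# per (query, document) pair.
queries = ["q1", "q2"]
documents = [["d1a", "d1b"], ["d2a"]]

expanded_queries = []
expanded_documents = []
for query, docs in zip(queries, documents):
    for document in docs:
        expanded_queries.append(query)
        expanded_documents.append(document)

assert expanded_queries == ["q1", "q1", "q2"]
assert expanded_documents == ["d1a", "d1b", "d2a"]
```

The per-pair labels can then be regrouped by row, which is consistent with the function's List[List[str]] return type.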