fix: allow progress bar to be disabled #5064

Merged (2 commits) on Oct 18, 2024
9 changes: 7 additions & 2 deletions packages/phoenix-evals/src/phoenix/evals/classify.py
@@ -78,6 +78,7 @@ def llm_classify(
exit_on_error: bool = True,
run_sync: bool = False,
concurrency: Optional[int] = None,
progress_bar_format: Optional[str] = get_tqdm_progress_bar_formatter("llm_classify"),
) -> pd.DataFrame:
"""
Classifies each input row of the dataframe using an LLM.
@@ -135,6 +136,11 @@ def llm_classify(
submission is possible. If not provided, a recommended default concurrency is
set on a per-model basis.

progress_bar_format (Optional[str]): An optional format string for the progress bar. If not
specified, defaults to: "llm_classify |{bar}| {n_fmt}/{total_fmt} ({percentage:3.1f}%)
| ⏳ {elapsed}<{remaining} | {rate_fmt}{postfix}". If None is passed explicitly,
the progress bar is disabled.

Returns:
pandas.DataFrame: A dataframe where the `label` column (at column position 0) contains
the classification labels. If provide_explanation=True, then an additional column named
@@ -150,7 +156,6 @@
# clients need to be reloaded to ensure that async evals work properly
model.reload_client()

tqdm_bar_format = get_tqdm_progress_bar_formatter("llm_classify")
use_openai_function_call = (
use_function_calling_if_available
and isinstance(model, OpenAIModel)
@@ -230,7 +235,7 @@ def _run_llm_classification_sync(input_data: pd.Series[Any]) -> ParsedLLMRespons
_run_llm_classification_async,
run_sync=run_sync,
concurrency=concurrency,
tqdm_bar_format=tqdm_bar_format,
tqdm_bar_format=progress_bar_format,
max_retries=max_retries,
exit_on_error=exit_on_error,
fallback_return_value=fallback_return_value,
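With this change, callers of `llm_classify` can keep the default `llm_classify`-titled bar, supply their own tqdm format string, or pass `progress_bar_format=None` to suppress the bar entirely. A minimal usage sketch, assuming `llm_classify` and `OpenAIModel` are importable from `phoenix.evals` and using a hypothetical one-row dataframe, template, and model name:

```python
import pandas as pd
from phoenix.evals import OpenAIModel, llm_classify

# Placeholder inputs for illustration only.
df = pd.DataFrame(
    {
        "input": ["What is Phoenix?"],
        "reference": ["An open-source LLM observability library."],
    }
)
template = (
    "Is the reference relevant to the input? Answer 'relevant' or 'irrelevant'.\n"
    "input: {input}\nreference: {reference}"
)
model = OpenAIModel(model="gpt-4o-mini")  # assumed model name; any supported model works

# Default behavior: the "llm_classify" progress bar is shown.
results = llm_classify(
    dataframe=df,
    template=template,
    model=model,
    rails=["relevant", "irrelevant"],
)

# Behavior added by this PR: pass None to disable the progress bar,
# e.g. when tqdm output would clutter CI logs or notebooks.
quiet_results = llm_classify(
    dataframe=df,
    template=template,
    model=model,
    rails=["relevant", "irrelevant"],
    progress_bar_format=None,
)
```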
14 changes: 11 additions & 3 deletions packages/phoenix-evals/src/phoenix/evals/executors.py
@@ -225,7 +225,11 @@ def termination_handler(signum: int, frame: Any) -> None:
original_handler = signal.signal(self.termination_signal, termination_handler)
outputs = [self.fallback_return_value] * len(inputs)
execution_details = [ExecutionDetails() for _ in range(len(inputs))]
progress_bar = tqdm(total=len(inputs), bar_format=self.tqdm_bar_format)
progress_bar = tqdm(
total=len(inputs),
bar_format=self.tqdm_bar_format,
disable=self.tqdm_bar_format is None,
)

max_queue_size = 5 * self.concurrency # limit the queue to bound memory usage
max_fill = max_queue_size - (2 * self.concurrency) # ensure there is always room to requeue
@@ -287,7 +291,7 @@ class SyncExecutor(Executor):
returns an output.

tqdm_bar_format (Optional[str], optional): The format string for the progress bar. Defaults
to None.
to None. If None, the progress bar is disabled.

max_retries (int, optional): The maximum number of times to retry on exceptions. Defaults to
10.
@@ -339,7 +343,11 @@ def run(self, inputs: Sequence[Any]) -> Tuple[List[Any], List[Any]]:
execution_details: List[ExecutionDetails] = [
ExecutionDetails() for _ in range(len(inputs))
]
progress_bar = tqdm(total=len(inputs), bar_format=self.tqdm_bar_format)
progress_bar = tqdm(
total=len(inputs),
bar_format=self.tqdm_bar_format,
disable=self.tqdm_bar_format is None,
)

for index, input in enumerate(inputs):
task_start_time = time.time()
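The executor changes lean on tqdm's built-in `disable` flag: when `tqdm_bar_format` is `None`, the bar object is still constructed (so later `update()` and `close()` calls need no special-casing) but renders nothing. A standalone sketch of that pattern, independent of the executor classes:

```python
from typing import Optional

from tqdm.auto import tqdm


def run_with_optional_progress(n_items: int, bar_format: Optional[str]) -> None:
    # Mirrors the PR's approach: a None format disables the bar while keeping
    # the tqdm object usable, so the surrounding code stays unchanged.
    progress_bar = tqdm(
        total=n_items,
        bar_format=bar_format,
        disable=bar_format is None,
    )
    for _ in range(n_items):
        ...  # do one unit of work
        progress_bar.update()
    progress_bar.close()


run_with_optional_progress(3, "{l_bar}{bar}{r_bar}")  # renders a bar
run_with_optional_progress(3, None)                   # silent: disable=True
```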