@@ -6,7 +6,7 @@
import re
from typing import Any, Dict, Optional

from llama_stack.apis.inference.inference import Inference
from llama_stack.apis.inference.inference import Inference, UserMessage
from llama_stack.apis.scoring import ScoringResultRow
from llama_stack.apis.scoring_functions import ScoringFnParams
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
@@ -58,10 +58,9 @@ async def score_row(
judge_response = await self.inference_api.chat_completion(
model_id=fn_def.params.judge_model,
messages=[
{
"role": "user",
"content": judge_input_msg,
}
UserMessage(
Contributor Author: bug uncovered from unit tests :)

content=judge_input_msg,
),
],
)
content = judge_response.completion_message.content
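The fix above swaps the raw role/content dict for the typed UserMessage when calling the judge model. A minimal sketch of that call shape, assuming the llama_stack package is installed, that UserMessage defaults its role to "user", and using a mocked inference API and a hypothetical model id:

# Sketch only: AsyncMock stands in for the real inference provider, and
# "judge-model-id" is a hypothetical identifier.
import asyncio
from unittest.mock import AsyncMock

from llama_stack.apis.inference.inference import UserMessage


async def main() -> None:
    inference_api = AsyncMock()

    judge_input_msg = "Rate the answer below from 1 to 5 ..."
    await inference_api.chat_completion(
        model_id="judge-model-id",
        messages=[UserMessage(content=judge_input_msg)],  # typed message, not a plain dict
    )

    # The kind of check a unit test can make once the message is typed.
    _, kwargs = inference_api.chat_completion.call_args
    assert isinstance(kwargs["messages"][0], UserMessage)
    assert kwargs["messages"][0].content == judge_input_msg


asyncio.run(main())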
5 changes: 5 additions & 0 deletions llama_stack/providers/utils/scoring/base_scoring_fn.py
@@ -73,6 +73,11 @@ def register_scoring_fn_def(self, scoring_fn: ScoringFn) -> None:
raise ValueError(f"Scoring function def with identifier {scoring_fn.identifier} already exists.")
self.supported_fn_defs_registry[scoring_fn.identifier] = scoring_fn

def unregister_scoring_fn_def(self, scoring_fn_id: str) -> None:
if scoring_fn_id not in self.supported_fn_defs_registry:
raise ValueError(f"Scoring function def with identifier {scoring_fn_id} does not exist.")
del self.supported_fn_defs_registry[scoring_fn_id]

@abstractmethod
async def score_row(
self,
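The new unregister_scoring_fn_def mirrors register_scoring_fn_def: an unknown identifier raises ValueError, otherwise the entry is dropped from the registry. A self-contained sketch of that contract, using a plain dict in place of the real RegisteredBaseScoringFn registry:

# Standalone illustration of the register/unregister pattern added above;
# a module-level dict stands in for self.supported_fn_defs_registry.
from typing import Any, Dict

supported_fn_defs_registry: Dict[str, Any] = {}


def register_scoring_fn_def(identifier: str, scoring_fn: Any) -> None:
    if identifier in supported_fn_defs_registry:
        raise ValueError(f"Scoring function def with identifier {identifier} already exists.")
    supported_fn_defs_registry[identifier] = scoring_fn


def unregister_scoring_fn_def(scoring_fn_id: str) -> None:
    if scoring_fn_id not in supported_fn_defs_registry:
        raise ValueError(f"Scoring function def with identifier {scoring_fn_id} does not exist.")
    del supported_fn_defs_registry[scoring_fn_id]


register_scoring_fn_def("basic::equality", object())
unregister_scoring_fn_def("basic::equality")
try:
    unregister_scoring_fn_def("basic::equality")  # already removed, so this raises
except ValueError as err:
    print(err)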
2 changes: 1 addition & 1 deletion tests/integration/conftest.py
@@ -59,7 +59,7 @@ def pytest_addoption(parser):
)
parser.addoption(
"--judge-model",
help="comma-separated list of judge models. Fixture name: judge_model_id",
help="Specify the judge model to use for testing",
)
parser.addoption(
"--embedding-dimension",
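The conftest change only rewords the --judge-model help text. For context, a hedged sketch of how such an option is typically surfaced to tests as a fixture; the fixture name judge_model_id comes from the wording in the old help string and is an assumption about this repo's conftest:

# Sketch of the usual pytest wiring for a --judge-model option; the
# judge_model_id fixture name is assumed, not confirmed by this diff.
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--judge-model",
        help="Specify the judge model to use for testing",
    )


@pytest.fixture
def judge_model_id(request):
    return request.config.getoption("--judge-model")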
10 changes: 5 additions & 5 deletions tests/integration/datasetio/test_dataset.csv
@@ -1,6 +1,6 @@
input_query,generated_answer,expected_answer,chat_completion_input
What is the capital of France?,London,Paris,"[{'role': 'user', 'content': 'What is the capital of France?'}]"
Who is the CEO of Meta?,Mark Zuckerberg,Mark Zuckerberg,"[{'role': 'user', 'content': 'Who is the CEO of Meta?'}]"
What is the largest planet in our solar system?,Jupiter,Jupiter,"[{'role': 'user', 'content': 'What is the largest planet in our solar system?'}]"
What is the smallest country in the world?,China,Vatican City,"[{'role': 'user', 'content': 'What is the smallest country in the world?'}]"
What is the currency of Japan?,Yen,Yen,"[{'role': 'user', 'content': 'What is the currency of Japan?'}]"
What is the capital of France?,London,Paris,"[{""role"": ""user"", ""content"": ""What is the capital of France?""}]"
Who is the CEO of Meta?,Mark Zuckerberg,Mark Zuckerberg,"[{""role"": ""user"", ""content"": ""Who is the CEO of Meta?""}]"
What is the largest planet in our solar system?,Jupiter,Jupiter,"[{""role"": ""user"", ""content"": ""What is the largest planet in our solar system?""}]"
What is the smallest country in the world?,China,Vatican City,"[{""role"": ""user"", ""content"": ""What is the smallest country in the world?""}]"
What is the currency of Japan?,Yen,Yen,"[{""role"": ""user"", ""content"": ""What is the currency of Japan?""}]"
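The CSV rows switch from single-quoted, Python-style dicts to doubled double-quotes so the chat_completion_input cell round-trips through a CSV reader as valid JSON. A small standard-library demonstration of the difference:

# The old rows embed Python-style single quotes, which survive CSV parsing
# but are not valid JSON; the new rows use "" (the CSV escape for a literal
# double quote), so the extracted cell parses with json.loads.
import csv
import io
import json

old_row = "What is the currency of Japan?,Yen,Yen,\"[{'role': 'user', 'content': 'What is the currency of Japan?'}]\""
new_row = 'What is the currency of Japan?,Yen,Yen,"[{""role"": ""user"", ""content"": ""What is the currency of Japan?""}]"'

for label, row in (("old", old_row), ("new", new_row)):
    cell = next(csv.reader(io.StringIO(row)))[3]
    try:
        print(label, json.loads(cell))
    except json.JSONDecodeError as err:
        print(label, "not valid JSON:", err)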
29 changes: 6 additions & 23 deletions tests/integration/datasetio/test_datasetio.py
@@ -9,13 +9,9 @@
import os
from pathlib import Path

import pytest

# How to run this test:
#
# pytest llama_stack/providers/tests/datasetio/test_datasetio.py
# -m "meta_reference"
# -v -s --tb=short --disable-warnings
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasetio


def data_url_from_file(file_path: str) -> str:
@@ -60,42 +56,29 @@ def register_dataset(llama_stack_client, for_generation=False, for_rag=False, da
"generated_answer": {"type": "string"},
}

dataset_providers = [x for x in llama_stack_client.providers.list() if x.api == "datasetio"]
dataset_provider_id = dataset_providers[0].provider_id

llama_stack_client.datasets.register(
dataset_id=dataset_id,
dataset_schema=dataset_schema,
url=dict(uri=test_url),
provider_id="localfs",
provider_id=dataset_provider_id,
)


def test_datasets_list(llama_stack_client):
# NOTE: this needs you to ensure that you are starting from a clean state
# but so far we don't have an unregister API unfortunately, so be careful

response = llama_stack_client.datasets.list()
assert isinstance(response, list)
assert len(response) == 0


def test_register_dataset(llama_stack_client):
def test_register_unregister_dataset(llama_stack_client):
register_dataset(llama_stack_client)
response = llama_stack_client.datasets.list()
assert isinstance(response, list)
assert len(response) == 1
assert response[0].identifier == "test_dataset"

with pytest.raises(ValueError):
# unregister a dataset that does not exist
llama_stack_client.datasets.unregister("test_dataset2")

llama_stack_client.datasets.unregister("test_dataset")
response = llama_stack_client.datasets.list()
assert isinstance(response, list)
assert len(response) == 0

with pytest.raises(ValueError):
llama_stack_client.datasets.unregister("test_dataset")


def test_get_rows_paginated(llama_stack_client):
register_dataset(llama_stack_client)
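The updated test discovers whichever provider serves the datasetio API instead of hardcoding provider_id="localfs", and the register test now also exercises the new unregister path. A hedged sketch of the discovery step on its own, assuming a configured llama_stack_client:

# Sketch of the provider-discovery pattern used above; raising early gives a
# clearer failure than an IndexError when no datasetio provider is configured.
def pick_datasetio_provider_id(llama_stack_client) -> str:
    providers = [p for p in llama_stack_client.providers.list() if p.api == "datasetio"]
    if not providers:
        raise RuntimeError("no datasetio provider is configured for this stack")
    return providers[0].provider_id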
242 changes: 74 additions & 168 deletions tests/integration/eval/test_eval.py
@@ -3,181 +3,87 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import uuid

import pytest

from llama_stack.apis.common.content_types import URL
from llama_stack.apis.common.type_system import ChatCompletionInputType, StringType
from llama_stack.apis.eval.eval import (
ModelCandidate,
)
from llama_stack.apis.inference import SamplingParams
from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams
from llama_stack.distribution.datatypes import Api

from ..datasetio.test_datasetio import register_dataset
from .constants import JUDGE_PROMPT

# How to run this test:
#
# pytest llama_stack/providers/tests/eval/test_eval.py
# -m "meta_reference_eval_together_inference_huggingface_datasetio"
# -v -s --tb=short --disable-warnings


@pytest.mark.skip(reason="FIXME FIXME @yanxi0830 this needs to be migrated to use the API")
class Testeval:
@pytest.mark.asyncio
async def test_benchmarks_list(self, eval_stack):
# NOTE: this needs you to ensure that you are starting from a clean state
# but so far we don't have an unregister API unfortunately, so be careful
benchmarks_impl = eval_stack[Api.benchmarks]
response = await benchmarks_impl.list_benchmarks()
assert isinstance(response, list)

@pytest.mark.asyncio
async def test_eval_evaluate_rows(self, eval_stack, inference_model, judge_model):
eval_impl, benchmarks_impl, datasetio_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasetio],
eval_stack[Api.datasets],
)

await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
response = await datasets_impl.list_datasets()

rows = await datasetio_impl.get_rows_paginated(
dataset_id="test_dataset_for_eval",
rows_in_page=3,
)
assert len(rows.rows) == 3

scoring_functions = [
"basic::equality",
]
benchmark_id = "meta-reference::app_eval"
await benchmarks_impl.register_benchmark(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
)
response = await eval_impl.evaluate_rows(
benchmark_id=benchmark_id,
input_rows=rows.rows,
scoring_functions=scoring_functions,
benchmark_config=dict(
eval_candidate=ModelCandidate(
model=inference_model,
sampling_params=SamplingParams(),
),
scoring_params={
"meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
judge_model=judge_model,
prompt_template=JUDGE_PROMPT,
judge_score_regexes=[
r"Total rating: (\d+)",
r"rating: (\d+)",
r"Rating: (\d+)",
],
)
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/eval


@pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
response = llama_stack_client.datasets.list()
assert any(x.identifier == "test_dataset_for_eval" for x in response)

rows = llama_stack_client.datasetio.get_rows_paginated(
dataset_id="test_dataset_for_eval",
rows_in_page=3,
)
assert len(rows.rows) == 3

scoring_functions = [
scoring_fn_id,
]
benchmark_id = str(uuid.uuid4())
llama_stack_client.benchmarks.register(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
)
list_benchmarks = llama_stack_client.benchmarks.list()
assert any(x.identifier == benchmark_id for x in list_benchmarks)

response = llama_stack_client.eval.evaluate_rows(
benchmark_id=benchmark_id,
input_rows=rows.rows,
scoring_functions=scoring_functions,
benchmark_config={
"eval_candidate": {
"type": "model",
"model": text_model_id,
"sampling_params": {
"temperature": 0.0,
},
),
)
assert len(response.generations) == 3
assert "basic::equality" in response.scores

@pytest.mark.asyncio
async def test_eval_run_eval(self, eval_stack, inference_model, judge_model):
eval_impl, benchmarks_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasets],
)

await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")

scoring_functions = [
"basic::subset_of",
]

benchmark_id = "meta-reference::app_eval-2"
await benchmarks_impl.register_benchmark(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
)
response = await eval_impl.run_eval(
benchmark_id=benchmark_id,
benchmark_config=dict(
eval_candidate=ModelCandidate(
model=inference_model,
sampling_params=SamplingParams(),
),
),
)
assert response.job_id == "0"
job_status = await eval_impl.job_status(benchmark_id, response.job_id)
assert job_status and job_status.value == "completed"
eval_response = await eval_impl.job_result(benchmark_id, response.job_id)

assert eval_response is not None
assert len(eval_response.generations) == 5
assert "basic::subset_of" in eval_response.scores

@pytest.mark.asyncio
async def test_eval_run_benchmark_eval(self, eval_stack, inference_model):
eval_impl, benchmarks_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasets],
)

response = await datasets_impl.list_datasets()
assert len(response) > 0
if response[0].provider_id != "huggingface":
pytest.skip("Only huggingface provider supports pre-registered remote datasets")

await datasets_impl.register_dataset(
dataset_id="mmlu",
dataset_schema={
"input_query": StringType(),
"expected_answer": StringType(),
"chat_completion_input": ChatCompletionInputType(),
},
url=URL(uri="https://huggingface.co/datasets/llamastack/evals"),
metadata={
"path": "llamastack/evals",
"name": "evals__mmlu__details",
"split": "train",
},
)

assert len(response.generations) == 3
assert scoring_fn_id in response.scores


@pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
benchmark_id = str(uuid.uuid4())
llama_stack_client.benchmarks.register(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval_2",
scoring_functions=[scoring_fn_id],
)

response = llama_stack_client.eval.run_eval(
benchmark_id=benchmark_id,
benchmark_config={
"eval_candidate": {
"type": "model",
"model": text_model_id,
"sampling_params": {
"temperature": 0.0,
},
},
)

# register eval task
await benchmarks_impl.register_benchmark(
benchmark_id="meta-reference-mmlu",
dataset_id="mmlu",
scoring_functions=["basic::regex_parser_multiple_choice_answer"],
)

# list benchmarks
response = await benchmarks_impl.list_benchmarks()
assert len(response) > 0

benchmark_id = "meta-reference-mmlu"
response = await eval_impl.run_eval(
benchmark_id=benchmark_id,
benchmark_config=dict(
eval_candidate=ModelCandidate(
model=inference_model,
sampling_params=SamplingParams(),
),
num_examples=3,
),
)
job_status = await eval_impl.job_status(benchmark_id, response.job_id)
assert job_status and job_status.value == "completed"
eval_response = await eval_impl.job_result(benchmark_id, response.job_id)
assert eval_response is not None
assert len(eval_response.generations) == 3
},
)
assert response.job_id == "0"
job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
assert job_status and job_status == "completed"

eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
assert eval_response is not None
assert len(eval_response.generations) == 5
assert scoring_fn_id in eval_response.scores
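The rewritten eval tests register a benchmark, launch run_eval, then read job status and results through the jobs API. A hedged end-to-end sketch of that flow follows; the polling loop and sleep interval are assumptions for providers where the job does not finish synchronously (the test above checks the status immediately), and the "completed" status string is taken from its assertion.

# Sketch of the run_eval job flow exercised by test_evaluate_benchmark,
# using only client calls that appear in the diff above.
import time
import uuid


def run_benchmark_eval(llama_stack_client, model_id, dataset_id, scoring_fn_id):
    benchmark_id = str(uuid.uuid4())
    llama_stack_client.benchmarks.register(
        benchmark_id=benchmark_id,
        dataset_id=dataset_id,
        scoring_functions=[scoring_fn_id],
    )

    job = llama_stack_client.eval.run_eval(
        benchmark_id=benchmark_id,
        benchmark_config={
            "eval_candidate": {
                "type": "model",
                "model": model_id,
                "sampling_params": {"temperature": 0.0},
            },
        },
    )

    # Poll until the provider reports the job as done (assumed status value).
    while llama_stack_client.eval.jobs.status(job_id=job.job_id, benchmark_id=benchmark_id) != "completed":
        time.sleep(1)

    return llama_stack_client.eval.jobs.retrieve(job_id=job.job_id, benchmark_id=benchmark_id)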