
Commit 557f406

Users/singankit/evaluation async samples (#38104)
* Adding async sample for evaluation
* Update sample_evaluations_async.py
1 parent 46e18c3 commit 557f406

File tree

4 files changed: +159 -18 lines

sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py

Lines changed: 48 additions & 2 deletions
@@ -6,7 +6,10 @@
 
 Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
 """
-from typing import List, Any
+import uuid
+from os import PathLike
+from pathlib import Path
+from typing import List, Any, Union, Dict
 from azure.core import AsyncPipelineClient
 from azure.core.pipeline import policies
 from typing_extensions import Self
@@ -155,7 +158,7 @@ def __init__(
             project_name=project_name,
             credential=credential,
             api_version="2024-07-01-preview",  # TODO: Update me
-            credential_scopes=["https://management.azure.com"],  # TODO: Update once service changes are ready
+            credential_scopes=["https://ml.azure.com"],  # TODO: Update once service changes are ready
             **kwargs3,
         )
         _policies3 = kwargs3.pop("policies", None)
@@ -226,6 +229,49 @@ def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential
         project_name = parts[3]
         return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs)
 
+    def upload_file(self, file_path: Union[Path, str, PathLike]) -> str:
+        """Upload a file to the Azure AI Studio project.
+        This method requires *azure-ai-ml* to be installed.
+
+        :param file_path: The path to the file to upload.
+        :type file_path: Union[str, Path, PathLike]
+        :return: The asset id of the uploaded file.
+        :rtype: str
+        """
+        try:
+            from azure.ai.ml import MLClient
+            from azure.ai.ml.entities import Data
+            from azure.ai.ml.constants import AssetTypes
+        except ImportError:
+            raise ImportError(
+                "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`")
+
+        data = Data(
+            path=file_path,
+            type=AssetTypes.URI_FILE,
+            name=str(uuid.uuid4()),  # generating random name
+            is_anonymous=True,
+            version="1",
+        )
+
+        ml_client = MLClient(
+            self._config3.credential,
+            self._config3.subscription_id,
+            self._config3.resource_group_name,
+            self._config3.project_name,
+        )
+
+        data_asset = ml_client.data.create_or_update(data)
+
+        return data_asset.id
+
+    @property
+    def scope(self) -> Dict[str, str]:
+        return {
+            "subscription_id": self._config3.subscription_id,
+            "resource_group_name": self._config3.resource_group_name,
+            "project_name": self._config3.project_name,
+        }
 
 __all__: List[str] = [
     "AIProjectClient",
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."}
+{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."}
+{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."}
Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_evaluations_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use evaluation operations from
+    the Azure Evaluation service using an asynchronous client.
+
+USAGE:
+    python sample_evaluations_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-ml azure-ai-evaluation azure-identity aiohttp
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import time
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType
+from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, ViolenceEvaluator
+
+
+async def main():
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+    )
+
+    # Upload data for evaluation
+    data_id = project_client.upload_file("./evaluate_test_data.jsonl")
+
+    default_connection = await project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
+
+    deployment_name = "<>"
+    api_version = "<>"
+
+    # Create an evaluation
+    evaluation = Evaluation(
+        display_name="Remote Evaluation",
+        description="Evaluation of dataset",
+        data=Dataset(id=data_id),
+        evaluators={
+            "f1_score": EvaluatorConfiguration(
+                # id=F1ScoreEvaluator.id,
+                id="azureml://registries/azureml-staging/models/F1Score-Evaluator/versions/3",
+            ),
+            "relevance": EvaluatorConfiguration(
+                # id=RelevanceEvaluator.id,
+                id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3",
+                init_params={
+                    "model_config": default_connection.to_evaluator_model_config(
+                        deployment_name=deployment_name, api_version=api_version
+                    )
+                },
+            ),
+            "violence": EvaluatorConfiguration(
+                # id=ViolenceEvaluator.id,
+                id="azureml://registries/azureml-staging/models/Violent-Content-Evaluator/versions/3",
+                init_params={
+                    "azure_ai_project": project_client.scope
+                },
+            ),
+        },
+    )
+
+    async with project_client:
+        # Create evaluation
+        evaluation_response = await project_client.evaluations.create(evaluation)
+
+        # Get evaluation
+        get_evaluation_response = await project_client.evaluations.get(evaluation_response.id)
+
+        print("----------------------------------------------------------------")
+        print("Created evaluation, evaluation ID: ", get_evaluation_response.id)
+        print("Evaluation status: ", get_evaluation_response.status)
+        print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"])
+        print("----------------------------------------------------------------")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
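The sample reads the status right after creation, when the run is usually still in progress. A small follow-on sketch of how the same client could poll until the run finishes; the terminal status strings below are an assumption, not something this commit defines:

```python
# Sketch only: reuses `project_client` from the async sample above.
# The terminal status names are assumed values, not defined by this commit.
import asyncio

ASSUMED_TERMINAL_STATUSES = {"Completed", "Failed", "Canceled"}


async def wait_for_evaluation(project_client, evaluation_id, poll_interval_seconds=30):
    """Poll the evaluation until it reaches a terminal status and return the final resource."""
    while True:
        evaluation = await project_client.evaluations.get(evaluation_id)
        print("Evaluation status:", evaluation.status)
        if evaluation.status in ASSUMED_TERMINAL_STATUSES:
            return evaluation
        await asyncio.sleep(poll_interval_seconds)
```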

sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py

Lines changed: 16 additions & 16 deletions
@@ -39,14 +39,12 @@
 )
 
 # Upload data for evaluation
-# Service side fix needed to make this work
-# data_id = project_client.upload_file("./evaluate_test_data.jsonl")
-data_id = (
-    "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3"
-)
+data_id = project_client.upload_file("./evaluate_test_data.jsonl")
 
 default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
 
+deployment_name = "<>"
+api_version = "<>"
 
 # Create an evaluation
 evaluation = Evaluation(
@@ -55,26 +53,28 @@
     data=Dataset(id=data_id),
     evaluators={
         "f1_score": EvaluatorConfiguration(
-            id=F1ScoreEvaluator.evaluator_id,
+            # id=F1ScoreEvaluator.id,
+            id="azureml://registries/azureml-staging/models/F1Score-Evaluator/versions/3",
         ),
         "relevance": EvaluatorConfiguration(
-            id=RelevanceEvaluator.evaluator_id,
+            # id=RelevanceEvaluator.id,
+            id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3",
             init_params={
-                "model_config": default_connection.to_evaluator_model_config(
-                    deployment_name="GPT-4-Prod", api_version="2024-08-01-preview"
-                )
+                "model_config": default_connection.to_evaluator_model_config(
+                    deployment_name=deployment_name, api_version=api_version
+                )
             },
         ),
-        "hate_unfairness": EvaluatorConfiguration(
-            id=HateUnfairnessEvaluator.evaluator_id,
-            init_params={"azure_ai_project": project_client.scope},
+        "violence": EvaluatorConfiguration(
+            # id=ViolenceEvaluator.id,
+            id="azureml://registries/azureml-staging/models/Violent-Content-Evaluator/versions/3",
+            init_params={
+                "azure_ai_project": project_client.scope
+            },
         ),
     },
-    # This is needed as a workaround until environment gets published to registry
-    properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"},
 )
 
-# Create evaluation
+
 evaluation_response = project_client.evaluations.create(
     evaluation=evaluation,
 )
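The shown hunks end at the create call. Assuming the synchronous client mirrors the `evaluations.get` operation used by the async sample above, fetching the created run and its AI Studio link would look roughly like this sketch (not part of this diff):

```python
# Sketch only: continues from the `project_client` and `evaluation_response`
# objects created in the synchronous sample; assumes evaluations.get mirrors the async client.
get_evaluation_response = project_client.evaluations.get(evaluation_response.id)

print("----------------------------------------------------------------")
print("Created evaluation, evaluation ID: ", get_evaluation_response.id)
print("Evaluation status: ", get_evaluation_response.status)
print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"])
print("----------------------------------------------------------------")
```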
