Skip to content

Commit

Permalink
feat: add ModelEvaluation support (#1167)
Browse files Browse the repository at this point in the history
  • Loading branch information
sararob committed Apr 20, 2022
1 parent c1e899d commit 10f95cd
Show file tree
Hide file tree
Showing 7 changed files with 535 additions and 1 deletion.
41 changes: 41 additions & 0 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,47 @@ Please visit `Importing models to Vertex AI`_ for a detailed overview:

.. _Importing models to Vertex AI: https://cloud.google.com/vertex-ai/docs/general/import-model

Model Evaluation
----------------

The Vertex AI SDK for Python currently supports getting model evaluation metrics for all AutoML models.

To list all model evaluations for a model:

.. code-block:: Python

    model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')
    evaluations = model.list_model_evaluations()

To get the model evaluation resource for a given model:

.. code-block:: Python

    model = aiplatform.Model('/projects/my-project/locations/us-central1/models/{MODEL_ID}')

    # returns the first evaluation with no arguments, you can also pass the evaluation ID
    evaluation = model.get_model_evaluation()
    eval_metrics = evaluation.metrics

You can also create a reference to your model evaluation directly by passing in the resource name of the model evaluation:

.. code-block:: Python

    evaluation = aiplatform.ModelEvaluation(
        evaluation_name='/projects/my-project/locations/us-central1/models/{MODEL_ID}/evaluations/{EVALUATION_ID}')

Alternatively, you can create a reference to your evaluation by passing in the model and evaluation IDs:

.. code-block:: Python

    evaluation = aiplatform.ModelEvaluation(
        evaluation_name={EVALUATION_ID},
        model_id={MODEL_ID})

Batch Prediction
----------------
Expand Down
4 changes: 3 additions & 1 deletion google/cloud/aiplatform/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-

# Copyright 2020 Google LLC
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -41,6 +41,7 @@
from google.cloud.aiplatform.metadata import metadata
from google.cloud.aiplatform.models import Endpoint
from google.cloud.aiplatform.models import Model
from google.cloud.aiplatform.model_evaluation import ModelEvaluation
from google.cloud.aiplatform.jobs import (
BatchPredictionJob,
CustomJob,
Expand Down Expand Up @@ -107,6 +108,7 @@
"ImageDataset",
"HyperparameterTuningJob",
"Model",
"ModelEvaluation",
"PipelineJob",
"TabularDataset",
"Tensorboard",
Expand Down
20 changes: 20 additions & 0 deletions google/cloud/aiplatform/model_evaluation/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from google.cloud.aiplatform.model_evaluation.model_evaluation import ModelEvaluation

__all__ = ("ModelEvaluation",)
93 changes: 93 additions & 0 deletions google/cloud/aiplatform/model_evaluation/model_evaluation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from google.auth import credentials as auth_credentials

from google.cloud.aiplatform import base
from google.cloud.aiplatform import utils
from google.cloud.aiplatform import models
from google.protobuf import struct_pb2

from typing import Optional


class ModelEvaluation(base.VertexAiResourceNounWithFutureManager):
    """Vertex AI model evaluation resource (read-only accessor).

    Wraps a ``ModelEvaluation`` resource nested under a Model, e.g.
    ``projects/123/locations/us-central1/models/456/evaluations/789``.
    """

    client_class = utils.ModelClientWithOverride
    _resource_noun = "evaluations"
    # Deletion is not supported by the service client; see delete() below.
    _delete_method = None
    _getter_method = "get_model_evaluation"
    _list_method = "list_model_evaluations"
    _parse_resource_name_method = "parse_model_evaluation_path"
    _format_resource_name_method = "model_evaluation_path"

    @property
    def metrics(self) -> Optional[struct_pb2.Value]:
        """The evaluation metrics stored on this Model Evaluation.

        Returns:
            The ``metrics`` field of the underlying resource proto, or None
            if the metrics for this evaluation are empty.
        """
        return self._gca_resource.metrics

    def __init__(
        self,
        evaluation_name: str,
        model_id: Optional[str] = None,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
    ):
        """Retrieves the ModelEvaluation resource and instantiates its representation.

        Args:
            evaluation_name (str):
                Required. A fully-qualified model evaluation resource name or
                evaluation ID. Example:
                "projects/123/locations/us-central1/models/456/evaluations/789"
                or "789". If passing only the evaluation ID, ``model_id`` must
                be provided.
            model_id (str):
                Optional. The ID of the model to retrieve this evaluation
                from. Required when ``evaluation_name`` is a bare ID.
            project (str):
                Optional. Project to retrieve the model evaluation from. Falls
                back to the project set in ``aiplatform.init``.
            location (str):
                Optional. Location to retrieve the model evaluation from.
                Falls back to the location set in ``aiplatform.init``.
            credentials (auth_credentials.Credentials):
                Optional. Custom credentials used to retrieve this model
                evaluation. Falls back to credentials set in
                ``aiplatform.init``.
        """
        super().__init__(
            project=project,
            location=location,
            credentials=credentials,
            resource_name=evaluation_name,
        )

        # When a model ID is supplied, tell the resource fetcher which parent
        # ("models/<id>") the bare evaluation ID belongs to; otherwise pass
        # the (falsy) value through unchanged.
        if model_id:
            parent_fields = {models.Model._resource_noun: model_id}
        else:
            parent_fields = model_id

        self._gca_resource = self._get_gca_resource(
            resource_name=evaluation_name,
            parent_resource_name_fields=parent_fields,
        )

    def delete(self):
        """Unsupported: model evaluations cannot be deleted via this SDK."""
        raise NotImplementedError(
            "Deleting a model evaluation has not been implemented yet."
        )
77 changes: 77 additions & 0 deletions google/cloud/aiplatform/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
from google.cloud.aiplatform import models
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import gcs_utils
from google.cloud.aiplatform import model_evaluation

from google.cloud.aiplatform.compat.services import endpoint_service_client

Expand Down Expand Up @@ -3210,3 +3211,79 @@ def upload_tensorflow_saved_model(
sync=sync,
upload_request_timeout=upload_request_timeout,
)

def list_model_evaluations(
    self,
) -> List["model_evaluation.ModelEvaluation"]:
    """List all Model Evaluation resources associated with this model.

    Example Usage:

        my_model = Model(
            model_name="projects/123/locations/us-central1/models/456"
        )
        my_evaluations = my_model.list_model_evaluations()

    Returns:
        List[model_evaluation.ModelEvaluation]: List of ModelEvaluation
        resources for the model.
    """
    # Block until any in-flight creation/update of this model finishes so the
    # resource name is valid before listing its child evaluations.
    self.wait()

    evaluations = model_evaluation.ModelEvaluation._list(
        parent=self.resource_name,
        credentials=self.credentials,
    )
    return evaluations

def get_model_evaluation(
    self,
    evaluation_id: Optional[str] = None,
) -> Optional[model_evaluation.ModelEvaluation]:
    """Returns a ModelEvaluation resource and instantiates its representation.

    If no ``evaluation_id`` is passed, returns the first evaluation associated
    with this model, or None if the model has no evaluations.

    Example usage:

        my_model = Model(
            model_name="projects/123/locations/us-central1/models/456"
        )
        my_evaluation = my_model.get_model_evaluation(
            evaluation_id="789"
        )

        # If no arguments are passed, this returns the first evaluation for
        # the model
        my_evaluation = my_model.get_model_evaluation()

    Args:
        evaluation_id (str):
            Optional. The ID of the model evaluation to retrieve.

    Returns:
        model_evaluation.ModelEvaluation: Instantiated representation of the
        ModelEvaluation resource, or None when no evaluation_id is given and
        the model has no evaluations.
    """

    evaluations = self.list_model_evaluations()

    if not evaluation_id:
        # FIX: previously returned the empty list itself when the model had
        # no evaluations, contradicting the Optional[ModelEvaluation] return
        # type; return None instead.
        if not evaluations:
            return None
        if len(evaluations) > 1:
            _LOGGER.warning(
                f"Your model has more than one model evaluation, this is returning only one evaluation resource: {evaluations[0].resource_name}"
            )
        return evaluations[0]
    else:
        # Build the fully-qualified evaluation resource name from this
        # model's own resource name plus the requested evaluation ID.
        resource_uri_parts = self._parse_resource_name(self.resource_name)
        evaluation_resource_name = (
            model_evaluation.ModelEvaluation._format_resource_name(
                **resource_uri_parts,
                evaluation=evaluation_id,
            )
        )

        return model_evaluation.ModelEvaluation(
            evaluation_name=evaluation_resource_name,
            credentials=self.credentials,
        )
Loading

0 comments on commit 10f95cd

Please sign in to comment.