docs: Update documentation in the code (#11)
Corrected grammar, punctuation, and spelling errors in the code's documentation. Improved clarity and readability.

Signed-off-by: Aashish Saini <141953346+AashishSainiShorthillsAI@users.noreply.github.com>
Co-authored-by: Aashish Saini <141953346+AashishSainiShorthillsAI@users.noreply.github.com>
ShorthillsAI authored Oct 17, 2023
1 parent ca85f0a commit 00c53e7
Showing 1 changed file with 18 additions and 21 deletions.
39 changes: 18 additions & 21 deletions examples/hyperparam/search_hyperopt.py
@@ -1,13 +1,12 @@
"""
Example of hyperparameter search in MLflow using Hyperopt.
-The run method will instantiate and run Hyperopt optimizer. Each parameter configuration is
-evaluated in a new MLflow run invoking main entry point with selected parameters.
+The `run` method will instantiate and run the Hyperopt optimizer. Each parameter configuration is
+evaluated in a new MLflow run, invoking the main entry point with the selected parameters.
-The runs are evaluated based on validation set loss. Test set score is calculated to verify the
+The runs are evaluated based on the validation set loss. Test set scores are calculated to verify the
results.
This example currently does not support parallel execution.
"""

@@ -20,9 +19,8 @@

_inf = np.finfo(np.float64).max

-
@click.command(
help="Perform hyperparameter search with Hyperopt library. Optimize dl_train target."
help="Perform hyperparameter search with the Hyperopt library. Optimize the dl_train target."
)
@click.option("--max-runs", type=click.INT, default=10, help="Maximum number of runs to evaluate.")
@click.option("--epochs", type=click.INT, default=500, help="Number of epochs")
@@ -34,34 +32,34 @@ def train(training_data, max_runs, epochs, metric, algo, seed):
"""
Run hyperparameter optimization.
"""
-# create random file to store run ids of the training tasks
+# create a random file to store run ids of the training tasks
tracking_client = MlflowClient()

def new_eval(
nepochs, experiment_id, null_train_loss, null_valid_loss, null_test_loss, return_all=False
):
"""
-Create a new eval function
+Create a new eval function.
:param nepochs: Number of epochs to train the model.
-:experiment_id: Experiment id for the training run
-:valid_null_loss: Loss of a null model on the validation dataset
-:test_null_loss: Loss of a null model on the test dataset.
-:return_test_loss: Return both validation and test loss if set.
+:param experiment_id: Experiment id for the training run.
+:param null_valid_loss: Loss of a null model on the validation dataset.
+:param null_test_loss: Loss of a null model on the test dataset.
+:param return_all: Return both validation and test loss if set.
-:return: new eval function.
+:return: A new eval function.
"""

def eval(params):
"""
-Train Keras model with given parameters by invoking MLflow run.
+Train a Keras model with given parameters by invoking an MLflow run.
-Notice we store runUuid and resulting metric in a file. We will later use these to pick
+Notice we store runUuid and the resulting metric in a file. We will later use these to pick
the best run and to log the runUuids of the child runs as an artifact. This is a
-temporary workaround until MLflow offers better mechanism of linking runs together.
+temporary workaround until MLflow offers a better mechanism for linking runs together.
-:param params: Parameters to the train_keras script we optimize over:
-learning_rate, drop_out_1
+:param params: Parameters for the train_keras script we optimize over:
+learning_rate, dropout_1.
:return: The metric value evaluated on the validation data.
"""
import mlflow.tracking
@@ -80,7 +78,7 @@ def eval(params):
"seed": seed,
},
experiment_id=experiment_id,
-synchronous=False, # Allow the run to fail if a model is not properly created
+synchronous=False, # Allow the run to fail if a model is not properly created.
)
succeeded = p.wait()
mlflow.log_params({"lr": lr, "momentum": momentum})
@@ -121,7 +119,7 @@ def eval(params):

with mlflow.start_run() as run:
experiment_id = run.info.experiment_id
-# Evaluate null model first.
+# Evaluate the null model first.
train_null_loss, valid_null_loss, test_null_loss = new_eval(
0, experiment_id, _inf, _inf, _inf, True
)(params=[0, 0])
@@ -156,6 +154,5 @@ def eval(params):
}
)

-
if __name__ == "__main__":
train()
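
For context on the mechanism the updated docstrings describe, here is a minimal, hypothetical sketch of that pattern: a Hyperopt objective that launches one MLflow child run per parameter configuration and reads back its validation loss. The entry-point name, parameter names, search space, and the "val_loss" metric key are illustrative assumptions, not the values used by search_hyperopt.py.

# Illustrative sketch only (not part of this commit): one MLflow child run per
# Hyperopt trial, mirroring the pattern described in the docstrings above.
# The entry point "train", the parameter names, and the "val_loss" metric key
# are assumptions for illustration.
import mlflow
import mlflow.projects
from hyperopt import fmin, hp, tpe
from mlflow.tracking import MlflowClient


def build_objective(experiment_id):
    def eval_fn(params):
        lr, momentum = params
        # Each trial invokes the project's training entry point as its own MLflow run.
        submitted = mlflow.projects.run(
            uri=".",
            entry_point="train",  # hypothetical entry-point name
            parameters={"learning_rate": str(lr), "momentum": str(momentum)},
            experiment_id=experiment_id,
            synchronous=False,  # let a single trial fail without aborting the search
        )
        succeeded = submitted.wait()
        if not succeeded:
            return float("inf")  # penalize failed runs so Hyperopt avoids that region
        child = MlflowClient().get_run(submitted.run_id)
        return child.data.metrics["val_loss"]  # assumed metric logged by the child run

    return eval_fn


if __name__ == "__main__":
    with mlflow.start_run() as parent:
        space = [hp.uniform("lr", 1e-5, 1e-1), hp.uniform("momentum", 0.0, 1.0)]
        best = fmin(
            fn=build_objective(parent.info.experiment_id),
            space=space,
            algo=tpe.suggest,
            max_evals=10,
        )
        mlflow.log_params(best)  # record the best hyperparameters on the parent run

The actual example additionally evaluates a null model first (visible in the last hunks) and threads its losses into new_eval as baselines.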
