Fixed learning_rate_scheduler params in automl #3203

Merged · 7 commits · Mar 4, 2023
2 changes: 1 addition & 1 deletion .github/workflows/pytest.yml
@@ -59,7 +59,7 @@ jobs:
         ports:
           - 9000:9000

-    timeout-minutes: 90
+    timeout-minutes: 120
     steps:
       - name: Setup ludwigai/ludwig-ray container for local testing with act.
         if: ${{ env.ACT }}
4 changes: 2 additions & 2 deletions ludwig/automl/defaults/combiner/tabnet_config.yaml
@@ -16,10 +16,10 @@ hyperopt:
     trainer.learning_rate:
       space: choice
       categories: [0.005, 0.01, 0.02, 0.025]
-    trainer.decay_rate:
+    trainer.learning_rate_scheduler.decay_rate:
      space: choice
      categories: [0.8, 0.9, 0.95]
-    trainer.decay_steps:
+    trainer.learning_rate_scheduler.decay_steps:
      space: choice
      categories: [500, 2000, 8000, 10000, 20000]
     combiner.size:
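For context, a minimal sketch (not part of the PR) of what the rename means: in Ludwig hyperopt, a dotted parameter name like trainer.learning_rate_scheduler.decay_rate addresses a nested key in the resolved config, so a sampled trial's trainer section would be shaped roughly like the dict below. The concrete values are illustrative picks from the search spaces above.

```python
# Illustrative only: the shape a sampled trial's trainer section takes once
# the dotted hyperopt names above are resolved into nested config keys.
sampled_trainer_config = {
    "learning_rate": 0.01,  # sampled from [0.005, 0.01, 0.02, 0.025]
    "learning_rate_scheduler": {  # scheduler params now live in this sub-section
        "decay_rate": 0.9,  # sampled from [0.8, 0.9, 0.95]
        "decay_steps": 8000,  # sampled from [500, 2000, 8000, 10000, 20000]
    },
}
```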
4 changes: 2 additions & 2 deletions ludwig/benchmarking/examples/process_config.py
@@ -36,9 +36,9 @@ def process_config(ludwig_config: dict, experiment_dict: dict) -> dict:
                 "categories": ["fill_with_const", "fill_with_mean"],
             },
             "combiner.type": {"space": "choice", "categories": ["tabnet", "concat"]},
-            "trainer.decay": {"space": "choice", "categories": [True, False]},
+            "trainer.learning_rate_scheduler.decay": {"space": "choice", "categories": [True, False]},
             "trainer.learning_rate": {"space": "loguniform", "lower": 0.0001, "upper": 0.1},
-            "trainer.decay_rate": {"space": "uniform", "lower": 0.4, "upper": 0.96},
+            "trainer.learning_rate_scheduler.decay_rate": {"space": "uniform", "lower": 0.4, "upper": 0.96},
             "trainer.batch_size": {"space": "randint", "lower": 32, "upper": 2048},
         },
         "search_alg": {"type": "hyperopt"},
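Since the change is essentially a key rename, a hypothetical migration helper (not in the PR; LEGACY_TO_NESTED and migrate_hyperopt_parameters are names invented here) shows how an old-style hyperopt parameters dict could be rewritten to the new nested names:

```python
# Hypothetical helper, not part of this PR: rename legacy flat scheduler
# parameters to their learning_rate_scheduler.* equivalents.
LEGACY_TO_NESTED = {
    "trainer.decay": "trainer.learning_rate_scheduler.decay",
    "trainer.decay_rate": "trainer.learning_rate_scheduler.decay_rate",
    "trainer.decay_steps": "trainer.learning_rate_scheduler.decay_steps",
}


def migrate_hyperopt_parameters(parameters: dict) -> dict:
    """Return a copy of a hyperopt parameters dict with legacy keys renamed."""
    return {LEGACY_TO_NESTED.get(name, name): space for name, space in parameters.items()}


# Example: the old search space from this file migrates cleanly.
old = {"trainer.decay_rate": {"space": "uniform", "lower": 0.4, "upper": 0.96}}
assert "trainer.learning_rate_scheduler.decay_rate" in migrate_hyperopt_parameters(old)
```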
23 changes: 22 additions & 1 deletion tests/integration_tests/test_automl.py
@@ -27,8 +27,14 @@
 ray = pytest.importorskip("ray")

 import dask.dataframe as dd  # noqa
+from ray.tune.experiment.trial import Trial  # noqa

-from ludwig.automl import create_auto_config, create_auto_config_with_dataset_profile, train_with_config  # noqa
+from ludwig.automl import (  # noqa
+    auto_train,
+    create_auto_config,
+    create_auto_config_with_dataset_profile,
+    train_with_config,
+)
 from ludwig.hyperopt.execution import RayTuneExecutor  # noqa

 pytestmark = pytest.mark.distributed
@@ -299,6 +305,21 @@ def test_train_with_config(time_budget, test_data_tabular_large, ray_cluster_2cpu, tmpdir):
     _run_train_with_config(time_budget, test_data_tabular_large, tmpdir)


+@pytest.mark.distributed
+def test_auto_train(test_data_tabular_large, ray_cluster_2cpu, tmpdir):
+    _, ofeatures, dataset_csv = test_data_tabular_large
+    results = auto_train(
+        dataset=dataset_csv,
+        target=ofeatures[0][NAME],
+        time_limit_s=120,
+        user_config={"hyperopt": {"executor": {"num_samples": 2}}},
+    )
+
+    analysis = results.experiment_analysis
+    for trial in analysis.trials:
+        assert trial.status != Trial.ERROR, f"Error in trial {trial}"
+
+
 @pytest.mark.parametrize("fs_protocol,bucket", [private_param(("s3", "ludwig-tests"))], ids=["s3"])
 def test_train_with_config_remote(fs_protocol, bucket, test_data_tabular_large, ray_cluster_2cpu):
     backend = {
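As a usage note, a sketch of inspecting the results object beyond the pass/fail check in test_auto_train above. Assumptions: results.experiment_analysis is a Ray Tune ExperimentAnalysis (as the test suggests), get_best_trial is its standard API, and "metric_score" is a placeholder for whatever metric the hyperopt run actually reports.

```python
# Sketch, not part of the PR: continues from the test body above, where
# `results` is the AutoTrainResults returned by auto_train.
analysis = results.experiment_analysis

# get_best_trial is standard Ray Tune ExperimentAnalysis API; "metric_score"
# is a placeholder metric name, not one confirmed by this PR.
best = analysis.get_best_trial(metric="metric_score", mode="max")
if best is not None:
    print(f"best trial: {best.trial_id}, config: {best.config}")
```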