-
Notifications
You must be signed in to change notification settings - Fork 505
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
support string alg in tune #1093
Changes from all commits
f45a558
acd90ed
569c532
ea8557c
1da79e3
35f718d
43729b2
1d2c636
de5637b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -344,7 +344,7 @@ def easy_objective(config): | |
# do cleanup operation here | ||
return | ||
``` | ||
search_alg: An instance of BlendSearch as the search algorithm | ||
search_alg: An instance/string of the search algorithm | ||
to be used. The same instance can be used for iterative tuning. | ||
e.g., | ||
|
||
|
@@ -481,12 +481,25 @@ def easy_objective(config): | |
else: | ||
logger.setLevel(logging.CRITICAL) | ||
|
||
from .searcher.blendsearch import BlendSearch, CFO | ||
from .searcher.blendsearch import BlendSearch, CFO, RandomSearch | ||
|
||
if lexico_objectives is not None: | ||
logger.warning("If lexico_objectives is not None, search_alg is forced to be CFO") | ||
search_alg = None | ||
if search_alg is None: | ||
if "modes" not in lexico_objectives.keys(): | ||
lexico_objectives["modes"] = ["min"] * len(lexico_objectives["metrics"]) | ||
for t_metric, t_mode in zip(lexico_objectives["metrics"], lexico_objectives["modes"]): | ||
if t_metric not in lexico_objectives["tolerances"].keys(): | ||
lexico_objectives["tolerances"][t_metric] = 0 | ||
if t_metric not in lexico_objectives["targets"].keys(): | ||
lexico_objectives["targets"][t_metric] = -float("inf") if t_mode == "min" else float("inf") | ||
if search_alg is None or isinstance(search_alg, str): | ||
if isinstance(search_alg, str): | ||
assert search_alg in [ | ||
"BlendSearch", | ||
"CFO", | ||
"CFOCat", | ||
"RandomSearch", | ||
], f"search_alg={search_alg} is not recognized. 'BlendSearch', 'CFO', 'CFOCat' and 'RandomSearch' are supported." | ||
|
||
flaml_scheduler_resource_attr = ( | ||
flaml_scheduler_min_resource | ||
) = flaml_scheduler_max_resource = flaml_scheduler_reduction_factor = None | ||
|
@@ -500,20 +513,30 @@ def easy_objective(config): | |
flaml_scheduler_max_resource = max_resource | ||
flaml_scheduler_reduction_factor = reduction_factor | ||
scheduler = None | ||
if lexico_objectives is None: | ||
try: | ||
import optuna as _ | ||
if lexico_objectives: | ||
# TODO: Modify after supporting BlendSearch in lexicographic optimization | ||
SearchAlgorithm = CFO | ||
logger.info( | ||
f"Using search algorithm {SearchAlgorithm.__name__} for lexicographic optimization. Note that when providing other search algorithms, we use CFO instead temporarily." | ||
) | ||
metric = lexico_objectives["metrics"][0] or DEFAULT_METRIC | ||
else: | ||
if not search_alg or search_alg == "BlendSearch": | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. when There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. yes. resolved. |
||
try: | ||
import optuna as _ | ||
|
||
SearchAlgorithm = BlendSearch | ||
SearchAlgorithm = BlendSearch | ||
logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__)) | ||
except ImportError: | ||
if search_alg == "BlendSearch": | ||
raise ValueError("To use BlendSearch, run: pip install flaml[blendsearch]") | ||
else: | ||
SearchAlgorithm = CFO | ||
logger.warning("Using CFO for search. To use BlendSearch, run: pip install flaml[blendsearch]") | ||
else: | ||
SearchAlgorithm = locals()[search_alg] | ||
logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__)) | ||
except ImportError: | ||
SearchAlgorithm = CFO | ||
logger.warning("Using CFO for search. To use BlendSearch, run: pip install flaml[blendsearch]") | ||
metric = metric or DEFAULT_METRIC | ||
else: | ||
SearchAlgorithm = CFO | ||
logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__)) | ||
metric = lexico_objectives["metrics"][0] or DEFAULT_METRIC | ||
search_alg = SearchAlgorithm( | ||
metric=metric, | ||
mode=mode, | ||
|
@@ -535,8 +558,12 @@ def easy_objective(config): | |
) | ||
else: | ||
if metric is None or mode is None: | ||
metric = metric or search_alg.metric or DEFAULT_METRIC | ||
mode = mode or search_alg.mode | ||
if lexico_objectives: | ||
metric = lexico_objectives["metrics"][0] or metric or search_alg.metric or DEFAULT_METRIC | ||
mode = lexico_objectives["modes"][0] or mode or search_alg.mode | ||
else: | ||
metric = metric or search_alg.metric or DEFAULT_METRIC | ||
mode = mode or search_alg.mode | ||
if ray_available and use_ray: | ||
if ray_version.startswith("1."): | ||
from ray.tune.suggest import ConcurrencyLimiter | ||
|
@@ -555,6 +582,13 @@ def easy_objective(config): | |
): | ||
search_alg.use_incumbent_result_in_evaluation = use_incumbent_result_in_evaluation | ||
searcher = search_alg.searcher if isinstance(search_alg, ConcurrencyLimiter) else search_alg | ||
if lexico_objectives: | ||
# TODO: Modify after supporting BlendSearch in lexicographic optimization | ||
assert search_alg.__class__.__name__ in [ | ||
"CFO", | ||
], "If lexico_objectives is not None, the search_alg must be CFO for now." | ||
Comment on lines
+587
to
+589
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Such assertions and logging happen in different places: here, 519, 487. Could you add TODO notes to modify them later in case they are forgotten? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Resolved |
||
search_alg.lexico_objective = lexico_objectives | ||
|
||
if isinstance(searcher, BlendSearch): | ||
setting = {} | ||
if time_budget_s: | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,13 +1,14 @@ | ||
"""Require: pip install flaml[test,ray] | ||
""" | ||
from flaml import BlendSearch | ||
from flaml import BlendSearch, CFO | ||
import time | ||
import os | ||
from sklearn.model_selection import train_test_split | ||
import sklearn.metrics | ||
import sklearn.datasets | ||
import xgboost as xgb | ||
import logging | ||
import math | ||
|
||
try: | ||
from ray.tune.integration.xgboost import TuneReportCheckpointCallback | ||
|
@@ -20,6 +21,32 @@ | |
logger.setLevel(logging.INFO) | ||
|
||
|
||
def _BraninCurrin(config): | ||
# Rescale Branin inputs | ||
x_1 = 15 * config["x1"] - 5 | ||
x_2 = 15 * config["x2"] | ||
# Branin function | ||
t1 = x_2 - 5.1 / (4 * math.pi**2) * x_1**2 + 5 / math.pi * x_1 - 6 | ||
t2 = 10 * (1 - 1 / (8 * math.pi)) * math.cos(x_1) | ||
brain_result = t1**2 + t2 + 10 | ||
# Currin function | ||
xc_1 = config["x1"] | ||
xc_2 = config["x2"] | ||
factor1 = 1 - math.exp(-1 / (2 * xc_2)) | ||
numer = 2300 * pow(xc_1, 3) + 1900 * pow(xc_1, 2) + 2092 * xc_1 + 60 | ||
denom = 100 * pow(xc_1, 3) + 500 * pow(xc_1, 2) + 4 * xc_1 + 20 | ||
currin_result = factor1 * numer / denom | ||
return {"brain": brain_result, "currin": currin_result} | ||
|
||
|
||
def _easy_objective(config): | ||
# Hyperparameters | ||
width, height, step = config["width"], config["height"], config["steps"] | ||
|
||
# get_result | ||
return {"mean_loss": (0.1 + width * step / 100) ** (-1) + height * 0.1} | ||
|
||
|
||
def test_nested_run(): | ||
from flaml import AutoML, tune | ||
|
||
|
@@ -352,6 +379,72 @@ def evaluate_config_empty(config): | |
) | ||
|
||
|
||
def test_passing_search_alg(): | ||
from flaml import tune | ||
|
||
# search_space | ||
so_search_space = { | ||
"steps": 100, | ||
"width": tune.uniform(0, 20), | ||
"height": tune.uniform(-100, 100), | ||
} | ||
mo_search_space = { | ||
"x1": tune.uniform(lower=0.000001, upper=1.0), | ||
"x2": tune.uniform(lower=0.000001, upper=1.0), | ||
} | ||
|
||
# lexicographic objectives | ||
lexico_objectives = {} | ||
lexico_objectives["metrics"] = ["brain", "currin"] | ||
lexico_objectives["tolerances"] = {"brain": 10.0, "currin": 0.0} | ||
lexico_objectives["targets"] = {"brain": 0.0, "currin": 0.0} | ||
lexico_objectives["modes"] = ["min", "min"] | ||
|
||
## Passing search_alg through string | ||
# Non lexico tune | ||
tune.run( | ||
_easy_objective, | ||
search_alg="BlendSearch", | ||
metric="mean_loss", | ||
mode="min", | ||
num_samples=10, | ||
config=so_search_space, | ||
) | ||
# lexico tune | ||
tune.run( | ||
_BraninCurrin, search_alg="CFO", num_samples=10, config=mo_search_space, lexico_objectives=lexico_objectives | ||
) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Add another test with lexico_objectives and There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. added |
||
tune.run( | ||
_BraninCurrin, | ||
search_alg="BlendSearch", | ||
num_samples=10, | ||
config=mo_search_space, | ||
lexico_objectives=lexico_objectives, | ||
) | ||
|
||
## Passing search_alg through instance | ||
so_bs = BlendSearch(time_budget_s=5, metric="mean_loss", mode="min") | ||
# TODO: We will change CFO into blendsearch in the future | ||
mo_bs = CFO(time_budget_s=5) | ||
sonichi marked this conversation as resolved.
Show resolved
Hide resolved
|
||
# Non lexico tune | ||
tune.run( | ||
_easy_objective, | ||
search_alg=so_bs, | ||
metric="mean_loss", | ||
mode="min", | ||
num_samples=10, | ||
config=so_search_space, | ||
) | ||
# lexico tune | ||
tune.run( | ||
_BraninCurrin, | ||
search_alg=mo_bs, | ||
num_samples=10, | ||
config=mo_search_space, | ||
lexico_objectives=lexico_objectives, | ||
) | ||
|
||
|
||
def test_xgboost_bs(): | ||
_test_xgboost() | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
RandomSearch is not used.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
we may use it in eval(search_alg)