Merge pull request #179 from hrntsm/featura/updata-optuna-v3.2
Featura/updata optuna v3.2
hrntsm authored Jul 24, 2023
2 parents a6f75ad + 6ea3a79 commit ce70bab
Showing 23 changed files with 1,175 additions and 485 deletions.
6 changes: 5 additions & 1 deletion .gitignore
@@ -451,4 +451,8 @@ $RECYCLE.BIN/
 !.vscode/settings.json
 !.vscode/tasks.json
 !.vscode/launch.json
-!.vscode/extensions.json
+!.vscode/extensions.json
+
+**/venv
+*.db
+*.log
10 changes: 8 additions & 2 deletions CHANGELOG.md
@@ -15,13 +15,17 @@ Please see [here](https://github.com/hrntsm/Tunny/releases) for the data release
 - Add 2 sample gh file
 - Support CMA-ES with Margin
   - It allows for more efficient optimization in mixed integer problems.
+- Support NSGA-III
+  - For more than 3 objective optimization.
+- Python sample code
+- Open optuna dashboard menu item
 
 
 ### Changed
 
 - When optimizing with CMA-ES, the with Margin option is enabled by default.
-- Bump up optuna v3.1.1
-- Bump up optuna-dashboard v0.9.2
+- Bump up optuna v3.2.0
+- Bump up optuna-dashboard v0.10.2
 
 ### Deprecated
 
@@ -36,6 +40,8 @@ Please see [here](https://github.com/hrntsm/Tunny/releases) for the data release
 
 ### Security
 
+- Bump up scipy v1.10.0
+
 ## [v0.7.2] -2023-03-22
 
 ### Added
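For orientation, a minimal sketch (not part of this commit's diff) of how the two newly supported samplers are constructed in Optuna. It assumes Optuna 3.2 with default settings, and the toy objectives are invented for illustration; the full comparison sample added by this PR, Samples/Python/compare_sampler.py, exercises the same samplers.

import optuna


def objective(trial):
    # Toy mixed-integer problem: the case CMA-ES with Margin is meant for.
    x = trial.suggest_float("x", -5, 5)
    y = trial.suggest_int("y", -5, 5)
    return x**2 + y**2


# CMA-ES with Margin (enabled by default when Tunny runs CMA-ES in this release).
cmaes_study = optuna.create_study(
    sampler=optuna.samplers.CmaEsSampler(with_margin=True)
)
cmaes_study.optimize(objective, n_trials=30)


def many_objective(trial):
    # Three objectives: the many-objective case NSGA-III is designed for.
    x = trial.suggest_float("x", 0, 1)
    y = trial.suggest_float("y", 0, 1)
    return x, y, (x - y) ** 2


nsga3_study = optuna.create_study(
    directions=["minimize"] * 3,
    sampler=optuna.samplers.NSGAIIISampler(population_size=20),
)
nsga3_study.optimize(many_objective, n_trials=60)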
File renamed without changes.
File renamed without changes.
File renamed without changes.
78 changes: 78 additions & 0 deletions Samples/Python/compare_sampler.py
@@ -0,0 +1,78 @@
# #############################################################################
# This is an example of how to compare each sampler that Optuna can handle.
# #############################################################################

import optuna

n_trials = 50


def objective(trial):
x = trial.suggest_float("x", -5, 5, step=0.1)
y = trial.suggest_int("y", -5, 5)
return x**2 + y**2


studies = []

# compare samplers
cmaes = optuna.samplers.CmaEsSampler(with_margin=True)
study_cmaes = optuna.create_study(
sampler=cmaes, direction="minimize", study_name="cmaes"
)
study_cmaes.optimize(objective, n_trials=n_trials)
studies.append(study_cmaes)

nsgaii = optuna.samplers.NSGAIISampler(population_size=10)
study_nsgaii = optuna.create_study(
sampler=nsgaii, direction="minimize", study_name="nsgaii"
)
study_nsgaii.optimize(objective, n_trials=n_trials)
studies.append(study_nsgaii)

nsgaiii = optuna.samplers.NSGAIIISampler(population_size=10)
study_nsgaiii = optuna.create_study(
sampler=nsgaiii, direction="minimize", study_name="nsgaiii"
)
study_nsgaiii.optimize(objective, n_trials=n_trials)
studies.append(study_nsgaiii)

tpe = optuna.samplers.TPESampler()
study_tpe = optuna.create_study(sampler=tpe, direction="minimize", study_name="tpe")
study_tpe.optimize(objective, n_trials=n_trials)
studies.append(study_tpe)

bo = optuna.integration.BoTorchSampler()
study_bo = optuna.create_study(sampler=bo, direction="minimize", study_name="bo")
study_bo.optimize(objective, n_trials=n_trials)
studies.append(study_bo)

random = optuna.samplers.RandomSampler()
study_random = optuna.create_study(
sampler=random, direction="minimize", study_name="random"
)
study_random.optimize(objective, n_trials=n_trials)
studies.append(study_random)

qmc = optuna.samplers.QMCSampler()
study_qmc = optuna.create_study(sampler=qmc, direction="minimize", study_name="qmc")
study_qmc.optimize(objective, n_trials=n_trials)
studies.append(study_qmc)

brute = optuna.samplers.BruteForceSampler()
study_brute = optuna.create_study(
sampler=brute, direction="minimize", study_name="brute"
)
study_brute.optimize(objective, n_trials=n_trials)
studies.append(study_brute)

print("Result")
print(" true value : 0.0 {'x': 0.0, 'y': 0}")
print(" CmaEsSampler : ", study_cmaes.best_value, study_cmaes.best_params)
print(" NSGAIISampler : ", study_nsgaii.best_value, study_nsgaii.best_params)
print(" NSGAIIISampler : ", study_nsgaiii.best_value, study_nsgaiii.best_params)
print(" TPESampler : ", study_tpe.best_value, study_tpe.best_params)
print(" BoTorchSampler : ", study_bo.best_value, study_bo.best_params)
print(" RandomSampler : ", study_random.best_value, study_random.best_params)
print(" QMCSampler : ", study_qmc.best_value, study_qmc.best_params)
print(" BruteForceSampler: ", study_brute.best_value, study_brute.best_params)

fig = optuna.visualization.plot_optimization_history(studies, error_bar=False)
fig.show()
29 changes: 29 additions & 0 deletions Samples/Python/create_storage.py
@@ -0,0 +1,29 @@
# #############################################################################
# This is an example of creating STORAGE.
# #############################################################################

import optuna


def objective(trial):
x = trial.suggest_float("x", -100, 100)
return x**2


# SQLite3 storage
storage = optuna.storages.RDBStorage(
url="sqlite:///test.db",
)
study_db = optuna.create_study(storage=storage)
study_db.optimize(objective, n_trials=10)


# Journal storage
file_path = "test.log"
lock_obj = optuna.storages.JournalFileOpenLock(file_path)

storage = optuna.storages.JournalStorage(
optuna.storages.JournalFileStorage(file_path, lock_obj=lock_obj),
)
study_journal = optuna.create_study(storage=storage)
study_journal.optimize(objective, n_trials=10)
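A possible follow-up, sketched here as an assumed workflow rather than part of this commit: a storage file such as test.db created above can be browsed with optuna-dashboard, which this release bumps to v0.10.2 and exposes through Tunny's new "Open optuna dashboard" menu item. The host and port values below are illustrative.

# Inspect the SQLite storage created above with optuna-dashboard.
# CLI form:
#   optuna-dashboard sqlite:///test.db
# Programmatic form (assuming optuna-dashboard 0.10.x is installed):
from optuna_dashboard import run_server

run_server("sqlite:///test.db", host="127.0.0.1", port=8080)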
43 changes: 43 additions & 0 deletions Samples/Python/cull_trial.py
@@ -0,0 +1,43 @@
# ##############################################################################
# This is an example of how to cull unintended trial results from STUDY
# ##############################################################################

import optuna

storage_path = "fish.log"
target_study_name = "study_target"
culled_study_name = "study_cull"
cull_trial_number = [10, 15, 17]

# If you use a .log file, use the code below.
lock_obj = optuna.storages.JournalFileOpenLock(storage_path)
storage = optuna.storages.JournalStorage(
optuna.storages.JournalFileStorage(storage_path, lock_obj=lock_obj),
)

# If you use an RDB, use the code below.
# storage = optuna.storages.RDBStorage("sqlite:///" + storage_path)

# Load storage
study = optuna.load_study(storage=storage, study_name=target_study_name)
usr_attr = study.user_attrs
trials = study.get_trials()
directions = study.directions

# Create new study to save cull result
cull_study = optuna.create_study(
storage=storage, study_name=culled_study_name, directions=directions
)

# If you want to read this result file from Tunny, you need to set the following.
# Tunny needs to read some attributes
for key, value in usr_attr.items():
cull_study.set_user_attr(key, value)

# Cull trials
for num in cull_trial_number:
trials = list(filter(lambda trial: trial.number != num, trials))
cull_study.add_trials(trials=trials)

# Visualize the result (if you want)
optuna.visualization.plot_pareto_front(cull_study).show()
63 changes: 63 additions & 0 deletions Samples/Python/plot.py
@@ -0,0 +1,63 @@
# #############################################################################
# This is an example of how to use the visualization function.
# #############################################################################


import optuna

n_trials = 50


def objective(trial):
# Rosenbrock function
x = trial.suggest_float("x", -5, 5, step=0.1)
y = trial.suggest_int("y", -5, 5)
trial.set_user_attr("too_long_str", "too_long_str, " * 100)

return [(1 - x) ** 2 + 100 * (y - x**2) ** 2, x]


tpe = optuna.samplers.TPESampler()
study = optuna.create_study(sampler=tpe, directions=["minimize", "minimize"])
study.optimize(objective, n_trials=n_trials)

name = "Rosenbrock function"

optuna.visualization.plot_slice(
study,
params=["x", "y"],
target=lambda t: t.values[0],
target_name=name,
).show()

optuna.visualization.plot_pareto_front(
study,
target_names=[name, "x"],
).show()

optuna.visualization.plot_param_importances(
study,
target=lambda t: t.values[0],
target_name=name,
).show()

optuna.visualization.plot_contour(
study,
params=["x", "y"],
target=lambda t: t.values[0],
target_name=name,
).show()

optuna.visualization.plot_optimization_history(
study, target=lambda t: t.values[0], target_name=name
).show()

optuna.visualization.plot_parallel_coordinate(
study, params=["x", "y"], target=lambda t: t.values[0], target_name=name
).show()

optuna.visualization.plot_edf(
study,
target=lambda t: t.values[0],
target_name=name,
).show()
85 changes: 85 additions & 0 deletions Samples/Python/plot_with_coloring_of_results.py
@@ -0,0 +1,85 @@
# #############################################################################
# This example shows how to color and plot the optimized results.
# #############################################################################

import optuna
import plotly.graph_objects as go


def objective(trial):
x = trial.suggest_float("x", -10, 10)
y = trial.suggest_float("y", -10, 10)
return [x + y, x - y]


study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=100)

criteria_x = 0
criteria_value = 5

good_trials = []
no_good_trials = []

for trial in filter(
lambda t: t.state == optuna.trial.TrialState.COMPLETE, study.trials
):
if trial.values[0] < criteria_value and trial.params["x"] < criteria_x:
good_trials.append(trial)
else:
no_good_trials.append(trial)

traces = []

traces.append(
go.Scatter(
x=[t.values[0] for t in good_trials],
y=[t.values[1] for t in good_trials],
mode="markers",
name="good",
marker={"color": "blue"},
)
)

traces.append(
go.Scatter(
x=[t.values[0] for t in no_good_trials],
y=[t.values[1] for t in no_good_trials],
mode="markers",
name="no good",
marker={"color": "#cccccc"},
)
)

fig = go.Figure(traces)
fig.update_layout(
plot_bgcolor="white",
xaxis=dict(
title="x+y",
showline=True,
linewidth=1,
linecolor="black",
zeroline=True,
zerolinecolor="black",
zerolinewidth=1,
showgrid=True,
gridcolor="lightgray",
range=[-10, 10],
),
yaxis=dict(
title="x-y",
showline=True,
linewidth=1,
linecolor="black",
zeroline=True,
zerolinecolor="black",
zerolinewidth=1,
showgrid=True,
gridcolor="lightgray",
range=[-10, 10],
),
width=640,
height=480,
)

fig.show()
6 changes: 3 additions & 3 deletions Tunny/Lib/requirements.txt
@@ -13,15 +13,15 @@ MarkupSafe==2.1.2
 multipledispatch==0.6.0
 numpy==1.24.1
 opt-einsum==3.3.0
-optuna==3.1.1
-optuna-dashboard==0.9.2
+optuna==3.2.0
+optuna-dashboard==0.10.2
 packaging==23.0
 plotly==5.9.0
 pyro-api==0.1.2
 pyro-ppl==1.8.4
 PyYAML==6.0
 scikit-learn==1.2.0
-scipy==1.8.1
+scipy==1.10.0
 six==1.16.0
 SQLAlchemy==1.4.46
 tenacity==8.1.0
11 changes: 11 additions & 0 deletions Tunny/Settings/Sampler/NSGAIII.cs
@@ -0,0 +1,11 @@
namespace Tunny.Settings.Sampler
{
/// <summary>
/// https://optuna.readthedocs.io/en/stable/reference/generated/optuna.samplers.NSGAIIISampler.html
/// </summary>
public class NSGAIII : NSGAII
{
public double[] ReferencePoints { get; set; }
public int DividingParameter { get; set; } = 3;
}
}
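For context, a sketch (not part of this commit): the ReferencePoints and DividingParameter properties above appear to correspond to the reference_points and dividing_parameter arguments of optuna.samplers.NSGAIIISampler in Optuna 3.2; that mapping is an assumption here. A minimal Python illustration of the sampler these settings would configure:

import optuna

# dividing_parameter controls how densely Optuna generates reference points;
# reference_points (a NumPy array) can be passed instead to use an explicit set.
sampler = optuna.samplers.NSGAIIISampler(
    population_size=50,
    dividing_parameter=3,
)


def objective(trial):
    x = trial.suggest_float("x", 0, 1)
    y = trial.suggest_float("y", 0, 1)
    z = trial.suggest_float("z", 0, 1)
    return x, y, z  # more than two objectives, the NSGA-III use case


study = optuna.create_study(directions=["minimize"] * 3, sampler=sampler)
study.optimize(objective, n_trials=50)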
1 change: 1 addition & 0 deletions Tunny/Settings/Sampler/SamplerSettings.cs
@@ -9,6 +9,7 @@ public class SamplerSettings
         public Tpe Tpe { get; set; } = new Tpe();
         public CmaEs CmaEs { get; set; } = new CmaEs();
         public NSGAII NsgaII { get; set; } = new NSGAII();
+        public NSGAIII NsgaIII { get; set; } = new NSGAIII();
         public QuasiMonteCarlo QMC { get; set; } = new QuasiMonteCarlo();
         public BoTorch BoTorch { get; set; } = new BoTorch();
     }