Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add llamatune checks with quantized values #814

Merged
Merged
Show file tree
Hide file tree
Changes from 20 commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
4b55dbb
Validate the configuration produced by LlamaTune inverse_transform()
motus Jul 17, 2024
57fde9c
Merge branch 'main' into sergiym/opt/llamatune_inv
motus Jul 22, 2024
b58236f
Update azure credentials to be more flexible (#787)
eujing Jul 19, 2024
8034458
Fix mypy 1.11 warnings (#809)
motus Jul 22, 2024
ca898ed
Fix the coercion of scores to floats in the optimizer (#789)
motus Jul 22, 2024
f57d480
Merge remote-tracking branch 'sergiy/sergiym/opt/llamatune_inv' into …
bpkroth Jul 22, 2024
13fb61f
comments
bpkroth Jul 22, 2024
f02411a
comments
bpkroth Jul 22, 2024
b4d5007
apply the q_scaler
bpkroth Jul 22, 2024
e0e3b30
more tweaks
bpkroth Jul 22, 2024
1a44544
more fixups
bpkroth Jul 22, 2024
ceebf90
reorg to make pylint happier
bpkroth Jul 23, 2024
abcfafa
add a todo comment
bpkroth Jul 23, 2024
c7167de
format
bpkroth Jul 23, 2024
cdd71e1
add quantized values to the default tunable groups fixture
bpkroth Jul 23, 2024
60bd4f0
Also validate the other side
bpkroth Jul 23, 2024
ef06e8c
Add explicit tests for quantized values with llamatune
bpkroth Jul 23, 2024
dd83779
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Jul 31, 2024
8ec0d76
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Aug 1, 2024
aff965d
Merge branch 'main' into add-llamatune-checks-with-quantized-values
bpkroth Aug 8, 2024
a2dbb2b
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Aug 15, 2024
b56ba23
Merge branch 'main' into add-llamatune-checks-with-quantized-values
motus Aug 19, 2024
b3850c3
Add back the quantization for LlamaTune unit tests (#59)
motus Aug 19, 2024
b502b28
Update mlos_bench/mlos_bench/optimizers/convert_configspace.py
motus Aug 19, 2024
7583d43
Update mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py
motus Aug 19, 2024
ce0fd04
Update mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
motus Aug 19, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ def test_smac_optimization_loop(mock_env_no_noise: MockEnv, smac_opt: MlosCoreOp
"vmSize": "Standard_B2s",
"idle": "mwait",
"kernel_sched_migration_cost_ns": 297669,
"kernel_sched_latency_ns": 290365137,
"kernel_sched_latency_ns": 290365100,
}
assert score == pytest.approx(expected_score, 0.01)
assert tunables.get_param_values() == expected_tunable_values
3 changes: 2 additions & 1 deletion mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,8 @@
"type": "int",
"default": 2000000,
"range": [0, 1000000000],
"log": false
"log": false,
"quantization": 100
}
}
}
Expand Down
26 changes: 25 additions & 1 deletion mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,16 @@
from mlos_core.spaces.adapters import LlamaTuneAdapter


def construct_parameter_space(
# Explicitly test quantized values with llamatune space adapter.
# TODO: Add log scale sampling tests as well.


def construct_parameter_space( # pylint: disable=too-many-arguments
*,
n_continuous_params: int = 0,
n_quantized_continuous_params: int = 0,
n_integer_params: int = 0,
n_quantized_integer_params: int = 0,
n_categorical_params: int = 0,
seed: int = 1234,
) -> CS.ConfigurationSpace:
Expand All @@ -26,8 +33,12 @@ def construct_parameter_space(

for idx in range(n_continuous_params):
input_space.add(CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64))
for idx in range(n_quantized_continuous_params):
input_space.add(CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64, q=12.8))
for idx in range(n_integer_params):
input_space.add(CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=-1, upper=256))
for idx in range(n_quantized_integer_params):
input_space.add(CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=0, upper=256, q=16))
motus marked this conversation as resolved.
Show resolved Hide resolved
for idx in range(n_categorical_params):
input_space.add(
CS.CategoricalHyperparameter(
Expand All @@ -49,6 +60,13 @@ def construct_parameter_space(
{"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{
"n_quantized_continuous_params": int(
num_target_space_dims * num_orig_space_factor
)
},
# Mix of all three types
{
"n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
Expand Down Expand Up @@ -358,6 +376,12 @@ def test_max_unique_values_per_param() -> None:
{"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{
"n_quantized_continuous_params": int(
num_target_space_dims * num_orig_space_factor
)
},
# Mix of all three types
{
"n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
Expand Down
Loading