From d80be973b9bb4d5c6ca6dec3f55862cd5a437b7d Mon Sep 17 00:00:00 2001
From: "siqiao.xsq"
Date: Wed, 12 Jul 2023 11:35:34 +0800
Subject: [PATCH] 1. Add script to run HPO; 2. Update version to 0.0.5

---
 examples/configs/hpo_config.yaml | 55 ++++++++++++++++++++++++++++++++
 examples/train_nhp_hpo.py        | 26 +++++++++++++++
 version.py                       |  2 +-
 3 files changed, 82 insertions(+), 1 deletion(-)
 create mode 100644 examples/configs/hpo_config.yaml
 create mode 100644 examples/train_nhp_hpo.py

diff --git a/examples/configs/hpo_config.yaml b/examples/configs/hpo_config.yaml
new file mode 100644
index 0000000..9302ade
--- /dev/null
+++ b/examples/configs/hpo_config.yaml
@@ -0,0 +1,55 @@
+pipeline_config_id: hpo_runner_config
+
+data:
+  taxi:
+    data_format: pkl
+    train_dir: ./data/taxi/train.pkl
+    valid_dir: ./data/taxi/dev.pkl
+    test_dir: ./data/taxi/test.pkl
+    data_specs:
+      num_event_types: 10
+      pad_token_id: 10
+      padding_side: right
+      truncation_side: right
+
+hpo:
+  storage_uri: sqlite://hpo_test.db
+  is_continuous: False
+  framework_id: optuna # the HPO framework to use
+  n_trials: 10
+
+
+NHP_train:
+  base_config:
+    stage: train
+    backend: torch
+    dataset_id: taxi
+    runner_id: std_tpp
+    model_id: NHP # model name
+    base_dir: './checkpoints/'
+  trainer_config:
+    batch_size: 256
+    max_epoch: 200
+    shuffle: False
+    optimizer: adam
+    learning_rate: 1.e-3
+    valid_freq: 1
+    use_tfb: False
+    metrics: [ 'acc', 'rmse' ]
+    seed: 2019
+    gpu: -1
+  model_config:
+    hidden_size: 64
+    loss_integral_num_sample_per_step: 20
+#    pretrained_model_dir: ./checkpoints/75518_4377527680_230530-132355/models/saved_model
+    thinning:
+      num_seq: 10
+      num_sample: 1
+      num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at a time in the thinning algorithm
+      look_ahead_time: 10
+      patience_counter: 5 # maximum number of iterations used in adaptive thinning
+      over_sample_rate: 5
+      num_samples_boundary: 5
+      dtime_max: 5
+      num_step_gen: 1
+
diff --git a/examples/train_nhp_hpo.py b/examples/train_nhp_hpo.py
new file mode 100644
index 0000000..124ec08
--- /dev/null
+++ b/examples/train_nhp_hpo.py
@@ -0,0 +1,26 @@
+import argparse
+
+from easy_tpp.config_factory import Config
+from easy_tpp.hpo import HyperTuner
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--config_dir', type=str, required=False, default='configs/hpo_config.yaml',
+                        help='Path of the configuration yaml used to train and evaluate the model.')
+
+    parser.add_argument('--experiment_id', type=str, required=False, default='NHP_train',
+                        help='Experiment id in the config file.')
+
+    args = parser.parse_args()
+
+    config = Config.build_from_yaml_file(args.config_dir, experiment_id=args.experiment_id)
+
+    tuner = HyperTuner.build_from_config(config)
+
+    tuner.run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/version.py b/version.py
index 156d6f9..eead319 100644
--- a/version.py
+++ b/version.py
@@ -1 +1 @@
-__version__ = '0.0.4'
+__version__ = '0.0.5'
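
For reviewers trying the patch out: with the defaults above, the runner can be launched from the examples/ directory as `python train_nhp_hpo.py --config_dir configs/hpo_config.yaml --experiment_id NHP_train`. The patch does not show HyperTuner's internals; the sketch below is only a guess at the Optuna pattern that the hpo block (framework_id, n_trials, storage_uri) configures, and the search space and objective in it are hypothetical stand-ins rather than anything taken from this change.

# Rough sketch (assumption, not part of the patch) of the Optuna pattern the
# hpo block points at: a study persisted to sqlite, optimized for n_trials.
import optuna


def objective(trial):
    # Hypothetical search space; the real HyperTuner in easy_tpp.hpo defines its own.
    learning_rate = trial.suggest_float('learning_rate', 1e-4, 1e-2, log=True)
    hidden_size = trial.suggest_categorical('hidden_size', [32, 64, 128])
    # Stand-in for "train NHP with these values and return a validation metric (e.g. rmse)".
    return (learning_rate - 1e-3) ** 2 + abs(hidden_size - 64) / 64.0


study = optuna.create_study(
    study_name='NHP_train',
    storage='sqlite:///hpo_test.db',  # Optuna itself expects SQLAlchemy-style URLs (three slashes)
    direction='minimize',
    load_if_exists=True,
)
study.optimize(objective, n_trials=10)  # mirrors n_trials: 10 in hpo_config.yaml
print(study.best_params)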