
Commit

shorten the test time
zhenghh04 committed Dec 8, 2023
1 parent 00a253d commit 45e5730
Showing 2 changed files with 17 additions and 13 deletions.
15 changes: 7 additions & 8 deletions .github/workflows/python-package-conda.yml
@@ -137,20 +137,19 @@ jobs:
    - name: test-tf-loader-tfrecord
      run: |
        source ${VENV}/bin/activate
-       mpirun -np 2 dlio_benchmark workload=resnet50 ++workload.dataset.num_files_train=64 ++workload.workflow.train=False ++workload.workflow.generate_data=True ++workload.dataset.num_files_train=16 ++workload.dataset.num_samples_per_file=16
-       mpirun -np 2 dlio_benchmark workload=resnet50 ++workload.dataset.num_files_train=64 ++workload.workflow.train=True ++workload.workflow.generate_data=False ++workload.dataset.num_files_train=16 ++workload.dataset.num_samples_per_file=16
+       mpirun -np 2 dlio_benchmark workload=resnet50 ++workload.dataset.num_files_train=64 ++workload.workflow.train=False ++workload.workflow.generate_data=True ++workload.dataset.num_files_train=4 ++workload.dataset.num_samples_per_file=16
+       mpirun -np 2 dlio_benchmark workload=resnet50 ++workload.dataset.num_files_train=64 ++workload.workflow.train=True ++workload.workflow.generate_data=False ++workload.dataset.num_files_train=4 ++workload.dataset.num_samples_per_file=16 ++workload.train.computation_time=0.01 ++workload.train.epochs=1
    - name: test-torch-loader-npz
      run: |
        source ${VENV}/bin/activate
-       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=2 ++workload.workflow.train=False ++workload.workflow.generate_data=True ++workload.dataset.num_files_train=16 ++workload.dataset.num_files_eval=16 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
-       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=2 ++workload.workflow.train=True ++workload.workflow.generate_data=False ++workload.dataset.num_files_train=16 ++workload.dataset.num_files_eval=16 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
+       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.workflow.train=False ++workload.workflow.generate_data=True ++workload.dataset.num_files_train=8 ++workload.dataset.num_files_eval=8 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
+       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=1 ++workload.workflow.train=True ++workload.workflow.generate_data=False ++workload.dataset.num_files_train=8 ++workload.dataset.num_files_eval=8 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
    - name: test-tf-loader-npz
      run: |
        source ${VENV}/bin/activate
-       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.framework=tensorflow ++workload.data_reader.data_loader=tensorflow ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=2 ++workload.workflow.train=False ++workload.workflow.generate_data=True ++workload.dataset.num_files_train=16 ++workload.dataset.num_files_eval=16 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
-       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.framework=tensorflow ++workload.data_reader.data_loader=tensorflow ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=2 ++workload.workflow.train=True ++workload.workflow.generate_data=False ++workload.dataset.num_files_train=16 ++workload.dataset.num_files_eval=16 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
+       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.framework=tensorflow ++workload.data_reader.data_loader=tensorflow ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=1 ++workload.workflow.train=False ++workload.workflow.generate_data=True ++workload.dataset.num_files_train=8 ++workload.dataset.num_files_eval=8 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
+       mpirun -np 2 dlio_benchmark workload=unet3d ++workload.framework=tensorflow ++workload.data_reader.data_loader=tensorflow ++workload.train.computation_time=0.05 ++workload.evaluation.eval_time=0.01 ++workload.train.epochs=1 ++workload.workflow.train=True ++workload.workflow.generate_data=False ++workload.dataset.num_files_train=8 ++workload.dataset.num_files_eval=8 ++workload.reader.read_threads=2 ++workload.dataset.record_length=4096 ++workload.dataset.record_length_stdev=0
    - name: test_subset
      run: |
        source ${VENV}/bin/activate
-       mpirun -np 2 dlio_benchmark ++workload.workflow.generate_data=True ++workload.workflow.train=False
-       mpirun -np 2 dlio_benchmark ++workload.workflow.generate_data=False ++workload.workflow.train=True ++workload.dataset.num_files_train=8
+       mpirun -np 2 pytest -k test_subset -v
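
Note: the test_subset step now delegates data generation and the short training pass to the pytest case itself (see the test change below), so the workflow only needs a single pytest invocation. All of the changed commands rely on Hydra's override grammar: key=value changes an existing config entry, ++key=value sets the value whether or not the key already exists, and when the same key is repeated on one line (as with num_files_train in the resnet50 commands) the later value should take effect. A rough illustration of how such overrides resolve, using OmegaConf merging as a stand-in for Hydra's actual CLI handling inside dlio_benchmark (the base values below are invented for the example):

    # Illustration only: approximates the effect of ++key=value overrides.
    from omegaconf import OmegaConf

    base = OmegaConf.create({"workload": {"dataset": {"num_files_train": 64},
                                          "train": {"epochs": 20}}})
    # "++" semantics: override the key if it exists, create it if it does not.
    overrides = OmegaConf.from_dotlist(["workload.dataset.num_files_train=4",
                                        "workload.train.epochs=1",
                                        "workload.train.computation_time=0.01"])
    cfg = OmegaConf.merge(base, overrides)
    print(cfg.workload.dataset.num_files_train)  # 4     (existing key overridden)
    print(cfg.workload.train.computation_time)   # 0.01  (new key added)
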
15 changes: 10 additions & 5 deletions tests/dlio_benchmark_test.py
@@ -88,7 +88,9 @@ def test_gen_data(fmt, framework) -> None:
        f'++workload.reader.data_loader={framework}',
        '++workload.workflow.train=False',
        '++workload.workflow.generate_data=True',
-       f"++workload.dataset.format={fmt}"])
+       f"++workload.dataset.format={fmt}",
+       "++workload.dataset.num_files_train=8",
+       "++workload.dataset.num_files_eval=8"])
    benchmark = run_benchmark(cfg, verify=False)
    if benchmark.args.num_subfolders_train <= 1:
        train = pathlib.Path(f"{cfg.workload.dataset.data_folder}/train")
@@ -119,7 +121,9 @@ def test_subset() -> None:
        '++workload.workflow.generate_data=True'])
    benchmark=run_benchmark(cfg, verify=False)
    cfg = compose(config_name='config', overrides=['++workload.workflow.train=True', \
-       '++workload.workflow.generate_data=False', '++workload.dataset.num_files_train=8'])
+       '++workload.workflow.generate_data=False', \
+       '++workload.dataset.num_files_train=8', \
+       '++workload.train.computation_time=0.01'])
    benchmark=run_benchmark(cfg, verify=True)
    clean()

@@ -142,7 +146,8 @@ def test_storage_root_gen_data(fmt, framework) -> None:
        '++workload.workflow.train=False',
        '++workload.workflow.generate_data=True',
        f"++workload.storage.storage_root={storage_root}",
-       f"++workload.dataset.format={fmt}"])
+       f"++workload.dataset.format={fmt}",
+       "++workload.dataset.num_files_train=16"])
    benchmark = run_benchmark(cfg, verify=False)
    if benchmark.args.num_subfolders_train <= 1:
        assert (
@@ -302,8 +307,8 @@ def test_multi_threads(framework, nt) -> None:
        'workload.train.computation_time=0.01',
        'workload.evaluation.eval_time=0.005',
        '++workload.train.epochs=1',
-       '++workload.dataset.num_files_train=16',
-       '++workload.dataset.num_files_eval=16'])
+       '++workload.dataset.num_files_train=8',
+       '++workload.dataset.num_files_eval=8'])
    benchmark = run_benchmark(cfg)
    clean()

