diff --git a/language/bert/onnxruntime_SUT.py b/language/bert/onnxruntime_SUT.py
index 0b2a4a7b0..aff0c6321 100644
--- a/language/bert/onnxruntime_SUT.py
+++ b/language/bert/onnxruntime_SUT.py
@@ -34,10 +34,13 @@ def __init__(self, args):
 
         print("Loading ONNX model...")
         self.quantized = args.quantized
-        if self.quantized:
-            model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_large_v1_1_fake_quant.onnx"
-        else:
-            model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/model.onnx"
+
+        model_path = os.environ.get("MODEL_FILE")
+        if not model_path:
+            if self.quantized:
+                model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_large_v1_1_fake_quant.onnx"
+            else:
+                model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/model.onnx"
         self.sess = onnxruntime.InferenceSession(model_path, self.options)
 
         print("Constructing SUT...")
diff --git a/language/bert/run.py b/language/bert/run.py
index 8d471bfc5..9319d23dd 100644
--- a/language/bert/run.py
+++ b/language/bert/run.py
@@ -86,8 +86,9 @@ def main():
         settings.mode = lg.TestMode.AccuracyOnly
     else:
         settings.mode = lg.TestMode.PerformanceOnly
-
-    log_path = "build/logs"
+    log_path = os.environ.get("LOG_PATH")
+    if not log_path:
+        log_path = "build/logs"
     if not os.path.exists(log_path):
         os.makedirs(log_path)
     log_output_settings = lg.LogOutputSettings()
@@ -99,8 +100,7 @@ def main():
 
     print("Running LoadGen test...")
     lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)
-
-    if args.accuracy:
+    if args.accuracy and not os.environ.get("SKIP_VERIFY_ACCURACY"):
         cmd = "python3 {:}/accuracy-squad.py {}".format(
             os.path.dirname(os.path.abspath(__file__)),
             '--max_examples {}'.format(
diff --git a/language/bert/squad_QSL.py b/language/bert/squad_QSL.py
index 270a26e99..c751c33c6 100644
--- a/language/bert/squad_QSL.py
+++ b/language/bert/squad_QSL.py
@@ -44,10 +44,16 @@ def __init__(self, total_count_override=None, perf_count_override=None, cache_pa
             print("No cached features at '%s'... converting from examples..." % cache_path)
 
             print("Creating tokenizer...")
-            tokenizer = BertTokenizer("build/data/bert_tf_v1_1_large_fp32_384_v2/vocab.txt")
+            vocab_file = os.environ.get("VOCAB_FILE")
+            if not vocab_file:
+                vocab_file = "build/data/bert_tf_v1_1_large_fp32_384_v2/vocab.txt"
+            tokenizer = BertTokenizer(vocab_file)
 
             print("Reading examples...")
-            eval_examples = read_squad_examples(input_file="build/data/dev-v1.1.json",
+            dataset_file = os.environ.get("DATASET_FILE")
+            if not dataset_file:
+                dataset_file = "build/data/dev-v1.1.json"
+            eval_examples = read_squad_examples(input_file=dataset_file,
                 is_training=False, version_2_with_negative=False)
 
             print("Converting examples to features...")
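
Usage note (not part of the diff): a minimal sketch of how the new environment-variable overrides might be supplied when launching the benchmark. The file paths and the --backend flag below are illustrative assumptions; only the variable names and the --accuracy behaviour come from the changes above.

    import os
    import subprocess

    # Illustrative override values; substitute paths to your own local artifacts.
    env = dict(os.environ)
    env.update({
        "MODEL_FILE": "/models/bert_large_v1_1_fake_quant.onnx",  # replaces the hard-coded model path
        "VOCAB_FILE": "/data/vocab.txt",                          # replaces the hard-coded vocab path
        "DATASET_FILE": "/data/dev-v1.1.json",                    # replaces the hard-coded SQuAD dev set
        "LOG_PATH": "/tmp/mlperf_logs",                           # replaces build/logs
        "SKIP_VERIFY_ACCURACY": "1",                              # any non-empty value skips the accuracy-squad.py post-run step
    })

    # --backend is assumed here; --accuracy corresponds to args.accuracy in run.py.
    subprocess.run(["python3", "run.py", "--backend=onnxruntime", "--accuracy"],
                   env=env, check=True)

Each override is optional: an unset or empty variable falls back to the existing build/data defaults, so current workflows are unchanged.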