diff --git a/benchmarks/README.md b/benchmarks/README.md
index 6feb3d7850..d17875418f 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -76,6 +76,7 @@ The following parameters can be used to run the AB benchmark suite.
 - ts: Use Already running Torchserve instance. Default: False
 - gpus: Number of gpus to run docker container with. By default it runs the docker container on CPU.
 - backend_profiling: Enable backend profiling using CProfile. Default: False
+- generate_graphs: Enable generation of Graph plots. Default: False
 - config_properties: Path to config.properties file. Default: config.properties in the benchmark directory
 - inference_model_url: Inference function url - can be either for predictions or explanations. Default: predictions/benchmark.
 - config: All the above params can be set using a config JSON file. When this flag is used, all other cmd line params are ignored.
diff --git a/benchmarks/benchmark-ab.py b/benchmarks/benchmark-ab.py
index 896282c41c..097ab1e985 100644
--- a/benchmarks/benchmark-ab.py
+++ b/benchmarks/benchmark-ab.py
@@ -30,6 +30,7 @@
     "image": "",
     "docker_runtime": "",
     "backend_profiling": False,
+    "generate_graphs": False,
     "config_properties": "config.properties",
     "inference_model_url": "predictions/benchmark",
     "report_location": tempfile.gettempdir(),
@@ -94,6 +95,12 @@ def json_provider(file_path, cmd_name):
     default=False,
     help="Enable backend profiling using CProfile. Default False",
 )
+@click.option(
+    "--generate_graphs",
+    "-gg",
+    default=False,
+    help="Enable generation of Graph plots. Default False",
+)
 @click.option(
     "--config_properties",
     "-cp",
@@ -140,6 +147,7 @@ def benchmark(
     inference_model_url,
     report_location,
     tmp_dir,
+    generate_graphs,
 ):
     input_params = {
         "url": url,
@@ -159,6 +167,7 @@ def benchmark(
         "inference_model_url": inference_model_url,
         "report_location": report_location,
         "tmp_dir": tmp_dir,
+        "generate_graphs": generate_graphs,
     }
 
     # set ab params
@@ -441,8 +450,9 @@ def generate_report(warm_up_lines):
     click.secho("\n\nGenerating Reports...", fg="green")
     extract_metrics(warm_up_lines=warm_up_lines)
     generate_csv_output()
-    generate_latency_graph()
-    generate_profile_graph()
+    if execution_params["generate_graphs"]:
+        generate_latency_graph()
+        generate_profile_graph()
     click.secho("\nTest suite execution complete.", fg="green")