Update cpuinfo (#2379)
* update cpuinfo

* update

* lint

* lint

* lint

* install numactl

* update num workers

* skip if gpu

---------

Co-authored-by: Ankith Gunapal <agunapal@ischool.Berkeley.edu>
Co-authored-by: Mark Saroufim <marksaroufim@fb.com>
3 people authored Jun 2, 2023
1 parent 77f8c0b · commit e5004b2
Showing 1 changed file with 14 additions and 10 deletions.
test/pytest/test_example_intel_extension_for_pytorch.py (14 additions, 10 deletions)

--- a/test/pytest/test_example_intel_extension_for_pytorch.py
+++ b/test/pytest/test_example_intel_extension_for_pytorch.py
@@ -5,6 +5,7 @@
 import pytest
 import requests
 import test_utils
+import torch
 from test_handler import run_inference_using_url_with_data
 
 REPO_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
@@ -83,7 +84,8 @@ def scale_workers_with_core_pinning(scaled_num_workers):
 
 
 @pytest.mark.skipif(
-    not ipex_xeon_run_cpu_available,
+    not ipex_xeon_run_cpu_available
+    or ((torch.cuda.device_count() > 0) and torch.cuda.is_available()),
     reason="Make sure intel-extension-for-pytorch is installed and torch.backends.xeon.run_cpu is available",
 )
 def test_single_worker_affinity():
@@ -102,16 +104,16 @@ def test_single_worker_affinity():
     ), "single-worker inference with core pinning failed"
 
     affinity = get_worker_affinity(num_workers, worker_idx)
-    print("affinity: ", affinity)
     assert affinity in open(TS_LOG).read(), "workers are not correctly pinned to cores"
 
 
 @pytest.mark.skipif(
-    not ipex_xeon_run_cpu_available,
+    not ipex_xeon_run_cpu_available
+    or ((torch.cuda.device_count() > 0) and torch.cuda.is_available()),
     reason="Make sure intel-extension-for-pytorch is installed and torch.backends.xeon.run_cpu is available",
 )
 def test_multi_worker_affinity():
-    num_workers = 4
+    num_workers = 2
     setup_torchserve()
     requests.post(
         "http://localhost:8081/models?initial_workers={}&synchronous=true&url=resnet-18.mar".format(
@@ -132,19 +134,20 @@ def test_multi_worker_affinity():
 
 
 @pytest.mark.skipif(
-    not ipex_xeon_run_cpu_available,
+    not ipex_xeon_run_cpu_available
+    or ((torch.cuda.device_count() > 0) and torch.cuda.is_available()),
     reason="Make sure intel-extension-for-pytorch is installed and torch.backends.xeon.run_cpu is available",
 )
 def test_worker_scale_up_affinity():
-    initial_num_workers = 2
+    initial_num_workers = 1
     setup_torchserve()
     requests.post(
         "http://localhost:8081/models?initial_workers={}&synchronous=true&url=resnet-18.mar".format(
             initial_num_workers
         )
     )
 
-    scaled_up_num_workers = 4
+    scaled_up_num_workers = 2
     response = scale_workers_with_core_pinning(scaled_up_num_workers)
     resnet18_list = json.loads(response.content)
     assert (
@@ -164,19 +167,20 @@ def test_worker_scale_up_affinity():
 
 
 @pytest.mark.skipif(
-    not ipex_xeon_run_cpu_available,
+    not ipex_xeon_run_cpu_available
+    or ((torch.cuda.device_count() > 0) and torch.cuda.is_available()),
     reason="Make sure intel-extension-for-pytorch is installed and torch.backends.xeon.run_cpu is available",
 )
 def test_worker_scale_down_affinity():
-    initial_num_workers = 4
+    initial_num_workers = 2
     setup_torchserve()
     requests.post(
         "http://localhost:8081/models?initial_workers={}&synchronous=true&url=resnet-18.mar".format(
             initial_num_workers
         )
     )
 
-    scaled_down_num_workers = 2
+    scaled_down_num_workers = 1
     response = scale_workers_with_core_pinning(scaled_down_num_workers)
     resnet18_list = json.loads(response.content)
     assert (
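
The recurring edit in every hunk above is the broadened skipif guard: the core-pinning tests now also skip whenever a CUDA device is visible, since pinning workers to CPU cores via torch.backends.xeon.run_cpu is only meaningful on a CPU-only host. A minimal sketch of that guard pattern follows; the test name, body, and reason string are illustrative and not part of the commit, while the condition mirrors the one added in the diff:

import pytest
import torch

# True when this torch build has CUDA support and at least one device is visible.
gpu_visible = (torch.cuda.device_count() > 0) and torch.cuda.is_available()


@pytest.mark.skipif(
    gpu_visible,
    reason="core pinning via torch.backends.xeon.run_cpu targets CPU-only hosts",
)
def test_cpu_only_guard():
    # Runs only on hosts without a visible CUDA device.
    assert not gpu_visible

The same condition could be hoisted into a single module-level marker, but the commit repeats it per test, matching the existing per-test skipif on ipex_xeon_run_cpu_available.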
