Commit 6c22317: add npu support

wangshuai09 committed Apr 1, 2024 (1 parent: 01a34cd)

Showing 5 changed files with 7 additions and 7 deletions.

4 changes: 2 additions & 2 deletions examples/person_detection/scrfd_person.py

@@ -11,8 +11,8 @@
 
 def detect_person(img, detector):
     bboxes, kpss = detector.detect(img)
-    bboxes = np.round(bboxes[:,:4]).astype(np.int)
-    kpss = np.round(kpss).astype(np.int)
+    bboxes = np.round(bboxes[:,:4]).astype(np.int32)
+    kpss = np.round(kpss).astype(np.int32)
     kpss[:,:,0] = np.clip(kpss[:,:,0], 0, img.shape[1])
     kpss[:,:,1] = np.clip(kpss[:,:,1], 0, img.shape[0])
     vbboxes = bboxes.copy()

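This first hunk is a compatibility fix rather than an NPU change: the bare np.int alias was deprecated in NumPy 1.20 and removed in 1.24, so the cast needs an explicit width. A minimal sketch of the same round-cast-clip pattern on made-up detections (the array values and the 640x480 image size are illustrative, not taken from the SCRFD example):

```python
import numpy as np

# Dummy detector output: two boxes as (x1, y1, x2, y2, score).
bboxes = np.array([[10.6, 20.2, 110.9, 220.4, 0.93],
                   [30.1, -4.7, 130.3, 640.8, 0.88]])

# Round and cast with an explicit width; the bare np.int alias no longer
# exists in current NumPy, which is what the change to np.int32 addresses.
int_boxes = np.round(bboxes[:, :4]).astype(np.int32)

# Clip x to [0, width] and y to [0, height]; 640x480 is an assumed image size.
int_boxes[:, 0::2] = np.clip(int_boxes[:, 0::2], 0, 640)
int_boxes[:, 1::2] = np.clip(int_boxes[:, 1::2], 0, 480)
print(int_boxes)
```
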
4 changes: 2 additions & 2 deletions python-package/README.md

@@ -14,7 +14,7 @@ For ``insightface<=0.1.5``, we use MXNet as inference backend.
 
 Starting from insightface>=0.2, we use onnxruntime as inference backend.
 
-You have to install ``onnxruntime-gpu`` manually to enable GPU inference, or install ``onnxruntime`` to use CPU only inference.
+You have to install ``onnxruntime-gpu`` manually to enable GPU inference, install ``onnxruntime-cann`` manually to enable NPU inference, or install ``onnxruntime`` to use CPU-only inference.
 
 ## Change Log
 
@@ -54,7 +54,7 @@ import insightface
 from insightface.app import FaceAnalysis
 from insightface.data import get_image as ins_get_image
-app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CANNExecutionProvider', 'CPUExecutionProvider'])
 app.prepare(ctx_id=0, det_size=(640, 640))
 img = ins_get_image('t1')
 faces = app.get(img)

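ONNX Runtime tries the providers in the order given and falls back when one is not shipped in the installed wheel, so it can help to check what your build actually exposes before expecting NPU inference. A minimal sketch using the standard onnxruntime.get_available_providers() call together with the FaceAnalysis snippet above:

```python
import onnxruntime as ort
from insightface.app import FaceAnalysis

available = ort.get_available_providers()
print('available providers:', available)

# Keep the CUDA -> CANN -> CPU preference order, but only request providers
# that this onnxruntime build actually ships.
preferred = ['CUDAExecutionProvider', 'CANNExecutionProvider', 'CPUExecutionProvider']
providers = [p for p in preferred if p in available]

app = FaceAnalysis(providers=providers)
app.prepare(ctx_id=0, det_size=(640, 640))
```
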
2 changes: 1 addition & 1 deletion python-package/insightface/model_zoo/model_zoo.py

@@ -68,7 +68,7 @@ def find_onnx_file(dir_path):
     return paths[-1]
 
 def get_default_providers():
-    return ['CUDAExecutionProvider', 'CPUExecutionProvider']
+    return ['CUDAExecutionProvider', 'CANNExecutionProvider', 'CPUExecutionProvider']
 
 def get_default_provider_options():
     return None

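The new default provider list only takes effect when it is handed to an onnxruntime.InferenceSession; providers that the installed build does not support are skipped with a warning rather than an error, which is why CPU-only installs keep working. A hypothetical helper illustrating that flow (load_model and the commented model path are made up for the sketch, not the actual model_zoo code):

```python
import onnxruntime

def load_model(onnx_path, providers=None, provider_options=None):
    # Hypothetical wrapper, not the real model_zoo implementation.
    if providers is None:
        providers = ['CUDAExecutionProvider', 'CANNExecutionProvider',
                     'CPUExecutionProvider']
    # onnxruntime walks the list in order and ignores providers that are
    # unavailable in the installed build, emitting a warning instead.
    return onnxruntime.InferenceSession(onnx_path,
                                        providers=providers,
                                        provider_options=provider_options)

# Usage (path is illustrative):
# session = load_model('~/.insightface/models/buffalo_l/det_10g.onnx')
```
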
2 changes: 1 addition & 1 deletion web-demos/src_recognition/arcface_onnx.py

@@ -42,7 +42,7 @@ def __init__(self, model_file=None, session=None):
         self.input_std = input_std
         #print('input mean and std:', self.input_mean, self.input_std)
         if self.session is None:
-            self.session = onnxruntime.InferenceSession(self.model_file, providers=['CUDAExecutionProvider'])
+            self.session = onnxruntime.InferenceSession(self.model_file, providers=['CUDAExecutionProvider', 'CANNExecutionProvider'])
         input_cfg = self.session.get_inputs()[0]
         input_shape = input_cfg.shape
         input_name = input_cfg.name

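A session built with this provider list is used like any other ONNX Runtime session, and CPUExecutionProvider remains onnxruntime's last-resort fallback even though the web demo only lists CUDA and CANN. A rough sketch of a raw embedding forward pass (the model file name, the 112x112 input size and the random input are assumptions, not taken from arcface_onnx.py):

```python
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession(
    'w600k_r50.onnx',  # assumed ArcFace-style recognition model
    providers=['CUDAExecutionProvider', 'CANNExecutionProvider'])

input_name = session.get_inputs()[0].name
face = np.random.rand(1, 3, 112, 112).astype(np.float32)  # placeholder aligned face
embedding = session.run(None, {input_name: face})[0]
print(embedding.shape)  # typically (1, 512) for ArcFace models
```
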
2 changes: 1 addition & 1 deletion web-demos/src_recognition/scrfd.py

@@ -74,7 +74,7 @@ def __init__(self, model_file=None, session=None):
         if self.session is None:
             assert self.model_file is not None
             assert osp.exists(self.model_file)
-            self.session = onnxruntime.InferenceSession(self.model_file, providers=['CUDAExecutionProvider'])
+            self.session = onnxruntime.InferenceSession(self.model_file, providers=['CUDAExecutionProvider', 'CANNExecutionProvider'])
         self.center_cache = {}
         self.nms_thresh = 0.4
         self.det_thresh = 0.5

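Both web-demo sessions hard-code the provider names; ONNX Runtime also accepts (name, options) pairs if you need to pin a session to a particular device. A minimal sketch (the 'device_id' key follows the usual execution-provider convention and should be verified against the onnxruntime-cann documentation for your version; the model path is illustrative):

```python
import onnxruntime

providers = [
    ('CANNExecutionProvider', {'device_id': 0}),  # assumed option name, verify for your build
    ('CPUExecutionProvider', {}),
]
session = onnxruntime.InferenceSession('det_10g.onnx', providers=providers)
print(session.get_providers())  # shows which providers were actually enabled
```
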
