Use cpu provider for embeddings models
NickM-27 committed Sep 17, 2024
1 parent ff9e1da · commit 357c787
Showing 1 changed file with 3 additions and 25 deletions.
28 changes: 3 additions & 25 deletions frigate/embeddings/functions/clip.py
@@ -49,31 +49,11 @@ def _load_models(
 
     @staticmethod
     def _load_model(path: str, silent: bool):
-        providers = []
-        options = []
-
-        for provider in ort.get_available_providers():
-            if provider == "TensorrtExecutionProvider":
-                continue
-            elif provider == "OpenVINOExecutionProvider":
-                # TODO need to verify openvino works correctly
-                os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
-                providers.append(provider)
-                options.append(
-                    {
-                        "cache_dir": "/config/model_cache/openvino/ort",
-                        "device_type": "GPU",
-                    }
-                )
-            else:
-                providers.append(provider)
-                options.append({})
+        providers = ["CPUExecutionProvider"]
 
         try:
             if os.path.exists(path):
-                return ort.InferenceSession(
-                    path, providers=providers, provider_options=options
-                )
+                return ort.InferenceSession(path, providers=providers)
             else:
                 raise FileNotFoundError(
                     errno.ENOENT,
@@ -104,9 +84,7 @@ def _load_model(path: str, silent: bool):
                     f.flush()
             # Finally move the temporary file to the correct location
             temporary_filename.rename(path)
-            return ort.InferenceSession(
-                path, providers=provider, provider_options=options
-            )
+            return ort.InferenceSession(path, providers=providers)
 
 
 class ClipEmbedding(EmbeddingFunction):
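For reference, a minimal sketch (not part of this commit) of what the new provider selection amounts to: the ONNX Runtime session is pinned to `CPUExecutionProvider` rather than built from everything `ort.get_available_providers()` reports. The model path below is illustrative only.

```python
import onnxruntime as ort

# Pin the session to the CPU execution provider, mirroring the change above.
providers = ["CPUExecutionProvider"]

# Illustrative path only; Frigate resolves the real file under its model cache.
model_path = "/config/model_cache/clip/clip_image_model_vitb32.onnx"

session = ort.InferenceSession(model_path, providers=providers)

# get_providers() reports the providers the session actually ended up with.
print(session.get_providers())  # expected: ['CPUExecutionProvider']
```

Pinning to the CPU provider also sidesteps the cases the removed code was already working around: TensorrtExecutionProvider was skipped entirely, and OpenVINOExecutionProvider carried a TODO noting it had not been verified.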
