Skip to content

Commit eefd76f

Browse files
authored Nov 1, 2023
llama: fix exception in Llama.__del__ (ggml-org#846)
1 parent 3fc9147 commit eefd76f

File tree

1 file changed

+7
-5
lines changed

1 file changed

+7
-5
lines changed
 

llama_cpp/llama.py

+7-5
Original file line numberDiff line numberDiff line change
@@ -1638,12 +1638,14 @@ def create_chat_completion(
16381638
)
16391639
return self._convert_completion_to_chat(completion_or_chunks, stream=stream) # type: ignore
16401640

1641-
def _free_model(self):
1642-
if hasattr(self, "model") and self.model is not None:
1643-
llama_cpp.llama_free_model(self.model)
1641+
def _free_model(self, *, _lfree_model=llama_cpp._lib.llama_free_model, _free=llama_cpp._lib.llama_free):
1642+
model = getattr(self, 'model', None)
1643+
if model is not None:
1644+
_lfree_model(model)
16441645
self.model = None
1645-
if hasattr(self, "ctx") and self.ctx is not None:
1646-
llama_cpp.llama_free(self.ctx)
1646+
ctx = getattr(self, 'ctx', None)
1647+
if ctx is not None:
1648+
_free(ctx)
16471649
self.ctx = None
16481650

16491651
def __del__(self):

0 commit comments

Comments
 (0)
Please sign in to comment.