diff --git a/redisvl/extensions/llmcache/schema.py b/redisvl/extensions/llmcache/schema.py
index 8075496b..515b1421 100644
--- a/redisvl/extensions/llmcache/schema.py
+++ b/redisvl/extensions/llmcache/schema.py
@@ -44,9 +44,9 @@ def non_empty_metadata(cls, v):
     def to_dict(self) -> Dict:
         data = self.dict(exclude_none=True)
         data["prompt_vector"] = array_to_buffer(self.prompt_vector)
-        if self.metadata:
+        if self.metadata is not None:
             data["metadata"] = serialize(self.metadata)
-        if self.filters:
+        if self.filters is not None:
             data.update(self.filters)
             del data["filters"]
         return data
diff --git a/redisvl/extensions/llmcache/semantic.py b/redisvl/extensions/llmcache/semantic.py
index f0f38a04..1b78ea9e 100644
--- a/redisvl/extensions/llmcache/semantic.py
+++ b/redisvl/extensions/llmcache/semantic.py
@@ -300,8 +300,6 @@ def check(
             key = cache_search_result["id"]
             self._refresh_ttl(key)

-            print(cache_search_result, flush=True)
-
             # Create cache hit
             cache_hit = CacheHit(**cache_search_result)
             cache_hit_dict = {
diff --git a/tests/integration/test_llmcache.py b/tests/integration/test_llmcache.py
index 34c15113..03a6f0eb 100644
--- a/tests/integration/test_llmcache.py
+++ b/tests/integration/test_llmcache.py
@@ -295,6 +295,22 @@ def test_store_with_metadata(cache, vectorizer):
     assert check_result[0]["prompt"] == prompt


+def test_store_with_empty_metadata(cache, vectorizer):
+    prompt = "This is another test prompt."
+    response = "This is another test response."
+    metadata = {}
+    vector = vectorizer.embed(prompt)
+
+    cache.store(prompt, response, vector=vector, metadata=metadata)
+    check_result = cache.check(vector=vector, num_results=1)
+
+    assert len(check_result) == 1
+    print(check_result, flush=True)
+    assert check_result[0]["response"] == response
+    assert check_result[0]["metadata"] == metadata
+    assert check_result[0]["prompt"] == prompt
+
+
 def test_store_with_invalid_metadata(cache, vectorizer):
     prompt = "This is another test prompt."
     response = "This is another test response."