Commit 4f563e2

Revert "Update LLMInterface to restore LC compatibility (neo4j#416)" (neo4j#433)

This reverts commit 7b4e2d3.

1 parent 291d919 · commit 4f563e2

31 files changed: +1177 / -989 lines
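
The net effect of this revert, visible across the example diffs below, is that `LLMInterface.invoke` goes back to accepting a plain string as input, with conversation context supplied through optional `message_history` and `system_instruction` keyword arguments (see the `custom_llm.py` diff), rather than a `list[LLMMessage]`. A minimal sketch of the restored call pattern:

    from neo4j_graphrag.llm import LLMResponse, OpenAILLM

    llm = OpenAILLM(model_name="gpt-4o")  # reads OPENAI_API_KEY from the env
    res: LLMResponse = llm.invoke("say something")  # plain-string input again
    print(res.content)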

CHANGELOG.md

Lines changed: 0 additions & 7 deletions

@@ -4,15 +4,8 @@
 
 ### Added
 
-- Document node is now always created when running SimpleKGPipeline, even if `from_pdf=False`.
-- Document metadata is exposed in SimpleKGPipeline run method.
 - Added automatic rate limiting with retry logic and exponential backoff for all Embedding providers using tenacity. The `RateLimitHandler` interface allows for custom rate limiting strategies, including the ability to disable rate limiting entirely.
 
-### Fixed
-
-- LangChain Chat models compatibility is now working again.
-
-
 ## 1.10.0
 
 ### Added
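
The rate-limiting entry kept above refers to the `RateLimitHandler` interface imported in the `custom_llm.py` diff further down. As a rough sketch only (the `handle_sync`/`handle_async` hook names are an assumption inferred from the `F`/`AF` type variables in that example, not something this diff confirms), a handler that disables rate limiting entirely could look like:

    from typing import Any, Awaitable, Callable, TypeVar

    from neo4j_graphrag.utils.rate_limit import RateLimitHandler

    F = TypeVar("F", bound=Callable[..., Any])
    AF = TypeVar("AF", bound=Callable[..., Awaitable[Any]])


    class NoOpRateLimitHandler(RateLimitHandler):
        """Sketch: pass calls through untouched (hook names are assumed)."""

        def handle_sync(self, func: F) -> F:
            # Return the wrapped callable unchanged: no retry, no backoff.
            return func

        def handle_async(self, func: AF) -> AF:
            # Same pass-through for async callables.
            return func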

examples/README.md

Lines changed: 2 additions & 2 deletions

@@ -69,7 +69,7 @@ are listed in [the last section of this file](#customize).
 - [OpenAI (GPT)](./customize/llms/openai_llm.py)
 - [Azure OpenAI]()
 - [VertexAI (Gemini)](./customize/llms/vertexai_llm.py)
-- [MistralAI](customize/llms/mistralai_llm.py)
+- [MistralAI](./customize/llms/mistalai_llm.py)
 - [Cohere](./customize/llms/cohere_llm.py)
 - [Anthropic (Claude)](./customize/llms/anthropic_llm.py)
 - [Ollama](./customize/llms/ollama_llm.py)
@@ -142,7 +142,7 @@ are listed in [the last section of this file](#customize).
 
 ### Answer: GraphRAG
 
-- [LangChain compatibility](customize/answer/langchain_compatibility.py)
+- [LangChain compatibility](./customize/answer/langchain_compatiblity.py)
 - [Use a custom prompt](./customize/answer/custom_prompt.py)
 

examples/customize/llms/anthropic_llm.py

Lines changed: 1 addition & 17 deletions

@@ -1,28 +1,12 @@
 from neo4j_graphrag.llm import AnthropicLLM, LLMResponse
-from neo4j_graphrag.types import LLMMessage
 
 # set api key here on in the ANTHROPIC_API_KEY env var
 api_key = None
 
-messages: list[LLMMessage] = [
-    {
-        "role": "system",
-        "content": "You are a seasoned actor and expert performer, renowned for your one-man shows and comedic talent.",
-    },
-    {
-        "role": "user",
-        "content": "say something",
-    },
-]
-
-
 llm = AnthropicLLM(
     model_name="claude-3-opus-20240229",
     model_params={"max_tokens": 1000},  # max_tokens must be specified
     api_key=api_key,
 )
-res: LLMResponse = llm.invoke(
-    # "say something",
-    messages,
-)
+res: LLMResponse = llm.invoke("say something")
 print(res.content)

examples/customize/llms/cohere_llm.py

Lines changed: 1 addition & 13 deletions

@@ -1,23 +1,11 @@
 from neo4j_graphrag.llm import CohereLLM, LLMResponse
-from neo4j_graphrag.types import LLMMessage
 
 # set api key here on in the CO_API_KEY env var
 api_key = None
 
-messages: list[LLMMessage] = [
-    {
-        "role": "system",
-        "content": "You are a seasoned actor and expert performer, renowned for your one-man shows and comedic talent.",
-    },
-    {
-        "role": "user",
-        "content": "say something",
-    },
-]
-
 llm = CohereLLM(
     model_name="command-r",
     api_key=api_key,
 )
-res: LLMResponse = llm.invoke(input=messages)
+res: LLMResponse = llm.invoke("say something")
 print(res.content)

examples/customize/llms/custom_llm.py

Lines changed: 18 additions & 7 deletions

@@ -1,13 +1,14 @@
 import random
 import string
-from typing import Any, Awaitable, Callable, Optional, TypeVar
+from typing import Any, Awaitable, Callable, List, Optional, TypeVar, Union
 
 from neo4j_graphrag.llm import LLMInterface, LLMResponse
 from neo4j_graphrag.utils.rate_limit import (
     RateLimitHandler,
     # rate_limit_handler,
     # async_rate_limit_handler,
 )
+from neo4j_graphrag.message_history import MessageHistory
 from neo4j_graphrag.types import LLMMessage
 
 
@@ -17,27 +18,37 @@ def __init__(
     ):
         super().__init__(model_name, **kwargs)
 
-    def _invoke(
+    # Optional: Apply rate limit handling to synchronous invoke method
+    # @rate_limit_handler
+    def invoke(
         self,
-        input: list[LLMMessage],
+        input: str,
+        message_history: Optional[Union[List[LLMMessage], MessageHistory]] = None,
+        system_instruction: Optional[str] = None,
     ) -> LLMResponse:
         content: str = (
             self.model_name + ": " + "".join(random.choices(string.ascii_letters, k=30))
         )
         return LLMResponse(content=content)
 
-    async def _ainvoke(
+    # Optional: Apply rate limit handling to asynchronous ainvoke method
+    # @async_rate_limit_handler
+    async def ainvoke(
         self,
-        input: list[LLMMessage],
+        input: str,
+        message_history: Optional[Union[List[LLMMessage], MessageHistory]] = None,
+        system_instruction: Optional[str] = None,
    ) -> LLMResponse:
         raise NotImplementedError()
 
 
-llm = CustomLLM("")
+llm = CustomLLM(
+    ""
+)  # if rate_limit_handler and async_rate_limit_handler decorators are used, the default rate limit handler will be applied automatically (retry with exponential backoff)
 res: LLMResponse = llm.invoke("text")
 print(res.content)
 
-# If you want to use a custom rate limit handler
+# If rate_limit_handler and async_rate_limit_handler decorators are used and you want to use a custom rate limit handler
 # Type variables for function signatures used in rate limit handlers
 F = TypeVar("F", bound=Callable[..., Any])
 AF = TypeVar("AF", bound=Callable[..., Awaitable[Any]])
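
To wire a custom handler into the example above, one plausible pattern would be the following. It assumes, hypothetically, that `LLMInterface.__init__` accepts a `rate_limit_handler` keyword forwarded by `CustomLLM` through `**kwargs`; the hunk shown here ends before the file demonstrates this.

    # Hypothetical wiring: the rate_limit_handler kwarg is an assumption,
    # reusing the NoOpRateLimitHandler sketched after the CHANGELOG diff.
    llm = CustomLLM("my-model", rate_limit_handler=NoOpRateLimitHandler())
    res = llm.invoke("text")
    print(res.content)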

examples/customize/llms/mistalai_llm.py

Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
+from neo4j_graphrag.llm import MistralAILLM
+
+# set api key here on in the MISTRAL_API_KEY env var
+api_key = None
+
+llm = MistralAILLM(
+    model_name="mistral-small-latest",
+    api_key=api_key,
+)
+llm.invoke("say something")

examples/customize/llms/mistralai_llm.py

Lines changed: 0 additions & 32 deletions
This file was deleted.

examples/customize/llms/ollama_llm.py

Lines changed: 2 additions & 17 deletions

@@ -3,26 +3,11 @@
 """
 
 from neo4j_graphrag.llm import LLMResponse, OllamaLLM
-from neo4j_graphrag.types import LLMMessage
-
-messages: list[LLMMessage] = [
-    {
-        "role": "system",
-        "content": "You are a seasoned actor and expert performer, renowned for your one-man shows and comedic talent.",
-    },
-    {
-        "role": "user",
-        "content": "say something",
-    },
-]
-
 
 llm = OllamaLLM(
-    model_name="orca-mini:latest",
+    model_name="<model_name>",
     # model_params={"options": {"temperature": 0}, "format": "json"},
     # host="...",  # if using a remote server
 )
-res: LLMResponse = llm.invoke(
-    messages,
-)
+res: LLMResponse = llm.invoke("What is the additive color model?")
 print(res.content)

examples/customize/llms/openai_llm.py

Lines changed: 1 addition & 21 deletions

@@ -1,28 +1,8 @@
 from neo4j_graphrag.llm import LLMResponse, OpenAILLM
-from neo4j_graphrag.message_history import InMemoryMessageHistory
-from neo4j_graphrag.types import LLMMessage
 
 # set api key here on in the OPENAI_API_KEY env var
 api_key = None
 
-messages: list[LLMMessage] = [
-    {
-        "role": "system",
-        "content": "You are a seasoned actor and expert performer, renowned for your one-man shows and comedic talent.",
-    },
-    {
-        "role": "user",
-        "content": "say something",
-    },
-]
-
-
 llm = OpenAILLM(model_name="gpt-4o", api_key=api_key)
-res: LLMResponse = llm.invoke(
-    # "say something",
-    # messages,
-    InMemoryMessageHistory(
-        messages=messages,
-    )
-)
+res: LLMResponse = llm.invoke("say something")
 print(res.content)
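
Note that the revert does not remove conversation context from the interface: the `custom_llm.py` signature above keeps an optional `message_history` parameter. A small sketch of combining it with the restored plain-string input (the history contents here are illustrative):

    from neo4j_graphrag.llm import LLMResponse, OpenAILLM
    from neo4j_graphrag.types import LLMMessage

    api_key = None  # or set the OPENAI_API_KEY env var

    history: list[LLMMessage] = [
        {"role": "user", "content": "My name is Ada."},
        {"role": "assistant", "content": "Nice to meet you, Ada."},
    ]
    llm = OpenAILLM(model_name="gpt-4o", api_key=api_key)
    res: LLMResponse = llm.invoke("What is my name?", message_history=history)
    print(res.content)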
