
Commit 3a6fe9a

fix: handle langchain multiple batching (#2257)
## Issue Link / Problem Description

- Fixes #2256, #2254 and #2252

## Changes Made

- Handle LangChain batching separately

Signed-off-by: Kumar Anirudha <mail@anirudha.dev>
1 parent: bd97987 · commit: 3a6fe9a

File tree

1 file changed: +10 −3 lines

src/ragas/prompt/pydantic_prompt.py

```diff
@@ -207,10 +207,12 @@ async def generate_multiple(
         # LangChain LLMs have agenerate() for async, generate() for sync
         # Ragas LLMs have generate() as async method
         if is_langchain_llm(llm):
-            # This is a LangChain LLM - use agenerate_prompt()
+            # This is a LangChain LLM - use agenerate_prompt() with batch for multiple generations
             langchain_llm = t.cast(BaseLanguageModel, llm)
+            # LangChain doesn't support n parameter directly, so we batch multiple prompts
+            prompts = t.cast(t.List[t.Any], [prompt_value for _ in range(n)])
             resp = await langchain_llm.agenerate_prompt(
-                [prompt_value],
+                prompts,
                 stop=stop,
                 callbacks=prompt_cb,
             )
@@ -228,7 +230,12 @@ async def generate_multiple(
         output_models = []
         parser = RagasOutputParser(pydantic_object=self.output_model)
         for i in range(n):
-            output_string = resp.generations[0][i].text
+            if is_langchain_llm(llm):
+                # For LangChain LLMs, each generation is in a separate batch result
+                output_string = resp.generations[i][0].text
+            else:
+                # For Ragas LLMs, all generations are in the first batch
+                output_string = resp.generations[0][i].text
             try:
                 # For the parser, we need a BaseRagasLLM, so if it's a LangChain LLM, we need to handle this
                 if is_langchain_llm(llm):
```
