From a5fc6d86d80e4b3a0992e16a1ad5bcfb4e773697 Mon Sep 17 00:00:00 2001
From: Rich <34130474+richwardle@users.noreply.github.com>
Date: Fri, 29 Nov 2024 15:31:17 +0000
Subject: [PATCH] remove double generate

---
 prompting/llms/hf_llm.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/prompting/llms/hf_llm.py b/prompting/llms/hf_llm.py
index 2dfd9db7..1dd85fd5 100644
--- a/prompting/llms/hf_llm.py
+++ b/prompting/llms/hf_llm.py
@@ -70,11 +70,6 @@ def generate(self, prompts, sampling_params=None, seed=None):
             eos_token_id=self.tokenizer.eos_token_id,
         )
 
-        outputs = self.model.generate(
-            **inputs,
-            **filtered_params,
-            eos_token_id=self.tokenizer.eos_token_id,
-        )
         results = self.tokenizer.batch_decode(
             outputs[:, inputs["input_ids"].shape[1] :],
             skip_special_tokens=True,
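
Note: the hunk above deletes a duplicated self.model.generate call. The
surviving call's outputs feed directly into batch_decode, so results are
unchanged while inference runs once instead of twice. Below is a minimal,
self-contained sketch of the post-fix flow, assuming a generic HuggingFace
transformers causal LM; the model name, prompt, and max_new_tokens value
are illustrative and not taken from the repository.

    # Sketch of the corrected flow: one generate call, one decode.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "gpt2"  # placeholder model; the repo's actual model differs
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    inputs = tokenizer(["Hello, world"], return_tensors="pt")

    # Single generate call -- the bug was a second, identical call that
    # doubled inference cost while its result simply overwrote the first.
    outputs = model.generate(
        **inputs,
        max_new_tokens=32,  # assumed sampling parameter for the sketch
        eos_token_id=tokenizer.eos_token_id,
    )

    # Slice off the prompt tokens so only newly generated text is decoded,
    # mirroring outputs[:, inputs["input_ids"].shape[1]:] in the patch.
    results = tokenizer.batch_decode(
        outputs[:, inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )
    print(results)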