diff --git a/prompting/llms/hf_llm.py b/prompting/llms/hf_llm.py
index 2dfd9db7..1dd85fd5 100644
--- a/prompting/llms/hf_llm.py
+++ b/prompting/llms/hf_llm.py
@@ -70,11 +70,6 @@ def generate(self, prompts, sampling_params=None, seed=None):
             eos_token_id=self.tokenizer.eos_token_id,
         )
-        outputs = self.model.generate(
-            **inputs,
-            **filtered_params,
-            eos_token_id=self.tokenizer.eos_token_id,
-        )
         results = self.tokenizer.batch_decode(
             outputs[:, inputs["input_ids"].shape[1] :],
             skip_special_tokens=True,