From 562aa3102e135c38c475491a65254f25f6a9bb1b Mon Sep 17 00:00:00 2001
From: codingchild
Date: Sun, 14 May 2023 22:37:02 +0000
Subject: [PATCH] change comment in tuners.lora, lora_alpha float to int

---
 src/peft/tuners/lora.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/peft/tuners/lora.py b/src/peft/tuners/lora.py
index fce9c2561a..d2db0da377 100644
--- a/src/peft/tuners/lora.py
+++ b/src/peft/tuners/lora.py
@@ -48,7 +48,7 @@ class LoraConfig(PeftConfig):
     Args:
         r (`int`): Lora attention dimension.
         target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.
-        lora_alpha (`float`): The alpha parameter for Lora scaling.
+        lora_alpha (`int`): The alpha parameter for Lora scaling.
         lora_dropout (`float`): The dropout probability for Lora layers.
         fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).
             For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:
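
A minimal usage sketch (not part of the patch) illustrating the corrected type: lora_alpha is passed as an int, and the LoRA update is scaled by lora_alpha / r. The model name and target module below follow the gpt-2 example already mentioned in the docstring context and are illustrative only.

from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# lora_alpha is an int; the adapter output is scaled by lora_alpha / r
config = LoraConfig(
    r=8,
    lora_alpha=16,              # int, as the corrected docstring states
    target_modules=["c_attn"],  # gpt-2 attention projection (a Conv1D layer); illustrative
    lora_dropout=0.05,
    fan_in_fan_out=True,        # True because gpt-2's Conv1D stores weights as (fan_in, fan_out)
    task_type="CAUSAL_LM",
)

base_model = AutoModelForCausalLM.from_pretrained("gpt2")
model = get_peft_model(base_model, config)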