@@ -606,27 +606,33 @@ def test_packed_loras(dist_init, dummy_model_gate_up, device):
 
     assert isinstance(model.get_submodule("gate_up_proj"),
                       MergedColumnParallelLinearWithLoRA)
+    # Verify packed lora is correct
+    model_lora_clone = model_lora.clone(1)
+    model_lora_clone1 = model_lora1.clone(1)
     assert manager.add_adapter(model_lora)
     assert manager.add_adapter(model_lora1)
 
+    assert model_lora.get_lora("gate_proj") is None
+    assert model_lora.get_lora("up_proj") is None
+    assert model_lora1.get_lora("up_proj") is None
     packed_lora = model_lora.get_lora("gate_up_proj")
     assert packed_lora and isinstance(packed_lora, PackedLoRALayerWeights)
 
     torch.testing.assert_close(packed_lora.lora_a[0],
-                               model_lora.get_lora("gate_proj").lora_a)
+                               model_lora_clone.get_lora("gate_proj").lora_a)
     torch.testing.assert_close(packed_lora.lora_b[0],
-                               model_lora.get_lora("gate_proj").lora_b)
+                               model_lora_clone.get_lora("gate_proj").lora_b)
     torch.testing.assert_close(packed_lora.lora_a[1],
-                               model_lora.get_lora("up_proj").lora_a)
+                               model_lora_clone.get_lora("up_proj").lora_a)
     torch.testing.assert_close(packed_lora.lora_b[1],
-                               model_lora.get_lora("up_proj").lora_b)
+                               model_lora_clone.get_lora("up_proj").lora_b)
 
     packed_lora1 = model_lora1.get_lora("gate_up_proj")
     assert packed_lora1 and isinstance(packed_lora1, PackedLoRALayerWeights)
 
     assert packed_lora1.lora_a[0] is None
     assert packed_lora1.lora_b[0] is None
     torch.testing.assert_close(packed_lora1.lora_a[1],
-                               model_lora1.get_lora("up_proj").lora_a)
+                               model_lora_clone1.get_lora("up_proj").lora_a)
     torch.testing.assert_close(packed_lora1.lora_b[1],
-                               model_lora1.get_lora("up_proj").lora_b)
+                               model_lora_clone1.get_lora("up_proj").lora_b)