Skip to content

Commit

Permalink
Remove unused comments
Browse files Browse the repository at this point in the history
  • Loading branch information
VladOS95-cyber committed Oct 24, 2024
1 parent 06011a4 commit bd11280
Showing 1 changed file with 0 additions and 2 deletions.
2 changes: 0 additions & 2 deletions tests/quantization/ggml/test_ggml.py
Original file line number Diff line number Diff line change
Expand Up @@ -738,7 +738,6 @@ def test_starcoder2_weights_conversion_fp16(self):

for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
# quantized models do not contain "lm_head.weight" layer
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
Expand Down Expand Up @@ -777,7 +776,6 @@ def test_mamba_weights_conversion_fp16(self):

for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
# quantized models do not contain "lm_head.weight" layer
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
if "mixer.A_log" in layer_name:
# we should increase tolerance after exponential reversing
Expand Down

0 comments on commit bd11280

Please sign in to comment.