From 5540b9257e0e3825d2c9c13758dfbed495f34ac1 Mon Sep 17 00:00:00 2001
From: Sara Adkins
Date: Tue, 6 Aug 2024 16:51:36 -0400
Subject: [PATCH] replace dash with underscore (#122)

---
 examples/bit_packing/int4_config.json                       | 2 +-
 examples/llama_1.1b/example_quant_config.json               | 2 +-
 src/compressed_tensors/quantization/quant_config.py         | 2 +-
 tests/test_compressors/test_model_compressor.py             | 2 +-
 tests/test_quantization/lifecycle/test_dynamic_lifecycle.py | 2 +-
 tests/test_quantization/lifecycle/test_kv_cache.py          | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/examples/bit_packing/int4_config.json b/examples/bit_packing/int4_config.json
index 4513ab54..44d76cc4 100644
--- a/examples/bit_packing/int4_config.json
+++ b/examples/bit_packing/int4_config.json
@@ -1,5 +1,5 @@
 {
-    "quant_method": "compressed-tensors",
+    "quant_method": "compressed_tensors",
     "format": "pack-quantized",
     "global_compression_ratio": null,
     "config_groups": {
diff --git a/examples/llama_1.1b/example_quant_config.json b/examples/llama_1.1b/example_quant_config.json
index 0f125120..0b610f80 100644
--- a/examples/llama_1.1b/example_quant_config.json
+++ b/examples/llama_1.1b/example_quant_config.json
@@ -1,5 +1,5 @@
 {
-    "quant_method": "compressed-tensors",
+    "quant_method": "compressed_tensors",
     "format": "fakequant",
     "global_compression_ratio": null,
     "config_groups": {
diff --git a/src/compressed_tensors/quantization/quant_config.py b/src/compressed_tensors/quantization/quant_config.py
index 01b43910..2e6adb3c 100644
--- a/src/compressed_tensors/quantization/quant_config.py
+++ b/src/compressed_tensors/quantization/quant_config.py
@@ -103,7 +103,7 @@ def __le__(self, other):
     QuantizationStatus.COMPRESSED,
 ]

-DEFAULT_QUANTIZATION_METHOD = "compressed-tensors"
+DEFAULT_QUANTIZATION_METHOD = "compressed_tensors"
 DEFAULT_QUANTIZATION_FORMAT = "fakequant"


diff --git a/tests/test_compressors/test_model_compressor.py b/tests/test_compressors/test_model_compressor.py
index 126d54fa..d58ce2b2 100644
--- a/tests/test_compressors/test_model_compressor.py
+++ b/tests/test_compressors/test_model_compressor.py
@@ -43,7 +43,7 @@ def quantization_config():
         "format": "pack-quantized",
         "global_compression_ratio": 1.891791164021256,
         "ignore": ["lm_head"],
-        "quant_method": "compressed-tensors",
+        "quant_method": "compressed_tensors",
         "quantization_status": "frozen",
     }

diff --git a/tests/test_quantization/lifecycle/test_dynamic_lifecycle.py b/tests/test_quantization/lifecycle/test_dynamic_lifecycle.py
index be228451..d343a73e 100644
--- a/tests/test_quantization/lifecycle/test_dynamic_lifecycle.py
+++ b/tests/test_quantization/lifecycle/test_dynamic_lifecycle.py
@@ -90,7 +90,7 @@ def get_tinyllama_model():

 def get_sample_dynamic_tinyllama_quant_config():
     config_dict = {
-        "quant_method": "compressed-tensors",
+        "quant_method": "compressed_tensors",
         "format": "fakequant",
         "quantization_status": "calibration",
         "global_compression_ratio": None,
diff --git a/tests/test_quantization/lifecycle/test_kv_cache.py b/tests/test_quantization/lifecycle/test_kv_cache.py
index c7d92741..1c6f3a92 100644
--- a/tests/test_quantization/lifecycle/test_kv_cache.py
+++ b/tests/test_quantization/lifecycle/test_kv_cache.py
@@ -23,7 +23,7 @@


 config = {
-    "quant_method": "compressed-tensors",
+    "quant_method": "compressed_tensors",
     "format": "fakequant",
     "kv_cache_scheme": {
         "num_bits": 8,