Skip to content

Commit

Permalink
replace dash with underscore (#122)
Browse files (browse the repository at this point in the history)
  • Loading branch information
Sara Adkins authored Aug 6, 2024
1 parent a507590 commit 5540b92
Show file tree
Hide file tree
Showing 6 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion examples/bit_packing/int4_config.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
"quant_method": "compressed-tensors",
"quant_method": "compressed_tensors",
"format": "pack-quantized",
"global_compression_ratio": null,
"config_groups": {
Expand Down
2 changes: 1 addition & 1 deletion examples/llama_1.1b/example_quant_config.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
"quant_method": "compressed-tensors",
"quant_method": "compressed_tensors",
"format": "fakequant",
"global_compression_ratio": null,
"config_groups": {
Expand Down
2 changes: 1 addition & 1 deletion src/compressed_tensors/quantization/quant_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def __le__(self, other):
QuantizationStatus.COMPRESSED,
]

DEFAULT_QUANTIZATION_METHOD = "compressed-tensors"
DEFAULT_QUANTIZATION_METHOD = "compressed_tensors"
DEFAULT_QUANTIZATION_FORMAT = "fakequant"


Expand Down
2 changes: 1 addition & 1 deletion tests/test_compressors/test_model_compressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def quantization_config():
"format": "pack-quantized",
"global_compression_ratio": 1.891791164021256,
"ignore": ["lm_head"],
"quant_method": "compressed-tensors",
"quant_method": "compressed_tensors",
"quantization_status": "frozen",
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def get_tinyllama_model():

def get_sample_dynamic_tinyllama_quant_config():
config_dict = {
"quant_method": "compressed-tensors",
"quant_method": "compressed_tensors",
"format": "fakequant",
"quantization_status": "calibration",
"global_compression_ratio": None,
Expand Down
2 changes: 1 addition & 1 deletion tests/test_quantization/lifecycle/test_kv_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@


config = {
"quant_method": "compressed-tensors",
"quant_method": "compressed_tensors",
"format": "fakequant",
"kv_cache_scheme": {
"num_bits": 8,
Expand Down

0 comments on commit 5540b92

Please sign in to comment.