From 097bd7944fbc7c0eb5bfe14c9fa91df38f201121 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Mon, 8 Apr 2024 11:38:43 +0000 Subject: [PATCH 01/51] initial commit --- .../transformers/compression/README.md | 1 + .../transformers/compression/__init__.py | 2 - .../compression/compressors/__init__.py | 19 -- .../compression/compressors/base.py | 83 ------ .../compression/compressors/dense.py | 32 --- .../compression/compressors/sparse_bitmask.py | 237 ------------------ .../compression/config/__init__.py | 6 +- .../transformers/compression/config/base.py | 17 +- .../transformers/compression/config/dense.py | 8 +- .../compression/config/sparse_bitmask.py | 8 +- .../compression/utils/__init__.py | 1 - .../compression/utils/compress_save.py | 9 +- .../transformers/compression/utils/helpers.py | 9 +- .../compression/utils/safetensors_load.py | 196 --------------- .../sparsification/sparse_model.py | 2 +- .../transformers/compression/test_bitmask.py | 122 --------- .../compression/test_registries.py | 50 ---- .../compression/test_sparse_auto.py | 20 +- 18 files changed, 40 insertions(+), 782 deletions(-) delete mode 100644 src/sparseml/transformers/compression/compressors/__init__.py delete mode 100644 src/sparseml/transformers/compression/compressors/base.py delete mode 100644 src/sparseml/transformers/compression/compressors/dense.py delete mode 100644 src/sparseml/transformers/compression/compressors/sparse_bitmask.py delete mode 100644 src/sparseml/transformers/compression/utils/safetensors_load.py delete mode 100644 tests/sparseml/transformers/compression/test_bitmask.py delete mode 100644 tests/sparseml/transformers/compression/test_registries.py diff --git a/src/sparseml/transformers/compression/README.md b/src/sparseml/transformers/compression/README.md index 51d49adecc5..7d6a3c9eb90 100644 --- a/src/sparseml/transformers/compression/README.md +++ b/src/sparseml/transformers/compression/README.md @@ -1,3 +1,4 @@ +// # TODO: Edit this readme # Save/Load Compressed SafeTensors ## Motivation diff --git a/src/sparseml/transformers/compression/__init__.py b/src/sparseml/transformers/compression/__init__.py index ca37b25df52..02878b7b1e7 100644 --- a/src/sparseml/transformers/compression/__init__.py +++ b/src/sparseml/transformers/compression/__init__.py @@ -13,6 +13,4 @@ # limitations under the License. # flake8: noqa - -from .compressors import * from .config import * diff --git a/src/sparseml/transformers/compression/compressors/__init__.py b/src/sparseml/transformers/compression/compressors/__init__.py deleted file mode 100644 index e8a36527c04..00000000000 --- a/src/sparseml/transformers/compression/compressors/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# flake8: noqa - -from .base import ModelCompressor -from .dense import DenseCompressor -from .sparse_bitmask import BitmaskCompressor diff --git a/src/sparseml/transformers/compression/compressors/base.py b/src/sparseml/transformers/compression/compressors/base.py deleted file mode 100644 index 2a1a37d9196..00000000000 --- a/src/sparseml/transformers/compression/compressors/base.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import operator -from typing import Dict, Generator, Tuple - -from torch import Tensor -from torch.nn import Module, Parameter -from tqdm import tqdm - -from sparseml.transformers.compression.config import CompressionConfig -from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME -from sparseml.utils.pytorch.module import set_layer -from sparsezoo.utils.registry import RegistryMixin - - -__all__ = ["ModelCompressor"] - - -class ModelCompressor(RegistryMixin): - """ - Base class representing a model compression algorithm. - - :param config: config specifying compression parameters - """ - - def __init__(self, config: CompressionConfig): - self.config = config - - def compress(self, model_state: Dict[str, Tensor]) -> Dict[str, Tensor]: - """ - Compresses a dense state dict - - :param model_state: state dict of uncompressed model - :return: compressed state dict - """ - raise NotImplementedError() - - def decompress(self, model_path: str) -> Generator[Tuple[str, Tensor], None, None]: - """ - Reads a compressed state dict located at model_path and returns a - generator for sequentially decompressing back to a dense state dict - - :param model_path: path to compressed safetensors model - :return: compressed state dict - """ - raise NotImplementedError() - - @staticmethod - def replace_layer(param_name: str, data: Tensor, model: Module): - """ - Overwrites a parameterized layer with a new tensor, maintaining the device of - the original parameter - - :param param_name: name of parameterized layer to replace - :param data: tensor to insert into model - :param model: pytorch model to insert data into - """ - model_device = operator.attrgetter(param_name)(model).device - set_layer(param_name, Parameter(data.to(model_device)), model) - - def overwrite_weights(self, model_path: str, model: Module): - """ - Overwrites the weights in model with weights decompressed from model_path - - :param model_path: path to compressed weights - :param model: pytorch model to load decompressed weights into - """ - dense_gen = self.decompress(model_path) - for name, data in tqdm(dense_gen, desc="Decompressing model"): - ModelCompressor.replace_layer(name, data, model) - setattr(model, SPARSITY_CONFIG_NAME, self.config) diff --git a/src/sparseml/transformers/compression/compressors/dense.py b/src/sparseml/transformers/compression/compressors/dense.py deleted file mode 100644 index e40ea92e6c6..00000000000 --- a/src/sparseml/transformers/compression/compressors/dense.py +++ 
/dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Dict, Generator, Tuple - -from torch import Tensor - -from sparseml.transformers.compression.compressors import ModelCompressor - - -@ModelCompressor.register(name="dense_sparsity") -class DenseCompressor(ModelCompressor): - """ - Identity compressor for dense models, returns the original state_dict - """ - - def compress(self, model_state: Dict[str, Tensor]) -> Dict[str, Tensor]: - return model_state - - def decompress(self, model_path: str) -> Generator[Tuple[str, Tensor], None, None]: - return iter([]) diff --git a/src/sparseml/transformers/compression/compressors/sparse_bitmask.py b/src/sparseml/transformers/compression/compressors/sparse_bitmask.py deleted file mode 100644 index 1c6f35c7171..00000000000 --- a/src/sparseml/transformers/compression/compressors/sparse_bitmask.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Dict, Generator, List, Tuple, Union - -import numpy -import torch -from torch import Tensor -from tqdm import tqdm - -from safetensors import safe_open -from sparseml.transformers.compression.compressors import ModelCompressor -from sparseml.transformers.compression.utils import ( - get_nested_weight_mappings, - merge_names, -) - - -__all__ = [ - "BitmaskCompressor", - "BitmaskTensor", - "bitmask_compress", - "bitmask_decompress", - "pack_bitmasks", - "unpack_bitmasks", -] - -_LOGGER: logging.Logger = logging.getLogger(__name__) - - -@ModelCompressor.register(name="sparse_bitmask") -class BitmaskCompressor(ModelCompressor): - """ - Compression for sparse models using bitmasks. Non-zero weights are stored in a 1d - values tensor, with their locations stored in a 2d bitmask - """ - - COMPRESSION_PARAM_NAMES = ["shape", "compressed", "bitmask", "row_offsets"] - - def compress(self, model_state: Dict[str, Tensor]) -> Dict[str, Tensor]: - """ - Compresses a dense state dict using bitmask compression - - :param model_state: state dict of uncompressed model - :return: compressed state dict - """ - compressed_dict = {} - _LOGGER.debug( - f"Compressing model with {len(model_state)} parameterized layers..." 
- ) - for name, value in tqdm(model_state.items(), desc="Compressing model"): - bitmask_tensor = BitmaskTensor.from_dense(value) - bitmask_dict = bitmask_tensor.dict(name_prefix=name, device="cpu") - for key in bitmask_dict.keys(): - if key in compressed_dict: - _LOGGER.warn( - f"Expected all compressed state_dict keys to be unique, but " - f"found an existing entry for {key}. The existing entry will " - "be replaced." - ) - compressed_dict.update(bitmask_dict) - - return compressed_dict - - def decompress(self, model_path: str) -> Generator[Tuple[str, Tensor], None, None]: - """ - Reads a bitmask compressed state dict located at model_path and returns a - generator for sequentially decompressing back to a dense state dict - - :param model_path: path to compressed safetensors model - :return: iterator for generating decompressed weights - """ - weight_mappings = get_nested_weight_mappings( - model_path, self.COMPRESSION_PARAM_NAMES - ) - for weight_name in weight_mappings.keys(): - weight_data = {} - for param_name, safe_path in weight_mappings[weight_name].items(): - full_name = merge_names(weight_name, param_name) - with safe_open(safe_path, framework="pt", device="cpu") as f: - weight_data[param_name] = f.get_tensor(full_name) - data = BitmaskTensor(**weight_data) - decompressed = data.decompress() - yield weight_name, decompressed - - -class BitmaskTensor: - """ - Owns compressions and decompression for a single bitmask compressed tensor. - Adapted from: https://github.com/mgoin/torch_bitmask/tree/main - - :param shape: shape of dense tensor - :compressed: flat tensor of non-zero values - :bitmask: 2d bitmask of non-zero values - :row_offsets: flat tensor indicating what index in values each dense row starts at - """ - - def __init__( - self, - shape: Union[torch.Size, List], - compressed: Tensor, - bitmask: Tensor, - row_offsets: Tensor, - ): - self.shape = list(shape) - self.compressed = compressed - self.bitmask = bitmask - self.row_offsets = row_offsets - - @staticmethod - def from_dense(tensor: Tensor) -> "BitmaskTensor": - """ - :param tensor: dense tensor to compress - :return: instantiated compressed tensor - """ - shape = tensor.shape - compressed, bitmask, row_offsets = bitmask_compress(tensor.cpu()) - return BitmaskTensor( - shape=shape, compressed=compressed, bitmask=bitmask, row_offsets=row_offsets - ) - - def decompress(self) -> Tensor: - """ - :return: reconstructed dense tensor - """ - return bitmask_decompress(self.compressed, self.bitmask, self.shape) - - def curr_memory_size_bytes(self): - """ - :return: size in bytes required to store compressed tensor on disk - """ - - def sizeof_tensor(a): - return a.element_size() * a.nelement() - - return ( - sizeof_tensor(self.compressed) - + sizeof_tensor(self.bitmask) - + sizeof_tensor(self.row_offsets) - ) - - def dict(self, name_prefix: str, device: str = "cpu") -> Dict[str, Tensor]: - """ - :name_prefix: name of original tensor to store compressed weight as - :return: dict of compressed data for the stored weight - """ - return { - merge_names(name_prefix, "shape"): torch.tensor(self.shape, device=device), - merge_names(name_prefix, "compressed"): self.compressed.to(device), - merge_names(name_prefix, "bitmask"): self.bitmask.to(device), - merge_names(name_prefix, "row_offsets"): self.row_offsets.to(device), - } - - def __repr__(self): - return f"BitmaskTensor(shape={self.shape}, compressed=True)" - - -def bitmask_compress(tensor: Tensor) -> Tuple[Tensor, Tensor, Tensor]: - """ - Compresses a dense tensor using bitmask 
compression - - :param tensor: dense tensor to compress - :return: tuple of compressed data representing tensor - """ - bytemasks = tensor != 0 - row_counts = bytemasks.sum(dim=-1) - row_offsets = torch.cumsum(row_counts, 0) - row_counts - values = tensor[bytemasks] - bitmasks_packed = pack_bitmasks(bytemasks) - - return values, bitmasks_packed, row_offsets - - -def bitmask_decompress( - values: Tensor, bitmasks: Tensor, original_shape: torch.Size -) -> Tensor: - """ - Reconstructs a dense tensor from a compressed one - - :param values: 1d tensor of non-zero values - :param bitmasks: 2d int8 tensor flagging locations of non-zero values in the - tensors original shape - :param original_shape: shape of the dense tensor - :return: decompressed dense tensor - """ - bytemasks_unpacked = unpack_bitmasks(bitmasks, original_shape) - - decompressed_tensor = torch.zeros(original_shape, dtype=values.dtype) - decompressed_tensor[bytemasks_unpacked] = values - - return decompressed_tensor - - -def pack_bitmasks(bytemasks: Tensor) -> Tensor: - """ - Converts a bytemask tensor to a bitmask tensor to reduce memory. Shape RxC will be - compressed to R x ceil(C/8) - :param bytemasks: mask tensor where each byte corresponds to a weight - :return: mask tensor where each bit corresounds to a weight - """ - packed_bits_numpy = numpy.packbits(bytemasks.numpy(), axis=-1, bitorder="little") - packed_bits_torch = torch.from_numpy(packed_bits_numpy) - - return packed_bits_torch - - -def unpack_bitmasks(packed_bitmasks: Tensor, original_shape: torch.Size) -> Tensor: - """ - Converts a bitmask tensor back to a bytemask tensor for use during decompression - - :param packed_bitmasks: mask tensor where each bit corresponds to a weight - :param original_shape: dense shape to decompress to - :return: boolean mask of weights in the original dense shape - """ - # Unpack the bits - unpacked_bits = numpy.unpackbits( - packed_bitmasks.numpy(), axis=-1, count=original_shape[-1], bitorder="little" - ) - - # Reshape to match the original shape - unpacked_bitmasks_torch = torch.from_numpy( - unpacked_bits.reshape(original_shape).astype(bool) - ) - - return unpacked_bitmasks_torch diff --git a/src/sparseml/transformers/compression/config/__init__.py b/src/sparseml/transformers/compression/config/__init__.py index 6465c3c6d1b..06ad97ba4bd 100644 --- a/src/sparseml/transformers/compression/config/__init__.py +++ b/src/sparseml/transformers/compression/config/__init__.py @@ -14,6 +14,6 @@ # flake8: noqa -from .base import CompressionConfig -from .dense import DenseSparsityConfig -from .sparse_bitmask import BitmaskConfig +from .base import SparseMLCompressionConfig +from .dense import SparseMLDenseSparsityConfig +from .sparse_bitmask import SparseMLBitmaskConfig diff --git a/src/sparseml/transformers/compression/config/base.py b/src/sparseml/transformers/compression/config/base.py index a642676c5a4..d85a975bf4b 100644 --- a/src/sparseml/transformers/compression/config/base.py +++ b/src/sparseml/transformers/compression/config/base.py @@ -14,18 +14,17 @@ from typing import Optional -from pydantic import BaseModel from torch.nn import Module import sparseml.core.session as session_manager from sparseml.pytorch.utils import ModuleSparsificationInfo -from sparsezoo.utils.registry import RegistryMixin +from sparsetensors import CompressionConfig -__all__ = ["CompressionConfig"] +__all__ = ["SparseMLCompressionConfig"] -class CompressionConfig(RegistryMixin, BaseModel): +class SparseMLCompressionConfig(CompressionConfig): """ Base data class 
for storing compression parameters @@ -85,18 +84,18 @@ def infer_config_from_model( :return: compression config inferred from the model """ - global_sparsity = CompressionConfig.infer_global_sparsity(model) + global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) if global_sparsity < 0.05: return None - sparsity_structure = CompressionConfig.infer_sparsity_structure() + sparsity_structure = SparseMLCompressionConfig.infer_sparsity_structure() if compress: format = "sparse_bitmask" else: format = "dense_sparsity" - return CompressionConfig.load_from_registry( + return SparseMLCompressionConfig.load_from_registry( format, global_sparsity=global_sparsity, sparsity_structure=sparsity_structure, @@ -108,5 +107,5 @@ def fill_config_details(self, model: Module): :param model: pytorch model to infer config parameters from """ - self.global_sparsity = CompressionConfig.infer_global_sparsity(model) - self.sparsity_structure = CompressionConfig.infer_sparsity_structure() + self.global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) + self.sparsity_structure = SparseMLCompressionConfig.infer_sparsity_structure() diff --git a/src/sparseml/transformers/compression/config/dense.py b/src/sparseml/transformers/compression/config/dense.py index e9903c4fdb2..cd691ffc1a7 100644 --- a/src/sparseml/transformers/compression/config/dense.py +++ b/src/sparseml/transformers/compression/config/dense.py @@ -14,14 +14,14 @@ from typing import Optional -from sparseml.transformers.compression.config import CompressionConfig +from sparseml.transformers.compression.config import SparseMLCompressionConfig -__all__ = ["DenseSparsityConfig"] +__all__ = ["SparseMLDenseSparsityConfig"] -@CompressionConfig.register(name="dense_sparsity") -class DenseSparsityConfig(CompressionConfig): +@SparseMLCompressionConfig.register(name="dense_sparsity") +class SparseMLDenseSparsityConfig(SparseMLCompressionConfig): """ Identity configuration for storing a sparse model in an uncompressed dense format diff --git a/src/sparseml/transformers/compression/config/sparse_bitmask.py b/src/sparseml/transformers/compression/config/sparse_bitmask.py index dfd71711104..752a24ac17e 100644 --- a/src/sparseml/transformers/compression/config/sparse_bitmask.py +++ b/src/sparseml/transformers/compression/config/sparse_bitmask.py @@ -14,14 +14,14 @@ from typing import Optional -from sparseml.transformers.compression.config import CompressionConfig +from sparseml.transformers.compression.config import SparseMLCompressionConfig -__all__ = ["BitmaskConfig"] +__all__ = ["SparseMLBitmaskConfig"] -@CompressionConfig.register(name="sparse_bitmask") -class BitmaskConfig(CompressionConfig): +@SparseMLCompressionConfig.register(name="sparse_bitmask") +class SparseMLBitmaskConfig(SparseMLCompressionConfig): """ Configuration for storing a sparse model using bitmask compression diff --git a/src/sparseml/transformers/compression/utils/__init__.py b/src/sparseml/transformers/compression/utils/__init__.py index 560435126ad..d1488c49eb4 100644 --- a/src/sparseml/transformers/compression/utils/__init__.py +++ b/src/sparseml/transformers/compression/utils/__init__.py @@ -16,4 +16,3 @@ from .compress_save import * from .helpers import * -from .safetensors_load import * diff --git a/src/sparseml/transformers/compression/utils/compress_save.py b/src/sparseml/transformers/compression/utils/compress_save.py index 3013449b67d..810143f3eb4 100644 --- a/src/sparseml/transformers/compression/utils/compress_save.py +++ 
b/src/sparseml/transformers/compression/utils/compress_save.py @@ -22,10 +22,9 @@ from transformers import PreTrainedModel from transformers.file_utils import CONFIG_NAME -from sparseml.transformers.compression.compressors import ModelCompressor -from sparseml.transformers.compression.config import CompressionConfig -from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME +from sparseml.transformers.compression.config import SparseMLCompressionConfig from sparseml.utils.pytorch import qat_active +from sparsetensors import SPARSITY_CONFIG_NAME, ModelCompressor _LOGGER = logging.getLogger(__name__) @@ -54,7 +53,7 @@ def save_pretrained_compressed(save_pretrained_method): @wraps(original_save_pretrained) def save_pretrained_wrapper( save_directory: str, - sparsity_config: Optional[CompressionConfig] = None, + sparsity_config: Optional[SparseMLCompressionConfig] = None, save_compressed: bool = False, skip_compression_stats: bool = False, **kwargs, @@ -95,7 +94,7 @@ def save_pretrained_wrapper( "calculation of compression statistics set " "skip_compression_stats=True" ) - sparsity_config = CompressionConfig.infer_config_from_model( + sparsity_config = SparseMLCompressionConfig.infer_config_from_model( model, compress=save_compressed ) diff --git a/src/sparseml/transformers/compression/utils/helpers.py b/src/sparseml/transformers/compression/utils/helpers.py index 4d96fa66cf3..a018c0a4fbb 100644 --- a/src/sparseml/transformers/compression/utils/helpers.py +++ b/src/sparseml/transformers/compression/utils/helpers.py @@ -17,9 +17,8 @@ from transformers import AutoConfig -from sparseml.transformers.compression.compressors import ModelCompressor -from sparseml.transformers.compression.config import CompressionConfig -from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME +from sparseml.transformers.compression.config import SparseMLCompressionConfig +from sparsetensors import SPARSITY_CONFIG_NAME, ModelCompressor __all__ = ["infer_compressor_from_model_config"] @@ -41,6 +40,8 @@ def infer_compressor_from_model_config( return None format = sparsity_config.get("format") - sparsity_config = CompressionConfig.load_from_registry(format, **sparsity_config) + sparsity_config = SparseMLCompressionConfig.load_from_registry( + format, **sparsity_config + ) compressor = ModelCompressor.load_from_registry(format, config=sparsity_config) return compressor diff --git a/src/sparseml/transformers/compression/utils/safetensors_load.py b/src/sparseml/transformers/compression/utils/safetensors_load.py deleted file mode 100644 index 4d71482a8e9..00000000000 --- a/src/sparseml/transformers/compression/utils/safetensors_load.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import json -import os -import re -import struct -from typing import Dict, List, Optional - -from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, cached_file - - -__all__ = [ - "get_safetensors_folder", - "get_safetensors_header", - "match_param_name", - "merge_names", - "get_weight_mappings", - "get_nested_weight_mappings", -] - - -def get_safetensors_folder( - pretrained_model_name_or_path: str, cache_dir: Optional[str] = None -) -> str: - """ - Given a Hugging Face stub or a local path, return the folder containing the - safetensors weight files - - :param pretrained_model_name_or_path: local path to model or HF stub - :param cache_dir: optional cache dir to search through, if none is specified the - model will be searched for in the default TRANSFORMERS_CACHE - :return: local folder containing model data - """ - if os.path.exists(pretrained_model_name_or_path): - # argument is a path to a local folder - return pretrained_model_name_or_path - - safetensors_path = cached_file( - pretrained_model_name_or_path, - SAFE_WEIGHTS_NAME, - cache_dir=cache_dir, - _raise_exceptions_for_missing_entries=False, - ) - index_path = cached_file( - pretrained_model_name_or_path, - SAFE_WEIGHTS_INDEX_NAME, - cache_dir=cache_dir, - _raise_exceptions_for_missing_entries=False, - ) - if safetensors_path is not None: - # found a single cached safetensors file - return os.path.split(safetensors_path)[0] - if index_path is not None: - # found a cached safetensors weight index file - return os.path.split(index_path)[0] - - # model weights could not be found locally or cached from HF Hub - raise ValueError( - "Could not locate safetensors weight or index file from " - f"{pretrained_model_name_or_path}." - ) - - -def get_safetensors_header(safetensors_path: str) -> Dict[str, str]: - """ - Extracts the metadata from a safetensors file as JSON - - :param safetensors_path: path to a safetensors file - :return: dictionary of metadata extracted from the safetensors file - """ - with open(safetensors_path, "rb") as f: - length_of_header = struct.unpack(" str: - """ - Helper function extracting the uncompressed parameterized layer name from a - compressed name. Assumes the compressed name was merged using merge_names. - - :param full_name: full name of parameter in compressed model - :param param_name: compression paramater name - :return: uncompressed name of the uncompressed parameterized layer - """ - pattern = r"^(.*)\." + param_name + r"$" - regex = re.findall(pattern, full_name) - if len(regex) == 0: - return None - return regex[0] - - -def merge_names(parent_name: str, child_name: str) -> str: - """ - Helper function for merging an uncompressed parameterized layer name with a - compression parameter. Names merged with this function can then be parsed by - match_param_name. - - :param parent_name: uncompressed parameterized layer name - :param child_name: compression parameter name - :return: merged compressed name - """ - return parent_name + "." + child_name - - -def get_weight_mappings(model_path: str) -> Dict[str, str]: - """ - Takes a path to a state dict saved in safetensors format and returns a mapping - from parameterized layer name to file location. 
- - { - layer.weight.bitmask: file_location, - layer.weight.row_offsets: file_location, - layer.weight.shape: file_location, - layer.weight.compressed: file_location - } - - This generalizes to cases where the model is split into multiple safetensors files - - :param model_path: path to safetensors state dict, must contain either a single - safetensors file or multiple files with an index - :return: mapping of parameterized layer name to file location - """ - safetensors_path = os.path.join(model_path, SAFE_WEIGHTS_NAME) - index_path = os.path.join(model_path, SAFE_WEIGHTS_INDEX_NAME) - if os.path.exists(safetensors_path): - # we have a single safetensors file to read - header = get_safetensors_header(safetensors_path) - for key in header.keys(): - header[key] = SAFE_WEIGHTS_NAME - header.pop("__metadata__", None) - elif os.path.exists(index_path): - # we have multiple safetensors file, read from index - with open(index_path, "r", encoding="utf-8") as f: - index = json.load(f) - header = index["weight_map"] - else: - raise ValueError( - f"Could not find a safetensors weight or index file at {model_path}" - ) - - # convert weight locations to full paths - for key, value in header.items(): - header[key] = os.path.join(model_path, value) - - return header - - -def get_nested_weight_mappings( - model_path: str, params_to_nest: List[str] -) -> Dict[str, Dict[str, str]]: - """ - Takes a path to a state dict saved in safetensors format and returns a nested - mapping from uncompressed parameterized layer names to the file locations of each - of the layers compression parameters. - - layer.weight: { - bitmask: file_location, - row_offsets: file_location, - shape: file_location, - compressed: file_location - } - - This generalizes to cases where the model is split into multiple safetensors files - - :param model_path: path to safetensors state dict, must contain either a single - safetensors file or multiple files with an index - :return: nested mapping of parameterized layer name to file location - """ - weight_mappings = get_weight_mappings(model_path) - - nested_weight_mappings = {} - for key in weight_mappings.keys(): - for param_name in params_to_nest: - maybe_match = match_param_name(key, param_name) - if maybe_match is not None: - dense_param = maybe_match - if dense_param not in nested_weight_mappings: - nested_weight_mappings[dense_param] = {} - nested_weight_mappings[dense_param][param_name] = weight_mappings[key] - - return nested_weight_mappings diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 88f90de65d9..3c616a2cb91 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -35,12 +35,12 @@ log_model_load, ) from sparseml.transformers.compression.utils import ( - get_safetensors_folder, infer_compressor_from_model_config, modify_save_pretrained, ) from sparseml.transformers.sparsification.modification import modify_model from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe +from sparsetensors import get_safetensors_folder __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] diff --git a/tests/sparseml/transformers/compression/test_bitmask.py b/tests/sparseml/transformers/compression/test_bitmask.py deleted file mode 100644 index 40d683cb468..00000000000 --- a/tests/sparseml/transformers/compression/test_bitmask.py +++ /dev/null @@ -1,122 +0,0 @@ -# 
Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -import shutil - -import pytest -import torch - -from safetensors.torch import save_file -from sparseml.transformers.compression import BitmaskCompressor, BitmaskConfig -from sparseml.transformers.compression.compressors.sparse_bitmask import BitmaskTensor - - -@pytest.mark.parametrize( - "shape,sparsity,dtype", - [ - [(512, 1024), 0.5, torch.float32], - [(830, 545), 0.8, torch.float32], - [(342, 512), 0.3, torch.bfloat16], - [(256, 700), 0.9, torch.float16], - ], -) -def test_bitmask_sizes(shape, sparsity, dtype): - test_tensor = torch.rand(shape, dtype=dtype) - mask = (test_tensor.abs() < (1 - sparsity)).int() - test_tensor *= mask - dense_state_dict = {"dummy.weight": test_tensor} - - sparsity_config = BitmaskConfig() - compressor = BitmaskCompressor(config=sparsity_config) - sparse_state_dict = compressor.compress(dense_state_dict) - - # each dense tensor has 4 parameters for compression - assert len(dense_state_dict) * 4 == len(sparse_state_dict) - - # bitmask should be 1 bit per dense element, rounded up to nearest int8 - sparse_shape = sparse_state_dict["dummy.weight.shape"] - assert torch.all(torch.eq(sparse_shape, torch.tensor(shape))) - bitmask_shape = sparse_state_dict["dummy.weight.bitmask"].shape - assert bitmask_shape[0] == sparse_shape[0] - assert bitmask_shape[1] == int(math.ceil(sparse_shape[1] / 8.0)) - - # one value for each non-zero weight - values_shape = sparse_state_dict["dummy.weight.compressed"].shape - assert values_shape[0] == torch.sum(test_tensor != 0) - row_offsets_shape = sparse_state_dict["dummy.weight.row_offsets"].shape - assert row_offsets_shape[0] == test_tensor.shape[0] - - -@pytest.mark.parametrize( - "shape,sparsity,dtype", - [ - [(256, 512), 0.5, torch.float32], - [(128, 280), 0.8, torch.float32], - [(1024, 256), 0.3, torch.bfloat16], - [(511, 350), 0.7, torch.float16], - ], -) -def test_match(shape, sparsity, dtype): - test_tensor1 = torch.rand(shape, dtype=dtype) - mask = (test_tensor1.abs() < (1 - sparsity)).int() - test_tensor1 *= mask - - test_tensor2 = torch.rand(shape, dtype=dtype) - mask = (test_tensor2.abs() < (1 - sparsity)).int() - test_tensor2 *= mask - - dense_state_dict = {"dummy.weight": test_tensor1, "dummy2.weight": test_tensor2} - - for key in dense_state_dict.keys(): - dense_tensor = dense_state_dict[key] - sparse_tensor = BitmaskTensor.from_dense(dense_tensor) - decompressed = sparse_tensor.decompress() - assert decompressed.dtype == dense_tensor.dtype == dtype - assert torch.equal(dense_tensor, decompressed) - - -@pytest.mark.parametrize( - "sparsity,dtype", - [ - [0.5, torch.float32], - [0.8, torch.float32], - [0.3, torch.bfloat16], - [0.7, torch.float16], - ], -) -def test_reload_match(sparsity, dtype, tmp_path): - test_tensor1 = torch.rand((256, 512), dtype=dtype) - mask = (test_tensor1.abs() < (1 - sparsity)).int() - test_tensor1 *= mask - - test_tensor2 = torch.rand((360, 720), dtype=dtype) - mask 
= (test_tensor2.abs() < (1 - sparsity)).int() - test_tensor2 *= mask - - dense_state_dict = {"dummy.weight": test_tensor1, "dummy2.weight": test_tensor2} - - sparsity_config = BitmaskConfig() - compressor = BitmaskCompressor(config=sparsity_config) - - sparse_state_dict = compressor.compress(dense_state_dict) - save_file(sparse_state_dict, tmp_path / "model.safetensors") - reconstructed_dense = compressor.decompress(tmp_path) - - for key, reconstructed_tensor in reconstructed_dense: - dense_tensor = dense_state_dict[key] - assert dense_tensor.dtype == reconstructed_tensor.dtype == dtype - assert torch.equal(dense_tensor, reconstructed_tensor) - - shutil.rmtree(tmp_path) diff --git a/tests/sparseml/transformers/compression/test_registries.py b/tests/sparseml/transformers/compression/test_registries.py deleted file mode 100644 index fb1ba37d3d0..00000000000 --- a/tests/sparseml/transformers/compression/test_registries.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest - -from sparseml.transformers.compression import ( - BitmaskCompressor, - BitmaskConfig, - CompressionConfig, - DenseCompressor, - DenseSparsityConfig, - ModelCompressor, -) - - -@pytest.mark.parametrize( - "name,type", - [ - ["sparse_bitmask", BitmaskConfig], - ["dense_sparsity", DenseSparsityConfig], - ], -) -def test_configs(name, type): - config = CompressionConfig.load_from_registry(name) - assert isinstance(config, type) - assert config.format == name - - -@pytest.mark.parametrize( - "name,type", - [["sparse_bitmask", BitmaskCompressor], ["dense_sparsity", DenseCompressor]], -) -def test_compressors(name, type): - compressor = ModelCompressor.load_from_registry( - name, config=CompressionConfig(format="none") - ) - assert isinstance(compressor, type) - assert isinstance(compressor.config, CompressionConfig) - assert compressor.config.format == "none" diff --git a/tests/sparseml/transformers/compression/test_sparse_auto.py b/tests/sparseml/transformers/compression/test_sparse_auto.py index 7a1fdec0266..c6e5e4098db 100644 --- a/tests/sparseml/transformers/compression/test_sparse_auto.py +++ b/tests/sparseml/transformers/compression/test_sparse_auto.py @@ -22,9 +22,9 @@ import sparseml.core.session as session_manager from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression import ( - BitmaskConfig, - CompressionConfig, - DenseSparsityConfig, + SparseMLBitmaskConfig, + SparseMLCompressionConfig, + SparseMLDenseSparsityConfig, ) from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME @@ -33,9 +33,9 @@ "compressed,config,dtype", [ [True, None, torch.float32], - [False, DenseSparsityConfig(), torch.float16], - [True, BitmaskConfig(), torch.bfloat16], - [False, BitmaskConfig(), torch.float32], + [False, SparseMLDenseSparsityConfig(), torch.float16], + [True, SparseMLBitmaskConfig(), torch.bfloat16], + [False, SparseMLBitmaskConfig(), torch.float32], 
[False, None, torch.float16], ], ) @@ -68,9 +68,9 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): tmp_path / "oneshot_out", torch_dtype=dtype ) - inferred_global_sparsity = CompressionConfig.infer_global_sparsity(model) + inferred_global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = CompressionConfig.infer_sparsity_structure() + inferred_structure = SparseMLCompressionConfig.infer_sparsity_structure() assert inferred_structure == "0:0" model.save_pretrained( @@ -115,9 +115,9 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): model_path = "Xenova/llama2.c-stories15M" model = SparseAutoModelForCausalLM.from_pretrained(model_path) - inferred_global_sparsity = CompressionConfig.infer_global_sparsity(model) + inferred_global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = CompressionConfig.infer_sparsity_structure() + inferred_structure = SparseMLCompressionConfig.infer_sparsity_structure() assert inferred_structure == "unstructured" model.save_pretrained( From 76970e309bd1149b557d2ce2bf14c2c4f5c427b7 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Mon, 8 Apr 2024 12:11:12 +0000 Subject: [PATCH 02/51] update setup.py --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 233618fc418..676abdf9dc5 100644 --- a/setup.py +++ b/setup.py @@ -55,6 +55,7 @@ "GPUtil>=1.4.0", "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug + # "sparsetensors" for now installed from source until pypi release is available ] _nm_deps = [f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}"] _deepsparse_deps = [ From bbf4b39342e204e69bf6169fe9ef24a56169309b Mon Sep 17 00:00:00 2001 From: dbogunowicz <97082108+dbogunowicz@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:37:10 +0200 Subject: [PATCH 03/51] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 676abdf9dc5..b99be671a91 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ "GPUtil>=1.4.0", "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug - # "sparsetensors" for now installed from source until pypi release is available + "sparsetensors @ git+https://github.com/neuralmagic/sparsetensors@main", ] _nm_deps = [f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}"] _deepsparse_deps = [ From a272a30e1bb6d9a4951faa82e5ad66f8972ead66 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Mon, 8 Apr 2024 14:08:12 +0000 Subject: [PATCH 04/51] fix setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b99be671a91..4ddf2e47050 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ "GPUtil>=1.4.0", "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug - "sparsetensors @ git+https://github.com/neuralmagic/sparsetensors@main", + "sparsetensors @ git+ssh://git@github.com/neuralmagic/sparsetensors.git", ] _nm_deps = [f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}"] _deepsparse_deps = [ From c0d3ead2de91874fd1c18887fee093f3c5019d2e Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 10 Apr 2024 19:20:01 +0000 
Subject: [PATCH 05/51] move all config to sparsetensors --- .../transformers/compression/__init__.py | 4 +- .../compression/{utils => }/compress_save.py | 10 ++-- .../compression/config/__init__.py | 19 -------- .../transformers/compression/config/dense.py | 36 -------------- .../compression/config/sparse_bitmask.py | 36 -------------- .../{config/base.py => sparsity_config.py} | 32 ++++--------- .../compression/utils/__init__.py | 18 ------- .../transformers/compression/utils/helpers.py | 47 ------------------- .../sparsification/sparse_model.py | 6 +-- .../compression/test_sparse_auto.py | 21 ++++----- 10 files changed, 28 insertions(+), 201 deletions(-) rename src/sparseml/transformers/compression/{utils => }/compress_save.py (93%) delete mode 100644 src/sparseml/transformers/compression/config/__init__.py delete mode 100644 src/sparseml/transformers/compression/config/dense.py delete mode 100644 src/sparseml/transformers/compression/config/sparse_bitmask.py rename src/sparseml/transformers/compression/{config/base.py => sparsity_config.py} (75%) delete mode 100644 src/sparseml/transformers/compression/utils/__init__.py delete mode 100644 src/sparseml/transformers/compression/utils/helpers.py diff --git a/src/sparseml/transformers/compression/__init__.py b/src/sparseml/transformers/compression/__init__.py index 02878b7b1e7..2587489968c 100644 --- a/src/sparseml/transformers/compression/__init__.py +++ b/src/sparseml/transformers/compression/__init__.py @@ -13,4 +13,6 @@ # limitations under the License. # flake8: noqa -from .config import * + +from .compress_save import * +from .sparsity_config import * diff --git a/src/sparseml/transformers/compression/utils/compress_save.py b/src/sparseml/transformers/compression/compress_save.py similarity index 93% rename from src/sparseml/transformers/compression/utils/compress_save.py rename to src/sparseml/transformers/compression/compress_save.py index 810143f3eb4..8f1f65d533d 100644 --- a/src/sparseml/transformers/compression/utils/compress_save.py +++ b/src/sparseml/transformers/compression/compress_save.py @@ -22,9 +22,9 @@ from transformers import PreTrainedModel from transformers.file_utils import CONFIG_NAME -from sparseml.transformers.compression.config import SparseMLCompressionConfig +from sparseml.transformers.compression.sparsity_config import SparsityConfigFiller from sparseml.utils.pytorch import qat_active -from sparsetensors import SPARSITY_CONFIG_NAME, ModelCompressor +from sparsetensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor _LOGGER = logging.getLogger(__name__) @@ -53,7 +53,7 @@ def save_pretrained_compressed(save_pretrained_method): @wraps(original_save_pretrained) def save_pretrained_wrapper( save_directory: str, - sparsity_config: Optional[SparseMLCompressionConfig] = None, + sparsity_config: Optional[CompressionConfig] = None, save_compressed: bool = False, skip_compression_stats: bool = False, **kwargs, @@ -85,7 +85,7 @@ def save_pretrained_wrapper( ) if sparsity_config is not None: - sparsity_config.fill_config_details(model) + SparsityConfigFiller.fill_config_details(sparsity_config, model) elif not skip_compression_stats: # try to infer a sparsity config from the model if none is provided _LOGGER.info( @@ -94,7 +94,7 @@ def save_pretrained_wrapper( "calculation of compression statistics set " "skip_compression_stats=True" ) - sparsity_config = SparseMLCompressionConfig.infer_config_from_model( + sparsity_config = SparsityConfigFiller.infer_config_from_model( model, compress=save_compressed ) diff 
--git a/src/sparseml/transformers/compression/config/__init__.py b/src/sparseml/transformers/compression/config/__init__.py deleted file mode 100644 index 06ad97ba4bd..00000000000 --- a/src/sparseml/transformers/compression/config/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# flake8: noqa - -from .base import SparseMLCompressionConfig -from .dense import SparseMLDenseSparsityConfig -from .sparse_bitmask import SparseMLBitmaskConfig diff --git a/src/sparseml/transformers/compression/config/dense.py b/src/sparseml/transformers/compression/config/dense.py deleted file mode 100644 index cd691ffc1a7..00000000000 --- a/src/sparseml/transformers/compression/config/dense.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Optional - -from sparseml.transformers.compression.config import SparseMLCompressionConfig - - -__all__ = ["SparseMLDenseSparsityConfig"] - - -@SparseMLCompressionConfig.register(name="dense_sparsity") -class SparseMLDenseSparsityConfig(SparseMLCompressionConfig): - """ - Identity configuration for storing a sparse model in - an uncompressed dense format - - :param global_sparsity: average sparsity of the entire model - :param sparsity_structure: structure of the sparsity, such as - "unstructured", "2:4", "8:16" etc - """ - - format: str = "dense_sparsity" - global_sparsity: Optional[float] = 0.0 - sparsity_structure: Optional[str] = "unstructured" diff --git a/src/sparseml/transformers/compression/config/sparse_bitmask.py b/src/sparseml/transformers/compression/config/sparse_bitmask.py deleted file mode 100644 index 752a24ac17e..00000000000 --- a/src/sparseml/transformers/compression/config/sparse_bitmask.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Optional - -from sparseml.transformers.compression.config import SparseMLCompressionConfig - - -__all__ = ["SparseMLBitmaskConfig"] - - -@SparseMLCompressionConfig.register(name="sparse_bitmask") -class SparseMLBitmaskConfig(SparseMLCompressionConfig): - """ - Configuration for storing a sparse model using - bitmask compression - - :param global_sparsity: average sparsity of the entire model - :param sparsity_structure: structure of the sparsity, such as - "unstructured", "2:4", "8:16" etc - """ - - format: str = "sparse_bitmask" - global_sparsity: Optional[float] = 0.0 - sparsity_structure: Optional[str] = "unstructured" diff --git a/src/sparseml/transformers/compression/config/base.py b/src/sparseml/transformers/compression/sparsity_config.py similarity index 75% rename from src/sparseml/transformers/compression/config/base.py rename to src/sparseml/transformers/compression/sparsity_config.py index d85a975bf4b..c11aca76ae8 100644 --- a/src/sparseml/transformers/compression/config/base.py +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -21,23 +21,7 @@ from sparsetensors import CompressionConfig -__all__ = ["SparseMLCompressionConfig"] - - -class SparseMLCompressionConfig(CompressionConfig): - """ - Base data class for storing compression parameters - - :param format: name of compression format - :param global_sparsity: average sparsity of the entire model - :param sparsity_structure: structure of the sparsity, such as - "unstructured", "2:4", "8:16" etc - """ - - format: str - global_sparsity: Optional[float] = 0.0 - sparsity_structure: Optional[str] = "unstructured" - +class SparsityConfigFiller: @staticmethod def infer_global_sparsity(model: Module) -> float: """ @@ -84,28 +68,30 @@ def infer_config_from_model( :return: compression config inferred from the model """ - global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) + global_sparsity = SparsityConfigFiller.infer_global_sparsity(model) if global_sparsity < 0.05: return None - sparsity_structure = SparseMLCompressionConfig.infer_sparsity_structure() + sparsity_structure = SparsityConfigFiller.infer_sparsity_structure() if compress: format = "sparse_bitmask" else: format = "dense_sparsity" - return SparseMLCompressionConfig.load_from_registry( + return CompressionConfig.load_from_registry( format, global_sparsity=global_sparsity, sparsity_structure=sparsity_structure, ) - def fill_config_details(self, model: Module): + @staticmethod + def fill_config_details(config: CompressionConfig, model: Module): """ Fills in informational sparsity parameters from a given model + :param config: sparsity config to fill in :param model: pytorch model to infer config parameters from """ - self.global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) - self.sparsity_structure = SparseMLCompressionConfig.infer_sparsity_structure() + config.global_sparsity = SparsityConfigFiller.infer_global_sparsity(model) + config.sparsity_structure = SparsityConfigFiller.infer_sparsity_structure() diff --git a/src/sparseml/transformers/compression/utils/__init__.py b/src/sparseml/transformers/compression/utils/__init__.py deleted file mode 100644 index d1488c49eb4..00000000000 --- a/src/sparseml/transformers/compression/utils/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# flake8: noqa - -from .compress_save import * -from .helpers import * diff --git a/src/sparseml/transformers/compression/utils/helpers.py b/src/sparseml/transformers/compression/utils/helpers.py deleted file mode 100644 index a018c0a4fbb..00000000000 --- a/src/sparseml/transformers/compression/utils/helpers.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import Optional - -from transformers import AutoConfig - -from sparseml.transformers.compression.config import SparseMLCompressionConfig -from sparsetensors import SPARSITY_CONFIG_NAME, ModelCompressor - - -__all__ = ["infer_compressor_from_model_config"] - - -def infer_compressor_from_model_config( - pretrained_model_name_or_path: str, -) -> Optional[ModelCompressor]: - """ - Given a path to a model config, extract a sparsity config if it exists and return - the associated ModelCompressor - - :param pretrained_model_name_or_path: path to model config on disk or HF hub - :return: matching compressor if config contains a sparsity config - """ - config = AutoConfig.from_pretrained(pretrained_model_name_or_path) - sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None) - if sparsity_config is None: - return None - - format = sparsity_config.get("format") - sparsity_config = SparseMLCompressionConfig.load_from_registry( - format, **sparsity_config - ) - compressor = ModelCompressor.load_from_registry(format, config=sparsity_config) - return compressor diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 3c616a2cb91..f56c84018ad 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -34,13 +34,11 @@ apply_recipe_structure_to_model, log_model_load, ) -from sparseml.transformers.compression.utils import ( - infer_compressor_from_model_config, - modify_save_pretrained, -) +from sparseml.transformers.compression import modify_save_pretrained from sparseml.transformers.sparsification.modification import modify_model from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe from sparsetensors import get_safetensors_folder +from sparsetensors.utils import infer_compressor_from_model_config __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] diff --git 
a/tests/sparseml/transformers/compression/test_sparse_auto.py b/tests/sparseml/transformers/compression/test_sparse_auto.py index c6e5e4098db..a1b725ff995 100644 --- a/tests/sparseml/transformers/compression/test_sparse_auto.py +++ b/tests/sparseml/transformers/compression/test_sparse_auto.py @@ -21,21 +21,18 @@ import sparseml.core.session as session_manager from sparseml.transformers import SparseAutoModelForCausalLM, oneshot -from sparseml.transformers.compression import ( - SparseMLBitmaskConfig, - SparseMLCompressionConfig, - SparseMLDenseSparsityConfig, -) +from sparseml.transformers.compression import SparsityConfigFiller from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME +from sparsetensors.config import BitmaskConfig, DenseSparsityConfig @pytest.mark.parametrize( "compressed,config,dtype", [ [True, None, torch.float32], - [False, SparseMLDenseSparsityConfig(), torch.float16], - [True, SparseMLBitmaskConfig(), torch.bfloat16], - [False, SparseMLBitmaskConfig(), torch.float32], + [False, DenseSparsityConfig(), torch.float16], + [True, BitmaskConfig(), torch.bfloat16], + [False, BitmaskConfig(), torch.float32], [False, None, torch.float16], ], ) @@ -68,9 +65,9 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): tmp_path / "oneshot_out", torch_dtype=dtype ) - inferred_global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) + inferred_global_sparsity = SparsityConfigFiller.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = SparseMLCompressionConfig.infer_sparsity_structure() + inferred_structure = SparsityConfigFiller.infer_sparsity_structure() assert inferred_structure == "0:0" model.save_pretrained( @@ -115,9 +112,9 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): model_path = "Xenova/llama2.c-stories15M" model = SparseAutoModelForCausalLM.from_pretrained(model_path) - inferred_global_sparsity = SparseMLCompressionConfig.infer_global_sparsity(model) + inferred_global_sparsity = SparsityConfigFiller.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = SparseMLCompressionConfig.infer_sparsity_structure() + inferred_structure = SparsityConfigFiller.infer_sparsity_structure() assert inferred_structure == "unstructured" model.save_pretrained( From a75f8daf2ed952441f3260a424c9ab2e3e5e2be5 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 10 Apr 2024 19:37:23 +0000 Subject: [PATCH 06/51] cleanup class name and comments --- .../transformers/compression/compress_save.py | 6 +++--- .../transformers/compression/sparsity_config.py | 15 ++++++++++----- .../transformers/compression/test_sparse_auto.py | 10 +++++----- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/sparseml/transformers/compression/compress_save.py b/src/sparseml/transformers/compression/compress_save.py index 1693432c834..46322886b89 100644 --- a/src/sparseml/transformers/compression/compress_save.py +++ b/src/sparseml/transformers/compression/compress_save.py @@ -22,7 +22,7 @@ from transformers import PreTrainedModel from transformers.file_utils import CONFIG_NAME -from sparseml.transformers.compression.sparsity_config import SparsityConfigFiller +from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata from sparseml.utils.pytorch import qat_active from sparsetensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor @@ -87,7 +87,7 @@ def 
save_pretrained_wrapper( ) if sparsity_config is not None: - SparsityConfigFiller.fill_config_details( + SparsityConfigMetadata.fill_config_details( sparsity_config, model, state_dict=state_dict ) elif not skip_compression_stats: @@ -98,7 +98,7 @@ def save_pretrained_wrapper( "calculation of compression statistics set " "skip_compression_stats=True" ) - sparsity_config = SparsityConfigFiller.infer_config_from_model( + sparsity_config = SparsityConfigMetadata.infer_config_from_model( model, state_dict=state_dict, compress=save_compressed ) diff --git a/src/sparseml/transformers/compression/sparsity_config.py b/src/sparseml/transformers/compression/sparsity_config.py index 88f6a9e2c95..504e154148f 100644 --- a/src/sparseml/transformers/compression/sparsity_config.py +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -22,7 +22,12 @@ from sparsetensors import CompressionConfig -class SparsityConfigFiller: +class SparsityConfigMetadata: + """ + Class of helper functions for filling out a CompressionConfig with readable + metadata from the model + """ + @staticmethod def infer_global_sparsity( model: Module, state_dict: Optional[Dict[str, Tensor]] = None @@ -78,14 +83,14 @@ def infer_config_from_model( :return: compression config inferred from the model """ - global_sparsity = SparsityConfigFiller.infer_global_sparsity( + global_sparsity = SparsityConfigMetadata.infer_global_sparsity( model, state_dict=state_dict ) if global_sparsity < 0.05: return None - sparsity_structure = SparsityConfigFiller.infer_sparsity_structure() + sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() if compress: format = "sparse_bitmask" else: @@ -111,7 +116,7 @@ def fill_config_details( :param state_dict: optional state_dict to replace that in model, used for gathering global FSDP model info """ - config.global_sparsity = SparsityConfigFiller.infer_global_sparsity( + config.global_sparsity = SparsityConfigMetadata.infer_global_sparsity( model, state_dict=state_dict ) - config.sparsity_structure = SparsityConfigFiller.infer_sparsity_structure() + config.sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() diff --git a/tests/sparseml/transformers/compression/test_sparse_auto.py b/tests/sparseml/transformers/compression/test_sparse_auto.py index a1b725ff995..42557cd808d 100644 --- a/tests/sparseml/transformers/compression/test_sparse_auto.py +++ b/tests/sparseml/transformers/compression/test_sparse_auto.py @@ -21,7 +21,7 @@ import sparseml.core.session as session_manager from sparseml.transformers import SparseAutoModelForCausalLM, oneshot -from sparseml.transformers.compression import SparsityConfigFiller +from sparseml.transformers.compression import SparsityConfigMetadata from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME from sparsetensors.config import BitmaskConfig, DenseSparsityConfig @@ -65,9 +65,9 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): tmp_path / "oneshot_out", torch_dtype=dtype ) - inferred_global_sparsity = SparsityConfigFiller.infer_global_sparsity(model) + inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = SparsityConfigFiller.infer_sparsity_structure() + inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() assert inferred_structure == "0:0" model.save_pretrained( @@ -112,9 +112,9 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): model_path = 
"Xenova/llama2.c-stories15M" model = SparseAutoModelForCausalLM.from_pretrained(model_path) - inferred_global_sparsity = SparsityConfigFiller.infer_global_sparsity(model) + inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = SparsityConfigFiller.infer_sparsity_structure() + inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() assert inferred_structure == "unstructured" model.save_pretrained( From 2c72ab16aea1824a58e8e577b665fe2a1e7dbfcd Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 16 Apr 2024 17:49:18 +0000 Subject: [PATCH 07/51] initial implementation untested --- .../modifiers/quantization_vllm/__init__.py | 17 +++ .../modifiers/quantization_vllm/base.py | 85 +++++++++++ .../modifiers/quantization_vllm/pytorch.py | 141 ++++++++++++++++++ 3 files changed, 243 insertions(+) create mode 100644 src/sparseml/modifiers/quantization_vllm/__init__.py create mode 100644 src/sparseml/modifiers/quantization_vllm/base.py create mode 100644 src/sparseml/modifiers/quantization_vllm/pytorch.py diff --git a/src/sparseml/modifiers/quantization_vllm/__init__.py b/src/sparseml/modifiers/quantization_vllm/__init__.py new file mode 100644 index 00000000000..9cdf715c135 --- /dev/null +++ b/src/sparseml/modifiers/quantization_vllm/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa + +from .base import * diff --git a/src/sparseml/modifiers/quantization_vllm/base.py b/src/sparseml/modifiers/quantization_vllm/base.py new file mode 100644 index 00000000000..7b29e7f44ae --- /dev/null +++ b/src/sparseml/modifiers/quantization_vllm/base.py @@ -0,0 +1,85 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Optional + +from pydantic import Field + +from sparseml.core import Event, Modifier +from sparsetensors.quantization import ( + QuantizationConfig, + QuantizationScheme, + QuantizationStatus, +) + + +__all__ = ["vLLMQuantizationModifier"] + + +class vLLMQuantizationModifier(Modifier): + """ + Enables post training quantization (PTQ) and quantization aware training (QAT) for a + given module or its submodules. 
After calibration (PTQ) or the start epoch (QAT), + the specified module(s) forward pass will emulate quantized execution and the + modifier will be enabled until training is completed. + + :param config_groups: dictionary specifying quantization schemes to apply to target + modules. Modules not matching a scheme target will NOT be quantized. + :param ignore: optional list of module class names or submodule names to not + quantize even if they match a target in config_groups. Defaults to empty list. + :param disable_quantization_observer_epoch: Epoch to disable updates to the module + quantization observers. At this point, quantized weights and zero points will + not be updated. Leave None to not disable observers during QAT. Default is None + :param num_calibration_steps: Number of steps to run post training calibration for. + When None, the entire calibration_dataloader is used + :param post_oneshot_calibration: Whether to rerun calibration on finalization + """ + + config_groups: Dict[str, QuantizationScheme] + ignore: List[str] = Field(default_factory=list) + disable_quantization_observer_epoch: Optional[float] = None + num_calibration_steps: Optional[int] = None + post_oneshot_calibration: Optional[bool] = False + + def create_init_config(self) -> QuantizationConfig: + return QuantizationConfig( + config_groups=self.config_groups, + quantization_status=QuantizationStatus.INITIALIZED, + ignore=self.ignore, + ) + + def calculate_disable_observer_epoch(self) -> float: + """ + Get the epoch at which we want to disable to quantization observer + :return epoch to disable at, or -1 if it is not set + """ + return ( + self.disable_quantization_observer_epoch + if self.disable_quantization_observer_epoch is not None + else -1 + ) + + def check_should_disable_observer(self, event: Event) -> bool: + """ + Given the current index, determine if we should disable the observer + + :param event: Event to get index from + :return: True if observer should be disabled, False otherwise + """ + disable_epoch = self.calculate_disable_observer_epoch() + if disable_epoch == -1: + return False + if event.current_index >= disable_epoch: + return True + return False diff --git a/src/sparseml/modifiers/quantization_vllm/pytorch.py b/src/sparseml/modifiers/quantization_vllm/pytorch.py new file mode 100644 index 00000000000..4d8b87568cd --- /dev/null +++ b/src/sparseml/modifiers/quantization_vllm/pytorch.py @@ -0,0 +1,141 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +from typing import Any + +import torch +from torch.nn import Module + +from sparseml.core import Event, EventType, State +from sparseml.modifiers.quantization_vllm.base import vLLMQuantizationModifier +from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward +from sparsetensors.quantization import ( + QuantizationStatus, + apply_quantization_config, + apply_quantization_status, +) + + +_LOGGER = logging.getLogger(__name__) + + +class vLLMQuantizationModifierPyTorch(vLLMQuantizationModifier): + """ + Pytorch-specific implementation of quantization modifier + + :param scheme: Default QuantizationScheme to use when enabling quantization + in a module. May also be a dictionary to be loaded into the QuantizationScheme + class. A string alias may also be used, supported aliases: + ['default', 'deepsparse', 'tensorrt']. + If None, the default scheme (`QuantizationScheme()`) will be used. + Default is None + :param scheme_overrides: optional mapping of module type names or submodule type + names to quantization schemes to override them with. If a scheme is mapped to + 'default', then it will use the scheme set in the mo difier scheme property + """ + + calibration_dataloader_: Any = None + calibration_function_: Any = None + + def on_initialize_structure(self, state: State, **kwargs): + module = state.model.model + self._apply_modifier_to_model(module) + apply_quantization_status(module, QuantizationStatus.FROZEN) + + def on_initialize(self, state: State, **kwargs) -> bool: + if self.end and self.end != -1: + raise ValueError( + "end_epoch is disabled for QuantizationModifier and can only be set to" + " -1 or None. Given {}".format(self.end) + ) + + self.calibration_dataloader_ = state.data.calib + module = state.model.model + + # intialize quantization in appropriate modules + self._apply_modifier_to_model(module) + + if self.calculate_start() == -1: # one-shot + apply_quantization_status(module, QuantizationStatus.CALIBRATION) + self._calibrate_if_possible(module) + apply_quantization_status(module, QuantizationStatus.FROZEN) + + return True + + def on_finalize(self, state: State, **kwargs) -> bool: + if self.post_oneshot_calibration: + state.model.model.apply(torch.quantization.enable_observer) + self._calibrate_if_possible(state.model.model) + self._disable_quantization_observer(state.model.model) + return True + + def on_start(self, state: State, event: Event, **kwargs): + module = state.model.model + apply_quantization_status(module, QuantizationStatus.CALIBRATION) + + def on_update(self, state: State, event: Event, **kwargs): + if event.type_ == EventType.BATCH_START: + if self.check_should_disable_observer(event): + module = state.model.model + apply_quantization_status(module, QuantizationStatus.FROZEN) + + def on_end(self, state: State, event: Event, **kwargs): + module = state.model.model + apply_quantization_status(module, QuantizationStatus.FROZEN) + + def on_event(self, state: State, event: Event, **kwargs): + pass + + def _apply_modifier_to_model(self, model: Module): + modifier_as_config = self.create_init_config() + apply_quantization_config(model, modifier_as_config) + + def _calibrate_if_possible(self, module: Module): + if self.num_calibration_steps == 0 and self.calibration_dataloader_: + _LOGGER.warning( + f"num_calibration_steps is {self.num_calibration_steps}." + f"Calibration data loader will not be used." 
+ ) + elif self.num_calibration_steps and not self.calibration_dataloader_: + raise ValueError( + f"num_calibration_steps is {self.num_calibration_steps}. " + "Calibration data loader is not set. Pass a " + "calibration_data_loader with initialize(...) method." + ) + + elif not self.calibration_dataloader_: + return + + self._calibrate(module) + + def _calibrate(self, module: Module): + class_name = self.__class__.__name__.replace("PyTorch", "") + _LOGGER.info( + f"Running {class_name} calibration with " + f"{len(self.calibration_dataloader_)} samples..." + ) + + module_training = module.training + module.eval() + + run_calibration_forward( + module, + self.calibration_dataloader_, + self.num_calibration_steps, + self.calibration_function_, + ) + + if module_training: + module.train() From 9174c1d1a9471e244d167976f0ff180dcaead701 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 16 Apr 2024 19:50:02 +0000 Subject: [PATCH 08/51] fixing issues --- .../modifiers/quantization_vllm/pytorch.py | 24 ++++++++--------- .../transformers/compression/compress_save.py | 27 ++++++++++++++++--- 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/src/sparseml/modifiers/quantization_vllm/pytorch.py b/src/sparseml/modifiers/quantization_vllm/pytorch.py index 4d8b87568cd..185cc1fc59a 100644 --- a/src/sparseml/modifiers/quantization_vllm/pytorch.py +++ b/src/sparseml/modifiers/quantization_vllm/pytorch.py @@ -15,16 +15,15 @@ import logging from typing import Any -import torch from torch.nn import Module from sparseml.core import Event, EventType, State from sparseml.modifiers.quantization_vllm.base import vLLMQuantizationModifier from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward from sparsetensors.quantization import ( - QuantizationStatus, apply_quantization_config, - apply_quantization_status, + freeze_module_quantization, + set_module_for_calibration, ) @@ -52,7 +51,7 @@ class vLLMQuantizationModifierPyTorch(vLLMQuantizationModifier): def on_initialize_structure(self, state: State, **kwargs): module = state.model.model self._apply_modifier_to_model(module) - apply_quantization_status(module, QuantizationStatus.FROZEN) + module.apply(freeze_module_quantization) def on_initialize(self, state: State, **kwargs) -> bool: if self.end and self.end != -1: @@ -68,32 +67,33 @@ def on_initialize(self, state: State, **kwargs) -> bool: self._apply_modifier_to_model(module) if self.calculate_start() == -1: # one-shot - apply_quantization_status(module, QuantizationStatus.CALIBRATION) + module.apply(set_module_for_calibration) self._calibrate_if_possible(module) - apply_quantization_status(module, QuantizationStatus.FROZEN) + module.apply(freeze_module_quantization) return True def on_finalize(self, state: State, **kwargs) -> bool: + module = state.model.model if self.post_oneshot_calibration: - state.model.model.apply(torch.quantization.enable_observer) - self._calibrate_if_possible(state.model.model) - self._disable_quantization_observer(state.model.model) + module.apply(set_module_for_calibration) + self._calibrate_if_possible(module) + module.apply(freeze_module_quantization) return True def on_start(self, state: State, event: Event, **kwargs): module = state.model.model - apply_quantization_status(module, QuantizationStatus.CALIBRATION) + module.apply(set_module_for_calibration) def on_update(self, state: State, event: Event, **kwargs): if event.type_ == EventType.BATCH_START: if self.check_should_disable_observer(event): module = state.model.model - 
apply_quantization_status(module, QuantizationStatus.FROZEN) + module.apply(freeze_module_quantization) def on_end(self, state: State, event: Event, **kwargs): module = state.model.model - apply_quantization_status(module, QuantizationStatus.FROZEN) + module.apply(freeze_module_quantization) def on_event(self, state: State, event: Event, **kwargs): pass diff --git a/src/sparseml/transformers/compression/compress_save.py b/src/sparseml/transformers/compression/compress_save.py index 46322886b89..8d184f9fcde 100644 --- a/src/sparseml/transformers/compression/compress_save.py +++ b/src/sparseml/transformers/compression/compress_save.py @@ -24,7 +24,13 @@ from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata from sparseml.utils.pytorch import qat_active -from sparsetensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor +from sparsetensors import ( + SPARSITY_CONFIG_NAME, + CompressionConfig, + ModelCompressor, + QuantizationConfig, +) +from sparsetensors.quantization.utils import is_model_quantized _LOGGER = logging.getLogger(__name__) @@ -76,16 +82,31 @@ def save_pretrained_wrapper( # state_dict gets passed in as a kwarg for FSDP models state_dict = kwargs.get("state_dict", None) - if qat_active(model): + if qat_active(model) or is_model_quantized(model): _LOGGER.info( "Compression for quantized models is not yet supported. Save will " "be run without compression and no sparsity statistics will be " "calculated." ) - return original_save_pretrained.__get__(model, model_class)( + + original_save_pretrained.__get__(model, model_class)( save_directory, **kwargs ) + if is_model_quantized(model): + quant_config = QuantizationConfig.from_pretrained(model) + quant_config_data = quant_config.dict() + config_file_path = os.path.join(save_directory, CONFIG_NAME) + + # add the sparsity config to the model's config file + with open(config_file_path, "r") as config_file: + config_data = json.load(config_file) + config_data["quantization_config"] = quant_config_data + with open(config_file_path, "w") as config_file: + json.dump(config_data, config_file, indent=2, sort_keys=True) + + return + if sparsity_config is not None: SparsityConfigMetadata.fill_config_details( sparsity_config, model, state_dict=state_dict From aa17e77593960d3c9df0496f9560c27f14df8391 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 17 Apr 2024 18:07:52 +0000 Subject: [PATCH 09/51] add test script --- test_quantization.py | 146 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 test_quantization.py diff --git a/test_quantization.py b/test_quantization.py new file mode 100644 index 00000000000..9fea185bcaa --- /dev/null +++ b/test_quantization.py @@ -0,0 +1,146 @@ +import torch +from sparseml.transformers import oneshot, SparseAutoModelForCausalLM, SparseAutoTokenizer +from sparseml.transformers.finetune.data.data_args import DataTrainingArguments +from sparseml.transformers.finetune.data import TextGenerationDataset +from torch.utils.data import DataLoader +from transformers import DefaultDataCollator, AutoModelForCausalLM +from sparseml.pytorch.utils import tensors_to_device +from sparsetensors.quantization.utils import is_module_quantized +import math + +def old_quant_linear(): + return """ + test_stage: + quant_modifiers: + QuantizationModifier: + ignore: + - LlamaRotaryEmbedding + - LlamaRMSNorm + - SiLU + - MatMulLeftInput_QK + - MatMulRightInput_QK + - MatMulOutput_QK + - MatMulLeftInput_PV + - MatMulRightInput_PV + - 
MatMulOutput_PV + - Embedding + scheme_overrides: + Linear: + weights: + num_bits: 8 + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + """ + +def new_quant_linear(): + return """ + test_stage: + quant_modifiers: + vLLMQuantizationModifier: + ignore: [] + config_groups: + group_0: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + targets: ["Linear"] + """ + +def labeled_dataloader(dataset_name, model_name): + tokenizer = SparseAutoTokenizer.from_pretrained(model_name) + data_args = DataTrainingArguments( + dataset=dataset_name, + max_seq_length=512, + pad_to_max_length=False, + ) + dataset_manager = TextGenerationDataset.load_from_registry( + data_args.dataset, + data_args=data_args, + split="train", + tokenizer=tokenizer, + ) + calib_dataset = dataset_manager.tokenize_and_process( + dataset_manager.get_raw_dataset() + ) + data_loader = DataLoader( + calib_dataset, batch_size=1, collate_fn=DefaultDataCollator() + ) + + return data_loader + +def run_oneshot(model, recipe, dataset): + num_calibration_samples = 8 + max_seq_length = 512 + pad_to_max_length = False + + oneshot( + model=model, + dataset=dataset, + overwrite_output_dir=True, + max_seq_length = max_seq_length, + num_calibration_samples=num_calibration_samples, + recipe=recipe, + pad_to_max_length=pad_to_max_length + ) + +def test_quantization_eval(): + num_comparisons = 4 + model_stub = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" + model_old = SparseAutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:0") + dataset = "open_platypus" + run_oneshot(model_old, old_quant_linear(), dataset) + + model_new = AutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:1") + run_oneshot(model_new, new_quant_linear(), dataset) + + old_quant_count = 0 + old_info = {} + for name, module in model_old.named_modules(): + if hasattr(module, "weight_fake_quant"): + old_info[name] = (module.weight_fake_quant.scale.item(), module.weight_fake_quant.zero_point.item()) + old_quant_count += 1 + + new_quant_count = 0 + new_info = {} + for name, module in model_new.named_modules(): + if is_module_quantized(module): + new_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) + new_quant_count += 1 + + assert old_quant_count == new_quant_count + for name, (o_scale, o_zp) in old_info.items(): + n_scale, n_zp = new_info[name] + if not math.isclose(o_scale, n_scale, abs_tol=1e-4, rel_tol=1e-4): + print(f"mismatch {name} {o_scale} {n_scale}") + if not math.isclose(o_zp, n_zp, rel_tol=1e-3): + print(f"mismatch {name} {o_zp} {n_zp}") + + dataloader = labeled_dataloader(dataset, model_stub) + total_old_ppl = 0.0 + total_new_ppl = 0.0 + for idx, sample in enumerate(dataloader): + if idx >= num_comparisons: + return + old_output = model_old(**(tensors_to_device(sample, "cuda:0"))) + new_output = model_new(**(tensors_to_device(sample, "cuda:1"))) + old_ppl = torch.exp(old_output.loss) + new_ppl = torch.exp(new_output.loss) + print(f"Perplexity: new {new_ppl} old {old_ppl}") + total_old_ppl += old_ppl + total_new_ppl += new_ppl + del old_output + del new_output + torch.cuda.empty_cache() + + + avg_new_ppl = total_new_ppl / num_comparisons + avg_old_ppl = total_old_ppl / num_comparisons + print(f"Avg Perplexity: new {avg_new_ppl} old {avg_old_ppl}") + +test_quantization_eval() \ No newline at end of file From f1f114c45e996b792ba127aa06b101ac169d04ca Mon Sep 17 00:00:00 2001 From: Sara Adkins 
Date: Wed, 17 Apr 2024 21:04:15 +0000 Subject: [PATCH 10/51] update perplexity test --- test_quantization.py | 43 ++++++++++++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/test_quantization.py b/test_quantization.py index 9fea185bcaa..56bf89bba4c 100644 --- a/test_quantization.py +++ b/test_quantization.py @@ -14,6 +14,7 @@ def old_quant_linear(): quant_modifiers: QuantizationModifier: ignore: + - model.layers.0.mlp.down_proj - LlamaRotaryEmbedding - LlamaRMSNorm - SiLU @@ -23,9 +24,18 @@ def old_quant_linear(): - MatMulLeftInput_PV - MatMulRightInput_PV - MatMulOutput_PV - - Embedding scheme_overrides: Linear: + weights: + num_bits: 8 + symmetric: true + strategy: "tensor" + input_activations: + num_bits: 8 + symmetric: false + strategy: "tensor" + output_activations: null + Embedding: weights: num_bits: 8 symmetric: true @@ -39,7 +49,7 @@ def new_quant_linear(): test_stage: quant_modifiers: vLLMQuantizationModifier: - ignore: [] + ignore: ["model.layers.0.mlp.down_proj"] config_groups: group_0: weights: @@ -47,9 +57,22 @@ def new_quant_linear(): type: "int" symmetric: true strategy: "tensor" - input_activations: null + input_activations: + num_bits: 8 + type: "int" + symmetric: false + strategy: "tensor" output_activations: null targets: ["Linear"] + group_1: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + targets: ["Embedding"] """ def labeled_dataloader(dataset_name, model_name): @@ -75,7 +98,7 @@ def labeled_dataloader(dataset_name, model_name): return data_loader def run_oneshot(model, recipe, dataset): - num_calibration_samples = 8 + num_calibration_samples = 512 max_seq_length = 512 pad_to_max_length = False @@ -115,10 +138,12 @@ def test_quantization_eval(): assert old_quant_count == new_quant_count for name, (o_scale, o_zp) in old_info.items(): + if name.endswith(".module"): + name = name[:-7] n_scale, n_zp = new_info[name] - if not math.isclose(o_scale, n_scale, abs_tol=1e-4, rel_tol=1e-4): + if not math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3): print(f"mismatch {name} {o_scale} {n_scale}") - if not math.isclose(o_zp, n_zp, rel_tol=1e-3): + if not o_zp == n_zp: print(f"mismatch {name} {o_zp} {n_zp}") dataloader = labeled_dataloader(dataset, model_stub) @@ -126,7 +151,7 @@ def test_quantization_eval(): total_new_ppl = 0.0 for idx, sample in enumerate(dataloader): if idx >= num_comparisons: - return + break old_output = model_old(**(tensors_to_device(sample, "cuda:0"))) new_output = model_new(**(tensors_to_device(sample, "cuda:1"))) old_ppl = torch.exp(old_output.loss) @@ -134,10 +159,6 @@ def test_quantization_eval(): print(f"Perplexity: new {new_ppl} old {old_ppl}") total_old_ppl += old_ppl total_new_ppl += new_ppl - del old_output - del new_output - torch.cuda.empty_cache() - avg_new_ppl = total_new_ppl / num_comparisons avg_old_ppl = total_old_ppl / num_comparisons From bbbdcb9b0a8640aa7461dc8f33bdd521909a3678 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Thu, 18 Apr 2024 11:04:58 +0000 Subject: [PATCH 11/51] refactor to compressed-tensors --- setup.py | 6 +- .../transformers/compression/README.md | 163 ------------------ .../transformers/compression/__init__.py | 18 -- .../compression/sparsity_config.py | 122 ------------- .../compressed_tensors_utils.py} | 5 +- .../sparsification/sparse_model.py | 9 +- .../test_compress_tensor_utils.py} | 6 +- 7 files changed, 14 insertions(+), 315 deletions(-) delete mode 100644 
src/sparseml/transformers/compression/README.md delete mode 100644 src/sparseml/transformers/compression/__init__.py delete mode 100644 src/sparseml/transformers/compression/sparsity_config.py rename src/sparseml/transformers/{compression/compress_save.py => sparsification/compressed_tensors_utils.py} (96%) rename tests/sparseml/transformers/{compression/test_sparse_auto.py => sparsification/test_compress_tensor_utils.py} (95%) diff --git a/setup.py b/setup.py index aee3bf57b14..9efe063a1fd 100644 --- a/setup.py +++ b/setup.py @@ -55,10 +55,12 @@ "GPUtil>=1.4.0", "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug - "sparsetensors @ git+ssh://git@github.com/neuralmagic/sparsetensors.git", "clearml==1.14.4", ] -_nm_deps = [f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}"] +_nm_deps = [ + f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}", + f"{'compressed-tensors' if is_release else 'compress-tensors-nightly'}~={version_nm_deps}", +] _deepsparse_deps = [ f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}" ] diff --git a/src/sparseml/transformers/compression/README.md b/src/sparseml/transformers/compression/README.md deleted file mode 100644 index 7d6a3c9eb90..00000000000 --- a/src/sparseml/transformers/compression/README.md +++ /dev/null @@ -1,163 +0,0 @@ -// # TODO: Edit this readme -# Save/Load Compressed SafeTensors - -## Motivation - -* Reduce disk space by saving in a compressed format for sparse models. Models in this compressed format will be loaded by vLLM for more efficient inference -* Set up the save/load architecture such that we can easily expand to additional compression formats in the future. The config should be human readable so users can understand the compression format at a quick glance - -## SafeTensors File Format - -For each parameter in the uncompressed state_dict, we store the following attributes -needed for decompression in the compressed state_dict: - -* compressed tensor -* bitmask -* uncompressed shape -* row offsets - -```python -# dense -{ - PARAM_NAME: uncompressed_tensor -} - -# compressed -{ - PARAM_NAME.compressed: compressed_tensor # 1d tensor - PARAM_NAME.bitmask: value # 2d bitmask tensor (nrows x (ncols / 8)) - PARAM_NAME.shape: value # uncompressed shape tensor - PARAM_NAME.row_offsets: value # 1d offsets tensor -} -``` - -Config information gets stored in the HF config file -```json -// config.json -{ - "sparsity_config": { - "format": "sparse_bitmask", // "dense_sparsity" for original tensor format - - // informational - "sparsity_structure": "unstructured", // or 2:4, 8:16 etc... - "global_sparsity": "0.5" - } -} -``` - -## Saving/Loading Interface - -Loading in a compressed model requires no interface changes - -```python -from sparseml.transformers.utils import SparseAutoModelForCausalLM - -# should contain model.safetensors or model.safetensors.index.json -model_path = "/PATH/TO/COMPRESSED_MODEL" - -model = SparseAutoModelForCausalLM.from_pretrained( - model_name_or_path=model_path, - **model_kwargs, -) -``` - -Saving a compressed model with an explicitly provided compression config. The config -is saved to the model's `config.json` file. 
**Note:** the model must have been -initialized with SparseAutoModelForCausalLM.from_pretrained() - -```python -from sparseml.transformers.compression import BitmaskConfig - -output_dir = "/PATH/TO/SAVE/COMPRESSED_MODEL" -sparsity_config = BitmaskConfig() - -model.save_pretrained( - save_directory=output_dir, - sparsity_config=sparsity_config, -) -``` - -Saving a compressed model, inferring the config from the model attributes - -```python -model.save_pretrained( - save_directory=output_dir, - save_compressed=True -) -``` - -Saving a model in the dense format. If the model has at least 5% global sparsity a -sparsity config will still be included in `config.json` with format `dense_sparsity` - -```python -model.save_pretrained( - save_directory=output_dir -) -``` - -Saving a model in the dense format, bypassing the sparsity config calculation. When the -`skip_compression_stats` flag is set, no sparsity config will be written to -`config.json` - -```python -model.save_pretrained( - save_directory=output_dir - skip_compression_stats=True -) -``` - -## Enable Compression During One-Shot and Sparse Finetunining -Models that are saved in a supported compressed format on disk will automatically be -decompressed when loaded as input to `sparseml.transformers.oneshot` or -`sparseml.transformers.train` - -To enable compression on save after oneshot or finetuning simply add the -`save_compressed=True` argument to `sparseml.transformers.oneshot` or -`sparseml.transformers.train` - -```python -from sparseml.transformers import train - -train( - save_compressed=True, - model="neuralmagic/TinyLlama-1.1B-Chat-v1.0-pruned2.4", - recipe=RECIPE, - dataset=DATASET -) -``` - - -## Example Code - -Loads a 60% sparse model, compresses it using the inferred bitmask compression, then -reloads the compressed model. - -```python -from sparseml.transformers import SparseAutoModelForCausalLM -from sparseml.utils.pytorch.utils import measure_cuda_memory -import torch - -MODEL_PATH = "zoo:llama2-7b-open_platypus_orca_llama2_pretrain-pruned60" -OUTPUT_PATH = "./test_compress_output" -RECIPE = "zoo:llama2-7b-open_platypus_orca_llama2_pretrain-pruned60" - -torch.cuda.set_device(0) -with measure_cuda_memory() as m: - model = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="cuda:0") -print(f"Load dense model peak GPU {m.overall_peak_memory / float(2**30):.4f} GB") - -sparsity_config = getattr(model,"sparsity_config", None) -print(f"Sparsity config before compression: {sparsity_config}") -with measure_cuda_memory() as m: - model.save_pretrained(OUTPUT_PATH, save_compressed=True) -print(f"Save compressed model peak GPU {m.overall_peak_memory / float(2**30):.4f} GB") - -torch.cuda.set_device(1) -with measure_cuda_memory() as m: - model_again = SparseAutoModelForCausalLM.from_pretrained( - OUTPUT_PATH, device_map="cuda:1" - ) -print(f"Load compressed model peak GPU {m.overall_peak_memory / float(2**30):.4f} GB") -sparsity_config = getattr(model_again,"sparsity_config", None) -print(f"Sparsity config after compression: {sparsity_config}") -``` diff --git a/src/sparseml/transformers/compression/__init__.py b/src/sparseml/transformers/compression/__init__.py deleted file mode 100644 index 2587489968c..00000000000 --- a/src/sparseml/transformers/compression/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# flake8: noqa - -from .compress_save import * -from .sparsity_config import * diff --git a/src/sparseml/transformers/compression/sparsity_config.py b/src/sparseml/transformers/compression/sparsity_config.py deleted file mode 100644 index 504e154148f..00000000000 --- a/src/sparseml/transformers/compression/sparsity_config.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Dict, Optional - -from torch import Tensor -from torch.nn import Module - -import sparseml.core.session as session_manager -from sparseml.pytorch.utils import ModuleSparsificationInfo -from sparsetensors import CompressionConfig - - -class SparsityConfigMetadata: - """ - Class of helper functions for filling out a CompressionConfig with readable - metadata from the model - """ - - @staticmethod - def infer_global_sparsity( - model: Module, state_dict: Optional[Dict[str, Tensor]] = None - ) -> float: - """ - Calculates the global percentage of sparse zero weights in the model - - :param model: pytorch model to infer sparsity of - :param state_dict: optional state_dict to replace that in model, used for - gathering global FSDP model info - :return: global sparsity of model - """ - - info = ModuleSparsificationInfo(model, state_dict=state_dict) - global_sparsity = info.params_sparse_percent - return global_sparsity - - @staticmethod - def infer_sparsity_structure() -> str: - """ - Determines what sparsity structure, if any, was applied in the currently active - sparse session - - :return: sparsity structure as a string - """ - current_session = session_manager.active_session() - stage_modifiers = current_session.lifecycle.modifiers - sparsity_structure = "unstructured" - - # check for applied pruning modifiers - for stage in stage_modifiers: - if stage.applied: - for modifier in stage.modifiers: - if hasattr(modifier, "mask_structure"): - sparsity_structure = modifier.mask_structure - break - - return sparsity_structure - - @staticmethod - def infer_config_from_model( - model: Module, - state_dict: Optional[Dict[str, Tensor]] = None, - compress: bool = False, - ) -> Optional["CompressionConfig"]: - """ - Determines compression type and informational parameters for a given model - - :param model: pytorch model to calculate sparsity config for - :param state_dict: optional state_dict to replace that in model, used for - gathering global FSDP model info - :param compress: whether or not to compress the model on disk - :return: compression config inferred from the model - """ - - global_sparsity = 
SparsityConfigMetadata.infer_global_sparsity( - model, state_dict=state_dict - ) - - if global_sparsity < 0.05: - return None - - sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() - if compress: - format = "sparse_bitmask" - else: - format = "dense_sparsity" - - return CompressionConfig.load_from_registry( - format, - global_sparsity=global_sparsity, - sparsity_structure=sparsity_structure, - ) - - @staticmethod - def fill_config_details( - config: CompressionConfig, - model: Module, - state_dict: Optional[Dict[str, Tensor]] = None, - ): - """ - Fills in informational sparsity parameters from a given model - - :param config: sparsity config to fill in - :param model: pytorch model to infer config parameters from - :param state_dict: optional state_dict to replace that in model, used for - gathering global FSDP model info - """ - config.global_sparsity = SparsityConfigMetadata.infer_global_sparsity( - model, state_dict=state_dict - ) - config.sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() diff --git a/src/sparseml/transformers/compression/compress_save.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py similarity index 96% rename from src/sparseml/transformers/compression/compress_save.py rename to src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 46322886b89..d1614a00f94 100644 --- a/src/sparseml/transformers/compression/compress_save.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -20,11 +20,8 @@ from typing import Optional from transformers import PreTrainedModel -from transformers.file_utils import CONFIG_NAME - -from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata from sparseml.utils.pytorch import qat_active -from sparsetensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor +# TODO: additional dependencies on compressed-tensors _LOGGER = logging.getLogger(__name__) diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index f56c84018ad..a148897ff76 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -34,11 +34,14 @@ apply_recipe_structure_to_model, log_model_load, ) -from sparseml.transformers.compression import modify_save_pretrained +from sparseml.transformers.sparsification.compressed_tensors_utils import ( + modify_save_pretrained, +) from sparseml.transformers.sparsification.modification import modify_model from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe -from sparsetensors import get_safetensors_folder -from sparsetensors.utils import infer_compressor_from_model_config + + +# TODO: additional dependencies on compressed-tensors __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] diff --git a/tests/sparseml/transformers/compression/test_sparse_auto.py b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py similarity index 95% rename from tests/sparseml/transformers/compression/test_sparse_auto.py rename to tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py index 42557cd808d..667bb119841 100644 --- a/tests/sparseml/transformers/compression/test_sparse_auto.py +++ b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py @@ -21,9 +21,9 @@ import sparseml.core.session as session_manager from sparseml.transformers import 
SparseAutoModelForCausalLM, oneshot -from sparseml.transformers.compression import SparsityConfigMetadata -from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME -from sparsetensors.config import BitmaskConfig, DenseSparsityConfig + + +# TODO: additional dependencies on compressed-tensors @pytest.mark.parametrize( From 7a9f9e5fefe99588d8b0b1f7a5d62efa5cf39ef2 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 18 Apr 2024 13:53:08 +0000 Subject: [PATCH 12/51] rename sparsetensors --- src/sparseml/modifiers/quantization_vllm/base.py | 4 ++-- src/sparseml/modifiers/quantization_vllm/pytorch.py | 8 ++++---- src/sparseml/transformers/compression/compress_save.py | 8 ++++---- src/sparseml/transformers/compression/sparsity_config.py | 2 +- src/sparseml/transformers/sparsification/sparse_model.py | 4 ++-- test_quantization.py | 2 +- .../sparseml/transformers/compression/test_sparse_auto.py | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/sparseml/modifiers/quantization_vllm/base.py b/src/sparseml/modifiers/quantization_vllm/base.py index 7b29e7f44ae..5451c1280a3 100644 --- a/src/sparseml/modifiers/quantization_vllm/base.py +++ b/src/sparseml/modifiers/quantization_vllm/base.py @@ -16,12 +16,12 @@ from pydantic import Field -from sparseml.core import Event, Modifier -from sparsetensors.quantization import ( +from compressed_tensors.quantization import ( QuantizationConfig, QuantizationScheme, QuantizationStatus, ) +from sparseml.core import Event, Modifier __all__ = ["vLLMQuantizationModifier"] diff --git a/src/sparseml/modifiers/quantization_vllm/pytorch.py b/src/sparseml/modifiers/quantization_vllm/pytorch.py index 185cc1fc59a..5b6b7419a60 100644 --- a/src/sparseml/modifiers/quantization_vllm/pytorch.py +++ b/src/sparseml/modifiers/quantization_vllm/pytorch.py @@ -17,14 +17,14 @@ from torch.nn import Module -from sparseml.core import Event, EventType, State -from sparseml.modifiers.quantization_vllm.base import vLLMQuantizationModifier -from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward -from sparsetensors.quantization import ( +from compressed_tensors.quantization import ( apply_quantization_config, freeze_module_quantization, set_module_for_calibration, ) +from sparseml.core import Event, EventType, State +from sparseml.modifiers.quantization_vllm.base import vLLMQuantizationModifier +from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward _LOGGER = logging.getLogger(__name__) diff --git a/src/sparseml/transformers/compression/compress_save.py b/src/sparseml/transformers/compression/compress_save.py index 8d184f9fcde..767ec968657 100644 --- a/src/sparseml/transformers/compression/compress_save.py +++ b/src/sparseml/transformers/compression/compress_save.py @@ -22,15 +22,15 @@ from transformers import PreTrainedModel from transformers.file_utils import CONFIG_NAME -from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata -from sparseml.utils.pytorch import qat_active -from sparsetensors import ( +from compressed_tensors import ( SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor, QuantizationConfig, ) -from sparsetensors.quantization.utils import is_model_quantized +from compressed_tensors.quantization.utils import is_model_quantized +from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata +from sparseml.utils.pytorch import qat_active _LOGGER = logging.getLogger(__name__) diff --git a/src/sparseml/transformers/compression/sparsity_config.py 
b/src/sparseml/transformers/compression/sparsity_config.py index 504e154148f..b04edf333c3 100644 --- a/src/sparseml/transformers/compression/sparsity_config.py +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -18,8 +18,8 @@ from torch.nn import Module import sparseml.core.session as session_manager +from compressed_tensors import CompressionConfig from sparseml.pytorch.utils import ModuleSparsificationInfo -from sparsetensors import CompressionConfig class SparsityConfigMetadata: diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index f56c84018ad..f5ce3bc3073 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -30,6 +30,8 @@ ) from transformers.file_utils import WEIGHTS_NAME +from compressed_tensors import get_safetensors_folder +from compressed_tensors.utils import infer_compressor_from_model_config from sparseml.pytorch.model_load.helpers import ( apply_recipe_structure_to_model, log_model_load, @@ -37,8 +39,6 @@ from sparseml.transformers.compression import modify_save_pretrained from sparseml.transformers.sparsification.modification import modify_model from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe -from sparsetensors import get_safetensors_folder -from sparsetensors.utils import infer_compressor_from_model_config __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] diff --git a/test_quantization.py b/test_quantization.py index 56bf89bba4c..a06d4324884 100644 --- a/test_quantization.py +++ b/test_quantization.py @@ -5,7 +5,7 @@ from torch.utils.data import DataLoader from transformers import DefaultDataCollator, AutoModelForCausalLM from sparseml.pytorch.utils import tensors_to_device -from sparsetensors.quantization.utils import is_module_quantized +from compressed_tensors.quantization.utils import is_module_quantized import math def old_quant_linear(): diff --git a/tests/sparseml/transformers/compression/test_sparse_auto.py b/tests/sparseml/transformers/compression/test_sparse_auto.py index 42557cd808d..b86e4a96e40 100644 --- a/tests/sparseml/transformers/compression/test_sparse_auto.py +++ b/tests/sparseml/transformers/compression/test_sparse_auto.py @@ -20,10 +20,10 @@ from transformers import AutoConfig import sparseml.core.session as session_manager +from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression import SparsityConfigMetadata from sparseml.transformers.utils.helpers import SPARSITY_CONFIG_NAME -from sparsetensors.config import BitmaskConfig, DenseSparsityConfig @pytest.mark.parametrize( From fa43088e020f29ad30a4f16ecc3a67ca0e7c9f04 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 18 Apr 2024 14:38:11 +0000 Subject: [PATCH 13/51] update setup --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index aee3bf57b14..fe648b6f07a 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ "GPUtil>=1.4.0", "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug - "sparsetensors @ git+ssh://git@github.com/neuralmagic/sparsetensors.git", + "compressed_tensors @ git+ssh://git@github.com/neuralmagic/compressed_tensors.git", "clearml==1.14.4", ] _nm_deps = [f"{'sparsezoo' if is_release else 
'sparsezoo-nightly'}~={version_nm_deps}"] From 63266d8ee193ab5f9675bfe02150e083da069271 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Fri, 19 Apr 2024 15:59:00 -0400 Subject: [PATCH 14/51] Sa/model reload (#2250) * working reload * sparsegpt --- .../modifiers/obcq/utils/sgpt_wrapper.py | 11 ++ .../transformers/compression/compress_save.py | 3 +- .../sparsification/sparse_model.py | 40 ++++-- src/sparseml/utils/pytorch/module.py | 3 + test_ppl_new_quant.py | 59 +++++++++ test_quantization.py | 121 +++++++++--------- test_quantization_reload.py | 121 ++++++++++++++++++ 7 files changed, 282 insertions(+), 76 deletions(-) create mode 100644 test_ppl_new_quant.py create mode 100644 test_quantization_reload.py diff --git a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py index a911fa1c0c7..e9fca78e743 100644 --- a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py +++ b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py @@ -171,6 +171,17 @@ def fasterprune( else: q = torch.quantize_per_channel(q, scale, zero_point, 0, dtype) q = torch.dequantize(q) + elif hasattr(self.layer, "quantization_scheme"): + if self.layer.quantization_scheme.weights is not None: + scale = self.layer.weight_scale + zero_point = self.layer.weight_zero_point + from compressed_tensors.quantization.lifecycle.forward import ( + fake_quantize, + ) + + q = fake_quantize( + q, scale, zero_point, self.layer.quantization_scheme.weights + ) Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d**2 diff --git a/src/sparseml/transformers/compression/compress_save.py b/src/sparseml/transformers/compression/compress_save.py index 767ec968657..fd96aa12859 100644 --- a/src/sparseml/transformers/compression/compress_save.py +++ b/src/sparseml/transformers/compression/compress_save.py @@ -23,6 +23,7 @@ from transformers.file_utils import CONFIG_NAME from compressed_tensors import ( + QUANTIZATION_CONFIG_NAME, SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor, @@ -101,7 +102,7 @@ def save_pretrained_wrapper( # add the sparsity config to the model's config file with open(config_file_path, "r") as config_file: config_data = json.load(config_file) - config_data["quantization_config"] = quant_config_data + config_data[QUANTIZATION_CONFIG_NAME] = quant_config_data with open(config_file_path, "w") as config_file: json.dump(config_data, config_file, indent=2, sort_keys=True) diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index f5ce3bc3073..569a3bbead3 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -30,8 +30,12 @@ ) from transformers.file_utils import WEIGHTS_NAME -from compressed_tensors import get_safetensors_folder -from compressed_tensors.utils import infer_compressor_from_model_config +from compressed_tensors.compressors import infer_compressor_from_model_config +from compressed_tensors.quantization import ( + QuantizationConfig, + apply_quantization_config, + load_pretrained_quantization, +) from sparseml.pytorch.model_load.helpers import ( apply_recipe_structure_to_model, log_model_load, @@ -103,6 +107,9 @@ def skip(*args, **kwargs): # determine compression format, if any, from the model config compressor = infer_compressor_from_model_config(pretrained_model_name_or_path) + quantization_config = QuantizationConfig.from_model_config( + pretrained_model_name_or_path + ) # temporarily set the log level to error, to ignore 
printing out long missing # and unexpected key error messages (these are EXPECTED for quantized models) @@ -119,21 +126,26 @@ def skip(*args, **kwargs): # If model is compressed on disk, decompress and load the weights if compressor is not None: - # if we loaded from a HF stub, find the cached model - model_path = get_safetensors_folder( - pretrained_model_name_or_path, cache_dir=kwargs.get("cache_dir", None) + # decompress weights + compressor.overwrite_weights( + model_path=pretrained_model_name_or_path, model=model ) - # decompress weights - compressor.overwrite_weights(model_path=model_path, model=model) - - recipe = resolve_recipe(recipe=recipe, model_path=pretrained_model_name_or_path) - if recipe: - apply_recipe_structure_to_model( - model=model, - model_path=pretrained_model_name_or_path, - recipe_path=recipe, + if quantization_config is not None: + # if we loaded from a HF stub, find the cached model + apply_quantization_config(model, quantization_config) + load_pretrained_quantization(model, pretrained_model_name_or_path) + else: + recipe = resolve_recipe( + recipe=recipe, model_path=pretrained_model_name_or_path ) + if recipe: + apply_recipe_structure_to_model( + model=model, + model_path=pretrained_model_name_or_path, + recipe_path=recipe, + ) + return model diff --git a/src/sparseml/utils/pytorch/module.py b/src/sparseml/utils/pytorch/module.py index 2228a533b31..780f1255db1 100644 --- a/src/sparseml/utils/pytorch/module.py +++ b/src/sparseml/utils/pytorch/module.py @@ -25,6 +25,7 @@ from torch.nn import Linear, Module, Parameter from torch.nn.modules.conv import _ConvNd +from compressed_tensors.quantization.utils import is_module_quantized from sparseml.core.model.base import ModelParameterizedLayer from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context @@ -283,6 +284,8 @@ def qat_active(module: Module) -> bool: for _, layer in module.named_modules(): if isinstance(layer, torch.quantization.FakeQuantize): return True + if is_module_quantized(layer): + return True return False diff --git a/test_ppl_new_quant.py b/test_ppl_new_quant.py new file mode 100644 index 00000000000..2c69d408668 --- /dev/null +++ b/test_ppl_new_quant.py @@ -0,0 +1,59 @@ +from sparseml.transformers import SparseAutoModelForCausalLM, SparseAutoTokenizer +from sparseml.transformers.finetune.data import TextGenerationDataset +from sparseml.transformers.finetune.data.data_args import DataTrainingArguments +from transformers import DefaultDataCollator +from torch.utils.data import DataLoader +import torch +import random +from sparseml.pytorch.utils import tensors_to_device + +MODEL_PATH_OLD = "llama1.1b_old_quant_wo" +MODEL_PATH_NEW = "llama1.1b_new_quant_wo" +MAX_SEQ_LENGTH = 512 +DATASET_NAME = "open_platypus" +NUM_COMPARISONS = 6 + +def get_dataloader(dataset_name, tokenizer): + data_args = DataTrainingArguments( + dataset=dataset_name, + max_seq_length=MAX_SEQ_LENGTH, + pad_to_max_length=False, + ) + dataset_manager = TextGenerationDataset.load_from_registry( + data_args.dataset, + data_args=data_args, + split="train", + tokenizer=tokenizer, + ) + calib_dataset = dataset_manager.tokenize_and_process( + dataset_manager.get_raw_dataset() + ) + data_loader = DataLoader( + calib_dataset, + batch_size=1, + collate_fn=DefaultDataCollator(), + sampler=torch.utils.data.RandomSampler(calib_dataset) + ) + + return data_loader + +def main(seed=0): + random.seed(seed) + torch.manual_seed(seed) + + model_new = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH_NEW, 
device_map="cuda:0") + model_old = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH_OLD, device_map="cuda:1") + tokenizer = SparseAutoTokenizer.from_pretrained(MODEL_PATH_NEW) + dataloader = get_dataloader(DATASET_NAME, tokenizer) + + for idx, sample in enumerate(dataloader): + if idx >= NUM_COMPARISONS: + break + sample_new = tensors_to_device(sample, "cuda:0") + sample_old = tensors_to_device(sample, "cuda:1") + output_new = model_new(**sample_new) + output_old = model_old(**sample_old) + print(torch.exp(output_new.loss).item(), torch.exp(output_old.loss).item()) + +if __name__ == "__main__": + main(seed=5678) diff --git a/test_quantization.py b/test_quantization.py index a06d4324884..15ce56fbfb6 100644 --- a/test_quantization.py +++ b/test_quantization.py @@ -1,12 +1,9 @@ import torch -from sparseml.transformers import oneshot, SparseAutoModelForCausalLM, SparseAutoTokenizer -from sparseml.transformers.finetune.data.data_args import DataTrainingArguments -from sparseml.transformers.finetune.data import TextGenerationDataset -from torch.utils.data import DataLoader -from transformers import DefaultDataCollator, AutoModelForCausalLM -from sparseml.pytorch.utils import tensors_to_device +from sparseml.transformers import oneshot, SparseAutoModelForCausalLM from compressed_tensors.quantization.utils import is_module_quantized -import math +import math +import random +import sparseml.core.session as session_manager def old_quant_linear(): return """ @@ -15,6 +12,7 @@ def old_quant_linear(): QuantizationModifier: ignore: - model.layers.0.mlp.down_proj + - lm_head - LlamaRotaryEmbedding - LlamaRMSNorm - SiLU @@ -42,6 +40,12 @@ def old_quant_linear(): strategy: "tensor" input_activations: null output_activations: null + SparseGPTModifier: + sparsity: 0.0 + block_size: 128 + sequential_update: False + quantize: True + targets: ["re:model.layers.\\\d+$"] """ def new_quant_linear(): @@ -49,7 +53,7 @@ def new_quant_linear(): test_stage: quant_modifiers: vLLMQuantizationModifier: - ignore: ["model.layers.0.mlp.down_proj"] + ignore: ["lm_head", "model.layers.0.mlp.down_proj"] config_groups: group_0: weights: @@ -73,32 +77,16 @@ def new_quant_linear(): input_activations: null output_activations: null targets: ["Embedding"] + SparseGPTModifier: + sparsity: 0.0 + block_size: 128 + sequential_update: False + quantize: True + targets: ["re:model.layers.\\\d+$"] """ -def labeled_dataloader(dataset_name, model_name): - tokenizer = SparseAutoTokenizer.from_pretrained(model_name) - data_args = DataTrainingArguments( - dataset=dataset_name, - max_seq_length=512, - pad_to_max_length=False, - ) - dataset_manager = TextGenerationDataset.load_from_registry( - data_args.dataset, - data_args=data_args, - split="train", - tokenizer=tokenizer, - ) - calib_dataset = dataset_manager.tokenize_and_process( - dataset_manager.get_raw_dataset() - ) - data_loader = DataLoader( - calib_dataset, batch_size=1, collate_fn=DefaultDataCollator() - ) - - return data_loader - -def run_oneshot(model, recipe, dataset): - num_calibration_samples = 512 +def run_oneshot(model, recipe, dataset, output_dir): + num_calibration_samples = 1024 max_seq_length = 512 pad_to_max_length = False @@ -106,62 +94,73 @@ def run_oneshot(model, recipe, dataset): model=model, dataset=dataset, overwrite_output_dir=True, + output_dir=output_dir, max_seq_length = max_seq_length, num_calibration_samples=num_calibration_samples, recipe=recipe, - pad_to_max_length=pad_to_max_length + pad_to_max_length=pad_to_max_length, ) -def test_quantization_eval(): - 
num_comparisons = 4 +def test_quantization_eval(input_seed): + random.seed(input_seed) + torch.manual_seed(input_seed) model_stub = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" model_old = SparseAutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:0") dataset = "open_platypus" - run_oneshot(model_old, old_quant_linear(), dataset) + with session_manager.create_session(): + run_oneshot(model_old, old_quant_linear(), dataset, "llama1.1b_old_quant") - model_new = AutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:1") - run_oneshot(model_new, new_quant_linear(), dataset) + model_new = SparseAutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:1") + with session_manager.create_session(): + run_oneshot(model_new, new_quant_linear(), dataset, "llama1.1b_new_quant") old_quant_count = 0 + old_quant_input_count = 0 old_info = {} + old_input_info = {} for name, module in model_old.named_modules(): if hasattr(module, "weight_fake_quant"): - old_info[name] = (module.weight_fake_quant.scale.item(), module.weight_fake_quant.zero_point.item()) + scale = module.weight_fake_quant.scale.item() + zp = module.weight_fake_quant.zero_point.item() + old_info[name] = (scale, zp) old_quant_count += 1 + elif hasattr(module, "quant"): + scale = module.quant.activation_post_process.scale.item() + zp = module.quant.activation_post_process.zero_point.item() + old_input_info[name] = (scale, zp) + old_quant_input_count += 1 new_quant_count = 0 + new_quant_input_count = 0 new_info = {} + new_input_info = {} for name, module in model_new.named_modules(): if is_module_quantized(module): - new_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) - new_quant_count += 1 + if module.quantization_scheme.weights is not None: + new_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) + new_quant_count += 1 + if module.quantization_scheme.input_activations is not None: + new_input_info[name] = (module.input_scale.item(), module.input_zero_point.item()) + new_quant_input_count += 1 assert old_quant_count == new_quant_count + assert old_quant_input_count == new_quant_input_count + for name, (o_scale, o_zp) in old_info.items(): if name.endswith(".module"): name = name[:-7] n_scale, n_zp = new_info[name] if not math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3): - print(f"mismatch {name} {o_scale} {n_scale}") + print(f"weight mismatch {name} {o_scale} {n_scale}") if not o_zp == n_zp: - print(f"mismatch {name} {o_zp} {n_zp}") - - dataloader = labeled_dataloader(dataset, model_stub) - total_old_ppl = 0.0 - total_new_ppl = 0.0 - for idx, sample in enumerate(dataloader): - if idx >= num_comparisons: - break - old_output = model_old(**(tensors_to_device(sample, "cuda:0"))) - new_output = model_new(**(tensors_to_device(sample, "cuda:1"))) - old_ppl = torch.exp(old_output.loss) - new_ppl = torch.exp(new_output.loss) - print(f"Perplexity: new {new_ppl} old {old_ppl}") - total_old_ppl += old_ppl - total_new_ppl += new_ppl + print(f"weight mismatch {name} {o_zp} {n_zp}") - avg_new_ppl = total_new_ppl / num_comparisons - avg_old_ppl = total_old_ppl / num_comparisons - print(f"Avg Perplexity: new {avg_new_ppl} old {avg_old_ppl}") + for name, (o_scale, o_zp) in old_input_info.items(): + print(name) + n_scale, n_zp = new_input_info[name] + if not math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3): + print(f"input mismatch {name} {o_scale} {n_scale}") + if not o_zp == n_zp: + print(f"input mismatch {name} {o_zp} {n_zp}") 
-test_quantization_eval() \ No newline at end of file +test_quantization_eval(input_seed=0) \ No newline at end of file diff --git a/test_quantization_reload.py b/test_quantization_reload.py new file mode 100644 index 00000000000..c6503ee6ccb --- /dev/null +++ b/test_quantization_reload.py @@ -0,0 +1,121 @@ +import torch +from sparseml.transformers import oneshot, SparseAutoModelForCausalLM, SparseAutoTokenizer +from sparseml.transformers.finetune.data.data_args import DataTrainingArguments +from sparseml.transformers.finetune.data import TextGenerationDataset +from torch.utils.data import DataLoader +from transformers import DefaultDataCollator +from compressed_tensors.quantization.utils import is_module_quantized +import math + +MODEL_PATH = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" +MAX_SEQ_LENGTH = 512 +DATASET_NAME = "open_platypus" +NUM_CALIBRATION_SAMPLES = 512 +OUTPUT_PATH = "llama1.1b_new_quant_comp" + +def new_quant_linear(): + return """ + test_stage: + quant_modifiers: + vLLMQuantizationModifier: + ignore: ["model.layers.0.mlp.down_proj"] + config_groups: + group_0: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: + num_bits: 8 + type: "int" + symmetric: false + strategy: "tensor" + output_activations: null + targets: ["Linear"] + group_1: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + targets: ["Embedding"] + """ + +def labeled_dataloader(dataset_name, model_name): + tokenizer = SparseAutoTokenizer.from_pretrained(model_name) + data_args = DataTrainingArguments( + dataset=dataset_name, + max_seq_length=MAX_SEQ_LENGTH, + pad_to_max_length=False, + ) + dataset_manager = TextGenerationDataset.load_from_registry( + data_args.dataset, + data_args=data_args, + split="train", + tokenizer=tokenizer, + ) + calib_dataset = dataset_manager.tokenize_and_process( + dataset_manager.get_raw_dataset() + ) + data_loader = DataLoader( + calib_dataset, + batch_size=1, + collate_fn=DefaultDataCollator(), + sampler=torch.utils.data.RandomSampler(calib_dataset) + ) + + return data_loader + +def run_oneshot(model, recipe, dataset, output_dir): + num_calibration_samples = NUM_CALIBRATION_SAMPLES + max_seq_length = MAX_SEQ_LENGTH + pad_to_max_length = False + + oneshot( + model=model, + dataset=dataset, + overwrite_output_dir=True, + output_dir=output_dir, + max_seq_length = max_seq_length, + num_calibration_samples=num_calibration_samples, + recipe=recipe, + pad_to_max_length=pad_to_max_length, + ) + +def test_quantization_reload(): + model = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="cuda:0") + run_oneshot(model, new_quant_linear(), DATASET_NAME, OUTPUT_PATH) + + model_reloaded = SparseAutoModelForCausalLM.from_pretrained(OUTPUT_PATH, device_map="cuda:1") + + weight_info = {} + input_info = {} + for name, module in model.named_modules(): + if is_module_quantized(module): + if module.quantization_scheme.weights is not None: + weight_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) + if module.quantization_scheme.input_activations is not None: + input_info[name] = (module.input_scale.item(), module.input_zero_point.item()) + + reload_weight_info = {} + reload_input_info = {} + for name, module in model_reloaded.named_modules(): + if is_module_quantized(module): + if module.quantization_scheme.weights is not None: + reload_weight_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) + if 
module.quantization_scheme.input_activations is not None: + reload_input_info[name] = (module.input_scale.item(), module.input_zero_point.item()) + + + for name, (o_scale, o_zp) in weight_info.items(): + n_scale, n_zp = reload_weight_info[name] + if not o_scale == n_scale: + print(f"weight mismatch {name} {o_scale} {n_scale}") + if not o_zp == n_zp: + print(f"weight mismatch {name} {o_zp} {n_zp}") + + +test_quantization_reload() \ No newline at end of file From 55976c564142a65d1a43fbe9721b132add8f0a3c Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Mon, 22 Apr 2024 20:54:44 +0000 Subject: [PATCH 15/51] cleanup --- .../finetuning/example_single_gpu_config.yaml | 15 +++++++++++++++ setup.py | 4 ++-- .../sparsification/test_compress_tensor_utils.py | 3 ++- 3 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 integrations/huggingface-transformers/finetuning/example_single_gpu_config.yaml diff --git a/integrations/huggingface-transformers/finetuning/example_single_gpu_config.yaml b/integrations/huggingface-transformers/finetuning/example_single_gpu_config.yaml new file mode 100644 index 00000000000..d2f7ec8cdc7 --- /dev/null +++ b/integrations/huggingface-transformers/finetuning/example_single_gpu_config.yaml @@ -0,0 +1,15 @@ +compute_environment: LOCAL_MACHINE +debug: false +distributed_type: 'NO' +enable_cpu_affinity: false +gpu_ids: 0 +machine_rank: 0 +main_training_function: main +num_machines: 1 +num_processes: 1 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false \ No newline at end of file diff --git a/setup.py b/setup.py index aa8577f9137..8d6835efcc2 100644 --- a/setup.py +++ b/setup.py @@ -55,12 +55,12 @@ "GPUtil>=1.4.0", "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug - "compressed_tensors @ git+ssh://git@github.com/neuralmagic/compressed_tensors.git", "clearml==1.14.4", ] _nm_deps = [ f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}", - f"{'compressed-tensors' if is_release else 'compress-tensors-nightly'}~={version_nm_deps}", + f"{'compressed-tensors' if is_release else 'compress-tensors-nightly'}" + f"~={version_nm_deps}", ] _deepsparse_deps = [ f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}" diff --git a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py index c00dd967d6e..75a9d62edc5 100644 --- a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py @@ -20,11 +20,12 @@ from transformers import AutoConfig import sparseml.core.session as session_manager -from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig from compressed_tensors import SPARSITY_CONFIG_NAME +from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression import SparsityConfigMetadata + @pytest.mark.parametrize( "compressed,config,dtype", [ From 38f4f770f57464e489924b840cc03d3ae01aa82e Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Mon, 22 Apr 2024 21:59:33 +0000 Subject: [PATCH 16/51] refactor tests --- test_ppl_new_quant.py | 59 ----- test_quantization.py | 166 -------------- test_quantization_reload.py | 121 ---------- .../transformers/compression/__init__.py | 13 ++ 
.../compression/recipes/new_quant_full.yaml | 33 +++ .../compression/recipes/new_quant_weight.yaml | 20 ++ .../compression/recipes/old_quant_full.yaml | 39 ++++ .../compression/recipes/old_quant_weight.yaml | 36 +++ .../test_compress_tensor_utils.py | 0 .../compression/test_quantization.py | 216 ++++++++++++++++++ tests/testing_utils.py | 7 +- 11 files changed, 363 insertions(+), 347 deletions(-) delete mode 100644 test_ppl_new_quant.py delete mode 100644 test_quantization.py delete mode 100644 test_quantization_reload.py create mode 100644 tests/sparseml/transformers/compression/__init__.py create mode 100644 tests/sparseml/transformers/compression/recipes/new_quant_full.yaml create mode 100644 tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml create mode 100644 tests/sparseml/transformers/compression/recipes/old_quant_full.yaml create mode 100644 tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml rename tests/sparseml/transformers/{sparsification => compression}/test_compress_tensor_utils.py (100%) create mode 100644 tests/sparseml/transformers/compression/test_quantization.py diff --git a/test_ppl_new_quant.py b/test_ppl_new_quant.py deleted file mode 100644 index 2c69d408668..00000000000 --- a/test_ppl_new_quant.py +++ /dev/null @@ -1,59 +0,0 @@ -from sparseml.transformers import SparseAutoModelForCausalLM, SparseAutoTokenizer -from sparseml.transformers.finetune.data import TextGenerationDataset -from sparseml.transformers.finetune.data.data_args import DataTrainingArguments -from transformers import DefaultDataCollator -from torch.utils.data import DataLoader -import torch -import random -from sparseml.pytorch.utils import tensors_to_device - -MODEL_PATH_OLD = "llama1.1b_old_quant_wo" -MODEL_PATH_NEW = "llama1.1b_new_quant_wo" -MAX_SEQ_LENGTH = 512 -DATASET_NAME = "open_platypus" -NUM_COMPARISONS = 6 - -def get_dataloader(dataset_name, tokenizer): - data_args = DataTrainingArguments( - dataset=dataset_name, - max_seq_length=MAX_SEQ_LENGTH, - pad_to_max_length=False, - ) - dataset_manager = TextGenerationDataset.load_from_registry( - data_args.dataset, - data_args=data_args, - split="train", - tokenizer=tokenizer, - ) - calib_dataset = dataset_manager.tokenize_and_process( - dataset_manager.get_raw_dataset() - ) - data_loader = DataLoader( - calib_dataset, - batch_size=1, - collate_fn=DefaultDataCollator(), - sampler=torch.utils.data.RandomSampler(calib_dataset) - ) - - return data_loader - -def main(seed=0): - random.seed(seed) - torch.manual_seed(seed) - - model_new = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH_NEW, device_map="cuda:0") - model_old = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH_OLD, device_map="cuda:1") - tokenizer = SparseAutoTokenizer.from_pretrained(MODEL_PATH_NEW) - dataloader = get_dataloader(DATASET_NAME, tokenizer) - - for idx, sample in enumerate(dataloader): - if idx >= NUM_COMPARISONS: - break - sample_new = tensors_to_device(sample, "cuda:0") - sample_old = tensors_to_device(sample, "cuda:1") - output_new = model_new(**sample_new) - output_old = model_old(**sample_old) - print(torch.exp(output_new.loss).item(), torch.exp(output_old.loss).item()) - -if __name__ == "__main__": - main(seed=5678) diff --git a/test_quantization.py b/test_quantization.py deleted file mode 100644 index 15ce56fbfb6..00000000000 --- a/test_quantization.py +++ /dev/null @@ -1,166 +0,0 @@ -import torch -from sparseml.transformers import oneshot, SparseAutoModelForCausalLM -from compressed_tensors.quantization.utils import 
is_module_quantized -import math -import random -import sparseml.core.session as session_manager - -def old_quant_linear(): - return """ - test_stage: - quant_modifiers: - QuantizationModifier: - ignore: - - model.layers.0.mlp.down_proj - - lm_head - - LlamaRotaryEmbedding - - LlamaRMSNorm - - SiLU - - MatMulLeftInput_QK - - MatMulRightInput_QK - - MatMulOutput_QK - - MatMulLeftInput_PV - - MatMulRightInput_PV - - MatMulOutput_PV - scheme_overrides: - Linear: - weights: - num_bits: 8 - symmetric: true - strategy: "tensor" - input_activations: - num_bits: 8 - symmetric: false - strategy: "tensor" - output_activations: null - Embedding: - weights: - num_bits: 8 - symmetric: true - strategy: "tensor" - input_activations: null - output_activations: null - SparseGPTModifier: - sparsity: 0.0 - block_size: 128 - sequential_update: False - quantize: True - targets: ["re:model.layers.\\\d+$"] - """ - -def new_quant_linear(): - return """ - test_stage: - quant_modifiers: - vLLMQuantizationModifier: - ignore: ["lm_head", "model.layers.0.mlp.down_proj"] - config_groups: - group_0: - weights: - num_bits: 8 - type: "int" - symmetric: true - strategy: "tensor" - input_activations: - num_bits: 8 - type: "int" - symmetric: false - strategy: "tensor" - output_activations: null - targets: ["Linear"] - group_1: - weights: - num_bits: 8 - type: "int" - symmetric: true - strategy: "tensor" - input_activations: null - output_activations: null - targets: ["Embedding"] - SparseGPTModifier: - sparsity: 0.0 - block_size: 128 - sequential_update: False - quantize: True - targets: ["re:model.layers.\\\d+$"] - """ - -def run_oneshot(model, recipe, dataset, output_dir): - num_calibration_samples = 1024 - max_seq_length = 512 - pad_to_max_length = False - - oneshot( - model=model, - dataset=dataset, - overwrite_output_dir=True, - output_dir=output_dir, - max_seq_length = max_seq_length, - num_calibration_samples=num_calibration_samples, - recipe=recipe, - pad_to_max_length=pad_to_max_length, - ) - -def test_quantization_eval(input_seed): - random.seed(input_seed) - torch.manual_seed(input_seed) - model_stub = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" - model_old = SparseAutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:0") - dataset = "open_platypus" - with session_manager.create_session(): - run_oneshot(model_old, old_quant_linear(), dataset, "llama1.1b_old_quant") - - model_new = SparseAutoModelForCausalLM.from_pretrained(model_stub, device_map="cuda:1") - with session_manager.create_session(): - run_oneshot(model_new, new_quant_linear(), dataset, "llama1.1b_new_quant") - - old_quant_count = 0 - old_quant_input_count = 0 - old_info = {} - old_input_info = {} - for name, module in model_old.named_modules(): - if hasattr(module, "weight_fake_quant"): - scale = module.weight_fake_quant.scale.item() - zp = module.weight_fake_quant.zero_point.item() - old_info[name] = (scale, zp) - old_quant_count += 1 - elif hasattr(module, "quant"): - scale = module.quant.activation_post_process.scale.item() - zp = module.quant.activation_post_process.zero_point.item() - old_input_info[name] = (scale, zp) - old_quant_input_count += 1 - - new_quant_count = 0 - new_quant_input_count = 0 - new_info = {} - new_input_info = {} - for name, module in model_new.named_modules(): - if is_module_quantized(module): - if module.quantization_scheme.weights is not None: - new_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) - new_quant_count += 1 - if module.quantization_scheme.input_activations is 
not None: - new_input_info[name] = (module.input_scale.item(), module.input_zero_point.item()) - new_quant_input_count += 1 - - assert old_quant_count == new_quant_count - assert old_quant_input_count == new_quant_input_count - - for name, (o_scale, o_zp) in old_info.items(): - if name.endswith(".module"): - name = name[:-7] - n_scale, n_zp = new_info[name] - if not math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3): - print(f"weight mismatch {name} {o_scale} {n_scale}") - if not o_zp == n_zp: - print(f"weight mismatch {name} {o_zp} {n_zp}") - - for name, (o_scale, o_zp) in old_input_info.items(): - print(name) - n_scale, n_zp = new_input_info[name] - if not math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3): - print(f"input mismatch {name} {o_scale} {n_scale}") - if not o_zp == n_zp: - print(f"input mismatch {name} {o_zp} {n_zp}") - -test_quantization_eval(input_seed=0) \ No newline at end of file diff --git a/test_quantization_reload.py b/test_quantization_reload.py deleted file mode 100644 index c6503ee6ccb..00000000000 --- a/test_quantization_reload.py +++ /dev/null @@ -1,121 +0,0 @@ -import torch -from sparseml.transformers import oneshot, SparseAutoModelForCausalLM, SparseAutoTokenizer -from sparseml.transformers.finetune.data.data_args import DataTrainingArguments -from sparseml.transformers.finetune.data import TextGenerationDataset -from torch.utils.data import DataLoader -from transformers import DefaultDataCollator -from compressed_tensors.quantization.utils import is_module_quantized -import math - -MODEL_PATH = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" -MAX_SEQ_LENGTH = 512 -DATASET_NAME = "open_platypus" -NUM_CALIBRATION_SAMPLES = 512 -OUTPUT_PATH = "llama1.1b_new_quant_comp" - -def new_quant_linear(): - return """ - test_stage: - quant_modifiers: - vLLMQuantizationModifier: - ignore: ["model.layers.0.mlp.down_proj"] - config_groups: - group_0: - weights: - num_bits: 8 - type: "int" - symmetric: true - strategy: "tensor" - input_activations: - num_bits: 8 - type: "int" - symmetric: false - strategy: "tensor" - output_activations: null - targets: ["Linear"] - group_1: - weights: - num_bits: 8 - type: "int" - symmetric: true - strategy: "tensor" - input_activations: null - output_activations: null - targets: ["Embedding"] - """ - -def labeled_dataloader(dataset_name, model_name): - tokenizer = SparseAutoTokenizer.from_pretrained(model_name) - data_args = DataTrainingArguments( - dataset=dataset_name, - max_seq_length=MAX_SEQ_LENGTH, - pad_to_max_length=False, - ) - dataset_manager = TextGenerationDataset.load_from_registry( - data_args.dataset, - data_args=data_args, - split="train", - tokenizer=tokenizer, - ) - calib_dataset = dataset_manager.tokenize_and_process( - dataset_manager.get_raw_dataset() - ) - data_loader = DataLoader( - calib_dataset, - batch_size=1, - collate_fn=DefaultDataCollator(), - sampler=torch.utils.data.RandomSampler(calib_dataset) - ) - - return data_loader - -def run_oneshot(model, recipe, dataset, output_dir): - num_calibration_samples = NUM_CALIBRATION_SAMPLES - max_seq_length = MAX_SEQ_LENGTH - pad_to_max_length = False - - oneshot( - model=model, - dataset=dataset, - overwrite_output_dir=True, - output_dir=output_dir, - max_seq_length = max_seq_length, - num_calibration_samples=num_calibration_samples, - recipe=recipe, - pad_to_max_length=pad_to_max_length, - ) - -def test_quantization_reload(): - model = SparseAutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="cuda:0") - run_oneshot(model, new_quant_linear(), 
DATASET_NAME, OUTPUT_PATH) - - model_reloaded = SparseAutoModelForCausalLM.from_pretrained(OUTPUT_PATH, device_map="cuda:1") - - weight_info = {} - input_info = {} - for name, module in model.named_modules(): - if is_module_quantized(module): - if module.quantization_scheme.weights is not None: - weight_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) - if module.quantization_scheme.input_activations is not None: - input_info[name] = (module.input_scale.item(), module.input_zero_point.item()) - - reload_weight_info = {} - reload_input_info = {} - for name, module in model_reloaded.named_modules(): - if is_module_quantized(module): - if module.quantization_scheme.weights is not None: - reload_weight_info[name] = (module.weight_scale.item(), module.weight_zero_point.item()) - if module.quantization_scheme.input_activations is not None: - reload_input_info[name] = (module.input_scale.item(), module.input_zero_point.item()) - - - for name, (o_scale, o_zp) in weight_info.items(): - n_scale, n_zp = reload_weight_info[name] - if not o_scale == n_scale: - print(f"weight mismatch {name} {o_scale} {n_scale}") - if not o_zp == n_zp: - print(f"weight mismatch {name} {o_zp} {n_zp}") - - -test_quantization_reload() \ No newline at end of file diff --git a/tests/sparseml/transformers/compression/__init__.py b/tests/sparseml/transformers/compression/__init__.py new file mode 100644 index 00000000000..0c44f887a47 --- /dev/null +++ b/tests/sparseml/transformers/compression/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/sparseml/transformers/compression/recipes/new_quant_full.yaml b/tests/sparseml/transformers/compression/recipes/new_quant_full.yaml new file mode 100644 index 00000000000..c5a55fa3284 --- /dev/null +++ b/tests/sparseml/transformers/compression/recipes/new_quant_full.yaml @@ -0,0 +1,33 @@ +test_stage: + quant_modifiers: + vLLMQuantizationModifier: + ignore: ["lm_head", "model.layers.0.mlp.down_proj"] + config_groups: + group_0: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: + num_bits: 8 + type: "int" + symmetric: false + strategy: "tensor" + output_activations: null + targets: ["Linear"] + group_1: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + targets: ["Embedding"] + SparseGPTModifier: + sparsity: 0.0 + block_size: 128 + sequential_update: False + quantize: True + targets: ["re:model.layers.\\d+$"] \ No newline at end of file diff --git a/tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml b/tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml new file mode 100644 index 00000000000..64a1f87b29d --- /dev/null +++ b/tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml @@ -0,0 +1,20 @@ +test_stage: + quant_modifiers: + vLLMQuantizationModifier: + ignore: ["lm_head", "model.layers.0.mlp.down_proj"] + config_groups: + group_0: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + targets: ["Linear", "Embedding"] + SparseGPTModifier: + sparsity: 0.0 + block_size: 128 + sequential_update: False + quantize: True + targets: ["re:model.layers.\\d+$"] \ No newline at end of file diff --git a/tests/sparseml/transformers/compression/recipes/old_quant_full.yaml b/tests/sparseml/transformers/compression/recipes/old_quant_full.yaml new file mode 100644 index 00000000000..8a94733242a --- /dev/null +++ b/tests/sparseml/transformers/compression/recipes/old_quant_full.yaml @@ -0,0 +1,39 @@ +test_stage: + quant_modifiers: + QuantizationModifier: + ignore: + - model.layers.0.mlp.down_proj + - lm_head + - LlamaRotaryEmbedding + - LlamaRMSNorm + - SiLU + - MatMulLeftInput_QK + - MatMulRightInput_QK + - MatMulOutput_QK + - MatMulLeftInput_PV + - MatMulRightInput_PV + - MatMulOutput_PV + scheme_overrides: + Linear: + weights: + num_bits: 8 + symmetric: true + strategy: "tensor" + input_activations: + num_bits: 8 + symmetric: false + strategy: "tensor" + output_activations: null + Embedding: + weights: + num_bits: 8 + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + SparseGPTModifier: + sparsity: 0.0 + block_size: 128 + sequential_update: False + quantize: True + targets: ["re:model.layers.\\d+$"] \ No newline at end of file diff --git a/tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml b/tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml new file mode 100644 index 00000000000..e095a22912b --- /dev/null +++ b/tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml @@ -0,0 +1,36 @@ +test_stage: + quant_modifiers: + QuantizationModifier: + ignore: + - model.layers.0.mlp.down_proj + - lm_head + - LlamaRotaryEmbedding + - LlamaRMSNorm + - SiLU + - MatMulLeftInput_QK + - MatMulRightInput_QK + - MatMulOutput_QK + - MatMulLeftInput_PV + - MatMulRightInput_PV + - MatMulOutput_PV + scheme_overrides: + Linear: + weights: + num_bits: 8 + symmetric: 
true + strategy: "tensor" + input_activations: null + output_activations: null + Embedding: + weights: + num_bits: 8 + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + SparseGPTModifier: + sparsity: 0.0 + block_size: 128 + sequential_update: False + quantize: True + targets: ["re:model.layers.\\d+$"] \ No newline at end of file diff --git a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py similarity index 100% rename from tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py rename to tests/sparseml/transformers/compression/test_compress_tensor_utils.py diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py new file mode 100644 index 00000000000..bdfdb0d04b1 --- /dev/null +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -0,0 +1,216 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import os +import shutil +import tempfile +import unittest + +import torch +from torch.utils.data import DataLoader +from transformers import DefaultDataCollator + +from compressed_tensors.quantization.utils import is_module_quantized +from parameterized import parameterized_class +from sparseml.pytorch.utils import tensors_to_device +from sparseml.transformers import ( + SparseAutoModelForCausalLM, + SparseAutoTokenizer, + oneshot, +) +from sparseml.transformers.finetune.data import TextGenerationDataset +from sparseml.transformers.finetune.data.data_args import DataTrainingArguments +from tests.testing_utils import requires_gpu, requires_torch + + +@requires_torch +@requires_gpu +@parameterized_class( + ("old_recipe", "new_recipe"), + [ + ( + "tests/sparseml/transformers/compression/recipes/old_quant_full.yaml", + "tests/sparseml/transformers/compression/recipes/new_quant_full.yaml", + ), + ( + "tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml", + "tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml", + ), + ], +) +class TestQuantizationMatches(unittest.TestCase): + old_recipe = None + new_recipe = None + model_stub = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" + dataset = "open_platypus" + old_output = "tiny_llama_old" + new_output = "tiny_llama_new" + max_seq_length = 512 + num_comparisons = 2 + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + + self.model_old = SparseAutoModelForCausalLM.from_pretrained( + self.model_stub, device_map="cuda:0" + ) + self._run_oneshot( + self.model_old, + self.old_recipe, + self.dataset, + os.path.join(self.test_dir, self.old_output), + ) + + self.model_new = SparseAutoModelForCausalLM.from_pretrained( + self.model_stub, device_map="cuda:0" + ) + self._run_oneshot( + self.model_new, + self.new_recipe, + self.dataset, + os.path.join(self.test_dir, self.new_output), + ) + + def 
tearDown(self): + shutil.rmtree(self.test_dir) + + def _run_oneshot(self, model, recipe, dataset, output_dir): + num_calibration_samples = 512 + pad_to_max_length = False + + oneshot( + model=model, + dataset=dataset, + overwrite_output_dir=True, + output_dir=output_dir, + max_seq_length=self.max_seq_length, + num_calibration_samples=num_calibration_samples, + recipe=recipe, + pad_to_max_length=pad_to_max_length, + ) + + def _get_quant_info_old(self, model): + quant_info_weights = {} + quant_info_inputs = {} + for name, module in model.named_modules(): + if hasattr(module, "weight_fake_quant"): + scale = module.weight_fake_quant.scale.item() + zp = module.weight_fake_quant.zero_point.item() + quant_info_weights[name] = (scale, zp) + elif hasattr(module, "quant"): + scale = module.quant.activation_post_process.scale.item() + zp = module.quant.activation_post_process.zero_point.item() + quant_info_inputs[name] = (scale, zp) + + return quant_info_weights, quant_info_inputs + + def _get_quant_info_new(self, model): + quant_info_weights = {} + quant_info_inputs = {} + for name, module in model.named_modules(): + if is_module_quantized(module): + if module.quantization_scheme.weights is not None: + quant_info_weights[name] = ( + module.weight_scale.item(), + module.weight_zero_point.item(), + ) + if module.quantization_scheme.input_activations is not None: + quant_info_inputs[name] = ( + module.input_scale.item(), + module.input_zero_point.item(), + ) + + return quant_info_weights, quant_info_inputs + + def test_quantization_counts(self): + old_quant_weights, old_quant_inputs = self._get_quant_info_old(self.model_old) + new_quant_weights, new_quant_inputs = self._get_quant_info_new(self.model_new) + + assert len(old_quant_weights) == len(new_quant_weights) + assert len(old_quant_inputs) == len(new_quant_inputs) + + def test_quantization_scale_and_zp(self): + old_quant_weights, old_quant_inputs = self._get_quant_info_old(self.model_old) + new_quant_weights, new_quant_inputs = self._get_quant_info_new(self.model_new) + + for name, (o_scale, o_zp) in old_quant_weights.items(): + if name.endswith(".module"): + name = name[:-7] + n_scale, n_zp = new_quant_weights[name] + assert math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3) + assert o_zp == n_zp + + for name, (o_scale, o_zp) in old_quant_inputs.items(): + n_scale, n_zp = new_quant_inputs[name] + assert math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3) + assert o_zp == n_zp + + def test_quantization_reload(self): + model_reloaded = SparseAutoModelForCausalLM.from_pretrained( + self.test_dir / self.new_output + ) + + og_weights, og_inputs = self._get_quant_info_new(self.model_new) + reloaded_weights, reloaded_inputs = self._get_quant_info_new(model_reloaded) + + for name, (o_scale, o_zp) in og_weights.items(): + n_scale, n_zp = reloaded_weights[name] + assert o_scale == n_scale + assert o_zp == n_zp + + for name, (o_scale, o_zp) in og_inputs.items(): + n_scale, n_zp = reloaded_inputs[name] + assert o_scale == n_scale + assert o_zp == n_zp + + def _get_dataloader(self, dataset_name, tokenizer): + data_args = DataTrainingArguments( + dataset=dataset_name, + max_seq_length=self.max_seq_length, + pad_to_max_length=False, + ) + dataset_manager = TextGenerationDataset.load_from_registry( + data_args.dataset, + data_args=data_args, + split="train", + tokenizer=tokenizer, + ) + calib_dataset = dataset_manager.tokenize_and_process( + dataset_manager.get_raw_dataset() + ) + data_loader = DataLoader( + calib_dataset, + batch_size=1, + 
collate_fn=DefaultDataCollator(), + sampler=torch.utils.data.RandomSampler(calib_dataset), + ) + + return data_loader + + def test_perplexity(self): + tokenizer = SparseAutoTokenizer.from_pretrained(self.model_stub) + dataloader = self._get_dataloader(self.dataset, tokenizer) + + for idx, sample in enumerate(dataloader): + if idx >= self.num_comparisons: + break + sample_new = tensors_to_device(sample, "cuda:0") + sample_old = tensors_to_device(sample, "cuda:0") + output_new = self.model_new(**sample_new) + output_old = self.model_old(**sample_old) + ppl_ratio = ( + torch.exp(output_new.loss).item() / torch.exp(output_old.loss).item() + ) + assert abs(1.0 - ppl_ratio) < 0.05 diff --git a/tests/testing_utils.py b/tests/testing_utils.py index 81853d0ca03..03658e587e5 100644 --- a/tests/testing_utils.py +++ b/tests/testing_utils.py @@ -36,7 +36,12 @@ def is_torch_available(): def is_gpu_available(): - return False + try: + import torch # noqa: F401 + + return torch.cuda.device_count() > 0 + except ImportError: + return False def requires_torch(test_case): From 65748749e6785c188b6cf3e106d1b32e258f379c Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Mon, 22 Apr 2024 23:28:49 +0000 Subject: [PATCH 17/51] only run oneshot once --- .../compression/test_quantization.py | 44 ++++++++++--------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py index bdfdb0d04b1..06e29754220 100644 --- a/tests/sparseml/transformers/compression/test_quantization.py +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -60,34 +60,38 @@ class TestQuantizationMatches(unittest.TestCase): max_seq_length = 512 num_comparisons = 2 - def setUp(self): - self.test_dir = tempfile.mkdtemp() + @classmethod + def setUpClass(cls): + cls.test_dir = tempfile.mkdtemp() - self.model_old = SparseAutoModelForCausalLM.from_pretrained( - self.model_stub, device_map="cuda:0" + cls.model_old = SparseAutoModelForCausalLM.from_pretrained( + cls.model_stub, device_map="cuda:0" ) - self._run_oneshot( - self.model_old, - self.old_recipe, - self.dataset, - os.path.join(self.test_dir, self.old_output), + cls._run_oneshot( + cls.model_old, + cls.old_recipe, + cls.dataset, + os.path.join(cls.test_dir, cls.old_output), ) - self.model_new = SparseAutoModelForCausalLM.from_pretrained( - self.model_stub, device_map="cuda:0" + cls.model_new = SparseAutoModelForCausalLM.from_pretrained( + cls.model_stub, device_map="cuda:0" ) - self._run_oneshot( - self.model_new, - self.new_recipe, - self.dataset, - os.path.join(self.test_dir, self.new_output), + cls._run_oneshot( + cls.model_new, + cls.new_recipe, + cls.dataset, + os.path.join(cls.test_dir, cls.new_output), ) - def tearDown(self): - shutil.rmtree(self.test_dir) + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.test_dir) - def _run_oneshot(self, model, recipe, dataset, output_dir): + @staticmethod + def _run_oneshot(model, recipe, dataset, output_dir): num_calibration_samples = 512 + max_seq_length = 512 pad_to_max_length = False oneshot( @@ -95,7 +99,7 @@ def _run_oneshot(self, model, recipe, dataset, output_dir): dataset=dataset, overwrite_output_dir=True, output_dir=output_dir, - max_seq_length=self.max_seq_length, + max_seq_length=max_seq_length, num_calibration_samples=num_calibration_samples, recipe=recipe, pad_to_max_length=pad_to_max_length, From 7f5babf5483feeb5a907c5d67600256dae96f5bd Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: 
Tue, 23 Apr 2024 12:08:35 +0000 Subject: [PATCH 18/51] all tests passing --- setup.py | 2 +- src/sparseml/pytorch/utils/sparsification.py | 40 +++++++++++++++ .../compressed_tensors_utils.py | 51 +++++++++++++++++-- .../sparsification/sparse_model.py | 7 +-- .../test_compress_tensor_utils.py | 14 ++--- 5 files changed, 98 insertions(+), 16 deletions(-) diff --git a/setup.py b/setup.py index 82a12a9a43e..32e764a05cd 100644 --- a/setup.py +++ b/setup.py @@ -59,7 +59,7 @@ ] _nm_deps = [ f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}", - f"{'compressed-tensors' if is_release else 'compress-tensors-nightly'}~={version_nm_deps}", + f"{'compressed-tensors' if is_release else 'compress-tensors-nightly'}~={version_nm_deps}", # noqa E501 ] _deepsparse_deps = [ f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}" diff --git a/src/sparseml/pytorch/utils/sparsification.py b/src/sparseml/pytorch/utils/sparsification.py index f22750c85c6..24d8eba0a3d 100644 --- a/src/sparseml/pytorch/utils/sparsification.py +++ b/src/sparseml/pytorch/utils/sparsification.py @@ -35,6 +35,7 @@ from torch.nn import Module from tqdm import tqdm +import sparseml.core.session as session_manager from sparseml.pytorch.utils.helpers import ( get_prunable_layers, get_quantizable_layers, @@ -46,11 +47,50 @@ __all__ = [ "ModuleSparsificationInfo", "GradSampler", + "infer_global_sparsity", + "infer_sparsity_structure", ] _LOGGER = logging.getLogger(__name__) +def infer_global_sparsity( + model: Module, state_dict: Optional[Dict[str, torch.Tensor]] = None +) -> float: + """ + Calculates the global percentage of sparse zero weights in the model + :param model: pytorch model to infer sparsity of + :param state_dict: optional state_dict to replace that in model, used for + gathering global FSDP model info + :return: global sparsity of model + """ + + info = ModuleSparsificationInfo(model, state_dict=state_dict) + global_sparsity = info.params_sparse_percent + return global_sparsity + + +def infer_sparsity_structure() -> str: + """ + Determines what sparsity structure, if any, was applied in the currently active + sparse session + :return: sparsity structure as a string + """ + current_session = session_manager.active_session() + stage_modifiers = current_session.lifecycle.modifiers + sparsity_structure = "unstructured" + + # check for applied pruning modifiers + for stage in stage_modifiers: + if stage.applied: + for modifier in stage.modifiers: + if hasattr(modifier, "mask_structure"): + sparsity_structure = modifier.mask_structure + break + + return sparsity_structure + + class ModuleSparsificationInfo: """ Helper class for providing information related to torch Module parameters diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index d1614a00f94..589a5d0157c 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -17,11 +17,15 @@ import os import weakref from functools import wraps -from typing import Optional +from typing import Dict, Optional +import torch from transformers import PreTrainedModel +from transformers.file_utils import CONFIG_NAME + +from compressed_tensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor +from sparseml.pytorch.utils import infer_global_sparsity, infer_sparsity_structure from sparseml.utils.pytorch import qat_active -# TODO: 
additional dependencies on compressed-tensors _LOGGER = logging.getLogger(__name__) @@ -29,6 +33,41 @@ __all__ = ["modify_save_pretrained"] +def infer_config_from_model( + model: torch.nn.Module, + state_dict: Optional[Dict[str, torch.Tensor]] = None, + compress: bool = False, +) -> Optional["CompressionConfig"]: + """ + Determines compression type and informational parameters for a given model + + :param model: pytorch model to calculate sparsity config for + :param state_dict: optional state_dict to replace that in model, used for + gathering global FSDP model info + :param compress: whether or not to compress the model on disk + :return: compression config inferred from the model + """ + + global_sparsity = infer_global_sparsity(model, state_dict=state_dict) + + if global_sparsity < 0.05: + # we do not consider model that have less then 0.05 + # zero weights as sparse + return None + + sparsity_structure = infer_sparsity_structure() + if compress: + format = "sparse_bitmask" + else: + format = "dense_sparsity" + + return CompressionConfig.load_from_registry( + format, + global_sparsity=global_sparsity, + sparsity_structure=sparsity_structure, + ) + + def modify_save_pretrained(model: PreTrainedModel): """ Overrides a PreTrainedModel's save_pretrained() method with a wrapped version that @@ -84,9 +123,11 @@ def save_pretrained_wrapper( ) if sparsity_config is not None: - SparsityConfigMetadata.fill_config_details( - sparsity_config, model, state_dict=state_dict + sparsity_config.global_sparsity = infer_global_sparsity( + model, state_dict=state_dict ) + sparsity_config.sparsity_structure = infer_sparsity_structure() + elif not skip_compression_stats: # try to infer a sparsity config from the model if none is provided _LOGGER.info( @@ -95,7 +136,7 @@ def save_pretrained_wrapper( "calculation of compression statistics set " "skip_compression_stats=True" ) - sparsity_config = SparsityConfigMetadata.infer_config_from_model( + sparsity_config = infer_config_from_model( model, state_dict=state_dict, compress=save_compressed ) diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index a148897ff76..8cd3553a468 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -30,6 +30,10 @@ ) from transformers.file_utils import WEIGHTS_NAME +from compressed_tensors import ( + get_safetensors_folder, + infer_compressor_from_model_config, +) from sparseml.pytorch.model_load.helpers import ( apply_recipe_structure_to_model, log_model_load, @@ -41,9 +45,6 @@ from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe -# TODO: additional dependencies on compressed-tensors - - __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] diff --git a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py index 667bb119841..a706b8cf73d 100644 --- a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py @@ -20,12 +20,12 @@ from transformers import AutoConfig import sparseml.core.session as session_manager +from compressed_tensors import SPARSITY_CONFIG_NAME +from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig +from sparseml.pytorch.utils import infer_global_sparsity, infer_sparsity_structure from 
sparseml.transformers import SparseAutoModelForCausalLM, oneshot -# TODO: additional dependencies on compressed-tensors - - @pytest.mark.parametrize( "compressed,config,dtype", [ @@ -65,9 +65,9 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): tmp_path / "oneshot_out", torch_dtype=dtype ) - inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) + inferred_global_sparsity = infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() + inferred_structure = infer_sparsity_structure() assert inferred_structure == "0:0" model.save_pretrained( @@ -112,9 +112,9 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): model_path = "Xenova/llama2.c-stories15M" model = SparseAutoModelForCausalLM.from_pretrained(model_path) - inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) + inferred_global_sparsity = infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() + inferred_structure = infer_sparsity_structure() assert inferred_structure == "unstructured" model.save_pretrained( From c0d6cb97f95c15171a9f44b1fad84d82d022cd5f Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Tue, 23 Apr 2024 12:11:10 +0000 Subject: [PATCH 19/51] remove unused config --- src/sparseml/transformers/utils/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sparseml/transformers/utils/helpers.py b/src/sparseml/transformers/utils/helpers.py index 944d0bd32ff..cb95d376a75 100644 --- a/src/sparseml/transformers/utils/helpers.py +++ b/src/sparseml/transformers/utils/helpers.py @@ -75,7 +75,7 @@ class TaskNames(Enum): ALL_TASK_NAMES = list(set.union(*[task_names.value for task_names in TaskNames])) ONNX_MODEL_NAME_INTERMEDIATE = "model-orig.onnx" RECIPE_NAME = "recipe.yaml" -SPARSITY_CONFIG_NAME = "sparsity_config" + MANDATORY_DEPLOYMENT_FILES = { ONNX_MODEL_NAME, "tokenizer_config.json", From a59e2af5feb9a78d83abc90e240028635baaefe4 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 23 Apr 2024 13:35:05 +0000 Subject: [PATCH 20/51] reset models on each parameterize --- .../transformers/compression/test_quantization.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py index 06e29754220..701718e31d6 100644 --- a/tests/sparseml/transformers/compression/test_quantization.py +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -58,7 +58,7 @@ class TestQuantizationMatches(unittest.TestCase): old_output = "tiny_llama_old" new_output = "tiny_llama_new" max_seq_length = 512 - num_comparisons = 2 + num_comparisons = 1 @classmethod def setUpClass(cls): @@ -75,7 +75,7 @@ def setUpClass(cls): ) cls.model_new = SparseAutoModelForCausalLM.from_pretrained( - cls.model_stub, device_map="cuda:0" + cls.model_stub, device_map="cuda:1" ) cls._run_oneshot( cls.model_new, @@ -87,6 +87,9 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): shutil.rmtree(cls.test_dir) + del cls.model_new + del cls.model_old + torch.cuda.empty_cache() @staticmethod def _run_oneshot(model, recipe, dataset, output_dir): @@ -163,7 +166,7 @@ def test_quantization_scale_and_zp(self): def test_quantization_reload(self): model_reloaded = 
SparseAutoModelForCausalLM.from_pretrained( - self.test_dir / self.new_output + os.path.join(self.test_dir, self.new_output) ) og_weights, og_inputs = self._get_quant_info_new(self.model_new) @@ -210,11 +213,13 @@ def test_perplexity(self): for idx, sample in enumerate(dataloader): if idx >= self.num_comparisons: break - sample_new = tensors_to_device(sample, "cuda:0") + sample_new = tensors_to_device(sample, "cuda:1") sample_old = tensors_to_device(sample, "cuda:0") output_new = self.model_new(**sample_new) output_old = self.model_old(**sample_old) ppl_ratio = ( torch.exp(output_new.loss).item() / torch.exp(output_old.loss).item() ) - assert abs(1.0 - ppl_ratio) < 0.05 + + # perplexity not more than 5% worse that old quantization method + assert ppl_ratio <= 1.05 From 2a6b0f2fd6df46ec518a26cb9417640e6441565b Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 23 Apr 2024 13:51:39 +0000 Subject: [PATCH 21/51] style --- .../transformers/sparsification/compressed_tensors_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 51f59b74236..7be9e6e2dd2 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -31,7 +31,6 @@ QuantizationConfig, ) from compressed_tensors.quantization.utils import is_model_quantized -from compressed_tensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor from sparseml.pytorch.utils import infer_global_sparsity, infer_sparsity_structure from sparseml.utils.pytorch import qat_active From a4e057527280c3336eebcf0c8da1e39c749c07a7 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Wed, 24 Apr 2024 09:54:17 +0000 Subject: [PATCH 22/51] bring back SparsityConfigMetadata --- src/sparseml/pytorch/utils/sparsification.py | 40 ------ .../compression/sparsity_config.py | 122 ++++++++++++++++++ .../compressed_tensors_utils.py | 52 ++------ .../test_compress_tensor_utils.py | 10 +- 4 files changed, 137 insertions(+), 87 deletions(-) create mode 100644 src/sparseml/transformers/compression/sparsity_config.py diff --git a/src/sparseml/pytorch/utils/sparsification.py b/src/sparseml/pytorch/utils/sparsification.py index 24d8eba0a3d..f22750c85c6 100644 --- a/src/sparseml/pytorch/utils/sparsification.py +++ b/src/sparseml/pytorch/utils/sparsification.py @@ -35,7 +35,6 @@ from torch.nn import Module from tqdm import tqdm -import sparseml.core.session as session_manager from sparseml.pytorch.utils.helpers import ( get_prunable_layers, get_quantizable_layers, @@ -47,50 +46,11 @@ __all__ = [ "ModuleSparsificationInfo", "GradSampler", - "infer_global_sparsity", - "infer_sparsity_structure", ] _LOGGER = logging.getLogger(__name__) -def infer_global_sparsity( - model: Module, state_dict: Optional[Dict[str, torch.Tensor]] = None -) -> float: - """ - Calculates the global percentage of sparse zero weights in the model - :param model: pytorch model to infer sparsity of - :param state_dict: optional state_dict to replace that in model, used for - gathering global FSDP model info - :return: global sparsity of model - """ - - info = ModuleSparsificationInfo(model, state_dict=state_dict) - global_sparsity = info.params_sparse_percent - return global_sparsity - - -def infer_sparsity_structure() -> str: - """ - Determines what sparsity structure, if any, was applied in the currently active - sparse session - :return: sparsity structure 
as a string - """ - current_session = session_manager.active_session() - stage_modifiers = current_session.lifecycle.modifiers - sparsity_structure = "unstructured" - - # check for applied pruning modifiers - for stage in stage_modifiers: - if stage.applied: - for modifier in stage.modifiers: - if hasattr(modifier, "mask_structure"): - sparsity_structure = modifier.mask_structure - break - - return sparsity_structure - - class ModuleSparsificationInfo: """ Helper class for providing information related to torch Module parameters diff --git a/src/sparseml/transformers/compression/sparsity_config.py b/src/sparseml/transformers/compression/sparsity_config.py new file mode 100644 index 00000000000..b04edf333c3 --- /dev/null +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -0,0 +1,122 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Optional + +from torch import Tensor +from torch.nn import Module + +import sparseml.core.session as session_manager +from compressed_tensors import CompressionConfig +from sparseml.pytorch.utils import ModuleSparsificationInfo + + +class SparsityConfigMetadata: + """ + Class of helper functions for filling out a CompressionConfig with readable + metadata from the model + """ + + @staticmethod + def infer_global_sparsity( + model: Module, state_dict: Optional[Dict[str, Tensor]] = None + ) -> float: + """ + Calculates the global percentage of sparse zero weights in the model + + :param model: pytorch model to infer sparsity of + :param state_dict: optional state_dict to replace that in model, used for + gathering global FSDP model info + :return: global sparsity of model + """ + + info = ModuleSparsificationInfo(model, state_dict=state_dict) + global_sparsity = info.params_sparse_percent + return global_sparsity + + @staticmethod + def infer_sparsity_structure() -> str: + """ + Determines what sparsity structure, if any, was applied in the currently active + sparse session + + :return: sparsity structure as a string + """ + current_session = session_manager.active_session() + stage_modifiers = current_session.lifecycle.modifiers + sparsity_structure = "unstructured" + + # check for applied pruning modifiers + for stage in stage_modifiers: + if stage.applied: + for modifier in stage.modifiers: + if hasattr(modifier, "mask_structure"): + sparsity_structure = modifier.mask_structure + break + + return sparsity_structure + + @staticmethod + def infer_config_from_model( + model: Module, + state_dict: Optional[Dict[str, Tensor]] = None, + compress: bool = False, + ) -> Optional["CompressionConfig"]: + """ + Determines compression type and informational parameters for a given model + + :param model: pytorch model to calculate sparsity config for + :param state_dict: optional state_dict to replace that in model, used for + gathering global FSDP model info + :param compress: whether or not to compress the model on disk + :return: compression config inferred from 
the model + """ + + global_sparsity = SparsityConfigMetadata.infer_global_sparsity( + model, state_dict=state_dict + ) + + if global_sparsity < 0.05: + return None + + sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() + if compress: + format = "sparse_bitmask" + else: + format = "dense_sparsity" + + return CompressionConfig.load_from_registry( + format, + global_sparsity=global_sparsity, + sparsity_structure=sparsity_structure, + ) + + @staticmethod + def fill_config_details( + config: CompressionConfig, + model: Module, + state_dict: Optional[Dict[str, Tensor]] = None, + ): + """ + Fills in informational sparsity parameters from a given model + + :param config: sparsity config to fill in + :param model: pytorch model to infer config parameters from + :param state_dict: optional state_dict to replace that in model, used for + gathering global FSDP model info + """ + config.global_sparsity = SparsityConfigMetadata.infer_global_sparsity( + model, state_dict=state_dict + ) + config.sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 589a5d0157c..ab9a7f5f5fc 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -17,14 +17,13 @@ import os import weakref from functools import wraps -from typing import Dict, Optional +from typing import Optional -import torch from transformers import PreTrainedModel from transformers.file_utils import CONFIG_NAME from compressed_tensors import SPARSITY_CONFIG_NAME, CompressionConfig, ModelCompressor -from sparseml.pytorch.utils import infer_global_sparsity, infer_sparsity_structure +from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata from sparseml.utils.pytorch import qat_active @@ -33,41 +32,6 @@ __all__ = ["modify_save_pretrained"] -def infer_config_from_model( - model: torch.nn.Module, - state_dict: Optional[Dict[str, torch.Tensor]] = None, - compress: bool = False, -) -> Optional["CompressionConfig"]: - """ - Determines compression type and informational parameters for a given model - - :param model: pytorch model to calculate sparsity config for - :param state_dict: optional state_dict to replace that in model, used for - gathering global FSDP model info - :param compress: whether or not to compress the model on disk - :return: compression config inferred from the model - """ - - global_sparsity = infer_global_sparsity(model, state_dict=state_dict) - - if global_sparsity < 0.05: - # we do not consider model that have less then 0.05 - # zero weights as sparse - return None - - sparsity_structure = infer_sparsity_structure() - if compress: - format = "sparse_bitmask" - else: - format = "dense_sparsity" - - return CompressionConfig.load_from_registry( - format, - global_sparsity=global_sparsity, - sparsity_structure=sparsity_structure, - ) - - def modify_save_pretrained(model: PreTrainedModel): """ Overrides a PreTrainedModel's save_pretrained() method with a wrapped version that @@ -123,10 +87,14 @@ def save_pretrained_wrapper( ) if sparsity_config is not None: - sparsity_config.global_sparsity = infer_global_sparsity( - model, state_dict=state_dict + sparsity_config.global_sparsity = ( + SparsityConfigMetadata.infer_global_sparsity( + model, state_dict=state_dict + ) + ) + sparsity_config.sparsity_structure = ( + 
SparsityConfigMetadata.infer_sparsity_structure() ) - sparsity_config.sparsity_structure = infer_sparsity_structure() elif not skip_compression_stats: # try to infer a sparsity config from the model if none is provided @@ -136,7 +104,7 @@ def save_pretrained_wrapper( "calculation of compression statistics set " "skip_compression_stats=True" ) - sparsity_config = infer_config_from_model( + sparsity_config = SparsityConfigMetadata.infer_config_from_model( model, state_dict=state_dict, compress=save_compressed ) diff --git a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py index a706b8cf73d..38369617ed7 100644 --- a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py @@ -22,8 +22,8 @@ import sparseml.core.session as session_manager from compressed_tensors import SPARSITY_CONFIG_NAME from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig -from sparseml.pytorch.utils import infer_global_sparsity, infer_sparsity_structure from sparseml.transformers import SparseAutoModelForCausalLM, oneshot +from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata @pytest.mark.parametrize( @@ -65,9 +65,9 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): tmp_path / "oneshot_out", torch_dtype=dtype ) - inferred_global_sparsity = infer_global_sparsity(model) + inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = infer_sparsity_structure() + inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() assert inferred_structure == "0:0" model.save_pretrained( @@ -112,9 +112,9 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): model_path = "Xenova/llama2.c-stories15M" model = SparseAutoModelForCausalLM.from_pretrained(model_path) - inferred_global_sparsity = infer_global_sparsity(model) + inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = infer_sparsity_structure() + inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() assert inferred_structure == "unstructured" model.save_pretrained( From 8ac18e7ab49c283b88ba8d811d3b1c42e32de80b Mon Sep 17 00:00:00 2001 From: dbogunowicz <97082108+dbogunowicz@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:04:31 +0200 Subject: [PATCH 23/51] Update setup.py Co-authored-by: Rahul Tuli --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index c90a4f3be52..0e20eda47c0 100644 --- a/setup.py +++ b/setup.py @@ -59,7 +59,7 @@ ] _nm_deps = [ f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}", - f"{'compressed-tensors' if is_release else 'compress-tensors-nightly'}~={version_nm_deps}", # noqa E501 + f"{'compressed-tensors' if is_release else 'compressed-tensors-nightly'}~={version_nm_deps}", # noqa E501 ] _deepsparse_deps = [ f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}" From de782470d83314377c5a7c49187ef201ca4448af Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 25 Apr 2024 03:17:33 +0000 Subject: [PATCH 24/51] add more comparisons, tighten threshold --- .../compression/test_quantization.py | 35 ++++++++++++------- 1 file changed, 22 
insertions(+), 13 deletions(-) diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py index 701718e31d6..5506b080fe9 100644 --- a/tests/sparseml/transformers/compression/test_quantization.py +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -58,7 +58,7 @@ class TestQuantizationMatches(unittest.TestCase): old_output = "tiny_llama_old" new_output = "tiny_llama_new" max_seq_length = 512 - num_comparisons = 1 + num_comparisons = 512 @classmethod def setUpClass(cls): @@ -159,10 +159,11 @@ def test_quantization_scale_and_zp(self): assert math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3) assert o_zp == n_zp + # allow for error here due to implementation differences for name, (o_scale, o_zp) in old_quant_inputs.items(): n_scale, n_zp = new_quant_inputs[name] - assert math.isclose(o_scale, n_scale, abs_tol=1e-3, rel_tol=1e-3) - assert o_zp == n_zp + assert math.isclose(o_scale, n_scale, abs_tol=1e-1, rel_tol=1e-1) + assert abs(o_zp - n_zp) < 5 def test_quantization_reload(self): model_reloaded = SparseAutoModelForCausalLM.from_pretrained( @@ -210,16 +211,24 @@ def test_perplexity(self): tokenizer = SparseAutoTokenizer.from_pretrained(self.model_stub) dataloader = self._get_dataloader(self.dataset, tokenizer) + self.model_new.to("cpu") + self.model_old.to("cpu") + + total_ppl_old = 0.0 + total_ppl_new = 0.0 + total_non_nan = 0 for idx, sample in enumerate(dataloader): if idx >= self.num_comparisons: break - sample_new = tensors_to_device(sample, "cuda:1") - sample_old = tensors_to_device(sample, "cuda:0") - output_new = self.model_new(**sample_new) - output_old = self.model_old(**sample_old) - ppl_ratio = ( - torch.exp(output_new.loss).item() / torch.exp(output_old.loss).item() - ) - - # perplexity not more than 5% worse that old quantization method - assert ppl_ratio <= 1.05 + output_new = self.model_new(**sample) + output_old = self.model_old(**sample) + if torch.isnan(output_old.loss) and torch.isnan(output_new.loss): + continue + total_ppl_old += torch.exp(output_old.loss).item() + total_ppl_new += torch.exp(output_new.loss).item() + total_non_nan += 1 + + avg_ppl_ratio = (total_ppl_new / total_non_nan) / ( + total_ppl_old / total_non_nan + ) + assert avg_ppl_ratio <= 1.02 From 4041f2ea309fbb5cf9242f68869d34b3f416bfc1 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 25 Apr 2024 13:33:19 +0000 Subject: [PATCH 25/51] use wikitext for perplexity --- .../compression/test_quantization.py | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py index 5506b080fe9..1fc5f1af3c7 100644 --- a/tests/sparseml/transformers/compression/test_quantization.py +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -58,7 +58,7 @@ class TestQuantizationMatches(unittest.TestCase): old_output = "tiny_llama_old" new_output = "tiny_llama_new" max_seq_length = 512 - num_comparisons = 512 + num_comparisons = 64 @classmethod def setUpClass(cls): @@ -162,7 +162,7 @@ def test_quantization_scale_and_zp(self): # allow for error here due to implementation differences for name, (o_scale, o_zp) in old_quant_inputs.items(): n_scale, n_zp = new_quant_inputs[name] - assert math.isclose(o_scale, n_scale, abs_tol=1e-1, rel_tol=1e-1) + assert math.isclose(o_scale, n_scale, abs_tol=1e-2, rel_tol=1e-2) assert abs(o_zp - n_zp) < 5 def 
test_quantization_reload(self): @@ -183,12 +183,7 @@ def test_quantization_reload(self): assert o_scale == n_scale assert o_zp == n_zp - def _get_dataloader(self, dataset_name, tokenizer): - data_args = DataTrainingArguments( - dataset=dataset_name, - max_seq_length=self.max_seq_length, - pad_to_max_length=False, - ) + def _get_dataloader(self, data_args, tokenizer): dataset_manager = TextGenerationDataset.load_from_registry( data_args.dataset, data_args=data_args, @@ -207,12 +202,16 @@ def _get_dataloader(self, dataset_name, tokenizer): return data_loader + @torch.no_grad() def test_perplexity(self): tokenizer = SparseAutoTokenizer.from_pretrained(self.model_stub) - dataloader = self._get_dataloader(self.dataset, tokenizer) - - self.model_new.to("cpu") - self.model_old.to("cpu") + data_args = DataTrainingArguments( + dataset="wikitext", + dataset_config_name="wikitext-2-raw-v1", + max_seq_length=self.max_seq_length, + concatenate_data=True, + ) + dataloader = self._get_dataloader(data_args, tokenizer) total_ppl_old = 0.0 total_ppl_new = 0.0 @@ -220,8 +219,8 @@ def test_perplexity(self): for idx, sample in enumerate(dataloader): if idx >= self.num_comparisons: break - output_new = self.model_new(**sample) - output_old = self.model_old(**sample) + output_new = self.model_new(**tensors_to_device(sample, "cuda:1")) + output_old = self.model_old(**tensors_to_device(sample, "cuda:0")) if torch.isnan(output_old.loss) and torch.isnan(output_new.loss): continue total_ppl_old += torch.exp(output_old.loss).item() From c220772a10360bc07e17e639a513e0f05e67a270 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Thu, 25 Apr 2024 14:58:00 +0000 Subject: [PATCH 26/51] update setup --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 396f600c9a1..45879e467bf 100644 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ ] _nm_deps = [ f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}", - f"{'compressed-tensors' if is_release else 'compressed-tensors-nightly'}~={version_nm_deps}", # noqa E501 + "compressed-tensors", ] _deepsparse_deps = [ f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}" From 2fe554ed8b8285025881316e9265ef4b1d412843 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Thu, 25 Apr 2024 15:31:05 +0000 Subject: [PATCH 27/51] fix import problem --- src/sparseml/transformers/compression/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/sparseml/transformers/compression/__init__.py diff --git a/src/sparseml/transformers/compression/__init__.py b/src/sparseml/transformers/compression/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From 4e0413eb3126b5bcaf8f97715d5641f2ca3ebfa7 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Thu, 25 Apr 2024 15:46:43 +0000 Subject: [PATCH 28/51] fix clearml test --- src/sparseml/transformers/compression/__init__.py | 13 +++++++++++++ tests/sparseml/transformers/test_clear_ml.py | 11 +++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/sparseml/transformers/compression/__init__.py b/src/sparseml/transformers/compression/__init__.py index e69de29bb2d..0c44f887a47 100644 --- a/src/sparseml/transformers/compression/__init__.py +++ b/src/sparseml/transformers/compression/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/sparseml/transformers/test_clear_ml.py b/tests/sparseml/transformers/test_clear_ml.py index c64a765d176..fd21eddc8ca 100644 --- a/tests/sparseml/transformers/test_clear_ml.py +++ b/tests/sparseml/transformers/test_clear_ml.py @@ -11,20 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from pathlib import Path +import pytest import torch + try: from clearml import Task -except Exception as err: - clearml = None + is_clearml = True +except Exception: + is_clearml = False from sparseml.transformers import train -@pytest.mark.skipif(clearml is None, reason="clearML not installed") + +@pytest.mark.skipif(not is_clearml, reason="clearML not installed") def test_finetune_wout_recipe(tmp_path: Path): recipe_str = None model = "Xenova/llama2.c-stories15M" From a98a1931570a0aaa116653130566382285c626a1 Mon Sep 17 00:00:00 2001 From: dbogunowicz Date: Thu, 25 Apr 2024 16:08:13 +0000 Subject: [PATCH 29/51] compressed-tensors are transformers dep --- setup.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 45879e467bf..281aa1d9ded 100644 --- a/setup.py +++ b/setup.py @@ -56,10 +56,7 @@ "protobuf>=3.12.2,<=3.20.3", "click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug ] -_nm_deps = [ - f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}", - "compressed-tensors", -] +_nm_deps = [f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}"] _deepsparse_deps = [ f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}" ] @@ -90,6 +87,7 @@ "evaluate>=0.4.1", "accelerate>=0.20.3", "safetensors>=0.4.1", + "compressed-tensors", ] _llm_deps = _transformers_deps + ["sentencepiece"] _yolov5_deps = _pytorch_vision_deps + [ From f4362cfcdba11d1e0021c61108d3180a9c60607b Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 25 Apr 2024 21:03:39 +0000 Subject: [PATCH 30/51] address PR comments --- .../compressed_tensors_utils.py | 44 ++++++++++++------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 9258cf1f249..ee314bb92d3 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -83,28 +83,42 @@ def save_pretrained_wrapper( # state_dict gets passed in as a kwarg for FSDP models state_dict = kwargs.get("state_dict", None) - if qat_active(model) or is_model_quantized(model): + # check if we are in the old quantization framework + if qat_active(model) and not is_model_quantized(model): _LOGGER.info( - "Compression for quantized models is not yet supported. 
Save will "
-                    "be run without compression and no sparsity statistics will be "
-                    "calculated."
+                    "Compression for models quantized with QuantizationModifier is not "
+                    "supported. Save will be run without compression and no sparsity "
+                    "statistics will be calculated. To save a quantized model in a "
+                    "compressed state please use vLLMQuantizationModifier instead."
 )
 original_save_pretrained.__get__(model, model_class)(
 save_directory, **kwargs
 )
-            if is_model_quantized(model):
-                quant_config = QuantizationConfig.from_pretrained(model)
-                quant_config_data = quant_config.dict()
-                config_file_path = os.path.join(save_directory, CONFIG_NAME)
-
-                # add the sparsity config to the model's config file
-                with open(config_file_path, "r") as config_file:
-                    config_data = json.load(config_file)
-                config_data[QUANTIZATION_CONFIG_NAME] = quant_config_data
-                with open(config_file_path, "w") as config_file:
-                    json.dump(config_data, config_file, indent=2, sort_keys=True)
+                return
+
+            elif qat_active(model):  # quantized in new framework
+                _LOGGER.info(
+                    "Sparsity compression for quantized models is not yet supported. "
+                    "No sparsity statistics will be calculated and no sparsity config "
+                    "will be saved."
+                )
+
+                original_save_pretrained.__get__(model, model_class)(
+                    save_directory, **kwargs
+                )
+
+                quant_config = QuantizationConfig.from_pretrained(model)
+                quant_config_data = quant_config.model_dump(exclude_unset=True)
+                config_file_path = os.path.join(save_directory, CONFIG_NAME)
+
+                # add the sparsity config to the model's config file
+                with open(config_file_path, "r") as config_file:
+                    config_data = json.load(config_file)
+                config_data[QUANTIZATION_CONFIG_NAME] = quant_config_data
+                with open(config_file_path, "w") as config_file:
+                    json.dump(config_data, config_file, indent=2, sort_keys=True)
 return

From ca91c4f6672ea776dd99acb1b641d0c6c346cd9d Mon Sep 17 00:00:00 2001
From: Sara Adkins
Date: Fri, 26 Apr 2024 13:12:00 +0000
Subject: [PATCH 31/51] can't repeat freeze

---
 src/sparseml/modifiers/quantization_vllm/base.py | 2 --
 src/sparseml/modifiers/quantization_vllm/pytorch.py | 5 -----
 2 files changed, 7 deletions(-)

diff --git a/src/sparseml/modifiers/quantization_vllm/base.py b/src/sparseml/modifiers/quantization_vllm/base.py
index 5451c1280a3..6ac61b1c261 100644
--- a/src/sparseml/modifiers/quantization_vllm/base.py
+++ b/src/sparseml/modifiers/quantization_vllm/base.py
@@ -43,14 +43,12 @@ class vLLMQuantizationModifier(Modifier):
 not be updated. Leave None to not disable observers during QAT. Default is None
 :param num_calibration_steps: Number of steps to run post training calibration for.
When None, the entire calibration_dataloader is used - :param post_oneshot_calibration: Whether to rerun calibration on finalization """ config_groups: Dict[str, QuantizationScheme] ignore: List[str] = Field(default_factory=list) disable_quantization_observer_epoch: Optional[float] = None num_calibration_steps: Optional[int] = None - post_oneshot_calibration: Optional[bool] = False def create_init_config(self) -> QuantizationConfig: return QuantizationConfig( diff --git a/src/sparseml/modifiers/quantization_vllm/pytorch.py b/src/sparseml/modifiers/quantization_vllm/pytorch.py index 5b6b7419a60..92314ab6609 100644 --- a/src/sparseml/modifiers/quantization_vllm/pytorch.py +++ b/src/sparseml/modifiers/quantization_vllm/pytorch.py @@ -74,11 +74,6 @@ def on_initialize(self, state: State, **kwargs) -> bool: return True def on_finalize(self, state: State, **kwargs) -> bool: - module = state.model.model - if self.post_oneshot_calibration: - module.apply(set_module_for_calibration) - self._calibrate_if_possible(module) - module.apply(freeze_module_quantization) return True def on_start(self, state: State, event: Event, **kwargs): From c89430530c256b0ae0e10486964f5cc96dec7b4a Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Fri, 26 Apr 2024 15:16:18 +0000 Subject: [PATCH 32/51] UX pr comments --- src/sparseml/transformers/compression/sparsity_config.py | 2 +- .../transformers/sparsification/compressed_tensors_utils.py | 2 +- src/sparseml/transformers/sparsification/sparse_model.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/sparseml/transformers/compression/sparsity_config.py b/src/sparseml/transformers/compression/sparsity_config.py index b04edf333c3..958ddc2b738 100644 --- a/src/sparseml/transformers/compression/sparsity_config.py +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -68,7 +68,7 @@ def infer_sparsity_structure() -> str: return sparsity_structure @staticmethod - def infer_config_from_model( + def from_pretrained( model: Module, state_dict: Optional[Dict[str, Tensor]] = None, compress: bool = False, diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index ee314bb92d3..b6852535a2c 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -140,7 +140,7 @@ def save_pretrained_wrapper( "calculation of compression statistics set " "skip_compression_stats=True" ) - sparsity_config = SparsityConfigMetadata.infer_config_from_model( + sparsity_config = SparsityConfigMetadata.from_pretrained( model, state_dict=state_dict, compress=save_compressed ) diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 2182ee4c33f..d14ec60e9e8 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -30,7 +30,7 @@ ) from transformers.file_utils import WEIGHTS_NAME -from compressed_tensors.compressors import infer_compressor_from_model_config +from compressed_tensors.compressors import ModelCompressor from compressed_tensors.quantization import ( QuantizationConfig, apply_quantization_config, @@ -108,7 +108,7 @@ def skip(*args, **kwargs): ) # determine compression format, if any, from the model config - compressor = infer_compressor_from_model_config(pretrained_model_name_or_path) + compressor = 
ModelCompressor.from_pretrained(pretrained_model_name_or_path) quantization_config = QuantizationConfig.from_model_config( pretrained_model_name_or_path ) From 604c4ef0079688cc0e6f6d5d5b523d21d9b16838 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Mon, 29 Apr 2024 21:55:31 +0000 Subject: [PATCH 33/51] initial commit --- .../compressed_tensors_utils.py | 60 ++----------------- .../sparsification/sparse_model.py | 29 +++------ 2 files changed, 12 insertions(+), 77 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index b6852535a2c..320d9f1e80b 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -83,44 +83,6 @@ def save_pretrained_wrapper( # state_dict gets passed in as a kwarg for FSDP models state_dict = kwargs.get("state_dict", None) - # check if we are in the old quantization framework - if qat_active(model) and not is_model_quantized(model): - _LOGGER.info( - "Compression for models quantized with QuantizationModifer is not " - "supported. Save will be run without compression and no sparsity " - "statistics will be calculated. To save a quantized model in a " - "compressed state please use vLLMQuantizationModifier instead." - ) - - original_save_pretrained.__get__(model, model_class)( - save_directory, **kwargs - ) - - return - - elif qat_active(model): # quantized in new framework - _LOGGER.info( - "Sparsity compression for quantized models is not yet supported. " - "No sparsity statistics will be calculated and no sparsity config " - "will be saved." - ) - - original_save_pretrained.__get__(model, model_class)( - save_directory, **kwargs - ) - - quant_config = QuantizationConfig.from_pretrained(model) - quant_config_data = quant_config.model_dump(exclude_unset=True) - config_file_path = os.path.join(save_directory, CONFIG_NAME) - - # add the sparsity config to the model's config file - with open(config_file_path, "r") as config_file: - config_data = json.load(config_file) - config_data[QUANTIZATION_CONFIG_NAME] = quant_config_data - with open(config_file_path, "w") as config_file: - json.dump(config_data, config_file, indent=2, sort_keys=True) - - return if sparsity_config is not None: sparsity_config.global_sparsity = ( @@ -131,7 +93,6 @@ def save_pretrained_wrapper( sparsity_config.sparsity_structure = ( SparsityConfigMetadata.infer_sparsity_structure() ) - elif not skip_compression_stats: # try to infer a sparsity config from the model if none is provided _LOGGER.info( @@ -144,38 +105,27 @@ def save_pretrained_wrapper( model, state_dict=state_dict, compress=save_compressed ) - if sparsity_config is None: - # model is not sparse, save as dense + compressor = ModelCompressor.from_pretrained_model(model, sparsity_config=sparsity_config) + if compressor is None: + # model is not compressed or quantized, save as normal return original_save_pretrained.__get__(model, model_class)( save_directory, **kwargs ) # if we've gotten to this point we have a config so we can run compression kwargs["safe_serialization"] = True - compressor = ModelCompressor.load_from_registry( - sparsity_config.format, config=sparsity_config - ) - if state_dict is None: state_dict = model.state_dict() # make sure we're on the main process when saving if state_dict is not None and len(state_dict) > 0: - compressed_state_dict = compressor.compress(state_dict) + compressed_state_dict = 
compressor.compress(model, state_dict) kwargs["state_dict"] = compressed_state_dict original_save_pretrained.__get__(model, model_class)( save_directory, **kwargs ) - sparsity_config_data = sparsity_config.dict() - config_file_path = os.path.join(save_directory, CONFIG_NAME) - - # add the sparsity config to the model's config file - with open(config_file_path, "r") as config_file: - config_data = json.load(config_file) - config_data[SPARSITY_CONFIG_NAME] = sparsity_config_data - with open(config_file_path, "w") as config_file: - json.dump(config_data, config_file, indent=2, sort_keys=True) + compressor.update_config(save_directory) save_pretrained_wrapper._overriden = True return save_pretrained_wrapper diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index d14ec60e9e8..0f058438c04 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -107,11 +107,8 @@ def skip(*args, **kwargs): pretrained_model_name_or_path, **kwargs ) - # determine compression format, if any, from the model config + # instantiate compressor from model config compressor = ModelCompressor.from_pretrained(pretrained_model_name_or_path) - quantization_config = QuantizationConfig.from_model_config( - pretrained_model_name_or_path - ) # temporarily set the log level to error, to ignore printing out long missing # and unexpected key error messages (these are EXPECTED for quantized models) @@ -126,27 +123,15 @@ def skip(*args, **kwargs): # override the PreTrainedModel instance with compression save function modify_save_pretrained(model) + if compressor.quantization_config is not None: + # apply structural changes from quantization + apply_quantization_config(model, compressor.quantization_config) + load_pretrained_quantization(model, pretrained_model_name_or_path) + # If model is compressed on disk, decompress and load the weights if compressor is not None: # decompress weights - compressor.overwrite_weights( - model_path=pretrained_model_name_or_path, model=model - ) - - if quantization_config is not None: - # if we loaded from a HF stub, find the cached model - apply_quantization_config(model, quantization_config) - load_pretrained_quantization(model, pretrained_model_name_or_path) - else: - recipe = resolve_recipe( - recipe=recipe, model_path=pretrained_model_name_or_path - ) - if recipe: - apply_recipe_structure_to_model( - model=model, - model_path=pretrained_model_name_or_path, - recipe_path=recipe, - ) + compressor.decompress(model_path=pretrained_model_name_or_path, model=model) return model From 82d3dd88ba3742855e617d4bacda51ea87e604c3 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Mon, 29 Apr 2024 21:57:21 +0000 Subject: [PATCH 34/51] style --- .../compressed_tensors_utils.py | 20 +++++-------------- .../sparsification/sparse_model.py | 8 ++------ 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 320d9f1e80b..f65c451f7f5 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -12,26 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json import logging -import os import weakref from functools import wraps from typing import Optional from transformers import PreTrainedModel -from transformers.file_utils import CONFIG_NAME - -from compressed_tensors import ( - QUANTIZATION_CONFIG_NAME, - SPARSITY_CONFIG_NAME, - CompressionConfig, - ModelCompressor, - QuantizationConfig, -) -from compressed_tensors.quantization.utils import is_model_quantized + +from compressed_tensors import CompressionConfig, ModelCompressor from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata -from sparseml.utils.pytorch import qat_active _LOGGER = logging.getLogger(__name__) @@ -83,7 +72,6 @@ def save_pretrained_wrapper( # state_dict gets passed in as a kwarg for FSDP models state_dict = kwargs.get("state_dict", None) - if sparsity_config is not None: sparsity_config.global_sparsity = ( SparsityConfigMetadata.infer_global_sparsity( @@ -105,7 +93,9 @@ def save_pretrained_wrapper( model, state_dict=state_dict, compress=save_compressed ) - compressor = ModelCompressor.from_pretrained_model(model, sparsity_config=sparsity_config) + compressor = ModelCompressor.from_pretrained_model( + model, sparsity_config=sparsity_config + ) if compressor is None: # model is not compressed or quantized, save as normal return original_save_pretrained.__get__(model, model_class)( diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 0f058438c04..41f7158f11d 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -32,19 +32,15 @@ from compressed_tensors.compressors import ModelCompressor from compressed_tensors.quantization import ( - QuantizationConfig, apply_quantization_config, load_pretrained_quantization, ) -from sparseml.pytorch.model_load.helpers import ( - apply_recipe_structure_to_model, - log_model_load, -) +from sparseml.pytorch.model_load.helpers import log_model_load from sparseml.transformers.sparsification.compressed_tensors_utils import ( modify_save_pretrained, ) from sparseml.transformers.sparsification.modification import modify_model -from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe +from sparseml.transformers.utils.helpers import download_model_directory __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] From b650a8c09770fdd4cd4c1cdcd204dad248556acf Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 30 Apr 2024 02:06:17 +0000 Subject: [PATCH 35/51] skipping unit tests --- .../transformers/compression/sparsity_config.py | 14 +++++++------- .../sparsification/compressed_tensors_utils.py | 4 ++-- .../transformers/sparsification/sparse_model.py | 10 +++++----- .../compression/test_compress_tensor_utils.py | 6 ++++-- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/sparseml/transformers/compression/sparsity_config.py b/src/sparseml/transformers/compression/sparsity_config.py index 958ddc2b738..49716cf435c 100644 --- a/src/sparseml/transformers/compression/sparsity_config.py +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -18,13 +18,13 @@ from torch.nn import Module import sparseml.core.session as session_manager -from compressed_tensors import CompressionConfig +from compressed_tensors import CompressionFormat, SparsityCompressionConfig from sparseml.pytorch.utils import ModuleSparsificationInfo class SparsityConfigMetadata: """ - Class of 
helper functions for filling out a CompressionConfig with readable + Class of helper functions for filling out a SparsityCompressionConfig with readable metadata from the model """ @@ -72,7 +72,7 @@ def from_pretrained( model: Module, state_dict: Optional[Dict[str, Tensor]] = None, compress: bool = False, - ) -> Optional["CompressionConfig"]: + ) -> Optional["SparsityCompressionConfig"]: """ Determines compression type and informational parameters for a given model @@ -92,11 +92,11 @@ def from_pretrained( sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() if compress: - format = "sparse_bitmask" + format = CompressionFormat.sparse_bitmask.value else: - format = "dense_sparsity" + format = CompressionFormat.dense.value - return CompressionConfig.load_from_registry( + return SparsityCompressionConfig.load_from_registry( format, global_sparsity=global_sparsity, sparsity_structure=sparsity_structure, @@ -104,7 +104,7 @@ def from_pretrained( @staticmethod def fill_config_details( - config: CompressionConfig, + config: SparsityCompressionConfig, model: Module, state_dict: Optional[Dict[str, Tensor]] = None, ): diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index f65c451f7f5..676f9823173 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -19,7 +19,7 @@ from transformers import PreTrainedModel -from compressed_tensors import CompressionConfig, ModelCompressor +from compressed_tensors import ModelCompressor, SparsityCompressionConfig from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata @@ -49,7 +49,7 @@ def save_pretrained_compressed(save_pretrained_method): @wraps(original_save_pretrained) def save_pretrained_wrapper( save_directory: str, - sparsity_config: Optional[CompressionConfig] = None, + sparsity_config: Optional[SparsityCompressionConfig] = None, save_compressed: bool = False, skip_compression_stats: bool = False, **kwargs, diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 41f7158f11d..ffbf0672e10 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -119,13 +119,13 @@ def skip(*args, **kwargs): # override the PreTrainedModel instance with compression save function modify_save_pretrained(model) - if compressor.quantization_config is not None: - # apply structural changes from quantization - apply_quantization_config(model, compressor.quantization_config) - load_pretrained_quantization(model, pretrained_model_name_or_path) - # If model is compressed on disk, decompress and load the weights if compressor is not None: + if compressor.quantization_config is not None: + # apply structural changes from quantization + apply_quantization_config(model, compressor.quantization_config) + load_pretrained_quantization(model, pretrained_model_name_or_path) + # decompress weights compressor.decompress(model_path=pretrained_model_name_or_path, model=model) diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py index 38369617ed7..f3d75ecdae8 100644 --- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py +++ 
b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py @@ -20,7 +20,7 @@ from transformers import AutoConfig import sparseml.core.session as session_manager -from compressed_tensors import SPARSITY_CONFIG_NAME +from compressed_tensors import COMPRESSION_CONFIG_NAME, SPARSITY_CONFIG_NAME from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata @@ -59,6 +59,7 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): splits=splits, oneshot_device=device, precision=dtype, + clear_sparse_session=False, ) model = SparseAutoModelForCausalLM.from_pretrained( @@ -77,7 +78,8 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): ) config = AutoConfig.from_pretrained(tmp_path / "compress_out") - sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None) + compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) + sparsity_config = compression_config.get(SPARSITY_CONFIG_NAME, None) assert ( sparsity_config["format"] == "dense" if (not compressed and config is None) From 6a12295eb66c630c678d6d9e0e35268daa78caae Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 30 Apr 2024 15:23:47 +0000 Subject: [PATCH 36/51] tests for quantization --- .../compressed_tensors_utils.py | 5 +- .../sparsification/sparse_model.py | 9 -- .../compression/recipes/new_quant_simple.yaml | 27 ++++++ .../compression/test_compress_tensor_utils.py | 84 ++++++++++++++++++- .../compression/test_quantization.py | 8 +- 5 files changed, 118 insertions(+), 15 deletions(-) create mode 100644 tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 676f9823173..2fa61a0feeb 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -50,6 +50,7 @@ def save_pretrained_compressed(save_pretrained_method): def save_pretrained_wrapper( save_directory: str, sparsity_config: Optional[SparsityCompressionConfig] = None, + quantization_compression: str = None, save_compressed: bool = False, skip_compression_stats: bool = False, **kwargs, @@ -94,7 +95,9 @@ def save_pretrained_wrapper( ) compressor = ModelCompressor.from_pretrained_model( - model, sparsity_config=sparsity_config + model, + sparsity_config=sparsity_config, + quantization_compression=quantization_compression, ) if compressor is None: # model is not compressed or quantized, save as normal diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index ffbf0672e10..ab163776564 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -31,10 +31,6 @@ from transformers.file_utils import WEIGHTS_NAME from compressed_tensors.compressors import ModelCompressor -from compressed_tensors.quantization import ( - apply_quantization_config, - load_pretrained_quantization, -) from sparseml.pytorch.model_load.helpers import log_model_load from sparseml.transformers.sparsification.compressed_tensors_utils import ( modify_save_pretrained, @@ -121,11 +117,6 @@ def skip(*args, **kwargs): # If model is compressed on disk, decompress and load the weights if compressor 
is not None: - if compressor.quantization_config is not None: - # apply structural changes from quantization - apply_quantization_config(model, compressor.quantization_config) - load_pretrained_quantization(model, pretrained_model_name_or_path) - # decompress weights compressor.decompress(model_path=pretrained_model_name_or_path, model=model) diff --git a/tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml b/tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml new file mode 100644 index 00000000000..753605fc1dd --- /dev/null +++ b/tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml @@ -0,0 +1,27 @@ +test_stage: + quant_modifiers: + vLLMQuantizationModifier: + ignore: ["lm_head"] + config_groups: + group_0: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: + num_bits: 8 + type: "int" + symmetric: false + strategy: "tensor" + output_activations: null + targets: ["Linear"] + group_1: + weights: + num_bits: 8 + type: "int" + symmetric: true + strategy: "tensor" + input_activations: null + output_activations: null + targets: ["Embedding"] diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py index f3d75ecdae8..ab7b089b99d 100644 --- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import math import shutil @@ -20,8 +21,13 @@ from transformers import AutoConfig import sparseml.core.session as session_manager -from compressed_tensors import COMPRESSION_CONFIG_NAME, SPARSITY_CONFIG_NAME +from compressed_tensors import ( + COMPRESSION_CONFIG_NAME, + QUANTIZATION_CONFIG_NAME, + SPARSITY_CONFIG_NAME, +) from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig +from compressed_tensors.quantization import QuantizationStatus from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata @@ -131,3 +137,79 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): assert sparsity_config is None shutil.rmtree(tmp_path) + + +@pytest.mark.parametrize( + "compressed,status,dtype", + [ + [True, "dense", torch.float32], + [True, "dense", torch.float16], + [True, "int_quantized", torch.float32], + # [True, "int_quantized", torch.float16], + ], +) +def test_quant_model_reload(compressed, status, dtype, tmp_path): + recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" + model_path = "Xenova/llama2.c-stories15M" + device = "cuda:0" + if not torch.cuda.is_available(): + device = "cpu" + dataset = "open_platypus" + concatenate_data = False + num_calibration_samples = 64 + output_dir = tmp_path / "oneshot_out" + splits = {"calibration": "train[:10%]"} + + # create a quantized model + oneshot( + model=model_path, + dataset=dataset, + output_dir=output_dir, + num_calibration_samples=num_calibration_samples, + recipe=recipe_str, + concatenate_data=concatenate_data, + splits=splits, + oneshot_device=device, + precision=dtype, + ) + + model = SparseAutoModelForCausalLM.from_pretrained( + tmp_path / "oneshot_out", torch_dtype=dtype + ) + + for _, module in model.named_modules(): + if hasattr(module, "quantization_scheme"): + assert 
module.weight.dtype == dtype + assert module.quantization_status == QuantizationStatus.FROZEN + + model_og = copy.deepcopy(model) + model.save_pretrained( + tmp_path / "compress_out", + quantization_compression=status, + save_compressed=compressed, + ) + + config = AutoConfig.from_pretrained(tmp_path / "compress_out") + compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) + quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) + assert quant_config["format"] == status + + dense_model = SparseAutoModelForCausalLM.from_pretrained( + tmp_path / "compress_out", torch_dtype="auto" + ) + + og_state_dict = model_og.state_dict() + reconstructed_state_dict = dense_model.state_dict() + assert len(og_state_dict) == len(reconstructed_state_dict) + for key in og_state_dict.keys(): + dense_tensor = og_state_dict[key] + reconstructed_tensor = reconstructed_state_dict[key] + assert dense_tensor.dtype == reconstructed_tensor.dtype + if key.endswith("weight") and status != "dense": + # we don't expect an exact match for compressed + diff = torch.abs(dense_tensor - reconstructed_tensor) + assert not torch.any(diff > 0.01).item() + else: + assert torch.equal(dense_tensor, reconstructed_tensor) + + shutil.rmtree(tmp_path) diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py index 1fc5f1af3c7..855b89d6cf7 100644 --- a/tests/sparseml/transformers/compression/test_quantization.py +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -44,10 +44,10 @@ "tests/sparseml/transformers/compression/recipes/old_quant_full.yaml", "tests/sparseml/transformers/compression/recipes/new_quant_full.yaml", ), - ( - "tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml", - "tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml", - ), + # ( + # "tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml", + # "tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml", + # ), ], ) class TestQuantizationMatches(unittest.TestCase): From c1e0379929753a6094a3e2b288a432031e77e427 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 30 Apr 2024 17:40:25 +0000 Subject: [PATCH 37/51] reloading unit tests --- .../compressed_tensors_utils.py | 6 +- .../compression/test_compress_tensor_utils.py | 87 ++++++++++++++++--- 2 files changed, 81 insertions(+), 12 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 2fa61a0feeb..8c25ab9f1ba 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -50,7 +50,7 @@ def save_pretrained_compressed(save_pretrained_method): def save_pretrained_wrapper( save_directory: str, sparsity_config: Optional[SparsityCompressionConfig] = None, - quantization_compression: str = None, + quantization_format: str = None, save_compressed: bool = False, skip_compression_stats: bool = False, **kwargs, @@ -63,6 +63,8 @@ def save_pretrained_wrapper( :param save_directory: output directory to save model to :param sparsity_config: optional sparsity config to compress model with, if no config is provided it will be inferred from the model + :param quantization_format: optional compression format for quantized + models. 
If none is provided it will be inferred from the model :param save_compresed: whether or not to compress the model on disk :param skip_compression_stats: whether to skip the calculation of compression statistics (such as global sparsity and sparsity structure) when @@ -97,7 +99,7 @@ def save_pretrained_wrapper( compressor = ModelCompressor.from_pretrained_model( model, sparsity_config=sparsity_config, - quantization_compression=quantization_compression, + quantization_format=quantization_format, ) if compressor is None: # model is not compressed or quantized, save as normal diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py index ab7b089b99d..efe83d81926 100644 --- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py @@ -27,7 +27,12 @@ SPARSITY_CONFIG_NAME, ) from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig -from compressed_tensors.quantization import QuantizationStatus +from compressed_tensors.quantization import ( + QuantizationStatus, + compress_quantized_weights, + freeze_module_quantization, +) +from safetensors import safe_open from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata @@ -140,15 +145,15 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): @pytest.mark.parametrize( - "compressed,status,dtype", + "format,dtype", [ - [True, "dense", torch.float32], - [True, "dense", torch.float16], - [True, "int_quantized", torch.float32], + ["dense", torch.float32], + ["dense", torch.float16], + ["int_quantized", torch.float32], # [True, "int_quantized", torch.float16], ], ) -def test_quant_model_reload(compressed, status, dtype, tmp_path): +def test_quant_model_reload(format, dtype, tmp_path): recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" model_path = "Xenova/llama2.c-stories15M" device = "cuda:0" @@ -185,14 +190,14 @@ def test_quant_model_reload(compressed, status, dtype, tmp_path): model_og = copy.deepcopy(model) model.save_pretrained( tmp_path / "compress_out", - quantization_compression=status, - save_compressed=compressed, + quantization_format=format, + save_compressed=True, ) config = AutoConfig.from_pretrained(tmp_path / "compress_out") compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) - assert quant_config["format"] == status + assert quant_config["format"] == format dense_model = SparseAutoModelForCausalLM.from_pretrained( tmp_path / "compress_out", torch_dtype="auto" @@ -205,7 +210,7 @@ def test_quant_model_reload(compressed, status, dtype, tmp_path): dense_tensor = og_state_dict[key] reconstructed_tensor = reconstructed_state_dict[key] assert dense_tensor.dtype == reconstructed_tensor.dtype - if key.endswith("weight") and status != "dense": + if key.endswith("weight") and format != "dense": # we don't expect an exact match for compressed diff = torch.abs(dense_tensor - reconstructed_tensor) assert not torch.any(diff > 0.01).item() @@ -213,3 +218,65 @@ def test_quant_model_reload(compressed, status, dtype, tmp_path): assert torch.equal(dense_tensor, reconstructed_tensor) shutil.rmtree(tmp_path) + + +@pytest.mark.parametrize( + "status,expected_format,expected_dtype", + [ + [QuantizationStatus.FROZEN, 
"dense", torch.float32], + [QuantizationStatus.COMPRESSED, "int-quantized", torch.int8], + ], +) +def test_quant_infer_format(status, expected_format, expected_dtype, tmp_path): + recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" + model_path = "Xenova/llama2.c-stories15M" + device = "cuda:0" + if not torch.cuda.is_available(): + device = "cpu" + dataset = "open_platypus" + concatenate_data = False + num_calibration_samples = 64 + output_dir = tmp_path / "oneshot_out" + splits = {"calibration": "train[:10%]"} + + model = SparseAutoModelForCausalLM.from_pretrained(model_path) + + # create a quantized model + oneshot( + model=model, + dataset=dataset, + output_dir=output_dir, + num_calibration_samples=num_calibration_samples, + recipe=recipe_str, + concatenate_data=concatenate_data, + splits=splits, + oneshot_device=device, + ) + + if status == QuantizationStatus.FROZEN: + model.apply(freeze_module_quantization) + elif status == QuantizationStatus.COMPRESSED: + model.apply(compress_quantized_weights) + + for _, module in model.named_modules(): + if hasattr(module, "quantization_scheme"): + assert module.quantization_status == status + + model.save_pretrained( + tmp_path / "compress_out", + save_compressed=True, + ) + + config = AutoConfig.from_pretrained(tmp_path / "compress_out") + compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) + quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) + assert quant_config["quantization_status"] == status.value + assert quant_config["format"] == expected_format + + with safe_open( + tmp_path / "compress_out" / "model.safetensors", framework="pt", device=device + ) as f: + test_tensor = f.get_tensor("model.layers.0.mlp.down_proj.weight") + assert test_tensor.dtype == expected_dtype + + shutil.rmtree(tmp_path) From ba397fc0725583dd7436fbb8f5b56af60760c05c Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 30 Apr 2024 17:52:06 +0000 Subject: [PATCH 38/51] backwards compat --- .../compressed_tensors_utils.py | 17 +++++++++++++ .../sparsification/sparse_model.py | 24 +++++++++++++++---- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index 8c25ab9f1ba..6f1543d7e99 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -20,7 +20,9 @@ from transformers import PreTrainedModel from compressed_tensors import ModelCompressor, SparsityCompressionConfig +from compressed_tensors.quantization.utils import is_model_quantized from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata +from sparseml.utils.pytorch import qat_active _LOGGER = logging.getLogger(__name__) @@ -75,6 +77,21 @@ def save_pretrained_wrapper( # state_dict gets passed in as a kwarg for FSDP models state_dict = kwargs.get("state_dict", None) + # check if we are in the old quantization framework + if qat_active(model) and not is_model_quantized(model): + _LOGGER.info( + "Compression for models quantized with QuantizationModifer is not " + "supported. Save will be run without compression and no sparsity " + "statistics will be calculated. To save a quantized model in a " + "compressed state please use vLLMQuantizationModifier instead." 
+ ) + + original_save_pretrained.__get__(model, model_class)( + save_directory, **kwargs + ) + + return + if sparsity_config is not None: sparsity_config.global_sparsity = ( SparsityConfigMetadata.infer_global_sparsity( diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index ab163776564..e2b89652053 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -31,12 +31,15 @@ from transformers.file_utils import WEIGHTS_NAME from compressed_tensors.compressors import ModelCompressor -from sparseml.pytorch.model_load.helpers import log_model_load +from sparseml.pytorch.model_load.helpers import ( + apply_recipe_structure_to_model, + log_model_load, +) from sparseml.transformers.sparsification.compressed_tensors_utils import ( modify_save_pretrained, ) from sparseml.transformers.sparsification.modification import modify_model -from sparseml.transformers.utils.helpers import download_model_directory +from sparseml.transformers.utils.helpers import download_model_directory, resolve_recipe __all__ = ["SparseAutoModel", "SparseAutoModelForCausalLM", "get_shared_tokenizer_src"] @@ -115,10 +118,23 @@ def skip(*args, **kwargs): # override the PreTrainedModel instance with compression save function modify_save_pretrained(model) - # If model is compressed on disk, decompress and load the weights + # If model is quantized or compressed on disk, initialize quantization + # structure and run decompression if compressor is not None: - # decompress weights + # initialize quantization and decompress weights + # TODO: should we move the quantize logic out of compress? compressor.decompress(model_path=pretrained_model_name_or_path, model=model) + else: + # legacy loading for old quantization modifier + recipe = resolve_recipe( + recipe=recipe, model_path=pretrained_model_name_or_path + ) + if recipe: + apply_recipe_structure_to_model( + model=model, + model_path=pretrained_model_name_or_path, + recipe_path=recipe, + ) return model From 0a0ef06ebeabf870155b7756add3843d2d3b695c Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 30 Apr 2024 20:37:23 +0000 Subject: [PATCH 39/51] test updates --- .../transformers/compression/test_compress_tensor_utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py index efe83d81926..f7d666aa855 100644 --- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy import math import shutil @@ -187,7 +186,6 @@ def test_quant_model_reload(format, dtype, tmp_path): assert module.weight.dtype == dtype assert module.quantization_status == QuantizationStatus.FROZEN - model_og = copy.deepcopy(model) model.save_pretrained( tmp_path / "compress_out", quantization_format=format, @@ -203,7 +201,7 @@ def test_quant_model_reload(format, dtype, tmp_path): tmp_path / "compress_out", torch_dtype="auto" ) - og_state_dict = model_og.state_dict() + og_state_dict = model.state_dict() reconstructed_state_dict = dense_model.state_dict() assert len(og_state_dict) == len(reconstructed_state_dict) for key in og_state_dict.keys(): From b03d138666564766db20117245981ba41f3d5bcc Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 30 Apr 2024 21:01:07 +0000 Subject: [PATCH 40/51] update format --- .../compression/quantization_format.py | 47 +++++++++++++++++++ .../compressed_tensors_utils.py | 8 ++++ 2 files changed, 55 insertions(+) create mode 100644 src/sparseml/transformers/compression/quantization_format.py diff --git a/src/sparseml/transformers/compression/quantization_format.py b/src/sparseml/transformers/compression/quantization_format.py new file mode 100644 index 00000000000..7bea0ca9637 --- /dev/null +++ b/src/sparseml/transformers/compression/quantization_format.py @@ -0,0 +1,47 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+from typing import Optional
+
+from compressed_tensors import CompressionFormat
+from compressed_tensors.quantization.utils import is_model_quantized
+
+
+__all__ = ["infer_quantization_format"]
+
+
+def infer_quantization_format(
+    model, quantization_format: Optional[str] = None, save_compressed: bool = False
+) -> str:
+    """
+    Infers a quantization format based on model state and compression args
+
+    :param model: model to check for quantization, if the model is not quantized no
+        quantization format is returned
+    :param quantization_format: user provided quantization format, supersedes any
+        inferred quantization format
+    :param save_compressed: used to infer a quantization format if None is provided
+    :return: compression format appropriate for model
+    """
+    if not is_model_quantized(model):
+        return None
+
+    if quantization_format is not None:
+        return quantization_format
+
+    if save_compressed:
+        return CompressionFormat.int_quantized
+    else:
+        return CompressionFormat.dense
diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py
index 6f1543d7e99..c62a1eb9bf9 100644
--- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py
+++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py
@@ -21,6 +21,9 @@
 from compressed_tensors import ModelCompressor, SparsityCompressionConfig
 from compressed_tensors.quantization.utils import is_model_quantized
+from sparseml.transformers.compression.quantization_format import (
+    infer_quantization_format,
+)
 from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata
 from sparseml.utils.pytorch import qat_active
@@ -113,6 +116,11 @@ def save_pretrained_wrapper(
 model, state_dict=state_dict, compress=save_compressed
 )
+            quantization_format = infer_quantization_format(
+                model=model,
+                quantization_format=quantization_format,
+                save_compressed=save_compressed,
+            )
 compressor = ModelCompressor.from_pretrained_model(
 model,
 sparsity_config=sparsity_config,

From 2931d8e168787c18f622b538434da07beffc72eb Mon Sep 17 00:00:00 2001
From: Sara Adkins
Date: Wed, 1 May 2024 14:15:09 +0000
Subject: [PATCH 41/51] fix inferring

---
 .../transformers/compression/quantization_format.py | 3 ++-
 .../compression/test_compress_tensor_utils.py | 5 +----
 .../transformers/compression/test_quantization.py | 8 ++++----
 3 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/src/sparseml/transformers/compression/quantization_format.py b/src/sparseml/transformers/compression/quantization_format.py
index 7bea0ca9637..4ce883f89c0 100644
--- a/src/sparseml/transformers/compression/quantization_format.py
+++ b/src/sparseml/transformers/compression/quantization_format.py
@@ -44,4 +44,5 @@ def infer_quantization_format(
 if save_compressed:
 return CompressionFormat.int_quantized
 else:
-        return CompressionFormat.dense
+        # format will be inferred from config
+        return None
diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py
index f7d666aa855..20bc724f617 100644
--- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py
+++ b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py
@@ -260,10 +260,7 @@ def test_quant_infer_format(status, expected_format, expected_dtype, tmp_path):
 if hasattr(module, "quantization_scheme"):
 assert module.quantization_status == status
-    model.save_pretrained(
-
tmp_path / "compress_out", - save_compressed=True, - ) + model.save_pretrained(tmp_path / "compress_out") config = AutoConfig.from_pretrained(tmp_path / "compress_out") compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) diff --git a/tests/sparseml/transformers/compression/test_quantization.py b/tests/sparseml/transformers/compression/test_quantization.py index 855b89d6cf7..1fc5f1af3c7 100644 --- a/tests/sparseml/transformers/compression/test_quantization.py +++ b/tests/sparseml/transformers/compression/test_quantization.py @@ -44,10 +44,10 @@ "tests/sparseml/transformers/compression/recipes/old_quant_full.yaml", "tests/sparseml/transformers/compression/recipes/new_quant_full.yaml", ), - # ( - # "tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml", - # "tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml", - # ), + ( + "tests/sparseml/transformers/compression/recipes/old_quant_weight.yaml", + "tests/sparseml/transformers/compression/recipes/new_quant_weight.yaml", + ), ], ) class TestQuantizationMatches(unittest.TestCase): From 90795bda37c891be62dbeeea7f108050461bdeaa Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 1 May 2024 16:21:01 +0000 Subject: [PATCH 42/51] quality --- .../transformers/sparsification/compressed_tensors_utils.py | 1 - src/sparseml/transformers/sparsification/sparse_model.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index e32b48bafdc..b6852535a2c 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -122,7 +122,6 @@ def save_pretrained_wrapper( return - if sparsity_config is not None: sparsity_config.global_sparsity = ( SparsityConfigMetadata.infer_global_sparsity( diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index e7852bc23bc..995b349f513 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -36,6 +36,7 @@ apply_quantization_config, load_pretrained_quantization, ) +from sparseml.modifiers.quantization.modification import modify_model from sparseml.pytorch.model_load.helpers import ( apply_recipe_structure_to_model, log_model_load, @@ -152,8 +153,6 @@ class SparseAutoModel: Factory class for creating sparse models using transformers AutoModel classes """ - from sparseml.modifiers.quantization.modification import modify_model - @staticmethod def masked_language_modeling_from_pretrained( model_name_or_path: str, From bf7d0f6e6268b34f382e37392f305e1d4f5b7dc9 Mon Sep 17 00:00:00 2001 From: George Ohashi Date: Wed, 1 May 2024 21:20:00 +0000 Subject: [PATCH 43/51] shape consistency --- src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py index e9fca78e743..2b439862b4e 100644 --- a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py +++ b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py @@ -179,10 +179,22 @@ def fasterprune( fake_quantize, ) + while scale.ndim < 2: + scale = scale.unsqueeze(1) + zero_point = zero_point.unsqueeze(1) + + while q.ndim < 2: + q = q.unsqueeze(1) q = fake_quantize( - q, scale, 
zero_point, self.layer.quantization_scheme.weights + q, + scale[:, i], + zero_point[:, i], + self.layer.quantization_scheme.weights, ) + while q.ndim != 1: + q.squeeze() + Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d**2 From 2432cf49dbfe26594e32878e1446413cfd7067b2 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 2 May 2024 18:50:39 +0000 Subject: [PATCH 44/51] address PR comments --- .../modifiers/quantization_vllm/base.py | 4 +- .../modifiers/quantization_vllm/pytorch.py | 27 ++-- .../compression/test_compress_tensor_utils.py | 131 ------------------ 3 files changed, 18 insertions(+), 144 deletions(-) delete mode 100644 tests/sparseml/transformers/compression/test_compress_tensor_utils.py diff --git a/src/sparseml/modifiers/quantization_vllm/base.py b/src/sparseml/modifiers/quantization_vllm/base.py index 6ac61b1c261..c8b2522ecee 100644 --- a/src/sparseml/modifiers/quantization_vllm/base.py +++ b/src/sparseml/modifiers/quantization_vllm/base.py @@ -35,9 +35,9 @@ class vLLMQuantizationModifier(Modifier): modifier will be enabled until training is completed. :param config_groups: dictionary specifying quantization schemes to apply to target - modules. Modules not matching a scheme target will NOT be quantized. + modules. Modules not matching a scheme target will NOT be quantized. :param ignore: optional list of module class names or submodule names to not - quantize even if they match a target in config_groups. Defaults to empty list. + quantize even if they match a target in config_groups. Defaults to empty list. :param disable_quantization_observer_epoch: Epoch to disable updates to the module quantization observers. At this point, quantized weights and zero points will not be updated. Leave None to not disable observers during QAT. Default is None diff --git a/src/sparseml/modifiers/quantization_vllm/pytorch.py b/src/sparseml/modifiers/quantization_vllm/pytorch.py index 92314ab6609..a6e7f179525 100644 --- a/src/sparseml/modifiers/quantization_vllm/pytorch.py +++ b/src/sparseml/modifiers/quantization_vllm/pytorch.py @@ -32,17 +32,22 @@ class vLLMQuantizationModifierPyTorch(vLLMQuantizationModifier): """ - Pytorch-specific implementation of quantization modifier - - :param scheme: Default QuantizationScheme to use when enabling quantization - in a module. May also be a dictionary to be loaded into the QuantizationScheme - class. A string alias may also be used, supported aliases: - ['default', 'deepsparse', 'tensorrt']. - If None, the default scheme (`QuantizationScheme()`) will be used. - Default is None - :param scheme_overrides: optional mapping of module type names or submodule type - names to quantization schemes to override them with. If a scheme is mapped to - 'default', then it will use the scheme set in the mo difier scheme property + PyTorch specific implementation of vLLMQuantizationModifier + + Enables post training quantization (PTQ) and quantization aware training (QAT) for a + given module or its submodules. After calibration (PTQ) or the start epoch (QAT), + the specified module(s) forward pass will emulate quantized execution and the + modifier will be enabled until training is completed. + + :param config_groups: dictionary specifying quantization schemes to apply to target + modules. Modules not matching a scheme target will NOT be quantized. + :param ignore: optional list of module class names or submodule names to not + quantize even if they match a target in config_groups. Defaults to empty list. 
+ :param disable_quantization_observer_epoch: Epoch to disable updates to the module + quantization observers. At this point, quantized weights and zero points will + not be updated. Leave None to not disable observers during QAT. Default is None + :param num_calibration_steps: Number of steps to run post training calibration for. + When None, the entire calibration_dataloader is used """ calibration_dataloader_: Any = None diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py deleted file mode 100644 index 38369617ed7..00000000000 --- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -import shutil - -import pytest -import torch -from transformers import AutoConfig - -import sparseml.core.session as session_manager -from compressed_tensors import SPARSITY_CONFIG_NAME -from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig -from sparseml.transformers import SparseAutoModelForCausalLM, oneshot -from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata - - -@pytest.mark.parametrize( - "compressed,config,dtype", - [ - [True, None, torch.float32], - [False, DenseSparsityConfig(), torch.float16], - [True, BitmaskConfig(), torch.bfloat16], - [False, BitmaskConfig(), torch.float32], - [False, None, torch.float16], - ], -) -def test_sparse_model_reload(compressed, config, dtype, tmp_path): - recipe_str = "tests/sparseml/transformers/obcq/test_tiny2.yaml" - model_path = "Xenova/llama2.c-stories15M" - device = "cuda:0" - if not torch.cuda.is_available(): - device = "cpu" - dataset = "open_platypus" - concatenate_data = False - num_calibration_samples = 64 - output_dir = tmp_path / "oneshot_out" - splits = {"calibration": "train[:10%]"} - - # create a sparse model - oneshot( - model=model_path, - dataset=dataset, - output_dir=output_dir, - num_calibration_samples=num_calibration_samples, - recipe=recipe_str, - concatenate_data=concatenate_data, - splits=splits, - oneshot_device=device, - precision=dtype, - ) - - model = SparseAutoModelForCausalLM.from_pretrained( - tmp_path / "oneshot_out", torch_dtype=dtype - ) - - inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) - assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() - assert inferred_structure == "0:0" - - model.save_pretrained( - tmp_path / "compress_out", - sparsity_config=config, - save_compressed=compressed, - ) - - config = AutoConfig.from_pretrained(tmp_path / "compress_out") - sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None) - assert ( - sparsity_config["format"] == "dense" - if (not compressed and config is None) - else "sparse_bitmask" - ) - assert sparsity_config["global_sparsity"] 
== inferred_global_sparsity - assert sparsity_config["sparsity_structure"] == inferred_structure - - dense_model = SparseAutoModelForCausalLM.from_pretrained( - tmp_path / "compress_out", torch_dtype="auto" - ) - - og_state_dict = model.state_dict() - reconstructed_state_dict = dense_model.state_dict() - assert len(og_state_dict) == len(reconstructed_state_dict) - for key in og_state_dict.keys(): - dense_tensor = og_state_dict[key] - reconstructed_tensor = reconstructed_state_dict[key] - assert dense_tensor.dtype == reconstructed_tensor.dtype == dtype - assert torch.equal(dense_tensor, reconstructed_tensor) - - shutil.rmtree(tmp_path) - - -@pytest.mark.parametrize( - "skip_compression_stats,save_compressed", - [[True, True], [True, False], [False, True], [False, False]], -) -def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): - session_manager.active_session().reset() - - model_path = "Xenova/llama2.c-stories15M" - model = SparseAutoModelForCausalLM.from_pretrained(model_path) - - inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) - assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() - assert inferred_structure == "unstructured" - - model.save_pretrained( - tmp_path / "dense_out", - skip_compression_stats=skip_compression_stats, - save_compressed=save_compressed, - ) - - # for models with 0% sparsity no sparsity config is saved regardless - config = AutoConfig.from_pretrained(tmp_path / "dense_out") - sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None) - assert sparsity_config is None - - shutil.rmtree(tmp_path) From 24437c7ef34fad1eae10ccf08333fb3b19d411f8 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Fri, 3 May 2024 13:58:19 +0000 Subject: [PATCH 45/51] PR comments --- .../compression/quantization_format.py | 4 +- .../sparsification/sparse_model.py | 6 - .../compression/test_compress_tensor_utils.py | 277 ------------------ .../test_compress_tensor_utils.py | 150 +++++++++- 4 files changed, 150 insertions(+), 287 deletions(-) delete mode 100644 tests/sparseml/transformers/compression/test_compress_tensor_utils.py diff --git a/src/sparseml/transformers/compression/quantization_format.py b/src/sparseml/transformers/compression/quantization_format.py index 4ce883f89c0..5f8f8722753 100644 --- a/src/sparseml/transformers/compression/quantization_format.py +++ b/src/sparseml/transformers/compression/quantization_format.py @@ -29,9 +29,9 @@ def infer_quantization_format( Infers a quantization format based on model state and compression args :param model: model to check for quantization, if the model is not quantized no - quantization format is returned + quantization format is returned :param quantization_format: user provided quantization format, supercedes any - inferred quantization format + inferred quantization format :param save_compressed: used to infer a quantization format if None is provided :return compression format appropriate for model """ diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 23fb380ebb2..76e75862fff 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -31,11 +31,6 @@ from transformers.file_utils import WEIGHTS_NAME from compressed_tensors.compressors import ModelCompressor -from compressed_tensors.quantization import ( - QuantizationConfig, - 
apply_quantization_config, - load_pretrained_quantization, -) from sparseml.modifiers.quantization.modification import modify_model from sparseml.pytorch.model_load.helpers import ( apply_recipe_structure_to_model, @@ -124,7 +119,6 @@ def skip(*args, **kwargs): # structure and run decompression if compressor is not None: # initialize quantization and decompress weights - # TODO: should we move the quantize logic out of compress? compressor.decompress(model_path=pretrained_model_name_or_path, model=model) else: # legacy loading for old quantization modifier diff --git a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py b/tests/sparseml/transformers/compression/test_compress_tensor_utils.py deleted file mode 100644 index 20bc724f617..00000000000 --- a/tests/sparseml/transformers/compression/test_compress_tensor_utils.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -import shutil - -import pytest -import torch -from transformers import AutoConfig - -import sparseml.core.session as session_manager -from compressed_tensors import ( - COMPRESSION_CONFIG_NAME, - QUANTIZATION_CONFIG_NAME, - SPARSITY_CONFIG_NAME, -) -from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig -from compressed_tensors.quantization import ( - QuantizationStatus, - compress_quantized_weights, - freeze_module_quantization, -) -from safetensors import safe_open -from sparseml.transformers import SparseAutoModelForCausalLM, oneshot -from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata - - -@pytest.mark.parametrize( - "compressed,config,dtype", - [ - [True, None, torch.float32], - [False, DenseSparsityConfig(), torch.float16], - [True, BitmaskConfig(), torch.bfloat16], - [False, BitmaskConfig(), torch.float32], - [False, None, torch.float16], - ], -) -def test_sparse_model_reload(compressed, config, dtype, tmp_path): - recipe_str = "tests/sparseml/transformers/obcq/test_tiny2.yaml" - model_path = "Xenova/llama2.c-stories15M" - device = "cuda:0" - if not torch.cuda.is_available(): - device = "cpu" - dataset = "open_platypus" - concatenate_data = False - num_calibration_samples = 64 - output_dir = tmp_path / "oneshot_out" - splits = {"calibration": "train[:10%]"} - - # create a sparse model - oneshot( - model=model_path, - dataset=dataset, - output_dir=output_dir, - num_calibration_samples=num_calibration_samples, - recipe=recipe_str, - concatenate_data=concatenate_data, - splits=splits, - oneshot_device=device, - precision=dtype, - clear_sparse_session=False, - ) - - model = SparseAutoModelForCausalLM.from_pretrained( - tmp_path / "oneshot_out", torch_dtype=dtype - ) - - inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) - assert math.isclose(inferred_global_sparsity, 19.6562, rel_tol=1e-3) - inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() - assert inferred_structure == "0:0" - - 
model.save_pretrained( - tmp_path / "compress_out", - sparsity_config=config, - save_compressed=compressed, - ) - - config = AutoConfig.from_pretrained(tmp_path / "compress_out") - compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) - sparsity_config = compression_config.get(SPARSITY_CONFIG_NAME, None) - assert ( - sparsity_config["format"] == "dense" - if (not compressed and config is None) - else "sparse_bitmask" - ) - assert sparsity_config["global_sparsity"] == inferred_global_sparsity - assert sparsity_config["sparsity_structure"] == inferred_structure - - dense_model = SparseAutoModelForCausalLM.from_pretrained( - tmp_path / "compress_out", torch_dtype="auto" - ) - - og_state_dict = model.state_dict() - reconstructed_state_dict = dense_model.state_dict() - assert len(og_state_dict) == len(reconstructed_state_dict) - for key in og_state_dict.keys(): - dense_tensor = og_state_dict[key] - reconstructed_tensor = reconstructed_state_dict[key] - assert dense_tensor.dtype == reconstructed_tensor.dtype == dtype - assert torch.equal(dense_tensor, reconstructed_tensor) - - shutil.rmtree(tmp_path) - - -@pytest.mark.parametrize( - "skip_compression_stats,save_compressed", - [[True, True], [True, False], [False, True], [False, False]], -) -def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): - session_manager.active_session().reset() - - model_path = "Xenova/llama2.c-stories15M" - model = SparseAutoModelForCausalLM.from_pretrained(model_path) - - inferred_global_sparsity = SparsityConfigMetadata.infer_global_sparsity(model) - assert math.isclose(inferred_global_sparsity, 0.0, rel_tol=1e-3) - inferred_structure = SparsityConfigMetadata.infer_sparsity_structure() - assert inferred_structure == "unstructured" - - model.save_pretrained( - tmp_path / "dense_out", - skip_compression_stats=skip_compression_stats, - save_compressed=save_compressed, - ) - - # for models with 0% sparsity no sparsity config is saved regardless - config = AutoConfig.from_pretrained(tmp_path / "dense_out") - sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None) - assert sparsity_config is None - - shutil.rmtree(tmp_path) - - -@pytest.mark.parametrize( - "format,dtype", - [ - ["dense", torch.float32], - ["dense", torch.float16], - ["int_quantized", torch.float32], - # [True, "int_quantized", torch.float16], - ], -) -def test_quant_model_reload(format, dtype, tmp_path): - recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" - model_path = "Xenova/llama2.c-stories15M" - device = "cuda:0" - if not torch.cuda.is_available(): - device = "cpu" - dataset = "open_platypus" - concatenate_data = False - num_calibration_samples = 64 - output_dir = tmp_path / "oneshot_out" - splits = {"calibration": "train[:10%]"} - - # create a quantized model - oneshot( - model=model_path, - dataset=dataset, - output_dir=output_dir, - num_calibration_samples=num_calibration_samples, - recipe=recipe_str, - concatenate_data=concatenate_data, - splits=splits, - oneshot_device=device, - precision=dtype, - ) - - model = SparseAutoModelForCausalLM.from_pretrained( - tmp_path / "oneshot_out", torch_dtype=dtype - ) - - for _, module in model.named_modules(): - if hasattr(module, "quantization_scheme"): - assert module.weight.dtype == dtype - assert module.quantization_status == QuantizationStatus.FROZEN - - model.save_pretrained( - tmp_path / "compress_out", - quantization_format=format, - save_compressed=True, - ) - - config = AutoConfig.from_pretrained(tmp_path / "compress_out") 
- compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) - quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) - assert quant_config["format"] == format - - dense_model = SparseAutoModelForCausalLM.from_pretrained( - tmp_path / "compress_out", torch_dtype="auto" - ) - - og_state_dict = model.state_dict() - reconstructed_state_dict = dense_model.state_dict() - assert len(og_state_dict) == len(reconstructed_state_dict) - for key in og_state_dict.keys(): - dense_tensor = og_state_dict[key] - reconstructed_tensor = reconstructed_state_dict[key] - assert dense_tensor.dtype == reconstructed_tensor.dtype - if key.endswith("weight") and format != "dense": - # we don't expect an exact match for compressed - diff = torch.abs(dense_tensor - reconstructed_tensor) - assert not torch.any(diff > 0.01).item() - else: - assert torch.equal(dense_tensor, reconstructed_tensor) - - shutil.rmtree(tmp_path) - - -@pytest.mark.parametrize( - "status,expected_format,expected_dtype", - [ - [QuantizationStatus.FROZEN, "dense", torch.float32], - [QuantizationStatus.COMPRESSED, "int-quantized", torch.int8], - ], -) -def test_quant_infer_format(status, expected_format, expected_dtype, tmp_path): - recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" - model_path = "Xenova/llama2.c-stories15M" - device = "cuda:0" - if not torch.cuda.is_available(): - device = "cpu" - dataset = "open_platypus" - concatenate_data = False - num_calibration_samples = 64 - output_dir = tmp_path / "oneshot_out" - splits = {"calibration": "train[:10%]"} - - model = SparseAutoModelForCausalLM.from_pretrained(model_path) - - # create a quantized model - oneshot( - model=model, - dataset=dataset, - output_dir=output_dir, - num_calibration_samples=num_calibration_samples, - recipe=recipe_str, - concatenate_data=concatenate_data, - splits=splits, - oneshot_device=device, - ) - - if status == QuantizationStatus.FROZEN: - model.apply(freeze_module_quantization) - elif status == QuantizationStatus.COMPRESSED: - model.apply(compress_quantized_weights) - - for _, module in model.named_modules(): - if hasattr(module, "quantization_scheme"): - assert module.quantization_status == status - - model.save_pretrained(tmp_path / "compress_out") - - config = AutoConfig.from_pretrained(tmp_path / "compress_out") - compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) - quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) - assert quant_config["quantization_status"] == status.value - assert quant_config["format"] == expected_format - - with safe_open( - tmp_path / "compress_out" / "model.safetensors", framework="pt", device=device - ) as f: - test_tensor = f.get_tensor("model.layers.0.mlp.down_proj.weight") - assert test_tensor.dtype == expected_dtype - - shutil.rmtree(tmp_path) diff --git a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py index 38369617ed7..20bc724f617 100644 --- a/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py +++ b/tests/sparseml/transformers/sparsification/test_compress_tensor_utils.py @@ -20,8 +20,18 @@ from transformers import AutoConfig import sparseml.core.session as session_manager -from compressed_tensors import SPARSITY_CONFIG_NAME +from compressed_tensors import ( + COMPRESSION_CONFIG_NAME, + QUANTIZATION_CONFIG_NAME, + SPARSITY_CONFIG_NAME, +) from compressed_tensors.config import BitmaskConfig, DenseSparsityConfig +from 
compressed_tensors.quantization import ( + QuantizationStatus, + compress_quantized_weights, + freeze_module_quantization, +) +from safetensors import safe_open from sparseml.transformers import SparseAutoModelForCausalLM, oneshot from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata @@ -59,6 +69,7 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): splits=splits, oneshot_device=device, precision=dtype, + clear_sparse_session=False, ) model = SparseAutoModelForCausalLM.from_pretrained( @@ -77,7 +88,8 @@ def test_sparse_model_reload(compressed, config, dtype, tmp_path): ) config = AutoConfig.from_pretrained(tmp_path / "compress_out") - sparsity_config = getattr(config, SPARSITY_CONFIG_NAME, None) + compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) + sparsity_config = compression_config.get(SPARSITY_CONFIG_NAME, None) assert ( sparsity_config["format"] == "dense" if (not compressed and config is None) @@ -129,3 +141,137 @@ def test_dense_model_save(tmp_path, skip_compression_stats, save_compressed): assert sparsity_config is None shutil.rmtree(tmp_path) + + +@pytest.mark.parametrize( + "format,dtype", + [ + ["dense", torch.float32], + ["dense", torch.float16], + ["int_quantized", torch.float32], + # [True, "int_quantized", torch.float16], + ], +) +def test_quant_model_reload(format, dtype, tmp_path): + recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" + model_path = "Xenova/llama2.c-stories15M" + device = "cuda:0" + if not torch.cuda.is_available(): + device = "cpu" + dataset = "open_platypus" + concatenate_data = False + num_calibration_samples = 64 + output_dir = tmp_path / "oneshot_out" + splits = {"calibration": "train[:10%]"} + + # create a quantized model + oneshot( + model=model_path, + dataset=dataset, + output_dir=output_dir, + num_calibration_samples=num_calibration_samples, + recipe=recipe_str, + concatenate_data=concatenate_data, + splits=splits, + oneshot_device=device, + precision=dtype, + ) + + model = SparseAutoModelForCausalLM.from_pretrained( + tmp_path / "oneshot_out", torch_dtype=dtype + ) + + for _, module in model.named_modules(): + if hasattr(module, "quantization_scheme"): + assert module.weight.dtype == dtype + assert module.quantization_status == QuantizationStatus.FROZEN + + model.save_pretrained( + tmp_path / "compress_out", + quantization_format=format, + save_compressed=True, + ) + + config = AutoConfig.from_pretrained(tmp_path / "compress_out") + compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) + quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) + assert quant_config["format"] == format + + dense_model = SparseAutoModelForCausalLM.from_pretrained( + tmp_path / "compress_out", torch_dtype="auto" + ) + + og_state_dict = model.state_dict() + reconstructed_state_dict = dense_model.state_dict() + assert len(og_state_dict) == len(reconstructed_state_dict) + for key in og_state_dict.keys(): + dense_tensor = og_state_dict[key] + reconstructed_tensor = reconstructed_state_dict[key] + assert dense_tensor.dtype == reconstructed_tensor.dtype + if key.endswith("weight") and format != "dense": + # we don't expect an exact match for compressed + diff = torch.abs(dense_tensor - reconstructed_tensor) + assert not torch.any(diff > 0.01).item() + else: + assert torch.equal(dense_tensor, reconstructed_tensor) + + shutil.rmtree(tmp_path) + + +@pytest.mark.parametrize( + "status,expected_format,expected_dtype", + [ + [QuantizationStatus.FROZEN, 
"dense", torch.float32], + [QuantizationStatus.COMPRESSED, "int-quantized", torch.int8], + ], +) +def test_quant_infer_format(status, expected_format, expected_dtype, tmp_path): + recipe_str = "tests/sparseml/transformers/compression/recipes/new_quant_simple.yaml" + model_path = "Xenova/llama2.c-stories15M" + device = "cuda:0" + if not torch.cuda.is_available(): + device = "cpu" + dataset = "open_platypus" + concatenate_data = False + num_calibration_samples = 64 + output_dir = tmp_path / "oneshot_out" + splits = {"calibration": "train[:10%]"} + + model = SparseAutoModelForCausalLM.from_pretrained(model_path) + + # create a quantized model + oneshot( + model=model, + dataset=dataset, + output_dir=output_dir, + num_calibration_samples=num_calibration_samples, + recipe=recipe_str, + concatenate_data=concatenate_data, + splits=splits, + oneshot_device=device, + ) + + if status == QuantizationStatus.FROZEN: + model.apply(freeze_module_quantization) + elif status == QuantizationStatus.COMPRESSED: + model.apply(compress_quantized_weights) + + for _, module in model.named_modules(): + if hasattr(module, "quantization_scheme"): + assert module.quantization_status == status + + model.save_pretrained(tmp_path / "compress_out") + + config = AutoConfig.from_pretrained(tmp_path / "compress_out") + compression_config = getattr(config, COMPRESSION_CONFIG_NAME, None) + quant_config = compression_config.get(QUANTIZATION_CONFIG_NAME, None) + assert quant_config["quantization_status"] == status.value + assert quant_config["format"] == expected_format + + with safe_open( + tmp_path / "compress_out" / "model.safetensors", framework="pt", device=device + ) as f: + test_tensor = f.get_tensor("model.layers.0.mlp.down_proj.weight") + assert test_tensor.dtype == expected_dtype + + shutil.rmtree(tmp_path) From e8bc02131817a113f33fbf368faa9f3dc304c1c0 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Fri, 3 May 2024 20:45:56 +0000 Subject: [PATCH 46/51] fixing some things --- .../modifiers/obcq/utils/sgpt_wrapper.py | 41 ++++++++++++------- .../compression/sparsity_config.py | 6 ++- 2 files changed, 31 insertions(+), 16 deletions(-) diff --git a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py index 2b439862b4e..a2e47f66164 100644 --- a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py +++ b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py @@ -172,27 +172,38 @@ def fasterprune( q = torch.quantize_per_channel(q, scale, zero_point, 0, dtype) q = torch.dequantize(q) elif hasattr(self.layer, "quantization_scheme"): - if self.layer.quantization_scheme.weights is not None: + quant_scheme = self.layer.quantization_scheme + if quant_scheme.weights is not None: scale = self.layer.weight_scale zero_point = self.layer.weight_zero_point + from compressed_tensors.quantization import QuantizationStrategy from compressed_tensors.quantization.lifecycle.forward import ( fake_quantize, ) - while scale.ndim < 2: - scale = scale.unsqueeze(1) - zero_point = zero_point.unsqueeze(1) - - while q.ndim < 2: - q = q.unsqueeze(1) - q = fake_quantize( - q, - scale[:, i], - zero_point[:, i], - self.layer.quantization_scheme.weights, - ) - - while q.ndim != 1: + if quant_scheme.weights.strategy == QuantizationStrategy.TENSOR: + q = fake_quantize( + q, + scale, + zero_point, + self.layer.quantization_scheme.weights, + ) + else: + while scale.ndim < 2: + scale = scale.unsqueeze(scale.ndim) + zero_point = zero_point.unsqueeze(zero_point.ndim) + + while q.ndim < 2: + q = q.unsqueeze(q.ndim) + + q = 
fake_quantize( + q, + scale[:, i], + zero_point[:, i], + self.layer.quantization_scheme.weights, + ) + + while q.ndim > 1: q.squeeze() Q1[:, i] = q diff --git a/src/sparseml/transformers/compression/sparsity_config.py b/src/sparseml/transformers/compression/sparsity_config.py index 49716cf435c..4d621acc5ba 100644 --- a/src/sparseml/transformers/compression/sparsity_config.py +++ b/src/sparseml/transformers/compression/sparsity_config.py @@ -19,6 +19,7 @@ import sparseml.core.session as session_manager from compressed_tensors import CompressionFormat, SparsityCompressionConfig +from compressed_tensors.quantization.utils import is_model_quantized from sparseml.pytorch.utils import ModuleSparsificationInfo @@ -91,7 +92,10 @@ def from_pretrained( return None sparsity_structure = SparsityConfigMetadata.infer_sparsity_structure() - if compress: + if is_model_quantized(model): + # compressing a sparse quantized model is not supported yet + format = CompressionFormat.dense.value + elif compress: format = CompressionFormat.sparse_bitmask.value else: format = CompressionFormat.dense.value From 061de67229977af31c15e73e8b2ef5baf336cc31 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Tue, 7 May 2024 14:35:53 +0000 Subject: [PATCH 47/51] style --- .../transformers/sparsification/compressed_tensors_utils.py | 1 - src/sparseml/transformers/sparsification/sparse_model.py | 3 --- 2 files changed, 4 deletions(-) diff --git a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py index f57de060f93..c62a1eb9bf9 100644 --- a/src/sparseml/transformers/sparsification/compressed_tensors_utils.py +++ b/src/sparseml/transformers/sparsification/compressed_tensors_utils.py @@ -24,7 +24,6 @@ from sparseml.transformers.compression.quantization_format import ( infer_quantization_format, ) -from compressed_tensors.quantization.utils import is_model_quantized from sparseml.transformers.compression.sparsity_config import SparsityConfigMetadata from sparseml.utils.pytorch import qat_active diff --git a/src/sparseml/transformers/sparsification/sparse_model.py b/src/sparseml/transformers/sparsification/sparse_model.py index 301ff0f2859..76e75862fff 100644 --- a/src/sparseml/transformers/sparsification/sparse_model.py +++ b/src/sparseml/transformers/sparsification/sparse_model.py @@ -102,9 +102,6 @@ def skip(*args, **kwargs): # instantiate compressor from model config compressor = ModelCompressor.from_pretrained(pretrained_model_name_or_path) - quantization_config = QuantizationConfig.from_model_config( - pretrained_model_name_or_path - ) # temporarily set the log level to error, to ignore printing out long missing # and unexpected key error messages (these are EXPECTED for quantized models) From 6e0f1bc0cfea79c5e79950a24e1ce3ed0a80ee28 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 8 May 2024 20:23:12 +0000 Subject: [PATCH 48/51] pull from cp main --- .github/workflows/integrations-check.yaml | 9 +++++++++ .github/workflows/test-check.yaml | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/.github/workflows/integrations-check.yaml b/.github/workflows/integrations-check.yaml index 86c37b57890..ff57a0db08d 100644 --- a/.github/workflows/integrations-check.yaml +++ b/.github/workflows/integrations-check.yaml @@ -62,6 +62,15 @@ jobs: run: pip3 install -U pip && pip3 install setuptools sparsezoo/ - name: "Clean sparsezoo directory" run: rm -r sparsezoo/ + - uses: actions/checkout@v2 + with: + repository: 
"neuralmagic/compressed-tensors" + path: "compressed-tensors" + ref: ${{needs.test-setup.outputs.branch}} + - name: "⚙️ Install compressed-tensors dependencies" + run: pip3 install -U pip && pip3 install setuptools compressed-tensors/ + - name: "Clean compressed-tensors directory" + run: rm -r compressed-tensors/ - name: "⚙️ Install dependencies" run: pip3 install .[dev,torchvision,deepsparse,onnxruntime,transformers,yolov5] - name: "🔬 Running integrations tests (cadence: pre-commit}})" diff --git a/.github/workflows/test-check.yaml b/.github/workflows/test-check.yaml index 362fd297321..179eda2bc65 100644 --- a/.github/workflows/test-check.yaml +++ b/.github/workflows/test-check.yaml @@ -246,6 +246,15 @@ jobs: run: pip3 install -U pip && pip3 install setuptools sparsezoo/ - name: "Clean sparsezoo directory" run: rm -r sparsezoo/ + - uses: actions/checkout@v2 + with: + repository: "neuralmagic/compressed-tensors" + path: "compressed-tensors" + ref: ${{needs.test-setup.outputs.branch}} + - name: "⚙️ Install compressed-tensors dependencies" + run: pip3 install -U pip && pip3 install setuptools compressed-tensors/ + - name: "Clean compressed-tensors directory" + run: rm -r compressed-tensors/ - name: "⚙️ Install dependencies" run: pip3 install .[dev,torch,transformers,onnxruntime] - name: "🔬 Running transformers tests" From 3ff4dc8aa0a7572f88dcfd5b374f525604f719e1 Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 8 May 2024 20:30:00 +0000 Subject: [PATCH 49/51] postmerge too --- .github/workflows/Integrations-post-merge-check.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/Integrations-post-merge-check.yaml b/.github/workflows/Integrations-post-merge-check.yaml index 25aeea10051..f3c29e8e3b3 100644 --- a/.github/workflows/Integrations-post-merge-check.yaml +++ b/.github/workflows/Integrations-post-merge-check.yaml @@ -41,6 +41,15 @@ jobs: run: pip3 install -U pip && pip3 install setuptools sparsezoo/ - name: "Clean sparsezoo directory" run: rm -r sparsezoo/ + - uses: actions/checkout@v2 + with: + repository: "neuralmagic/compressed-tensors" + path: "compressed-tensors" + ref: ${{needs.test-setup.outputs.branch}} + - name: "⚙️ Install compressed-tensors dependencies" + run: pip3 install -U pip && pip3 install setuptools compressed-tensors/ + - name: "Clean compressed-tensors directory" + run: rm -r compressed-tensors/ - name: "⚙️ Install dependencies" run: pip3 install .[dev,torchvision,deepsparse,onnxruntime,transformers,yolov5] - name: "🔬 Running integrations tests (cadence: commit}})" From 29a2186f7081db349afe0bdf9e03a1378a98577f Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Wed, 8 May 2024 20:48:04 +0000 Subject: [PATCH 50/51] export needs it too --- .github/workflows/test-check.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/test-check.yaml b/.github/workflows/test-check.yaml index 179eda2bc65..887dd745a81 100644 --- a/.github/workflows/test-check.yaml +++ b/.github/workflows/test-check.yaml @@ -279,6 +279,15 @@ jobs: run: pip3 install -U pip && pip3 install setuptools sparsezoo/ - name: "Clean sparsezoo directory" run: rm -r sparsezoo/ + - uses: actions/checkout@v2 + with: + repository: "neuralmagic/compressed-tensors" + path: "compressed-tensors" + ref: ${{needs.test-setup.outputs.branch}} + - name: "⚙️ Install compressed-tensors dependencies" + run: pip3 install -U pip && pip3 install setuptools compressed-tensors/ + - name: "Clean compressed-tensors directory" + run: rm -r compressed-tensors/ - name: "⚙️ Install 
dependencies" run: pip3 install .[dev,torch,transformers,torchvision,onnxruntime] - name: "🔬 Running export tests" From e93257f2dac378675a7ad0a91d5eda9903d09eca Mon Sep 17 00:00:00 2001 From: Sara Adkins Date: Thu, 9 May 2024 10:14:12 -0400 Subject: [PATCH 51/51] Update src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py Co-authored-by: Rahul Tuli --- src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py index a2e47f66164..99484490e01 100644 --- a/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py +++ b/src/sparseml/modifiers/obcq/utils/sgpt_wrapper.py @@ -204,7 +204,7 @@ def fasterprune( ) while q.ndim > 1: - q.squeeze() + q = q.squeeze() Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d**2