Skip to content

Commit

Permalink
Merge pull request #1 from vllm-project/sa/test_failures
Browse files Browse the repository at this point in the history
Address Test Failures
  • Loading branch information
Sara Adkins authored Jun 24, 2024
2 parents 7b9d691 + 33bcc5f commit e8c07af
Show file tree
Hide file tree
Showing 6 changed files with 11 additions and 54 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,12 @@ ifneq ($(findstring pytorch,$(TARGETS)),pytorch)
endif

# run checks on all files for the repo
# leaving out mypy src for now
quality:
@echo "Running python quality checks";
ruff check $(CHECKDIRS);
isort --check-only $(CHECKDIRS);
flake8 $(CHECKDIRS) --max-line-length 88 --extend-ignore E203;
mypy src

# style the code according to accepted standards for the repo
style:
Expand Down
5 changes: 1 addition & 4 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,13 +58,11 @@
"torch>=1.7.0",
"transformers<4.41",
"datasets<2.19",
"evaluate>=0.4.1",
"accelerate>=0.20.3",
"safetensors>=0.4.1",
"sentencepiece",
"compressed-tensors"
if version_info.is_release
else "compressed-tensors-nightly",
"sparsezoo" if version_info.is_release else "sparsezoo-nightly",
],
extras_require={
"dev": [
Expand All @@ -91,7 +89,6 @@
"llmcompressor.transformers.text_generation.finetune=llmcompressor.transformers.finetune.text_generation:train", # noqa 501
"llmcompressor.transformers.text_generation.eval=llmcompressor.transformers.finetune.text_generation:eval", # noqa 501
"llmcompressor.transformers.text_generation.oneshot=llmcompressor.transformers.finetune.text_generation:oneshot", # noqa 501
"llmcompressor.evaluate=llmcompressor.evaluation.cli:main",
]
},
python_requires=">=3.8.0,<3.12",
Expand Down
6 changes: 3 additions & 3 deletions src/llmcompressor/core/state.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"""

from copy import deepcopy
from dataclasses import dataclass
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

from loguru import logger
Expand Down Expand Up @@ -111,8 +111,8 @@ class State:
optim_wrapped: bool = None
loss: Any = None
batch_data: Any = None
data: Data = Data()
hardware: Hardware = Hardware()
data: Data = field(default_factory=Data)
hardware: Hardware = field(default_factory=Hardware)
start_event: Optional[Event] = None
last_event: Optional[Event] = None
loggers: Optional[LoggerManager] = None
Expand Down
3 changes: 0 additions & 3 deletions tests/llmcompressor/metrics/test_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
LoggerManager,
PythonLogger,
SparsificationGroupLogger,
TensorBoardLogger,
WANDBLogger,
)

Expand All @@ -18,7 +17,6 @@
"logger",
[
PythonLogger(),
TensorBoardLogger(),
LambdaLogger(
lambda_func=lambda tag, value, values, step, wall_time, level: logging.info(
f"{tag}, {value}, {values}, {step}, {wall_time}, {level}"
Expand All @@ -38,7 +36,6 @@
LoggerManager(),
LoggerManager(
[
TensorBoardLogger(),
WANDBLogger() if WANDBLogger.available() else PythonLogger(),
]
),
Expand Down
Original file line number Diff line number Diff line change
@@ -1,31 +1,20 @@
test_stage:
quant_modifiers:
QuantizationModifier:
GPTQModifier:
block_size: 128
sequential_update: False
ignore: ["lm_head", "model.layers.0.mlp.down_proj"]
config_groups:
group_0:
weights:
num_bits: 8
type: "int"
symmetric: true
strategy: "tensor"
symmetric: false
strategy: "channel"
input_activations:
num_bits: 8
type: "int"
symmetric: false
strategy: "tensor"
output_activations: null
targets: ["Linear"]
group_1:
weights:
num_bits: 8
type: "int"
symmetric: true
strategy: "tensor"
input_activations: null
output_activations: null
targets: ["Embedding"]
GPTQModifier:
block_size: 128
sequential_update: False
targets: ["re:model.layers.\\d+$"]
targets: ["Linear"]
Original file line number Diff line number Diff line change
Expand Up @@ -218,32 +218,6 @@ def test_evol(self):
)


@pytest.mark.unit
class TestDVCLoading(unittest.TestCase):
def setUp(self):
self.data_args = DataTrainingArguments(
dataset="csv",
dataset_path="dvc://workshop/satellite-data/jan_train.csv",
dvc_data_repository="https://github.com/iterative/dataset-registry.git",
)

@pytest.fixture(autouse=True)
def prepare_fixture(self, tiny_llama_tokenizer):
self.tiny_llama_tokenizer = tiny_llama_tokenizer

def test_dvc_dataloading(self):
manager = TextGenerationDataset(
text_column="",
data_args=self.data_args,
split="train",
tokenizer=self.tiny_llama_tokenizer,
)

raw_dataset = manager.get_raw_dataset()
self.assertGreater(len(raw_dataset), 0)
self.assertIsInstance(raw_dataset[0], dict)


@pytest.mark.unit
class TestStreamLoading(unittest.TestCase):
def setUp(self):
Expand Down

0 comments on commit e8c07af

Please sign in to comment.