
Commit f72def5

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 645ee50 commit f72def5

6 files changed: +15 −26 lines changed

docs/source/adaptor.md

Lines changed: 5 additions & 10 deletions

@@ -44,22 +44,17 @@ For example, a user can implement an `Abc` adaptor like below:
 ```python
 @adaptor_registry
 class AbcAdaptor(Adaptor):
-    def __init__(self, framework_specific_info):
-        ...
+    def __init__(self, framework_specific_info): ...

-    def quantize(self, tune_cfg, model, dataloader, q_func=None):
-        ...
+    def quantize(self, tune_cfg, model, dataloader, q_func=None): ...

     def evaluate(
         self, model, dataloader, postprocess=None, metric=None, measurer=None, iteration=-1, tensorboard=False
-    ):
-        ...
+    ): ...

-    def query_fw_capability(self, model):
-        ...
+    def query_fw_capability(self, model): ...

-    def query_fused_patterns(self, model):
-        ...
+    def query_fused_patterns(self, model): ...
 ```

 * `quantize` function is used to perform quantization for post-training quantization and quantization-aware training. Quantization processing includes calibration and conversion processing for post-training quantization, while for quantization-aware training, it includes training and conversion processing.
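For readers skimming this diff: the `quantize` stub collapsed to a one-liner above is the entry point an adaptor must implement, covering both the calibrate-then-convert path and the train-then-convert path described in the context line. A minimal sketch of that control flow follows; the import path is assumed from the adaptor docs, and `_calibrate` / `_convert` are hypothetical helpers, not part of the Neural Compressor API.

```python
from neural_compressor.adaptor.adaptor import Adaptor, adaptor_registry  # assumed import path


@adaptor_registry
class AbcAdaptor(Adaptor):
    def __init__(self, framework_specific_info):
        super().__init__(framework_specific_info)
        self.device = framework_specific_info.get("device", "cpu")

    def quantize(self, tune_cfg, model, dataloader, q_func=None):
        if q_func is not None:
            # Quantization-aware training: q_func trains the model first.
            q_func(model)
        else:
            # Post-training quantization: run calibration batches.
            self._calibrate(model, dataloader, tune_cfg)  # hypothetical helper
        return self._convert(model, tune_cfg)  # hypothetical helper
```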

docs/source/migration.md

Lines changed: 6 additions & 12 deletions

@@ -41,8 +41,7 @@ val_dataloader = torch.utils.data.Dataloader(
 )


-def eval_func(model):
-    ...
+def eval_func(model): ...


 # Quantization code
@@ -115,8 +114,7 @@ val_dataloader = torch.utils.data.Dataloader(
 )


-def eval_func(model):
-    ...
+def eval_func(model): ...


 # Quantization code
@@ -147,12 +145,10 @@ model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)


-def eval_func(model):
-    ...
+def eval_func(model): ...


-def train_func(model):
-    ...
+def train_func(model): ...


 trainer = Trainer(...)
@@ -213,12 +209,10 @@ model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)


-def eval_func(model):
-    ...
+def eval_func(model): ...


-def train_func(model):
-    ...
+def train_func(model): ...


 trainer = Trainer(...)
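The `def eval_func(model): ...` stubs reformatted above are user-supplied callbacks that score a candidate model. As a hedged illustration only (the stand-in model and validation data below are assumptions, not from this commit), such a function typically runs inference over the validation set and returns a single scalar:

```python
import torch

# Stand-ins for the model and validation data defined earlier in the doc.
model = torch.nn.Linear(16, 2)
val_dataloader = [(torch.randn(8, 16), torch.randint(0, 2, (8,))) for _ in range(4)]


def eval_func(model):
    # Return a scalar score the tuner can compare across configurations.
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for inputs, labels in val_dataloader:
            preds = model(inputs).argmax(dim=-1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return correct / total
```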

docs/source/pruning.md

Lines changed: 1 addition & 0 deletions

@@ -342,6 +342,7 @@ The following section exemplifies how to use hooks in user pass-in training func
 on_after_optimizer_step() # Update weights' criteria, mask weights
 on_train_end() # End of pruner, print sparse information
 """
+
 from neural_compressor.training import prepare_compression, WeightPruningConfig

 config = WeightPruningConfig(configs)
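The blank line added here separates the hook-order docstring from the pruning setup code. For orientation, a sketch of how those hooks are typically driven from a user training loop; the hook names come from the docstring and the pruning docs, while the toy model, data, and config contents are assumptions:

```python
import torch
from neural_compressor.training import prepare_compression, WeightPruningConfig

# Toy stand-ins for the user's model, optimizer, and data (assumptions).
model = torch.nn.Linear(16, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = torch.nn.CrossEntropyLoss()
train_dataloader = [(torch.randn(8, 16), torch.randint(0, 2, (8,))) for _ in range(4)]

config = WeightPruningConfig([{"target_sparsity": 0.9}])  # assumed minimal config
compression_manager = prepare_compression(model, config)
compression_manager.callbacks.on_train_begin()  # setup pruners
for epoch in range(2):
    for step, (inputs, labels) in enumerate(train_dataloader):
        compression_manager.callbacks.on_step_begin(step)
        loss = criterion(model(inputs), labels)
        optimizer.zero_grad()
        loss.backward()
        compression_manager.callbacks.on_before_optimizer_step()
        optimizer.step()
        compression_manager.callbacks.on_after_optimizer_step()  # mask weights
        compression_manager.callbacks.on_step_end()
compression_manager.callbacks.on_train_end()  # print sparse information
```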

docs/source/quantization.md

Lines changed: 1 addition & 2 deletions

@@ -319,8 +319,7 @@ criterion = ...


 # Quantization code
-def train_func(model):
-    ...
+def train_func(model): ...


 from neural_compressor import QuantizationAwareTrainingConfig
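Context for the `def train_func(model): ...` one-liner: in the quantization-aware-training flow this callback fine-tunes the fake-quantized model. A hedged sketch of how the surrounding doc wires it up; the `prepare_compression` pairing follows Neural Compressor's QAT docs, while the stand-in model and the empty training body are assumptions:

```python
import torch
from neural_compressor import QuantizationAwareTrainingConfig
from neural_compressor.training import prepare_compression

model = torch.nn.Linear(16, 2)  # stand-in for the user's fp32 model (assumption)


def train_func(model):
    # Fine-tune the fake-quantized model for one or more epochs (assumed body).
    ...


conf = QuantizationAwareTrainingConfig()
compression_manager = prepare_compression(model, conf)
compression_manager.callbacks.on_train_begin()
train_func(compression_manager.model)
compression_manager.callbacks.on_train_end()
q_model = compression_manager.model  # the quantized result
```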

docs/source/tuning_strategies.md

Lines changed: 1 addition & 2 deletions

@@ -507,8 +507,7 @@ For example, user can implement an `Abc` strategy like below:
 ```python
 @strategy_registry
 class AbcTuneStrategy(TuneStrategy):
-    def __init__(self, model, conf, q_dataloader, q_func=None, eval_dataloader=None, eval_func=None, dicts=None):
-        ...
+    def __init__(self, model, conf, q_dataloader, q_func=None, eval_dataloader=None, eval_func=None, dicts=None): ...

     def next_tune_cfg(self):
         # generate the next tuning config
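For context on `next_tune_cfg`: it is written as a generator that yields candidate tuning configurations until one meets the accuracy goal. A minimal hedged sketch; only the method name and the generator contract come from the doc, while the import path and the candidate list are assumptions:

```python
from neural_compressor.strategy.strategy import TuneStrategy, strategy_registry  # assumed import path


@strategy_registry
class AbcTuneStrategy(TuneStrategy):
    def __init__(self, model, conf, q_dataloader, q_func=None, eval_dataloader=None, eval_func=None, dicts=None):
        super().__init__(model, conf, q_dataloader, q_func, eval_dataloader, eval_func, dicts)

    def next_tune_cfg(self):
        # Yield one candidate tuning config per iteration; the traverse loop
        # stops as soon as a yielded config meets the accuracy criterion.
        for cfg in self._candidate_cfgs:  # hypothetical pre-built candidate list
            yield cfg
```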

neural_compressor/compression/pruner/README.md

Lines changed: 1 addition & 0 deletions

@@ -343,6 +343,7 @@ The following section exemplifies how to use hooks in user pass-in training func
 on_after_optimizer_step() # Update weights' criteria, mask weights
 on_train_end() # End of pruner, print sparse information
 """
+
 from neural_compressor.training import prepare_compression, WeightPruningConfig

 config = WeightPruningConfig(configs)
