src/transformers/trainer.py (3 additions, 6 deletions)
```diff
@@ -241,10 +241,9 @@
 DATA_SAMPLERS = [RandomSampler]
 if version.parse(accelerate_version) > version.parse("1.3.0"):
     from accelerate.utils import TorchTensorParallelPlugin
-if version.parse(accelerate_version) > version.parse("0.23.0"):
-    from accelerate.data_loader import SeedableRandomSampler
+from accelerate.data_loader import SeedableRandomSampler
 
-    DATA_SAMPLERS += [SeedableRandomSampler]
+DATA_SAMPLERS += [SeedableRandomSampler]
 
 if is_deepspeed_available():
     from accelerate.utils import DeepSpeedSchedulerWrapper
```
```diff
@@ -4196,9 +4195,7 @@ def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
         elif (tp_size := getattr(self.model, "_tp_size", 0)) is not None and tp_size > 1:
             self._save(output_dir)
         elif self.is_fsdp_enabled:
-            if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and (
-                version.parse(accelerate_version) > version.parse("0.24.1")
-            ):
+            if "FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type):
                 state_dict = self.accelerator.get_state_dict(self.model)
                 if self.args.should_save:
                     self._save(output_dir, state_dict=state_dict)
```
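Both hunks above remove the same pattern: a code path gated at runtime on the installed accelerate version. A minimal sketch of that gating pattern, assuming `accelerate` and `packaging` are installed; once the project's declared minimum accelerate version exceeds the one being checked, the guard is always true and can be deleted, which is what this diff does:

```python
from packaging import version

import accelerate

# Version-gated import: only useful while releases older than the checked
# version are still supported. Once the declared minimum dependency exceeds
# "0.24.1", this condition is always true and the guard is dead code.
if version.parse(accelerate.__version__) > version.parse("0.24.1"):
    from accelerate.data_loader import SeedableRandomSampler  # noqa: F401
```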
tests/fsdp/test_fsdp.py (1 addition, 13 deletions)
```diff
@@ -88,22 +88,11 @@ def get_master_port(real_launcher=False):
 
 
 if is_torch_available():
     from tests.trainer.test_trainer import (  # noqa
         RegressionModelConfig,
         RegressionPreTrainedModel,
     )
 
     # hack to restore original logging level pre #21700
     get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info")
 
-require_fsdp_version = require_fsdp
 if is_accelerate_available():
-    from accelerate.utils.constants import (
-        FSDP_PYTORCH_VERSION,
-        FSDP_SHARDING_STRATEGY,
-    )
-
-    require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION)
+    from accelerate.utils.constants import FSDP_SHARDING_STRATEGY
 
 
 FSDP2_ACCELERATE_VERSION = "1.6.0"
```
```diff
@@ -142,7 +131,6 @@ def _parameterized_custom_name_func(func, param_num, param):
 
 @require_accelerate
 @require_torch_accelerator
-@require_fsdp_version
 class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon):
     def setUp(self):
         super().setUp()
```
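The deleted `require_fsdp_version` alias was built with `functools.partial`, a common way to specialize a skip decorator with a stricter version floor. A minimal self-contained sketch of that pattern, with a hypothetical `require_fsdp` standing in for the real helper in `transformers.testing_utils`:

```python
import unittest
from functools import partial

from packaging import version


def require_fsdp(test_case, min_version: str = "1.12"):
    """Hypothetical stand-in: skip `test_case` unless torch >= `min_version`."""
    try:
        import torch

        ok = version.parse(torch.__version__) >= version.parse(min_version)
    except ImportError:
        ok = False
    return unittest.skipUnless(ok, f"test requires torch>={min_version}")(test_case)


# The removed pattern: bind a stricter minimum once, then apply the result
# as an ordinary decorator to test classes or methods.
require_fsdp_version = partial(require_fsdp, min_version="2.1.0")
```

In the diff above the alias is dropped entirely rather than replaced, presumably because the minimum supported torch now satisfies the old `FSDP_PYTORCH_VERSION` floor.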