Add compatibility with skip_memory_metrics for mps device #29264

Merged: 3 commits, Feb 27, 2024
16 changes: 15 additions & 1 deletion src/transformers/trainer_utils.py
```diff
@@ -526,6 +526,8 @@ def start(self):
             elif is_torch_npu_available():
                 self.torch.npu.reset_peak_memory_stats()
                 self.torch.npu.empty_cache()
+            elif is_torch_mps_available():
+                self.torch.mps.empty_cache()
 
         # gpu
         if self.torch is not None:
@@ -535,6 +537,8 @@ def start(self):
                 self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated()
             elif is_torch_npu_available():
                 self.gpu_mem_used_at_start = self.torch.npu.memory_allocated()
+            elif is_torch_mps_available():
+                self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory()
 
         # cpu
         self.cpu_mem_used_at_start = self.cpu_mem_used()
@@ -564,6 +568,8 @@ def stop(self, stage):
                 self.torch.xpu.empty_cache()
             elif is_torch_npu_available():
                 self.torch.npu.empty_cache()
+            elif is_torch_mps_available():
+                self.torch.mps.empty_cache()
 
         # concepts:
         # - alloc_delta: the difference of allocated memory between the end and the start
@@ -581,15 +587,23 @@ def stop(self, stage):
             elif is_torch_npu_available():
                 self.gpu_mem_used_now = self.torch.npu.memory_allocated()
                 self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated()
+            elif is_torch_mps_available():
+                self.gpu_mem_used_now = self.torch.mps.current_allocated_memory()
+                # self.torch.mps.max_memory_allocated() does not exist yet
+                self.gpu_mem_used_peak = None
+
             else:
                 raise ValueError("No available GPU device found!")
 
             self.gpu[self.cur_stage] = {
                 "begin": self.gpu_mem_used_at_start,
                 "end": self.gpu_mem_used_now,
                 "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start),
-                "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
             }
+            if self.gpu_mem_used_peak is not None:
+                self.gpu[self.cur_stage]["peaked"] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now)
+            else:
+                self.gpu[self.cur_stage]["peaked"] = "Not available"
 
         # cpu
         self.cpu_mem_used_now = self.cpu_mem_used()
```
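For reviewers who want to poke at the underlying calls: below is a minimal, standalone sketch (not part of this PR) of the `torch.mps` memory APIs the tracker now relies on. It assumes PyTorch 2.x on an Apple Silicon Mac; the `probe_mps_memory` helper name is hypothetical.

```python
import torch


def probe_mps_memory() -> None:
    """Print the MPS allocation delta for a small tensor, mirroring the
    begin/end/alloc figures TrainerMemoryTracker records on Apple Silicon."""
    if not torch.backends.mps.is_available():
        print("MPS backend not available; nothing to measure.")
        return

    torch.mps.empty_cache()  # release cached blocks, as start()/stop() do
    before = torch.mps.current_allocated_memory()  # bytes currently allocated on MPS

    x = torch.randn(1024, 1024, device="mps")  # roughly 4 MiB of fp32

    after = torch.mps.current_allocated_memory()
    print(f"begin={before} end={after} alloc={after - before} bytes")

    # As the PR's inline comment notes, torch.mps has no max_memory_allocated()
    # counterpart, so a peak ("peaked") figure cannot be measured as on CUDA.
    del x
    torch.mps.empty_cache()


if __name__ == "__main__":
    probe_mps_memory()
```

Because `torch.mps` exposes `current_allocated_memory()` but no `max_memory_allocated()` counterpart, the tracker can fill `begin`/`end`/`alloc` with real byte counts on MPS while reporting `"peaked"` as `"Not available"`; code that assumes `"peaked"` is always an int should account for that.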