Add compatibility with skip_memory_metrics for mps device (#29264)
* Add compatibility with mps device

* fix

* typo and style
SunMarc authored and Ita Zaporozhets committed May 14, 2024
1 parent 00366cd commit f3ee5f7
Showing 1 changed file with 15 additions and 1 deletion.
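
For context before the diff: the patch teaches the memory tracker in trainer_utils.py to recognize Apple's MPS backend, which exposes a smaller memory API than CUDA. Below is a minimal standalone sketch (not part of the commit) of the PyTorch calls the patch relies on; it assumes a PyTorch build with MPS support on Apple Silicon.

import torch

# Guard: the tracker only takes the MPS path when the backend is present.
if torch.backends.mps.is_available():
    x = torch.ones(1024, 1024, device="mps")      # allocate ~4 MiB on the Apple GPU
    print(torch.mps.current_allocated_memory())   # bytes currently allocated by tensors
    del x
    torch.mps.empty_cache()                       # release cached allocator blocks
    # Unlike torch.cuda, torch.mps offers no max_memory_allocated() or
    # reset_peak_memory_stats(), so peak usage cannot be tracked on MPS.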
src/transformers/trainer_utils.py (15 additions, 1 deletion)
@@ -526,6 +526,8 @@ def start(self):
         elif is_torch_npu_available():
             self.torch.npu.reset_peak_memory_stats()
             self.torch.npu.empty_cache()
+        elif is_torch_mps_available():
+            self.torch.mps.empty_cache()
 
         # gpu
         if self.torch is not None:
@@ -535,6 +537,8 @@ def start(self):
             self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated()
         elif is_torch_npu_available():
             self.gpu_mem_used_at_start = self.torch.npu.memory_allocated()
+        elif is_torch_mps_available():
+            self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory()
 
         # cpu
         self.cpu_mem_used_at_start = self.cpu_mem_used()
@@ -564,6 +568,8 @@ def stop(self, stage):
             self.torch.xpu.empty_cache()
         elif is_torch_npu_available():
             self.torch.npu.empty_cache()
+        elif is_torch_mps_available():
+            self.torch.mps.empty_cache()
 
         # concepts:
         # - alloc_delta: the difference of allocated memory between the end and the start
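
The hunk below turns the begin/end/peak readings into these reported deltas. As a worked example of the arithmetic (illustrative numbers, not from the diff):

# alloc: net memory still held when the stage ends; peaked: transient
# overshoot above the end point (clamped at 0, as in the code below).
begin, end, peak = 100, 160, 220
alloc_delta = end - begin                     # 60
peaked_delta = max(0, peak - end)             # 60
total_consumed = alloc_delta + peaked_delta   # 120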
@@ -581,15 +587,23 @@ def stop(self, stage):
         elif is_torch_npu_available():
             self.gpu_mem_used_now = self.torch.npu.memory_allocated()
             self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated()
+        elif is_torch_mps_available():
+            self.gpu_mem_used_now = self.torch.mps.current_allocated_memory()
+            # self.torch.mps.max_memory_allocated() does not exist yet
+            self.gpu_mem_used_peak = None
+
         else:
             raise ValueError("No available GPU device found!")
 
         self.gpu[self.cur_stage] = {
             "begin": self.gpu_mem_used_at_start,
             "end": self.gpu_mem_used_now,
             "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start),
-            "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
         }
+        if self.gpu_mem_used_peak is not None:
+            self.gpu[self.cur_stage]["peaked"] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now)
+        else:
+            self.gpu[self.cur_stage]["peaked"] = "Not available"
 
         # cpu
         self.cpu_mem_used_now = self.cpu_mem_used()
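
To actually collect these metrics, skip_memory_metrics must be set to False (it defaults to True, meaning tracking is skipped). A minimal end-to-end sketch; the tiny checkpoint and toy dataset are illustrative choices, not from the commit:

from datasets import Dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

tok = AutoTokenizer.from_pretrained("prajjwal1/bert-tiny")  # any small checkpoint works
data = Dataset.from_dict({"text": ["good", "bad"] * 8, "label": [1, 0] * 8})
data = data.map(lambda b: tok(b["text"], truncation=True, padding="max_length", max_length=16), batched=True)
model = AutoModelForSequenceClassification.from_pretrained("prajjwal1/bert-tiny", num_labels=2)

args = TrainingArguments(output_dir="out", num_train_epochs=1, report_to=[], skip_memory_metrics=False)
metrics = Trainer(model=model, args=args, train_dataset=data).train().metrics
# On an MPS machine, expect keys such as train_mem_gpu_alloc_delta, while the
# "peaked" counterpart is reported as "Not available" (no peak API on MPS at
# the time of this commit).
print(metrics)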