We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 1f618c3 · commit 105bf03 (Copy full SHA for 105bf03)
vllm/platforms/rocm.py
@@ -169,4 +169,5 @@ def get_current_memory_usage(cls,
169
device: Optional[torch.types.Device] = None
170
) -> float:
171
torch.cuda.reset_peak_memory_stats(device)
172
- return torch.cuda.max_memory_allocated(device)
+ return torch.cuda.mem_get_info(device)[1] - torch.cuda.mem_get_info(
173
+ device)[0]
0 commit comments