未验证 提交 32e85eda 编写于 作者: S Stas Bekman 提交者: GitHub

[see_memory_usage] fix deprecation (#1234)

Co-authored-by: Olatunji Ruwase <olruwase@microsoft.com>
上级 89b0fb42
......@@ -21,6 +21,16 @@ import torch.distributed as dist
from deepspeed.utils import logger
from numpy import prod
# Compatibility shims for the pt-1.9 renames: memory_cached/max_memory_cached
# were deprecated in favor of memory_reserved/max_memory_reserved. Bind to the
# new API when available, otherwise fall back to the closest older function.
torch_memory_reserved = (
    torch.cuda.memory_reserved
    if hasattr(torch.cuda, "memory_reserved") else torch.cuda.memory_allocated
)
torch_max_memory_reserved = (
    torch.cuda.max_memory_reserved
    if hasattr(torch.cuda, "max_memory_reserved") else torch.cuda.memory_cached
)
def noop_decorator(func):
    """Identity decorator: return *func* unchanged (placeholder where a real decorator is optional)."""
    return func
......@@ -589,8 +599,8 @@ def see_memory_usage(message, force=False):
logger.info(
f"MA {round(torch.cuda.memory_allocated() / (1024 * 1024 * 1024),2 )} GB \
Max_MA {round(torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \
CA {round(torch.cuda.memory_cached() / (1024 * 1024 * 1024),2)} GB \
Max_CA {round(torch.cuda.max_memory_cached() / (1024 * 1024 * 1024))} GB ")
CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \
Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB ")
vm_stats = psutil.virtual_memory()
used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册