Unverified commit e5fe5f65, authored by Olatunji Ruwase, committed by GitHub

Use logger in accelerator (#3682)

* Use logger in accelerator

* Handle pre-build cases

* Explain possible import failure
Parent 3fb3cfdc
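
The change is small but follows a deliberate pattern: import the DeepSpeed logger defensively (it currently pulls in torch, which may be absent in pre-build environments) and stay silent when it is unavailable. The standalone sketch below illustrates that pattern only; the `announce` helper is hypothetical and is not part of this commit.

-----------[code] guarded_logger_sketch.py (illustration only) -----------
# Guarded import: deepspeed.utils.logger currently requires torch, so tolerate
# environments (e.g. pre-build) where the import fails.
try:
    from deepspeed.utils import logger as accel_logger
except ImportError:
    accel_logger = None


def announce(message: str) -> None:
    # Hypothetical helper: log only when the logger could actually be imported,
    # otherwise stay silent instead of falling back to print().
    if accel_logger is not None:
        accel_logger.info(message)


announce("Setting ds_accelerator to cuda (auto detect)")
-----------[code] guarded_logger_sketch.py (illustration only) -----------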
@@ -4,6 +4,13 @@
 # DeepSpeed Team

 import os
+try:
+    # Importing logger currently requires that torch is installed, hence the try...except
+    # TODO: Remove logger dependency on torch.
+    from deepspeed.utils import logger as accel_logger
+except ImportError as e:
+    accel_logger = None
+
 try:
     from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
 except ImportError as e:
@@ -99,14 +106,16 @@ def get_accelerator():
         # XPU_Accelerator is already imported in detection stage
         ds_accelerator = XPU_Accelerator()
     _validate_accelerator(ds_accelerator)
-    print(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
+    if accel_logger is not None:
+        accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
     return ds_accelerator


 def set_accelerator(accel_obj):
     global ds_accelerator
     _validate_accelerator(accel_obj)
-    print(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
+    if accel_logger is not None:
+        accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
     ds_accelerator = accel_obj
@@ -114,10 +123,10 @@ def set_accelerator(accel_obj):
 -----------[code] test_get.py -----------
 from deepspeed.accelerator import get_accelerator
 my_accelerator = get_accelerator()
-print(f'{my_accelerator._name=}')
-print(f'{my_accelerator._communication_backend=}')
-print(f'{my_accelerator.HalfTensor().device=}')
-print(f'{my_accelerator.total_memory()=}')
+logger.info(f'{my_accelerator._name=}')
+logger.info(f'{my_accelerator._communication_backend=}')
+logger.info(f'{my_accelerator.HalfTensor().device=}')
+logger.info(f'{my_accelerator.total_memory()=}')
 -----------[code] test_get.py -----------

 ---[output] python test_get.py---------
@@ -131,16 +140,16 @@ my_accelerator.total_memory()=34089730048
 -----------[code] test_set.py -----------
 from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
 cu_accel = CUDA_Accelerator()
-print(f'{id(cu_accel)=}')
+logger.info(f'{id(cu_accel)=}')
 from deepspeed.accelerator import set_accelerator, get_accelerator
 set_accelerator(cu_accel)

 my_accelerator = get_accelerator()
-print(f'{id(my_accelerator)=}')
-print(f'{my_accelerator._name=}')
-print(f'{my_accelerator._communication_backend=}')
-print(f'{my_accelerator.HalfTensor().device=}')
-print(f'{my_accelerator.total_memory()=}')
+logger.info(f'{id(my_accelerator)=}')
+logger.info(f'{my_accelerator._name=}')
+logger.info(f'{my_accelerator._communication_backend=}')
+logger.info(f'{my_accelerator.HalfTensor().device=}')
+logger.info(f'{my_accelerator.total_memory()=}')
 -----------[code] test_set.py -----------
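
Note that the example scripts above call `logger.info` without showing where `logger` comes from. Assuming they run in an environment where torch is installed, the missing prelude would presumably be the same logger this module imports defensively:

-----------[code] assumed prelude for test_get.py / test_set.py -----------
# Assumed, not shown in the diff: the examples rely on DeepSpeed's logger,
# which is exposed from deepspeed.utils.
from deepspeed.utils import logger
-----------[code] assumed prelude for test_get.py / test_set.py -----------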