Unverified commit 65412088, authored by Q QingshuChen, committed by GitHub

support mask_rcnn for kunlun (#1890)

* support mask_rcnn for kunlun

* minor
Parent 2486a941
@@ -28,12 +28,32 @@ logger = logging.getLogger(__name__)
 __all__ = [
     'check_gpu',
+    'check_xpu',
     'check_version',
     'check_config',
     'check_py_func',
 ]


+def check_xpu(use_xpu):
+    """
+    Log error and exit when set use_xpu=true in paddlepaddle
+    cpu/gpu version.
+    """
+    err = "Config use_xpu cannot be set as true while you are " \
+          "using paddlepaddle cpu/gpu version ! \nPlease try: \n" \
+          "\t1. Install paddlepaddle-xpu to run model on XPU \n" \
+          "\t2. Set use_xpu as false in config file to run " \
+          "model on CPU/GPU"
+    try:
+        if use_xpu and not fluid.is_compiled_with_xpu():
+            logger.error(err)
+            sys.exit(1)
+    except Exception as e:
+        pass
+
+
 def check_gpu(use_gpu):
     """
     Log error and exit when set use_gpu=true in paddlepaddle
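For orientation, here is a minimal sketch of how the new `check_xpu` guard is meant to be called from a tool script, mirroring the usage added to the eval and train tools below. The `Cfg` class is a hypothetical stand-in for the real detection config; this snippet is illustrative only and not part of the commit.

```python
from ppdet.utils.check import check_gpu, check_xpu


# Hypothetical minimal config object, stand-in for the real detection config.
class Cfg(object):
    use_gpu = False
    use_xpu = True


cfg = Cfg()

# Fail fast if the requested device is not supported by this PaddlePaddle build.
check_gpu(cfg.use_gpu)
check_xpu(getattr(cfg, 'use_xpu', False))
assert not (getattr(cfg, 'use_xpu', False) and cfg.use_gpu), \
    'Can not run on both XPU and GPU'
```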
@@ -27,7 +27,7 @@ import paddle.fluid as fluid
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
 import ppdet.utils.checkpoint as checkpoint
-from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
+from ppdet.utils.check import check_gpu, check_xpu, check_version, check_config, enable_static_mode
 from ppdet.data.reader import create_reader
@@ -49,15 +49,27 @@ def main():
     check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
+    use_xpu = False
+    if hasattr(cfg, 'use_xpu'):
+        check_xpu(cfg.use_xpu)
+        use_xpu = cfg.use_xpu
     # check if paddlepaddle version is satisfied
     check_version()
+    assert not (use_xpu and cfg.use_gpu), \
+        'Can not run on both XPU and GPU'

     main_arch = cfg.architecture

     multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)

     # define executor
-    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
+    if cfg.use_gpu:
+        place = fluid.CUDAPlace(0)
+    elif use_xpu:
+        place = fluid.XPUPlace(0)
+    else:
+        place = fluid.CPUPlace()
     exe = fluid.Executor(place)

     # build program
@@ -91,6 +103,8 @@ def main():
         return

     compile_program = fluid.CompiledProgram(eval_prog).with_data_parallel()
+    if use_xpu:
+        compile_program = eval_prog

     assert cfg.metric != 'OID', "eval process of OID dataset \
                     is not supported."
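The eval-side changes follow one pattern: pick the executor place by priority (CUDA GPU, then Kunlun XPU, then CPU), and skip `with_data_parallel` compilation on XPU, where the plain program is executed directly. Below is a hedged sketch of that place selection, assuming a PaddlePaddle build with XPU support and using only the `paddle.fluid` APIs referenced in the diff; `build_place` is a hypothetical helper name, not part of the commit.

```python
import paddle.fluid as fluid


def build_place(use_gpu, use_xpu, device_id=0):
    """Pick the executor place in the same priority order as the diff:
    CUDA first, then Kunlun XPU, otherwise CPU."""
    if use_gpu:
        return fluid.CUDAPlace(device_id)
    if use_xpu:
        return fluid.XPUPlace(device_id)
    return fluid.CPUPlace()


# Example: single Kunlun card (the commit notes XPU only supports one card for now).
place = build_place(use_gpu=False, use_xpu=True)
exe = fluid.Executor(place)
```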
@@ -43,7 +43,7 @@ from ppdet.utils import dist_utils
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode
+from ppdet.utils.check import check_gpu, check_xpu, check_version, check_config, enable_static_mode
 import ppdet.utils.checkpoint as checkpoint
 import logging
@@ -73,9 +73,16 @@ def main():
     check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
+    use_xpu = False
+    if hasattr(cfg, 'use_xpu'):
+        check_xpu(cfg.use_xpu)
+        use_xpu = cfg.use_xpu
     # check if paddlepaddle version is satisfied
     check_version()
+    assert not (use_xpu and cfg.use_gpu), \
+        'Can not run on both XPU and GPU'

     save_only = getattr(cfg, 'save_prediction_only', False)
     if save_only:
         raise NotImplementedError('The config file only support prediction,'
@@ -84,14 +91,25 @@ def main():
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
+    elif use_xpu:
+        # ToDo(qingshu): XPU only support single card now
+        devices_num = 1
     else:
         devices_num = int(os.environ.get('CPU_NUM', 1))

-    if 'FLAGS_selected_gpus' in env:
+    if cfg.use_gpu and 'FLAGS_selected_gpus' in env:
         device_id = int(env['FLAGS_selected_gpus'])
+    elif use_xpu and 'FLAGS_selected_xpus' in env:
+        device_id = int(env['FLAGS_selected_xpus'])
     else:
         device_id = 0
-    place = fluid.CUDAPlace(device_id) if cfg.use_gpu else fluid.CPUPlace()
+
+    if cfg.use_gpu:
+        place = fluid.CUDAPlace(device_id)
+    elif use_xpu:
+        place = fluid.XPUPlace(device_id)
+    else:
+        place = fluid.CPUPlace()
     exe = fluid.Executor(place)

     lr_builder = create('LearningRate')
@@ -184,9 +202,13 @@ def main():
             loss_name=loss.name,
             build_strategy=build_strategy,
             exec_strategy=exec_strategy)
+    if use_xpu:
+        compiled_train_prog = train_prog

     if FLAGS.eval:
         compiled_eval_prog = fluid.CompiledProgram(eval_prog)
+        if use_xpu:
+            compiled_eval_prog = eval_prog

     fuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'
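In the training tool, the device id comes from the launcher environment: `FLAGS_selected_gpus` on CUDA, `FLAGS_selected_xpus` on Kunlun XPU, with card 0 as the fallback; on XPU the trainer also runs `train_prog`/`eval_prog` directly instead of the `CompiledProgram`, matching the single-card restriction noted above. The sketch below just mirrors that lookup as a standalone helper; `resolve_device_id` is a hypothetical name introduced here for illustration.

```python
import os


def resolve_device_id(use_gpu, use_xpu, env=None):
    """Mirror the device-id lookup in the training tool: prefer the
    launcher-provided FLAGS_selected_gpus / FLAGS_selected_xpus value,
    otherwise fall back to card 0."""
    env = dict(os.environ) if env is None else env
    if use_gpu and 'FLAGS_selected_gpus' in env:
        return int(env['FLAGS_selected_gpus'])
    if use_xpu and 'FLAGS_selected_xpus' in env:
        return int(env['FLAGS_selected_xpus'])
    return 0


# Example: with FLAGS_selected_xpus=2 exported by the launcher, this returns 2.
print(resolve_device_id(use_gpu=False, use_xpu=True))
```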