diff --git a/PaddleNLP/neural_machine_translation/transformer/infer.py b/PaddleNLP/neural_machine_translation/transformer/infer.py
index aaf813a5e1345a47e4e9073d2ff1219274fc97fe..cb40e6851db11c54faf0a393bd26f5263b16ae65 100644
--- a/PaddleNLP/neural_machine_translation/transformer/infer.py
+++ b/PaddleNLP/neural_machine_translation/transformer/infer.py
@@ -4,12 +4,14 @@ import multiprocessing
 import numpy as np
 import os
 import sys
+sys.path.append("../../")
 sys.path.append("../../models/neural_machine_translation/transformer/")
 from functools import partial
 
 import paddle
 import paddle.fluid as fluid
+from models.model_check import check_cuda
 
 import reader
 from config import *
 from desc import *
@@ -217,6 +219,7 @@ def fast_infer(args):
     fluid.memory_optimize(infer_program)
 
     if InferTaskConfig.use_gpu:
+        check_cuda(InferTaskConfig.use_gpu)
         place = fluid.CUDAPlace(0)
         dev_count = fluid.core.get_cuda_device_count()
     else:
diff --git a/PaddleNLP/neural_machine_translation/transformer/train.py b/PaddleNLP/neural_machine_translation/transformer/train.py
index b8e6c95f8a4b779c9dd708a776e91b65cd55365c..f284c9c6a0b547d5c119232d1dab76de3dbd1064 100644
--- a/PaddleNLP/neural_machine_translation/transformer/train.py
+++ b/PaddleNLP/neural_machine_translation/transformer/train.py
@@ -6,12 +6,14 @@ import multiprocessing
 import os
 import six
 import sys
+sys.path.append("../../")
 sys.path.append("../../models/neural_machine_translation/transformer/")
 import time
 
 import numpy as np
 import paddle.fluid as fluid
+from models.model_check import check_cuda
 
 import reader
 from config import *
 from desc import *
@@ -663,6 +665,7 @@ def train(args):
         place = fluid.CPUPlace()
         dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
     else:
+        check_cuda(TrainTaskConfig.use_gpu)
         gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
         place = fluid.CUDAPlace(gpu_id)
         dev_count = get_device_num()