From a0a6661613964f0ba29ee81eadf82fbf75408bb1 Mon Sep 17 00:00:00 2001
From: chajchaj <57249073+chajchaj@users.noreply.github.com>
Date: Thu, 12 Mar 2020 13:47:57 +0800
Subject: [PATCH] fix bug: use_gpu false (#4403)

---
 dygraph/mobilenet/reader.py |  9 ++++-----
 dygraph/mobilenet/train.py  | 10 ++++++----
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/dygraph/mobilenet/reader.py b/dygraph/mobilenet/reader.py
index b96d1366..bba33c35 100644
--- a/dygraph/mobilenet/reader.py
+++ b/dygraph/mobilenet/reader.py
@@ -256,8 +256,9 @@ def process_batch_data(input_data, settings, mode, color_jitter, rotate):
 
 
 class ImageNetReader:
-    def __init__(self, seed=None):
+    def __init__(self, seed=None, place_num=1):
         self.shuffle_seed = seed
+        self.place_num = place_num
 
     def set_shuffle_seed(self, seed):
         assert isinstance(seed, int), "shuffle seed must be int"
@@ -275,8 +276,7 @@ class ImageNetReader:
         if mode == 'test':
             batch_size = 1
         else:
-            batch_size = settings.batch_size / paddle.fluid.core.get_cuda_device_count(
-            )
+            batch_size = settings.batch_size / self.place_num
 
         def reader():
             def read_file_list():
@@ -365,8 +365,7 @@ class ImageNetReader:
             reader = create_mixup_reader(settings, reader)
         reader = fluid.io.batch(
             reader,
-            batch_size=int(settings.batch_size /
-                           paddle.fluid.core.get_cuda_device_count()),
+            batch_size=int(settings.batch_size / self.place_num),
             drop_last=True)
         return reader
 
diff --git a/dygraph/mobilenet/train.py b/dygraph/mobilenet/train.py
index 254279ba..a510fa5b 100644
--- a/dygraph/mobilenet/train.py
+++ b/dygraph/mobilenet/train.py
@@ -42,10 +42,11 @@ def eval(net, test_data_loader, eop):
     total_acc5 = 0.0
     total_sample = 0
     t_last = 0
+    place_num = paddle.fluid.core.get_cuda_device_count() if args.use_gpu else int(os.environ.get('CPU_NUM', 1))
     for img, label in test_data_loader():
         t1 = time.time()
         label = to_variable(label.numpy().astype('int64').reshape(
-            int(args.batch_size // paddle.fluid.core.get_cuda_device_count()),
+            int(args.batch_size // place_num),
             1))
         out = net(img)
         softmax_out = fluid.layers.softmax(out, use_cudnn=False)
@@ -77,6 +78,7 @@ def train_mobilenet():
         place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)
     with fluid.dygraph.guard(place):
         # 1. init net and optimizer
+        place_num = paddle.fluid.core.get_cuda_device_count() if args.use_gpu else int(os.environ.get('CPU_NUM', 1))
         if args.ce:
             print("ce mode")
             seed = 33
@@ -118,7 +120,7 @@ def train_mobilenet():
         test_data_loader, test_data = utility.create_data_loader(
             is_train=False, args=args)
         num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
-        imagenet_reader = reader.ImageNetReader(0)
+        imagenet_reader = reader.ImageNetReader(seed=0,place_num=place_num)
         train_reader = imagenet_reader.train(settings=args)
        test_reader = imagenet_reader.val(settings=args)
         train_data_loader.set_sample_list_generator(train_reader, place)
@@ -140,8 +142,8 @@ def train_mobilenet():
             for img, label in train_data_loader():
                 t1 = time.time()
                 label = to_variable(label.numpy().astype('int64').reshape(
-                    int(args.batch_size //
-                        paddle.fluid.core.get_cuda_device_count()), 1))
+                    int(args.batch_size // place_num),
+                    1))
                 t_start = time.time()
 
                 # 4.1.1 call net()
-- 
GitLab
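
Note: a minimal sketch of the place-count logic this patch introduces. The helper name get_place_num is hypothetical (the patch inlines the same ternary expression in train.py); CPU_NUM and args.use_gpu are taken from the patched code.

    import os
    import paddle.fluid as fluid

    def get_place_num(use_gpu):
        # On GPU the batch is split across all visible CUDA devices;
        # on CPU it falls back to the CPU_NUM environment variable (default 1),
        # so the per-place batch size no longer depends on a GPU being present.
        if use_gpu:
            return fluid.core.get_cuda_device_count()
        return int(os.environ.get('CPU_NUM', 1))

    # The reader then derives the per-place batch size, e.g.
    # batch_size = int(settings.batch_size / place_num)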