From be2d3ae62919652715254d77b44bb1cced949f26 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Mon, 21 Jan 2019 14:05:25 +0800
Subject: [PATCH] Add debug info

---
 python/paddle/fluid/layer_helper.py           |   2 +
 .../tests/unittests/test_imperative_resnet.py | 326 +++++++++++++++---
 2 files changed, 282 insertions(+), 46 deletions(-)

diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index df5591fb2..4d3484d68 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -437,8 +437,10 @@ class LayerHelper(object):
         # NOTE(dzhwinter): some activation support inplace compution.
         # NOTE(minqiyang): currently, we don't support inplace in imperative mode
         if not force_no_inplace and core.IsInplace(act_type):
+            print("inplace")
             tmp = input_var
         else:
+            print("not inplace")
             tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
index af821dfc0..f0c101694 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -20,12 +20,13 @@ import six
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.fluid.imperative.nn import Conv2D, Pool2D, BatchNorm, FC
 from paddle.fluid.imperative.base import to_variable
 from test_imperative_base import new_program_scope

-batch_size = 8
+batch_size = 1
 train_parameters = {
     "input_size": [3, 224, 224],
     "input_mean": [0.485, 0.456, 0.406],
@@ -88,11 +89,11 @@ class ConvBNLayer(fluid.imperative.Layer):
             act=None,
             bias_attr=None)

-        self._batch_norm = BatchNorm(num_filters, act=act)
+        # self._batch_norm = BatchNorm(num_filters, act=act)

     def forward(self, inputs):
         y = self._conv(inputs)
-        y = self._batch_norm(y)
+        # y = self._batch_norm(y)

         return y

@@ -139,7 +140,10 @@ class BottleneckBlock(fluid.imperative.Layer):
         else:
             short = self.short(inputs)

-        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+        y = fluid.layers.elementwise_add(x=short, y=conv2)
+
+        layer_helper = LayerHelper('elementwise_add_activation', act='relu')
+        return layer_helper.append_activation(y, force_no_inplace=True)


 class ResNet(fluid.imperative.Layer):
@@ -200,16 +204,233 @@ class TestImperativeResnet(unittest.TestCase):
-    def test_resnet_gpu_float32(self):
+    # def test_resnet_gpu_float32(self):
+        # seed = 90
+
+        # batch_size = train_parameters["batch_size"]
+        # with fluid.imperative.guard():
+            # fluid.default_startup_program().random_seed = seed
+            # fluid.default_main_program().random_seed = seed
+
+            # resnet = ResNet()
+            # optimizer = optimizer_setting(train_parameters)
+            # np.random.seed(seed)
+            # import random
+            # random.seed = seed
+            # train_reader = paddle.batch(
+                # paddle.dataset.flowers.train(use_xmap=False),
+                # batch_size=batch_size)
+
+            # dy_param_init_value = {}
+            # for param in fluid.default_main_program().global_block(
+            # ).all_parameters():
+                # dy_param_init_value[param.name] = param._numpy()
+
+            # for batch_id, data in enumerate(train_reader()):
+                # if batch_id >= 1:
+                    # break
+
+                # dy_x_data = np.array(
+                    # [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
+                # y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    # batch_size, 1)
+
+                # img = to_variable(dy_x_data)
+                # label = to_variable(y_data)
+                # label._stop_gradient = True
+
+                # out = resnet(img)
+                # loss = fluid.layers.cross_entropy(input=out, label=label)
+                # avg_loss = fluid.layers.mean(x=loss)
+
+                # dy_out = avg_loss._numpy()
+
+                # if batch_id == 0:
+                    # for param in fluid.default_main_program().global_block(
+                    # ).all_parameters():
+                        # if param.name not in dy_param_init_value:
+                            # dy_param_init_value[param.name] = param._numpy()
+
+                # avg_loss._backward()
+                # dy_grad_value = {}
+                # for param in fluid.default_main_program().global_block(
+                # ).all_parameters():
+                    # if not param.stop_gradient:
+                        # np_array = np.array(param._ivar._grad_ivar().value()
+                                            # .get_tensor())
+                        # dy_grad_value[param.name + core.grad_var_suffix(
+                        # )] = np_array
+
+                # optimizer.minimize(avg_loss)
+
+                # dy_param_value = {}
+                # for param in fluid.default_main_program().global_block(
+                # ).all_parameters():
+                    # dy_param_value[param.name] = param._numpy()
+
+        # with new_program_scope():
+            # fluid.default_startup_program().random_seed = seed
+            # fluid.default_main_program().random_seed = seed
+
+            # exe = fluid.Executor(fluid.CUDAPlace(0))
+
+            # resnet = ResNet()
+            # optimizer = optimizer_setting(train_parameters)
+
+            # np.random.seed(seed)
+            # import random
+            # random.seed = seed
+            # train_reader = paddle.batch(
+                # paddle.dataset.flowers.train(use_xmap=False),
+                # batch_size=batch_size)
+
+            # img = fluid.layers.data(
+                # name='pixel', shape=[3, 224, 224], dtype='float32')
+            # label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            # out = resnet(img)
+            # loss = fluid.layers.cross_entropy(input=out, label=label)
+            # avg_loss = fluid.layers.mean(x=loss)
+            # optimizer.minimize(avg_loss)
+
+            # # initialize params and fetch them
+            # static_param_init_value = {}
+            # static_param_name_list = []
+            # static_grad_name_list = []
+            # for param in fluid.default_startup_program().global_block(
+            # ).all_parameters():
+                # static_param_name_list.append(param.name)
+            # for param in fluid.default_main_program().global_block(
+            # ).all_parameters():
+                # if not param.stop_gradient:
+                    # static_grad_name_list.append(param.name +
+                                                 # core.grad_var_suffix())
+
+            # out = exe.run(fluid.default_startup_program(),
+                          # fetch_list=static_param_name_list)
+
+            # for i in range(len(static_param_name_list)):
+                # static_param_init_value[static_param_name_list[i]] = out[i]
+
+            # for batch_id, data in enumerate(train_reader()):
+                # if batch_id >= 1:
+                    # break
+
+                # static_x_data = np.array(
+                    # [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
+                # y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    # [batch_size, 1])
+
+                # fetch_list = [avg_loss.name]
+                # fetch_list.extend(static_param_name_list)
+                # fetch_list.extend(static_grad_name_list)
+                # out = exe.run(fluid.default_main_program(),
+                              # feed={"pixel": static_x_data,
+                                    # "label": y_data},
+                              # fetch_list=fetch_list)
+
+                # static_param_value = {}
+                # static_grad_value = {}
+                # static_out = out[0]
+                # param_start_pos = 1
+                # grad_start_pos = len(static_param_name_list) + param_start_pos
+                # for i in range(param_start_pos,
+                               # len(static_param_name_list) + param_start_pos):
+                    # static_param_value[static_param_name_list[
+                        # i - param_start_pos]] = out[i]
+                # for i in range(grad_start_pos,
+                               # len(static_grad_name_list) + grad_start_pos):
+                    # static_grad_value[static_grad_name_list[
+                        # i - grad_start_pos]] = out[i]
+
+        # self.assertTrue(np.allclose(static_out, dy_out))
+
+        # self.assertEqual(len(dy_param_init_value), len(static_param_init_value))
+        # for key, value in six.iteritems(static_param_init_value):
+            # self.assertTrue(np.allclose(value, dy_param_init_value[key]))
+
+        # self.assertEqual(len(dy_grad_value), len(static_grad_value))
+        # # TODO(minqiyang): find a way to align the gradient
+        # # for key, value in six.iteritems(static_grad_value):
+        # #     self.assertTrue(
+        # #         np.allclose(value, dy_grad_value[key]))
+
+        # self.assertEqual(len(dy_param_value), len(static_param_value))
+        # # for key, value in six.iteritems(static_param_value):
+        # #     self.assertTrue(np.allclose(value, dy_param_value[key]))
+
+    def test_resnet_cpu_float32(self):
         seed = 90

         batch_size = train_parameters["batch_size"]
-        with fluid.imperative.guard():
+        # with fluid.imperative.guard(device=None):
+            # fluid.default_startup_program().random_seed = seed
+            # fluid.default_main_program().random_seed = seed
+
+            # resnet = ResNet()
+            # optimizer = optimizer_setting(train_parameters)
+            # np.random.seed(seed)
+            # import random
+            # random.seed = seed
+            # train_reader = paddle.batch(
+                # paddle.dataset.flowers.train(use_xmap=False),
+                # batch_size=batch_size)
+
+            # dy_param_init_value = {}
+            # for param in fluid.default_main_program().global_block(
+            # ).all_parameters():
+                # dy_param_init_value[param.name] = param._numpy()
+
+            # for batch_id, data in enumerate(train_reader()):
+                # if batch_id >= 1:
+                    # break
+
+                # dy_x_data = np.array(
+                    # [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
+                # y_data = np.array([x[1] for x in data]).astype('int64').reshape(
+                    # batch_size, 1)
+
+                # img = to_variable(dy_x_data)
+                # label = to_variable(y_data)
+                # label._stop_gradient = True
+
+                # out = resnet(img)
+                # loss = fluid.layers.cross_entropy(input=out, label=label)
+                # avg_loss = fluid.layers.mean(x=loss)
+
+                # dy_out = avg_loss._numpy()
+
+                # if batch_id == 0:
+                    # for param in fluid.default_main_program().global_block(
+                    # ).all_parameters():
+                        # if param.name not in dy_param_init_value:
+                            # dy_param_init_value[param.name] = param._numpy()
+
+                # avg_loss._backward()
+                # dy_grad_value = {}
+                # for param in fluid.default_main_program().global_block(
+                # ).all_parameters():
+                    # if not param.stop_gradient:
+                        # np_array = np.array(param._ivar._grad_ivar().value()
+                                            # .get_tensor())
+                        # dy_grad_value[param.name + core.grad_var_suffix(
+                        # )] = np_array
+
+                # optimizer.minimize(avg_loss)
+
+                # dy_param_value = {}
+                # for param in fluid.default_main_program().global_block(
+                # ).all_parameters():
+                    # dy_param_value[param.name] = param._numpy()
+
+        with new_program_scope():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed

+            exe = fluid.Executor(fluid.CPUPlace())
+
             resnet = ResNet()
             optimizer = optimizer_setting(train_parameters)
+
             np.random.seed(seed)
             import random
             random.seed = seed
@@ -217,10 +438,32 @@ class TestImperativeResnet(unittest.TestCase):
                 paddle.dataset.flowers.train(use_xmap=False),
                 batch_size=batch_size)

+            img = fluid.layers.data(
+                name='pixel', shape=[3, 224, 224], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            out = resnet(img)
+            loss = fluid.layers.cross_entropy(input=out, label=label)
+            avg_loss = fluid.layers.mean(x=loss)
+            optimizer.minimize(avg_loss)
+
+            # initialize params and fetch them
             dy_param_init_value = {}
+            dy_param_name_list = []
+            dy_grad_name_list = []
+            for param in fluid.default_startup_program().global_block(
+            ).all_parameters():
+                dy_param_name_list.append(param.name)
             for param in fluid.default_main_program().global_block(
             ).all_parameters():
-                dy_param_init_value[param.name] = param._numpy()
+                if not param.stop_gradient:
+                    dy_grad_name_list.append(param.name + core.grad_var_suffix(
+                    ))
+
+            out = exe.run(fluid.default_startup_program(),
+                          fetch_list=dy_param_name_list)
+
+            for i in range(len(dy_param_name_list)):
+                dy_param_init_value[dy_param_name_list[i]] = out[i]

             for batch_id, data in enumerate(train_reader()):
                 if batch_id >= 1:
@@ -229,46 +472,35 @@ class TestImperativeResnet(unittest.TestCase):
                 dy_x_data = np.array(
                     [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
                 y_data = np.array([x[1] for x in data]).astype('int64').reshape(
-                    batch_size, 1)
-
-                img = to_variable(dy_x_data)
-                label = to_variable(y_data)
-                label._stop_gradient = True
-
-                out = resnet(img)
-                loss = fluid.layers.cross_entropy(input=out, label=label)
-                avg_loss = fluid.layers.mean(x=loss)
-
-                dy_out = avg_loss._numpy()
-
-                if batch_id == 0:
-                    for param in fluid.default_main_program().global_block(
-                    ).all_parameters():
-                        if param.name not in dy_param_init_value:
-                            dy_param_init_value[param.name] = param._numpy()
-
-                avg_loss._backward()
-                dy_grad_value = {}
-                for param in fluid.default_main_program().global_block(
-                ).all_parameters():
-                    if not param.stop_gradient:
-                        np_array = np.array(param._ivar._grad_ivar().value()
-                                            .get_tensor())
-                        dy_grad_value[param.name + core.grad_var_suffix(
-                        )] = np_array
+                    [batch_size, 1])

-                optimizer.minimize(avg_loss)
+                fetch_list = [avg_loss.name]
+                fetch_list.extend(dy_param_name_list)
+                fetch_list.extend(dy_grad_name_list)
+                out = exe.run(fluid.default_main_program(),
+                              feed={"pixel": dy_x_data,
+                                    "label": y_data},
+                              fetch_list=fetch_list)

                 dy_param_value = {}
-                for param in fluid.default_main_program().global_block(
-                ).all_parameters():
-                    dy_param_value[param.name] = param._numpy()
+                dy_grad_value = {}
+                dy_out = out[0]
+                param_start_pos = 1
+                grad_start_pos = len(dy_param_name_list) + param_start_pos
+                for i in range(param_start_pos,
+                               len(dy_param_name_list) + param_start_pos):
+                    dy_param_value[dy_param_name_list[i -
+                                                      param_start_pos]] = out[i]
+                for i in range(grad_start_pos,
+                               len(dy_grad_name_list) + grad_start_pos):
+                    dy_grad_value[dy_grad_name_list[i - grad_start_pos]] = out[
+                        i]

         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed

-            exe = fluid.Executor(fluid.CUDAPlace(0))
+            exe = fluid.Executor(fluid.CPUPlace())

             resnet = ResNet()
             optimizer = optimizer_setting(train_parameters)
@@ -345,15 +577,17 @@ class TestImperativeResnet(unittest.TestCase):
             self.assertTrue(np.allclose(value, dy_param_init_value[key]))

         self.assertEqual(len(dy_grad_value), len(static_grad_value))
-        # TODO(minqiyang): find a way to align the gradient
-        # for key, value in six.iteritems(static_grad_value):
-        #     self.assertTrue(
-        #         np.allclose(value, dy_grad_value[key]))
+        for key, value in six.iteritems(static_grad_value):
+            if not np.allclose(value, dy_grad_value[key]):
+                # print(key, value, dy_grad_value[key])
+                print(key)
+            # self.assertTrue(
+            #     np.allclose(value, dy_grad_value[key]))

         self.assertEqual(len(dy_param_value), len(static_param_value))
-        # for key, value in six.iteritems(static_param_value):
-
-        #     self.assertTrue(np.allclose(value, dy_param_value[key]))
+        for key, value in six.iteritems(static_param_value):
+            print(key)
+            # self.assertTrue(np.allclose(value, dy_param_value[key]))


 if __name__ == '__main__':
--
GitLab