diff --git a/python/paddle/fluid/tests/unittests/test_backward.py b/python/paddle/fluid/tests/unittests/test_backward.py
index dc98e04775f3762b931a4ec54ca21468fb3081fb..b108e0e069c8c42005061d1bb9a705f680b35a0e 100644
--- a/python/paddle/fluid/tests/unittests/test_backward.py
+++ b/python/paddle/fluid/tests/unittests/test_backward.py
@@ -16,72 +16,205 @@ from __future__ import print_function
 
 import unittest
 import paddle.fluid as fluid
-from simple_nets import init_data
-
-
-def case1_fill_grad_vars():
-    x = fluid.layers.data(name='image', shape=[784], dtype='float32')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    feature = fluid.layers.fc(input=x, size=20, act=None)
-    part1, part2 = fluid.layers.split(feature, num_or_sections=[10, 10], dim=1)
-    # Note that: part2 is not used.
-    loss = fluid.layers.cross_entropy(input=part1, label=label)
-    loss = fluid.layers.mean(loss)
-    return loss
-
-
-def case2_prune_no_grad_branch():
-    x = fluid.layers.data(name='image', shape=[784], dtype='float32')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    feature = fluid.layers.fc(input=x, size=10, act=None)
-    label = fluid.layers.cast(label, dtype="float32")
-    label = fluid.layers.cast(label, dtype='int64')
-    # Note that the label is not persistable in fluid.layers.cross_entropy.
-    loss = fluid.layers.cross_entropy(input=feature, label=label)
-    loss = fluid.layers.mean(loss)
-    return loss
-
-
-def case3_prune_no_grad_branch2():
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    label = fluid.layers.cast(label, dtype="float32")
-    label = fluid.layers.cast(label, dtype='int64')
-    out = fluid.layers.one_hot(input=label, depth=100)
-    loss = fluid.layers.mean(out)
-    return loss
-
-
-def case4_with_no_grad_op_maker():
-    out = fluid.layers.gaussian_random(shape=[20, 30])
-    loss = fluid.layers.mean(out)
-    return loss
+import numpy as np
+
+
+class BackwardNet(object):
+    """
+    Abstract base class.
+    Every net that inherits this class should implement two functions:
+        build_model: build the net to test the backward logic
+        init_data: fake the input data to feed all programs.
+    """
+
+    def __init__(self):
+        self.stop_gradient_grad_vars = set()
+        self.no_grad_vars = set()
+        self.params_names = set()
+        self.op_path = []
+
+    def build_model(self):
+        """
+        Build the net to test the backward logic.
+        :return: loss
+        """
+        raise NotImplementedError
+
+    def init_data(self):
+        """
+        Fake the input data to feed all programs.
+        :return: dict, {'var_name': var_data}
+        """
+        raise NotImplementedError
 
 
 class TestBackward(unittest.TestCase):
-    def check_backward(self, model, feed_dict):
-        place = fluid.CPUPlace()
+    """
+    All related test classes should inherit this class,
+    and only implement the test_backward function.
+ """ + + def _check_all(self, net): + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() exe = fluid.Executor(place) main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): - loss = model() + loss = net.build_model() + self._check_backward(loss, main) optimizer = fluid.optimizer.SGD(learning_rate=0.1) optimizer.minimize(loss) - - exe.run(fluid.default_startup_program()) - exe.run(feed=feed_dict) - + exe.run(startup) + exe.run(feed=net.init_data()) + + def _check_backward(self, loss, main_program): + global_block_idx = self.global_block_idx + params_grads = self._check_params_grad(loss) + # 1.1 get_stop_gradients + no_grad_dict = self._check_stop_gradient(main_program) + # 1.2 find_op_path + op_path, block_no_grad_set = self._check_op_path( + main_program.block(global_block_idx), [loss], [], no_grad_dict) + # 1.3 _find_no_grad_vars + no_grad_vars = self._check_find_no_grad_vars( + main_program.block(global_block_idx), op_path, [loss], + block_no_grad_set) + # update no_grad_dict + block_no_grad_set.update(no_grad_vars) + no_grad_dict[global_block_idx].update( + list(map(fluid.backward._append_grad_suffix_, block_no_grad_set))) + + def _check_params_grad(self, loss, parameter_list=None, no_grad_set=None): + params_grads = fluid.backward.append_backward(loss, parameter_list, + no_grad_set) + params_names = set( + [param_var.name for (param_var, grad_var) in params_grads]) + self.assertSetEqual(params_names, self.net.params_names) + + return params_grads + + def _check_stop_gradient(self, program): + no_grad_dict = fluid.backward._get_stop_gradients_(program) + if no_grad_dict is not None and isinstance(no_grad_dict, dict): + self.assertSetEqual(no_grad_dict[self.global_block_idx], + self.net.stop_gradient_grad_vars) + + return no_grad_dict + + def _check_op_path(self, root_block, outputs, inputs=[], no_grad_dict=None): + if no_grad_dict is None or not isinstance(no_grad_dict, dict): + block_no_grad_set = None + else: + block_no_grad_set = set( + map(fluid.backward._strip_grad_suffix_, no_grad_dict[ + self.global_block_idx])) + op_path = fluid.backward._find_op_path_(root_block, outputs, inputs, + block_no_grad_set) + op_types = [op.type for op in op_path] + self.assertListEqual(op_types, self.net.op_path) + + return op_path, block_no_grad_set + + def _check_find_no_grad_vars(self, root_block, op_path, targets, + block_no_grad_set): + no_grad_vars = fluid.backward._find_no_grad_vars( + root_block, op_path, targets, block_no_grad_set) + self.assertSetEqual(no_grad_vars, self.net.no_grad_vars) + + return no_grad_vars + + +class SimpleNet(BackwardNet): + def __init__(self): + super(BackwardNet, self).__init__() + self.stop_gradient_grad_vars = set([ + u'x_no_grad@GRAD', u'x2_no_grad@GRAD', u'x3_no_grad@GRAD', + u'label_no_grad@GRAD' + ]) + self.no_grad_vars = set() + self.params_names = set([u'w2v', u'fc_predict.b_0', u'fc_w']) + self.op_path = [ + u'lookup_table_v2', + u'lookup_table_v2', # embedding + u'elementwise_add', # merge + u'mul', + u'elementwise_add', + u'softmax', # fc + u'elementwise_sub', + u'square', + u'mean' + ] # loss + self.shape = [16, 50] + + def init_data(self): + assert len(self.shape) == 2 + x = np.random.randint(0, 90, self.shape).astype('int64') + x2 = np.random.randint(0, 90, self.shape).astype('int64') + x3 = np.random.randint(0, 90, self.shape).astype('int64') + label = np.random.random([self.shape[0], 1]).astype('float32') + return { + 'x_no_grad': x, + 'x2_no_grad': x2, + 'x3_no_grad': x3, + 
+            'label_no_grad': label
+        }
+
+    def build_model(self):
+        # stop_gradient = True in input
+        x = fluid.data(name='x_no_grad', shape=self.shape, dtype='int64')
+        x2 = fluid.data(name='x2_no_grad', shape=self.shape, dtype='int64')
+        x3 = fluid.data(name='x3_no_grad', shape=self.shape, dtype='int64')
+        label = fluid.data(
+            name='label_no_grad', shape=[self.shape[0], 1], dtype='float32')
+        # shared layer, the grad of 'w2v' will be summed and renamed.
+        # To test _addup_repetitive_outputs_
+        x_emb = fluid.embedding(
+            x, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v'))
+        x2_emb = fluid.embedding(
+            x2, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v'))
+        x3_emb = fluid.embedding(
+            x3, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v'))
+        # merge layers
+        x_merge = fluid.layers.elementwise_add(x_emb, x2_emb, name='x_add_x2')
+        x2_merge = fluid.layers.elementwise_add(
+            x2_emb, x3_emb, name='x2_add_x3')
+        # shared fc_w
+        predict = fluid.layers.fc(input=x_merge,
+                                  size=1,
+                                  act='softmax',
+                                  param_attr=fluid.ParamAttr(name='fc_w'),
+                                  name='fc_predict')
+        # this layer is not used to calculate the loss
+        fc_no_use = fluid.layers.fc(input=x2_merge,
+                                    size=1,
+                                    act='sigmoid',
+                                    param_attr=fluid.ParamAttr(name='fc_w'),
+                                    name='fc_no_use')
+        # loss
+        cost = fluid.layers.square_error_cost(input=predict, label=label)
+        loss = fluid.layers.mean(cost, name='mean_loss')
+
+        return loss
+
+
+class TestSimpleNet(TestBackward):
     def test_backward(self):
-        batch_size = 2
-        img, label = init_data(batch_size, img_shape=[784], label_range=9)
-        feed_dict = {'image': img, 'label': label}
-        self.check_backward(case1_fill_grad_vars, feed_dict)
-        self.check_backward(case2_prune_no_grad_branch, feed_dict)
-        self.check_backward(case3_prune_no_grad_branch2, {'label': label})
-        self.check_backward(case4_with_no_grad_op_maker, {})
+        """
+        Instantiate each net class to test backward.
+        """
+        self.global_block_idx = 0
+        self.net = SimpleNet()
+        self._check_all(self.net)
+
+
+# TODO(Aurelius84): add conditional network test
+class ConditionalNet(BackwardNet):
+    def __init__(self):
+        super(ConditionalNet, self).__init__()
 
 
 if __name__ == '__main__':