From 1578c60bdda12501e5951aa9b75f6bed39833b22 Mon Sep 17 00:00:00 2001
From: Krzysztof Binias
Date: Thu, 21 Feb 2019 12:36:56 +0100
Subject: [PATCH] Add new ut and remove unnecessary code

test=develop
---
 .../operators/mkldnn/activation_mkldnn_op.cc  | 10 ---
 .../mkldnn/test_activation_mkldnn_op.py       | 61 ++++++++++++++++++-
 2 files changed, 60 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index e16b6f78d1..223adcaa6b 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -52,11 +52,6 @@ class MKLDNNActivationKernel
                    "Wrong layout/format set for Input x tensor");
 
     Functor functor;
-
-    auto attrs = functor.GetAttrs();
-    for (auto &attr : attrs) {
-      *attr.second = ctx.Attr<float>(attr.first);
-    }
     functor(ctx);
   }
 };
@@ -76,11 +71,6 @@ class MKLDNNActivationGradKernel
         "is_test attribute should be set to False in training phase.");
 
     Functor functor;
-
-    auto attrs = functor.GetAttrs();
-    for (auto &attr : attrs) {
-      *attr.second = ctx.Attr<float>(attr.first);
-    }
     functor(ctx);
   }
 };
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
index ad94a4b21c..4c211ef68b 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
@@ -18,8 +18,8 @@ import unittest
 import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from scipy.special import expit
 from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
+import paddle.fluid as fluid
 
 
 class TestMKLDNNReluDim2(TestRelu):
@@ -97,5 +97,64 @@ class TestMKLDNNAbsDim4(TestAbs):
         self.attrs = {"use_mkldnn": True}
 
 
+# Check if primitives already exist in backward
+class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
+    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
+        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+
+    def test_check_forward_backward(self):
+        place = core.CPUPlace()
+
+        np.random.seed(123)
+        x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)
+        out = np.abs(x)
+
+        out_grad = np.random.random_sample(x.shape).astype(np.float32)
+        x_grad = out_grad * np.sign(x)  # Abs grad calculation
+
+        var_dict = {'x': x, 'out': out, 'out@GRAD': out_grad, 'x@GRAD': x_grad}
+        var_names = list(var_dict.keys())
+        ground_truth = {name: var_dict[name] for name in var_names}
+
+        program = fluid.Program()
+        with fluid.program_guard(program):
+            block = program.global_block()
+            for name in ground_truth:
+                block.create_var(
+                    name=name,
+                    dtype='float32',
+                    shape=ground_truth[name].shape)
+
+            abs_op = block.append_op(
+                type="abs",
+                inputs={"X": block.var('x')},
+                outputs={"Out": block.var('out')},
+                attrs={"use_mkldnn": True})
+
+            # Generate backward op_desc
+            grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
+                abs_op.desc, set(), [])
+            grad_op_desc = grad_op_desc_list[0]
+            new_op_desc = block.desc.append_op()
+            new_op_desc.copy_from(grad_op_desc)
+            for var_name in grad_op_desc.output_arg_names():
+                block.desc.var(var_name.encode("ascii"))
+            grad_op_desc.infer_var_type(block.desc)
+            grad_op_desc.infer_shape(block.desc)
+            for arg in grad_op_desc.output_arg_names():
+                grad_var = block.desc.find_var(arg.encode("ascii"))
+                grad_var.set_dtype(core.VarDesc.VarType.FP32)
+
+            exe = fluid.Executor(place)
+
+            # Do at least 2 iterations so the second run reuses primitives
+            for i in range(2):
+                out = exe.run(program,
+                              feed={name: var_dict[name] for name in ['x', 'out@GRAD']},
+                              fetch_list=['x@GRAD'])
+
+            self.__assert_close(x_grad, out[0], "x@GRAD")
+
+
 if __name__ == '__main__':
     unittest.main()
--
GitLab
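
Note: the two calls to exe.run() in the new test are deliberate. The first run
creates the MKL-DNN forward and backward primitives; the second run hits the
cached ones, which is exactly the "primitives already exist" path the test
class is named after. A quick way to exercise just this test locally is a
minimal runner like the sketch below; it assumes paddle is importable (built
with MKL-DNN support) and the runner boilerplate is illustrative, not part of
the patch:

    # Hypothetical standalone runner for the new test case; assumes a Paddle
    # build with MKL-DNN enabled is on PYTHONPATH.
    import unittest

    from paddle.fluid.tests.unittests.mkldnn.test_activation_mkldnn_op import (
        TestMKLDNNAbsPrimitivesAlreadyExist)

    if __name__ == '__main__':
        # Load only the primitive-reuse test case and run it verbosely.
        suite = unittest.TestLoader().loadTestsFromTestCase(
            TestMKLDNNAbsPrimitivesAlreadyExist)
        unittest.TextTestRunner(verbosity=2).run(suite)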