From 15c2667143eb4068387e5621a130a18706cce0fa Mon Sep 17 00:00:00 2001
From: chengjuntao <18222160892@163.com>
Date: Tue, 25 Feb 2020 04:02:11 -0600
Subject: [PATCH] register fp16 for assign op (#22744)

* register fp16 for assign op, test=develop

* add op test for fp16, test=develop
---
 paddle/fluid/operators/assign_op.cc               |  3 ++
 .../fluid/tests/unittests/test_assign_op.py       | 32 ++++++++++++-------
 2 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 5c69ad94b3..7d4d8ee50d 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -114,6 +114,7 @@ DECLARE_INPLACE_OP_INFERER(AssignOpInplaceInferer, {"X", "Out"});
 }  // namespace paddle
 
 namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 REGISTER_OPERATOR(assign, ops::AssignOp,
                   ops::AssignGradMaker<paddle::framework::OpDesc>,
                   ops::AssignGradMaker<paddle::imperative::OpBase>,
@@ -122,11 +123,13 @@ REGISTER_OPERATOR(assign, ops::AssignOp,
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
                                int64_t, ops::AssignKernel, bool,
+                               ops::AssignKernel, plat::float16,
                                ops::AssignKernel);
 
 #ifdef PADDLE_WITH_CUDA
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                 ops::AssignKernel, int, ops::AssignKernel,
                                 int64_t, ops::AssignKernel, bool,
+                                ops::AssignKernel, plat::float16,
                                 ops::AssignKernel);
 #endif
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index dc58003272..914ddbfa2b 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -37,6 +37,20 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestAssignFP16Op(op_test.OpTest):
+    def setUp(self):
+        self.op_type = "assign"
+        x = np.random.random(size=(100, 10)).astype('float16')
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        self.check_output()
+
+    def test_backward(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
@@ -44,22 +58,18 @@ class TestAssignOpError(unittest.TestCase):
             x1 = fluid.create_lod_tensor(
                 np.array([[-1]]), [[1]], fluid.CPUPlace())
             self.assertRaises(TypeError, fluid.layers.assign, x1)
-            # When the type of input is Variable, the dtype of input must be float32, float64, int32, int64, bool.
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype="float16")
+            # When the type of input is Variable, the dtype of input must be float16, float32, float64, int32, int64, bool.
+            x3 = fluid.layers.data(name='x3', shape=[4], dtype="uint8")
             self.assertRaises(TypeError, fluid.layers.assign, x3)
-            x4 = fluid.layers.data(name='x4', shape=[4], dtype="uint8")
-            self.assertRaises(TypeError, fluid.layers.assign, x4)
             # When the type of input is numpy.ndarray, the dtype of input must be float32, int32.
-            x5 = np.array([[2.5, 2.5]], dtype='bool')
+            x4 = np.array([[2.5, 2.5]], dtype='bool')
+            self.assertRaises(TypeError, fluid.layers.assign, x4)
+            x5 = np.array([[2.5, 2.5]], dtype='float64')
             self.assertRaises(TypeError, fluid.layers.assign, x5)
-            x6 = np.array([[2.5, 2.5]], dtype='float16')
+            x6 = np.array([[2.5, 2.5]], dtype='int64')
             self.assertRaises(TypeError, fluid.layers.assign, x6)
-            x7 = np.array([[2.5, 2.5]], dtype='float64')
+            x7 = np.array([[2.5, 2.5]], dtype='uint8')
             self.assertRaises(TypeError, fluid.layers.assign, x7)
-            x8 = np.array([[2.5, 2.5]], dtype='int64')
-            self.assertRaises(TypeError, fluid.layers.assign, x8)
-            x9 = np.array([[2.5, 2.5]], dtype='uint8')
-            self.assertRaises(TypeError, fluid.layers.assign, x9)
 
 
 if __name__ == '__main__':
--
GitLab
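
Editor's note, not part of the patch: a minimal sketch of exercising the newly
registered float16 kernel end to end through the user-facing fluid API, rather
than through the OpTest harness used in the unit test above. The helper name
run_fp16_assign is hypothetical, and the snippet assumes a build that includes
this patch; since the patch registers both CPU and CUDA float16 kernels for
assign, either place should handle the plain copy.

    import numpy as np
    import paddle.fluid as fluid

    def run_fp16_assign():
        # Build a program whose only op is an assign of a float16 input;
        # before this patch, fluid.layers.assign rejected float16 Variables.
        main_prog, startup_prog = fluid.Program(), fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.layers.data(
                name='x', shape=[100, 10], dtype='float16',
                append_batch_size=False)
            out = fluid.layers.assign(x)

        place = (fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda()
                 else fluid.CPUPlace())
        exe = fluid.Executor(place)
        exe.run(startup_prog)

        x_np = np.random.random((100, 10)).astype('float16')
        out_np, = exe.run(main_prog, feed={'x': x_np}, fetch_list=[out])
        # assign is an identity copy, so the fetched output should match
        # the input exactly.
        assert (out_np == x_np).all()

    run_fp16_assign()

The TestAssignFP16Op case in the patch covers the same kernel through
check_output/check_grad; this sketch only shows the direct layer call.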