From 61ec75c55200c4fda3eb9eadda0b7a38b8d66417 Mon Sep 17 00:00:00 2001
From: chengjuntao <18222160892@163.com>
Date: Wed, 4 Mar 2020 02:47:52 -0600
Subject: [PATCH] register fp16 for assign OP, test=release/1.7 (#22842)

---
 paddle/fluid/operators/assign_op.cc          |  3 ++
 .../fluid/tests/unittests/test_assign_op.py  | 32 ++++++++++++-------
 2 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 5c69ad94b36..7d4d8ee50d3 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -114,6 +114,7 @@ DECLARE_INPLACE_OP_INFERER(AssignOpInplaceInferer, {"X", "Out"});
 }  // namespace paddle
 
 namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 REGISTER_OPERATOR(assign, ops::AssignOp,
                   ops::AssignGradMaker<paddle::framework::OpDesc>,
                   ops::AssignGradMaker<paddle::imperative::OpBase>,
@@ -122,11 +123,13 @@ REGISTER_OPERATOR(assign, ops::AssignOp,
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
                                int64_t, ops::AssignKernel, bool,
+                               ops::AssignKernel, plat::float16,
                                ops::AssignKernel);
 
 #ifdef PADDLE_WITH_CUDA
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                 ops::AssignKernel, int, ops::AssignKernel,
                                 int64_t, ops::AssignKernel, bool,
+                                ops::AssignKernel, plat::float16,
                                 ops::AssignKernel);
 #endif
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index dc580032720..914ddbfa2b7 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -37,6 +37,20 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestAssignFP16Op(op_test.OpTest):
+    def setUp(self):
+        self.op_type = "assign"
+        x = np.random.random(size=(100, 10)).astype('float16')
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        self.check_output()
+
+    def test_backward(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
@@ -44,22 +58,18 @@ class TestAssignOpError(unittest.TestCase):
             x1 = fluid.create_lod_tensor(
                 np.array([[-1]]), [[1]], fluid.CPUPlace())
             self.assertRaises(TypeError, fluid.layers.assign, x1)
-            # When the type of input is Variable, the dtype of input must be float32, float64, int32, int64, bool.
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype="float16")
+            # When the type of input is Variable, the dtype of input must be float16, float32, float64, int32, int64, bool.
+            x3 = fluid.layers.data(name='x3', shape=[4], dtype="uint8")
             self.assertRaises(TypeError, fluid.layers.assign, x3)
-            x4 = fluid.layers.data(name='x4', shape=[4], dtype="uint8")
-            self.assertRaises(TypeError, fluid.layers.assign, x4)
             # When the type of input is numpy.ndarray, the dtype of input must be float32, int32.
-            x5 = np.array([[2.5, 2.5]], dtype='bool')
+            x4 = np.array([[2.5, 2.5]], dtype='bool')
+            self.assertRaises(TypeError, fluid.layers.assign, x4)
+            x5 = np.array([[2.5, 2.5]], dtype='float64')
             self.assertRaises(TypeError, fluid.layers.assign, x5)
-            x6 = np.array([[2.5, 2.5]], dtype='float16')
+            x6 = np.array([[2.5, 2.5]], dtype='int64')
             self.assertRaises(TypeError, fluid.layers.assign, x6)
-            x7 = np.array([[2.5, 2.5]], dtype='float64')
+            x7 = np.array([[2.5, 2.5]], dtype='uint8')
             self.assertRaises(TypeError, fluid.layers.assign, x7)
-            x8 = np.array([[2.5, 2.5]], dtype='int64')
-            self.assertRaises(TypeError, fluid.layers.assign, x8)
-            x9 = np.array([[2.5, 2.5]], dtype='uint8')
-            self.assertRaises(TypeError, fluid.layers.assign, x9)
 
 
 if __name__ == '__main__':
-- 
GitLab
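Usage sketch (not part of the patch): once the float16 kernel is registered, fluid.layers.assign should accept a float16 Variable end to end, which is what the new TestAssignFP16Op exercises through OpTest. The snippet below is a minimal illustration against the fluid 1.7-era API used by the test file; the program setup, variable names, shape, and CPUPlace choice are assumptions made for the example, not anything prescribed by the patch.

    import numpy as np
    import paddle.fluid as fluid

    # Assemble a tiny program that routes a float16 tensor through assign.
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        x = fluid.layers.data(
            name='x', shape=[100, 10], dtype='float16', append_batch_size=False)
        out = fluid.layers.assign(x)  # dispatches to the newly registered fp16 kernel

    exe = fluid.Executor(fluid.CPUPlace())
    x_np = np.random.random(size=(100, 10)).astype('float16')
    res, = exe.run(main_prog, feed={'x': x_np}, fetch_list=[out])
    assert res.dtype == np.float16  # dtype survives the assign round trip

Swapping fluid.CPUPlace() for fluid.CUDAPlace(0) would exercise the CUDA-side registration added by the same patch.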