Unverified commit 61ec75c5, authored by chengjuntao, committed by GitHub

register fp16 for assign OP, test=release/1.7 (#22842)
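In effect, the commit registers a float16 kernel for the assign operator on both CPU and CUDA, so a float16 Variable can be passed to fluid.layers.assign. A minimal usage sketch, assuming the PaddlePaddle 1.7-era fluid API (the variable names, shapes, and feed data below are illustrative, not part of the commit):

    import numpy as np
    import paddle.fluid as fluid

    # Build a small program that assigns a float16 Variable to a new output.
    x = fluid.layers.data(name='x', shape=[100, 10], dtype='float16',
                          append_batch_size=False)
    out = fluid.layers.assign(x)  # accepted once the float16 kernel is registered

    # Run on GPU if available, otherwise CPU; both now register a float16 assign kernel.
    place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
    exe = fluid.Executor(place)
    x_np = np.random.random((100, 10)).astype('float16')
    out_np, = exe.run(fluid.default_main_program(),
                      feed={'x': x_np}, fetch_list=[out])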

@@ -114,6 +114,7 @@ DECLARE_INPLACE_OP_INFERER(AssignOpInplaceInferer, {"X", "Out"});
 }  // namespace paddle

 namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 REGISTER_OPERATOR(assign, ops::AssignOp,
                   ops::AssignGradMaker<paddle::framework::OpDesc>,
                   ops::AssignGradMaker<paddle::imperative::OpBase>,
@@ -122,11 +123,13 @@ REGISTER_OPERATOR(assign, ops::AssignOp,
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
                                int64_t, ops::AssignKernel, bool,
+                               ops::AssignKernel, plat::float16,
                                ops::AssignKernel);

 #ifdef PADDLE_WITH_CUDA
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                 ops::AssignKernel, int, ops::AssignKernel,
                                 int64_t, ops::AssignKernel, bool,
+                                ops::AssignKernel, plat::float16,
                                 ops::AssignKernel);
 #endif
@@ -37,6 +37,20 @@ class TestAssignOp(op_test.OpTest):
         self.check_grad(['X'], 'Out')


+class TestAssignFP16Op(op_test.OpTest):
+    def setUp(self):
+        self.op_type = "assign"
+        x = np.random.random(size=(100, 10)).astype('float16')
+        self.inputs = {'X': x}
+        self.outputs = {'Out': x}
+
+    def test_forward(self):
+        self.check_output()
+
+    def test_backward(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
@@ -44,22 +58,18 @@ class TestAssignOpError(unittest.TestCase):
             x1 = fluid.create_lod_tensor(
                 np.array([[-1]]), [[1]], fluid.CPUPlace())
             self.assertRaises(TypeError, fluid.layers.assign, x1)
-            # When the type of input is Variable, the dtype of input must be float32, float64, int32, int64, bool.
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype="float16")
+            # When the type of input is Variable, the dtype of input must be float16, float32, float64, int32, int64, bool.
+            x3 = fluid.layers.data(name='x3', shape=[4], dtype="uint8")
             self.assertRaises(TypeError, fluid.layers.assign, x3)
-            x4 = fluid.layers.data(name='x4', shape=[4], dtype="uint8")
-            self.assertRaises(TypeError, fluid.layers.assign, x4)
             # When the type of input is numpy.ndarray, the dtype of input must be float32, int32.
-            x5 = np.array([[2.5, 2.5]], dtype='bool')
+            x4 = np.array([[2.5, 2.5]], dtype='bool')
+            self.assertRaises(TypeError, fluid.layers.assign, x4)
+            x5 = np.array([[2.5, 2.5]], dtype='float64')
             self.assertRaises(TypeError, fluid.layers.assign, x5)
-            x6 = np.array([[2.5, 2.5]], dtype='float16')
+            x6 = np.array([[2.5, 2.5]], dtype='int64')
             self.assertRaises(TypeError, fluid.layers.assign, x6)
-            x7 = np.array([[2.5, 2.5]], dtype='float64')
+            x7 = np.array([[2.5, 2.5]], dtype='uint8')
             self.assertRaises(TypeError, fluid.layers.assign, x7)
-            x8 = np.array([[2.5, 2.5]], dtype='int64')
-            self.assertRaises(TypeError, fluid.layers.assign, x8)
-            x9 = np.array([[2.5, 2.5]], dtype='uint8')
-            self.assertRaises(TypeError, fluid.layers.assign, x9)


 if __name__ == '__main__':
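The reworked error cases imply that the Variable-dtype whitelist for fluid.layers.assign now includes float16, while uint8 is still rejected. A small sketch of that behavior, again assuming the 1.7-era fluid API (names here are illustrative):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        # float16 Variables are now on the accepted dtype list for assign.
        x_fp16 = fluid.layers.data(name='x_fp16', shape=[4], dtype='float16')
        fluid.layers.assign(x_fp16)

        # uint8 Variables are still rejected, as the updated test asserts.
        x_u8 = fluid.layers.data(name='x_u8', shape=[4], dtype='uint8')
        try:
            fluid.layers.assign(x_u8)
        except TypeError:
            print("uint8 input still raises TypeError")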