From 3d8571e8840dad76c71bf20b245ba2780524db06 Mon Sep 17 00:00:00 2001
From: guofei <52460041+gfwm2013@users.noreply.github.com>
Date: Tue, 10 Mar 2020 20:15:36 +0800
Subject: [PATCH] modify assign op and add unittest of assign op (#22769)

As the title.
---
 paddle/fluid/operators/assign_op.cc          | 14 ++++++++-
 .../fluid/tests/unittests/test_assign_op.py  | 31 +++++++++++++++++++
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 662dee03d5b..0f769a6aa00 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -57,6 +57,17 @@ class AssignOp : public framework::OperatorWithKernel {
   }
 };
 
+class AssignInferVarType : public framework::VarTypeInference {
+ public:
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto out_var_name = ctx->Output("Out")[0];
+    auto input_type = ctx->GetType(ctx->Input("X")[0]);
+    auto input_data_type = ctx->GetDataType(ctx->Input("X")[0]);
+    ctx->SetType(out_var_name, input_type);
+    ctx->SetDataType(out_var_name, input_data_type);
+  }
+};
+
 class AssignKernel {
  public:
   void operator()(const framework::ExecutionContext &ctx) const {
@@ -116,7 +127,8 @@ namespace plat = paddle::platform;
 
 REGISTER_OPERATOR(assign, ops::AssignOp,
                   ops::AssignGradMaker<paddle::framework::OpDesc>,
                   ops::AssignGradMaker<paddle::imperative::OpBase>,
-                  ops::AssignOpProtoMaker, ops::AssignOpInplaceInferer);
+                  ops::AssignOpProtoMaker, ops::AssignOpInplaceInferer,
+                  ops::AssignInferVarType);
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 914ddbfa2b7..6b4cdbebc77 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -21,6 +21,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.backward import append_backward
 
 
 class TestAssignOp(op_test.OpTest):
@@ -51,6 +52,36 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestAssignOpWithLoDTensorArray(unittest.TestCase):
+    def test_assign_LoDTensorArray(self):
+        main_program = Program()
+        startup_program = Program()
+        with program_guard(main_program):
+            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
+            x.stop_gradient = False
+            y = fluid.layers.fill_constant(
+                shape=[100, 10], dtype='float32', value=1)
+            z = fluid.layers.elementwise_add(x=x, y=y)
+            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+            init_array = fluid.layers.array_write(x=z, i=i)
+            array = fluid.layers.assign(init_array)
+            sums = fluid.layers.array_read(array=init_array, i=i)
+            mean = fluid.layers.mean(sums)
+            append_backward(mean)
+
+        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        feed_x = np.random.random(size=(100, 10)).astype('float32')
+        ones = np.ones((100, 10)).astype('float32')
+        feed_add = feed_x + ones
+        res = exe.run(main_program,
+                      feed={'x': feed_x},
+                      fetch_list=[sums.name, x.grad_name])
+        self.assertTrue(np.allclose(res[0], feed_add))
+        self.assertTrue(np.allclose(res[1], ones / 1000.0))
+
+
 class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
-- 
GitLab
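
Not part of the patch: the var-type inference added above makes the output of the assign op inherit the LOD_TENSOR_ARRAY type and dtype of its input, so `fluid.layers.assign` can be applied to a LoDTensorArray and the copy can be consumed by downstream ops such as `array_read`. Below is a minimal standalone sketch using the same fluid 1.x APIs as the new unit test; the variable names, shapes, values, and the choice to read from the copied array are illustrative assumptions, not taken from the patch.

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core

# Illustrative sketch (not part of the patch): write a tensor into a
# LoDTensorArray, assign (copy) the whole array, and read the copy back.
main_program = fluid.Program()
with fluid.program_guard(main_program):
    x = fluid.data(name='x', shape=[4, 2], dtype='float32')
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    arr = fluid.layers.array_write(x=x, i=i)       # LOD_TENSOR_ARRAY variable
    arr_copy = fluid.layers.assign(arr)            # output type/dtype inferred by AssignInferVarType
    out = fluid.layers.array_read(array=arr_copy, i=i)

place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
feed_x = np.random.random(size=(4, 2)).astype('float32')
res, = exe.run(main_program, feed={'x': feed_x}, fetch_list=[out])
print(np.allclose(res, feed_x))  # expected: True, the copied array holds the fed values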