diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index 662dee03d5b63eee08dcf032d7b3a872e81d00ba..0f769a6aa00304e2d0e774616a01f0b96e4617ef 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -57,6 +57,17 @@ class AssignOp : public framework::OperatorWithKernel {
   }
 };
 
+class AssignInferVarType : public framework::VarTypeInference {
+ public:
+  void operator()(framework::InferVarTypeContext *ctx) const override {
+    auto out_var_name = ctx->Output("Out")[0];
+    auto input_type = ctx->GetType(ctx->Input("X")[0]);
+    auto input_data_type = ctx->GetDataType(ctx->Input("X")[0]);
+    ctx->SetType(out_var_name, input_type);
+    ctx->SetDataType(out_var_name, input_data_type);
+  }
+};
+
 class AssignKernel {
  public:
   void operator()(const framework::ExecutionContext &ctx) const {
@@ -116,7 +127,8 @@ namespace plat = paddle::platform;
 REGISTER_OPERATOR(assign, ops::AssignOp, ops::AssignGradMaker<paddle::framework::OpDesc>,
                   ops::AssignGradMaker<paddle::imperative::OpBase>,
-                  ops::AssignOpProtoMaker, ops::AssignOpInplaceInferer);
+                  ops::AssignOpProtoMaker, ops::AssignOpInplaceInferer,
+                  ops::AssignInferVarType);
 REGISTER_OP_CPU_KERNEL_FUNCTOR(assign, float, ops::AssignKernel, double,
                                ops::AssignKernel, int, ops::AssignKernel,
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 914ddbfa2b7e26053af1f71c6fd291202c09c539..6b4cdbebc776e291baa1af2dcb46bd3a8b81c200 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -21,6 +21,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.backward import append_backward
 
 
 class TestAssignOp(op_test.OpTest):
@@ -51,6 +52,36 @@ class TestAssignFP16Op(op_test.OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestAssignOpWithLoDTensorArray(unittest.TestCase):
+    def test_assign_LoDTensorArray(self):
+        main_program = Program()
+        startup_program = Program()
+        with program_guard(main_program):
+            x = fluid.data(name='x', shape=[100, 10], dtype='float32')
+            x.stop_gradient = False
+            y = fluid.layers.fill_constant(
+                shape=[100, 10], dtype='float32', value=1)
+            z = fluid.layers.elementwise_add(x=x, y=y)
+            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+            init_array = fluid.layers.array_write(x=z, i=i)
+            array = fluid.layers.assign(init_array)
+            sums = fluid.layers.array_read(array=init_array, i=i)
+            mean = fluid.layers.mean(sums)
+            append_backward(mean)
+
+        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        feed_x = np.random.random(size=(100, 10)).astype('float32')
+        ones = np.ones((100, 10)).astype('float32')
+        feed_add = feed_x + ones
+        res = exe.run(main_program,
+                      feed={'x': feed_x},
+                      fetch_list=[sums.name, x.grad_name])
+        self.assertTrue(np.allclose(res[0], feed_add))
+        self.assertTrue(np.allclose(res[1], ones / 1000.0))
+
+
 class TestAssignOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
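
For reviewers, a minimal sketch (not part of the patch) of the behavior the new `AssignInferVarType` enables. It reuses only APIs that already appear in the test above; the names and shapes are illustrative, and the printed type reflects my reading of the C++ diff: `infer_var_type` should now propagate the input's `LOD_TENSOR_ARRAY` type to the output at program-construction time, where the output previously kept the default `LOD_TENSOR` type.

```python
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

main_program = Program()
with program_guard(main_program):
    x = fluid.data(name='x', shape=[2, 3], dtype='float32')
    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
    # array_write creates a LOD_TENSOR_ARRAY variable.
    arr = fluid.layers.array_write(x=x, i=i)
    # assign copies the whole array; with AssignInferVarType registered,
    # the output variable mirrors the input's type instead of defaulting
    # to LOD_TENSOR.
    out = fluid.layers.assign(arr)
    # Expected to print VarType.LOD_TENSOR_ARRAY once this patch is applied
    # (assumption based on the C++ change above).
    print(out.type)
```

Note that the sketch never executes the program: var-type inference runs while the op is appended to the block, which is exactly the stage the new hook affects.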