diff --git a/paddle/framework/variable.h b/paddle/framework/variable.h
index 3720393601a5458fb43217d05257c4acb8be28cd..e5a94759f9230ab4ce9d2cc24849a2debb8a5e2f 100644
--- a/paddle/framework/variable.h
+++ b/paddle/framework/variable.h
@@ -35,7 +35,6 @@ class Variable {
   template <typename T>
   T* GetMutable() {
     if (!IsType<T>()) {
-      VLOG(10) << "Resetting " << *this->name_;
       holder_.reset(new PlaceholderImpl<T>(new T()));
     }
     return static_cast<T*>(holder_->Ptr());
diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h
index d1277d3edd897b951fe20ee2a0294bd9c615a059..552b48f608b7e0248f03dbea940a83f112a67712 100644
--- a/paddle/operators/sum_op.h
+++ b/paddle/operators/sum_op.h
@@ -37,10 +37,12 @@ class SumKernel : public framework::OpKernel<T> {
     bool in_place = out_var == in_vars[0];

     if (out_var->IsType<framework::LoDTensor>()) {
-      auto *out = context.Output<Tensor>("Out");
-      auto result = EigenVector<T>::Flatten(*out);
+      auto *out = context.Output<LoDTensor>("Out");
       if (!in_place) {
         out->mutable_data<T>(context.GetPlace());
+      }
+      auto result = EigenVector<T>::Flatten(*out);
+      if (!in_place) {
         math::SetConstant<DeviceContext, T> constant_functor;
         constant_functor(context.template device_context<DeviceContext>(), out,
                          0.0);
diff --git a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
index 7f61b966fd53dd9abd768fc198255d636781e395..238fd1a8cba81f37e9d63b0d6ea09582d6b0fe83 100644
--- a/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
+++ b/python/paddle/v2/fluid/tests/test_dynrnn_gradient_check.py
@@ -242,7 +242,7 @@ class TestSimpleMul(unittest.TestCase):
         out = rnn()
         out = fluid.layers.sequence_pool(out, pool_type='last')
         loss = fluid.layers.mean(x=out)
-        fluid.backward.append_backward_ops(loss)
+        fluid.backward.append_backward(loss)

         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
@@ -317,7 +317,7 @@ class TestSimpleMulWithMemory(unittest.TestCase):
         out = rnn()
         last = fluid.layers.sequence_pool(input=out, pool_type='last')
         loss = fluid.layers.mean(x=last)
-        fluid.backward.append_backward_ops(loss)
+        fluid.backward.append_backward(loss)

         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
@@ -330,6 +330,7 @@ class TestSimpleMulWithMemory(unittest.TestCase):
             ], return_numpy=False))
         last_by_py, = py_rnn.exe().values()
+        print w_g[0]
         self.assertTrue(numpy.allclose(last_np, last_by_py))

         w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
         # print w_g_num[0], w_g[0]