Commit 96bc3352 authored by Yang Yu

Update

Parent c7322793
......@@ -35,7 +35,6 @@ class Variable {
   template <typename T>
   T* GetMutable() {
     if (!IsType<T>()) {
-      VLOG(10) << "Resetting " << *this->name_;
       holder_.reset(new PlaceholderImpl<T>(new T()));
     }
     return static_cast<T*>(holder_->Ptr());
......
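Note: GetMutable<T>() lazily re-creates the type-erased holder whenever the requested type differs from what is currently stored; the removed VLOG line used to report exactly that reset. Below is a minimal Python sketch of this semantics only (an illustration, not Paddle's C++ implementation):

class Variable:
    """Toy stand-in for framework::Variable's type-erased holder."""

    def __init__(self, name):
        self.name = name
        self._holder = None

    def get_mutable(self, cls):
        # Reset the holder whenever the stored object is not of the
        # requested type (the case the removed VLOG used to log).
        if not isinstance(self._holder, cls):
            self._holder = cls()
        return self._holder

v = Variable("x")
t = v.get_mutable(list)          # first request creates a list holder
assert v.get_mutable(list) is t  # same type: the holder is reused
v.get_mutable(dict)              # type switch: the holder is silently replaced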
......@@ -37,10 +37,12 @@ class SumKernel : public framework::OpKernel<T> {
     bool in_place = out_var == in_vars[0];
     if (out_var->IsType<framework::LoDTensor>()) {
-      auto *out = context.Output<Tensor>("Out");
-      auto result = EigenVector<T>::Flatten(*out);
+      auto *out = context.Output<LoDTensor>("Out");
+      if (!in_place) {
+        out->mutable_data<T>(context.GetPlace());
+      }
+      auto result = EigenVector<T>::Flatten(*out);
       if (!in_place) {
         math::SetConstant<DeviceContext, T> constant_functor;
         constant_functor(context.template device_context<DeviceContext>(), out,
                          0.0);
......
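Note: in_place here means Out aliases X[0]. Reading the diff, the output buffer is only re-allocated (mutable_data) and zero-filled (SetConstant) when the sum is not in place; otherwise the first input's data would be clobbered before it is accumulated. A rough numpy sketch of that rule (an assumed reading of the diff, not the real kernel):

import numpy

def sum_kernel(in_tensors, out):
    # in_place: the output buffer is the first input's buffer.
    in_place = out is in_tensors[0]
    if not in_place:
        out[...] = 0.0        # SetConstant: zero-init a fresh output
    for i, x in enumerate(in_tensors):
        if in_place and i == 0:
            continue          # Out already holds X[0]'s data
        out += x
    return out

a = numpy.ones(4)
b = numpy.full(4, 2.0)
assert (sum_kernel([a, b], a) == 3.0).all()  # in place: a becomes all 3s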
......@@ -242,7 +242,7 @@ class TestSimpleMul(unittest.TestCase):
         out = rnn()
         out = fluid.layers.sequence_pool(out, pool_type='last')
         loss = fluid.layers.mean(x=out)
-        fluid.backward.append_backward_ops(loss)
+        fluid.backward.append_backward(loss)
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
......@@ -317,7 +317,7 @@ class TestSimpleMulWithMemory(unittest.TestCase):
         out = rnn()
         last = fluid.layers.sequence_pool(input=out, pool_type='last')
         loss = fluid.layers.mean(x=last)
-        fluid.backward.append_backward_ops(loss)
+        fluid.backward.append_backward(loss)
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
......@@ -330,6 +330,7 @@ class TestSimpleMulWithMemory(unittest.TestCase):
             ],
             return_numpy=False))
         last_by_py, = py_rnn.exe().values()
+        print w_g[0]
         self.assertTrue(numpy.allclose(last_np, last_by_py))
         w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
         # print w_g_num[0], w_g[0]
......
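Note: the test checks the analytic gradient produced by append_backward against a finite-difference estimate (w_g vs. w_g_num). Below is a generic sketch of such a numeric gradient check; the helper is hypothetical, not fluid's get_numeric_gradient_of_param:

import numpy

def numeric_gradient(loss_fn, param, delta=1e-5):
    # Central finite differences: perturb each entry of param by +/- delta
    # and estimate d(loss)/d(param) from the slope.
    grad = numpy.zeros_like(param)
    flat, g = param.reshape(-1), grad.reshape(-1)
    for i in range(flat.size):
        orig = flat[i]
        flat[i] = orig + delta
        hi = loss_fn(param)
        flat[i] = orig - delta
        lo = loss_fn(param)
        flat[i] = orig
        g[i] = (hi - lo) / (2 * delta)
    return grad

w = numpy.random.rand(3, 2)
loss = lambda p: (p ** 2).sum()  # analytic gradient is 2 * p
assert numpy.allclose(numeric_gradient(loss, w), 2 * w, atol=1e-4)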