Commit 12ed53c1 authored by yangyaming

Inherit LoD from x to x_grad and enhance the unit test.

Parent e3210364
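The change below makes x@GRAD inherit the LoD (level of detail, PaddlePaddle's variable-length sequence bookkeeping) of x. As background, here is a minimal numpy sketch of what a level-0 LoD such as the test's [[0, 2, 5, 6]] encodes; it is illustrative only and not part of the commit:

import numpy as np

# Offsets taken from the test below: they partition 6 rows into
# sequences of lengths 2, 3 and 1.
lod = [0, 2, 5, 6]
x = np.random.random((6, 100)).astype('float32')
sequences = [x[lod[i]:lod[i + 1]] for i in range(len(lod) - 1)]
assert [len(s) for s in sequences] == [2, 3, 1]
# With this commit, x@GRAD carries the same offsets as x, so its rows
# partition into the same sequences.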
......
@@ -146,6 +146,7 @@ class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase {
     PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X")));
     context->SetOutputDim(framework::GradVarName("X"),
                           context->GetInputDim("X"));
+    context->ShareLoD("X", framework::GradVarName("X"));
   }
 };
......
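The added ShareLoD call copies the LoD of input X onto the gradient output at shape-inference time, alongside the existing dimension copy. For intuition, a hedged numpy sketch of the op's behavior (shrink_forward and shrink_backward are hypothetical names, not the real kernels): the forward pass keeps a prefix of x's rows, so the backward pass scatters the incoming gradient into a zero tensor of x's full shape, and that tensor is what now also receives x's LoD:

import numpy as np

def shrink_forward(x, k):
    # Keep the first k rows, as the unit test below observes
    # (the fetched outs are the prefixes tensor_np[0:6], [0:5], [0:2]).
    return x[:k]

def shrink_backward(d_out, x_shape):
    # Only the kept rows receive gradient; the rest stay zero.
    d_x = np.zeros(x_shape, dtype=d_out.dtype)
    d_x[:d_out.shape[0]] = d_out
    return d_x

x = np.random.random((6, 100)).astype('float32')
out = shrink_forward(x, 2)
d_x = shrink_backward(np.ones_like(out), x.shape)
assert d_x.shape == x.shape and d_x[2:].sum() == 0.0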
......
@@ -3,45 +3,86 @@ import paddle.v2.fluid.core as core
 from paddle.v2.fluid.executor import Executor
 import paddle.v2.fluid.layers as layers
 from paddle.v2.fluid.backward import append_backward
-from paddle.v2.fluid.framework import default_main_program
-import numpy
+from paddle.v2.fluid.framework import default_main_program, switch_main_program
+from paddle.v2.fluid.framework import Program
+import numpy as np
 
-main_program = default_main_program()
 
-
-class TestShrinkRNNMemory(unittest.TestCase):
-    def test_shrink_rnn_memory(self):
+class TestShrinkRNNMemoryBase(unittest.TestCase):
+    def setUp(self):
+        self.main_program = Program()
+        switch_main_program(self.main_program)
         x = layers.data('x', shape=[100], dtype='float32')
         x.stop_gradient = False
-        table = layers.lod_rank_table(x=x)
+        rank_table_tensor = layers.data(
+            'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
+        table = layers.lod_rank_table(x=rank_table_tensor)
         i = layers.zeros(dtype='int64', shape=[1])
-        mem1 = layers.shrink_memory(x=x, i=i, table=table)
+        self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
         i = layers.increment(x=i)
         i.stop_gradient = True
-        mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
+        self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
         i = layers.increment(x=i)
         i.stop_gradient = True
-        mem3 = layers.shrink_memory(x=mem2, i=i, table=table)
+        self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
+        mem3_mean = layers.mean(x=self.mem3)
+        append_backward(loss=mem3_mean)
+        self.x_grad = self.main_program.global_block().var('x@GRAD')
+
+    def sum_lodtensor(self, tensor):
+        sum_res = 0.0
+        for i in xrange(np.product(tensor.get_dims())):
+            sum_res += tensor.get_float_element(i)
+        return sum_res
+
+
+class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase):
+    def test_refer_lod(self):
         cpu = core.CPUPlace()
-        tensor = core.LoDTensor()
-        tensor.set_lod([[0, 2, 5, 6]])
-        tensor_np = numpy.random.random(size=(6, 100)).astype('float32')
-        tensor.set(tensor_np, cpu)
+        x_tensor = core.LoDTensor()
+        x_tensor.set_lod([[0, 2, 5, 6]])
+        tensor_np = np.random.random(size=(6, 100)).astype('float32')
+        x_tensor.set(tensor_np, cpu)
+
+        rank_table_tensor = core.LoDTensor()
+        rank_table_tensor.set_lod([[0, 1, 3, 6]])
+        rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
+                              cpu)
+
         exe = Executor(cpu)
-        outs = exe.run(feed={'x': tensor},
-                       fetch_list=[mem1, mem2, mem3],
-                       return_numpy=False)
-        self.assertTrue(numpy.allclose(tensor_np[0:6], outs[0]))
-        self.assertTrue(numpy.allclose(tensor_np[0:5], outs[1]))
-        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[2]))
-
-        mem3_mean = layers.mean(x=mem3)
-        append_backward(loss=mem3_mean)
-        x_grad = exe.run(
-            feed={'x': tensor},
-            fetch_list=[main_program.global_block().var('x@GRAD')])[0]
-        self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
+        outs = exe.run(
+            feed={'x': x_tensor,
+                  'rank_table_tensor': rank_table_tensor},
+            fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad],
+            return_numpy=False)
+        self.assertTrue(np.allclose(tensor_np[0:6], outs[0]))
+        self.assertTrue(np.allclose(tensor_np[0:5], outs[1]))
+        self.assertTrue(np.allclose(tensor_np[0:2], outs[2]))
+        self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)
+
+
+class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
+    def test_no_lod(self):
+        cpu = core.CPUPlace()
+        x_tensor = core.LoDTensor()
+        tensor_np = np.random.random(size=(3, 100)).astype('float32')
+        x_tensor.set(tensor_np, cpu)
+
+        rank_table_tensor = core.LoDTensor()
+        rank_table_tensor.set_lod([[0, 1, 3, 6]])
+        rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
+                              cpu)
+
+        exe = Executor(cpu)
+        outs = exe.run(
+            feed={'x': x_tensor,
+                  'rank_table_tensor': rank_table_tensor},
+            fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad],
+            return_numpy=False)
+        self.assertTrue(np.allclose(tensor_np[0:3], outs[0]))
+        self.assertTrue(np.allclose(tensor_np[0:2], outs[1]))
+        self.assertTrue(np.allclose(tensor_np[0:1], outs[2]))
+        self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)
+
 
 if __name__ == '__main__':
......
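A note on the final assertion in both new tests: the loss is mean(mem3), so each of mem3's N elements receives gradient 1/N under backpropagation, and those are the only nonzero entries of x@GRAD. Their sum is therefore 1.0 however far the memory was shrunk, which is what sum_lodtensor checks. A small numpy verification of that arithmetic, assuming the row-prefix view of shrink_memory sketched above:

import numpy as np

x = np.random.random((6, 100)).astype('float32')
mem3 = x[:2]                                   # forward: keep 2 rows
d_mem3 = np.full(mem3.shape, 1.0 / mem3.size)  # grad of mean(mem3)
x_grad = np.zeros_like(x)
x_grad[:2] = d_mem3                            # scatter back into x
print(x_grad.sum())                            # ~1.0 up to rounding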