未验证 提交 5996e224 编写于 作者: Y Yu Yang 提交者: GitHub

Merge pull request #13430 from chengduoZH/refine_seq_pool

Refine seq_pool
...@@ -135,7 +135,7 @@ class SequencePoolFunctor<platform::CUDADeviceContext, T> { ...@@ -135,7 +135,7 @@ class SequencePoolFunctor<platform::CUDADeviceContext, T> {
const std::string pooltype, const framework::LoDTensor& input, const std::string pooltype, const framework::LoDTensor& input,
framework::Tensor* output, framework::Tensor* output,
framework::Tensor* index = nullptr) { framework::Tensor* index = nullptr) {
auto lod = input.lod()[0]; auto& lod = input.lod()[0];
const size_t item_dim = output->numel() / output->dims()[0]; const size_t item_dim = output->numel() / output->dims()[0];
dim3 threads(1024, 1); dim3 threads(1024, 1);
dim3 grid(lod.size(), 1); dim3 grid(lod.size(), 1);
...@@ -297,7 +297,7 @@ class SequencePoolGradFunctor<platform::CUDADeviceContext, T> { ...@@ -297,7 +297,7 @@ class SequencePoolGradFunctor<platform::CUDADeviceContext, T> {
framework::LoDTensor* in_grad, framework::LoDTensor* in_grad,
/* max pool has index */ /* max pool has index */
const framework::Tensor* index = nullptr) { const framework::Tensor* index = nullptr) {
auto lod = in_grad->lod()[0]; auto& lod = in_grad->lod()[0];
const size_t item_dim = in_grad->numel() / in_grad->dims()[0]; const size_t item_dim = in_grad->numel() / in_grad->dims()[0];
dim3 threads(1024, 1); dim3 threads(1024, 1);
dim3 grid(lod.size(), 1); dim3 grid(lod.size(), 1);
......
...@@ -31,11 +31,11 @@ class TestSeqAvgPool(OpTest): ...@@ -31,11 +31,11 @@ class TestSeqAvgPool(OpTest):
self.op_type = 'sequence_pool' self.op_type = 'sequence_pool'
# one level, batch size is 4 # one level, batch size is 4
x = np.random.uniform(0.1, 1, [11, 23]).astype('float32') x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
lod = [[4, 1, 3, 3]] lod = [[11]]
self.inputs = {'X': (x, lod)} self.inputs = {'X': (x, lod)}
offset = self.convert_to_offset(lod) offset = self.convert_to_offset(lod)
out = np.zeros((4, 23)).astype('float32') out = np.zeros((len(lod[0]), 23)).astype('float32')
self.outputs = {'Out': out} self.outputs = {'Out': out}
return x, offset, out return x, offset, out
...@@ -71,7 +71,7 @@ class TestSeqMaxPool(TestSeqAvgPool): ...@@ -71,7 +71,7 @@ class TestSeqMaxPool(TestSeqAvgPool):
def set_data(self): def set_data(self):
self.op_type = 'sequence_pool' self.op_type = 'sequence_pool'
x = np.random.uniform(0.1, 1, [13, 23]).astype('float32') x = np.random.uniform(0.1, 1, [13, 23]).astype('float32')
lod = [[4, 1, 3, 5]] lod = [[13]]
offset = self.convert_to_offset(lod) offset = self.convert_to_offset(lod)
for i in range(len(offset[0]) - 1): for i in range(len(offset[0]) - 1):
l = offset[0][i + 1] - offset[0][i] l = offset[0][i + 1] - offset[0][i]
...@@ -79,7 +79,7 @@ class TestSeqMaxPool(TestSeqAvgPool): ...@@ -79,7 +79,7 @@ class TestSeqMaxPool(TestSeqAvgPool):
self.inputs = {'X': (x, lod)} self.inputs = {'X': (x, lod)}
out = np.zeros((4, 23)).astype('float32') out = np.zeros((1, 23)).astype('float32')
self.outputs = {'Out': out} self.outputs = {'Out': out}
return x, offset, out return x, offset, out
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册