Commit ac3370a4 authored by dangqingqing

Add unit tests for gemv and fix the gradient check for bias.

Parent commit: 2e029874
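The hunks below touch the LoDTensor GPU test, the LSTM gradient kernel, the CPU and GPU math_function tests, and the Python LSTM op test. The gemv exercised by the new tests is the standard BLAS matrix-vector product c = alpha * op(A) * b + beta * c, where A is stored row-major with shape m x n and op(A) is A or A^T depending on the trans flag; the tests only use alpha = 1 and beta = 0. As a reading aid, here is a minimal reference sketch of that contract in plain C++ (the name gemv_ref is illustrative and is not part of the commit):

// Reference semantics assumed by the tests in this commit:
//   c = alpha * op(A) * b + beta * c, A row-major with shape m x n,
//   op(A) = A when trans == false, A^T when trans == true.
// Illustrative only; this is not the Paddle implementation.
template <typename T>
void gemv_ref(bool trans, int m, int n, T alpha, const T* a, const T* b,
              T beta, T* c) {
  int c_num = trans ? n : m;  // length of the output vector
  int b_num = trans ? m : n;  // length of the input vector
  for (int i = 0; i < c_num; ++i) {
    T sum = 0;
    for (int j = 0; j < b_num; ++j) {
      // Row-major indexing: A[i][j] = a[i * n + j], A^T[i][j] = a[j * n + i].
      sum += (trans ? a[j * n + i] : a[i * n + j]) * b[j];
    }
    // BLAS convention: c is not read when beta == 0.
    c[i] = alpha * sum +
           (beta == static_cast<T>(0) ? static_cast<T>(0) : beta * c[i]);
  }
}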
@@ -36,8 +36,8 @@ TEST(LoDTensor, LoDInGPU) {
   lod_tensor.mutable_data<float>(place);
   lod_tensor.set_lod(src_lod);
-  CHECK_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
-  CHECK_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
+  EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
+  EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
   auto lod = lod_tensor.lod();
@@ -45,6 +45,6 @@ TEST(LoDTensor, LoDInGPU) {
   cudaDeviceSynchronize();
   for (size_t i = 0; i < src_lod[0].size(); ++i) {
-    CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
+    EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
   }
 }
\ No newline at end of file
@@ -162,9 +162,9 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     auto* bias_g = ctx.Output<Tensor>(framework::GradVarName("Bias"));
     auto& device_ctx = ctx.device_context();
+    math::SetConstant<Place, T> zero;
     if (weight_g) {
       weight_g->mutable_data<T>(ctx.GetPlace());
-      math::SetConstant<Place, T> zero;
       zero(device_ctx, weight_g, static_cast<T>(0.0));
     }
@@ -188,6 +188,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     math::LstmMetaGrad<T> lstm_grad;
     if (bias && bias_g) {
       T* bias_g_data = const_cast<T*>(bias_g->mutable_data<T>(ctx.GetPlace()));
+      zero(device_ctx, bias_g, static_cast<T>(0.0));
       lstm_grad.checkIgGrad = bias_g_data + 4 * frame_size;
       lstm_grad.checkFgGrad = lstm_grad.checkIgGrad + frame_size;
       lstm_grad.checkOgGrad = lstm_grad.checkFgGrad + frame_size;
@@ -219,6 +220,8 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     batch_cell_g.mutable_data<T>(out_dims, ctx.GetPlace());
     batch_cell_g.set_lod(batch_gate->lod());
     to_batch(device_ctx, *cell_g, batch_cell_g, false);
+    // TODO(qingqing) support the case output cell has gradient.
+    zero(device_ctx, &batch_cell_g, static_cast<T>(0.0));
     LoDTensor batch_gate_g;
     batch_gate_g.mutable_data<T>(batch_gate->dims(), ctx.GetPlace());
@@ -304,7 +307,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     int n = static_cast<int>(batch_gate_g.dims()[1]);
     Tensor ones;
-    ones.mutable_data<T>({1, m}, ctx.GetPlace());
+    ones.mutable_data<T>({m}, ctx.GetPlace());
     math::SetConstant<Place, T> set;
     set(device_ctx, &ones, static_cast<T>(1.0));
...
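Two notes on this hunk. The new zero-fills matter because weight_g, bias_g, and batch_cell_g come from mutable_data without initialization and the backward pass accumulates gradients into them; for bias_g this is what the commit message means by fixing the gradient check for bias, and batch_cell_g is cleared because, per the TODO, no gradient is taken from the output cell yet. Separately, the ones tensor changing from {1, m} to {m} pairs with the gemv wrapper tested below: summing the rows of batch_gate_g (m rows, n columns, with n taken from dims()[1] above) into the gate-bias gradient is a transposed matrix-vector product against a vector of ones. That call sits outside the lines shown here, so the following is only a hedged sketch of the reduction, written against the gemv signature used in the new tests:

// Sketch only; the real call site is not visible in this hunk.
// bias_grad[0 : 4 * frame_size] = 1.0 * batch_gate_g^T * ones + 0.0 * bias_grad
math::gemv<Place, T>(device_ctx, true /*trans*/, m, n, 1.,
                     batch_gate_g.data<T>(), ones.data<T>(), 0., bias_g_data);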
@@ -89,3 +89,53 @@ TEST(math_function, zero) {
   EXPECT_EQ(t[2], 1);
   EXPECT_EQ(t[3], 1);
 }
+
+template <typename T>
+void GemvTest(int m, int n, bool trans) {
+  paddle::framework::Tensor mat_a;
+  paddle::framework::Tensor vec_b;
+  paddle::framework::Tensor vec_c;
+  auto* cpu_place = new paddle::platform::CPUPlace();
+  int b_num = trans ? m : n;
+  int c_num = trans ? n : m;
+  T* data_a = mat_a.mutable_data<T>({m, n}, *cpu_place);
+  T* data_b = vec_b.mutable_data<T>({b_num}, *cpu_place);
+  T* data_c = vec_c.mutable_data<T>({c_num}, *cpu_place);
+
+  for (int i = 0; i < mat_a.numel(); ++i) {
+    data_a[i] = static_cast<T>(i);
+  }
+  for (int i = 0; i < vec_b.numel(); ++i) {
+    data_b[i] = static_cast<T>(i);
+  }
+
+  paddle::platform::CPUDeviceContext context(*cpu_place);
+  paddle::operators::math::gemv<paddle::platform::CPUPlace, T>(
+      context, trans, static_cast<int>(m), static_cast<int>(n), 1., data_a,
+      data_b, 0., data_c);
+
+  if (!trans) {
+    for (int i = 0; i < m; ++i) {
+      T sum = 0.0;
+      for (int j = 0; j < n; ++j) {
+        sum += data_a[i * n + j] * data_b[j];
+      }
+      ASSERT_FLOAT_EQ(data_c[i], sum);
+    }
+  } else {
+    for (int i = 0; i < n; ++i) {
+      T sum = 0.0;
+      for (int j = 0; j < m; ++j) {
+        sum += data_a[j * n + i] * data_b[j];
+      }
+      ASSERT_FLOAT_EQ(data_c[i], sum);
+    }
+  }
+}
+
+TEST(math_function, gemv) {
+  GemvTest<float>(3, 13, false);
+  GemvTest<double>(4, 5, false);
+  GemvTest<float>(12, 7, true);
+  GemvTest<double>(7, 9, true);
+}
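These cases only exercise alpha = 1 and beta = 0. If the wrapper follows the usual BLAS convention for beta (an assumption; the commit itself does not test it), the accumulation path could be covered with an additional case along these lines, written in the same style as the test above; this is a sketch and not part of the commit:

// Sketch of an extra case covering beta != 0. Assumes the wrapper follows the
// BLAS convention c = alpha * op(A) * b + beta * c; not part of this commit.
template <typename T>
void GemvBetaTest(int m, int n) {
  paddle::framework::Tensor mat_a;
  paddle::framework::Tensor vec_b;
  paddle::framework::Tensor vec_c;
  auto* cpu_place = new paddle::platform::CPUPlace();
  T* data_a = mat_a.mutable_data<T>({m, n}, *cpu_place);
  T* data_b = vec_b.mutable_data<T>({n}, *cpu_place);
  T* data_c = vec_c.mutable_data<T>({m}, *cpu_place);
  for (int i = 0; i < mat_a.numel(); ++i) data_a[i] = static_cast<T>(i);
  for (int i = 0; i < vec_b.numel(); ++i) data_b[i] = static_cast<T>(i);
  // Pre-fill c so the beta term is observable.
  for (int i = 0; i < vec_c.numel(); ++i) data_c[i] = static_cast<T>(i);

  paddle::platform::CPUDeviceContext context(*cpu_place);
  paddle::operators::math::gemv<paddle::platform::CPUPlace, T>(
      context, false, m, n, 2., data_a, data_b, 3., data_c);

  for (int i = 0; i < m; ++i) {
    T sum = 0.0;
    for (int j = 0; j < n; ++j) {
      sum += data_a[i * n + j] * data_b[j];
    }
    // Expected: alpha * (A * b)[i] + beta * old_c[i], with old_c[i] == i.
    ASSERT_FLOAT_EQ(data_c[i], 2 * sum + 3 * static_cast<T>(i));
  }
}
// Invoked like the cases above, e.g. GemvBetaTest<float>(3, 13); inside a TEST.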
@@ -177,3 +177,65 @@ TEST(math_function, gemm_trans_cublas) {
   EXPECT_EQ(input3_ptr[7], 99);
   delete gpu_place;
 }
+
+template <typename T>
+void GemvTest(int m, int n, bool trans) {
+  paddle::framework::Tensor mat_a;
+  paddle::framework::Tensor vec_b;
+  paddle::framework::Tensor vec_c;
+  auto* cpu_place = new paddle::platform::CPUPlace();
+
+  T* data_a = mat_a.mutable_data<T>({m, n}, *cpu_place);
+  T* data_b = vec_b.mutable_data<T>({trans ? m : n}, *cpu_place);
+  T* data_c = vec_c.mutable_data<T>({trans ? n : m}, *cpu_place);
+
+  auto* gpu_place = new paddle::platform::GPUPlace(0);
+  paddle::framework::Tensor g_mat_a;
+  paddle::framework::Tensor g_vec_b;
+  paddle::framework::Tensor g_vec_c;
+  T* g_data_a = g_mat_a.mutable_data<T>(mat_a.dims(), *gpu_place);
+  T* g_data_b = g_vec_b.mutable_data<T>(vec_b.dims(), *gpu_place);
+  T* g_data_c = g_vec_c.mutable_data<T>(vec_c.dims(), *gpu_place);
+
+  for (int i = 0; i < mat_a.numel(); ++i) {
+    data_a[i] = static_cast<T>(i);
+  }
+  for (int i = 0; i < vec_b.numel(); ++i) {
+    data_b[i] = static_cast<T>(i);
+  }
+
+  paddle::platform::CUDADeviceContext context(*gpu_place);
+  g_mat_a.CopyFrom(mat_a, *gpu_place, context);
+  g_vec_b.CopyFrom(vec_b, *gpu_place, context);
+
+  paddle::operators::math::gemv<paddle::platform::GPUPlace, T>(
+      context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a,
+      g_data_b, 0., g_data_c);
+
+  vec_c.CopyFrom(g_vec_c, paddle::platform::CPUPlace(), context);
+
+  if (!trans) {
+    for (int i = 0; i < m; ++i) {
+      T sum = 0.0;
+      for (int j = 0; j < n; ++j) {
+        sum += data_a[i * n + j] * data_b[j];
+      }
+      ASSERT_FLOAT_EQ(data_c[i], sum);
+    }
+  } else {
+    for (int i = 0; i < n; ++i) {
+      T sum = 0.0;
+      for (int j = 0; j < m; ++j) {
+        sum += data_a[j * n + i] * data_b[j];
+      }
+      ASSERT_FLOAT_EQ(data_c[i], sum);
+    }
+  }
+}
+
+TEST(math_function, gemv) {
+  GemvTest<float>(3, 13, false);
+  GemvTest<double>(3, 13, false);
+  GemvTest<float>(3, 13, true);
+  GemvTest<double>(3, 13, true);
+}
@@ -114,26 +114,20 @@ def lstm(
 class TestLstmOp(OpTest):
-    def set_data(self):
-        # self.lod = [[0, 2, 6, 9]]
-        # self.D = 64
-        # self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
-        self.lod = [[0, 1]]
-        self.D = 4
-        self.sort_idx = [0]
-
-        # self.act_gate = 'identity'
-        # self.act_cell = 'identity'
-        # self.act_cand = 'identity'
+    def set_argument(self):
+        self.lod = [[0, 2, 6, 9]]
+        self.D = 16
+        self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
+
         self.act_gate = 'sigmoid'
         self.act_cell = 'tanh'
         self.act_cand = 'tanh'

+        self.has_initial_state = True
         self.is_reverse = False

     def setUp(self):
-        self.set_data()
+        self.set_argument()
         self.op_type = 'lstm'

         T = self.lod[0][-1]
@@ -155,17 +149,14 @@ class TestLstmOp(OpTest):
         for i, j in enumerate(self.sort_idx):
             g_sort[i, :] = g[j, :]

-        self.inputs = {
-            'Input': (x, self.lod),
-            'H0': h0,
-            'C0': c0,
-            'Weight': w,
-            'Bias': b
-        }
+        self.inputs = {'Input': (x, self.lod), 'Weight': w, 'Bias': b}
+        self.inputs['H0'] = h0
+        self.inputs['C0'] = c0
+
         self.outputs = {
             'Hidden': (h, self.lod),
             'Cell': (c, self.lod),
-            #'BatchGate': g_sort,
+            'BatchGate': g_sort,
         }
         self.attrs = {
             'usePeepholes': True,
@@ -175,26 +166,43 @@ class TestLstmOp(OpTest):
             'candidateActivation': self.act_cand
         }

-    def not_test_check_output(self):
+    def test_check_output(self):
         self.check_output()

+    #TODO(qingqing) add more unit testing case
     def test_check_grad(self):
+        # TODO(qingqing) remove folowing two lines after the check_grad is refined.
         self.outputs['BatchGate'] = None
         self.outputs['BatchCellPreAct'] = None
-        self.check_grad(['Input', 'Weight'], ['Hidden', 'Cell'])
-        #['Input', 'Weight', 'Bias'], ['Hidden', 'Cell'])
+        self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden'])

-#class TestLstmOpRerverse(TestLstmOp):
-#    def set_data(self):
-#        self.lod = [[0, 2, 6, 9]]
-#        self.D = 64
-#        self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
-#
-#        self.act_gate = 'sigmoid'
-#        self.act_cell = 'tanh'
-#        self.act_cand = 'tanh'
-#
-#        self.is_reverse = True
+class TestLstmOpHasNoInitial(TestLstmOp):
+    def set_argument(self):
+        self.lod = [[0, 2, 6, 9]]
+        self.D = 64
+        self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
+
+        self.act_gate = 'sigmoid'
+        self.act_cell = 'tanh'
+        self.act_cand = 'tanh'
+
+        self.has_initial_state = False
+        self.is_reverse = True
+
+
+class TestLstmOpRerverse(TestLstmOp):
+    def set_argument(self):
+        self.lod = [[0, 2, 6, 9]]
+        self.D = 64
+        self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5]
+
+        self.act_gate = 'sigmoid'
+        self.act_cell = 'tanh'
+        self.act_cand = 'tanh'
+
+        self.has_initial_state = True
+        self.is_reverse = True

 if __name__ == '__main__':
...