Commit 0596cd88 authored by tensor-tang

refine test recurrent layer

Parent 290edd8f
@@ -222,6 +222,7 @@ TEST(Layer, RecurrentLayer) {
#define protected public
#include "paddle/gserver/layers/GatedRecurrentLayer.h"
#include "paddle/gserver/layers/LstmLayer.h"
#include "paddle/gserver/layers/RecurrentLayer.h"
template <class T>
class TestRecurrentLayer {
public:
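
The "#define protected public" line above is the test's white-box access trick: the macro is in effect before the layer headers are included, so members declared protected in RecurrentLayer, GatedRecurrentLayer and LstmLayer are re-parsed as public inside this translation unit and the test can inspect them directly. A minimal, self-contained sketch of the mechanism (the Counter class and hidden_ member are illustrative, not from the commit):

#include <cassert>

#define protected public  // every later "protected" access specifier now reads as "public"
class Counter {
 protected:
  int hidden_ = 42;  // would normally be inaccessible outside the class hierarchy
};
#undef protected

int main() {
  Counter c;
  assert(c.hidden_ == 42);  // the test can poke at internals directly
  return 0;
}
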
@@ -422,6 +423,8 @@ TEST(Layer, LstmLayer) {
#ifdef PADDLE_WITH_MKLML
#include "paddle/gserver/layers/MKLPackedRecurrentLayer.h"
LayerPtr initMKLPackedLayer(LayerConfig layerConfig,
bool reversed,
int layerSize,
@@ -453,7 +456,31 @@ LayerPtr initMKLPackedLayer(LayerConfig layerConfig,
return testLayer;
}
void checkMKLPackedLayer(LayerPtr testLayer1, LayerPtr testLayer2) {
void checkMKLPackedLayer(LayerConfig layerConfig1,
LayerConfig layerConfig2,
bool reversed,
int layerSize,
int batchSize,
bool useBatch1,
bool useBatch2) {
LayerPtr dataLayer;
ParameterPtr para, bias;
if (layerConfig1.type() == "recurrent") {
dataLayer = creatDataLayer("layer_0", batchSize, layerSize, false);
para = creatParameter("para_0", 0, layerSize * layerSize, false);
bias = nullptr;
} else if (layerConfig1.type() == "gated_recurrent") {
dataLayer = creatDataLayer("layer_0", batchSize, layerSize * 3, false);
para = creatParameter("para_0", 0, layerSize * layerSize * 3, false);
bias = creatParameterBias("bias_0", 1, layerSize * 3, false);
}
LayerPtr testLayer1 = initMKLPackedLayer(
layerConfig1, reversed, layerSize, dataLayer, para, bias);
LayerPtr testLayer2 = initMKLPackedLayer(
layerConfig2, reversed, layerSize, dataLayer, para, bias);
const VectorPtr& weightGrad =
(testLayer1->getParameters()[0])->getBuf(PARAMETER_GRADIENT);
const MatrixPtr& inputGrad = testLayer1->getPrev(0)->getOutputGrad();
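
Note that testLayer1 and testLayer2 are initialized above from the same dataLayer and the same ParameterPtr, so both layers write their backward results into one shared PARAMETER_GRADIENT buffer; that is why the body that follows zeroes weightGrad/inputGrad before each backward pass and snapshots the results into wgt_grad1/wgt_grad2 before comparing. A toy, self-contained version of that zero-backward-snapshot pattern (std::vector stands in for the shared gradient buffer; the lambdas are illustrative):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<float> sharedGrad(4, 0.f);  // plays the role of weightGrad

  // Two implementations that are expected to produce identical gradients.
  auto backwardRef = [&] { for (auto& g : sharedGrad) g += 1.f; };
  auto backwardMkl = [&] { for (auto& g : sharedGrad) g += 1.f; };

  std::fill(sharedGrad.begin(), sharedGrad.end(), 0.f);  // weightGrad->zero()
  backwardRef();
  std::vector<float> grad1 = sharedGrad;                 // wgt_grad1.copyFrom(*weightGrad)

  std::fill(sharedGrad.begin(), sharedGrad.end(), 0.f);
  backwardMkl();
  std::vector<float> grad2 = sharedGrad;                 // wgt_grad2.copyFrom(*weightGrad)

  assert(grad1 == grad2);  // checkError(wgt_grad1, wgt_grad2)
  return 0;
}
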
@@ -462,78 +489,34 @@ void checkMKLPackedLayer(LayerPtr testLayer1, LayerPtr testLayer2) {
CpuMatrix input_grad1(inputGrad->getHeight(), inputGrad->getWidth());
CpuMatrix input_grad2(inputGrad->getHeight(), inputGrad->getWidth());
CpuMatrix outputGrad(inputGrad->getHeight(), inputGrad->getWidth());
outputGrad.randomizeUniform();
for (int i = 0; i < 2; i++) {
FLAGS_rnn_use_batch = true;
FLAGS_rnn_use_batch = useBatch1;
testLayer1->forward(PASS_GC);
testLayer1->getOutputGrad()->copyFrom(outputGrad);
weightGrad->zero();
inputGrad->zero();
testLayer1->backward(nullptr);
wgt_grad1.copyFrom(*weightGrad);
input_grad1.copyFrom(*inputGrad);
FLAGS_rnn_use_batch = true;
testLayer2->forward(PASS_GC);
testLayer2->getOutputGrad()->copyFrom(outputGrad);
weightGrad->zero();
inputGrad->zero();
testLayer2->backward(nullptr);
wgt_grad2.copyFrom(*weightGrad);
input_grad2.copyFrom(*inputGrad);
checkError(*testLayer1->getOutputValue(), *testLayer2->getOutputValue());
checkError(wgt_grad1, wgt_grad2);
checkError(input_grad1, input_grad2);
}
for (int i = 0; i < 2; i++) {
CpuMatrix outputValue(testLayer2->getOutputValue()->getHeight(),
testLayer2->getOutputValue()->getWidth());
FLAGS_rnn_use_batch = true;
FLAGS_rnn_use_batch = useBatch2;
testLayer2->forward(PASS_GC);
outputValue.copyFrom(*testLayer2->getOutputValue());
testLayer2->getOutputGrad()->copyFrom(outputGrad);
testLayer1->getOutputGrad()->randomizeUniform();
testLayer2->getOutputGrad()->copyFrom(*testLayer1->getOutputGrad());
weightGrad->zero();
inputGrad->zero();
testLayer2->backward(nullptr);
FLAGS_rnn_use_batch = useBatch1;
testLayer1->backward(nullptr);
wgt_grad1.copyFrom(*weightGrad);
input_grad1.copyFrom(*inputGrad);
FLAGS_rnn_use_batch = false;
testLayer2->getOutputValue()->zero();
testLayer2->forward(PASS_GC);
testLayer2->getOutputGrad()->copyFrom(outputGrad);
weightGrad->zero();
inputGrad->zero();
FLAGS_rnn_use_batch = useBatch2;
testLayer2->backward(nullptr);
wgt_grad2.copyFrom(*weightGrad);
input_grad2.copyFrom(*inputGrad);
checkError(outputValue, *testLayer2->getOutputValue());
checkError(*testLayer1->getOutputValue(), *testLayer2->getOutputValue());
checkError(wgt_grad1, wgt_grad2);
checkError(input_grad1, input_grad2);
}
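
The helper drives both layers through Paddle's global FLAGS_rnn_use_batch switch, which selects between the sequential and the batch-reordered RNN execution path; because the flag is global, it has to be re-set immediately before each layer's forward and backward call, which is why the assignments recur throughout the loop above. A tiny self-contained sketch of that flag-driven dispatch (only the flag name matches Paddle; ToyRecurrentLayer is illustrative):

#include <iostream>

bool FLAGS_rnn_use_batch = true;  // stand-in for the gflags-defined rnn_use_batch flag

struct ToyRecurrentLayer {
  void forward() const {
    if (FLAGS_rnn_use_batch) {
      std::cout << "batched path: reorder sequences into per-step batches\n";
    } else {
      std::cout << "sequential path: walk each sequence step by step\n";
    }
  }
};

int main() {
  ToyRecurrentLayer layer;
  FLAGS_rnn_use_batch = true;   // what useBatch1 == true selects for testLayer1
  layer.forward();
  FLAGS_rnn_use_batch = false;  // what useBatch2 == false selects for testLayer2
  layer.forward();
  return 0;
}
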
@@ -556,20 +539,22 @@ TEST(MKLPackedLayer, RecurrentLayer) {
for (auto layerSize : {32, 64, 128, 256, 512}) {
for (auto batchSize : {1, 5, 100, 500}) {
for (auto reversed : {true, false}) {
LOG(INFO) << " layerSize=" << layerSize << " batchSize=" << batchSize
<< " reversed=" << reversed;
LayerPtr dataLayer =
creatDataLayer("layer_0", batchSize, layerSize, false);
ParameterPtr para =
creatParameter("para_0", 0, layerSize * layerSize, false);
LayerPtr testLayer1 = initMKLPackedLayer(
layerConfig1, reversed, layerSize, dataLayer, para);
LayerPtr testLayer2 = initMKLPackedLayer(
layerConfig2, reversed, layerSize, dataLayer, para);
checkMKLPackedLayer(testLayer1, testLayer2);
for (auto paddle_use_batch : {true, false}) {
for (auto MKLPacked_use_batch : {true, false}) {
LOG(INFO) << " layerSize=" << layerSize
<< " batchSize=" << batchSize << " reversed=" << reversed
<< " paddle_use_batch=" << paddle_use_batch
<< " MKLPacked_use_batch=" << MKLPacked_use_batch;
checkMKLPackedLayer(layerConfig1,
layerConfig2,
reversed,
layerSize,
batchSize,
paddle_use_batch,
MKLPacked_use_batch);
}
}
}
}
}
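
The two LayerConfig objects passed into checkMKLPackedLayer are built earlier in TEST(MKLPackedLayer, RecurrentLayer), outside the lines shown here. Presumably one describes the reference recurrent layer and the other the MKL-packed variant, roughly along these lines (a sketch only: the exact type string for the MKL layer and the activation are assumptions, and Paddle's generated ModelConfig protobuf definitions are taken as available):

LayerConfig layerConfig1;
layerConfig1.set_type("recurrent");             // reference layer; matches the type check in checkMKLPackedLayer
layerConfig1.set_active_type("sigmoid");        // activation is a guess

LayerConfig layerConfig2;
layerConfig2.set_type("mkl_packed_recurrent");  // assumed registration name of MKLPackedRecurrentLayer
layerConfig2.set_active_type("sigmoid");
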