Commit f3bb7b99 authored by tensor-tang

refine MKLDNNTester add UpdateCallback for test

Parent 94ea8ee0
@@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn,
     initTestLayer(
         configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
   }
-  dnnLayer_ = testLayers_[DNN];
   refLayer_ = testLayers_[REF];
+  dnnLayer_ = std::dynamic_pointer_cast<MKLDNNLayer>(testLayers_[DNN]);
+  CHECK(dnnLayer_);
+  // for comparison with Paddle reference results,
+  // need to manually add a cpu device output for the test
+  dnnLayer_->addOutputArgument(-1);
   EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
   EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
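Note: with this change the tester keeps dnnLayer_ as an MKLDNNLayerPtr so it can call MKLDNN-specific methods (addOutputArgument, convertWeightsToPaddle) directly; std::dynamic_pointer_cast returns a null pointer if the created layer is not actually an MKLDNN layer, which the CHECK turns into a test failure. A minimal standalone illustration of that downcast-and-check pattern, using stand-in classes (not Paddle's layer types):

#include <cassert>
#include <memory>

struct Layer {
  virtual ~Layer() = default;
};

// Stand-in for MKLDNNLayer: the subtype exposing extra methods.
struct MkldnnLikeLayer : Layer {
  void addOutputArgument(int deviceId) { (void)deviceId; }
};

int main() {
  std::shared_ptr<Layer> created = std::make_shared<MkldnnLikeLayer>();
  // dynamic_pointer_cast yields nullptr when `created` is not the expected subtype.
  auto dnnLayer = std::dynamic_pointer_cast<MkldnnLikeLayer>(created);
  assert(dnnLayer);                 // corresponds to CHECK(dnnLayer_) in the tester
  dnnLayer->addOutputArgument(-1);  // mirrors dnnLayer_->addOutputArgument(-1) above
  return 0;
}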
@@ -109,20 +113,21 @@ void MKLDNNTester::randomBotDatas() {
 void MKLDNNTester::randomTopDiffs() {
   refLayer_->getOutputGrad()->randomizeUniform();
-  dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
-  VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
+  dnnLayer_->getOutput(-1).grad->copyFrom(*(refLayer_->getOutputGrad()));
+  VLOG(lvl_) << "Random Backward Input, TopDiff: ";
   printMatrix(refLayer_->getOutputGrad());
 }
 
 void MKLDNNTester::checkForward() {
-  printTopDatas();
-  double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
-                               testLayers_[REF]->getOutputValue());
+  VLOG(MKLDNN_ALL) << "Check Forward";
+  printTopDatas();
+  double delta = compareMatrix(dnnLayer_->getOutput(-1).value,
+                               refLayer_->getOutputValue());
   EXPECT_LE(fabs(delta), eps_);
 }
 
 void MKLDNNTester::checkBackwardData() {
+  VLOG(MKLDNN_ALL) << "Check Backward Data";
   // TODO(TJ): uncomment me when batch norm ready
   // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
@@ -144,14 +149,12 @@ void MKLDNNTester::checkBackwardData() {
 }
 
 void MKLDNNTester::checkBackwardWgts() {
+  VLOG(MKLDNN_ALL) << "Check Backward Weight";
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   vector<VectorPtr> dnnWgts;  // used to temporarily save mkldnn weights
   saveWgt(parameters_[DNN], dnnWgts);
-  const MKLDNNLayerPtr dnnlayer =
-      std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
-  CHECK(dnnlayer);
-  dnnlayer->convertWeightsToPaddle();
+  dnnLayer_->convertWeightsToPaddle();
   for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
     const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
     const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
@@ -189,8 +192,10 @@ void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
 }
 
 // clear parameters grad
-void MKLDNNTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs(size_t id) {
+  CHECK_LE(id, parameters_.size());
   for (size_t n = 0; n < parameters_.size(); ++n) {
+    if (id == n || id == parameters_.size()) {
       for (size_t i = 0; i < parameters_[n].size(); ++i) {
         const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
         if (grad) {
@@ -198,30 +203,28 @@ void MKLDNNTester::clearWgtDiffs() {
       }
     }
+    }
   }
 }
 
-void MKLDNNTester::clearBotDiffs() {
-  // dnn and ref
+void MKLDNNTester::clearBotDiffs(size_t id) {
+  CHECK_LE(id, dataLayers_.size());
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
-    // all inputs layers
+    if (id == n || id == dataLayers_.size()) {
+      // clear inputs layers of this specific layer
       for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
         dataLayers_[n][i]->getOutputGrad()->zeroMem();
       }
+    }
   }
 }
 
-void MKLDNNTester::clearBotDiffs(int n) {
-  CHECK_LT(n, NUM);
-  // all inputs layers
-  for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
-    dataLayers_[n][i]->getOutputGrad()->zeroMem();
-  }
-}
-
-void MKLDNNTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas(size_t id) {
+  CHECK_LE(id, testLayers_.size());
   for (size_t i = 0; i < testLayers_.size(); ++i) {
+    if (id == i || id == testLayers_.size()) {
       testLayers_[i]->getOutputValue()->zeroMem();
+    }
   }
 }
 
 void MKLDNNTester::printTopDatas() {
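Note: the clearWgtDiffs/clearBotDiffs/clearTopDatas refactor above folds the separate clear-all and clear-one overloads into a single function that takes an index and treats an index equal to the number of slots (NUM, the default) as "clear everything". A minimal standalone sketch of that sentinel-index pattern, using plain std::vector buffers as stand-ins for the parameter/gradient memories (not Paddle's types):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Zero the buffers of one slot, or of every slot when `id` equals the
// slot count (the same convention as `id == NUM` in the tester).
void clearDiffs(std::vector<std::vector<float>>& grads, std::size_t id) {
  assert(id <= grads.size());
  for (std::size_t n = 0; n < grads.size(); ++n) {
    if (id == n || id == grads.size()) {
      std::fill(grads[n].begin(), grads[n].end(), 0.0f);
    }
  }
}

int main() {
  std::vector<std::vector<float>> grads = {{1.f, 2.f}, {3.f, 4.f}};
  clearDiffs(grads, 0);             // clear only slot 0, like clearBotDiffs(REF)
  clearDiffs(grads, grads.size());  // clear all slots, like the default id = NUM
  return 0;
}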
@@ -300,16 +303,24 @@ void MKLDNNTester::runOnce() {
   checkForward();
 
   // test backward
+  // simple updater
+  UpdateCallback updateCallback = [](Parameter* para) {
+    auto& grad = para->getBuf(PARAMETER_GRADIENT);
+    auto& value = para->getBuf(PARAMETER_VALUE);
+    real lr = 1e-3;
+    value->add(*grad, lr);
+  };
   randomTopDiffs();
-  dnnLayer_->backward(nullptr);
-  refLayer_->backward(nullptr);
+  dnnLayer_->backward(updateCallback);
+  refLayer_->backward(updateCallback);
   checkBackwardData();
   checkBackwardWgts();
 
   // clear buffers
   // ref code will add to the diff, dnn code will write to it
-  // and clearTopDatas() and clearWgtDiffs() should be coverd by test layers
+  // and clearTopDatas(REF) should be covered by ref layers
   clearBotDiffs(REF);
+  clearWgtDiffs(REF);
 }
 
 void MKLDNNTester::run(const TestConfig& dnn,
......
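Note: the runOnce() hunk above is the heart of this commit: instead of passing nullptr, both the MKLDNN layer and the reference layer now run backward with a simple updater, so the parameter-update path gets exercised by the test. The lambda receives a Parameter* per parameter, and value->add(*grad, lr) effectively performs value += lr * grad on the two buffers. A self-contained sketch of the same per-parameter callback idea, with a toy Param struct standing in for Paddle's Parameter (names here are illustrative, not Paddle's API):

#include <cstddef>
#include <functional>
#include <vector>

// Toy stand-in for a parameter with value and gradient buffers.
struct Param {
  std::vector<float> value;
  std::vector<float> grad;
};

using UpdateCallback = std::function<void(Param*)>;

// Called once per parameter after its gradient is ready, mirroring how
// the lambda in runOnce() is handed to Layer::backward().
void backwardAllParams(std::vector<Param>& params, const UpdateCallback& cb) {
  for (auto& p : params) {
    if (cb) cb(&p);
  }
}

int main() {
  std::vector<Param> params = {{{1.f, 1.f}, {0.5f, -0.5f}}};
  UpdateCallback updateCallback = [](Param* para) {
    const float lr = 1e-3f;
    // value += lr * grad, the same scaled accumulation as value->add(*grad, lr)
    for (std::size_t i = 0; i < para->value.size(); ++i) {
      para->value[i] += lr * para->grad[i];
    }
  };
  backwardAllParams(params, updateCallback);
  return 0;
}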
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <vector>
 #include "LayerGradUtil.h"
 #include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"
 
 namespace paddle {
@@ -40,7 +41,8 @@ protected:
   vector<LayerMap> layerMaps_;
   vector<vector<ParameterPtr>> parameters_;
   vector<LayerPtr> testLayers_;
-  LayerPtr dnnLayer_, refLayer_;
+  LayerPtr refLayer_;
+  MKLDNNLayerPtr dnnLayer_;
 
   /// run some iterations, all the result should pass
   size_t iter_;
@@ -88,10 +90,10 @@ private:
   void checkBackwardData();
   void checkBackwardWgts();
 
-  void clearWgtDiffs();
-  void clearBotDiffs();
-  void clearBotDiffs(int n);  // clear specific layer
-  void clearTopDatas();
+  // clear specific layer, clear all when id equals NUM
+  void clearWgtDiffs(size_t id = NUM);
+  void clearBotDiffs(size_t id = NUM);
+  void clearTopDatas(size_t id = NUM);
 
   void printTopDatas();
   void printMatrix(const MatrixPtr& m);
......