Commit f3bb7b99 authored by tensor-tang

refine MKLDNNTester, add UpdateCallback for test

Parent 94ea8ee0
@@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn,
     initTestLayer(
         configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
   }
-  dnnLayer_ = testLayers_[DNN];
   refLayer_ = testLayers_[REF];
+  dnnLayer_ = std::dynamic_pointer_cast<MKLDNNLayer>(testLayers_[DNN]);
+  CHECK(dnnLayer_);
+  // for comparison with Paddle reference results,
+  // need manually add cpu device output for test
+  dnnLayer_->addOutputArgument(-1);
 
   EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
   EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
@@ -109,20 +113,21 @@ void MKLDNNTester::randomBotDatas() {
 
 void MKLDNNTester::randomTopDiffs() {
   refLayer_->getOutputGrad()->randomizeUniform();
-  dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
-  VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
+  dnnLayer_->getOutput(-1).grad->copyFrom(*(refLayer_->getOutputGrad()));
+  VLOG(lvl_) << "Random Backward Input, TopDiff: ";
   printMatrix(refLayer_->getOutputGrad());
 }
 
 void MKLDNNTester::checkForward() {
-  printTopDatas();
-  double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
-                               testLayers_[REF]->getOutputValue());
   VLOG(MKLDNN_ALL) << "Check Forward";
+  printTopDatas();
+  double delta = compareMatrix(dnnLayer_->getOutput(-1).value,
+                               refLayer_->getOutputValue());
   EXPECT_LE(fabs(delta), eps_);
 }
 
 void MKLDNNTester::checkBackwardData() {
+  VLOG(MKLDNN_ALL) << "Check Backward Data";
   // TODO(TJ): uncomment me when batch norm ready
   // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
@@ -144,14 +149,12 @@ void MKLDNNTester::checkBackwardData() {
 }
 
 void MKLDNNTester::checkBackwardWgts() {
+  VLOG(MKLDNN_ALL) << "Check Backward Weight";
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   vector<VectorPtr> dnnWgts;  // used to temply save mkldnn weights
   saveWgt(parameters_[DNN], dnnWgts);
 
-  const MKLDNNLayerPtr dnnlayer =
-      std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
-  CHECK(dnnlayer);
-  dnnlayer->convertWeightsToPaddle();
+  dnnLayer_->convertWeightsToPaddle();
   for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
     const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
     const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
@@ -189,38 +192,38 @@ void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
 }
 
 // clear parameters grad
-void MKLDNNTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs(size_t id) {
+  CHECK_LE(id, parameters_.size());
   for (size_t n = 0; n < parameters_.size(); ++n) {
-    for (size_t i = 0; i < parameters_[n].size(); ++i) {
-      const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
-      if (grad) {
-        grad->zeroMem();
+    if (id == n || id == parameters_.size()) {
+      for (size_t i = 0; i < parameters_[n].size(); ++i) {
+        const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
+        if (grad) {
+          grad->zeroMem();
+        }
       }
     }
   }
 }
 
-void MKLDNNTester::clearBotDiffs() {
-  // dnn and ref
+void MKLDNNTester::clearBotDiffs(size_t id) {
+  CHECK_LE(id, dataLayers_.size());
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
-    // all inputs layers
-    for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
-      dataLayers_[n][i]->getOutputGrad()->zeroMem();
+    if (id == n || id == dataLayers_.size()) {
+      // clear inputs layers of this specific layer
+      for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
+        dataLayers_[n][i]->getOutputGrad()->zeroMem();
+      }
     }
   }
 }
 
-void MKLDNNTester::clearBotDiffs(int n) {
-  CHECK_LT(n, NUM);
-  // all inputs layers
-  for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
-    dataLayers_[n][i]->getOutputGrad()->zeroMem();
-  }
-}
-
-void MKLDNNTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas(size_t id) {
+  CHECK_LE(id, testLayers_.size());
   for (size_t i = 0; i < testLayers_.size(); ++i) {
-    testLayers_[i]->getOutputValue()->zeroMem();
+    if (id == i || id == testLayers_.size()) {
+      testLayers_[i]->getOutputValue()->zeroMem();
+    }
   }
 }
@@ -300,16 +303,24 @@ void MKLDNNTester::runOnce() {
   checkForward();
 
   // test backward
+  // simple updater
+  UpdateCallback updateCallback = [](Parameter* para) {
+    auto& grad = para->getBuf(PARAMETER_GRADIENT);
+    auto& value = para->getBuf(PARAMETER_VALUE);
+    real lr = 1e-3;
+    value->add(*grad, lr);
+  };
+
   randomTopDiffs();
-  dnnLayer_->backward(nullptr);
-  refLayer_->backward(nullptr);
+  dnnLayer_->backward(updateCallback);
+  refLayer_->backward(updateCallback);
   checkBackwardData();
   checkBackwardWgts();
 
   // clear buffers
   // ref code will addto the diff, dnn code will writeto it
-  // and clearTopDatas() and clearWgtDiffs() should be coverd by test layers
+  // and clearTopDatas(REF) should be coverd by ref layers
   clearBotDiffs(REF);
+  clearWgtDiffs(REF);
 }
 
 void MKLDNNTester::run(const TestConfig& dnn,
...
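The simple updater introduced in runOnce() applies the same step to every parameter after backward(): value += lr * grad with a fixed learning rate of 1e-3, so the MKLDNN layer and the reference layer keep identical weights between iterations. Below is a minimal standalone sketch of that update rule over plain std::vector buffers; it deliberately does not use Paddle's Parameter/VectorPtr API, and the names in it are illustrative only.

    #include <cstddef>
    #include <vector>

    // Same rule as the test's updateCallback: value += lr * grad, element-wise.
    void simpleUpdate(std::vector<float>& value, const std::vector<float>& grad,
                      float lr = 1e-3f) {
      for (size_t i = 0; i < value.size() && i < grad.size(); ++i) {
        value[i] += lr * grad[i];
      }
    }

    int main() {
      std::vector<float> value = {1.0f, 2.0f};
      std::vector<float> grad = {10.0f, -10.0f};
      simpleUpdate(value, grad);  // value becomes {1.01, 1.99}
      return 0;
    }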
@@ -18,6 +18,7 @@ limitations under the License. */
 
 #include <vector>
 #include "LayerGradUtil.h"
 #include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"
 
 namespace paddle {
@@ -40,7 +41,8 @@ protected:
   vector<LayerMap> layerMaps_;
   vector<vector<ParameterPtr>> parameters_;
   vector<LayerPtr> testLayers_;
-  LayerPtr dnnLayer_, refLayer_;
+  LayerPtr refLayer_;
+  MKLDNNLayerPtr dnnLayer_;
 
   /// run some iterations, all the result should pass
   size_t iter_;
@@ -88,10 +90,10 @@ private:
   void checkBackwardData();
   void checkBackwardWgts();
 
-  void clearWgtDiffs();
-  void clearBotDiffs();
-  void clearBotDiffs(int n);  // clear specific layer
-  void clearTopDatas();
+  // clear specific layer, clear all when id equals NUM
+  void clearWgtDiffs(size_t id = NUM);
+  void clearBotDiffs(size_t id = NUM);
+  void clearTopDatas(size_t id = NUM);
 
   void printTopDatas();
   void printMatrix(const MatrixPtr& m);
...
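The three clear* helpers declared above share one dispatch rule: passing a specific layer index (DNN or REF) clears only that layer's buffers, while the default argument NUM, which equals the number of test layers, clears them all. Below is a minimal standalone sketch of that pattern over plain std::vector buffers; it is an illustration only and does not touch Paddle's dataLayers_/parameters_ containers.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Clear one slot when id names it, or every slot when id equals the slot
    // count, mirroring the "id == n || id == size" check in clearWgtDiffs,
    // clearBotDiffs, and clearTopDatas.
    void clearDiffs(std::vector<std::vector<float>>& diffs, size_t id) {
      assert(id <= diffs.size());
      for (size_t n = 0; n < diffs.size(); ++n) {
        if (id == n || id == diffs.size()) {
          for (float& v : diffs[n]) {
            v = 0.0f;  // stands in for zeroMem()
          }
        }
      }
    }

    int main() {
      std::vector<std::vector<float>> diffs = {{1.0f, 2.0f}, {3.0f, 4.0f}};
      clearDiffs(diffs, 1);             // clear only slot 1 (e.g. REF)
      clearDiffs(diffs, diffs.size());  // clear everything, like the NUM default
      return 0;
    }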