Commit a475a57d authored by tensor-tang

rename files and classes, use uppercase of Mkldnn and Cpu

Parent: b2bd6713
@@ -25,13 +25,13 @@ filter_test(GSERVER_HEADER)
 filter_test(GSERVER_SOURCES)
 if(NOT WITH_MKLDNN)
-  file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.h")
-  file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.cpp")
+  file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.h")
+  file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.cpp")
   list(REMOVE_ITEM GSERVER_HEADER ${DNN_HEADER})
   list(REMOVE_ITEM GSERVER_SOURCES ${DNN_SOURCES})
-  message(STATUS "Skip compiling with Mkldnnlayers and MkldnnActivations")
+  message(STATUS "Skip compiling with MKLDNNLayers and MKLDNNActivations")
 else()
-  message(STATUS "Compile with Mkldnnlayers and MkldnnActivations")
+  message(STATUS "Compile with MKLDNNLayers and MKLDNNActivations")
 endif()
 if(NOT WITH_GPU)
...
@@ -30,26 +30,26 @@ typedef enum {
  * @brief MKLDNN CPU engine.
  *
  */
-class CpuEngine {
+class CPUEngine {
 public:
-  static CpuEngine& Instance() {
+  static CPUEngine& Instance() {
     // Thread-safe in C++11.
-    static CpuEngine myInstance;
+    static CPUEngine myInstance;
     return myInstance;
   }
   // Disallow copy or move
-  CpuEngine(const CpuEngine&) = delete;             // Copy constructor
-  CpuEngine(CpuEngine&&) = delete;                  // Move constructor
-  CpuEngine& operator=(const CpuEngine&) = delete;  // Copy assignment
-  CpuEngine& operator=(CpuEngine&&) = delete;       // Move assignment
+  CPUEngine(const CPUEngine&) = delete;             // Copy constructor
+  CPUEngine(CPUEngine&&) = delete;                  // Move constructor
+  CPUEngine& operator=(const CPUEngine&) = delete;  // Copy assignment
+  CPUEngine& operator=(CPUEngine&&) = delete;       // Move assignment
   mkldnn::engine& getEngine() { return cpuEngine_; }
 protected:
-  CpuEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
-  // CpuEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
-  ~CpuEngine() {}
+  CPUEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
+  // CPUEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
+  ~CPUEngine() {}
 private:
   mkldnn::engine cpuEngine_;
@@ -59,11 +59,11 @@ private:
  * @brief MKLDNN Stream.
  *
  */
-class MkldnnStream {
+class MKLDNNStream {
 public:
-  MkldnnStream() : ready_(false) { resetState(); }
-  virtual ~MkldnnStream() {}
+  MKLDNNStream() : ready_(false) { resetState(); }
+  virtual ~MKLDNNStream() {}
   /**
    * @brief Submit stream
...
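For context, a minimal call-site sketch for the renamed singleton above. The function name is hypothetical; it assumes the header path paddle/gserver/layers/MKLDNNBase.h used elsewhere in this commit and the paddle namespace used by the rest of the tree:

#include "paddle/gserver/layers/MKLDNNBase.h"
#include "mkldnn.hpp"

// Hypothetical helper: every MKL-DNN primitive in the process is built on the
// same CPU engine, fetched through the Meyers singleton. The static local in
// Instance() is initialized exactly once and thread-safely under C++11.
static void buildOnSharedEngine() {
  mkldnn::engine& eng = paddle::CPUEngine::Instance().getEngine();
  (void)eng;  // construct mkldnn::memory and compute primitives with `eng` here
}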
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "MkldnnFcLayer.h"
+#include "MKLDNNFcLayer.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"
@@ -24,11 +24,11 @@ typedef inner_product_backward_data fc_bwdData;
 namespace paddle {
-REGISTER_LAYER(mkldnn_fc, MkldnnFcLayer);
+REGISTER_LAYER(mkldnn_fc, MKLDNNFcLayer);
-bool MkldnnFcLayer::init(const LayerMap& layerMap,
+bool MKLDNNFcLayer::init(const LayerMap& layerMap,
                          const ParameterMap& parameterMap) {
-  if (!MkldnnLayer::init(layerMap, parameterMap)) {
+  if (!MKLDNNLayer::init(layerMap, parameterMap)) {
     return false;
   }
@@ -56,7 +56,7 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
   return true;
 }
-void MkldnnFcLayer::convertWeightsFromPaddle() {
+void MKLDNNFcLayer::convertWeightsFromPaddle() {
   if (FLAGS_use_mkldnn_wgt) {
     return;
   }
@@ -81,7 +81,7 @@ void MkldnnFcLayer::convertWeightsFromPaddle() {
   hasInitedWgt_ = true;
 }
-void MkldnnFcLayer::convertWeightsToPaddle() {
+void MKLDNNFcLayer::convertWeightsToPaddle() {
   MatrixPtr dnnWgt = weight_->getW();
   MatrixPtr paddleWgt;
   dnnWgt->transpose(paddleWgt, true);
@@ -92,7 +92,7 @@ void MkldnnFcLayer::convertWeightsToPaddle() {
   dnnWgtT->copyFrom(*paddleWgt);
 }
-void MkldnnFcLayer::reshape() {
+void MKLDNNFcLayer::reshape() {
   const Argument& input = getInput(0);
   int batchSize = input.getBatchSize();
   if (bs_ == batchSize) {
@@ -129,7 +129,7 @@ void MkldnnFcLayer::reshape() {
   convertWeightsFromPaddle();
 }
-void MkldnnFcLayer::resetFwd() {
+void MKLDNNFcLayer::resetFwd() {
   bool hasBias = biases_ && biases_->getW();
   real* iData = getInputValue(0)->getData();
   real* oData = getOutputValue()->getData();
@@ -166,7 +166,7 @@ void MkldnnFcLayer::resetFwd() {
   pipelineFwd_.push_back(*fwd_);
 }
-void MkldnnFcLayer::resetBwd() {
+void MKLDNNFcLayer::resetBwd() {
   if (!needResetBwd_) {
     return;
   }
@@ -231,7 +231,7 @@ void MkldnnFcLayer::resetBwd() {
   pipelineBwd_.push_back(*bwdData_);
 }
-void MkldnnFcLayer::forward(PassType passType) {
+void MKLDNNFcLayer::forward(PassType passType) {
   Layer::forward(passType);
   reshape();
@@ -253,7 +253,7 @@ void MkldnnFcLayer::forward(PassType passType) {
   }
 }
-void MkldnnFcLayer::backward(const UpdateCallback& callback) {
+void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
   /* Do derivation */ {
     REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
     backwardActivation();
...
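Aside: judging from the transpose-then-copy in convertWeightsToPaddle() above (dnnWgt->transpose(paddleWgt, true) followed by copyFrom), the fc weight appears to be stored transposed relative to Paddle's layout, so the conversion amounts to reordering elements. A self-contained sketch of that reordering using plain std::vector rather than Paddle's MatrixPtr; names and dimensions here are illustrative only:

#include <cstddef>
#include <vector>

// Illustrative transpose: element (r, c) of a rows x cols row-major matrix
// moves to (c, r) in the cols x rows result.
std::vector<float> transposeRowMajor(const std::vector<float>& src,
                                     size_t rows, size_t cols) {
  std::vector<float> dst(src.size());
  for (size_t r = 0; r < rows; ++r) {
    for (size_t c = 0; c < cols; ++c) {
      dst[c * rows + r] = src[r * cols + c];
    }
  }
  return dst;
}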
@@ -14,17 +14,17 @@ limitations under the License. */
 #pragma once
-#include "MkldnnLayer.h"
+#include "MKLDNNLayer.h"
 #include "mkldnn.hpp"
 namespace paddle {
 /**
- * @brief A subclass of MkldnnLayer fc layer.
+ * @brief A subclass of MKLDNNLayer fc layer.
  *
  * The config file api is mkldnn_fc
  */
-class MkldnnFcLayer : public MkldnnLayer {
+class MKLDNNFcLayer : public MKLDNNLayer {
 protected:
   // input layer size, can not be change after init
   size_t iLayerSize_;  // == ic * ih * iw
@@ -37,10 +37,10 @@ protected:
   std::unique_ptr<Weight> biases_;
 public:
-  explicit MkldnnFcLayer(const LayerConfig& config)
-      : MkldnnLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
-  ~MkldnnFcLayer() {}
+  explicit MKLDNNFcLayer(const LayerConfig& config)
+      : MKLDNNLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
+  ~MKLDNNFcLayer() {}
   bool init(const LayerMap& layerMap,
             const ParameterMap& parameterMap) override;
...
@@ -16,7 +16,7 @@ limitations under the License. */
 #include <vector>
 #include "Layer.h"
-#include "MkldnnBase.h"
+#include "MKLDNNBase.h"
 #include "mkldnn.hpp"
 DECLARE_bool(use_mkldnn);
@@ -24,14 +24,14 @@ DECLARE_bool(use_mkldnn_wgt);
 namespace paddle {
-class MkldnnLayer;
-typedef std::shared_ptr<MkldnnLayer> MkldnnLayerPtr;
+class MKLDNNLayer;
+typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;
 /**
- * @brief Base class of Mkldnnlayer.
+ * @brief Base class of MKLDNNlayer.
  *
  */
-class MkldnnLayer : public Layer {
+class MKLDNNLayer : public Layer {
 protected:
   // batch size
   int bs_;
@@ -45,14 +45,14 @@ protected:
   // mkldnn engine, stream and primivtives
   mkldnn::engine engine_;
-  std::shared_ptr<MkldnnStream> stream_;
+  std::shared_ptr<MKLDNNStream> stream_;
   std::shared_ptr<mkldnn::primitive> fwd_;
   std::shared_ptr<mkldnn::primitive> bwdWgt_;
   std::shared_ptr<mkldnn::primitive> bwdData_;
   std::vector<mkldnn::primitive> pipelineFwd_;
   std::vector<mkldnn::primitive> pipelineBwd_;
-  // TODO(TJ): change below memory as MkldnnMatrixPtr type
+  // TODO(TJ): change below memory as MKLDNNMatrixPtr type
   std::shared_ptr<mkldnn::memory> inVal_;
   std::shared_ptr<mkldnn::memory> inGrad_;
   std::shared_ptr<mkldnn::memory> outVal_;
@@ -63,7 +63,7 @@ protected:
   std::shared_ptr<mkldnn::memory> biasGrad_;
 public:
-  explicit MkldnnLayer(const LayerConfig& config)
+  explicit MKLDNNLayer(const LayerConfig& config)
       : Layer(config),
         bs_(0),
         ic_(0),
@@ -79,7 +79,7 @@ public:
         bwdWgt_(nullptr),
         bwdData_(nullptr) {}
-  ~MkldnnLayer() {}
+  ~MKLDNNLayer() {}
   virtual bool init(const LayerMap& layerMap,
                     const ParameterMap& parameterMap) {
@@ -90,8 +90,8 @@ public:
     CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
                             << "Please set WITH_MKLDNN=ON "
                             << "and set use_mkldnn=True";
-    stream_.reset(new MkldnnStream());
-    engine_ = CpuEngine::Instance().getEngine();
+    stream_.reset(new MKLDNNStream());
+    engine_ = CPUEngine::Instance().getEngine();
     // TODO(TJ): deivecId
     return true;
...
@@ -20,11 +20,11 @@ add_test(NAME test_LayerGrad
 ########## test_Mkldnn layers and activations ##########
 if(WITH_MKLDNN)
-    add_unittest_without_exec(test_Mkldnn
-        test_Mkldnn.cpp
-        MkldnnTester.cpp
+    add_unittest_without_exec(test_MKLDNN
+        test_MKLDNN.cpp
+        MKLDNNTester.cpp
         LayerGradUtil.cpp)
-    add_test(NAME test_Mkldnn COMMAND test_Mkldnn)
+    add_test(NAME test_MKLDNN COMMAND test_MKLDNN)
 endif()
 ################ test_CRFLayerGrad ####################
...
@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "MkldnnTester.h"
-#include "paddle/gserver/layers/MkldnnBase.h"
-#include "paddle/gserver/layers/MkldnnLayer.h"
+#include "MKLDNNTester.h"
+#include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"
 namespace paddle {
 // init data layer and test layer of both dnn and reference
-void MkldnnTester::reset(const TestConfig& dnn,
+void MKLDNNTester::reset(const TestConfig& dnn,
                          const TestConfig& ref,
                          size_t batchSize) {
   const bool trans = false;
@@ -71,7 +71,7 @@ void MkldnnTester::reset(const TestConfig& dnn,
   setInputImgSize();
 }
-void MkldnnTester::setInputImgSize() {
+void MKLDNNTester::setInputImgSize() {
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
     for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
       // TODO(TJ): fix me when concat and elewise ready
@@ -82,7 +82,7 @@ void MkldnnTester::setInputImgSize() {
 }
 // init randome parameters of ref, and copy to mkldnn
-void MkldnnTester::randomWgtDatas() {
+void MKLDNNTester::randomWgtDatas() {
   EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
   for (size_t i = 0; i < parameters_[REF].size(); ++i) {
     const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
@@ -96,7 +96,7 @@ void MkldnnTester::randomWgtDatas() {
 }
 // random botdata of ref layer and copy same to mkldnn
-void MkldnnTester::randomBotDatas() {
+void MKLDNNTester::randomBotDatas() {
   CHECK_EQ(dataLayers_.size(), NUM);
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     dataLayers_[REF][i]->getOutputValue()->randomizeUniform();
@@ -107,14 +107,14 @@ void MkldnnTester::randomBotDatas() {
   }
 }
-void MkldnnTester::randomTopDiffs() {
+void MKLDNNTester::randomTopDiffs() {
   refLayer_->getOutputGrad()->randomizeUniform();
   dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
   VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
   printMatrix(refLayer_->getOutputGrad());
 }
-void MkldnnTester::checkForward() {
+void MKLDNNTester::checkForward() {
   printTopDatas();
   double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
                                testLayers_[REF]->getOutputValue());
@@ -122,7 +122,7 @@ void MkldnnTester::checkForward() {
   EXPECT_LE(fabs(delta), eps_);
 }
-void MkldnnTester::checkBackwardData() {
+void MKLDNNTester::checkBackwardData() {
   const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
@@ -141,13 +141,13 @@ void MkldnnTester::checkBackwardData() {
   }
 }
-void MkldnnTester::checkBackwardWgts() {
+void MKLDNNTester::checkBackwardWgts() {
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   vector<VectorPtr> dnnWgts;  // used to temply save mkldnn weights
   saveWgt(parameters_[DNN], dnnWgts);
-  const MkldnnLayerPtr dnnlayer =
-      std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_);
+  const MKLDNNLayerPtr dnnlayer =
+      std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
   CHECK(dnnlayer);
   dnnlayer->convertWeightsToPaddle();
   for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
@@ -166,7 +166,7 @@ void MkldnnTester::checkBackwardWgts() {
   restoreWgt(dnnWgts, parameters_[DNN]);
 }
-void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
+void MKLDNNTester::saveWgt(const vector<ParameterPtr>& from,
                            vector<VectorPtr>& to) {
   const bool useGpu = false;
   to.resize(from.size());
@@ -177,7 +177,7 @@ void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
   }
 }
-void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
+void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
                               vector<ParameterPtr>& to) {
   CHECK_EQ(from.size(), to.size());
   for (size_t i = 0; i < from.size(); ++i) {
@@ -187,7 +187,7 @@ void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
 }
 // clear parameters grad
-void MkldnnTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs() {
   for (size_t n = 0; n < parameters_.size(); ++n) {
     for (size_t i = 0; i < parameters_[n].size(); ++i) {
       const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
@@ -198,7 +198,7 @@ void MkldnnTester::clearWgtDiffs() {
   }
 }
-void MkldnnTester::clearBotDiffs() {
+void MKLDNNTester::clearBotDiffs() {
   // dnn and ref
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
     // all inputs layers
@@ -208,7 +208,7 @@ void MkldnnTester::clearBotDiffs() {
   }
 }
-void MkldnnTester::clearBotDiffs(int n) {
+void MKLDNNTester::clearBotDiffs(int n) {
   CHECK_LT(n, NUM);
   // all inputs layers
   for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
@@ -216,13 +216,13 @@ void MkldnnTester::clearBotDiffs(int n) {
   }
 }
-void MkldnnTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas() {
   for (size_t i = 0; i < testLayers_.size(); ++i) {
     testLayers_[i]->getOutputValue()->zeroMem();
   }
 }
-void MkldnnTester::printTopDatas() {
+void MKLDNNTester::printTopDatas() {
   if (!log_) {
     return;
   }
@@ -233,7 +233,7 @@ void MkldnnTester::printTopDatas() {
   }
 }
-void MkldnnTester::printMatrix(const MatrixPtr& m) {
+void MKLDNNTester::printMatrix(const MatrixPtr& m) {
   if (!log_) {
     return;
   }
@@ -243,7 +243,7 @@ void MkldnnTester::printMatrix(const MatrixPtr& m) {
   VLOG(lvl_) << std::endl << ostr.str();
 }
-void MkldnnTester::printVector(const VectorPtr& v) {
+void MKLDNNTester::printVector(const VectorPtr& v) {
   if (!log_) {
     return;
   }
@@ -253,7 +253,7 @@ void MkldnnTester::printVector(const VectorPtr& v) {
   VLOG(lvl_) << std::endl << ostr.str();
 }
-double MkldnnTester::getDelta(const real* d1,
+double MKLDNNTester::getDelta(const real* d1,
                               const real* d2,
                               size_t len,
                               const float failRate,
@@ -280,17 +280,17 @@ double MkldnnTester::getDelta(const real* d1,
   return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
 }
-double MkldnnTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
+double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
   CHECK_EQ(m1->getElementCnt(), m2->getElementCnt());
   return getDelta(m1->getData(), m2->getData(), m1->getElementCnt());
 }
-double MkldnnTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
+double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
   CHECK_EQ(v1->getSize(), v2->getSize());
   return getDelta(v1->getData(), v2->getData(), v1->getSize());
 }
-void MkldnnTester::runOnce() {
+void MKLDNNTester::runOnce() {
   // test forward
   randomBotDatas();
   dnnLayer_->forward(PASS_TRAIN);
@@ -310,7 +310,7 @@ void MkldnnTester::runOnce() {
   clearBotDiffs(REF);
 }
-void MkldnnTester::run(const TestConfig& dnn,
+void MKLDNNTester::run(const TestConfig& dnn,
                        const TestConfig& ref,
                        size_t batchSize,
                        size_t inputImgH,
...
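For reference, a self-contained sketch in the spirit of getDelta()/compareMatrix() above: sum the element-wise absolute differences, normalize by the magnitude of the reference data, and return a large sentinel when too many individual elements disagree, mirroring the failCnt / failRate / maxOut return visible in the diff. The per-element thresholds below are illustrative, not the tester's actual values:

#include <cmath>
#include <cstddef>

double relativeDelta(const float* d1, const float* d2, size_t len,
                     float failRate = 1e-3f, float maxOut = 1e30f) {
  double delta = 0.0, sum = 0.0;
  size_t failCnt = 0;
  for (size_t i = 0; i < len; ++i) {
    const double diff = std::fabs(d1[i] - d2[i]);
    delta += diff;
    sum += std::fabs(d2[i]);
    // Count an element as failed when both its absolute and relative error
    // are large (illustrative 1e-3 thresholds).
    if (diff > 1e-3 && diff > 1e-3 * std::fabs(d2[i])) {
      ++failCnt;
    }
  }
  return (failCnt / static_cast<float>(len)) > failRate ? maxOut : delta / sum;
}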
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "LayerGradUtil.h"
-#include "paddle/gserver/layers/MkldnnBase.h"
+#include "paddle/gserver/layers/MKLDNNBase.h"
 namespace paddle {
@@ -25,7 +25,7 @@ namespace paddle {
  * @brief test the functionality of Mkldnnlayers
  * refer to paddle original function
  */
-class MkldnnTester {
+class MKLDNNTester {
   enum {
     DNN = 0,
     REF = 1,
@@ -54,14 +54,14 @@ protected:
   size_t ih_, iw_;
 public:
-  explicit MkldnnTester(size_t iter = 3, float epsilon = 1e-4) {
+  explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
     iter_ = iter;
     eps_ = epsilon;
     log_ = false;
     lvl_ = MKLDNN_ALL;
  }
-  ~MkldnnTester() {}
+  ~MKLDNNTester() {}
 public:
   void run(const TestConfig& dnn,
...
@@ -15,7 +15,7 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include <string>
 #include <vector>
-#include "MkldnnTester.h"
+#include "MKLDNNTester.h"
 #include "ModelConfig.pb.h"
 using namespace paddle;  // NOLINT
@@ -43,7 +43,7 @@ void testFcLayer(const testFCDesc& pm) {
                  /* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
   cfg.layerConfig.add_inputs();
-  MkldnnTester tester;
+  MKLDNNTester tester;
   for (auto biasSize : {pm.oc, 0}) {
     cfg.biasSize = biasSize;
     TestConfig ref = cfg;
@@ -54,7 +54,7 @@ void testFcLayer(const testFCDesc& pm) {
   }
 }
-TEST(MkldnnLayer, fcLayer) {
+TEST(MKLDNNLayer, FcLayer) {
   testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
   testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
   testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
...