Commit a475a57d authored by: T tensor-tang

rename files and classes; use uppercase MKLDNN and CPU

Parent b2bd6713
......@@ -25,13 +25,13 @@ filter_test(GSERVER_HEADER)
filter_test(GSERVER_SOURCES)
if(NOT WITH_MKLDNN)
file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.h")
file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.cpp")
file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.h")
file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.cpp")
list(REMOVE_ITEM GSERVER_HEADER ${DNN_HEADER})
list(REMOVE_ITEM GSERVER_SOURCES ${DNN_SOURCES})
message(STATUS "Skip compiling with Mkldnnlayers and MkldnnActivations")
message(STATUS "Skip compiling with MKLDNNLayers and MKLDNNActivations")
else()
message(STATUS "Compile with Mkldnnlayers and MkldnnActivations")
message(STATUS "Compile with MKLDNNLayers and MKLDNNActivations")
endif()
if(NOT WITH_GPU)
......
......@@ -30,26 +30,26 @@ typedef enum {
* @brief MKLDNN CPU engine.
*
*/
class CpuEngine {
class CPUEngine {
public:
static CpuEngine& Instance() {
static CPUEngine& Instance() {
// Thread-safe in C++11.
static CpuEngine myInstance;
static CPUEngine myInstance;
return myInstance;
}
// Disallow copy or move
CpuEngine(const CpuEngine&) = delete; // Copy constructor
CpuEngine(CpuEngine&&) = delete; // Move constructor
CpuEngine& operator=(const CpuEngine&) = delete; // Copy assignment
CpuEngine& operator=(CpuEngine&&) = delete; // Move assignment
CPUEngine(const CPUEngine&) = delete; // Copy constructor
CPUEngine(CPUEngine&&) = delete; // Move constructor
CPUEngine& operator=(const CPUEngine&) = delete; // Copy assignment
CPUEngine& operator=(CPUEngine&&) = delete; // Move assignment
mkldnn::engine& getEngine() { return cpuEngine_; }
protected:
CpuEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
// CpuEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
~CpuEngine() {}
CPUEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
// CPUEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
~CPUEngine() {}
private:
mkldnn::engine cpuEngine_;
......@@ -59,11 +59,11 @@ private:
* @brief MKLDNN Stream.
*
*/
class MkldnnStream {
class MKLDNNStream {
public:
MkldnnStream() : ready_(false) { resetState(); }
MKLDNNStream() : ready_(false) { resetState(); }
virtual ~MkldnnStream() {}
virtual ~MKLDNNStream() {}
/**
* @brief Submit stream
......
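For context only (not part of this commit): a minimal sketch of how the renamed CPUEngine and MKLDNNStream are meant to be used together, based solely on the interfaces visible above. The submit() signature is an assumption, since its body is elided in this hunk.

#include <vector>
#include "MKLDNNBase.h"

void runPipelineSketch() {
  // CPUEngine is a Meyers singleton ("Thread-safe in C++11" above); all layers share one engine.
  mkldnn::engine& eng = paddle::CPUEngine::Instance().getEngine();
  paddle::MKLDNNStream stream;
  std::vector<mkldnn::primitive> pipeline;
  // ... build mkldnn primitives against `eng` and push them into `pipeline` ...
  stream.submit(pipeline);  // assumed signature: submit(std::vector<mkldnn::primitive>&)
}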
......@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MkldnnFcLayer.h"
#include "MKLDNNFcLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
......@@ -24,11 +24,11 @@ typedef inner_product_backward_data fc_bwdData;
namespace paddle {
REGISTER_LAYER(mkldnn_fc, MkldnnFcLayer);
REGISTER_LAYER(mkldnn_fc, MKLDNNFcLayer);
bool MkldnnFcLayer::init(const LayerMap& layerMap,
bool MKLDNNFcLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
if (!MkldnnLayer::init(layerMap, parameterMap)) {
if (!MKLDNNLayer::init(layerMap, parameterMap)) {
return false;
}
......@@ -56,7 +56,7 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
return true;
}
void MkldnnFcLayer::convertWeightsFromPaddle() {
void MKLDNNFcLayer::convertWeightsFromPaddle() {
if (FLAGS_use_mkldnn_wgt) {
return;
}
......@@ -81,7 +81,7 @@ void MkldnnFcLayer::convertWeightsFromPaddle() {
hasInitedWgt_ = true;
}
void MkldnnFcLayer::convertWeightsToPaddle() {
void MKLDNNFcLayer::convertWeightsToPaddle() {
MatrixPtr dnnWgt = weight_->getW();
MatrixPtr paddleWgt;
dnnWgt->transpose(paddleWgt, true);
......@@ -92,7 +92,7 @@ void MkldnnFcLayer::convertWeightsToPaddle() {
dnnWgtT->copyFrom(*paddleWgt);
}
void MkldnnFcLayer::reshape() {
void MKLDNNFcLayer::reshape() {
const Argument& input = getInput(0);
int batchSize = input.getBatchSize();
if (bs_ == batchSize) {
......@@ -129,7 +129,7 @@ void MkldnnFcLayer::reshape() {
convertWeightsFromPaddle();
}
void MkldnnFcLayer::resetFwd() {
void MKLDNNFcLayer::resetFwd() {
bool hasBias = biases_ && biases_->getW();
real* iData = getInputValue(0)->getData();
real* oData = getOutputValue()->getData();
......@@ -166,7 +166,7 @@ void MkldnnFcLayer::resetFwd() {
pipelineFwd_.push_back(*fwd_);
}
void MkldnnFcLayer::resetBwd() {
void MKLDNNFcLayer::resetBwd() {
if (!needResetBwd_) {
return;
}
......@@ -231,7 +231,7 @@ void MkldnnFcLayer::resetBwd() {
pipelineBwd_.push_back(*bwdData_);
}
void MkldnnFcLayer::forward(PassType passType) {
void MKLDNNFcLayer::forward(PassType passType) {
Layer::forward(passType);
reshape();
......@@ -253,7 +253,7 @@ void MkldnnFcLayer::forward(PassType passType) {
}
}
void MkldnnFcLayer::backward(const UpdateCallback& callback) {
void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
/* Do derivation */ {
REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
backwardActivation();
......
......@@ -14,17 +14,17 @@ limitations under the License. */
#pragma once
#include "MkldnnLayer.h"
#include "MKLDNNLayer.h"
#include "mkldnn.hpp"
namespace paddle {
/**
* @brief A subclass of MkldnnLayer fc layer.
* @brief A subclass of MKLDNNLayer fc layer.
*
* The config file api is mkldnn_fc
*/
class MkldnnFcLayer : public MkldnnLayer {
class MKLDNNFcLayer : public MKLDNNLayer {
protected:
// input layer size, can not be change after init
size_t iLayerSize_; // == ic * ih * iw
......@@ -37,10 +37,10 @@ protected:
std::unique_ptr<Weight> biases_;
public:
explicit MkldnnFcLayer(const LayerConfig& config)
: MkldnnLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
explicit MKLDNNFcLayer(const LayerConfig& config)
: MKLDNNLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
~MkldnnFcLayer() {}
~MKLDNNFcLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <vector>
#include "Layer.h"
#include "MkldnnBase.h"
#include "MKLDNNBase.h"
#include "mkldnn.hpp"
DECLARE_bool(use_mkldnn);
......@@ -24,14 +24,14 @@ DECLARE_bool(use_mkldnn_wgt);
namespace paddle {
class MkldnnLayer;
typedef std::shared_ptr<MkldnnLayer> MkldnnLayerPtr;
class MKLDNNLayer;
typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;
/**
* @brief Base class of Mkldnnlayer.
* @brief Base class of MKLDNNlayer.
*
*/
class MkldnnLayer : public Layer {
class MKLDNNLayer : public Layer {
protected:
// batch size
int bs_;
......@@ -45,14 +45,14 @@ protected:
// mkldnn engine, stream and primivtives
mkldnn::engine engine_;
std::shared_ptr<MkldnnStream> stream_;
std::shared_ptr<MKLDNNStream> stream_;
std::shared_ptr<mkldnn::primitive> fwd_;
std::shared_ptr<mkldnn::primitive> bwdWgt_;
std::shared_ptr<mkldnn::primitive> bwdData_;
std::vector<mkldnn::primitive> pipelineFwd_;
std::vector<mkldnn::primitive> pipelineBwd_;
// TODO(TJ): change below memory as MkldnnMatrixPtr type
// TODO(TJ): change below memory as MKLDNNMatrixPtr type
std::shared_ptr<mkldnn::memory> inVal_;
std::shared_ptr<mkldnn::memory> inGrad_;
std::shared_ptr<mkldnn::memory> outVal_;
......@@ -63,7 +63,7 @@ protected:
std::shared_ptr<mkldnn::memory> biasGrad_;
public:
explicit MkldnnLayer(const LayerConfig& config)
explicit MKLDNNLayer(const LayerConfig& config)
: Layer(config),
bs_(0),
ic_(0),
......@@ -79,7 +79,7 @@ public:
bwdWgt_(nullptr),
bwdData_(nullptr) {}
~MkldnnLayer() {}
~MKLDNNLayer() {}
virtual bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
......@@ -90,8 +90,8 @@ public:
CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
<< "Please set WITH_MKLDNN=ON "
<< "and set use_mkldnn=True";
stream_.reset(new MkldnnStream());
engine_ = CpuEngine::Instance().getEngine();
stream_.reset(new MKLDNNStream());
engine_ = CPUEngine::Instance().getEngine();
// TODO(TJ): deivecId
return true;
......
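For orientation only (not part of this commit): MKLDNN-specific calls such as convertWeightsToPaddle() are reached by casting a generic layer pointer to MKLDNNLayerPtr, as MKLDNNTester::checkBackwardWgts does further down. A minimal sketch, assuming an existing paddle::LayerPtr named someLayer:

#include <memory>
#include "MKLDNNLayer.h"

void syncWeightsSketch(const paddle::LayerPtr& someLayer) {
  // Downcast the shared_ptr; the result is null if the layer is not an MKLDNN layer.
  paddle::MKLDNNLayerPtr dnnLayer =
      std::dynamic_pointer_cast<paddle::MKLDNNLayer>(someLayer);
  if (dnnLayer) {
    dnnLayer->convertWeightsToPaddle();  // copy mkldnn-format weights back into Paddle's layout
  }
}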
......@@ -20,11 +20,11 @@ add_test(NAME test_LayerGrad
########## test_Mkldnn layers and activations ##########
if(WITH_MKLDNN)
add_unittest_without_exec(test_Mkldnn
test_Mkldnn.cpp
MkldnnTester.cpp
add_unittest_without_exec(test_MKLDNN
test_MKLDNN.cpp
MKLDNNTester.cpp
LayerGradUtil.cpp)
add_test(NAME test_Mkldnn COMMAND test_Mkldnn)
add_test(NAME test_MKLDNN COMMAND test_MKLDNN)
endif()
################ test_CRFLayerGrad ####################
......
......@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "MkldnnTester.h"
#include "paddle/gserver/layers/MkldnnBase.h"
#include "paddle/gserver/layers/MkldnnLayer.h"
#include "MKLDNNTester.h"
#include "paddle/gserver/layers/MKLDNNBase.h"
#include "paddle/gserver/layers/MKLDNNLayer.h"
namespace paddle {
// init data layer and test layer of both dnn and reference
void MkldnnTester::reset(const TestConfig& dnn,
void MKLDNNTester::reset(const TestConfig& dnn,
const TestConfig& ref,
size_t batchSize) {
const bool trans = false;
......@@ -71,7 +71,7 @@ void MkldnnTester::reset(const TestConfig& dnn,
setInputImgSize();
}
void MkldnnTester::setInputImgSize() {
void MKLDNNTester::setInputImgSize() {
for (size_t n = 0; n < dataLayers_.size(); ++n) {
for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
// TODO(TJ): fix me when concat and elewise ready
......@@ -82,7 +82,7 @@ void MkldnnTester::setInputImgSize() {
}
// init randome parameters of ref, and copy to mkldnn
void MkldnnTester::randomWgtDatas() {
void MKLDNNTester::randomWgtDatas() {
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
for (size_t i = 0; i < parameters_[REF].size(); ++i) {
const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
......@@ -96,7 +96,7 @@ void MkldnnTester::randomWgtDatas() {
}
// random botdata of ref layer and copy same to mkldnn
void MkldnnTester::randomBotDatas() {
void MKLDNNTester::randomBotDatas() {
CHECK_EQ(dataLayers_.size(), NUM);
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
dataLayers_[REF][i]->getOutputValue()->randomizeUniform();
......@@ -107,14 +107,14 @@ void MkldnnTester::randomBotDatas() {
}
}
void MkldnnTester::randomTopDiffs() {
void MKLDNNTester::randomTopDiffs() {
refLayer_->getOutputGrad()->randomizeUniform();
dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
printMatrix(refLayer_->getOutputGrad());
}
void MkldnnTester::checkForward() {
void MKLDNNTester::checkForward() {
printTopDatas();
double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
testLayers_[REF]->getOutputValue());
......@@ -122,7 +122,7 @@ void MkldnnTester::checkForward() {
EXPECT_LE(fabs(delta), eps_);
}
void MkldnnTester::checkBackwardData() {
void MKLDNNTester::checkBackwardData() {
const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
......@@ -141,13 +141,13 @@ void MkldnnTester::checkBackwardData() {
}
}
void MkldnnTester::checkBackwardWgts() {
void MKLDNNTester::checkBackwardWgts() {
CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
vector<VectorPtr> dnnWgts; // used to temply save mkldnn weights
saveWgt(parameters_[DNN], dnnWgts);
const MkldnnLayerPtr dnnlayer =
std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_);
const MKLDNNLayerPtr dnnlayer =
std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
CHECK(dnnlayer);
dnnlayer->convertWeightsToPaddle();
for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
......@@ -166,7 +166,7 @@ void MkldnnTester::checkBackwardWgts() {
restoreWgt(dnnWgts, parameters_[DNN]);
}
void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
void MKLDNNTester::saveWgt(const vector<ParameterPtr>& from,
vector<VectorPtr>& to) {
const bool useGpu = false;
to.resize(from.size());
......@@ -177,7 +177,7 @@ void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
}
}
void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
vector<ParameterPtr>& to) {
CHECK_EQ(from.size(), to.size());
for (size_t i = 0; i < from.size(); ++i) {
......@@ -187,7 +187,7 @@ void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
}
// clear parameters grad
void MkldnnTester::clearWgtDiffs() {
void MKLDNNTester::clearWgtDiffs() {
for (size_t n = 0; n < parameters_.size(); ++n) {
for (size_t i = 0; i < parameters_[n].size(); ++i) {
const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
......@@ -198,7 +198,7 @@ void MkldnnTester::clearWgtDiffs() {
}
}
void MkldnnTester::clearBotDiffs() {
void MKLDNNTester::clearBotDiffs() {
// dnn and ref
for (size_t n = 0; n < dataLayers_.size(); ++n) {
// all inputs layers
......@@ -208,7 +208,7 @@ void MkldnnTester::clearBotDiffs() {
}
}
void MkldnnTester::clearBotDiffs(int n) {
void MKLDNNTester::clearBotDiffs(int n) {
CHECK_LT(n, NUM);
// all inputs layers
for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
......@@ -216,13 +216,13 @@ void MkldnnTester::clearBotDiffs(int n) {
}
}
void MkldnnTester::clearTopDatas() {
void MKLDNNTester::clearTopDatas() {
for (size_t i = 0; i < testLayers_.size(); ++i) {
testLayers_[i]->getOutputValue()->zeroMem();
}
}
void MkldnnTester::printTopDatas() {
void MKLDNNTester::printTopDatas() {
if (!log_) {
return;
}
......@@ -233,7 +233,7 @@ void MkldnnTester::printTopDatas() {
}
}
void MkldnnTester::printMatrix(const MatrixPtr& m) {
void MKLDNNTester::printMatrix(const MatrixPtr& m) {
if (!log_) {
return;
}
......@@ -243,7 +243,7 @@ void MkldnnTester::printMatrix(const MatrixPtr& m) {
VLOG(lvl_) << std::endl << ostr.str();
}
void MkldnnTester::printVector(const VectorPtr& v) {
void MKLDNNTester::printVector(const VectorPtr& v) {
if (!log_) {
return;
}
......@@ -253,7 +253,7 @@ void MkldnnTester::printVector(const VectorPtr& v) {
VLOG(lvl_) << std::endl << ostr.str();
}
double MkldnnTester::getDelta(const real* d1,
double MKLDNNTester::getDelta(const real* d1,
const real* d2,
size_t len,
const float failRate,
......@@ -280,17 +280,17 @@ double MkldnnTester::getDelta(const real* d1,
return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
}
double MkldnnTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
CHECK_EQ(m1->getElementCnt(), m2->getElementCnt());
return getDelta(m1->getData(), m2->getData(), m1->getElementCnt());
}
double MkldnnTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
CHECK_EQ(v1->getSize(), v2->getSize());
return getDelta(v1->getData(), v2->getData(), v1->getSize());
}
void MkldnnTester::runOnce() {
void MKLDNNTester::runOnce() {
// test forward
randomBotDatas();
dnnLayer_->forward(PASS_TRAIN);
......@@ -310,7 +310,7 @@ void MkldnnTester::runOnce() {
clearBotDiffs(REF);
}
void MkldnnTester::run(const TestConfig& dnn,
void MKLDNNTester::run(const TestConfig& dnn,
const TestConfig& ref,
size_t batchSize,
size_t inputImgH,
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include <string>
#include <vector>
#include "LayerGradUtil.h"
#include "paddle/gserver/layers/MkldnnBase.h"
#include "paddle/gserver/layers/MKLDNNBase.h"
namespace paddle {
......@@ -25,7 +25,7 @@ namespace paddle {
* @brief test the functionality of Mkldnnlayers
* refer to paddle original function
*/
class MkldnnTester {
class MKLDNNTester {
enum {
DNN = 0,
REF = 1,
......@@ -54,14 +54,14 @@ protected:
size_t ih_, iw_;
public:
explicit MkldnnTester(size_t iter = 3, float epsilon = 1e-4) {
explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
iter_ = iter;
eps_ = epsilon;
log_ = false;
lvl_ = MKLDNN_ALL;
}
~MkldnnTester() {}
~MKLDNNTester() {}
public:
void run(const TestConfig& dnn,
......
......@@ -15,7 +15,7 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include "MkldnnTester.h"
#include "MKLDNNTester.h"
#include "ModelConfig.pb.h"
using namespace paddle; // NOLINT
......@@ -43,7 +43,7 @@ void testFcLayer(const testFCDesc& pm) {
/* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
cfg.layerConfig.add_inputs();
MkldnnTester tester;
MKLDNNTester tester;
for (auto biasSize : {pm.oc, 0}) {
cfg.biasSize = biasSize;
TestConfig ref = cfg;
......@@ -54,7 +54,7 @@ void testFcLayer(const testFCDesc& pm) {
}
}
TEST(MkldnnLayer, fcLayer) {
TEST(MKLDNNLayer, FcLayer) {
testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
......