Commit f6a94093 authored by tensor-tang

remove unused comments, refine and rename

Parent abc49f74
@@ -50,7 +50,7 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
   return true;
 }

-void MkldnnFcLayer::cvtWgtFromPaddle() {
+void MkldnnFcLayer::convertWeightsFromPaddle() {
   if (FLAGS_use_mkldnn_wgt) {
     return;
   }
@@ -75,7 +75,7 @@ void MkldnnFcLayer::cvtWgtFromPaddle() {
   hasInitedWgt_ = true;
 }

-void MkldnnFcLayer::cvtWgtToPaddle() {
+void MkldnnFcLayer::convertWeightsToPaddle() {
   MatrixPtr dnnWgt = weight_->getW();
   MatrixPtr paddleWgt;
   dnnWgt->transpose(paddleWgt, true);
...
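The renamed convertWeightsToPaddle() keeps the existing logic: the MKLDNN-layout weight is transposed back into Paddle's layout. A minimal NumPy sketch of that relationship, assuming Paddle stores an FC weight as (input_size x output_size) while MKLDNN's inner_product holds (output_size x input_size); the sizes and names here are hypothetical:

    import numpy as np

    ic, oc = 4, 3  # hypothetical input/output sizes
    # weight as the MKLDNN layer would hold it: (oc x ic)
    dnn_wgt = np.arange(oc * ic, dtype=np.float32).reshape(oc, ic)
    # converting back to Paddle's (ic x oc) layout is a plain transpose,
    # mirroring the dnnWgt->transpose(paddleWgt, true) call above
    paddle_wgt = dnn_wgt.T
    assert paddle_wgt.shape == (ic, oc)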
...@@ -44,9 +44,9 @@ public: ...@@ -44,9 +44,9 @@ public:
bool init(const LayerMap& layerMap, bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override; const ParameterMap& parameterMap) override;
void cvtWgtFromPaddle() override; void convertWeightsFromPaddle() override;
void cvtWgtToPaddle() override; void convertWeightsToPaddle() override;
void forward(PassType passType) override; void forward(PassType passType) override;
......
...@@ -14,7 +14,6 @@ limitations under the License. */ ...@@ -14,7 +14,6 @@ limitations under the License. */
#include "MkldnnLayer.h" #include "MkldnnLayer.h"
// using namespace mkldnn; // NOLINT
using mem = mkldnn::memory; // NOLINT using mem = mkldnn::memory; // NOLINT
typedef mem::format format; typedef mem::format format;
typedef mkldnn::inner_product_forward fc_fwd; typedef mkldnn::inner_product_forward fc_fwd;
...@@ -94,7 +93,7 @@ void MkldnnLayer::mkldnnForwardFC(int bs, ...@@ -94,7 +93,7 @@ void MkldnnLayer::mkldnnForwardFC(int bs,
// if input size changed, reset it // if input size changed, reset it
resetForwardFC(bs, ic, ih, iw, botData, oc, topData, wgtData, biasData); resetForwardFC(bs, ic, ih, iw, botData, oc, topData, wgtData, biasData);
this->cvtWgtFromPaddle(); this->convertWeightsFromPaddle();
// update input, since the data might be changed if this is after data layer // update input, since the data might be changed if this is after data layer
inVal_->set_data_handle(botData); inVal_->set_data_handle(botData);
...@@ -208,9 +207,9 @@ void MkldnnLayer::mkldnnBackwardFC(int bs, ...@@ -208,9 +207,9 @@ void MkldnnLayer::mkldnnBackwardFC(int bs,
} }
void MkldnnLayer::printSizeInfo() { void MkldnnLayer::printSizeInfo() {
VLOG(DNN_SIZES) << "bs: " << bs_ << ", ic: " << ic_ << ", ih: " << ih_ VLOG(DNN_SIZES) << getName() << ": bs: " << bs_ << ", ic: " << ic_
<< ", iw: " << iw_ << ", oc: " << oc_ << ", oh: " << oh_ << ", ih: " << ih_ << ", iw: " << iw_ << ", oc: " << oc_
<< ", ow: " << ow_; << ", oh: " << oh_ << ", ow: " << ow_;
} }
mem::desc MkldnnLayer::createMD(mem::dims dims, mem::desc MkldnnLayer::createMD(mem::dims dims,
......
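The printSizeInfo() change is purely diagnostic: prefixing getName() lets a trace with several MKLDNN layers attribute each size line to its layer. With hypothetical values, the log line now reads

    fc1: bs: 128, ic: 3, ih: 32, iw: 32, oc: 64, oh: 1, ow: 1

rather than starting at "bs:" with no indication of which layer emitted it.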
...@@ -87,13 +87,13 @@ public: ...@@ -87,13 +87,13 @@ public:
* convert weight from paddle format to mkldnn format * convert weight from paddle format to mkldnn format
* weight_ will be override * weight_ will be override
*/ */
virtual void cvtWgtFromPaddle() { ; } virtual void convertWeightsFromPaddle() {}
/** /**
* convert mkldnn weight to paddle format * convert mkldnn weight to paddle format
* weight_ will be override * weight_ will be override
*/ */
virtual void cvtWgtToPaddle() { ; } virtual void convertWeightsToPaddle() {}
void resetForwardFC(int bs, void resetForwardFC(int bs,
int ic, int ic,
......
...@@ -149,7 +149,7 @@ void MkldnnTester::checkBackwardWgts() { ...@@ -149,7 +149,7 @@ void MkldnnTester::checkBackwardWgts() {
const MkldnnLayerPtr dnnlayer = const MkldnnLayerPtr dnnlayer =
std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_); std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_);
CHECK(dnnlayer); CHECK(dnnlayer);
dnnlayer->cvtWgtToPaddle(); dnnlayer->convertWeightsToPaddle();
for (size_t i = 0; i < parameters_[DNN].size(); ++i) { for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE); const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE); const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
......
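checkBackwardWgts() compares the DNN and reference parameter buffers element-wise, which is only meaningful once the DNN weights are converted back to Paddle's layout; hence the convertWeightsToPaddle() call before the loop. A small sketch of the idea, with hypothetical values:

    import numpy as np

    dnn_wgt = np.array([[1., 3.], [2., 4.]])  # MKLDNN (oc x ic) layout
    ref_wgt = np.array([[1., 2.], [3., 4.]])  # reference Paddle (ic x oc) layout
    # the raw buffers differ until the DNN side is converted back
    assert not np.array_equal(dnn_wgt.ravel(), ref_wgt.ravel())
    assert np.allclose(dnn_wgt.T, ref_wgt)  # equal after the transpose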
...@@ -1614,13 +1614,13 @@ class FCLayer(LayerBase): ...@@ -1614,13 +1614,13 @@ class FCLayer(LayerBase):
error_clipping_threshold=None, error_clipping_threshold=None,
**xargs): **xargs):
use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0))) use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
use_mkldnn_wgt = bool(
int(g_command_config_args.get("use_mkldnn_wgt", 0)))
if use_mkldnn: if use_mkldnn:
self.layer_type = 'mkldnn_fc' self.layer_type = 'mkldnn_fc'
config_assert( config_assert(
len(inputs) == 1, len(inputs) == 1,
"MkldnnFCLayer support one and only one input!") "MkldnnFCLayer support one and only one input!")
use_mkldnn_wgt = bool(
int(g_command_config_args.get("use_mkldnn_wgt", 0)))
super(FCLayer, self).__init__( super(FCLayer, self).__init__(
name, self.layer_type, size, inputs=inputs, **xargs) name, self.layer_type, size, inputs=inputs, **xargs)
for input_index in xrange(len(self.inputs)): for input_index in xrange(len(self.inputs)):
......
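Moving the use_mkldnn_wgt lookup inside the branch scopes it to the MKLDNN path, so a configuration that never sets use_mkldnn also never consults the weight-format flag. A runnable sketch of the resulting control flow, with hypothetical config values:

    g_command_config_args = {"use_mkldnn": 1, "use_mkldnn_wgt": 1}

    use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0)))
    if use_mkldnn:
        # only read when the layer actually becomes mkldnn_fc
        use_mkldnn_wgt = bool(
            int(g_command_config_args.get("use_mkldnn_wgt", 0)))
        print("mkldnn_fc uses MKLDNN weight layout:", use_mkldnn_wgt)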