diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt
index 1305d5438ab9eb03128f7cdb36e034919eb405dd..62cff9361ccba3ae3b9359ddb932f5b26146eb97 100644
--- a/paddle/gserver/CMakeLists.txt
+++ b/paddle/gserver/CMakeLists.txt
@@ -25,13 +25,13 @@ filter_test(GSERVER_HEADER)
 filter_test(GSERVER_SOURCES)
 
 if(NOT WITH_MKLDNN)
-  file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.h")
-  file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "Mkldnn*.cpp")
+  file(GLOB_RECURSE DNN_HEADER RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.h")
+  file(GLOB_RECURSE DNN_SOURCES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "MKLDNN*.cpp")
   list(REMOVE_ITEM GSERVER_HEADER ${DNN_HEADER})
   list(REMOVE_ITEM GSERVER_SOURCES ${DNN_SOURCES})
-  message(STATUS "Skip compiling with Mkldnnlayers and MkldnnActivations")
+  message(STATUS "Skip compiling with MKLDNNLayers and MKLDNNActivations")
 else()
-  message(STATUS "Compile with Mkldnnlayers and MkldnnActivations")
+  message(STATUS "Compile with MKLDNNLayers and MKLDNNActivations")
 endif()
 
 if(NOT WITH_GPU)
diff --git a/paddle/gserver/layers/MkldnnBase.h b/paddle/gserver/layers/MKLDNNBase.h
similarity index 77%
rename from paddle/gserver/layers/MkldnnBase.h
rename to paddle/gserver/layers/MKLDNNBase.h
index 63fd67a85087764a49dae83d5709d0d59846a25d..4c0234e7b3a91053596c32cea581fa5d1e26b9d5 100644
--- a/paddle/gserver/layers/MkldnnBase.h
+++ b/paddle/gserver/layers/MKLDNNBase.h
@@ -30,26 +30,26 @@ typedef enum {
  * @brief MKLDNN CPU engine.
  *
  */
-class CpuEngine {
+class CPUEngine {
 public:
-  static CpuEngine& Instance() {
+  static CPUEngine& Instance() {
     // Thread-safe in C++11.
-    static CpuEngine myInstance;
+    static CPUEngine myInstance;
     return myInstance;
   }
 
   // Disallow copy or move
-  CpuEngine(const CpuEngine&) = delete;             // Copy constructor
-  CpuEngine(CpuEngine&&) = delete;                  // Move constructor
-  CpuEngine& operator=(const CpuEngine&) = delete;  // Copy assignment
-  CpuEngine& operator=(CpuEngine&&) = delete;       // Move assignment
+  CPUEngine(const CPUEngine&) = delete;             // Copy constructor
+  CPUEngine(CPUEngine&&) = delete;                  // Move constructor
+  CPUEngine& operator=(const CPUEngine&) = delete;  // Copy assignment
+  CPUEngine& operator=(CPUEngine&&) = delete;       // Move assignment
 
   mkldnn::engine& getEngine() { return cpuEngine_; }
 
 protected:
-  CpuEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
-  // CpuEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
-  ~CpuEngine() {}
+  CPUEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {}
+  // CPUEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {}
+  ~CPUEngine() {}
 
 private:
   mkldnn::engine cpuEngine_;
@@ -59,11 +59,11 @@ private:
  * @brief MKLDNN Stream.
  *
  */
-class MkldnnStream {
+class MKLDNNStream {
 public:
-  MkldnnStream() : ready_(false) { resetState(); }
+  MKLDNNStream() : ready_(false) { resetState(); }
 
-  virtual ~MkldnnStream() {}
+  virtual ~MKLDNNStream() {}
 
   /**
    * @brief Submit stream
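The CPUEngine class renamed above is a Meyers singleton: C++11 guarantees that the function-local static in Instance() is initialized exactly once, even if several threads call Instance() concurrently, and the deleted copy/move operations keep callers from cloning the engine. A minimal self-contained sketch of the same pattern (the class and method names here are illustrative, not part of the patch):

```cpp
#include <iostream>

// Meyers singleton, structured like CPUEngine above: a private
// constructor, deleted copy/move operations, and a function-local
// static whose initialization C++11 makes thread-safe.
class Registry {
public:
  static Registry& Instance() {
    static Registry instance;  // initialized once, thread-safe in C++11
    return instance;
  }

  Registry(const Registry&) = delete;             // Copy constructor
  Registry(Registry&&) = delete;                  // Move constructor
  Registry& operator=(const Registry&) = delete;  // Copy assignment
  Registry& operator=(Registry&&) = delete;       // Move assignment

  void whereAmI() { std::cout << "instance at " << this << "\n"; }

private:
  Registry() = default;
  ~Registry() = default;
};

int main() {
  Registry::Instance().whereAmI();
  Registry::Instance().whereAmI();  // prints the same address
  return 0;
}
```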
diff --git a/paddle/gserver/layers/MkldnnFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp
similarity index 94%
rename from paddle/gserver/layers/MkldnnFcLayer.cpp
rename to paddle/gserver/layers/MKLDNNFcLayer.cpp
index f89db169efa1ea1f45e8640a3bd4f2c674147e41..30f567eaf8248a8fba1b461a2bdbf2aab13f9e08 100644
--- a/paddle/gserver/layers/MkldnnFcLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "MkldnnFcLayer.h"
+#include "MKLDNNFcLayer.h"
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Stat.h"
 
@@ -24,11 +24,11 @@ typedef inner_product_backward_data fc_bwdData;
 
 namespace paddle {
 
-REGISTER_LAYER(mkldnn_fc, MkldnnFcLayer);
+REGISTER_LAYER(mkldnn_fc, MKLDNNFcLayer);
 
-bool MkldnnFcLayer::init(const LayerMap& layerMap,
+bool MKLDNNFcLayer::init(const LayerMap& layerMap,
                          const ParameterMap& parameterMap) {
-  if (!MkldnnLayer::init(layerMap, parameterMap)) {
+  if (!MKLDNNLayer::init(layerMap, parameterMap)) {
     return false;
   }
 
@@ -56,7 +56,7 @@ bool MkldnnFcLayer::init(const LayerMap& layerMap,
   return true;
 }
 
-void MkldnnFcLayer::convertWeightsFromPaddle() {
+void MKLDNNFcLayer::convertWeightsFromPaddle() {
   if (FLAGS_use_mkldnn_wgt) {
     return;
   }
@@ -81,7 +81,7 @@ void MkldnnFcLayer::convertWeightsFromPaddle() {
   hasInitedWgt_ = true;
 }
 
-void MkldnnFcLayer::convertWeightsToPaddle() {
+void MKLDNNFcLayer::convertWeightsToPaddle() {
   MatrixPtr dnnWgt = weight_->getW();
   MatrixPtr paddleWgt;
   dnnWgt->transpose(paddleWgt, true);
@@ -92,7 +92,7 @@ void MkldnnFcLayer::convertWeightsToPaddle() {
   dnnWgtT->copyFrom(*paddleWgt);
 }
 
-void MkldnnFcLayer::reshape() {
+void MKLDNNFcLayer::reshape() {
   const Argument& input = getInput(0);
   int batchSize = input.getBatchSize();
   if (bs_ == batchSize) {
@@ -129,7 +129,7 @@ void MkldnnFcLayer::reshape() {
   convertWeightsFromPaddle();
 }
 
-void MkldnnFcLayer::resetFwd() {
+void MKLDNNFcLayer::resetFwd() {
   bool hasBias = biases_ && biases_->getW();
   real* iData = getInputValue(0)->getData();
   real* oData = getOutputValue()->getData();
@@ -166,7 +166,7 @@ void MkldnnFcLayer::resetFwd() {
   pipelineFwd_.push_back(*fwd_);
 }
 
-void MkldnnFcLayer::resetBwd() {
+void MKLDNNFcLayer::resetBwd() {
   if (!needResetBwd_) {
     return;
   }
@@ -231,7 +231,7 @@ void MkldnnFcLayer::resetBwd() {
   pipelineBwd_.push_back(*bwdData_);
 }
 
-void MkldnnFcLayer::forward(PassType passType) {
+void MKLDNNFcLayer::forward(PassType passType) {
   Layer::forward(passType);
 
   reshape();
@@ -253,7 +253,7 @@ void MkldnnFcLayer::forward(PassType passType) {
   }
 }
 
-void MkldnnFcLayer::backward(const UpdateCallback& callback) {
+void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
   /* Do derivation */ {
     REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
     backwardActivation();
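The convertWeightsFromPaddle()/convertWeightsToPaddle() hooks touched above transpose the FC weight matrix, which suggests Paddle and MKL-DNN store fully-connected weights in mutually transposed row-major layouts. A rough sketch of that reordering on plain buffers, assuming nothing about Paddle's Matrix API (transposeWeights is a hypothetical helper, not code from this patch):

```cpp
#include <cstddef>
#include <vector>

// Reorder a row-major (rows x cols) weight buffer into the transposed
// (cols x rows) layout; conceptually the conversion the convertWeights*
// hooks perform when moving FC weights between the two formats.
std::vector<float> transposeWeights(const std::vector<float>& src,
                                    size_t rows,
                                    size_t cols) {
  std::vector<float> dst(src.size());
  for (size_t r = 0; r < rows; ++r) {
    for (size_t c = 0; c < cols; ++c) {
      dst[c * rows + r] = src[r * cols + c];
    }
  }
  return dst;
}
```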
diff --git a/paddle/gserver/layers/MkldnnFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h
similarity index 86%
rename from paddle/gserver/layers/MkldnnFcLayer.h
rename to paddle/gserver/layers/MKLDNNFcLayer.h
index c4c0fa1c41f38fbb75e844f431f57b2bf513e5e8..dffae27d7ba00b04eb2dbc52738dffd3245b15b4 100644
--- a/paddle/gserver/layers/MkldnnFcLayer.h
+++ b/paddle/gserver/layers/MKLDNNFcLayer.h
@@ -14,17 +14,17 @@ limitations under the License. */
 
 #pragma once
 
-#include "MkldnnLayer.h"
+#include "MKLDNNLayer.h"
 #include "mkldnn.hpp"
 
 namespace paddle {
 
 /**
- * @brief A subclass of MkldnnLayer fc layer.
+ * @brief A subclass of MKLDNNLayer fc layer.
  *
  * The config file api is mkldnn_fc
  */
-class MkldnnFcLayer : public MkldnnLayer {
+class MKLDNNFcLayer : public MKLDNNLayer {
 protected:
   // input layer size, cannot be changed after init
   size_t iLayerSize_;  // == ic * ih * iw
@@ -37,10 +37,10 @@ protected:
   std::unique_ptr<Weight> biases_;
 
 public:
-  explicit MkldnnFcLayer(const LayerConfig& config)
-      : MkldnnLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
+  explicit MKLDNNFcLayer(const LayerConfig& config)
+      : MKLDNNLayer(config), hasInitedWgt_(false), hasSpatial_(true) {}
 
-  ~MkldnnFcLayer() {}
+  ~MKLDNNFcLayer() {}
 
   bool init(const LayerMap& layerMap,
             const ParameterMap& parameterMap) override;
diff --git a/paddle/gserver/layers/MkldnnLayer.h b/paddle/gserver/layers/MKLDNNLayer.h
similarity index 88%
rename from paddle/gserver/layers/MkldnnLayer.h
rename to paddle/gserver/layers/MKLDNNLayer.h
index 620bdfc984802f22da797c6cb435e2f6361f8617..63e29f447eede5ff9df8715bc9140b64ab7f7d17 100644
--- a/paddle/gserver/layers/MkldnnLayer.h
+++ b/paddle/gserver/layers/MKLDNNLayer.h
@@ -16,7 +16,7 @@ limitations under the License. */
 
 #include <vector>
 #include "Layer.h"
-#include "MkldnnBase.h"
+#include "MKLDNNBase.h"
 #include "mkldnn.hpp"
 
 DECLARE_bool(use_mkldnn);
@@ -24,14 +24,14 @@ DECLARE_bool(use_mkldnn_wgt);
 
 namespace paddle {
 
-class MkldnnLayer;
-typedef std::shared_ptr<MkldnnLayer> MkldnnLayerPtr;
+class MKLDNNLayer;
+typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;
 
 /**
- * @brief Base class of Mkldnnlayer.
+ * @brief Base class of MKLDNNLayer.
  *
  */
-class MkldnnLayer : public Layer {
+class MKLDNNLayer : public Layer {
 protected:
   // batch size
   int bs_;
@@ -45,14 +45,14 @@ protected:
   // mkldnn engine, stream and primitives
   mkldnn::engine engine_;
-  std::shared_ptr<MkldnnStream> stream_;
+  std::shared_ptr<MKLDNNStream> stream_;
   std::shared_ptr<mkldnn::primitive> fwd_;
   std::shared_ptr<mkldnn::primitive> bwdWgt_;
   std::shared_ptr<mkldnn::primitive> bwdData_;
   std::vector<mkldnn::primitive> pipelineFwd_;
   std::vector<mkldnn::primitive> pipelineBwd_;
 
-  // TODO(TJ): change below memory as MkldnnMatrixPtr type
+  // TODO(TJ): change below memory as MKLDNNMatrixPtr type
   std::shared_ptr<mkldnn::memory> inVal_;
   std::shared_ptr<mkldnn::memory> inGrad_;
   std::shared_ptr<mkldnn::memory> outVal_;
@@ -63,7 +63,7 @@ protected:
   std::shared_ptr<mkldnn::memory> biasGrad_;
 
 public:
-  explicit MkldnnLayer(const LayerConfig& config)
+  explicit MKLDNNLayer(const LayerConfig& config)
       : Layer(config),
        bs_(0),
        ic_(0),
@@ -79,7 +79,7 @@ public:
        bwdWgt_(nullptr),
        bwdData_(nullptr) {}
 
-  ~MkldnnLayer() {}
+  ~MKLDNNLayer() {}
 
   virtual bool init(const LayerMap& layerMap,
                     const ParameterMap& parameterMap) {
@@ -90,8 +90,8 @@ public:
     CHECK(FLAGS_use_mkldnn) << "MkldnnLayers only support use_mkldnn."
                             << "Please set WITH_MKLDNN=ON "
                             << "and set use_mkldnn=True";
-    stream_.reset(new MkldnnStream());
-    engine_ = CpuEngine::Instance().getEngine();
+    stream_.reset(new MKLDNNStream());
+    engine_ = CPUEngine::Instance().getEngine();
 
     // TODO(TJ): deviceId
     return true;
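The MKLDNNLayerPtr typedef renamed above matters to the tester further down: MKLDNNTester::checkBackwardWgts() holds a generic layer pointer and must downcast it with std::dynamic_pointer_cast to reach convertWeightsToPaddle(). A minimal sketch of that standard-library pattern (Base and Derived stand in for Layer and MKLDNNLayer):

```cpp
#include <cassert>
#include <memory>

struct Base {
  virtual ~Base() = default;
};

// Stands in for MKLDNNLayer, which adds API beyond the common base class.
struct Derived : Base {
  void extraApi() {}
};

using DerivedPtr = std::shared_ptr<Derived>;  // cf. MKLDNNLayerPtr

int main() {
  std::shared_ptr<Base> layer = std::make_shared<Derived>();
  // dynamic_pointer_cast yields nullptr when the object is not actually
  // a Derived, which is why the tester CHECKs the result before use.
  DerivedPtr dnn = std::dynamic_pointer_cast<Derived>(layer);
  assert(dnn != nullptr);
  dnn->extraApi();
  return 0;
}
```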
<< "Please set WITH_MKLDNN=ON " << "and set use_mkldnn=True"; - stream_.reset(new MkldnnStream()); - engine_ = CpuEngine::Instance().getEngine(); + stream_.reset(new MKLDNNStream()); + engine_ = CPUEngine::Instance().getEngine(); // TODO(TJ): deivecId return true; diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index bcfc85aea0175a7468045a19441e1de1de47a5a8..ade5f633b46dcdc1572f4aec7103985655995915 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -20,11 +20,11 @@ add_test(NAME test_LayerGrad ########## test_Mkldnn layers and activations ########## if(WITH_MKLDNN) - add_unittest_without_exec(test_Mkldnn - test_Mkldnn.cpp - MkldnnTester.cpp + add_unittest_without_exec(test_MKLDNN + test_MKLDNN.cpp + MKLDNNTester.cpp LayerGradUtil.cpp) - add_test(NAME test_Mkldnn COMMAND test_Mkldnn) + add_test(NAME test_MKLDNN COMMAND test_MKLDNN) endif() ################ test_CRFLayerGrad #################### diff --git a/paddle/gserver/tests/MkldnnTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp similarity index 89% rename from paddle/gserver/tests/MkldnnTester.cpp rename to paddle/gserver/tests/MKLDNNTester.cpp index 9232e2fdcd87bf69ff31dd2492292beccc134a00..d91e4ed60c94522aa7efe35d7c93467f7364d406 100644 --- a/paddle/gserver/tests/MkldnnTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "MkldnnTester.h" -#include "paddle/gserver/layers/MkldnnBase.h" -#include "paddle/gserver/layers/MkldnnLayer.h" +#include "MKLDNNTester.h" +#include "paddle/gserver/layers/MKLDNNBase.h" +#include "paddle/gserver/layers/MKLDNNLayer.h" namespace paddle { // init data layer and test layer of both dnn and reference -void MkldnnTester::reset(const TestConfig& dnn, +void MKLDNNTester::reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize) { const bool trans = false; @@ -71,7 +71,7 @@ void MkldnnTester::reset(const TestConfig& dnn, setInputImgSize(); } -void MkldnnTester::setInputImgSize() { +void MKLDNNTester::setInputImgSize() { for (size_t n = 0; n < dataLayers_.size(); ++n) { for (size_t i = 0; i < dataLayers_[n].size(); ++i) { // TODO(TJ): fix me when concat and elewise ready @@ -82,7 +82,7 @@ void MkldnnTester::setInputImgSize() { } // init randome parameters of ref, and copy to mkldnn -void MkldnnTester::randomWgtDatas() { +void MKLDNNTester::randomWgtDatas() { EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size()); for (size_t i = 0; i < parameters_[REF].size(); ++i) { const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE); @@ -96,7 +96,7 @@ void MkldnnTester::randomWgtDatas() { } // random botdata of ref layer and copy same to mkldnn -void MkldnnTester::randomBotDatas() { +void MKLDNNTester::randomBotDatas() { CHECK_EQ(dataLayers_.size(), NUM); for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) { dataLayers_[REF][i]->getOutputValue()->randomizeUniform(); @@ -107,14 +107,14 @@ void MkldnnTester::randomBotDatas() { } } -void MkldnnTester::randomTopDiffs() { +void MKLDNNTester::randomTopDiffs() { refLayer_->getOutputGrad()->randomizeUniform(); dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad())); VLOG(lvl_) << "Random dom Backward Input, TopDiff: "; printMatrix(refLayer_->getOutputGrad()); } -void MkldnnTester::checkForward() { +void MKLDNNTester::checkForward() { 
diff --git a/paddle/gserver/tests/MkldnnTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp
similarity index 89%
rename from paddle/gserver/tests/MkldnnTester.cpp
rename to paddle/gserver/tests/MKLDNNTester.cpp
index 9232e2fdcd87bf69ff31dd2492292beccc134a00..d91e4ed60c94522aa7efe35d7c93467f7364d406 100644
--- a/paddle/gserver/tests/MkldnnTester.cpp
+++ b/paddle/gserver/tests/MKLDNNTester.cpp
@@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "MkldnnTester.h"
-#include "paddle/gserver/layers/MkldnnBase.h"
-#include "paddle/gserver/layers/MkldnnLayer.h"
+#include "MKLDNNTester.h"
+#include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"
 
 namespace paddle {
 
 // init data layer and test layer of both dnn and reference
-void MkldnnTester::reset(const TestConfig& dnn,
+void MKLDNNTester::reset(const TestConfig& dnn,
                          const TestConfig& ref,
                          size_t batchSize) {
   const bool trans = false;
@@ -71,7 +71,7 @@ void MkldnnTester::reset(const TestConfig& dnn,
   setInputImgSize();
 }
 
-void MkldnnTester::setInputImgSize() {
+void MKLDNNTester::setInputImgSize() {
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
     for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
       // TODO(TJ): fix me when concat and elewise ready
@@ -82,7 +82,7 @@ void MkldnnTester::setInputImgSize() {
 }
 
 // init random parameters of ref, and copy to mkldnn
-void MkldnnTester::randomWgtDatas() {
+void MKLDNNTester::randomWgtDatas() {
   EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
   for (size_t i = 0; i < parameters_[REF].size(); ++i) {
     const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
@@ -96,7 +96,7 @@ void MkldnnTester::randomWgtDatas() {
 }
 
 // randomize bot data of ref layer and copy the same to mkldnn
-void MkldnnTester::randomBotDatas() {
+void MKLDNNTester::randomBotDatas() {
   CHECK_EQ(dataLayers_.size(), NUM);
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     dataLayers_[REF][i]->getOutputValue()->randomizeUniform();
@@ -107,14 +107,14 @@ void MkldnnTester::randomBotDatas() {
   }
 }
 
-void MkldnnTester::randomTopDiffs() {
+void MKLDNNTester::randomTopDiffs() {
   refLayer_->getOutputGrad()->randomizeUniform();
   dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
   VLOG(lvl_) << "Random Backward Input, TopDiff: ";
   printMatrix(refLayer_->getOutputGrad());
 }
 
-void MkldnnTester::checkForward() {
+void MKLDNNTester::checkForward() {
   printTopDatas();
   double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
                                testLayers_[REF]->getOutputValue());
@@ -122,7 +122,7 @@ void MkldnnTester::checkForward() {
   EXPECT_LE(fabs(delta), eps_);
 }
 
-void MkldnnTester::checkBackwardData() {
+void MKLDNNTester::checkBackwardData() {
   const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
   for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
     const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
@@ -141,13 +141,13 @@ void MkldnnTester::checkBackwardData() {
   }
 }
 
-void MkldnnTester::checkBackwardWgts() {
+void MKLDNNTester::checkBackwardWgts() {
   CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
   vector<VectorPtr> dnnWgts;  // used to temporarily save mkldnn weights
   saveWgt(parameters_[DNN], dnnWgts);
 
-  const MkldnnLayerPtr dnnlayer =
-      std::dynamic_pointer_cast<MkldnnLayer>(dnnLayer_);
+  const MKLDNNLayerPtr dnnlayer =
+      std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
   CHECK(dnnlayer);
   dnnlayer->convertWeightsToPaddle();
   for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
@@ -166,7 +166,7 @@ void MkldnnTester::checkBackwardWgts() {
   restoreWgt(dnnWgts, parameters_[DNN]);
 }
 
-void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
+void MKLDNNTester::saveWgt(const vector<ParameterPtr>& from,
                            vector<VectorPtr>& to) {
   const bool useGpu = false;
   to.resize(from.size());
@@ -177,7 +177,7 @@ void MkldnnTester::saveWgt(const vector<ParameterPtr>& from,
   }
 }
 
-void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
+void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
                               vector<ParameterPtr>& to) {
   CHECK_EQ(from.size(), to.size());
   for (size_t i = 0; i < from.size(); ++i) {
@@ -187,7 +187,7 @@ void MkldnnTester::restoreWgt(const vector<VectorPtr>& from,
 }
 
 // clear parameters grad
-void MkldnnTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs() {
   for (size_t n = 0; n < parameters_.size(); ++n) {
     for (size_t i = 0; i < parameters_[n].size(); ++i) {
       const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
@@ -198,7 +198,7 @@ void MkldnnTester::clearWgtDiffs() {
   }
 }
 
-void MkldnnTester::clearBotDiffs() {
+void MKLDNNTester::clearBotDiffs() {
   // dnn and ref
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
     // all inputs layers
@@ -208,7 +208,7 @@ void MkldnnTester::clearBotDiffs() {
   }
 }
 
-void MkldnnTester::clearBotDiffs(int n) {
+void MKLDNNTester::clearBotDiffs(int n) {
   CHECK_LT(n, NUM);
   // all inputs layers
   for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
@@ -216,13 +216,13 @@ void MkldnnTester::clearBotDiffs(int n) {
   }
 }
 
-void MkldnnTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas() {
   for (size_t i = 0; i < testLayers_.size(); ++i) {
     testLayers_[i]->getOutputValue()->zeroMem();
   }
 }
 
-void MkldnnTester::printTopDatas() {
+void MKLDNNTester::printTopDatas() {
   if (!log_) {
     return;
   }
@@ -233,7 +233,7 @@ void MkldnnTester::printTopDatas() {
   }
 }
 
-void MkldnnTester::printMatrix(const MatrixPtr& m) {
+void MKLDNNTester::printMatrix(const MatrixPtr& m) {
   if (!log_) {
     return;
   }
@@ -243,7 +243,7 @@ void MkldnnTester::printMatrix(const MatrixPtr& m) {
   VLOG(lvl_) << std::endl << ostr.str();
 }
 
-void MkldnnTester::printVector(const VectorPtr& v) {
+void MKLDNNTester::printVector(const VectorPtr& v) {
   if (!log_) {
     return;
   }
@@ -253,7 +253,7 @@ void MkldnnTester::printVector(const VectorPtr& v) {
   VLOG(lvl_) << std::endl << ostr.str();
 }
 
-double MkldnnTester::getDelta(const real* d1,
+double MKLDNNTester::getDelta(const real* d1,
                               const real* d2,
                               size_t len,
                               const float failRate,
@@ -280,17 +280,17 @@ double MkldnnTester::getDelta(const real* d1,
   return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
 }
 
-double MkldnnTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
+double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
   CHECK_EQ(m1->getElementCnt(), m2->getElementCnt());
   return getDelta(m1->getData(), m2->getData(), m1->getElementCnt());
 }
 
-double MkldnnTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
+double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
   CHECK_EQ(v1->getSize(), v2->getSize());
   return getDelta(v1->getData(), v2->getData(), v1->getSize());
 }
 
-void MkldnnTester::runOnce() {
+void MKLDNNTester::runOnce() {
   // test forward
   randomBotDatas();
   dnnLayer_->forward(PASS_TRAIN);
@@ -310,7 +310,7 @@ void MkldnnTester::runOnce() {
   clearBotDiffs(REF);
 }
 
-void MkldnnTester::run(const TestConfig& dnn,
+void MKLDNNTester::run(const TestConfig& dnn,
                        const TestConfig& ref,
                        size_t batchSize,
                        size_t inputImgH,
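As the hunks above show, MKLDNNTester::getDelta() reduces two buffers to one score, the sum of absolute differences normalized by the reference magnitude, and reports the worst value instead when too many elements disagree. A simplified self-contained version of that idea (the constants and the guard against an all-zero reference are illustrative, not the tester's exact logic):

```cpp
#include <cmath>
#include <cstddef>

// Relative difference between two buffers: sum(|a - b|) / sum(|b|).
// Returns a large sentinel when the reference is essentially zero so a
// caller comparing against an epsilon still fails loudly on mismatch.
double relativeDelta(const float* a, const float* b, size_t len) {
  double diff = 0.0;
  double sum = 0.0;
  for (size_t i = 0; i < len; ++i) {
    diff += std::fabs(a[i] - b[i]);
    sum += std::fabs(b[i]);
  }
  if (sum < 1e-12) {
    return diff < 1e-12 ? 0.0 : 1e3;  // both zero vs. genuine mismatch
  }
  return diff / sum;
}
```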
diff --git a/paddle/gserver/tests/MkldnnTester.h b/paddle/gserver/tests/MKLDNNTester.h
similarity index 95%
rename from paddle/gserver/tests/MkldnnTester.h
rename to paddle/gserver/tests/MKLDNNTester.h
index 7d1db870d122c8b08a1d2cf71a6120745eb2f6d7..d21f92d426ed00406e9a5f715883490344f3bf0b 100644
--- a/paddle/gserver/tests/MkldnnTester.h
+++ b/paddle/gserver/tests/MKLDNNTester.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "LayerGradUtil.h"
-#include "paddle/gserver/layers/MkldnnBase.h"
+#include "paddle/gserver/layers/MKLDNNBase.h"
 
 namespace paddle {
 
@@ -25,7 +25,7 @@ namespace paddle {
  * @brief test the functionality of Mkldnnlayers
  * refer to paddle original function
  */
-class MkldnnTester {
+class MKLDNNTester {
   enum {
     DNN = 0,
     REF = 1,
@@ -54,14 +54,14 @@ protected:
   size_t ih_, iw_;
 
 public:
-  explicit MkldnnTester(size_t iter = 3, float epsilon = 1e-4) {
+  explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
     iter_ = iter;
     eps_ = epsilon;
     log_ = false;
     lvl_ = MKLDNN_ALL;
   }
 
-  ~MkldnnTester() {}
+  ~MKLDNNTester() {}
 
 public:
   void run(const TestConfig& dnn,
diff --git a/paddle/gserver/tests/test_Mkldnn.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
similarity index 96%
rename from paddle/gserver/tests/test_Mkldnn.cpp
rename to paddle/gserver/tests/test_MKLDNN.cpp
index 8e4a8595d3c5e6ec9a5224176ca6e8a90c913d68..e1d2270df24331914f3a51acc90a518084b3ce4e 100644
--- a/paddle/gserver/tests/test_Mkldnn.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -15,7 +15,7 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include <string>
 #include <vector>
-#include "MkldnnTester.h"
+#include "MKLDNNTester.h"
 #include "ModelConfig.pb.h"
 
 using namespace paddle;  // NOLINT
@@ -43,7 +43,7 @@ void testFcLayer(const testFCDesc& pm) {
                        /* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
   cfg.layerConfig.add_inputs();
 
-  MkldnnTester tester;
+  MKLDNNTester tester;
   for (auto biasSize : {pm.oc, 0}) {
     cfg.biasSize = biasSize;
     TestConfig ref = cfg;
@@ -54,7 +54,7 @@ void testFcLayer(const testFCDesc& pm) {
   }
 }
 
-TEST(MkldnnLayer, fcLayer) {
+TEST(MKLDNNLayer, FcLayer) {
   testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
   testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
   testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
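Since the renamed gtest case is driven entirely by testFcLayer, extra FC shapes can be covered by appending descriptors; the shapes below are hypothetical, shown only to illustrate the pattern. The renamed target should also be runnable on its own, e.g. via `ctest -R test_MKLDNN`.

```cpp
// Hypothetical additional shapes; each descriptor is
// {batch size, input channels, output channels, input height, input width}.
TEST(MKLDNNLayer, FcLayerMoreShapes) {
  testFcLayer({/*bs*/ 4, /*ic*/ 12, /*oc*/ 18, /*ih*/ 2, /*iw*/ 2});
  testFcLayer({/*bs*/ 16, /*ic*/ 32, /*oc*/ 64, /*ih*/ 7, /*iw*/ 7});
}
```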