Commit 3a68955f authored by X.Dragon, committed by GitHub

Merge pull request #4509 from hedaoyuan/inference

Build mobile inference library for minimum size
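With this change, cross-compiling for Android or iOS forces WITH_C_API on, sets MOBILE_INFERENCE, and defines the PADDLE_MOBILE_INFERENCE macro, so only the inference subset of the tree is built. As a rough sketch of how such a build might be configured (the toolchain file path, ABI value, and install prefix below are placeholders for your own NDK setup, not something this PR prescribes):

    # hypothetical configure step for an Android inference build
    cmake .. -DCMAKE_TOOLCHAIN_FILE=/path/to/android.toolchain.cmake \
             -DANDROID_ABI=armeabi-v7a \
             -DWITH_C_API=ON \
             -DWITH_SWIG_PY=OFF \
             -DCMAKE_INSTALL_PREFIX=/path/to/install
    make -j4 && make install

The inference C-API library then comes out of paddle/capi, linked against the trimmed-down set of core libraries listed further down.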
@@ -86,6 +86,14 @@ if(ANDROID OR IOS)
        "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
    set(WITH_MKLML OFF CACHE STRING
        "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
+
+    # Compile PaddlePaddle mobile inference library
+    if (NOT WITH_C_API)
+        set(WITH_C_API ON CACHE STRING
+            "Always compile the C_API when cross-compiling for Android and iOS" FORCE)
+    endif()
+    set(MOBILE_INFERENCE ON)
+    add_definitions(-DPADDLE_MOBILE_INFERENCE)
endif()

set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
@@ -160,9 +168,11 @@ endif(USE_NNPACK)

add_subdirectory(proto)

-# "add_subdirectory(go)" should be placed after the following line,
-# because it depends on paddle/optimizer.
-add_subdirectory(paddle/optimizer)
+if(NOT MOBILE_INFERENCE)
+    # "add_subdirectory(go)" should be placed after the following line,
+    # because it depends on paddle/optimizer.
+    add_subdirectory(paddle/optimizer)
+endif()

# "add_subdirectory(paddle)" and "add_subdirectory(python)" should be
# placed after this block, because they depend on it.
......
@@ -73,6 +73,23 @@ function(link_paddle_exe TARGET_NAME)
    generate_rdma_links()
  endif()

+  if(MOBILE_INFERENCE)
+    target_circle_link_libraries(${TARGET_NAME}
+      ARCHIVE_START
+      paddle_gserver
+      paddle_function
+      ARCHIVE_END
+      paddle_math
+      paddle_utils
+      paddle_parameter
+      paddle_proto
+      paddle_cuda
+      ${EXTERNAL_LIBS}
+      ${CMAKE_THREAD_LIBS_INIT}
+      ${CMAKE_DL_LIBS}
+      ${RDMA_LD_FLAGS}
+      ${RDMA_LIBS})
+  else()
  target_circle_link_libraries(${TARGET_NAME}
    ARCHIVE_START
    paddle_gserver
@@ -92,6 +109,7 @@ function(link_paddle_exe TARGET_NAME)
    ${CMAKE_DL_LIBS}
    ${RDMA_LD_FLAGS}
    ${RDMA_LIBS})
+  endif()

  if(ANDROID)
    target_link_libraries(${TARGET_NAME} log)
......
add_subdirectory(cuda)
add_subdirectory(function)
add_subdirectory(utils)
-add_subdirectory(testing)
add_subdirectory(math)
-add_subdirectory(parameter)
add_subdirectory(gserver)
-add_subdirectory(pserver)
-add_subdirectory(trainer)
-add_subdirectory(scripts)
-add_subdirectory(string)
+add_subdirectory(parameter)
+add_subdirectory(testing)
+
+if(MOBILE_INFERENCE)
+  add_subdirectory(capi)
+else()
+  add_subdirectory(pserver)
+  add_subdirectory(trainer)
+  add_subdirectory(string)
+  add_subdirectory(scripts)

-if(Boost_FOUND)
+  if(WITH_C_API)
+    add_subdirectory(capi)
+  endif()
+
+  if(Boost_FOUND)
  add_subdirectory(memory)
  add_subdirectory(platform)
  add_subdirectory(framework)
  add_subdirectory(operators)
  add_subdirectory(pybind)
endif()

-if(WITH_C_API)
-  add_subdirectory(capi)
-endif()
-
if(WITH_SWIG_PY)
  add_subdirectory(api)
endif()
+endif()
@@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS
    paddle_cuda
    paddle_function
    paddle_gserver
-    paddle_proto
-    paddle_pserver
-    paddle_network)
+    paddle_proto)

cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
......
@@ -4,11 +4,12 @@ add_unittest(capi_test_mats test_Vector.cpp
target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH})
target_link_libraries(capi_test_mats paddle_capi)

+if(NOT MOBILE_INFERENCE)
add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp)
target_include_directories(capi_test_gradientMachine PUBLIC
                           ${PADDLE_CAPI_INC_PATH})
target_link_libraries(capi_test_gradientMachine paddle_capi)
add_test(NAME capi_test_gradientMachine
         COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine
         WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests)
+endif()
@@ -60,6 +60,36 @@ if(NOT WITH_PYTHON)
        dataproviders/PyDataProvider.h)
endif()

+if(MOBILE_INFERENCE)
+    # Remove evaluators
+    list(REMOVE_ITEM GSERVER_SOURCES
+         layers/ValidationLayer.cpp
+         evaluators/Evaluator.cpp
+         evaluators/DetectionMAPEvaluator.cpp
+         evaluators/CTCErrorEvaluator.cpp
+         evaluators/ChunkEvaluator.cpp)
+
+    # Remove dataproviders
+    list(REMOVE_ITEM GSERVER_SOURCES
+         dataproviders/DataProvider.cpp
+         dataproviders/MultiDataProvider.cpp
+         dataproviders/ProtoDataProvider.cpp
+         dataproviders/PyDataProvider2.cpp
+         dataproviders/PyDataProvider.cpp)
+
+    # Remove useless gradientmachines
+    list(REMOVE_ITEM GSERVER_SOURCES
+         gradientmachines/MultiNetwork.cpp
+         gradientmachines/RecurrentGradientMachine.cpp
+         gradientmachines/ParallelNeuralNetwork.cpp
+         gradientmachines/GradientMachineMode.cpp
+         gradientmachines/MultiGradientMachine.cpp)
+
+    # Remove useless layers
+    list(REMOVE_ITEM GSERVER_SOURCES
+         layers/RecurrentLayerGroup.cpp)
+endif()
+
if(WITH_GPU)
    cuda_add_library(paddle_gserver ${GSERVER_SOURCES})
else()
......
@@ -17,12 +17,15 @@ limitations under the License. */
#include <fstream>
#include "paddle/utils/Logging.h"

+#include "NeuralNetwork.h"
+#include "hl_gpu.h"
+
+#ifndef PADDLE_MOBILE_INFERENCE
#include "GradientMachineMode.h"
#include "MultiGradientMachine.h"
#include "MultiNetwork.h"
-#include "NeuralNetwork.h"
#include "ParallelNeuralNetwork.h"
-#include "hl_gpu.h"
+#endif

namespace paddle {
@@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create(
    const ModelConfig& config,
    int mode,
    const std::vector<ParameterType>& parameterTypes) {
+#ifndef PADDLE_MOBILE_INFERENCE
  if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) {
    return gm;
  }
  if (FLAGS_trainer_count > 1) {
    return new MultiGradientMachine(config, FLAGS_use_gpu);
  }
+#endif
  if (FLAGS_trainer_count == 1) {  // single
+#ifndef PADDLE_MOBILE_INFERENCE
    NeuralNetwork* nn;
    if (config.type() == "multi_nn") {
      /* multi submodel calculate, thread(s) will be initialized inside */
@@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create(
      /* single thread calculate */
      nn = NeuralNetwork::create(config);
    }
+#else
+    NeuralNetwork* nn = NeuralNetwork::create(config);
+#endif

    ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) {
      para->enableType(PARAMETER_VALUE);
    };
......
@@ -20,13 +20,16 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "TrainerConfig.pb.h"
#include "paddle/gserver/dataproviders/DataProvider.h"
-#include "paddle/gserver/evaluators/Evaluator.h"
#include "paddle/gserver/layers/Layer.h"
#include "paddle/math/Matrix.h"
#include "paddle/parameter/Parameter.h"
#include "paddle/parameter/ParameterUpdaterBase.h"
#include "paddle/utils/Thread.h"

+#ifndef PADDLE_MOBILE_INFERENCE
+#include "paddle/gserver/evaluators/Evaluator.h"
+#endif
+
namespace paddle {

/**
 * @brief A gradient machine is capable of calculating some outputs given
@@ -147,6 +150,7 @@ public:
  virtual void onPassEnd() = 0;

+#ifndef PADDLE_MOBILE_INFERENCE
  /**
   * Create an evaluator which can be used for eval()
   */
@@ -156,6 +160,7 @@ public:
   * evaluate using the given evaluator
   */
  virtual void eval(Evaluator* evaluator) const = 0;
+#endif

  std::vector<ParameterPtr>& getParameters() { return parameters_; }
......
@@ -14,15 +14,17 @@ limitations under the License. */
#include "paddle/utils/Util.h"

+#include "NeuralNetwork.h"
+#include "hl_gpu.h"
+#include "paddle/gserver/layers/AgentLayer.h"
#include "paddle/utils/CustomStackTrace.h"
#include "paddle/utils/Logging.h"
+#include "paddle/utils/Stat.h"

+#ifndef PADDLE_MOBILE_INFERENCE
#include "MultiNetwork.h"
-#include "NeuralNetwork.h"
#include "RecurrentGradientMachine.h"
-#include "hl_gpu.h"
-#include "paddle/gserver/layers/AgentLayer.h"
-#include "paddle/utils/Stat.h"
+#endif

namespace paddle {

void parameterInitNN(int paramId,
@@ -54,6 +56,7 @@ void parameterInitNN(int paramId,
}

NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
+#ifndef PADDLE_MOBILE_INFERENCE
  if (config.type() == "recurrent_nn") {
    return newNeuralNetwork("root");
  } else if (config.type() == "multi_nn") {
@@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
  } else {
    return newNeuralNetwork();
  }
+#else
+  return new NeuralNetwork();
+#endif
}

std::map<std::string, bool> NeuralNetwork::dllInitMap;
@@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() {
  }
}

+#ifndef PADDLE_MOBILE_INFERENCE
+
class CombinedEvaluator : public Evaluator {
 public:
  void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
@@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const {

void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); }

+#endif
+
void NeuralNetwork::setOutputGrad(const std::vector<Argument>& args) {
  CHECK_GE(outputLayers_.size(), args.size());
  for (size_t i = 0; i < args.size(); ++i) {
......
@@ -97,9 +97,12 @@ public:
  virtual void onPassEnd();

+#ifndef PADDLE_MOBILE_INFERENCE
  virtual Evaluator* makeEvaluator() const;

  virtual void eval(Evaluator* evaluator) const;
+#endif

  virtual void resetState();

  virtual void setOutputGrad(const std::vector<Argument>& args);
......
@@ -15,11 +15,14 @@ limitations under the License. */
#include "paddle/utils/Util.h"

#include "CostLayer.h"
-#include "ValidationLayer.h"
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/Error.h"
#include "paddle/utils/Logging.h"

+#ifndef PADDLE_MOBILE_INFERENCE
+#include "ValidationLayer.h"
+#endif
+
DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");

namespace paddle {
@@ -103,10 +106,12 @@ LayerPtr Layer::create(const LayerConfig& config) {
    return LayerPtr(new MultiClassCrossEntropy(config));
  else if (type == "rank-cost")
    return LayerPtr(new RankingCost(config));
+#ifndef PADDLE_MOBILE_INFERENCE
  else if (type == "auc-validation")
    return LayerPtr(new AucValidation(config));
  else if (type == "pnpair-validation")
    return LayerPtr(new PnpairValidation(config));
+#endif

  return LayerPtr(registrar_.createByType(config.type(), config));
}
......
# gserver package unittests
+if(NOT MOBILE_INFERENCE)
################### test_ProtoDataProvider ############
add_unittest_without_exec(test_ProtoDataProvider
    test_ProtoDataProvider.cpp)
# test_ProtoDataProvider will mkdir as same name,
# so if WORKING_DIRECTORY is default directory, then
# mkdir will get error.
add_test(NAME test_ProtoDataProvider
    COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+endif()

################# test_LayerGrad #######################
add_unittest_without_exec(test_LayerGrad
@@ -98,9 +100,11 @@ add_unittest_without_exec(test_KmaxSeqScore
add_test(NAME test_KmaxSeqScore
    COMMAND test_KmaxSeqScore)

+if(NOT MOBILE_INFERENCE)
################## test_Evaluator #######################
add_unittest(test_Evaluator
    test_Evaluator.cpp)
+endif()

################ test_LinearChainCRF ####################
add_simple_unittest(test_LinearChainCRF)
@@ -131,27 +135,31 @@ if(NOT WITH_DOUBLE)
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
endif()

+if(NOT MOBILE_INFERENCE)
############### test_RecurrentGradientMachine ###############
# TODO(yuyang18): There is some bug in test_RecurrentGradientMachine
# I will fix it.
add_unittest_without_exec(test_RecurrentGradientMachine
    test_RecurrentGradientMachine.cpp)
add_test(NAME test_RecurrentGradientMachine
    COMMAND .set_python_path.sh -d
    ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
    ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+endif()

-add_unittest_without_exec(test_NetworkCompare
+if(NOT MOBILE_INFERENCE)
+add_unittest_without_exec(test_NetworkCompare
    test_NetworkCompare.cpp)
if(WITH_GPU)
  add_test(NAME test_NetworkCompare
    COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
else()
  add_test(NAME test_NetworkCompare
    COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
endif()
+endif()
......
@@ -15,7 +15,6 @@ limitations under the License. */
#pragma once
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
#include "paddle/testing/TestUtil.h"

using namespace std;  // NOLINT
......
@@ -17,7 +17,6 @@ limitations under the License. */
#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"
......
@@ -17,7 +17,6 @@ limitations under the License. */
#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"
......
@@ -16,7 +16,6 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/gserver/layers/LinearChainCRF.h"
-#include "paddle/trainer/Trainer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"
......
@@ -18,7 +18,6 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/math/MathUtils.h"
-#include "paddle/trainer/Trainer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"
......
@@ -18,7 +18,6 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/math/MathUtils.h"
-#include "paddle/trainer/Trainer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"
......
@@ -18,7 +18,6 @@ limitations under the License. */
#include <gtest/gtest.h>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"
......
@@ -18,7 +18,6 @@ limitations under the License. */
#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"
......
@@ -21,7 +21,6 @@ limitations under the License. */
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/math/MathUtils.h"
-#include "paddle/trainer/Trainer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"
......
@@ -24,7 +24,6 @@ limitations under the License. */
#include "paddle/gserver/layers/Layer.h"
#include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h"
#include "paddle/math/CpuSparseMatrix.h"
-#include "paddle/trainer/Trainer.h"

using namespace paddle;  // NOLINT
using namespace std;     // NOLINT
......
@@ -15,7 +15,6 @@ limitations under the License. */
#include <gtest/gtest.h>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"
......