Commit 3c66b307 authored by hedaoyuan

Remove the pserver, trainer, evaluators, and some unused gradient machines when compiling the mobile inference library.

Parent: 7cc5ae99
CMakeLists.txt
@@ -86,6 +86,14 @@ if(ANDROID OR IOS)
         "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
     set(WITH_MKLML OFF CACHE STRING
         "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
+
+    if(WITH_C_API)
+        # Compile PaddlePaddle mobile inference library
+        set(MOBILE_INFERENCE ON)
+        add_definitions(-DPADDLE_MOBILE_INFERENCE)
+    endif()
+    set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling
+        for Android and iOS" FORCE)
 endif()
 
 set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
......
cmake/util.cmake
@@ -73,25 +73,44 @@ function(link_paddle_exe TARGET_NAME)
         generate_rdma_links()
     endif()
 
-    target_circle_link_libraries(${TARGET_NAME}
-        ARCHIVE_START
-        paddle_gserver
-        paddle_function
-        ARCHIVE_END
-        paddle_pserver
-        paddle_trainer_lib
-        paddle_network
-        paddle_math
-        paddle_utils
-        paddle_parameter
-        paddle_proto
-        paddle_cuda
-        paddle_optimizer
-        ${EXTERNAL_LIBS}
-        ${CMAKE_THREAD_LIBS_INIT}
-        ${CMAKE_DL_LIBS}
-        ${RDMA_LD_FLAGS}
-        ${RDMA_LIBS})
+    if(MOBILE_INFERENCE)
+        target_circle_link_libraries(${TARGET_NAME}
+            ARCHIVE_START
+            paddle_gserver
+            paddle_function
+            ARCHIVE_END
+            paddle_math
+            paddle_utils
+            paddle_parameter
+            paddle_proto
+            paddle_cuda
+            paddle_optimizer
+            ${EXTERNAL_LIBS}
+            ${CMAKE_THREAD_LIBS_INIT}
+            ${CMAKE_DL_LIBS}
+            ${RDMA_LD_FLAGS}
+            ${RDMA_LIBS})
+    else()
+        target_circle_link_libraries(${TARGET_NAME}
+            ARCHIVE_START
+            paddle_gserver
+            paddle_function
+            ARCHIVE_END
+            paddle_pserver
+            paddle_trainer_lib
+            paddle_network
+            paddle_math
+            paddle_utils
+            paddle_parameter
+            paddle_proto
+            paddle_cuda
+            paddle_optimizer
+            ${EXTERNAL_LIBS}
+            ${CMAKE_THREAD_LIBS_INIT}
+            ${CMAKE_DL_LIBS}
+            ${RDMA_LD_FLAGS}
+            ${RDMA_LIBS})
+    endif()
 
     if(ANDROID)
         target_link_libraries(${TARGET_NAME} log)
......
paddle/CMakeLists.txt
@@ -3,25 +3,30 @@ add_subdirectory(function)
 add_subdirectory(utils)
 add_subdirectory(testing)
 add_subdirectory(math)
-add_subdirectory(parameter)
 add_subdirectory(gserver)
-add_subdirectory(pserver)
-add_subdirectory(trainer)
 add_subdirectory(scripts)
 add_subdirectory(string)
+add_subdirectory(parameter)
 
-if(Boost_FOUND)
-  add_subdirectory(memory)
-  add_subdirectory(platform)
-  add_subdirectory(framework)
-  add_subdirectory(operators)
-  add_subdirectory(pybind)
-endif()
-
-if(WITH_C_API)
+if(MOBILE_INFERENCE)
   add_subdirectory(capi)
-endif()
+else()
+  add_subdirectory(pserver)
+  add_subdirectory(trainer)
+
+  if(WITH_C_API)
+    add_subdirectory(capi)
+  endif()
+
+  if(Boost_FOUND)
+    add_subdirectory(memory)
+    add_subdirectory(platform)
+    add_subdirectory(framework)
+    add_subdirectory(operators)
+    add_subdirectory(pybind)
+  endif()
 
-if(WITH_SWIG_PY)
-  add_subdirectory(api)
-endif()
+  if(WITH_SWIG_PY)
+    add_subdirectory(api)
+  endif()
+endif()
paddle/capi/CMakeLists.txt
@@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS
     paddle_cuda
     paddle_function
     paddle_gserver
-    paddle_proto
-    paddle_pserver
-    paddle_network)
+    paddle_proto)
 
 cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
@@ -50,7 +48,9 @@ if(NOT IOS)
   add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
   set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
-  link_paddle_exe(paddle_capi_shared)
+
+  link_paddle_exe(paddle_capi_shared)
+
 endif()
 
 # install library & headers.
......
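With paddle_pserver and paddle_network dropped from PADDLE_CAPI_INFER_LIBS, the C-API archive now pulls in inference code only. Below is a minimal sketch of driving such a trimmed library through paddle/capi; the entry points follow the capi headers of this period, but treat the exact signatures as assumptions rather than something verified against this revision, and read_file is a local helper, not part of the API.

// Sketch: inference-only use of the mobile C-API library.
// Signatures assumed from paddle/capi of this era.
#include <paddle/capi.h>
#include <stdio.h>
#include <stdlib.h>

// Small local helper (not part of the C-API): slurp a file into memory.
static void* read_file(const char* path, long* size) {
  FILE* f = fopen(path, "rb");
  if (!f) return NULL;
  fseek(f, 0, SEEK_END);
  *size = ftell(f);
  fseek(f, 0, SEEK_SET);
  void* buf = malloc(*size);
  fread(buf, 1, *size, f);
  fclose(f);
  return buf;
}

int main() {
  // CPU-only runtime init; there is no trainer or pserver to set up.
  char* argv[] = {const_cast<char*>("--use_gpu=false")};
  paddle_init(1, argv);

  // Model config protobuf prepared offline; parameters loaded from disk.
  long size = 0;
  void* buf = read_file("inference.paddle", &size);
  paddle_gradient_machine machine;
  paddle_gradient_machine_create_for_inference(&machine, buf, (int)size);
  paddle_gradient_machine_load_parameter_from_disk(machine, "./params");

  // One input slot holding a 1 x 784 dense CPU matrix.
  paddle_arguments in_args = paddle_arguments_create_none();
  paddle_arguments_resize(in_args, 1);
  paddle_matrix mat = paddle_matrix_create(/*height=*/1, /*width=*/784,
                                           /*useGpu=*/false);
  paddle_arguments_set_value(in_args, 0, mat);

  // Forward pass only: the last argument disables training behavior.
  paddle_arguments out_args = paddle_arguments_create_none();
  paddle_gradient_machine_forward(machine, in_args, out_args,
                                  /*isTrain=*/false);
  return 0;
}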
paddle/gserver/CMakeLists.txt
@@ -60,6 +60,28 @@ if(NOT WITH_PYTHON)
             dataproviders/PyDataProvider.h)
 endif()
 
+if(MOBILE_INFERENCE)
+    # Remove evaluators
+    list(REMOVE_ITEM GSERVER_SOURCES
+         layers/ValidationLayer.cpp
+         evaluators/Evaluator.cpp
+         evaluators/DetectionMAPEvaluator.cpp
+         evaluators/CTCErrorEvaluator.cpp
+         evaluators/ChunkEvaluator.cpp)
+
+    # Remove useless gradientmachines
+    list(REMOVE_ITEM GSERVER_SOURCES
+         gradientmachines/MultiNetwork.cpp
+         gradientmachines/RecurrentGradientMachine.cpp
+         gradientmachines/ParallelNeuralNetwork.cpp
+         gradientmachines/GradientMachineMode.cpp
+         gradientmachines/MultiGradientMachine.cpp)
+
+    # Remove useless layers
+    list(REMOVE_ITEM GSERVER_SOURCES
+         layers/RecurrentLayerGroup.cpp)
+endif()
+
 if(WITH_GPU)
     cuda_add_library(paddle_gserver ${GSERVER_SOURCES})
 else()
......
paddle/gserver/gradientmachines/GradientMachine.cpp
@@ -17,12 +17,15 @@ limitations under the License. */
 #include <fstream>
 #include "paddle/utils/Logging.h"
 
+#include "NeuralNetwork.h"
+#include "hl_gpu.h"
+
+#ifndef PADDLE_MOBILE_INFERENCE
 #include "GradientMachineMode.h"
 #include "MultiGradientMachine.h"
 #include "MultiNetwork.h"
-#include "NeuralNetwork.h"
 #include "ParallelNeuralNetwork.h"
-#include "hl_gpu.h"
+#endif
 
 namespace paddle {
@@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create(
     const ModelConfig& config,
     int mode,
     const std::vector<ParameterType>& parameterTypes) {
+#ifndef PADDLE_MOBILE_INFERENCE
   if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) {
     return gm;
   }
   if (FLAGS_trainer_count > 1) {
     return new MultiGradientMachine(config, FLAGS_use_gpu);
   }
+#endif
   if (FLAGS_trainer_count == 1) {  // single
+#ifndef PADDLE_MOBILE_INFERENCE
     NeuralNetwork* nn;
     if (config.type() == "multi_nn") {
       /* multi submodel calculate, thread(s) will be initialized inside */
@@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create(
       /* single thread calculate */
       nn = NeuralNetwork::create(config);
     }
+#else
+    NeuralNetwork* nn = NeuralNetwork::create(config);
+#endif
     ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) {
       para->enableType(PARAMETER_VALUE);
     };
......
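With the guards above, GradientMachine::create on mobile collapses to the single-trainer branch: no IGradientMachineMode dispatch, no MultiGradientMachine, just NeuralNetwork::create. A hedged sketch of the forward-only calling pattern that remains (the mode constant, the create arguments, and PASS_TEST follow the surrounding sources, but take the details as assumptions):

#include <memory>
#include <vector>
#include "paddle/gserver/gradientmachines/GradientMachine.h"

using paddle::Argument;
using paddle::GradientMachine;
using paddle::ModelConfig;

// Build a machine that only allocates value buffers: gradient and
// momentum buffers are training-time state, unused at inference.
std::unique_ptr<GradientMachine> makeInferenceMachine(const ModelConfig& config) {
  std::vector<paddle::ParameterType> types = {paddle::PARAMETER_VALUE};
  return std::unique_ptr<GradientMachine>(
      GradientMachine::create(config, /*mode=*/0 /* assumed kNormal */, types));
}

// Forward-only evaluation: PASS_TEST skips gradient bookkeeping.
void infer(GradientMachine& gm,
           const std::vector<Argument>& inArgs,
           std::vector<Argument>* outArgs) {
  gm.forward(inArgs, outArgs, paddle::PASS_TEST);
}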
paddle/gserver/gradientmachines/GradientMachine.h
@@ -20,13 +20,16 @@ limitations under the License. */
 #include "ModelConfig.pb.h"
 #include "TrainerConfig.pb.h"
 #include "paddle/gserver/dataproviders/DataProvider.h"
-#include "paddle/gserver/evaluators/Evaluator.h"
 #include "paddle/gserver/layers/Layer.h"
 #include "paddle/math/Matrix.h"
 #include "paddle/parameter/Parameter.h"
 #include "paddle/parameter/ParameterUpdaterBase.h"
 #include "paddle/utils/Thread.h"
 
+#ifndef PADDLE_MOBILE_INFERENCE
+#include "paddle/gserver/evaluators/Evaluator.h"
+#endif
+
 namespace paddle {
 
 /**
  * @brief A gradient machine is capable of calculating some outputs given
@@ -147,6 +150,7 @@ public:
   virtual void onPassEnd() = 0;
 
+#ifndef PADDLE_MOBILE_INFERENCE
   /**
    * Create an evaluator which can be used for eval()
    */
@@ -156,6 +160,7 @@ public:
    * evaluate using the given evaluator
    */
   virtual void eval(Evaluator* evaluator) const = 0;
+#endif
 
   std::vector<ParameterPtr>& getParameters() { return parameters_; }
......
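Because makeEvaluator() and eval() vanish from the interface under PADDLE_MOBILE_INFERENCE, any shared caller has to guard its evaluator use with the same macro. A small sketch of that pattern, assuming Evaluator exposes the start()/finish() pair it has elsewhere in this tree:

#include <memory>
#include "paddle/gserver/gradientmachines/GradientMachine.h"

// Desktop builds keep the evaluator path; under PADDLE_MOBILE_INFERENCE
// the body compiles away together with the interface it would call.
void evaluatePassIfAvailable(paddle::GradientMachine& gm) {
#ifndef PADDLE_MOBILE_INFERENCE
  std::unique_ptr<paddle::Evaluator> evaluator(gm.makeEvaluator());
  evaluator->start();
  gm.eval(evaluator.get());
  evaluator->finish();
#endif
}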
paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -14,15 +14,17 @@ limitations under the License. */
 
 #include "paddle/utils/Util.h"
 
+#include "NeuralNetwork.h"
+#include "hl_gpu.h"
+#include "paddle/gserver/layers/AgentLayer.h"
 #include "paddle/utils/CustomStackTrace.h"
 #include "paddle/utils/Logging.h"
+#include "paddle/utils/Stat.h"
+
+#ifndef PADDLE_MOBILE_INFERENCE
 #include "MultiNetwork.h"
-#include "NeuralNetwork.h"
 #include "RecurrentGradientMachine.h"
-#include "hl_gpu.h"
-#include "paddle/gserver/layers/AgentLayer.h"
-#include "paddle/utils/Stat.h"
+#endif
 
 namespace paddle {
 
 void parameterInitNN(int paramId,
@@ -54,6 +56,7 @@ void parameterInitNN(int paramId,
 }
 
 NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
+#ifndef PADDLE_MOBILE_INFERENCE
   if (config.type() == "recurrent_nn") {
     return newNeuralNetwork("root");
   } else if (config.type() == "multi_nn") {
@@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
   } else {
     return newNeuralNetwork();
   }
+#else
+  return new NeuralNetwork();
+#endif
 }
 
 std::map<std::string, bool> NeuralNetwork::dllInitMap;
@@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() {
   }
 }
 
+#ifndef PADDLE_MOBILE_INFERENCE
+
 class CombinedEvaluator : public Evaluator {
 public:
   void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
@@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const {
 
 void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); }
 
+#endif
+
 void NeuralNetwork::setOutputGrad(const std::vector<Argument>& args) {
   CHECK_GE(outputLayers_.size(), args.size());
   for (size_t i = 0; i < args.size(); ++i) {
......
paddle/gserver/gradientmachines/NeuralNetwork.h
@@ -97,9 +97,12 @@ public:
   virtual void onPassEnd();
 
+#ifndef PADDLE_MOBILE_INFERENCE
   virtual Evaluator* makeEvaluator() const;
 
   virtual void eval(Evaluator* evaluator) const;
+#endif
+
   virtual void resetState();
 
   virtual void setOutputGrad(const std::vector<Argument>& args);
......
paddle/gserver/layers/Layer.cpp
@@ -103,10 +103,12 @@ LayerPtr Layer::create(const LayerConfig& config) {
     return LayerPtr(new MultiClassCrossEntropy(config));
   else if (type == "rank-cost")
     return LayerPtr(new RankingCost(config));
+#ifndef PADDLE_MOBILE_INFERENCE
   else if (type == "auc-validation")
     return LayerPtr(new AucValidation(config));
   else if (type == "pnpair-validation")
     return LayerPtr(new PnpairValidation(config));
+#endif
 
   return LayerPtr(registrar_.createByType(config.type(), config));
 }
......