From 7a81327c28dd864dca3fdc711587c2c717717c99 Mon Sep 17 00:00:00 2001
From: Luo Tao
Date: Tue, 14 Mar 2017 16:13:49 +0800
Subject: [PATCH] remove compile option "with_metric_learning"

---
 cmake/util.cmake                                   | 12 ------------
 doc/howto/usage/cmd_parameter/arguments_cn.md      | 10 ----------
 doc/howto/usage/cmd_parameter/arguments_en.md      | 10 ----------
 .../usage/cmd_parameter/detail_introduction_cn.md  |  9 ---------
 .../usage/cmd_parameter/detail_introduction_en.md  |  9 ---------
 .../gradientmachines/MultiGradientMachine.cpp      |  7 -------
 paddle/gserver/layers/Layer.cpp                    |  3 +--
 paddle/pserver/BaseClient.h                        |  3 ---
 paddle/pserver/ParameterServer2.cpp                |  8 ++------
 paddle/trainer/Trainer.h                           |  8 --------
 paddle/utils/Flags.cpp                             |  1 -
 paddle/utils/Flags.h                               |  1 -
 paddle/utils/GlobalConstants.h                     |  5 -----
 13 files changed, 3 insertions(+), 83 deletions(-)

diff --git a/cmake/util.cmake b/cmake/util.cmake
index 24ad5c815ca..3640e4651fd 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -71,21 +71,10 @@ function(link_paddle_exe TARGET_NAME)
     generate_rdma_links()
   endif()

-  if(WITH_METRIC)
-    if(WITH_GPU)
-      set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric metric_cpu)
-    else()
-      set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric_cpu)
-    endif()
-  else()
-    set(METRIC_LIBS "")
-  endif()
-
   target_circle_link_libraries(${TARGET_NAME}
     ARCHIVE_START
     paddle_gserver
     paddle_function
-    ${METRIC_LIBS}
     ARCHIVE_END
     paddle_pserver
     paddle_trainer_lib
@@ -95,7 +84,6 @@ function(link_paddle_exe TARGET_NAME)
     paddle_parameter
     paddle_proto
     paddle_cuda
-    ${METRIC_LIBS}
     ${EXTERNAL_LIBS}
     ${CMAKE_THREAD_LIBS_INIT}
     ${CMAKE_DL_LIBS}
diff --git a/doc/howto/usage/cmd_parameter/arguments_cn.md b/doc/howto/usage/cmd_parameter/arguments_cn.md
index 2e2a2fcc54a..f7aa5250544 100644
--- a/doc/howto/usage/cmd_parameter/arguments_cn.md
+++ b/doc/howto/usage/cmd_parameter/arguments_cn.md
@@ -228,16 +228,6 @@
 √√

-
-度量学习(metric learning)external
-√√√√
-
-
-
-data_server_port
-√√
-
-
 参数服务器(PServer)start_pserver
 √√
diff --git a/doc/howto/usage/cmd_parameter/arguments_en.md b/doc/howto/usage/cmd_parameter/arguments_en.md
index e5546f0ddc7..d1963067bda 100644
--- a/doc/howto/usage/cmd_parameter/arguments_en.md
+++ b/doc/howto/usage/cmd_parameter/arguments_en.md
@@ -228,16 +228,6 @@ It looks like there are a lot of arguments. However, most of them are for develo
 √√

-
-metric learningexternal
-√√√√
-
-
-
-data_server_port
-√√
-
-
 PServerstart_pserver
 √√
diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md b/doc/howto/usage/cmd_parameter/detail_introduction_cn.md
index 3b573a324d5..b4625ba68cf 100644
--- a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md
+++ b/doc/howto/usage/cmd_parameter/detail_introduction_cn.md
@@ -180,15 +180,6 @@
   - 用户可以自定义beam search的方法,编译成动态库,供PaddlePaddle加载。 该参数用于指定动态库路径.
   - 类型: string (默认: "", null).

-## 度量学习(Metric Learning)
-* `--external`
-  - 指示是否使用外部机器进行度量学习.
-  - 类型: bool (默认: 0).
-
-* `--data_server_port`
-  - 数据服务器(data server)的监听端口,主要用在度量学习中.
-  - 类型: int32 (默认: 21134).
-
 ## 数据支持(DataProvider)
 * `--memory_threshold_on_load_data`
diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_en.md b/doc/howto/usage/cmd_parameter/detail_introduction_en.md
index 33b7ec0d51a..b681ebc81a3 100644
--- a/doc/howto/usage/cmd_parameter/detail_introduction_en.md
+++ b/doc/howto/usage/cmd_parameter/detail_introduction_en.md
@@ -184,15 +184,6 @@
   - Specify shared dynamic library. It can be defined out of paddle by user.
   - type: string (default: "", null).

-## Metric Learning
-* `--external`
-  - Whether to use external machine for metric learning.
-  - type: bool (default: 0).
-
-* `--data_server_port`
-  - Listening port for dserver (data server), dserver is mainly used in metric learning.
-  - type: int32 (default: 21134).
-
 ## DataProvider
 * `--memory_threshold_on_load_data`
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index 4654d020641..6ae60102b3e 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -24,9 +24,6 @@ limitations under the License. */
 DEFINE_bool(allow_only_one_model_on_one_gpu,
             true,
             "If true, do not allow multiple models on one GPU device");
-#ifdef PADDLE_METRIC_LEARNING
-DECLARE_bool(external);
-#endif

 namespace paddle {

@@ -45,11 +42,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config,
       trainerBarrier_(FLAGS_trainer_count),
       allBarrier_(FLAGS_trainer_count + 1),
       inArgsCopied_(false) {
-#ifdef PADDLE_METRIC_LEARNING
-  isPassGrad_ = FLAGS_external;
-#else
   isPassGrad_ = false;
-#endif
   numThreads_ = FLAGS_trainer_count;
   if (useGpu) {
     //! TODO(yuyang18): When useGpu=false && paddle is not compiled with gpu,
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index f76d41ad3e8..125aaf947f3 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -381,8 +381,7 @@ void Layer::backwardActivation() {

 void Layer::forwardDropOut() {
   auto& outV = getOutputValue();
-  if (passType_ == PASS_TRAIN || passType_ == PASS_METRIC_TRAIN ||
-      passType_ == PASS_METRIC_TRAIN_WITH_NOERROR) {
+  if (passType_ == PASS_TRAIN) {
     // new dropOutMask_ if dropOutMask_ is null ptr
     Matrix::resizeOrCreate(dropOutMask_,
                            outV->getHeight(),
diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h
index 11d7a147bf7..667bc451d16 100644
--- a/paddle/pserver/BaseClient.h
+++ b/paddle/pserver/BaseClient.h
@@ -30,9 +30,6 @@ namespace paddle {
 * the first solution arms with sendThreads_/recvThreads_ and sendJobQueue_/
 * recvJobQueue_. the second solution use some shared thread pool to manage
 * connections.
- * In addition to pserver, metric learning also uses network to exchange
- * features within multi-machines, so this class just abstracts some basic
- * threads and queue buffer creation for them
 */
 class BaseClient {
 protected:
diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp
index 856fa0ad1ab..877cbb86ec1 100644
--- a/paddle/pserver/ParameterServer2.cpp
+++ b/paddle/pserver/ParameterServer2.cpp
@@ -367,11 +367,8 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
                                    std::vector<Buffer>* outputBuffers) {
   VLOG(1) << "pserver: addGradient";

-/// forwardbackward delta from all trainers
-/// indicate the fluctuation caused by forwardbackward.
-#ifndef PADDLE_METRIC_LEARNING
-  // @TODO(yanfei):
-  // add support tuning forwardbackward balance for metric learning
+  // forwardbackward delta from all trainers
+  // indicate the fluctuation caused by forwardbackward.
   if (!numPassFinishClients_) {
     REGISTER_BARRIER_DELTA_SERVER_SET(
         *statSet_,
@@ -381,7 +378,6 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
         request.forwardbackward_time(),
         isSparseServer_ ? "_sparseUpdater" : "_denseUpdater");
   }
-#endif

   {
     /// approximately pure network overhead
diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h
index c8ee4726c24..fac589d1d71 100644
--- a/paddle/trainer/Trainer.h
+++ b/paddle/trainer/Trainer.h
@@ -30,10 +30,6 @@ limitations under the License. */
 #include "TrainerConfigHelper.h"
 #include "TrainerInternal.h"

-#ifdef PADDLE_METRIC_LEARNING
-#include "paddle/internals/metric_learning/MetricTrainer.h"
-#endif
-
 DECLARE_int32(num_passes);

 namespace paddle {
@@ -201,12 +197,8 @@ protected:
   // parameter util
   std::unique_ptr<ParameterUtil> paramUtil_;

-#ifdef PADDLE_METRIC_LEARNING
-  MetricTrainer trainerInternal_;
-#else
   // trainer Internal
   TrainerInternal trainerInternal_;
-#endif
 };

 }  // namespace paddle
diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp
index e8f31bc811a..320f671ed97 100644
--- a/paddle/utils/Flags.cpp
+++ b/paddle/utils/Flags.cpp
@@ -30,7 +30,6 @@ DEFINE_bool(parallel_nn,
 DEFINE_int32(trainer_count, 1, "Defined how many trainers to train");
 DEFINE_int32(gpu_id, 0, "Which gpu core to use");
 DEFINE_int32(port, 20134, "Listening port for pserver");
-DEFINE_int32(data_server_port, 21134, "Listening port for dserver");
 DEFINE_int32(ports_num,
              1,
              "Number of ports for sending dense parameter,"
diff --git a/paddle/utils/Flags.h b/paddle/utils/Flags.h
index 3e72f8356d8..dc4faef8331 100644
--- a/paddle/utils/Flags.h
+++ b/paddle/utils/Flags.h
@@ -19,7 +19,6 @@ limitations under the License. */
 DECLARE_bool(parallel_nn);
 DECLARE_int32(async_count);
 DECLARE_int32(port);
-DECLARE_int32(data_server_port);
 DECLARE_bool(use_gpu);
 DECLARE_int32(gpu_id);
 DECLARE_int32(trainer_count);
diff --git a/paddle/utils/GlobalConstants.h b/paddle/utils/GlobalConstants.h
index 707346f2c76..0ec1c28dfbb 100644
--- a/paddle/utils/GlobalConstants.h
+++ b/paddle/utils/GlobalConstants.h
@@ -23,11 +23,6 @@ enum PassType {
   PASS_TEST,    // Test pass
   PASS_GC,      // Gradient Check pass
   PASS_METRIC,  // pass for generate template output with no drop rate.
-  // pass for metric learning training with metric learning error, only used
-  // when we are doing KNN evaluation.
-  PASS_METRIC_TRAIN,
-  PASS_METRIC_TRAIN_WITH_NOERROR,  // Pass for metric learning training
-                                   // with no evaluation.
 };

 enum ParameterType {
--
GitLab