diff --git a/cmake/util.cmake b/cmake/util.cmake
index 24ad5c815ca20d9b6b317b1be4d2dc93a9e06fba..3640e4651fdd8b491f63875a7ea886afcadf978a 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -71,21 +71,10 @@ function(link_paddle_exe TARGET_NAME)
         generate_rdma_links()
     endif()
 
-    if(WITH_METRIC)
-        if(WITH_GPU)
-            set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric metric_cpu)
-        else()
-            set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric_cpu)
-        endif()
-    else()
-        set(METRIC_LIBS "")
-    endif()
-
     target_circle_link_libraries(${TARGET_NAME}
         ARCHIVE_START
         paddle_gserver
         paddle_function
-        ${METRIC_LIBS}
         ARCHIVE_END
         paddle_pserver
         paddle_trainer_lib
@@ -95,7 +84,6 @@ function(link_paddle_exe TARGET_NAME)
         paddle_parameter
         paddle_proto
         paddle_cuda
-        ${METRIC_LIBS}
        ${EXTERNAL_LIBS}
        ${CMAKE_THREAD_LIBS_INIT}
        ${CMAKE_DL_LIBS}
diff --git a/doc/howto/usage/cmd_parameter/arguments_cn.md b/doc/howto/usage/cmd_parameter/arguments_cn.md
index 2e2a2fcc54a09f4f41e4ebbc317e1409591ddd9c..f7aa525054468670f59309ddf9206af55bb77869 100644
--- a/doc/howto/usage/cmd_parameter/arguments_cn.md
+++ b/doc/howto/usage/cmd_parameter/arguments_cn.md
@@ -228,16 +228,6 @@
 <td class="left">√</td><td class="left">√</td>
 </tr>
 
-<tr>
-<td class="left" rowspan="2">度量学习(metric learning)</td><td class="left">external</td>
-<td class="left">√</td><td class="left">√</td><td class="left">√</td><td class="left">√</td>
-</tr>
-
-<tr>
-<td class="left">data_server_port</td>
-<td class="left">√</td><td class="left">√</td>
-</tr>
-
 <tr>
 <td class="left">参数服务器(PServer)</td><td class="left">start_pserver</td>
 <td class="left">√</td><td class="left">√</td>
diff --git a/doc/howto/usage/cmd_parameter/arguments_en.md b/doc/howto/usage/cmd_parameter/arguments_en.md
index e5546f0ddc78a9f8bdc306a19c2fe9a415463e5a..d1963067bda949b11ececefed3db7db1432c6223 100644
--- a/doc/howto/usage/cmd_parameter/arguments_en.md
+++ b/doc/howto/usage/cmd_parameter/arguments_en.md
@@ -228,16 +228,6 @@ It looks like there are a lot of arguments. However, most of them are for develo
 <td class="left">√</td><td class="left">√</td>
 </tr>
 
-<tr>
-<td class="left" rowspan="2">metric learning</td><td class="left">external</td>
-<td class="left">√</td><td class="left">√</td><td class="left">√</td><td class="left">√</td>
-</tr>
-
-<tr>
-<td class="left">data_server_port</td>
-<td class="left">√</td><td class="left">√</td>
-</tr>
-
 <tr>
 <td class="left">PServer</td><td class="left">start_pserver</td>
 <td class="left">√</td><td class="left">√</td>
diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md b/doc/howto/usage/cmd_parameter/detail_introduction_cn.md
index 3b573a324d541b024600a254d5266e517db229c5..b4625ba68cf23e5697554ba94efaf0b873f2c1de 100644
--- a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md
+++ b/doc/howto/usage/cmd_parameter/detail_introduction_cn.md
@@ -180,15 +180,6 @@
   - 用户可以自定义beam search的方法,编译成动态库,供PaddlePaddle加载。 该参数用于指定动态库路径.
   - 类型: string (默认: "", null).
 
-## 度量学习(Metric Learning)
-* `--external`
-  - 指示是否使用外部机器进行度量学习.
-  - 类型: bool (默认: 0).
-
-* `--data_server_port`
-  - 数据服务器(data server)的监听端口,主要用在度量学习中.
-  - 类型: int32 (默认: 21134).
-
 ## 数据支持(DataProvider)
 
 * `--memory_threshold_on_load_data`
diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_en.md b/doc/howto/usage/cmd_parameter/detail_introduction_en.md
index 33b7ec0d51a96ee126197e7aa819fdae0d3dc353..b681ebc81a355dfc1a7638a4463dff6979929a45 100644
--- a/doc/howto/usage/cmd_parameter/detail_introduction_en.md
+++ b/doc/howto/usage/cmd_parameter/detail_introduction_en.md
@@ -184,15 +184,6 @@
   - Specify shared dynamic library. It can be defined out of paddle by user.
   - type: string (default: "", null).
 
-## Metric Learning
-* `--external`
-  - Whether to use external machine for metric learning.
-  - type: bool (default: 0).
-
-* `--data_server_port`
-  - Listening port for dserver (data server), dserver is mainly used in metric learning.
-  - type: int32 (default: 21134).
-
 ## DataProvider
 
 * `--memory_threshold_on_load_data`
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
index 4654d0206413ec198da62af12e294cd5b442e735..6ae60102b3e431727c0954e8b8073bfe0534f8ee 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -24,9 +24,6 @@ limitations under the License. */
 DEFINE_bool(allow_only_one_model_on_one_gpu,
             true,
             "If true, do not allow multiple models on one GPU device");
-#ifdef PADDLE_METRIC_LEARNING
-DECLARE_bool(external);
-#endif
 
 namespace paddle {
 
@@ -45,11 +42,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config,
       trainerBarrier_(FLAGS_trainer_count),
       allBarrier_(FLAGS_trainer_count + 1),
       inArgsCopied_(false) {
-#ifdef PADDLE_METRIC_LEARNING
-  isPassGrad_ = FLAGS_external;
-#else
   isPassGrad_ = false;
-#endif
   numThreads_ = FLAGS_trainer_count;
   if (useGpu) {
     //! TODO(yuyang18): When useGpu=false && paddle is not compiled with gpu,
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index f76d41ad3e8a3b1730f9d50c0773ee4f61ddb541..125aaf947f3c9d976b117667d1d1b7700a029cc6 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -381,8 +381,7 @@ void Layer::backwardActivation() {
 
 void Layer::forwardDropOut() {
   auto& outV = getOutputValue();
-  if (passType_ == PASS_TRAIN || passType_ == PASS_METRIC_TRAIN ||
-      passType_ == PASS_METRIC_TRAIN_WITH_NOERROR) {
+  if (passType_ == PASS_TRAIN) {
     // new dropOutMask_ if dropOutMask_ is null ptr
     Matrix::resizeOrCreate(dropOutMask_,
                            outV->getHeight(),
diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h
index 11d7a147bf749ba2de0772b5efd5f73ab0ccdb1a..667bc451d16aa1436ac5d74dd96edbd70556edd0 100644
--- a/paddle/pserver/BaseClient.h
+++ b/paddle/pserver/BaseClient.h
@@ -30,9 +30,6 @@ namespace paddle {
  * the first solution arms with sendThreads_/recvThreads_ and sendJobQueue_/
  * recvJobQueue_. the second solution use some shared thread pool to manage
  * connections.
- * In addition to pserver, metric learning also uses network to exchange
- * features within multi-machines, so this class just abstracts some basic
- * threads and queue buffer creation for them
  */
 class BaseClient {
 protected:
diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp
index 856fa0ad1ab30e3fc554ac96dd3bed71b1548579..877cbb86ec112739a5c7eeee969ca48ef491ee87 100644
--- a/paddle/pserver/ParameterServer2.cpp
+++ b/paddle/pserver/ParameterServer2.cpp
@@ -367,11 +367,8 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
                                    std::vector<Buffer>* outputBuffers) {
   VLOG(1) << "pserver: addGradient";
 
-/// forwardbackward delta from all trainers
-/// indicate the fluctuation caused by forwardbackward.
-#ifndef PADDLE_METRIC_LEARNING
-  // @TODO(yanfei):
-  // add support tuning forwardbackward balance for metric learning
+  // forwardbackward delta from all trainers
+  // indicate the fluctuation caused by forwardbackward.
   if (!numPassFinishClients_) {
     REGISTER_BARRIER_DELTA_SERVER_SET(
         *statSet_,
@@ -381,7 +378,6 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
         request.forwardbackward_time(),
"_sparseUpdater" : "_denseUpdater"); } -#endif { /// approximately pure network overhead diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h index c8ee4726c24c335ceda22ea3a20049b01d11c149..fac589d1d711affcd008f90edf87d865c8362f69 100644 --- a/paddle/trainer/Trainer.h +++ b/paddle/trainer/Trainer.h @@ -30,10 +30,6 @@ limitations under the License. */ #include "TrainerConfigHelper.h" #include "TrainerInternal.h" -#ifdef PADDLE_METRIC_LEARNING -#include "paddle/internals/metric_learning/MetricTrainer.h" -#endif - DECLARE_int32(num_passes); namespace paddle { @@ -201,12 +197,8 @@ protected: // parameter util std::unique_ptr paramUtil_; -#ifdef PADDLE_METRIC_LEARNING - MetricTrainer trainerInternal_; -#else // trainer Internal TrainerInternal trainerInternal_; -#endif }; } // namespace paddle diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp index e8f31bc811ac30d83e8203b784ee1f93a8d35d90..320f671ed97dbadc4fa1b4b52d5611cf9239e7dd 100644 --- a/paddle/utils/Flags.cpp +++ b/paddle/utils/Flags.cpp @@ -30,7 +30,6 @@ DEFINE_bool(parallel_nn, DEFINE_int32(trainer_count, 1, "Defined how many trainers to train"); DEFINE_int32(gpu_id, 0, "Which gpu core to use"); DEFINE_int32(port, 20134, "Listening port for pserver"); -DEFINE_int32(data_server_port, 21134, "Listening port for dserver"); DEFINE_int32(ports_num, 1, "Number of ports for sending dense parameter," diff --git a/paddle/utils/Flags.h b/paddle/utils/Flags.h index 3e72f8356d883b353127ccae80f2881320d20b2b..dc4faef8331ed47b9ce3e952389b6469cd9fda2e 100644 --- a/paddle/utils/Flags.h +++ b/paddle/utils/Flags.h @@ -19,7 +19,6 @@ limitations under the License. */ DECLARE_bool(parallel_nn); DECLARE_int32(async_count); DECLARE_int32(port); -DECLARE_int32(data_server_port); DECLARE_bool(use_gpu); DECLARE_int32(gpu_id); DECLARE_int32(trainer_count); diff --git a/paddle/utils/GlobalConstants.h b/paddle/utils/GlobalConstants.h index 707346f2c76e59b50722f4f8805ebe56c3cf861b..0ec1c28dfbb2a7db9fa84c9eb2bc4dad806b78e9 100644 --- a/paddle/utils/GlobalConstants.h +++ b/paddle/utils/GlobalConstants.h @@ -23,11 +23,6 @@ enum PassType { PASS_TEST, // Test pass PASS_GC, // Gradient Check pass PASS_METRIC, // pass for generate template output with no drop rate. - // pass for metric learning training with metric learning error, only used - // when we are doing KNN evaluation. - PASS_METRIC_TRAIN, - PASS_METRIC_TRAIN_WITH_NOERROR, // Pass for metric learning training - // with no evaluation. }; enum ParameterType {