Commit 7a81327c authored by Luo Tao

remove compile option "with_metric_learning"

Parent 85189e8d
@@ -71,21 +71,10 @@ function(link_paddle_exe TARGET_NAME)
     generate_rdma_links()
   endif()
-  if(WITH_METRIC)
-    if(WITH_GPU)
-      set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric metric_cpu)
-    else()
-      set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric_cpu)
-    endif()
-  else()
-    set(METRIC_LIBS "")
-  endif()
   target_circle_link_libraries(${TARGET_NAME}
     ARCHIVE_START
     paddle_gserver
     paddle_function
-    ${METRIC_LIBS}
     ARCHIVE_END
     paddle_pserver
     paddle_trainer_lib
@@ -95,7 +84,6 @@ function(link_paddle_exe TARGET_NAME)
     paddle_parameter
     paddle_proto
     paddle_cuda
-    ${METRIC_LIBS}
     ${EXTERNAL_LIBS}
     ${CMAKE_THREAD_LIBS_INIT}
     ${CMAKE_DL_LIBS}
......
@@ -228,16 +228,6 @@
       <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
     </tr>
-    <tr>
-      <td class="left" rowspan = "2">度量学习(metric learning)</td><td class="left">external</td>
-      <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
-    </tr>
-    <tr>
-      <td class="left">data_server_port</td>
-      <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
-    </tr>
     <tr>
       <td class="left" rowspan = "16">参数服务器(PServer)</td><td class="left">start_pserver</td>
       <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
......
@@ -228,16 +228,6 @@ It looks like there are a lot of arguments. However, most of them are for develo
       <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
     </tr>
-    <tr>
-      <td class="left" rowspan = "2">metric learning</td><td class="left">external</td>
-      <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
-    </tr>
-    <tr>
-      <td class="left">data_server_port</td>
-      <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
-    </tr>
     <tr>
       <td class="left" rowspan = "16">PServer</td><td class="left">start_pserver</td>
       <td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
......
@@ -180,15 +180,6 @@
   - 用户可以自定义beam search的方法,编译成动态库,供PaddlePaddle加载。 该参数用于指定动态库路径.
   - 类型: string (默认: "", null).
-## 度量学习(Metric Learning)
-* `--external`
-  - 指示是否使用外部机器进行度量学习.
-  - 类型: bool (默认: 0).
-* `--data_server_port`
-  - 数据服务器(data server)的监听端口,主要用在度量学习中.
-  - 类型: int32 (默认: 21134).
 ## 数据支持(DataProvider)
 * `--memory_threshold_on_load_data`
......
@@ -184,15 +184,6 @@
   - Specify shared dynamic library. It can be defined out of paddle by user.
   - type: string (default: "", null).
-## Metric Learning
-* `--external`
-  - Whether to use external machine for metric learning.
-  - type: bool (default: 0).
-* `--data_server_port`
-  - Listening port for dserver (data server), dserver is mainly used in metric learning.
-  - type: int32 (default: 21134).
 ## DataProvider
 * `--memory_threshold_on_load_data`
......
@@ -24,9 +24,6 @@ limitations under the License. */
 DEFINE_bool(allow_only_one_model_on_one_gpu,
             true,
             "If true, do not allow multiple models on one GPU device");
-#ifdef PADDLE_METRIC_LEARNING
-DECLARE_bool(external);
-#endif
 namespace paddle {
@@ -45,11 +42,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config,
       trainerBarrier_(FLAGS_trainer_count),
       allBarrier_(FLAGS_trainer_count + 1),
       inArgsCopied_(false) {
-#ifdef PADDLE_METRIC_LEARNING
-  isPassGrad_ = FLAGS_external;
-#else
   isPassGrad_ = false;
-#endif
   numThreads_ = FLAGS_trainer_count;
   if (useGpu) {
     //! TODO(yuyang18): When useGpu=false && paddle is not compiled with gpu,
......
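Note for readers unfamiliar with the flag plumbing removed above: PaddlePaddle uses gflags, where a flag is DEFINE'd in exactly one .cpp file and DECLARE'd in any other translation unit that reads it through the generated `FLAGS_` symbol. The sketch below is a minimal, self-contained illustration of that pattern; it is not Paddle code, and the flag name and `GradientMachineSketch` struct are only stand-ins for the deleted `DECLARE_bool(external)` / `isPassGrad_ = FLAGS_external` lines.

```cpp
#include <gflags/gflags.h>
#include <iostream>

// In the real tree the DEFINE lives in a shared flags .cpp file; the deleted
// DECLARE_bool(external) is what gave MultiGradientMachine.cpp access to it.
DEFINE_bool(external, false, "illustrative stand-in for the removed flag");

struct GradientMachineSketch {
  // Mirrors the removed `isPassGrad_ = FLAGS_external;` initialization.
  bool isPassGrad = FLAGS_external;
};

int main(int argc, char* argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  GradientMachineSketch machine;
  std::cout << std::boolalpha << "isPassGrad = " << machine.isPassGrad << "\n";
  return 0;
}
```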
@@ -381,8 +381,7 @@ void Layer::backwardActivation() {
 void Layer::forwardDropOut() {
   auto& outV = getOutputValue();
-  if (passType_ == PASS_TRAIN || passType_ == PASS_METRIC_TRAIN ||
-      passType_ == PASS_METRIC_TRAIN_WITH_NOERROR) {
+  if (passType_ == PASS_TRAIN) {
     // new dropOutMask_ if dropOutMask_ is null ptr
     Matrix::resizeOrCreate(dropOutMask_,
                            outV->getHeight(),
......
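As context for the simplified condition above: forwardDropOut only samples and applies a dropout mask during training-type passes. The snippet below is a generic sketch of that behaviour in plain C++; it does not use the Paddle Matrix API, `PassType` is reduced to the values that survive this commit, and `applyDropout`/`dropRate` are illustrative names.

```cpp
#include <iostream>
#include <random>
#include <vector>

// Pass types that remain after this commit (illustrative copy of the enum).
enum PassType { PASS_TRAIN, PASS_TEST, PASS_GC, PASS_METRIC };

// Sample a 0/1 keep-mask and zero out dropped activations, but only for the
// training pass; evaluation-style passes leave the activations untouched here
// (a typical implementation rescales by (1 - dropRate) for those instead).
void applyDropout(std::vector<float>& output, float dropRate, PassType passType,
                  std::mt19937& rng) {
  if (passType != PASS_TRAIN) return;
  std::bernoulli_distribution keep(1.0 - dropRate);
  for (float& v : output) {
    if (!keep(rng)) v = 0.0f;
  }
}

int main() {
  std::mt19937 rng(42);
  std::vector<float> activations(8, 1.0f);
  applyDropout(activations, /*dropRate=*/0.5f, PASS_TRAIN, rng);
  for (float v : activations) std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}
```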
@@ -30,9 +30,6 @@ namespace paddle {
  * the first solution arms with sendThreads_/recvThreads_ and sendJobQueue_/
  * recvJobQueue_. the second solution use some shared thread pool to manage
  * connections.
- * In addition to pserver, metric learning also uses network to exchange
- * features within multi-machines, so this class just abstracts some basic
- * threads and queue buffer creation for them
  */
 class BaseClient {
 protected:
......
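The comment kept above refers to BaseClient's first design: one send/receive thread per connection, each draining its own job queue. Below is a small, self-contained sketch of that thread-plus-blocking-queue pattern; it is illustrative only, and `JobQueue` and the lambda jobs are not part of the BaseClient interface.

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// Minimal blocking queue of jobs, as sendJobQueue_/recvJobQueue_ provide.
class JobQueue {
 public:
  void enqueue(std::function<void()> job) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      jobs_.push(std::move(job));
    }
    cv_.notify_one();
  }
  std::function<void()> dequeue() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return !jobs_.empty(); });
    auto job = std::move(jobs_.front());
    jobs_.pop();
    return job;
  }

 private:
  std::queue<std::function<void()>> jobs_;
  std::mutex mutex_;
  std::condition_variable cv_;
};

int main() {
  JobQueue sendJobQueue;
  // A single "send thread" drains the queue; the first design the comment
  // names keeps one such thread (and queue) per connection.
  std::thread sendThread([&] {
    for (int i = 0; i < 2; ++i) sendJobQueue.dequeue()();
  });
  sendJobQueue.enqueue([] { /* serialize and send a parameter block */ });
  sendJobQueue.enqueue([] { /* send the next block */ });
  sendThread.join();
  return 0;
}
```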
@@ -367,11 +367,8 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
                                    std::vector<Buffer>* outputBuffers) {
   VLOG(1) << "pserver: addGradient";
-  /// forwardbackward delta from all trainers
-  /// indicate the fluctuation caused by forwardbackward.
-#ifndef PADDLE_METRIC_LEARNING
-  // @TODO(yanfei):
-  // add support tuning forwardbackward balance for metric learning
+  // forwardbackward delta from all trainers
+  // indicate the fluctuation caused by forwardbackward.
   if (!numPassFinishClients_) {
     REGISTER_BARRIER_DELTA_SERVER_SET(
         *statSet_,
@@ -381,7 +378,6 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
         request.forwardbackward_time(),
         isSparseServer_ ? "_sparseUpdater" : "_denseUpdater");
   }
-#endif
   {
     /// approximately pure network overhead
......
@@ -30,10 +30,6 @@ limitations under the License. */
 #include "TrainerConfigHelper.h"
 #include "TrainerInternal.h"
-#ifdef PADDLE_METRIC_LEARNING
-#include "paddle/internals/metric_learning/MetricTrainer.h"
-#endif
 DECLARE_int32(num_passes);
 namespace paddle {
@@ -201,12 +197,8 @@ protected:
   // parameter util
   std::unique_ptr<ParameterUtil> paramUtil_;
-#ifdef PADDLE_METRIC_LEARNING
-  MetricTrainer trainerInternal_;
-#else
   // trainer Internal
   TrainerInternal trainerInternal_;
-#endif
 };
 }  // namespace paddle
@@ -30,7 +30,6 @@ DEFINE_bool(parallel_nn,
 DEFINE_int32(trainer_count, 1, "Defined how many trainers to train");
 DEFINE_int32(gpu_id, 0, "Which gpu core to use");
 DEFINE_int32(port, 20134, "Listening port for pserver");
-DEFINE_int32(data_server_port, 21134, "Listening port for dserver");
 DEFINE_int32(ports_num,
              1,
              "Number of ports for sending dense parameter,"
......
@@ -19,7 +19,6 @@ limitations under the License. */
 DECLARE_bool(parallel_nn);
 DECLARE_int32(async_count);
 DECLARE_int32(port);
-DECLARE_int32(data_server_port);
 DECLARE_bool(use_gpu);
 DECLARE_int32(gpu_id);
 DECLARE_int32(trainer_count);
......
@@ -23,11 +23,6 @@ enum PassType {
   PASS_TEST,    // Test pass
   PASS_GC,      // Gradient Check pass
   PASS_METRIC,  // pass for generate template output with no drop rate.
-  // pass for metric learning training with metric learning error, only used
-  // when we are doing KNN evaluation.
-  PASS_METRIC_TRAIN,
-  PASS_METRIC_TRAIN_WITH_NOERROR,  // Pass for metric learning training
-                                   // with no evaluation.
 };
 enum ParameterType {
......