Commit e9166edd authored by Tao Luo, committed by GitHub

Merge pull request #1615 from luotao1/ml

remove with_metric_learning compile option
......@@ -71,21 +71,10 @@ function(link_paddle_exe TARGET_NAME)
generate_rdma_links()
endif()
if(WITH_METRIC)
if(WITH_GPU)
set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric metric_cpu)
else()
set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric_cpu)
endif()
else()
set(METRIC_LIBS "")
endif()
target_circle_link_libraries(${TARGET_NAME}
ARCHIVE_START
paddle_gserver
paddle_function
${METRIC_LIBS}
ARCHIVE_END
paddle_pserver
paddle_trainer_lib
......@@ -95,7 +84,6 @@ function(link_paddle_exe TARGET_NAME)
paddle_parameter
paddle_proto
paddle_cuda
${METRIC_LIBS}
${EXTERNAL_LIBS}
${CMAKE_THREAD_LIBS_INIT}
${CMAKE_DL_LIBS}
......
......@@ -228,16 +228,6 @@
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
</tr>
<tr>
<td class="left" rowspan = "2">度量学习(metric learning)</td><td class="left">external</td>
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
</tr>
<tr>
<td class="left">data_server_port</td>
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
</tr>
<tr>
<td class="left" rowspan = "16">参数服务器(PServer)</td><td class="left">start_pserver</td>
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
......
......@@ -228,16 +228,6 @@ It looks like there are a lot of arguments. However, most of them are for developers
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
</tr>
<tr>
<td class="left" rowspan = "2">metric learning</td><td class="left">external</td>
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
</tr>
<tr>
<td class="left">data_server_port</td>
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
</tr>
<tr>
<td class="left" rowspan = "16">PServer</td><td class="left">start_pserver</td>
<td class="left"></td><td class="left"></td><td class="left"></td><td class="left"></td>
......
......@@ -180,15 +180,6 @@
 - Users can define their own beam search method, compile it into a dynamic library, and have PaddlePaddle load it. This argument specifies the path of that dynamic library.
- type: string (default: "", null).
## Metric Learning
* `--external`
- Whether to use an external machine for metric learning.
- type: bool (default: 0).
* `--data_server_port`
- Listening port of the data server (dserver), mainly used in metric learning.
- type: int32 (default: 21134).
## DataProvider
* `--memory_threshold_on_load_data`
......
......@@ -184,15 +184,6 @@
- Specify a shared dynamic library. It can be defined outside of Paddle by the user.
- type: string (default: "", null).
## Metric Learning
* `--external`
- Whether to use an external machine for metric learning.
- type: bool (default: 0).
* `--data_server_port`
- Listening port for the dserver (data server); the dserver is mainly used in metric learning.
- type: int32 (default: 21134).
## DataProvider
* `--memory_threshold_on_load_data`
......
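For context on how these documented arguments are wired up: Paddle exposes them through gflags, so each one corresponds to a `DEFINE_*`/`DECLARE_*` pair like those removed later in this diff (`DECLARE_bool(external)`, `DEFINE_int32(data_server_port, ...)`). Below is a minimal, self-contained sketch of that mechanism using the two metric-learning flags; the `DEFINE_bool` line and the `main()` wrapper are assumptions for illustration, not Paddle source.

```cpp
#include <cstdio>
#include <gflags/gflags.h>

// Flag definitions mirroring the ones this PR removes; defaults follow the
// documentation above (bool default 0, port default 21134).
DEFINE_bool(external, false, "Whether to use external machine for metric learning");
DEFINE_int32(data_server_port, 21134, "Listening port for dserver");

int main(int argc, char* argv[]) {
  // Parses e.g. `--external=true --data_server_port=21200` from the command line.
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  if (FLAGS_external) {
    std::printf("using external data server on port %d\n", FLAGS_data_server_port);
  }
  return 0;
}
```

Running this sketch with `--external=true --data_server_port=21200` would print the chosen port; without `--external` it prints nothing.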
......@@ -24,9 +24,6 @@ limitations under the License. */
DEFINE_bool(allow_only_one_model_on_one_gpu,
true,
"If true, do not allow multiple models on one GPU device");
#ifdef PADDLE_METRIC_LEARNING
DECLARE_bool(external);
#endif
namespace paddle {
......@@ -45,11 +42,7 @@ MultiGradientMachine::MultiGradientMachine(const ModelConfig& config,
trainerBarrier_(FLAGS_trainer_count),
allBarrier_(FLAGS_trainer_count + 1),
inArgsCopied_(false) {
#ifdef PADDLE_METRIC_LEARNING
isPassGrad_ = FLAGS_external;
#else
isPassGrad_ = false;
#endif
numThreads_ = FLAGS_trainer_count;
if (useGpu) {
//! TODO(yuyang18): When useGpu=false && paddle is not compiled with gpu,
......
......@@ -381,8 +381,7 @@ void Layer::backwardActivation() {
void Layer::forwardDropOut() {
auto& outV = getOutputValue();
if (passType_ == PASS_TRAIN || passType_ == PASS_METRIC_TRAIN ||
passType_ == PASS_METRIC_TRAIN_WITH_NOERROR) {
if (passType_ == PASS_TRAIN) {
// new dropOutMask_ if dropOutMask_ is null ptr
Matrix::resizeOrCreate(dropOutMask_,
outV->getHeight(),
......
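After this change, dropout masking happens only for `PASS_TRAIN`. For reference, the guarded block samples a fresh dropout mask and applies it to the layer output; a minimal standalone sketch of that step follows. It is not Paddle's Matrix-based implementation, and the train-time rescaling convention is deliberately omitted, so treat it as illustrative only.

```cpp
#include <cstddef>
#include <random>
#include <vector>

// Sample a new mask each pass (the analogue of dropOutMask_) and zero each
// output element with probability `rate`.
void applyDropout(std::vector<float>& out, std::vector<float>& mask, float rate,
                  std::mt19937& rng) {
  std::bernoulli_distribution keep(1.0 - rate);
  mask.resize(out.size());
  for (std::size_t i = 0; i < out.size(); ++i) {
    mask[i] = keep(rng) ? 1.0f : 0.0f;  // 1 keeps the unit, 0 drops it
    out[i] *= mask[i];
  }
}
```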
......@@ -30,9 +30,6 @@ namespace paddle {
* the first solution arms itself with sendThreads_/recvThreads_ and sendJobQueue_/
* recvJobQueue_. The second solution uses a shared thread pool to manage
* connections.
* In addition to pserver, metric learning also uses the network to exchange
* features across multiple machines, so this class just abstracts some basic
* threads and queue buffer creation for them
*/
class BaseClient {
protected:
......
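The comment above contrasts two ways of managing connections; BaseClient keeps the first, pairing dedicated send/recv threads with per-direction job queues. The sketch below shows a stripped-down blocking job queue of the kind sendJobQueue_/recvJobQueue_ name, built on the standard library; the class name and layout are hypothetical, not Paddle's actual implementation.

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <utility>

// Hypothetical sketch: producers push jobs, a dedicated send or recv thread
// pops and executes them, blocking while the queue is empty.
class SimpleJobQueue {
public:
  void push(std::function<void()> job) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      jobs_.push(std::move(job));
    }
    notEmpty_.notify_one();
  }

  std::function<void()> pop() {
    std::unique_lock<std::mutex> lock(mutex_);
    notEmpty_.wait(lock, [this] { return !jobs_.empty(); });
    auto job = std::move(jobs_.front());
    jobs_.pop();
    return job;
  }

private:
  std::queue<std::function<void()>> jobs_;
  std::mutex mutex_;
  std::condition_variable notEmpty_;
};
```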
......@@ -367,11 +367,8 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
std::vector<Buffer>* outputBuffers) {
VLOG(1) << "pserver: addGradient";
/// forwardbackward delta from all trainers
/// indicate the fluctuation caused by forwardbackward.
#ifndef PADDLE_METRIC_LEARNING
// @TODO(yanfei):
// add support tuning forwardbackward balance for metric learning
// forwardbackward delta from all trainers
// indicate the fluctuation caused by forwardbackward.
if (!numPassFinishClients_) {
REGISTER_BARRIER_DELTA_SERVER_SET(
*statSet_,
......@@ -381,7 +378,6 @@ void ParameterServer2::addGradient(const SendParameterRequest& request,
request.forwardbackward_time(),
isSparseServer_ ? "_sparseUpdater" : "_denseUpdater");
}
#endif
{
/// approximately pure network overhead
......
......@@ -30,10 +30,6 @@ limitations under the License. */
#include "TrainerConfigHelper.h"
#include "TrainerInternal.h"
#ifdef PADDLE_METRIC_LEARNING
#include "paddle/internals/metric_learning/MetricTrainer.h"
#endif
DECLARE_int32(num_passes);
namespace paddle {
......@@ -201,12 +197,8 @@ protected:
// parameter util
std::unique_ptr<ParameterUtil> paramUtil_;
#ifdef PADDLE_METRIC_LEARNING
MetricTrainer trainerInternal_;
#else
// trainer Internal
TrainerInternal trainerInternal_;
#endif
};
} // namespace paddle
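The hunk above drops the last compile-time switch on PADDLE_METRIC_LEARNING in the trainer. The pattern being removed, sketched below with hypothetical stand-in types so it compiles on its own, selects the type of a member at build time; after this PR only the TrainerInternal branch remains.

```cpp
// Stub types standing in for the real classes, just so this sketch compiles.
struct TrainerInternal {};
struct MetricTrainer {};

// Hypothetical stand-in for paddle::Trainer showing the removed pattern:
// the metric-learning build swapped in an internal-only implementation.
class TrainerSketch {
protected:
#ifdef PADDLE_METRIC_LEARNING
  MetricTrainer trainerInternal_;    // metric-learning build (removed)
#else
  TrainerInternal trainerInternal_;  // the only remaining case after this PR
#endif
};
```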
......@@ -30,7 +30,6 @@ DEFINE_bool(parallel_nn,
DEFINE_int32(trainer_count, 1, "Defined how many trainers to train");
DEFINE_int32(gpu_id, 0, "Which gpu core to use");
DEFINE_int32(port, 20134, "Listening port for pserver");
DEFINE_int32(data_server_port, 21134, "Listening port for dserver");
DEFINE_int32(ports_num,
1,
"Number of ports for sending dense parameter,"
......
......@@ -19,7 +19,6 @@ limitations under the License. */
DECLARE_bool(parallel_nn);
DECLARE_int32(async_count);
DECLARE_int32(port);
DECLARE_int32(data_server_port);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_int32(trainer_count);
......
......@@ -23,11 +23,6 @@ enum PassType {
PASS_TEST, // Test pass
PASS_GC, // Gradient Check pass
PASS_METRIC, // pass for generate template output with no drop rate.
// pass for metric learning training with metric learning error, only used
// when we are doing KNN evaluation.
PASS_METRIC_TRAIN,
PASS_METRIC_TRAIN_WITH_NOERROR, // Pass for metric learning training
// with no evaluation.
};
enum ParameterType {
......