/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/async_executor.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/executor_thread_worker.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/pybind/pybind.h"
#ifdef PADDLE_WITH_PSLIB
#include <pslib.h>
#endif

namespace paddle {
namespace framework {
AsyncExecutor::AsyncExecutor(Scope* scope, const platform::Place& place)
    : root_scope_(scope), place_(place) {}
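
// A minimal usage sketch (hypothetical driver code; in practice AsyncExecutor
// is constructed and driven through the Python-side fluid.AsyncExecutor
// wrapper):
//
//   framework::Scope scope;
//   AsyncExecutor exe(&scope, platform::CPUPlace());
//   exe.RunFromFile(main_program, data_feed_desc_str, filelist,
//                   /*thread_num=*/4, fetch_var_names, /*mode=*/"", false);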

void AsyncExecutor::CreateThreads(
    ExecutorThreadWorker* worker, const ProgramDesc& main_program,
    const std::shared_ptr<DataFeed>& reader,
    const std::vector<std::string>& fetch_var_names, Scope* root_scope,
    const int thread_index, const bool debug) {
  worker->SetThreadId(thread_index);
  worker->SetDebug(debug);
  worker->SetRootScope(root_scope);
  worker->CreateThreadResource(main_program, place_);
  worker->SetDataFeed(reader);
  worker->SetFetchVarNames(fetch_var_names);
  worker->BindingDataFeedMemory();
#ifdef PADDLE_WITH_PSLIB
  worker->SetPSlibPtr(_pslib_ptr);
  worker->SetPullDenseThread(_pull_dense_thread);
  worker->SetParamConfig(&_param_config);
#endif
}

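// PrepareReaders creates one DataFeed per training thread via the factory and
// initializes each with the feed description (batch size, queue size, slots).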
void PrepareReaders(std::vector<std::shared_ptr<DataFeed>>& readers,  // NOLINT
                    const int thread_num, const DataFeedDesc& data_feed_desc,
                    const std::vector<std::string>& filelist) {
  readers.resize(thread_num);
  for (size_t i = 0; i < readers.size(); ++i) {
    readers[i] = DataFeedFactory::CreateDataFeed(data_feed_desc.name());
    readers[i]->Init(data_feed_desc);  // set batch_size and queue_size here
  }
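  // The file list is shared among DataFeed instances in this design, so
  // setting it on the first reader is enough to dispatch files to all threads.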
  readers[0]->SetFileList(filelist);
}

#ifdef PADDLE_WITH_PSLIB
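// InitServer brings up the PSlib parameter-server instance on this node;
// `index` is this server's rank in the cluster described by `dist_desc`.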
void AsyncExecutor::InitServer(const std::string& dist_desc, int index) {
  _pslib_ptr = std::make_shared<paddle::distributed::PSlib>();
  _pslib_ptr->init_server(dist_desc, index);
  InitParamConfig();
}

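// InitWorker connects this trainer to the parameter servers. `host_sign_list`
// holds the gathered host signatures of all `node_num` nodes; `index` is this
// worker's rank.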
void AsyncExecutor::InitWorker(const std::string& dist_desc,
                               const std::vector<uint64_t>& host_sign_list,
                               int node_num, int index) {
  _pslib_ptr = std::make_shared<paddle::distributed::PSlib>();
  // PSlib expects a mutable pointer; const_cast is required here because
  // static_cast cannot strip const from host_sign_list.data().
  _pslib_ptr->init_worker(dist_desc,
                          const_cast<uint64_t*>(host_sign_list.data()),
                          node_num, index);

  InitParamConfig();
}

uint64_t AsyncExecutor::StartServer() { return _pslib_ptr->run_server(); }

void AsyncExecutor::StopServer() { _pslib_ptr->stop_server(); }

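// GatherServers passes the collected host signatures of all server nodes to
// PSlib so this client can locate every server.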
void AsyncExecutor::GatherServers(const std::vector<uint64_t>& host_sign_list,
                                  int node_num) {
  _pslib_ptr->gather_servers(const_cast<uint64_t*>(host_sign_list.data()),
                             node_num);
}

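// InitParamConfig caches the parts of the PSlib configuration needed at run
// time: the sparse feature dimension (from the first "SparseTable" server
// table), push-wait thresholds, the ops to skip, and the per-table sparse and
// dense variable/gradient names.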
void AsyncExecutor::InitParamConfig() {
  for (int i = 0; i < _pslib_ptr->get_param()
                          ->server_param()
                          .downpour_server_param()
                          .downpour_table_param_size();
       ++i) {
    if (_pslib_ptr->get_param()
            ->server_param()
            .downpour_server_param()
            .downpour_table_param(i)
            .table_class()
            .find("SparseTable") != -1) {
      _param_config.fea_dim = _pslib_ptr->get_param()
                                  ->server_param()
                                  .downpour_server_param()
                                  .downpour_table_param(i)
                                  .accessor()
                                  .fea_dim();
      break;
    }
  }
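  // The first two entries of a sparse feature value hold auxiliary counters
  // (show/click in PSlib's downpour CTR layout), so the usable slot dimension
  // is fea_dim - 2.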
  _param_config.slot_dim = _param_config.fea_dim - 2;
  _param_config.tmp_push_dense_wait_times = static_cast<int32_t>(
      _pslib_ptr->get_param()->trainer_param().push_dense_per_batch());
  _param_config.tmp_push_sparse_wait_times = static_cast<int32_t>(
      _pslib_ptr->get_param()->trainer_param().push_sparse_per_batch());

  for (auto t = 0u; t < _pslib_ptr->get_param()->trainer_param().skip_op_size();
       ++t) {
    _param_config.skip_op.push_back(
        _pslib_ptr->get_param()->trainer_param().skip_op(t));
  }

  for (auto t = 0u;
       t < _pslib_ptr->get_param()->trainer_param().sparse_table_size(); ++t) {
    auto& table = _pslib_ptr->get_param()->trainer_param().sparse_table(t);
    std::vector<std::string> tmp_sparse_variable_name;
    for (int i = 0; i < table.slot_value_size(); ++i) {
      tmp_sparse_variable_name.push_back(table.slot_value(i));
      _param_config.slot_alias_to_table[table.slot_key(i)] = table.table_id();
    }
    std::vector<std::string> tmp_sparse_gradient_variable_name;
    for (auto i = 0u; i < table.slot_gradient_size(); ++i) {
      tmp_sparse_gradient_variable_name.push_back(table.slot_gradient(i));
    }
    _param_config.slot_input_vec[table.table_id()] =
        std::move(tmp_sparse_variable_name);
    _param_config.gradient_var[table.table_id()] =
        std::move(tmp_sparse_gradient_variable_name);
    _param_config.sparse_table_id.push_back(table.table_id());
  }

  for (auto t = 0u;
       t < _pslib_ptr->get_param()->trainer_param().dense_table_size(); ++t) {
    auto& table = _pslib_ptr->get_param()->trainer_param().dense_table(t);
    std::vector<std::string> tmp_dense_variable_name;
    for (int i = 0; i < table.dense_variable_name_size(); ++i) {
      tmp_dense_variable_name.push_back(table.dense_variable_name(i));
    }
    std::vector<std::string> tmp_dense_gradient_variable_name;
    for (auto i = 0u; i < table.dense_gradient_variable_name_size(); ++i) {
      tmp_dense_gradient_variable_name.push_back(
          table.dense_gradient_variable_name(i));
    }
    _param_config.dense_variable_name[table.table_id()] =
        std::move(tmp_dense_variable_name);
    _param_config.dense_gradient_variable_name[table.table_id()] =
        std::move(tmp_dense_gradient_variable_name);
    _param_config.dense_table_id.push_back(table.table_id());
    _param_config.dense_table_size.push_back(table.fea_dim());
  }
}

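// InitModel fills every dense parameter with scaled Gaussian noise locally and
// pushes the result to the parameter server, so all nodes start training from
// the same initial weights.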
void AsyncExecutor::InitModel() {
  for (auto table_id : _param_config.dense_table_id) {
    std::vector<paddle::ps::Region> regions;
    for (auto& t : _param_config.dense_variable_name[table_id]) {
      Variable* var = root_scope_->FindVar(t);
      CHECK(var != nullptr) << "var[" << t << "] not found";
      LoDTensor* tensor = var->GetMutable<LoDTensor>();

      float* g = tensor->data<float>();
      CHECK(g != nullptr) << "var[" << t << "] value not initialized";

      // Scale the base range by 1/sqrt(rows) so larger parameters start with
      // proportionally smaller values.
      float init_range = 0.2f;
      int rown = tensor->dims()[0];
      init_range /= sqrt(rown);

      std::normal_distribution<float> ndistr(0.0, 1.0);
      for (int64_t i = 0; i < tensor->numel(); ++i) {
        g[i] = ndistr(local_random_engine()) * init_range;
      }

      paddle::ps::Region reg(g, tensor->numel());
      regions.emplace_back(std::move(reg));
    }

    auto push_status = _pslib_ptr->_worker_ptr->push_dense_param(
        regions.data(), regions.size(), table_id);
    push_status.wait();
    auto status = push_status.get();
    if (status != 0) {
      LOG(FATAL) << "push dense param failed, status[" << status << "]";
      exit(-1);
    }
  }
}

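// SaveModel flushes pending parameter updates, then asks the parameter server
// to persist the model under `path`.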
void AsyncExecutor::SaveModel(const std::string& path) {
  auto ret = _pslib_ptr->_worker_ptr->flush();
  ret.wait();
  ret = _pslib_ptr->_worker_ptr->save(path, 0);
  ret.wait();
  int32_t feasign_cnt = ret.get();
  if (feasign_cnt == -1) {  // TODO(colourful-tree): should be feasign_cnt < 0
    LOG(FATAL) << "save model failed";
    exit(-1);
  }
}

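// In distributed ("mpi") mode, start a background thread that keeps pulling
// fresh dense parameters from the servers while the training threads run; the
// single-machine path needs no puller.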
void AsyncExecutor::PrepareDenseThread(const std::string& mode) {
  if (mode == "mpi") {
    DensePullThreadParam param;
    param.ps_client = _pslib_ptr->_worker_ptr;
    param.threshold = 1;
    param.training_thread_num = actual_thread_num;
    param.root_scope = root_scope_;
    param.dense_params = &_param_config.dense_variable_name;

    _pull_dense_thread = std::make_shared<DensePullThread>(param);
    _pull_dense_thread->start();
  }
}
#endif

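// RunFromFile is the main training entry point: it validates the fetch
// variables, parses the DataFeedDesc, caps the thread count at the number of
// input files, builds one reader and one worker per thread, runs TrainFiles
// on every worker, and finally drops the intermediate scopes.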
void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
                                const std::string& data_feed_desc_str,
                                const std::vector<std::string>& filelist,
                                const int thread_num,
                                const std::vector<std::string>& fetch_var_names,
                                const std::string& mode, const bool debug) {
  std::vector<std::thread> threads;

  auto& block = main_program.Block(0);
  for (const auto& var_name : fetch_var_names) {
    auto var_desc = block.FindVar(var_name);
    auto shapes = var_desc->GetShape();
    PADDLE_ENFORCE(shapes[shapes.size() - 1] == 1,
                   "var %s: fetched var has wrong shape; "
                   "only variables whose last dimension is 1 are supported",
                   var_name);
  }

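  // `data_feed_desc_str` is a DataFeedDesc protobuf in text format. A rough
  // sketch (field names follow data_feed.proto; the values are made up):
  //
  //   name: "MultiSlotDataFeed"
  //   batch_size: 32
  //   multi_slot_desc {
  //     slots { name: "words" type: "uint64" is_dense: false is_used: true }
  //   }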
  DataFeedDesc data_feed_desc;
  bool parse_ok = google::protobuf::TextFormat::ParseFromString(
      data_feed_desc_str, &data_feed_desc);
  PADDLE_ENFORCE(parse_ok, "Failed to parse DataFeedDesc from string");

  actual_thread_num = thread_num;
  int file_cnt = filelist.size();
  PADDLE_ENFORCE(file_cnt > 0, "File list cannot be empty");

  if (actual_thread_num > file_cnt) {
    VLOG(1) << "Thread num = " << thread_num << ", file num = " << file_cnt
            << ". Changing thread_num = " << file_cnt;
    actual_thread_num = file_cnt;
  }

  /*
    readerDesc: protobuf description for reader initialization
    argument: class_name, batch_size, use_slot, queue_size, buffer_size,
    padding_index

    reader:
    1) each thread has a reader; the reader reads input data and
    puts it into the input queue
    2) each reader has a Next() interface that fetches an instance
    from the input queue
   */
  // TODO: should be a factory method for creating the datafeed
  std::vector<std::shared_ptr<DataFeed>> readers;
  PrepareReaders(readers, actual_thread_num, data_feed_desc, filelist);
#ifdef PADDLE_WITH_PSLIB
  PrepareDenseThread(mode);
#endif
  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers;
  workers.resize(actual_thread_num);
  for (auto& worker : workers) {
#ifdef PADDLE_WITH_PSLIB
    if (mode == "mpi") {
      worker.reset(new AsyncExecutorThreadWorker);
    } else {
      worker.reset(new ExecutorThreadWorker);
    }
#else
    worker.reset(new ExecutorThreadWorker);
#endif
  }

  // prepare thread resource here
  for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
    CreateThreads(workers[thidx].get(), main_program, readers[thidx],
                  fetch_var_names, root_scope_, thidx, debug);
  }

  // start executing ops in multiple threads
  for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
    threads.push_back(
        std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
  }

  for (auto& th : threads) {
    th.join();
  }
#ifdef PADDLE_WITH_PSLIB
  if (mode == "mpi") {
    _pull_dense_thread->stop();
  }
#endif
  root_scope_->DropKids();

  return;
}

}  // end namespace framework
}  // end namespace paddle