async_executor.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/async_executor.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/executor_thread_worker.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"
#include "paddle/fluid/framework/trainer_factory.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/pybind/pybind.h"

namespace paddle {
namespace framework {
AsyncExecutor::AsyncExecutor(Scope* scope, const platform::Place& place)
    : root_scope_(scope), place_(place) {}

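// Distributed setup: the methods below simply forward to the FleetWrapper
// singleton, which wraps the PSLib parameter-server API; AsyncExecutor itself
// keeps no server/worker state beyond fleet_ptr_.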
void AsyncExecutor::InitServer(const std::string& dist_desc, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitServer(dist_desc, index);
}

void AsyncExecutor::InitWorker(const std::string& dist_desc,
                               const std::vector<uint64_t>& host_sign_list,
                               int node_num, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitWorker(dist_desc, host_sign_list, node_num, index);
}

uint64_t AsyncExecutor::StartServer() { return fleet_ptr_->RunServer(); }

void AsyncExecutor::StopServer() { fleet_ptr_->StopServer(); }

void AsyncExecutor::GatherServers(const std::vector<uint64_t>& host_sign_list,
                                  int node_num) {
  fleet_ptr_->GatherServers(host_sign_list, node_num);
}

// TODO: InitModel
void AsyncExecutor::InitModel() {}

// TODO: SaveModel
void AsyncExecutor::SaveModel(const std::string& path) {}

void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
                                const std::string& data_feed_desc_str,
                                const std::vector<std::string>& filelist,
                                const int thread_num,
                                const std::vector<std::string>& fetch_var_names,
                                const std::string& mode, const bool debug) {
  std::vector<std::thread> threads;

  auto& block = main_program.Block(0);
  for (auto var_name : fetch_var_names) {
    auto var_desc = block.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc, "%s is not found.", var_name);
    auto shapes = var_desc->GetShape();
    PADDLE_ENFORCE(shapes[shapes.size() - 1] == 1,
                   "var %s: fetched var has wrong shape; "
                   "only variables whose last dimension is 1 are supported",
                   var_name);
  }

  DataFeedDesc data_feed_desc;
  bool success = data_feed_desc.ParseFromString(data_feed_desc_str);
  PADDLE_ENFORCE(success, "Failed to parse DataFeedDesc from string:\n%s",
                 data_feed_desc_str.c_str());

  actual_thread_num_ = thread_num;
  int file_cnt = filelist.size();
  PADDLE_ENFORCE(file_cnt > 0, "File list cannot be empty");

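  // Each file is consumed by a single thread, so more threads than files
  // would leave some threads idle; clamp the thread count to the file count.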
  if (actual_thread_num_ > file_cnt) {
    VLOG(1) << "Thread num = " << thread_num << ", file num = " << file_cnt
            << ". Changing thread_num = " << file_cnt;
    actual_thread_num_ = file_cnt;
  }

  /*
    readerDesc: protobuf description for reader initialization
    arguments: class_name, batch_size, use_slot, queue_size, buffer_size,
    padding_index

    reader:
    1) each thread has a reader; the reader reads input data and
       puts it into the input queue
    2) each reader has a Next() interface that can fetch an instance
       from the input queue
   */
  // TODO: should be a factory method for creating the DataFeed
  std::vector<std::shared_ptr<DataFeed>> readers;
  /*
  PrepareReaders(readers, actual_thread_num_, data_feed_desc, filelist);
#ifdef PADDLE_WITH_PSLIB
  PrepareDenseThread(mode);
#endif
  */
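  // A minimal sketch of what reader construction could look like here,
  // assuming DataFeedFactory::CreateDataFeed and DataFeed::Init keep their
  // current signatures (illustrative only, mirroring the commented-out
  // PrepareReaders above):
  //   readers.resize(actual_thread_num_);
  //   for (auto& reader : readers) {
  //     reader = DataFeedFactory::CreateDataFeed(data_feed_desc.name());
  //     reader->Init(data_feed_desc);
  //   }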
  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers;
  workers.resize(actual_thread_num_);
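  // Worker selection: with PSLIB enabled, "mpi" mode uses
  // AsyncExecutorThreadWorker (which adds parameter-server pull/push);
  // otherwise the plain ExecutorThreadWorker is used.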
  for (auto& worker : workers) {
#ifdef PADDLE_WITH_PSLIB
    if (mode == "mpi") {
      worker.reset(new AsyncExecutorThreadWorker);
    } else {
      worker.reset(new ExecutorThreadWorker);
    }
#else
    worker.reset(new ExecutorThreadWorker);
#endif
  }

  // prepare thread resource here
  /*
  for (int thidx = 0; thidx < actual_thread_num_; ++thidx) {
    CreateThreads(workers[thidx].get(), main_program, readers[thidx],
                  fetch_var_names, root_scope_, thidx, debug);
  }
  */

  // start executing ops in multiple threads
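  // In debug mode each thread runs TrainFilesWithTimer, the instrumented
  // variant of TrainFiles that also logs per-operator timing statistics.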
  for (int thidx = 0; thidx < actual_thread_num_; ++thidx) {
    if (debug) {
      threads.push_back(std::thread(&ExecutorThreadWorker::TrainFilesWithTimer,
                                    workers[thidx].get()));
    } else {
      threads.push_back(
          std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
    }
  }

  for (auto& th : threads) {
    th.join();
  }
  // TODO(guru4elephant): we don't need this
  /*
#ifdef PADDLE_WITH_PSLIB
  if (mode == "mpi") {
    _pull_dense_thread->stop();
  }
#endif
  */
  VLOG(3) << "start to run from files in async_executor";
  VLOG(3) << "Drop current scope kids";
  root_scope_->DropKids();
  return;
}

}  // end namespace framework
}  // end namespace paddle
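
// Usage sketch (illustrative only): AsyncExecutor is normally driven from the
// Python API (paddle.fluid.AsyncExecutor). Assuming `program` holds a
// deserialized ProgramDesc and `feed_desc_str` a text-format DataFeedDesc,
// an equivalent C++ call sequence might look like:
//
//   paddle::framework::Scope scope;
//   paddle::framework::AsyncExecutor executor(&scope,
//                                             paddle::platform::CPUPlace());
//   std::vector<std::string> files = {"train_part0.txt", "train_part1.txt"};
//   executor.RunFromFile(program, feed_desc_str, files, /*thread_num=*/2,
//                        /*fetch_var_names=*/{}, /*mode=*/"", /*debug=*/false);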