/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/async_executor.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/executor_thread_worker.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"
#include "paddle/fluid/framework/trainer_factory.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/pybind/pybind.h"

namespace paddle {
namespace framework {
AsyncExecutor::AsyncExecutor(Scope* scope, const platform::Place& place)
    : root_scope_(scope), place_(place) {}

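// Initialize this node as a parameter server. dist_desc is a protobuf text
// description of the distributed runtime and index is the rank of this
// server node; both are forwarded to the shared FleetWrapper instance.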
void AsyncExecutor::InitServer(const std::string& dist_desc, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitServer(dist_desc, index);
}

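// Initialize this node as a worker. host_sign_list carries the uint64
// endpoint signatures of all nodes, node_num is the total node count, and
// index is this worker's rank; all are forwarded to the shared FleetWrapper.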
void AsyncExecutor::InitWorker(const std::string& dist_desc,
                               const std::vector<uint64_t>& host_sign_list,
                               int node_num, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitWorker(dist_desc, host_sign_list, node_num, index);
}

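// Run the parameter server on this node and return its uint64 endpoint
// signature so it can be gathered by the other nodes.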
uint64_t AsyncExecutor::StartServer() { return fleet_ptr_->RunServer(); }

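// Stop the parameter server running on this node.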
void AsyncExecutor::StopServer() { fleet_ptr_->StopServer(); }

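// Hand the gathered endpoint signatures of all node_num server nodes to the
// fleet so that workers can connect to every server.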
void AsyncExecutor::GatherServers(const std::vector<uint64_t>& host_sign_list,
                                  int node_num) {
  fleet_ptr_->GatherServers(host_sign_list, node_num);
}

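// Train from a list of files: worker threads consume files from the shared
// filelist, each reading batches through its own DataFeed reader and running
// the ops of block 0 until the files are exhausted.
//
// A hypothetical caller sketch (names are illustrative, not from this file):
//   AsyncExecutor exe(&scope, platform::CPUPlace());
//   exe.RunFromFile(main_program, data_feed_desc_str,
//                   {"part-00000", "part-00001"}, /*thread_num=*/2,
//                   {"mean_cost"}, /*mode=*/"", /*debug=*/false);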
void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
                                const std::string& data_feed_desc_str,
                                const std::vector<std::string>& filelist,
                                const int thread_num,
                                const std::vector<std::string>& fetch_var_names,
                                const std::string& mode, const bool debug) {
  std::vector<std::thread> threads;

  auto& block = main_program.Block(0);
  for (const auto& var_name : fetch_var_names) {
    auto var_desc = block.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc, "%s is not found.", var_name);
    auto shapes = var_desc->GetShape();
    PADDLE_ENFORCE(shapes[shapes.size() - 1] == 1,
                   "var %s: Fetched var has wrong shape, "
                   "only variables with the last dimension size 1 supported",
                   var_name);
  }

  DataFeedDesc data_feed_desc;
  bool success = google::protobuf::TextFormat::ParseFromString(
      data_feed_desc_str, &data_feed_desc);
  PADDLE_ENFORCE(success, "Fail to parse DataFeedDesc from string: %s",
                 data_feed_desc_str.c_str());

  actual_thread_num = thread_num;
  int file_cnt = filelist.size();
  PADDLE_ENFORCE(file_cnt > 0, "File list cannot be empty");

  if (actual_thread_num > file_cnt) {
    VLOG(1) << "Thread num = " << thread_num << ", file num = " << file_cnt
            << ". Changing thread_num = " << file_cnt;
    actual_thread_num = file_cnt;
  }

  /*
    readerDesc: protobuf description for reader initialization
    argument: class_name, batch_size, use_slot, queue_size, buffer_size,
    padding_index

    reader:
    1) each thread has a reader; the reader reads input data and
    puts it into the input queue
    2) each reader has a Next() interface that fetches an instance
    from the input queue
   */
  // TODO: use a factory method to create the DataFeed readers
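  // For reference, each worker thread drives its reader roughly like this
  // (illustrative sketch; the real loop lives in ExecutorThreadWorker):
  //   reader->Start();
  //   int batch_size = 0;
  //   while ((batch_size = reader->Next()) > 0) {
  //     // the bound feed slots now hold one batch; run the ops of block 0
  //   }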
  std::vector<std::shared_ptr<DataFeed>> readers;
  PrepareReaders(readers, actual_thread_num, data_feed_desc, filelist);
#ifdef PADDLE_WITH_PSLIB
  PrepareDenseThread(mode);
#endif
  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers;
  workers.resize(actual_thread_num);
  for (auto& worker : workers) {
#ifdef PADDLE_WITH_PSLIB
    if (mode == "mpi") {
      worker.reset(new AsyncExecutorThreadWorker);
    } else {
      worker.reset(new ExecutorThreadWorker);
    }
#else
    worker.reset(new ExecutorThreadWorker);
#endif
  }

  // prepare thread resource here
  for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
    CreateThreads(workers[thidx].get(), main_program, readers[thidx],
                  fetch_var_names, root_scope_, thidx, debug);
  }

  // start executing ops in multiple threads
  for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
    if (debug) {
      threads.push_back(std::thread(&ExecutorThreadWorker::TrainFilesWithTimer,
                                    workers[thidx].get()));
    } else {
      threads.push_back(
          std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
    }
  }

  for (auto& th : threads) {
    th.join();
  }
#ifdef PADDLE_WITH_PSLIB
  if (mode == "mpi") {
    _pull_dense_thread->stop();
  }
#endif
  VLOG(3) << "start to run from files in async_executor";
  VLOG(3) << "Drop current scope kids";
  root_scope_->DropKids();
  return;
}

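// RunFromDataset is not implemented yet. Judging from the trainer headers
// included above (trainer.h, trainer_desc.pb.h, trainer_factory.h), a
// plausible outline would be (a hypothetical sketch with names assumed from
// those headers, not the final implementation):
//   TrainerDesc trainer_desc;
//   google::protobuf::TextFormat::ParseFromString(trainer_desc_str,
//                                                 &trainer_desc);
//   auto trainer = TrainerFactory::CreateTrainer(trainer_desc.class_name());
//   trainer->Initialize(trainer_desc, data_set);
//   trainer->SetScope(root_scope_);
//   trainer->InitTrainerEnv(main_program, place_);
//   trainer->Run();
//   trainer->Finalize();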
// TODO: RunFromDataset
void AsyncExecutor::RunFromDataset(const ProgramDesc& main_program,
                                   Dataset* data_set,
                                   const std::string& trainer_desc_str,
                                   const bool debug) {}

}  // end namespace framework
}  // end namespace paddle