/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/async_executor.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/executor_thread_worker.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/pybind/pybind.h"

namespace paddle {
namespace framework {
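// AsyncExecutor runs training over a list of input files with one worker
// thread per file shard; in distributed mode it coordinates parameter
// servers and workers through the FleetWrapper singleton.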
AsyncExecutor::AsyncExecutor(Scope* scope, const platform::Place& place)
    : root_scope_(scope), place_(place) {}

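// Initialize the parameter-server side of the fleet service described by
// dist_desc; index identifies this server node.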
void AsyncExecutor::InitServer(const std::string& dist_desc, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitServer(dist_desc, index);
}

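// Initialize this process as a fleet worker; host_sign_list and node_num
// describe the server cluster, index identifies this worker.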
void AsyncExecutor::InitWorker(const std::string& dist_desc,
                               const std::vector<uint64_t>& host_sign_list,
                               int node_num, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitWorker(dist_desc, host_sign_list, node_num, index);
}

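// Launch the parameter server and return the value reported by
// FleetWrapper::RunServer().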
uint64_t AsyncExecutor::StartServer() { return fleet_ptr_->RunServer(); }

void AsyncExecutor::StopServer() { fleet_ptr_->StopServer(); }

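// Pass the gathered server host signatures to the fleet so that all
// node_num servers are known to this process.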
void AsyncExecutor::GatherServers(const std::vector<uint64_t>& host_sign_list,
                                  int node_num) {
  fleet_ptr_->GatherServers(host_sign_list, node_num);
}

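// InitModel and SaveModel are currently no-ops in AsyncExecutor.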
void AsyncExecutor::InitModel() {}

void AsyncExecutor::SaveModel(const std::string& path) {}

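// Train from a list of files: parse the DataFeed description, create one
// reader and one worker per thread, run all threads to completion, then
// drop the child scopes created during this run.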
void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
                                const std::string& data_feed_desc_str,
                                const std::vector<std::string>& filelist,
                                const int thread_num,
                                const std::vector<std::string>& fetch_var_names,
                                const std::string& mode, const bool debug) {
  std::vector<std::thread> threads;

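  // Validate fetch targets: every fetched variable must exist in the main
  // block and its last dimension must be 1.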
  auto& block = main_program.Block(0);
  for (auto var_name : fetch_var_names) {
    auto var_desc = block.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc, "%s is not found.", var_name);
    auto shapes = var_desc->GetShape();
    PADDLE_ENFORCE(shapes[shapes.size() - 1] == 1,
                   "var %s: fetched var has wrong shape; "
                   "only variables whose last dimension is 1 are supported",
                   var_name);
  }

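  // Parse the text-format DataFeedDesc that configures the readers.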
  DataFeedDesc data_feed_desc;
  google::protobuf::TextFormat::ParseFromString(data_feed_desc_str,
                                                &data_feed_desc);

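  // There is no point in having more threads than input files, so cap the
  // actual thread count at the file count.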
  actual_thread_num = thread_num;
  int file_cnt = filelist.size();
  PADDLE_ENFORCE(file_cnt > 0, "File list cannot be empty");

  if (actual_thread_num > file_cnt) {
    VLOG(1) << "Thread num = " << thread_num << ", file num = " << file_cnt
            << ". Changing thread_num = " << file_cnt;
    actual_thread_num = file_cnt;
  }

  /*
    readerDesc: protobuf description for reader initialization
    arguments: class_name, batch_size, use_slot, queue_size, buffer_size,
    padding_index

    reader:
    1) each thread has a reader; the reader reads input data and
    puts it into the input queue
    2) each reader has a Next() interface that fetches one instance
    from the input queue
   */
  // TODO: create the DataFeed readers through a factory method
  std::vector<std::shared_ptr<DataFeed>> readers;
  PrepareReaders(readers, actual_thread_num, data_feed_desc, filelist);
#ifdef PADDLE_WITH_PSLIB
  PrepareDenseThread(mode);
#endif
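  // Create the per-thread workers; with PSLIB in "mpi" mode an
  // AsyncExecutorThreadWorker (which talks to the parameter server) is used,
  // otherwise a plain ExecutorThreadWorker.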
  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers;
  workers.resize(actual_thread_num);
  for (auto& worker : workers) {
#ifdef PADDLE_WITH_PSLIB
    if (mode == "mpi") {
      worker.reset(new AsyncExecutorThreadWorker);
    } else {
      worker.reset(new ExecutorThreadWorker);
    }
#else
    worker.reset(new ExecutorThreadWorker);
#endif
  }

  // prepare thread resource here
  for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
    CreateThreads(workers[thidx].get(), main_program, readers[thidx],
                  fetch_var_names, root_scope_, thidx, debug);
  }

  // start executing ops in multiple threads
  for (int thidx = 0; thidx < actual_thread_num; ++thidx) {
    if (debug) {
      threads.push_back(std::thread(&ExecutorThreadWorker::TrainFilesWithTimer,
                                    workers[thidx].get()));
    } else {
      threads.push_back(
          std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
    }
  }

  for (auto& th : threads) {
    th.join();
  }
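  // All workers have finished; in distributed (mpi) mode, stop the background
  // thread that pulls dense parameters.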
#ifdef PADDLE_WITH_PSLIB
  if (mode == "mpi") {
    _pull_dense_thread->stop();
  }
#endif
  root_scope_->DropKids();

  return;
}

}  // end namespace framework
}  // end namespace paddle