/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/async_executor.h"
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"

#include "gflags/gflags.h"
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/executor_thread_worker.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"
#include "paddle/fluid/framework/trainer_factory.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/pybind/pybind.h"

namespace paddle {
namespace framework {
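// AsyncExecutor does not own the scope: it keeps the caller's root scope and
// the device place on which its worker threads will run.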
AsyncExecutor::AsyncExecutor(Scope* scope, const platform::Place& place)
    : root_scope_(scope), place_(place) {}

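// Initialize the parameter-server side of distributed training. dist_desc is
// the serialized distributed-runtime configuration and index is this node's
// rank among the servers; both are forwarded to the FleetWrapper singleton.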
void AsyncExecutor::InitServer(const std::string& dist_desc, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitServer(dist_desc, index);
}

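// Initialize a worker (trainer) node. host_sign_list identifies the server
// endpoints (node_num entries) and index is this worker's rank; everything
// is forwarded to the FleetWrapper singleton.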
void AsyncExecutor::InitWorker(const std::string& dist_desc,
                               const std::vector<uint64_t>& host_sign_list,
                               int node_num, int index) {
  fleet_ptr_ = FleetWrapper::GetInstance();
  fleet_ptr_->InitWorker(dist_desc, host_sign_list, node_num, index);
}

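// Launch the parameter server on this node and return the value reported by
// FleetWrapper::RunServer().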
uint64_t AsyncExecutor::StartServer() { return fleet_ptr_->RunServer(); }

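// Shut down the parameter server through the FleetWrapper.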
void AsyncExecutor::StopServer() { fleet_ptr_->StopServer(); }

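// Collect the endpoints of all node_num servers once they are up, so that
// subsequent requests can reach every server.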
void AsyncExecutor::GatherServers(const std::vector<uint64_t>& host_sign_list,
                                  int node_num) {
  fleet_ptr_->GatherServers(host_sign_list, node_num);
}

// TODO: implement InitModel
void AsyncExecutor::InitModel() {}

// TODO: implement SaveModel
void AsyncExecutor::SaveModel(const std::string& path) {}

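// Run one pass over `filelist` using `thread_num` worker threads (capped at
// the number of files). A minimal call sketch; the program, feed description,
// file names, and fetch target below are hypothetical, for illustration only:
//
//   paddle::framework::Scope scope;
//   paddle::framework::AsyncExecutor exe(&scope, platform::CPUPlace());
//   std::string feed_desc;  // a serialized DataFeedDesc proto
//   exe.RunFromFile(program, feed_desc, {"part-000", "part-001"},
//                   /*thread_num=*/2, {"mean_cost"}, /*mode=*/"", false);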
void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
                                const std::string& data_feed_desc_str,
                                const std::vector<std::string>& filelist,
                                const int thread_num,
                                const std::vector<std::string>& fetch_var_names,
                                const std::string& mode, const bool debug) {
  std::vector<std::thread> threads;

  auto& block = main_program.Block(0);
  for (const auto& var_name : fetch_var_names) {
    auto var_desc = block.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(
        var_desc, platform::errors::NotFound("%s is not found.", var_name));
    auto shapes = var_desc->GetShape();
    PADDLE_ENFORCE_EQ(shapes[shapes.size() - 1], 1,
                      platform::errors::InvalidArgument(
                          "var %s: Fetched var has wrong shape, only "
                          "variables whose last dimension is 1 are supported",
                          var_name));
  }

  DataFeedDesc data_feed_desc;
  bool success = data_feed_desc.ParseFromString(data_feed_desc_str);
  PADDLE_ENFORCE_EQ(success, true,
                    platform::errors::InvalidArgument(
                        "Fail to parse DataFeedDesc from string: %s.",
                        data_feed_desc_str.c_str()));

  actual_thread_num_ = thread_num;
  int file_cnt = filelist.size();
  PADDLE_ENFORCE_GT(file_cnt, 0,
                    platform::errors::NotFound("Input file list is empty"));

  if (actual_thread_num_ > file_cnt) {
    VLOG(1) << "Thread num = " << thread_num << ", file num = " << file_cnt
            << ". Changing thread_num = " << file_cnt;
    actual_thread_num_ = file_cnt;
  }

  /*
    readerDesc: protobuf description for reader initialization
    arguments: class_name, batch_size, use_slot, queue_size, buffer_size,
    padding_index

    reader:
    1) each thread has its own reader; the reader reads input data and
    puts it into an input queue
    2) each reader exposes a Next() interface that fetches one instance
    from the input queue
   */
  // TODO: create the DataFeed through a factory method
  std::vector<std::shared_ptr<DataFeed>> readers;
  /*
  PrepareReaders(readers, actual_thread_num_, data_feed_desc, filelist);
#ifdef PADDLE_WITH_PSLIB
  PrepareDenseThread(mode);
#endif
  */
  std::vector<std::shared_ptr<ExecutorThreadWorker>> workers;
  workers.resize(actual_thread_num_);
  for (auto& worker : workers) {
#ifdef PADDLE_WITH_PSLIB
    if (mode == "mpi") {
      worker.reset(new AsyncExecutorThreadWorker);
    } else {
      worker.reset(new ExecutorThreadWorker);
    }
#else
    worker.reset(new ExecutorThreadWorker);
#endif
  }

  // prepare thread resource here
  /*
  for (int thidx = 0; thidx < actual_thread_num_; ++thidx) {
    CreateThreads(workers[thidx].get(), main_program, readers[thidx],
                  fetch_var_names, root_scope_, thidx, debug);
  }
  */

  // start executing ops in multiple threads
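  // In debug mode each worker runs TrainFilesWithTimer, which additionally
  // reports timing statistics; otherwise it runs the plain TrainFiles loop.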
  for (int thidx = 0; thidx < actual_thread_num_; ++thidx) {
    if (debug) {
      threads.push_back(std::thread(&ExecutorThreadWorker::TrainFilesWithTimer,
                                    workers[thidx].get()));
    } else {
      threads.push_back(
          std::thread(&ExecutorThreadWorker::TrainFiles, workers[thidx].get()));
    }
  }

  for (auto& th : threads) {
    th.join();
  }
  // TODO(guru4elephant): we don't need this
  /*
#ifdef PADDLE_WITH_PSLIB
  if (mode == "mpi") {
    _pull_dense_thread->stop();
  }
#endif
  */
  VLOG(3) << "start to run from files in async_executor";
  VLOG(3) << "Drop current scope kids";
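  // Release the child scopes created during this run so that repeated calls
  // do not accumulate memory in root_scope_.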
  root_scope_->DropKids();
}

}  // end namespace framework
}  // end namespace paddle