// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#if defined(PADDLE_WITH_NCCL)
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <thread>
#include <vector>
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/device_worker_factory.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"

namespace paddle {
namespace framework {

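// Initialize the pipeline trainer from the TrainerDesc: read the section
// configuration (number of microbatches, per-section programs and places),
// take the dataset's single reader, and create one SectionWorker per
// section. A minimal sketch of the relevant TrainerDesc fields, in proto
// text form (field names follow the accesses below; the values are only
// illustrative):
//
//   device_worker_name: "SectionWorker"
//   section_param {
//     num_microbatches: 4
//     start_cpu_core_id: 0
//     section_config { place: CUDAPlace place_id: 0 program_desc { ... } }
//   }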
void PipelineTrainer::Initialize(const TrainerDesc& trainer_desc,
                                 Dataset* dataset) {
  const auto& section_params = trainer_desc.section_param();
  num_microbatches_ = section_params.num_microbatches();
  VLOG(3) << "Number of microbatches per minibatch: " << num_microbatches_;
  section_num_ = section_params.section_config_size();
  VLOG(3) << "Number of program sections: " << section_num_;
  trainer_desc_ = trainer_desc;
  start_cpu_core_id_ = section_params.start_cpu_core_id();

  SetDataset(dataset);
  ParseDumpConfig(trainer_desc);
  // Get the dataset readers; pipeline supports exactly one reader for now.
  const std::vector<paddle::framework::DataFeed*> readers =
      dataset->GetReaders();
  VLOG(3) << "readers num: " << readers.size();
  int num_readers = readers.size();
  PADDLE_ENFORCE_EQ(num_readers, 1,
                    platform::errors::InvalidArgument(
                        "Number of dataset readers for pipeline "
                        "must be 1 now, but the value you give is %d.",
                        num_readers));
  auto* reader = readers[0];

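  // Create one SectionWorker per section, each bound to the place specified
  // in its SectionConfig.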
  workers_.resize(section_num_);
  for (int i = 0; i < section_num_; ++i) {
    const auto& section_config = section_params.section_config(i);
    platform::Place place;
    int place_id = section_config.place_id();
    switch (section_config.place()) {
      case SectionConfig::CPUPlace:
        place = platform::CPUPlace();
        break;
      case SectionConfig::CUDAPlace:
        // Note that one section has at most one GPU place in one pipeline
        PADDLE_ENFORCE_GE(
            place_id, 0,
            platform::errors::InvalidArgument(
                "The place_id value for CUDAPlace shoud be greater "
                "than or equal to 0, but the value you give is %d.",
                place_id));
        place = platform::CUDAPlace(place_id);
        break;
      case SectionConfig::CUDAPinnedPlace:
        place = platform::CUDAPinnedPlace();
        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unknown place type in SectionConfig: %d",
            section_config.place()));
    }
    places_.emplace_back(place);
    VLOG(3) << "Device worker place: " << place << ", device id: " << place_id
            << ", section: " << i;

    workers_[i] = DeviceWorkerFactory::CreateDeviceWorker(
        trainer_desc.device_worker_name());
    auto this_worker =
        std::dynamic_pointer_cast<paddle::framework::SectionWorker>(
            workers_[i]);
    if (i == 0) {
      // we only set reader for the first section
      this_worker->SetDataFeed(reader);
      this_worker->SetReaderPlace(place);
    }
    this_worker->SetThreadIndex(i);
    this_worker->SetSectionIndex(i);
    this_worker->SetPlace(place);
    this_worker->Initialize(trainer_desc);
    this_worker->SetMicrobatchNum(num_microbatches_);
  }
  // Propagate the debug flag; it selects the profiled path in Run().
  SetDebug(trainer_desc.debug());
}

void PipelineTrainer::InitOtherEnv(const ProgramDesc& main_program) {
  if (need_dump_field_) {
    InitDumpEnv();
  }
  VLOG(3) << "init other env done.";
}

std::string PipelineTrainer::GetDumpPath(int tid) {
  return string::format_string("%s/part-%05d", dump_fields_path_.c_str(), tid);
}

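// Create the channel that workers dump field values into and start the
// thread(s) that drain it into the part files named by GetDumpPath().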
void PipelineTrainer::InitDumpEnv() {
  queue_ = paddle::framework::MakeChannel<std::string>();
  // TODO(sandyhouse): make the number of dump threads configurable.
  dump_thread_num_ = 1;
  for (int i = 0; i < dump_thread_num_; i++) {
    dump_thread_.push_back(
        std::thread(std::bind(&TrainerBase::DumpWork, this, i)));
  }
}

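// Create the variables one (section, microbatch) pair needs. For microbatch
// 0 only, persistable variables and parameter gradients are created in the
// section's minibatch scope; parameters are additionally copied in from the
// root scope, while gradient buffers are merely initialized. Every other
// (non-persistable, non-gradient) variable gets a fresh instance in the
// microbatch scope.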
void PipelineTrainer::CopyParameters(int section_id, int microbatch_id,
                                     const ProgramDesc& program,
                                     const platform::Place& place) {
  auto& global_block = program.Block(0);
  std::map<std::string, int> param_map;
  for (auto& var : global_block.AllVars()) {
    if (var->Persistable()) {
      param_map[var->Name()] = 1;
    }
  }
  for (auto& var : global_block.AllVars()) {
    bool is_param_grad = false;
    size_t pos = 0;
    if ((pos = var->Name().find(kGradVarSuffix)) != std::string::npos) {
      auto prefix_name = var->Name().substr(0, pos);
      if (param_map.find(prefix_name) != param_map.end()) {
        is_param_grad = true;
      }
    }
    VLOG(3) << "Var name: " << var->Name();
    if ((var->Persistable() || is_param_grad) && microbatch_id == 0) {
      auto* ptr = root_scope_->FindVar(var->Name());
      auto* new_ptr = minibatch_scopes_[section_id]->Var(var->Name());
      VLOG(3) << "Create persistable var " << var->Name() << " for minibatch "
              << section_id << ", which pointer is " << new_ptr;
      InitializeVariable(new_ptr, var->GetType());
      if (is_param_grad) {
        continue;
      }
      const LoDTensor& root_tensor = ptr->Get<LoDTensor>();
      LoDTensor* minibatch_tensor = new_ptr->GetMutable<LoDTensor>();
      TensorCopy(*static_cast<const Tensor*>(&root_tensor), place,
                 static_cast<Tensor*>(minibatch_tensor));
    } else if (!var->Persistable() && !is_param_grad) {
      auto* ptr =
          microbatch_scopes_[section_id][microbatch_id]->Var(var->Name());
      VLOG(3) << "Create variable " << var->Name() << " for section "
              << section_id << " microbatch " << microbatch_id
              << ", which pointer is " << ptr;
      InitializeVariable(ptr, var->GetType());
    }
  }
}

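// Collect the skip vars of a section: the inputs of its enqueue ops whose
// names do not end with "@GRAD". They are later handed to the section's
// SectionWorker via SetSkipVars().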
void PipelineTrainer::GetSkipVars(int section_id, const ProgramDesc& program) {
  auto& global_block = program.Block(0);
  for (auto& op : global_block.AllOps()) {
    if (op->Type() != "enqueue") {
      continue;
    }
    auto input_arg_names = op->InputArgumentNames();
    PADDLE_ENFORCE_EQ(input_arg_names.size(), 1,
                      platform::errors::InvalidArgument(
                          "Number of input arguments for enqueue op must be 1, "
                          "but the value is %d.",
                          input_arg_names.size()));
    const std::string& input_arg_name = input_arg_names[0];
    const std::string grad_suffix = "@GRAD";
    // Skip vars are the enqueue inputs whose names do not end with "@GRAD".
    if (input_arg_name.size() < grad_suffix.size() ||
        input_arg_name.compare(input_arg_name.size() - grad_suffix.size(),
                               grad_suffix.size(), grad_suffix) != 0) {
      skip_vars_[section_id].emplace_back(input_arg_name);
      VLOG(3) << "add skip var name: " << input_arg_name;
    }
  }
}

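// Build the scope hierarchy: one minibatch scope per section under the root
// scope, and one microbatch scope per microbatch under each minibatch scope.
// Parameters are copied into the scopes via CopyParameters(), then the
// scopes and skip vars are handed to the section workers.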
void PipelineTrainer::InitTrainerEnv(const ProgramDesc& main_program,
                                     const platform::Place& place) {
  PADDLE_ENFORCE_NOT_NULL(root_scope_,
                          platform::errors::InvalidArgument(
                              "root_scope pointer can not be nullptr"));
  auto start_cpu_id = trainer_desc_.section_param().start_cpu_core_id();
  SectionWorker::cpu_id_.store(start_cpu_id);
  minibatch_scopes_.resize(section_num_);
  microbatch_scopes_.resize(section_num_);
  skip_vars_.resize(section_num_);

  VLOG(3) << "Init ScopeQueues and create all scopes";
  for (int i = 0; i < section_num_; ++i) {
    minibatch_scopes_[i] = &root_scope_->NewScope();
    std::shared_ptr<framework::ProgramDesc> program;
    program.reset(new ProgramDesc(
        trainer_desc_.section_param().section_config(i).program_desc()));
    microbatch_scopes_[i].resize(num_microbatches_);
    for (int j = 0; j < num_microbatches_; ++j) {
      microbatch_scopes_[i][j] = &minibatch_scopes_[i]->NewScope();
      CopyParameters(i, j, *program, places_[i]);
    }
    GetSkipVars(i, *program);
  }

  for (int i = 0; i < section_num_; ++i) {
    auto this_worker =
        std::dynamic_pointer_cast<paddle::framework::SectionWorker>(
            workers_[i]);
    this_worker->SetRootScope(root_scope_);
    this_worker->SetMinibatchScope(minibatch_scopes_[i]);
    this_worker->SetMicrobatchScopes(microbatch_scopes_[i]);
    this_worker->SetSkipVars(skip_vars_[i]);
  }
}

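// Start one thread per section; each thread runs its SectionWorker over the
// input, using the profiling variant when debug mode is enabled.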
void PipelineTrainer::Run() {
  VLOG(3) << "Going to run";
  for (int i = 0; i < section_num_; ++i) {
    if (!debug_) {
      section_threads_.push_back(
          std::thread(&DeviceWorker::TrainFiles, workers_[i].get()));
    } else {
      section_threads_.push_back(std::thread(
          &DeviceWorker::TrainFilesWithProfiler, workers_[i].get()));
    }
  }
}

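// Join the section threads, finalize dumping if it was enabled, copy the
// trained persistable parameters from the minibatch scopes back to the root
// scope, and reset the static batch id for the next run.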
void PipelineTrainer::Finalize() {
  for (auto& th : section_threads_) {
    th.join();
  }
  if (need_dump_field_) {
    FinalizeDumpEnv();
  }
  VLOG(3) << "copying back parameters. ";
  for (int i = 0; i < section_num_; ++i) {
    std::shared_ptr<framework::ProgramDesc> program;
    program.reset(new ProgramDesc(
        trainer_desc_.section_param().section_config(i).program_desc()));
    for (int j = 0; j < num_microbatches_; ++j) {
      auto& global_block = program->Block(0);
      for (auto& var : global_block.AllVars()) {
        if (var->Persistable()) {
          auto* ptr = root_scope_->FindVar(var->Name());
          LoDTensor* root_tensor = ptr->GetMutable<LoDTensor>();
          auto* minibatch_ptr = minibatch_scopes_[i]->Var(var->Name());
          const LoDTensor& minibatch_tensor = minibatch_ptr->Get<LoDTensor>();
          TensorCopy(*static_cast<const Tensor*>(&minibatch_tensor), places_[0],
                     static_cast<Tensor*>(root_tensor));
          VLOG(4) << "Copy persitable var " << var->Name() << " to root scope";
        }
      }
    }
  }
  root_scope_->DropKids();
  SectionWorker::ResetBatchId();
}

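// The scope exposed for a worker is the first microbatch scope of the
// corresponding section.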
Scope* PipelineTrainer::GetWorkerScope(int thread_id) {
  return microbatch_scopes_[thread_id][0];
}

}  // end namespace framework
}  // end namespace paddle
#endif