// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#if defined(PADDLE_WITH_NCCL)
#include "paddle/fluid/framework/data_feed_factory.h"
#include "paddle/fluid/framework/device_worker_factory.h"
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/trainer_desc.pb.h"

namespace paddle {
namespace framework {

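// Initialize reads the pipeline settings from the TrainerDesc proto: the
// number of microbatches per minibatch, the number of program sections, and
// the place (CPU, CUDA, or CUDA-pinned) each section runs on. It creates one
// SectionWorker per section and binds the single dataset reader to the first
// section. A trainer is typically driven in the order Initialize ->
// InitTrainerEnv -> InitOtherEnv -> Run -> Finalize.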
void PipelineTrainer::Initialize(const TrainerDesc& trainer_desc,
                                 Dataset* dataset) {
  const auto& section_params = trainer_desc.section_param();
  num_microbatches_ = section_params.num_microbatches();
  VLOG(3) << "Number of microbatches per minibatch: " << num_microbatches_;
  section_num_ = section_params.section_config_size();
  VLOG(3) << "Number of program sections: " << section_num_;
  trainer_desc_ = trainer_desc;
  start_cpu_core_id_ = section_params.start_cpu_core_id();

  SetDataset(dataset);
  ParseDumpConfig(trainer_desc);
  // Get the data readers that the dataset created for this trainer.
  const std::vector<paddle::framework::DataFeed*> readers =
      dataset->GetReaders();
  VLOG(3) << "readers num: " << readers.size();
  int num_readers = readers.size();
  PADDLE_ENFORCE_EQ(num_readers, 1,
                    platform::errors::InvalidArgument(
                        "Number of dataset readers for pipeline "
                        "must be 1 now, but the given value is %d.",
                        num_readers));
  auto* reader = readers[0];
  feed_var_names_ = reader->GetUseSlotAlias();

  workers_.resize(section_num_);
  for (int i = 0; i < section_num_; ++i) {
    const auto& section_config = section_params.section_config(i);
    platform::Place place;
    int place_id = section_config.place_id();
    switch (section_config.place()) {
      case SectionConfig::CPUPlace:
        place = platform::CPUPlace();
        break;
      case SectionConfig::CUDAPlace:
        // Note that one section has at most one GPU place in one pipeline
        PADDLE_ENFORCE_GE(
            place_id, 0,
            platform::errors::InvalidArgument(
                "The place_id value for CUDAPlace should be greater "
                "than or equal to 0, but the given value is %d.",
                place_id));
        place = platform::CUDAPlace(place_id);
        break;
      case SectionConfig::CUDAPinnedPlace:
        place = platform::CUDAPinnedPlace();
        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unknown place type in SectionConfig: %d", section_config.place()));
    }
    places_.emplace_back(place);
    VLOG(3) << "Device worker place: " << place << ", device id: " << place_id
            << ", section: " << i;

    workers_[i] = DeviceWorkerFactory::CreateDeviceWorker(
        trainer_desc.device_worker_name());
    auto this_worker =
        std::dynamic_pointer_cast<paddle::framework::SectionWorker>(
            workers_[i]);
    if (i == 0) {
      // Only the first section consumes the data reader.
      this_worker->SetDataFeed(reader);
      this_worker->SetReaderPlace(place);
    }
    this_worker->SetThreadIndex(i);
    this_worker->SetSectionIndex(i);
    this_worker->SetPlace(place);
    this_worker->Initialize(trainer_desc);
    this_worker->SetMicrobatchNum(num_microbatches_);
  }
  // set debug here
  SetDebug(trainer_desc.debug());
}

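// InitOtherEnv only needs to prepare the dump environment when field dumping
// is enabled; the main program is not used by the pipeline trainer here.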
void PipelineTrainer::InitOtherEnv(const ProgramDesc& main_program) {
  if (need_dump_field_) {
    InitDumpEnv();
  }
  VLOG(3) << "init other env done.";
}

std::string PipelineTrainer::GetDumpPath(int tid) {
  return string::format_string("%s/part-%05d", dump_fields_path_.c_str(), tid);
}

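// Create the dump channel and the background thread(s) that drain it via
// TrainerBase::DumpWork.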
void PipelineTrainer::InitDumpEnv() {
  queue_ = paddle::framework::MakeChannel<std::string>();
  // TODO(sandyhouse): make this configurable
  dump_thread_num_ = 1;
  for (int i = 0; i < dump_thread_num_; i++) {
    dump_thread_.push_back(
        std::thread(std::bind(&TrainerBase::DumpWork, this, i)));
  }
}

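// CopyParameters creates the variables one (section, microbatch) pair needs:
// feed variables and persistable variables live once per section in the
// minibatch scope (created only when microbatch_id is 0, with persistable
// values copied from the root scope), while all other variables are created
// anew in every microbatch scope.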
void PipelineTrainer::CopyParameters(int section_id, int microbatch_id,
                                     const ProgramDesc& program,
                                     const platform::Place& place) {
  auto& global_block = program.Block(0);
  for (auto& var : global_block.AllVars()) {
    bool is_feed_var =
        std::count(feed_var_names_.begin(), feed_var_names_.end(),
                   var->Name()) > 0;
    VLOG(3) << "Var name: " << var->Name();
    if ((var->Persistable() || is_feed_var) && microbatch_id == 0) {
      if (is_feed_var) {
        auto* new_ptr = minibatch_scopes_[section_id]->Var(var->Name());
        VLOG(3) << "data name: " << var->Name() << ", ptr: " << new_ptr;
        InitializeVariable(new_ptr, var->GetType());
      } else {
        auto* ptr = root_scope_->FindVar(var->Name());
        auto* new_ptr = minibatch_scopes_[section_id]->Var(var->Name());
        VLOG(3) << "Create persistable var " << var->Name() << " for minibatch "
                << section_id << ", which pointer is " << new_ptr;
        InitializeVariable(new_ptr, var->GetType());
        const LoDTensor& root_tensor = ptr->Get<LoDTensor>();
        LoDTensor* minibatch_tensor = new_ptr->GetMutable<LoDTensor>();
        TensorCopy(*static_cast<const Tensor*>(&root_tensor), place,
                   static_cast<Tensor*>(minibatch_tensor));
      }
    } else if (!var->Persistable() && !is_feed_var) {
      auto* ptr =
          microbatch_scopes_[section_id][microbatch_id]->Var(var->Name());
      VLOG(3) << "Create variable " << var->Name() << " for section "
              << section_id << " microbatch " << microbatch_id
              << ", which pointer is " << ptr;
      InitializeVariable(ptr, var->GetType());
    }
  }
}

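// GetSkipVars collects, for one section, the enqueue-op inputs that are not
// gradients, so that the section worker can keep them alive across
// microbatches instead of reclaiming them.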
void PipelineTrainer::GetSkipVars(int section_id, const ProgramDesc& program) {
  auto& global_block = program.Block(0);
  for (auto& op : global_block.AllOps()) {
    if (op->Type() != "enqueue") {
      continue;
    }
    auto input_arg_names = op->InputArgumentNames();
    PADDLE_ENFORCE_EQ(input_arg_names.size(), 1,
                      platform::errors::InvalidArgument(
                          "Number of input arguments for enqueue op must be 1, "
                          "but the value is %d.",
                          input_arg_names.size()));
    std::string input_arg_name = input_arg_names[0];
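    // rfind("@GRAD") equals size() - 5 exactly when the name ends with
    // "@GRAD"; everything else is recorded as a skip var.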
    if (input_arg_name.rfind("@GRAD") != input_arg_name.size() - 5) {
      skip_vars_[section_id].emplace_back(input_arg_name);
      VLOG(3) << "add skip var name: " << input_arg_name;
    }
  }
}

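// InitTrainerEnv builds the scope hierarchy: one minibatch scope per section
// under the root scope, and num_microbatches_ microbatch scopes under each
// minibatch scope. It then hands the scopes and skip vars to every worker.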
void PipelineTrainer::InitTrainerEnv(const ProgramDesc& main_program,
                                     const platform::Place& place) {
  PADDLE_ENFORCE_NOT_NULL(root_scope_,
                          platform::errors::InvalidArgument(
                              "root_scope pointer cannot be nullptr."));
  auto start_cpu_id = trainer_desc_.section_param().start_cpu_core_id();
  SectionWorker::cpu_id_.store(start_cpu_id);
  minibatch_scopes_.resize(section_num_);
  microbatch_scopes_.resize(section_num_);
  skip_vars_.resize(section_num_);

  VLOG(3) << "Init ScopeQueues and create all scopes";
  for (int i = 0; i < section_num_; ++i) {
    minibatch_scopes_[i] = &root_scope_->NewScope();
    auto program = std::make_shared<ProgramDesc>(
        trainer_desc_.section_param().section_config(i).program_desc());
    microbatch_scopes_[i].resize(num_microbatches_);
    for (int j = 0; j < num_microbatches_; ++j) {
      microbatch_scopes_[i][j] = &minibatch_scopes_[i]->NewScope();
      CopyParameters(i, j, *program, places_[i]);
    }
    GetSkipVars(i, *program);
  }

  for (int i = 0; i < section_num_; ++i) {
    auto this_worker =
        std::dynamic_pointer_cast<paddle::framework::SectionWorker>(
            workers_[i]);
    this_worker->SetRootScope(root_scope_);
    this_worker->SetMinibatchScope(minibatch_scopes_[i]);
    this_worker->SetMicrobatchScopes(microbatch_scopes_[i]);
    this_worker->SetSkipVars(skip_vars_[i]);
  }
}

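// Run launches one thread per section; in debug mode the profiling variant
// of the worker loop is used instead.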
void PipelineTrainer::Run() {
  VLOG(3) << "Going to run";
  for (int i = 0; i < section_num_; ++i) {
    if (!debug_) {
      section_threads_.push_back(
          std::thread(&DeviceWorker::TrainFiles, workers_[i].get()));
    } else {
      section_threads_.push_back(std::thread(
          &DeviceWorker::TrainFilesWithProfiler, workers_[i].get()));
    }
  }
}

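// Finalize joins all section threads, flushes the dump environment if
// enabled, copies updated persistable variables back to the root scope, and
// drops all child scopes.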
void PipelineTrainer::Finalize() {
  for (auto& th : section_threads_) {
    th.join();
  }
  if (need_dump_field_) {
    FinalizeDumpEnv();
  }
  VLOG(3) << "copying back parameters. ";
  for (int i = 0; i < section_num_; ++i) {
    std::shared_ptr<framework::ProgramDesc> program;
    program.reset(new ProgramDesc(
        trainer_desc_.section_param().section_config(i).program_desc()));
    for (int j = 0; j < num_microbatches_; ++j) {
      auto& global_block = program->Block(0);
      for (auto& var : global_block.AllVars()) {
        if (var->Persistable()) {
          auto* ptr = root_scope_->FindVar(var->Name());
          LoDTensor* root_tensor = ptr->GetMutable<LoDTensor>();
          auto* minibatch_ptr = minibatch_scopes_[i]->Var(var->Name());
          const LoDTensor& minibatch_tensor = minibatch_ptr->Get<LoDTensor>();
          TensorCopy(*static_cast<const Tensor*>(&minibatch_tensor), places_[0],
                     static_cast<Tensor*>(root_tensor));
          VLOG(4) << "Copy persitable var " << var->Name() << " to root scope";
        }
      }
    }
  }
  root_scope_->DropKids();
}

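// The scope exposed for a worker thread is its first microbatch scope.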
Scope* PipelineTrainer::GetWorkerScope(int thread_id) {
  return microbatch_scopes_[thread_id][0];
}

}  // end namespace framework
}  // end namespace paddle
#endif