/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <time.h>
#ifndef _WIN32
#include <unistd.h>  // usleep(), used by the background pull loop
#endif

#include "paddle/fluid/framework/device_worker.h"

namespace paddle {
namespace framework {

class LoDTensor;
class Scope;
class Variable;

std::shared_ptr<PullDenseWorker> PullDenseWorker::s_instance_ = nullptr;
std::mutex PullDenseWorker::mutex_for_version_;
std::map<uint64_t, uint64_t> PullDenseWorker::last_versions_;
std::map<uint64_t, uint64_t> PullDenseWorker::current_version_;
std::map<uint64_t, std::vector<uint64_t>> PullDenseWorker::training_versions_;
std::map<uint64_t, std::vector<std::string>>
    PullDenseWorker::dense_value_names_;

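// Cache the pull-dense settings from the TrainerDesc: for each table listed
// in the program config, remember which dense variable names it owns and
// zero the per-table version counters that CheckUpdateParam() compares.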
void PullDenseWorker::Initialize(const TrainerDesc& param) {
  running_ = false;
  param_ = param.pull_dense_param();
  dwp_param_ = param.downpour_param();
  threshold_ = param_.threshold();
  thread_num_ = param_.device_num();
  sleep_time_ms_ = param_.sleep_time_ms();
  for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
       ++i) {
    uint64_t tid = static_cast<uint64_t>(
        dwp_param_.program_config(0).pull_dense_table_id(i));
    TableParameter table;
    // Use a distinct name and a const reference here: the original `auto i`
    // shadowed the outer loop index and copied each TableParameter.
    for (const auto& t : param_.dense_table()) {
      if (t.table_id() == tid) {
        table = t;
        break;
      }
    }
    // setup dense variables for each table
    int var_num = table.dense_value_name_size();
    dense_value_names_[tid].resize(var_num);
    for (int j = 0; j < var_num; ++j) {
      dense_value_names_[tid][j] = table.dense_value_name(j);
    }
    // setup training version for each table
    training_versions_[tid].resize(thread_num_, 0);
    last_versions_[tid] = 0;
    current_version_[tid] = 0;
  }
  fleet_ptr_ = FleetWrapper::GetInstance();
#ifdef PADDLE_WITH_CUDA
  copy_streams_.clear();
#endif
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_XPU)
  places_.clear();
  thread_scopes_.clear();
#endif
}

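// Create a host-side staging copy ("<name>pin") of every dense variable so
// pulled values can be copied asynchronously to each device in Wait(). On
// CUDA builds the staging tensor lives in pinned (page-locked) memory.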
void PullDenseWorker::CreatePinVar() {
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_XPU)
  // Walk every table configured for dense pull and create a pinned copy of
  // each of its dense variables.
  for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
       ++i) {
    uint64_t tid = static_cast<uint64_t>(
        dwp_param_.program_config(0).pull_dense_table_id(i));
    for (size_t j = 0; j < dense_value_names_[tid].size(); j++) {
      auto& name = dense_value_names_[tid][j];
      Variable* var = root_scope_->FindVar(name);

      LoDTensor* tensor = var->GetMutable<LoDTensor>();
      auto* ptr = root_scope_->Var(name + "pin");
      InitializeVariable(ptr, proto::VarType::LOD_TENSOR);
      LoDTensor* pin_tensor = ptr->GetMutable<LoDTensor>();
#ifdef PADDLE_WITH_CUDA
      pin_tensor->mutable_data<float>(tensor->dims(),
                                      platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
      pin_tensor->mutable_data<float>(tensor->dims(), platform::CPUPlace());
#endif
    }
  }
#endif
}

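// Block until every outstanding async pull finishes, escalating repeated
// failures to a fatal error. On GPU/XPU builds, then scatter the pulled
// values from the staging tensors into each thread scope's device tensors.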
void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) {
  for (auto& t : *status_vec) {
    t.wait();
    auto status = t.get();
    if (status != 0) {
      LOG(WARNING) << "Current pull dense thread failed times: "
                   << ++pull_dense_fail_times_;
    }
  }

  constexpr size_t MAX_FAIL_NUM = 20;
  if (pull_dense_fail_times_ > MAX_FAIL_NUM) {
    // PADDLE_THROW never returns, so the unreachable exit(-1) that used to
    // follow it has been dropped.
    PADDLE_THROW(platform::errors::Fatal(
        "Pull dense failed more than %d times.", MAX_FAIL_NUM));
  }
  status_vec->resize(0);
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_XPU)

  for (size_t i = 0; i < places_.size(); ++i) {
    // Scatter the pulled values from the staging tensors into this thread
    // scope's device tensors for every configured table.
    for (int x = 0; x < dwp_param_.program_config(0).pull_dense_table_id_size();
         ++x) {
      uint64_t tid = static_cast<uint64_t>(
          dwp_param_.program_config(0).pull_dense_table_id(x));
      for (size_t j = 0; j < dense_value_names_[tid].size(); j++) {
        auto& name = dense_value_names_[tid][j];

        Variable* pin_var = root_scope_->FindVar(name + "pin");
        LoDTensor* pin_tensor = pin_var->GetMutable<LoDTensor>();
        float* pin_w = pin_tensor->data<float>();
        Variable* var = thread_scopes_[i]->FindVar(name);
        LoDTensor* tensor = var->GetMutable<LoDTensor>();
        float* w = tensor->data<float>();
#ifdef PADDLE_WITH_CUDA
        memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, places_[i]), w,
                     platform::CUDAPinnedPlace(), pin_w,
                     sizeof(float) * tensor->numel(), copy_streams_[i]);
#endif
#ifdef PADDLE_WITH_XPU
        memory::Copy(BOOST_GET_CONST(platform::XPUPlace, places_[i]), w,
                     platform::CPUPlace(), pin_w,
                     sizeof(float) * tensor->numel());
#endif
      }
    }
  }
#endif
}

void PullDenseWorker::Stop() {
  if (running_) {
    running_ = false;
    t_.join();
  }
}

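// Issue an async pull for every table that either is forced or has crossed
// its version threshold, then block until the whole batch completes.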
void PullDenseWorker::PullDense(bool force_update) {
  pull_dense_status_.resize(0);
  for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
       ++i) {
    uint64_t tid = static_cast<uint64_t>(
        dwp_param_.program_config(0).pull_dense_table_id(i));
    if (force_update || CheckUpdateParam(tid)) {
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_XPU)
      VLOG(3) << "pull dense " << force_update << " " << tid;
      fleet_ptr_->PullDenseVarsAsync(*root_scope_, tid, dense_value_names_[tid],
                                     &pull_dense_status_, false);
#else
      fleet_ptr_->PullDenseVarsAsync(*root_scope_, tid, dense_value_names_[tid],
                                     &pull_dense_status_, true);
#endif
      ResetThreadVersion(tid);
    }
  }
  if (pull_dense_status_.size() != 0) {
    Wait(&pull_dense_status_);
  }
}

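// Pull once synchronously so parameters are fresh before the first step,
// then launch the background pull thread. Typical wiring (a sketch, mirroring
// how a trainer such as DistMultiTrainer drives this worker):
//
//   auto worker = PullDenseWorker::GetInstance();
//   worker->Initialize(trainer_desc);  // TrainerDesc with pull_dense_param
//   worker->SetRootScope(root_scope);
//   worker->Start();
//   // ... trainer threads call IncreaseThreadVersion() after each step ...
//   worker->Stop();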
int PullDenseWorker::Start() {
  running_ = true;
  // Before training starts, pull dense parameters from the pserver once.
  PullDense(true);
  t_ = std::thread(&PullDenseWorker::Run, this);
  return 0;
}

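// Background loop: keep issuing threshold-gated pulls until Stop() clears
// running_, sleeping sleep_time_ms_ between rounds.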
void PullDenseWorker::Run() {
  while (running_) {
    PullDense(false);
#ifndef _WIN32
    usleep(sleep_time_ms_ * 1000);
#endif
  }
}

void PullDenseWorker::IncreaseThreadVersion(int thread_id, uint64_t table_id) {
  std::lock_guard<std::mutex> lock(mutex_for_version_);
  training_versions_[table_id][thread_id]++;
}

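// A table is due for a pull once the slowest trainer thread has advanced at
// least threshold_ versions beyond the version recorded at the last pull.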
bool PullDenseWorker::CheckUpdateParam(uint64_t table_id) {
  std::lock_guard<std::mutex> lock(mutex_for_version_);
  auto& version = training_versions_[table_id];
  current_version_[table_id] =
      *(std::min_element(version.begin(), version.end()));
  if (current_version_[table_id] - last_versions_[table_id] <
      static_cast<size_t>(threshold_)) {
    return false;
  }
  return true;
}

void PullDenseWorker::ResetThreadVersion(uint64_t table_id) {
  std::lock_guard<std::mutex> lock(mutex_for_version_);
  last_versions_[table_id] = current_version_[table_id];
}

int PullDenseWorker::GetThreadIdByScope(const Scope* scope) {
  auto it = scope_to_thread_id_.find(scope);
  if (it != scope_to_thread_id_.end()) {
    return it->second;
  }
  return -1;
}

void PullDenseWorker::SetThreadIdByScope(const Scope* scope, int tid) {
  scope_to_thread_id_[scope] = tid;
}

}  // namespace framework
}  // namespace paddle