/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#ifdef PADDLE_WITH_HETERPS

#include <ThreadPool.h>

#include <algorithm>
#include <cassert>
#include <map>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#ifdef PADDLE_WITH_PSLIB
#include "common/common_value.h"  // NOLINT
#endif

#ifdef PADDLE_WITH_PSCORE
#include "paddle/fluid/distributed/ps/table/depends/feature_value.h"
#endif

#include "paddle/fluid/distributed/ps/thirdparty/round_robin.h"
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {

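// HeterContext stages the state of one heterogeneous-PS training pass:
// feature keys and parameter-server value pointers are accumulated per CPU
// table shard, then regrouped into per-device buffers for the GPUs. When
// multi_mf_dim_ is nonzero, the *_dim_* members add an inner index over
// embedding dimensions.
//
// A sketch of the expected call order (inferred from this header, not a
// verbatim caller):
//   HeterContext ctx;
//   ctx.init(shard_num, device_num, dim_num);
//   ctx.batch_add_keys(thread_keys);  // gather keys from dataset threads
//   ctx.UniqueKeys();                 // dedup within each shard
//   ctx.Reset();                      // clear buffers for the next pass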
class HeterContext {
 public:
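  // Release the mutexes allocated in init(); which set exists depends on
  // whether multi-dimension mode (multi_mf_dim_) was enabled.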
  virtual ~HeterContext() {
    if (!multi_mf_dim_) {
      for (size_t i = 0; i < mutex_.size(); ++i) {
        delete mutex_[i];
      }
      mutex_.clear();
    } else {
      for (size_t i = 0; i < dim_mutex_.size(); ++i) {
        for (size_t j = 0; j < dim_mutex_[i].size(); j++) {
          delete dim_mutex_[i][j];
        }
        dim_mutex_[i].clear();
      }
    }
  }
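
  // scope_ is borrowed, not owned. feature_keys_[shard] collects the keys
  // of a CPU table shard; device_task_keys_[shard][device] holds the slice
  // of that shard destined for a given device.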
  Scope* scope_{nullptr};
  std::vector<std::vector<FeatureKey>> feature_keys_;
  std::vector<std::vector<std::vector<FeatureKey>>> feature_dim_keys_;
  std::vector<std::vector<std::vector<FeatureKey>>> device_task_keys_;

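  // Value pointers fetched from the parameter-server tables; the element
  // type depends on which PS backend (PSLIB or PSCORE) is compiled in.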
#ifdef PADDLE_WITH_PSLIB
  std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>> value_ptr_;
  std::vector<std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>>>
      device_task_ptr_;
  std::vector<std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>>>
      value_dim_ptr_;
  std::vector<std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>>>
      device_dim_ptr_;
#endif
#ifdef PADDLE_WITH_PSCORE
  std::vector<std::vector<paddle::distributed::FixedFeatureValue*>> value_ptr_;
  std::vector<std::vector<std::vector<paddle::distributed::FixedFeatureValue*>>>
      value_dim_ptr_;
  std::vector<std::vector<std::vector<paddle::distributed::FixedFeatureValue*>>>
      device_task_ptr_;
  std::vector<std::vector<std::vector<paddle::distributed::FixedFeatureValue*>>>
      device_dim_ptr_;
#endif
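
  // Per-device outputs built from the sharded buffers, plus the mutexes
  // that serialize concurrent writers into them.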
  std::vector<std::vector<FeatureValue>> device_values_;
  std::vector<std::vector<FeatureKey>> device_keys_;
  std::vector<std::vector<std::vector<FeatureKey>>> device_dim_keys_;
  std::vector<std::mutex*> mutex_;
  std::vector<std::vector<std::mutex*>> dim_mutex_;
  int multi_mf_dim_ = 0;

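  // Number of CPU table shards; set by init() or SetShardNum().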
  uint32_t shard_num_ = 37;
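
  // Total number of keys currently buffered across all shards.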
  uint64_t size() {
    uint64_t total_size = 0;
    for (auto& keys : feature_keys_) {
      total_size += keys.size();
    }
    return total_size;
  }
  void SetShardNum(uint32_t shard_num) { shard_num_ = shard_num; }
  uint32_t ShardNum() { return shard_num_; }

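  // Allocate the per-shard and per-device containers for a pass. dim_num is
  // the number of distinct embedding dimensions and, via multi_mf_dim_,
  // enables the multi-dimension code paths.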
  void init(int shard_num, int device_num, int dim_num) {
    shard_num_ = shard_num;
    feature_keys_.resize(shard_num_);
    feature_dim_keys_.resize(shard_num_);
    value_ptr_.resize(shard_num_);
    value_dim_ptr_.resize(shard_num_);
    device_task_ptr_.resize(shard_num_);
    device_task_keys_.resize(shard_num_);
    for (size_t i = 0; i < device_task_ptr_.size(); i++) {
      device_task_ptr_[i].resize(device_num);
      device_task_keys_[i].resize(device_num);
    }
    for (size_t i = 0; i < feature_dim_keys_.size(); i++) {
      feature_dim_keys_[i].resize(dim_num);
      value_dim_ptr_[i].resize(dim_num);
    }
    device_values_.resize(device_num);
    device_keys_.resize(device_num);

    device_dim_keys_.resize(device_num);
    device_dim_ptr_.resize(device_num);
    mutex_.resize(device_num);
    dim_mutex_.resize(device_num);
    for (size_t i = 0; i < mutex_.size(); ++i) {
      mutex_[i] = new std::mutex();
    }
    for (size_t i = 0; i < dim_mutex_.size(); ++i) {
      dim_mutex_[i].resize(dim_num);
      for (int j = 0; j < dim_num; j++) {
        dim_mutex_[i][j] = new std::mutex();
      }
    }
    multi_mf_dim_ = dim_num;
  }

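  // Clear every buffer so the context can be reused for the next pass.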
  void Reset() {
    if (!multi_mf_dim_) {
      for (size_t i = 0; i < feature_keys_.size(); ++i) {
        feature_keys_[i].clear();
      }
      for (size_t i = 0; i < value_ptr_.size(); ++i) {
        value_ptr_[i].clear();
      }
      for (size_t i = 0; i < device_values_.size(); ++i) {
        device_values_[i].clear();
      }
      for (size_t i = 0; i < device_keys_.size(); ++i) {
        device_keys_[i].clear();
      }
      for (size_t i = 0; i < device_task_ptr_.size(); ++i) {
        for (size_t j = 0; j < device_task_ptr_[i].size(); ++j) {
          device_task_ptr_[i][j].clear();
          device_task_keys_[i][j].clear();
        }
      }
    } else {
      VLOG(3) << "Reset gpu task with dynamic mf dimension";
      for (size_t i = 0; i < feature_dim_keys_.size(); i++) {
        for (size_t j = 0; j < feature_dim_keys_[i].size(); j++) {
          feature_dim_keys_[i][j].clear();
        }
      }
      for (size_t i = 0; i < value_dim_ptr_.size(); i++) {
        for (size_t j = 0; j < value_dim_ptr_[i].size(); j++) {
          value_dim_ptr_[i][j].clear();
        }
      }

      for (size_t i = 0; i < device_dim_keys_.size(); i++) {
        for (size_t j = 0; j < device_dim_keys_[i].size(); j++) {
          device_dim_keys_[i][j].clear();
        }
      }
      for (size_t i = 0; i < device_dim_ptr_.size(); i++) {
        for (size_t j = 0; j < device_dim_ptr_[i].size(); j++) {
          device_dim_ptr_[i][j].clear();
        }
      }
    }
  }
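
  // Merge deduplicated keys collected by worker threads into the per-shard
  // buffers; the overloads below append into a single shard or (shard, dim)
  // slot.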
  void batch_add_keys(
      const std::vector<std::unordered_set<uint64_t>>& thread_keys) {
    assert(thread_keys.size() == feature_keys_.size());

    for (uint32_t i = 0; i < shard_num_; i++) {
      int idx = feature_keys_[i].size();
      feature_keys_[i].resize(feature_keys_[i].size() + thread_keys[i].size());
      std::copy(thread_keys[i].begin(),
                thread_keys[i].end(),
                feature_keys_[i].begin() + idx);
    }
  }

  void batch_add_keys(int shard_num,
                      const robin_hood::unordered_set<uint64_t>& shard_keys) {
    int idx = feature_keys_[shard_num].size();
    feature_keys_[shard_num].resize(feature_keys_[shard_num].size() +
                                    shard_keys.size());
    std::copy(shard_keys.begin(),
              shard_keys.end(),
              feature_keys_[shard_num].begin() + idx);
  }

  void batch_add_keys(int shard_num,
                      int dim_id,
                      const robin_hood::unordered_set<uint64_t>& shard_keys) {
    int idx = feature_dim_keys_[shard_num][dim_id].size();
    feature_dim_keys_[shard_num][dim_id].resize(
        feature_dim_keys_[shard_num][dim_id].size() + shard_keys.size());
    std::copy(shard_keys.begin(),
              shard_keys.end(),
              feature_dim_keys_[shard_num][dim_id].begin() + idx);
  }

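  // Sort and deduplicate each shard's keys, one thread per shard (or per
  // (shard, dim) pair in multi-dimension mode).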
  void UniqueKeys() {
    std::vector<std::thread> threads;
    auto unique_func = [this](int i) {
      auto& cur_keys = feature_keys_[i];
      std::sort(cur_keys.begin(), cur_keys.end());
      auto it = std::unique(cur_keys.begin(), cur_keys.end());
      cur_keys.resize(std::distance(cur_keys.begin(), it));
    };
    auto unique_dynamic_mf_func = [this](int i, int j) {
      auto& cur_keys = feature_dim_keys_[i][j];
      std::sort(cur_keys.begin(), cur_keys.end());
      auto it = std::unique(cur_keys.begin(), cur_keys.end());
      cur_keys.resize(std::distance(cur_keys.begin(), it));
    };
    if (!multi_mf_dim_) {
      for (uint32_t i = 0; i < shard_num_; i++) {
        threads.push_back(std::thread(unique_func, i));
      }
    } else {
      for (uint32_t i = 0; i < shard_num_; i++) {
        for (int j = 0; j < multi_mf_dim_; j++) {
          threads.push_back(std::thread(unique_dynamic_mf_func, i, j));
        }
      }
      VLOG(3) << "heter_context unique keys with dynamic mf dimension";
    }
    for (std::thread& t : threads) {
      t.join();
    }
  }
};

}  // end namespace framework
}  // end namespace paddle
#endif  // PADDLE_WITH_HETERPS