/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#ifdef PADDLE_WITH_HETERPS

#include <ThreadPool.h>
#include <algorithm>
#include <cassert>
#include <map>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#ifdef PADDLE_WITH_PSLIB
#include "common/common_value.h"  // NOLINT
#endif

#ifdef PADDLE_WITH_PSCORE
#include "paddle/fluid/distributed/ps/table/depends/feature_value.h"
#endif

#include "paddle/fluid/distributed/ps/thirdparty/round_robin.h"
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {

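// HeterContext stages the data of one build pass for heterogeneous
// parameter-server training: feature keys are accumulated per table shard,
// deduplicated, and kept alongside the per-device key/value buffers that
// downstream code distributes to the devices. When multi_mf_dim_ is
// non-zero, the *_dim_* members are used instead, adding one extra level
// of indexing for the embedding (mf) dimension.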
class HeterContext {
 public:
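  // Releases the bare std::mutex pointers allocated in init(); which set
  // exists depends on whether multi_mf_dim_ was set.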
  virtual ~HeterContext() {
    if (!multi_mf_dim_) {
      for (size_t i = 0; i < mutex_.size(); ++i) {
        delete mutex_[i];
      }
      mutex_.clear();
    } else {
      for (size_t i = 0; i < dim_mutex_.size(); ++i) {
        for (size_t j = 0; j < dim_mutex_[i].size(); j++) {
          delete dim_mutex_[i][j];
        }
        dim_mutex_[i].clear();
      }
    }
  }
  Scope* scope_{nullptr};
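  // Shard-major key storage: feature_keys_[shard] in the flat case,
  // feature_dim_keys_[shard][dim] with multiple mf dimensions, and
  // device_task_keys_[shard][device] for the per-device split of a shard.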
  std::vector<std::vector<FeatureKey>> feature_keys_;
  std::vector<std::vector<std::vector<FeatureKey>>> feature_dim_keys_;
  std::vector<std::vector<std::vector<FeatureKey>>> device_task_keys_;

#ifdef PADDLE_WITH_PSLIB
  std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>> value_ptr_;
  std::vector<std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>>>
      device_task_ptr_;
  std::vector<std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>>>
      value_dim_ptr_;
  std::vector<std::vector<std::vector<paddle::ps::DownpourFixedFeatureValue*>>>
      device_dim_ptr_;
#endif
#ifdef PADDLE_WITH_PSCORE
  std::vector<std::vector<paddle::distributed::FixedFeatureValue*>> value_ptr_;
  std::vector<std::vector<std::vector<paddle::distributed::FixedFeatureValue*>>>
      value_dim_ptr_;
  std::vector<std::vector<std::vector<paddle::distributed::FixedFeatureValue*>>>
      device_task_ptr_;
  std::vector<std::vector<std::vector<paddle::distributed::FixedFeatureValue*>>>
      device_dim_ptr_;
#endif
  std::vector<std::vector<FeatureValue>> device_values_;
  std::vector<std::vector<FeatureKey>> device_keys_;
  std::vector<std::vector<std::vector<FeatureKey>>> device_dim_keys_;
  std::vector<std::vector<std::vector<FeatureValue>>> device_dim_values_;
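  // Per-device locks guarding the device_* buffers above; allocated with
  // new in init() and released in the destructor (dim_mutex_[device][dim]
  // is the multi-dimension counterpart of mutex_[device]).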
  std::vector<std::mutex*> mutex_;
  std::vector<std::vector<std::mutex*>> dim_mutex_;
  int multi_mf_dim_ = 0;

  uint32_t shard_num_ = 37;
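  // Total number of keys currently buffered, summed across shards (flat
  // layout only; the dim-bucketed containers are not counted).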
  uint64_t size() {
    uint64_t total_size = 0;
    for (auto& keys : feature_keys_) {
      total_size += keys.size();
    }
    return total_size;
  }
  void SetShardNum(uint32_t shard_num) { shard_num_ = shard_num; }
  uint32_t ShardNum() { return shard_num_; }

  void init(int shard_num, int device_num, int dim_num) {
    shard_num_ = shard_num;
    feature_keys_.resize(shard_num_);
    feature_dim_keys_.resize(shard_num_);
    value_ptr_.resize(shard_num_);
    value_dim_ptr_.resize(shard_num_);
    device_task_ptr_.resize(shard_num_);
    device_task_keys_.resize(shard_num_);
    for (size_t i = 0; i < device_task_ptr_.size(); i++) {
      device_task_ptr_[i].resize(device_num);
      device_task_keys_[i].resize(device_num);
    }
    for (size_t i = 0; i < feature_dim_keys_.size(); i++) {
      feature_dim_keys_[i].resize(dim_num);
      value_dim_ptr_[i].resize(dim_num);
    }
    device_values_.resize(device_num);
    device_dim_values_.resize(device_num);
    device_keys_.resize(device_num);

    device_dim_keys_.resize(device_num);
    device_dim_ptr_.resize(device_num);
    mutex_.resize(device_num);
    dim_mutex_.resize(device_num);
    for (size_t i = 0; i < mutex_.size(); ++i) {
      mutex_[i] = new std::mutex();
    }
    for (size_t i = 0; i < dim_mutex_.size(); ++i) {
      dim_mutex_[i].resize(dim_num);
      for (int j = 0; j < dim_num; j++) {
        dim_mutex_[i][j] = new std::mutex();
      }
    }
    multi_mf_dim_ = dim_num;
  }

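  // Clears every key/value buffer so the context can be reused for the
  // next pass; the mutexes allocated in init() are left untouched.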
  void Reset() {
    if (!multi_mf_dim_) {
      for (size_t i = 0; i < feature_keys_.size(); ++i) {
        feature_keys_[i].clear();
      }
      for (size_t i = 0; i < value_ptr_.size(); ++i) {
        value_ptr_[i].clear();
      }
      for (size_t i = 0; i < device_values_.size(); ++i) {
        device_values_[i].clear();
      }
      for (size_t i = 0; i < device_keys_.size(); ++i) {
        device_keys_[i].clear();
      }
      for (size_t i = 0; i < device_task_ptr_.size(); ++i) {
        for (size_t j = 0; j < device_task_ptr_[i].size(); ++j) {
          device_task_ptr_[i][j].clear();
          device_task_keys_[i][j].clear();
        }
      }
    } else {
      VLOG(3) << "Reset gpu task with dynamic mf dimension";
      for (size_t i = 0; i < feature_dim_keys_.size(); i++) {
        for (size_t j = 0; j < feature_dim_keys_[i].size(); j++) {
          feature_dim_keys_[i][j].clear();
        }
      }
      for (size_t i = 0; i < value_dim_ptr_.size(); i++) {
        for (size_t j = 0; j < value_dim_ptr_[i].size(); j++) {
          value_dim_ptr_[i][j].clear();
        }
      }

      for (size_t i = 0; i < device_dim_keys_.size(); i++) {
        for (size_t j = 0; j < device_dim_keys_[i].size(); j++) {
          device_dim_keys_[i][j].clear();
        }
      }
      for (size_t i = 0; i < device_dim_ptr_.size(); i++) {
        for (size_t j = 0; j < device_dim_ptr_[i].size(); j++) {
          device_dim_ptr_[i][j].clear();
        }
      }
    }
  }
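  // Appends the keys gathered by the reader threads, one std::unordered_set
  // per shard; the vector must contain exactly shard_num_ sets.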
  void batch_add_keys(
      const std::vector<std::unordered_set<uint64_t>>& thread_keys) {
    assert(thread_keys.size() == feature_keys_.size());

    for (uint32_t i = 0; i < shard_num_; i++) {
      int idx = feature_keys_[i].size();
      feature_keys_[i].resize(feature_keys_[i].size() + thread_keys[i].size());
      std::copy(thread_keys[i].begin(), thread_keys[i].end(),
                feature_keys_[i].begin() + idx);
    }
  }

  void batch_add_keys(int shard_num,
                      const robin_hood::unordered_set<uint64_t>& shard_keys) {
    int idx = feature_keys_[shard_num].size();
    feature_keys_[shard_num].resize(feature_keys_[shard_num].size() +
                                    shard_keys.size());
    std::copy(shard_keys.begin(), shard_keys.end(),
              feature_keys_[shard_num].begin() + idx);
  }

  void batch_add_keys(int shard_num, int dim_id,
                      const robin_hood::unordered_set<uint64_t>& shard_keys) {
    int idx = feature_dim_keys_[shard_num][dim_id].size();
    feature_dim_keys_[shard_num][dim_id].resize(
        feature_dim_keys_[shard_num][dim_id].size() + shard_keys.size());
    std::copy(shard_keys.begin(), shard_keys.end(),
              feature_dim_keys_[shard_num][dim_id].begin() + idx);
  }

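  // Sorts and removes duplicate keys in every shard (or every shard x dim
  // bucket when multi_mf_dim_ is set), spawning one std::thread per bucket.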
  void UniqueKeys() {
    std::vector<std::thread> threads;
    auto unique_func = [this](int i) {
      auto& cur_keys = feature_keys_[i];
      std::sort(cur_keys.begin(), cur_keys.end());
      auto it = std::unique(cur_keys.begin(), cur_keys.end());
      cur_keys.resize(std::distance(cur_keys.begin(), it));
    };
    auto unique_dynamic_mf_func = [this](int i, int j) {
      auto& cur_keys = feature_dim_keys_[i][j];
      std::sort(cur_keys.begin(), cur_keys.end());
      auto it = std::unique(cur_keys.begin(), cur_keys.end());
      cur_keys.resize(std::distance(cur_keys.begin(), it));
    };
    if (!multi_mf_dim_) {
      for (uint32_t i = 0; i < shard_num_; i++) {
        threads.push_back(std::thread(unique_func, i));
      }
    } else {
      for (uint32_t i = 0; i < shard_num_; i++) {
        for (int j = 0; j < multi_mf_dim_; j++) {
          threads.push_back(std::thread(unique_dynamic_mf_func, i, j));
        }
      }
      VLOG(3) << "heter_context unique keys with dynamic mf dimension";
    }
    for (std::thread& t : threads) {
      t.join();
    }
  }
};

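// Illustrative call sequence (hypothetical driver code, not part of this
// header; the shard/device/dim counts are made up for the example):
//
//   HeterContext ctx;
//   ctx.init(/*shard_num=*/37, /*device_num=*/8, /*dim_num=*/2);
//   robin_hood::unordered_set<uint64_t> shard_keys = /* from data feed */;
//   ctx.batch_add_keys(/*shard_num=*/0, /*dim_id=*/0, shard_keys);
//   ctx.UniqueKeys();  // parallel sort + dedup per bucket
//   /* ... fill the device_* buffers and run the pass ... */
//   ctx.Reset();       // clear buffers for the next pass
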
}  // end namespace framework
}  // end namespace paddle
#endif