/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/core/generator.h"

#include <glog/logging.h>

#include <deque>
#include <memory>
#include <mutex>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/xpu/xpu_info.h"
#include "paddle/phi/core/enforce.h"

static uint64_t GetRandomSeed() {
  std::random_device rd;
  // double has a 53-bit significand, so limit the uint64 seed to 53 bits
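  // (0x1FFFFFFFFFFFFF is 2^53 - 1, so any masked seed is exactly
  // representable as a double)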
  return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
}

namespace phi {

const std::shared_ptr<Generator>& DefaultXPUGenerator(int64_t device_id) {
#if defined(PADDLE_WITH_XPU)

  static int64_t num_xpu_devices = -1;
  static std::once_flag num_devices_init_flag;
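  // A deque is used for the per-device once_flags (here and in the CUDA
  // version below) because std::once_flag is neither copyable nor movable,
  // so it cannot live in a std::vector whose resize may relocate elements.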
  static std::deque<std::once_flag> xpu_device_flags;
  static std::vector<std::shared_ptr<Generator>> default_xpu_generators;

  std::call_once(num_devices_init_flag, []() {
    num_xpu_devices = phi::backends::xpu::GetXPUDeviceCount();
    xpu_device_flags.resize(num_xpu_devices);
    default_xpu_generators.resize(num_xpu_devices);
  });
  if (device_id < 0) {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "xpu device id should be greater than or equal to 0"));
  }

  std::call_once(xpu_device_flags[device_id], [device_id]() {
    default_xpu_generators[device_id] =
        std::make_shared<Generator>(GetRandomSeed(), device_id);
    VLOG(4) << "initial seed: "
            << default_xpu_generators[device_id]->GetCurrentSeed();
  });
  return default_xpu_generators[device_id];
#else
  PADDLE_THROW(phi::errors::PermissionDenied(
      "getDefaultXPUGenerator only support in XPU place"));
#endif
}

const std::shared_ptr<Generator>& DefaultCUDAGenerator(int64_t device_id) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)

  static int64_t num_cuda_devices = -1;
  static std::once_flag num_devices_init_flag;
  static std::deque<std::once_flag> cuda_device_flags;
  static std::vector<std::shared_ptr<Generator>> default_cuda_generators;

  std::call_once(num_devices_init_flag, []() {
    num_cuda_devices = phi::backends::gpu::GetGPUDeviceCount();
    cuda_device_flags.resize(num_cuda_devices);
    default_cuda_generators.resize(num_cuda_devices);
  });
  if (device_id < 0) {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "cuda device id shoule be greater than 0"));
  }

  std::call_once(cuda_device_flags[device_id], [device_id]() {
    default_cuda_generators[device_id] =
        std::make_shared<Generator>(GetRandomSeed(), device_id);
    VLOG(4) << "initial seed: "
            << default_cuda_generators[device_id]->GetCurrentSeed();
  });
  return default_cuda_generators[device_id];
#else
  PADDLE_THROW(phi::errors::PermissionDenied(
      "getDefaultCUDAGenerator only support in CUDA place"));
#endif
}

const std::shared_ptr<Generator>& DefaultCPUGenerator() {
  static auto default_cpu_generator =
      std::make_shared<Generator>(GetRandomSeed());
  return default_cpu_generator;
}
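
// Illustrative usage of the default generators (a sketch, not part of the
// original sources; the CUDA call assumes a CUDA build and a valid device):
//
//   auto cpu_gen = phi::DefaultCPUGenerator();
//   uint64_t cpu_seed = cpu_gen->GetCurrentSeed();
//   auto cuda_gen = phi::DefaultCUDAGenerator(/*device_id=*/0);
//   uint64_t cuda_seed = cuda_gen->GetCurrentSeed();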

using RNGMap = std::unordered_map<std::string, std::shared_ptr<Generator>>;

static RNGMap& GetRandomSeedGeneratorMap() {
  static auto random_seed_generator_map = RNGMap();
  return random_seed_generator_map;
}

const std::shared_ptr<Generator>& SetRandomSeedGenerator(
    const std::string& name, uint64_t seed) {
  auto& rng_map = GetRandomSeedGeneratorMap();
  auto iter = rng_map.find(name);
  PADDLE_ENFORCE_EQ(iter == rng_map.end(),
                    true,
                    phi::errors::AlreadyExists(
                        "%s RandomSeedGenerator is already exist", name));

  auto generator = std::make_shared<Generator>(seed);
  bool emplace_success = rng_map.emplace(name, generator).second;
  PADDLE_ENFORCE_EQ(
      emplace_success,
      true,
      phi::errors::PermissionDenied(
          "SetRandomSeedGenerator cannot emplace %s RandomSeedGenerator",
          name));
  return rng_map[name];
}

const std::shared_ptr<Generator>& GetRandomSeedGenerator(
    const std::string& name) {
  auto& rng_map = GetRandomSeedGeneratorMap();
  auto iter = rng_map.find(name);
  PADDLE_ENFORCE_EQ(
      iter != rng_map.end(),
      true,
      phi::errors::NotFound("%s RandomSeedGenerator is not found, please "
                            "use `set_random_seed_generator` to set it first",
                            name));
  return iter->second;
}
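
// Illustrative usage of the named-generator registry (a sketch, not part of
// the original sources; the name "data_loader" is only an example):
//
//   phi::SetRandomSeedGenerator("data_loader", /*seed=*/2023);
//   auto gen = phi::GetRandomSeedGenerator("data_loader");
//   uint64_t r = gen->Random64();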

// There are 3 conditions:
// (1) op seed is set: use the op seed.
// (2) op seed is not set, but the global seed is set: use the global seed.
// (3) neither the op seed nor the global seed is set: use a random seed
// produced by the default generator.
std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t seed) {
  if (seed == 0) {
    VLOG(4) << "Use random engine from generator";
    return DefaultCPUGenerator()->GetCPUEngine();
  } else {
    // NOTE(zhiqiu): creating an engine instance every time instead of using
    // OpDefaultCPUEngine(); this is the legacy behavior of the random
    // operators. The benefit is that when running PE with a fixed seed in
    // multiple threads, each thread has its own engine, and they do not
    // affect each other.
    //
    // We still need to measure the determinacy of Generator in PE.
    auto engine = std::make_shared<std::mt19937_64>();
    static std::mutex mu_;
    {
      std::lock_guard<std::mutex> lock(mu_);
      engine->seed(seed);
    }
    return engine;
  }
}
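
// Illustrative usage (a sketch, not part of the original sources):
//
//   // seed == 0: the shared engine from the default CPU generator.
//   auto shared_engine = phi::GetCPURandomEngine(0);
//   // seed != 0: a fresh engine seeded deterministically with that value.
//   auto fixed_engine = phi::GetCPURandomEngine(42);
//   std::uniform_real_distribution<float> dist(0.0f, 1.0f);
//   float sample = dist(*fixed_engine);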

Generator::Generator() {
  auto seed = GetRandomSeed();
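  // std::seed_seq expands the single 64-bit seed into enough values to
  // initialize the full internal state of std::mt19937_64.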
  std::seed_seq seq({seed});
  auto engine = std::make_shared<std::mt19937_64>(seq);
  this->state_.cpu_engine = *engine;
  this->state_.device = -1;
  this->state_.current_seed = seed;
  this->state_.thread_offset = 0;
  this->engine_ = engine;
  VLOG(4) << "initial seed: " << this->state_.current_seed
          << ", cpu engine: " << &this->state_.cpu_engine;
}

Generator::Generator(uint64_t seed) {
  std::seed_seq seq({seed});
  auto engine = std::make_shared<std::mt19937_64>(seq);
  this->state_.cpu_engine = *engine;
  this->state_.device = -1;
  this->state_.current_seed = seed;
  this->state_.thread_offset = 0;
  this->engine_ = engine;
  VLOG(4) << "initial seed: " << this->state_.current_seed
          << ", cpu engine: " << &this->state_.cpu_engine;
}

Generator::Generator(uint64_t seed, uint64_t device_id) {
  std::seed_seq seq({seed});
  auto engine = std::make_shared<std::mt19937_64>(seq);
  this->state_.cpu_engine = *engine;
  this->state_.device = device_id;
  this->state_.current_seed = seed;
  this->state_.thread_offset = 0;
  this->engine_ = engine;
  VLOG(4) << "initial seed: " << this->state_.current_seed
          << ", cpu engine: " << &this->state_.cpu_engine;
}

phi::Generator::GeneratorState Generator::GetState() {
  std::lock_guard<std::mutex> lock(this->mu_);
  state_.cpu_engine = *engine_;
  VLOG(4) << "Get Random state: "
          << "device id: " << (uint64_t)(this->state_.device)
          << ", current_seed: " << this->state_.current_seed
          << ", thread_offset: " << this->state_.thread_offset
          << ", cpu engine: " << *(this->engine_);
  return this->state_;
}

void Generator::SetState(const phi::Generator::GeneratorState& state) {
  std::lock_guard<std::mutex> lock(this->mu_);
  this->state_ = state;
  this->engine_ = std::make_shared<std::mt19937_64>(state.cpu_engine);
  VLOG(4) << "Set Random state: "
          << "device id: " << (uint64_t)(this->state_.device)
          << ", current_seed: " << this->state_.current_seed
          << ", thread_offset: " << this->state_.thread_offset
          << ", cpu engine: " << *(this->engine_);
}
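
// Illustrative save/restore of the RNG state (a sketch, not part of the
// original sources):
//
//   auto gen = phi::DefaultCPUGenerator();
//   auto snapshot = gen->GetState();  // copies seed, offset, and engine
//   uint64_t a = gen->Random64();
//   gen->SetState(snapshot);          // rewind the engine
//   uint64_t b = gen->Random64();     // b == a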

uint64_t Generator::GetCurrentSeed() {
  std::lock_guard<std::mutex> lock(this->mu_);
  return this->state_.current_seed;
}

uint64_t Generator::Seed() {
  std::lock_guard<std::mutex> lock(this->mu_);
  uint64_t seed = GetRandomSeed();
  this->state_.current_seed = seed;
  std::seed_seq seq({seed});
  this->engine_->seed(seq);

  return this->state_.current_seed;
}

void Generator::SetCurrentSeed(uint64_t seed) {
  std::lock_guard<std::mutex> lock(this->mu_);
  this->state_.current_seed = seed;
  this->state_.thread_offset = 0;
  std::seed_seq seq({seed});
  this->engine_->seed(seq);
}

std::shared_ptr<std::mt19937_64> Generator::GetCPUEngine() {
  std::lock_guard<std::mutex> lock(this->mu_);
  return this->engine_;
}

void Generator::SetCPUEngine(std::shared_ptr<std::mt19937_64> engine) {
  std::lock_guard<std::mutex> lock(this->mu_);
  this->engine_ = engine;
}

uint64_t Generator::Random64() {
  std::lock_guard<std::mutex> lock(this->mu_);
  auto engine = this->engine_;
  return (*engine)();
}

std::pair<uint64_t, uint64_t> Generator::IncrementOffset(
    uint64_t increment_offset) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  std::lock_guard<std::mutex> lock(this->mu_);
  uint64_t cur_offset = this->state_.thread_offset;
  this->state_.thread_offset += increment_offset;
  return std::make_pair(this->state_.current_seed, cur_offset);
#else
  PADDLE_THROW(phi::errors::PermissionDenied(
      "Increment Offset only support in CUDA place"));
#endif
}
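
// Illustrative use of the (seed, offset) pair returned by IncrementOffset
// (a sketch, not part of the original sources). Counter-based device RNGs
// such as curand's Philox take a seed plus an offset, so advancing the
// offset gives each kernel launch a disjoint random stream:
//
//   auto gen = phi::DefaultCUDAGenerator(0);
//   auto seed_offset = gen->IncrementOffset(/*increment_offset=*/4);
//   uint64_t seed = seed_offset.first;    // pass both values to the kernel
//   uint64_t offset = seed_offset.second;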

}  // namespace phi