/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/generator.h"

#include <glog/logging.h>

#include <deque>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace framework {

const std::shared_ptr<Generator>& GetDefaultCUDAGenerator(int64_t device_id) {
#ifdef PADDLE_WITH_CUDA

  static int64_t num_cuda_devices = -1;
  static std::once_flag num_devices_init_flag;
  static std::deque<std::once_flag> cuda_device_flags;
  static std::vector<std::shared_ptr<Generator>> default_cuda_generators;

  std::call_once(num_devices_init_flag, []() {
    num_cuda_devices = paddle::platform::GetCUDADeviceCount();
    cuda_device_flags.resize(num_cuda_devices);
    default_cuda_generators.resize(num_cuda_devices);
  });
  if (device_id < 0) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "cuda device id shoule be greater than 0"));
  }

  std::call_once(cuda_device_flags[device_id], [device_id]() {
    default_cuda_generators[device_id] =
        std::make_shared<Generator>(GetRandomSeed(), device_id);
    VLOG(4) << "initial seed: "
            << default_cuda_generators[device_id]->GetCurrentSeed();
  });
  return default_cuda_generators[device_id];
#else
  PADDLE_THROW(platform::errors::PermissionDenied(
      "getDefaultCUDAGenerator only support in CUDA place"));
#endif
}
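
// Illustrative usage (sketch, not called anywhere in this file): an op
// running on GPU can fetch the per-device generator and read its seed:
//   auto gen = paddle::framework::GetDefaultCUDAGenerator(device_id);
//   uint64_t seed = gen->GetCurrentSeed();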

const std::shared_ptr<Generator>& DefaultCPUGenerator() {
  static auto default_cpu_generator =
      std::make_shared<Generator>(GetRandomSeed());
  VLOG(4) << "initial seed: " << default_cpu_generator->GetCurrentSeed()
          << ", cpu engine: " << default_cpu_generator->GetCPUEngine().get();
  return default_cpu_generator;
}

std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine() {
  static auto op_default_cpu_engine = std::make_shared<std::mt19937_64>();
  return op_default_cpu_engine;
}

// NOTE(zhiqiu): there are 3 conditions:
// (1) op seed is not set and DefaultCPUGenerator is initialized: use
// DefaultCPUGenerator
// (2) op seed is not set and DefaultCPUGenerator is not initialized: use
// OpDefaultCPUEngine() and set a random seed
// (3) op seed is set: use OpDefaultCPUEngine() and set the given seed
std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t seed) {
  if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) {
    VLOG(4) << "Use random engine from generator";
    return DefaultCPUGenerator()->GetCPUEngine();
  } else {
    // NOTE(zhiqiu): we create a new engine instance every time instead of
    // reusing OpDefaultCPUEngine(); this is the legacy behavior of random
    // operators. The benefit is that when running PE (ParallelExecutor)
    // with a fixed seed in multiple threads, each thread has its own
    // engine, and the threads do not affect each other.
    //
    // We still need to measure the determinism of Generator in PE.
    auto engine = std::make_shared<std::mt19937_64>();
    if (seed == 0) {
      seed = GetRandomSeed();
      VLOG(4) << "Use default random engine with random seed = " << seed;
    } else {
      VLOG(4) << "Use default random engine with fixed random seed = " << seed;
    }
    static std::mutex mu_;
    {
      std::lock_guard<std::mutex> lock(mu_);
      engine->seed(seed);
    }
    return engine;
  }
}
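
// Illustrative usage (sketch): a CPU op can draw samples through the
// engine returned above with the standard <random> distributions:
//   auto engine = paddle::framework::GetCPURandomEngine(seed);
//   std::uniform_real_distribution<float> dist(0.0f, 1.0f);
//   float sample = dist(*engine);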

GeneratorState Generator::GetState() {
  std::lock_guard<std::mutex> lock(this->mu_);
  state_.cpu_engine = *engine_;
  return this->state_;
}

void Generator::SetState(const GeneratorState& state) {
  std::lock_guard<std::mutex> lock(this->mu_);
  this->state_ = state;
  this->engine_ = std::make_shared<std::mt19937_64>(state.cpu_engine);
}
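
// Illustrative round trip (sketch): GetState()/SetState() snapshot and
// restore the whole RNG stream, which can be used to replay a sequence:
//   auto snapshot = gen->GetState();
//   ... draw some numbers ...
//   gen->SetState(snapshot);  // subsequent draws repeat the sequence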

uint64_t Generator::GetCurrentSeed() {
  std::lock_guard<std::mutex> lock(this->mu_);
  return this->state_.current_seed;
}

uint64_t Generator::Seed() {
  std::lock_guard<std::mutex> lock(this->mu_);
  uint64_t seed;
  std::random_device de;
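  // Combine two 32-bit draws into a 64-bit value and keep the low 53 bits
  // (0x1FFFFFFFFFFFFF == 2^53 - 1), presumably so the seed stays exactly
  // representable as an IEEE-754 double.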
  seed = ((((uint64_t)de()) << 32) + de()) & 0x1FFFFFFFFFFFFF;
  this->state_.current_seed = seed;
  std::seed_seq seq({seed});
  this->engine_->seed(seq);

  return this->state_.current_seed;
}

void Generator::SetCurrentSeed(uint64_t seed) {
  std::lock_guard<std::mutex> lock(this->mu_);
  this->state_.current_seed = seed;
  this->state_.thread_offset = 0;
  std::seed_seq seq({seed});
  this->engine_->seed(seq);
}

std::shared_ptr<std::mt19937_64> Generator::GetCPUEngine() {
  std::lock_guard<std::mutex> lock(this->mu_);
  return this->engine_;
}

void Generator::SetCPUEngine(std::shared_ptr<std::mt19937_64> engine) {
  std::lock_guard<std::mutex> lock(this->mu_);
  this->engine_ = engine;
}

uint64_t Generator::Random64() {
  std::lock_guard<std::mutex> lock(this->mu_);
  return (*this->engine_)();
}

std::pair<uint64_t, uint64_t> Generator::IncrementOffset(
    uint64_t increment_offset) {
#ifdef PADDLE_WITH_CUDA
  std::lock_guard<std::mutex> lock(this->mu_);
  // Read and advance the offset under the lock so that concurrent callers
  // receive disjoint ranges of the random stream.
  uint64_t cur_offset = this->state_.thread_offset;
  this->state_.thread_offset += increment_offset;
  return std::make_pair(this->state_.current_seed, cur_offset);
#else
  PADDLE_THROW(platform::errors::PermissionDenied(
      "IncrementOffset is only supported when Paddle is compiled with "
      "CUDA"));
#endif
}
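
// Illustrative consumer (sketch): a CUDA kernel can feed the returned
// (seed, offset) pair into a counter-based Philox state so that successive
// launches consume disjoint portions of the random stream:
//   curandStatePhilox4_32_10_t state;
//   curand_init(seed, thread_idx, offset, &state);
//   float r = curand_uniform(&state);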

void Generator::SetIsInitPy(bool is_init_py) {
  this->is_init_py_ = is_init_py;
  VLOG(4) << "SetIsInitPy:" << this->is_init_py_;
}
bool Generator::GetIsInitPy() const { return this->is_init_py_; }

}  // namespace framework
}  // namespace paddle