Unverified commit 4e417409, authored by Huang Jiyi, committed by GitHub

[phi decoupling] move generator implementation from fluid to phi (#50746)

* move fluid generator to phi

* move fluid generator to phi

* update .gitignore

* fix bugs

* fix cannot find "glog/logging.h" in "generator.h"

* fix bugs
Parent 746b774b
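The changes below are large but mechanical: every call site drops the fluid header in favour of the phi one and replaces the `paddle::framework::` prefix with `phi::`. A minimal before/after sketch of the pattern (header and function names are taken from the hunks that follow; `GetEngine` is an illustrative wrapper, not a Paddle API):

    // Before: #include "paddle/fluid/framework/generator.h"
    //         auto engine = paddle::framework::GetCPURandomEngine(seed);
    // After:
    #include <memory>
    #include <random>
    #include "paddle/phi/core/generator.h"

    std::shared_ptr<std::mt19937_64> GetEngine(uint64_t seed) {
      // A seed of 0 returns the default CPU generator's shared engine; a
      // nonzero seed returns a freshly seeded engine (the behaviour of the
      // pre-existing implementation, which this commit moves unchanged).
      return phi::GetCPURandomEngine(seed);
    }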
@@ -93,3 +93,5 @@ paddle/fluid/pybind/eager_op_function_impl.h
 paddle/fluid/pybind/op_function_impl.h
 paddle/fluid/pybind/*final_state_op_function_impl.h
 paddle/fluid/prim/api/generated/prim_api/*
+paddle/fluid/framework/__init__.py
+python/paddle/incubate/fleet/parameter_server/pslib/ps_pb2.py
@@ -26,11 +26,11 @@
 #include "paddle/fluid/distributed/ps/table/graph/graph_node.h"
 #include "paddle/fluid/framework/fleet/fleet_wrapper.h"
 #include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/io/fs.h"
 #include "paddle/fluid/platform/timer.h"
 #include "paddle/fluid/string/printf.h"
 #include "paddle/fluid/string/string_helper.h"
+#include "paddle/phi/core/generator.h"
 
 DECLARE_bool(graph_load_in_parallel);
 DECLARE_bool(graph_get_neighbor_id);
@@ -2574,7 +2574,7 @@ int32_t GraphTable::Initialize(const GraphParameter &graph) {
   _shards_task_pool.resize(task_pool_size_);
   for (size_t i = 0; i < _shards_task_pool.size(); ++i) {
     _shards_task_pool[i].reset(new ::ThreadPool(1));
-    _shards_task_rng_pool.push_back(paddle::framework::GetCPURandomEngine(0));
+    _shards_task_rng_pool.push_back(phi::GetCPURandomEngine(0));
   }
   load_node_edge_task_pool.reset(new ::ThreadPool(load_thread_num));
......
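The hunk above builds one engine handle per shard worker thread. A small sketch of that pattern, assuming the phi API shown in this commit (`MakeRngPool` is hypothetical):

    #include <memory>
    #include <random>
    #include <vector>
    #include "paddle/phi/core/generator.h"

    // With seed == 0 every entry aliases the default CPU generator's engine,
    // so draws from different shards advance one shared global stream.
    std::vector<std::shared_ptr<std::mt19937_64>> MakeRngPool(size_t n) {
      std::vector<std::shared_ptr<std::mt19937_64>> pool;
      pool.reserve(n);
      for (size_t i = 0; i < n; ++i) {
        pool.push_back(phi::GetCPURandomEngine(0));
      }
      return pool;
    }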
@@ -22,8 +22,8 @@
 #include <vector>
 
 #include "gflags/gflags.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/truncated_gaussian_random_op.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace distributed {
@@ -64,7 +64,7 @@ class UniformInitializer : public Initializer {
     max_ = std::stof(attrs[3]);
 
     dist_ = std::uniform_real_distribution<float>(min_, max_);
-    random_engine_ = framework::GetCPURandomEngine(seed_);
+    random_engine_ = phi::GetCPURandomEngine(seed_);
   }
 
   float GetValue() override { return dist_(*random_engine_); }
@@ -90,7 +90,7 @@ class GaussianInitializer : public Initializer {
     mean_ = std::stof(attrs[2]);
     std_ = std::stof(attrs[3]);
 
-    random_engine_ = framework::GetCPURandomEngine(seed_);
+    random_engine_ = phi::GetCPURandomEngine(seed_);
     dist_ = std::normal_distribution<float>(mean_, std_);
   }
@@ -120,7 +120,7 @@ class TruncatedGaussianInitializer : public Initializer {
     std::uniform_real_distribution<float> dist_(
        std::numeric_limits<float>::min(), 1.0);
-    random_engine_ = framework::GetCPURandomEngine(seed_);
+    random_engine_ = phi::GetCPURandomEngine(seed_);
   }
 
   float GetValue() override {
......
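Each initializer above pairs a `std::` distribution with the engine returned by `phi::GetCPURandomEngine`. A condensed sketch of that pattern (`UniformValue` is illustrative, not a Paddle API):

    #include <random>
    #include "paddle/phi/core/generator.h"

    float UniformValue(float min, float max, uint64_t seed) {
      std::uniform_real_distribution<float> dist(min, max);
      auto engine = phi::GetCPURandomEngine(seed);  // shared or seeded engine
      return dist(*engine);
    }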
@@ -18,7 +18,7 @@
 #include <memory>
 #include <unordered_map>
 
-#include "paddle/fluid/framework/generator.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace distributed {
......
@@ -1136,11 +1136,6 @@ cc_test_old(
   string_helper
   glog)
 
-cc_library(
-  generator
-  SRCS generator.cc
-  DEPS enforce place)
-
 cc_library(
   infershape_utils
   SRCS infershape_utils.cc
......
--- paddle/fluid/framework/generator.h (deleted)
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <glog/logging.h>
-#include <stdint.h>
-
-#include <atomic>
-#include <deque>
-#include <iostream>  // temp for debug
-#include <memory>
-#include <mutex>  // NOLINT
-#include <random>
-#include <typeinfo>
-#include <utility>
-
-#include "paddle/phi/core/generator.h"
-
-namespace paddle {
-namespace framework {
-
-static uint64_t GetRandomSeed() {
-  std::random_device rd;
-  // double has 53 bit significant, so limit uint64 to 53 bits
-  return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
-}
-
-struct Generator : public phi::Generator {
-  Generator() {
-    auto seed = GetRandomSeed();
-    std::seed_seq seq({seed});
-    auto engine = std::make_shared<std::mt19937_64>(seq);
-    this->state_.cpu_engine = *engine;
-    this->state_.device = -1;
-    this->state_.current_seed = seed;
-    this->state_.thread_offset = 0;
-    this->engine_ = engine;
-    VLOG(4) << "initial seed: " << this->state_.current_seed
-            << ", cpu engine: " << &this->state_.cpu_engine;
-  }
-
-  explicit Generator(uint64_t seed) {
-    std::seed_seq seq({seed});
-    auto engine = std::make_shared<std::mt19937_64>(seq);
-    this->state_.cpu_engine = *engine;
-    this->state_.device = -1;
-    this->state_.current_seed = seed;
-    this->state_.thread_offset = 0;
-    this->engine_ = engine;
-    VLOG(4) << "initial seed: " << this->state_.current_seed
-            << ", cpu engine: " << &this->state_.cpu_engine;
-  }
-
-  Generator(uint64_t seed, uint64_t device_id) {
-    std::seed_seq seq({seed});
-    auto engine = std::make_shared<std::mt19937_64>(seq);
-    this->state_.cpu_engine = *engine;
-    this->state_.device = device_id;
-    this->state_.current_seed = seed;
-    this->state_.thread_offset = 0;
-    this->engine_ = engine;
-    VLOG(4) << "initial seed: " << this->state_.current_seed
-            << ", cpu engine: " << &this->state_.cpu_engine;
-  }
-
-  Generator(const Generator& other) = delete;
-
-  // get random state
-  phi::Generator::GeneratorState GetState();
-  // set random state
-  void SetState(const phi::Generator::GeneratorState&);
-  // get current seed
-  uint64_t GetCurrentSeed();
-  // random a seed and get
-  uint64_t Seed();
-  // set seed
-  void SetCurrentSeed(uint64_t seed);
-  // get cpu engine
-  std::shared_ptr<std::mt19937_64> GetCPUEngine();
-  // set cpu engine
-  void SetCPUEngine(std::shared_ptr<std::mt19937_64>);
-
-  uint64_t Random64();
-
-  std::pair<uint64_t, uint64_t> IncrementOffset(uint64_t increament_offset);
-
-  uint64_t get_device_id() { return this->state_.device; }
-
- private:
-  phi::Generator::GeneratorState state_;
-  std::shared_ptr<std::mt19937_64> engine_;
-  mutable std::mutex mu_;
-};
-
-// The DefaultCPUGenerator is used in manual_seed()
-const std::shared_ptr<Generator>& DefaultCPUGenerator();
-
-const std::shared_ptr<Generator>& DefaultCUDAGenerator(int64_t device_id = -1);
-
-const std::shared_ptr<Generator>& DefaultXPUGenerator(int64_t device_id = -1);
-
-std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t);
-
-const std::shared_ptr<Generator>& SetRandomSeedGenerator(
-    const std::string& name, uint64_t seed);
-
-const std::shared_ptr<Generator>& GetRandomSeedGenerator(
-    const std::string& name);
-
-}  // namespace framework
-}  // namespace paddle
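With this wrapper header deleted, code that previously constructed a `paddle::framework::Generator` now uses `phi::Generator` directly; the member functions keep the same names and signatures (see the new phi header later in this diff). A minimal usage sketch:

    #include "paddle/phi/core/generator.h"

    void UseGenerator() {
      phi::Generator gen(/*seed=*/42);   // fixed-seed CPU generator
      uint64_t draw = gen.Random64();    // advance the engine
      uint64_t seed = gen.GetCurrentSeed();  // == 42
      (void)draw;
      (void)seed;
    }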
@@ -28,7 +28,6 @@
 #include "paddle/fluid//platform/device/gpu/gpu_types.h"
 #include "paddle/fluid/framework/feed_fetch_method.h"
 #include "paddle/fluid/framework/feed_fetch_type.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/framework/naive_executor.h"
@@ -62,6 +61,7 @@
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/enforce.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/funcs/data_type_transform.h"
 #include "paddle/utils/string/split.h"
@@ -441,8 +441,8 @@ void AnalysisPredictor::InitDeviceContexts() {
           .GetZeroAllocator(platform::CPUPlace())
           .get());
   gpu_context->SetGenerator(
-      framework::DefaultCUDAGenerator(place_.GetDeviceId()).get());
-  gpu_context->SetHostGenerator(framework::DefaultCPUGenerator().get());
+      phi::DefaultCUDAGenerator(place_.GetDeviceId()).get());
+  gpu_context->SetHostGenerator(phi::DefaultCPUGenerator().get());
   gpu_context->SetStream(gpu_resource->GetStream());
   gpu_context->SetBlasHandle(gpu_resource->GetBlasHandleCreator());
......
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/utils.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #ifdef PADDLE_WITH_CUDA
 #include "paddle/fluid/operators/cudnn_lstm_cache.h"
@@ -232,7 +232,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel<T> {
     if (seed == 0) {
       // If not specify seed, use global Generator to generate seed.
       int device_id = ctx.GetPlace().GetDeviceId();
-      auto gen_cuda = paddle::framework::DefaultCUDAGenerator(device_id);
+      auto gen_cuda = phi::DefaultCUDAGenerator(device_id);
       seed = static_cast<int>(gen_cuda->Random64());
     }
     // else use `ctx.Attr<int>("seed")` specified seed
......
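The kernel above follows a convention used throughout these files: a `seed` attribute of 0 means "unspecified", in which case a seed is drawn from the per-device default generator. A sketch of that convention in isolation (`ResolveSeed` is illustrative):

    #include "paddle/phi/core/generator.h"

    int ResolveSeed(int attr_seed, int device_id) {
      if (attr_seed == 0) {
        // Not specified: draw one from the global per-device generator.
        auto gen_cuda = phi::DefaultCUDAGenerator(device_id);
        return static_cast<int>(gen_cuda->Random64());
      }
      return attr_seed;  // honour the explicitly requested seed
    }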
@@ -16,11 +16,11 @@
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/unary.h"
......
@@ -14,10 +14,10 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/fused/fused_dropout_act_bias.h"
 #include "paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h"
 #include "paddle/fluid/operators/fused/fused_residual_dropout_bias.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/funcs/dropout_impl_util.h"
 #include "paddle/phi/kernels/funcs/functors.h"
 #include "paddle/phi/kernels/layer_norm_kernel.h"
......
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
......
@@ -12,8 +12,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/fused_softmax_mask_upper_triangle_op.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
......
@@ -43,11 +43,11 @@ limitations under the License. */
 #include <algorithm>
 #include <string>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/operators/fused_softmax_mask_upper_triangle_op.h"
 #include "paddle/fluid/platform/float16.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
......
@@ -14,10 +14,10 @@ limitations under the License. */
 
 #include <random>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/infermeta/nullary.h"
 
 namespace paddle {
......
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include <thrust/random.h>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/amp/fp16_type_traits.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/funcs/index_impl.cu.h"
 
 namespace paddle {
@@ -59,7 +59,7 @@ class GPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {
     int64_t size = tensor->numel();
 
     int device_id = context.GetPlace().GetDeviceId();
-    auto gen_cuda = framework::DefaultCUDAGenerator(device_id);
+    auto gen_cuda = phi::DefaultCUDAGenerator(device_id);
     auto& dev_cxt = context.template device_context<phi::GPUContext>();
 
     if (seed == 0) {
......
@@ -14,8 +14,8 @@ limitations under the License. */
 
 #include <random>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -37,7 +37,7 @@ class MLUGaussianRandomKernel : public framework::OpKernel<T> {
     int64_t size = tensor->numel();
 
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       cpu_data[i] = dist(*engine);
     }
......
@@ -15,9 +15,9 @@ limitations under the License. */
 #include <random>
 
 #include "paddle/fluid/framework/convert_utils.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -39,7 +39,7 @@ class NPUGaussianRandomKernel : public framework::OpKernel<T> {
     int64_t size = tensor->numel();
 
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       cpu_data[i] = dist(*engine);
     }
......
@@ -16,11 +16,11 @@ limitations under the License. */
 #include <thrust/host_vector.h>
 #include <thrust/random.h>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/hostdevice.h"
 #include "paddle/phi/kernels/funcs/aligned_vector.h"
 #include "paddle/phi/kernels/funcs/distribution_helper.h"
......
@@ -16,7 +16,7 @@ limitations under the License. */
 
 #include <glog/logging.h>
 
-#include "paddle/fluid/framework/generator.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -26,7 +26,7 @@ Sampler::~Sampler() {}
 
 UniformSampler::UniformSampler(int64_t range, unsigned int seed)
     : Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
-  random_engine_ = framework::GetCPURandomEngine(seed_);
+  random_engine_ = phi::GetCPURandomEngine(seed_);
   dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
 }
@@ -36,7 +36,7 @@ float UniformSampler::Probability(int64_t value) const { return inv_range_; }
 
 LogUniformSampler::LogUniformSampler(int64_t range, unsigned int seed)
     : Sampler(range, seed), log_range_(log(range + 1)) {
-  random_engine_ = framework::GetCPURandomEngine(seed_);
+  random_engine_ = phi::GetCPURandomEngine(seed_);
   dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
 }
@@ -66,7 +66,7 @@ CustomSampler::CustomSampler(int64_t range,
                              const float *alias_probabilities,
                              unsigned int seed)
     : Sampler(range, seed) {
-  random_engine_ = framework::GetCPURandomEngine(seed_);
+  random_engine_ = phi::GetCPURandomEngine(seed_);
   real_dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
   int_dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
......
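For context on `LogUniformSampler`: the constructor above stores `log_range_ = log(range + 1)` together with a uniform [0, 1) distribution, which is the standard recipe for log-uniform candidate sampling. A sketch of how such a sampler typically maps a uniform draw to an id (this is the usual formula, not code taken from the diff):

    #include <cmath>
    #include <cstdint>
    #include <random>
    #include "paddle/phi/core/generator.h"

    int64_t SampleLogUniform(int64_t range, uint64_t seed) {
      auto engine = phi::GetCPURandomEngine(seed);
      std::uniform_real_distribution<double> dist(0.0, 1.0);
      // exp(u * log(range + 1)) - 1 biases draws toward small ids.
      double u = dist(*engine);
      int64_t value =
          static_cast<int64_t>(std::exp(u * std::log(range + 1.0))) - 1;
      return value % range;  // guard the rare boundary case value == range
    }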
@@ -15,11 +15,11 @@
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/uniform_random_op.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
......
@@ -20,17 +20,17 @@ limitations under the License. */
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/platform/place.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
 
 template <typename T>
 static inline void random_permate(T* data_ptr, int num, unsigned int seed) {
-  auto engine = framework::GetCPURandomEngine(seed);
+  auto engine = phi::GetCPURandomEngine(seed);
   for (int i = 0; i < num; ++i) {
     data_ptr[i] = static_cast<T>(i);
   }
......
@@ -16,10 +16,10 @@
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
......
@@ -21,8 +21,8 @@
 #include <sstream>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -56,7 +56,7 @@ class SamplingIdKernel : public framework::OpKernel<T> {
         static_cast<T>(context.Attr<float>("min")),
         static_cast<T>(context.Attr<float>("max")));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     std::vector<int64_t> ids(batch_size);
     for (int i = 0; i < batch_size; ++i) {
       T r = dist(*engine);
......
@@ -13,9 +13,9 @@
 // limitations under the License.
 
 #pragma once
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -36,7 +36,7 @@ static int get_seed(const framework::ExecutionContext& context) {
     }
   } else {
     std::string name = context.Attr<std::string>("rng_name");
-    auto rng = framework::GetRandomSeedGenerator(name);
+    auto rng = phi::GetRandomSeedGenerator(name);
     do {  // NOTE(wangxi): cpu dropout will use random seed if seed == 0
       seed = static_cast<int>(rng->Random64());
     } while (seed == 0);
......
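`get_seed` above relies on the named-generator registry that moves to phi in this commit: a generator is registered once under a string name and looked up by that name elsewhere. A sketch of the round trip (the name "dropout_rng" is illustrative):

    #include <cstdint>
    #include "paddle/phi/core/generator.h"

    void NamedGeneratorRoundTrip() {
      // Registering twice under one name throws AlreadyExists (see
      // generator.cc later in this diff), so register exactly once.
      phi::SetRandomSeedGenerator("dropout_rng", /*seed=*/2023);
      auto rng = phi::GetRandomSeedGenerator("dropout_rng");
      uint64_t seed = rng->Random64();
      (void)seed;
    }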
@@ -18,9 +18,9 @@ limitations under the License. */
 #include <random>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/infermeta/nullary.h"
 
 namespace paddle {
......
@@ -17,8 +17,8 @@ limitations under the License. */
 #include <limits>
 #include <random>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
......
@@ -15,9 +15,9 @@ limitations under the License. */
 #include <limits>
 #include <random>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/truncated_gaussian_random_op.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -41,7 +41,7 @@ class TruncatedGaussianRandomMLUKernel : public framework::OpKernel<T> {
     int64_t size = tensor->numel();
 
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       data_cpu[i] = truncated_normal(dist(*engine));
......
@@ -90,7 +90,7 @@ class NPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
     int64_t size = tensor->numel();
 
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       cpu_data[i] = truncated_normal(dist(*engine));
     }
......
@@ -14,10 +14,10 @@ limitations under the License. */
 
 #ifdef PADDLE_WITH_XPU
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/uniform_random_op.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -36,7 +36,7 @@ class XPUUniformRandomInplaceKernel : public framework::OpKernel<T> {
         static_cast<T>(ctx.Attr<float>("min")),
         static_cast<T>(ctx.Attr<float>("max")));
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       data_cpu[i] = dist(*engine);
     }
......
@@ -15,11 +15,11 @@ limitations under the License. */
 
 #include <string>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/platform/bfloat16.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/infermeta/nullary.h"
 
 namespace paddle {
@@ -35,7 +35,7 @@ inline void UniformRealDistribution(T *data,
   VLOG(4) << "[CPU] UniformRandomKernel<T>";
   std::uniform_real_distribution<T> dist(static_cast<T>(min),
                                          static_cast<T>(max));
-  auto engine = paddle::framework::GetCPURandomEngine(seed);
+  auto engine = phi::GetCPURandomEngine(seed);
 
   for (int64_t i = 0; i < size; ++i) {
     data[i] = dist(*engine);
@@ -50,7 +50,7 @@ inline void UniformRealDistribution(paddle::platform::bfloat16 *data,
                                     const unsigned int seed) {
   VLOG(4) << "[CPU] UniformRandomKernel<bfloat16>";
   std::uniform_real_distribution<float> dist(min, max);
-  auto engine = paddle::framework::GetCPURandomEngine(seed);
+  auto engine = phi::GetCPURandomEngine(seed);
 
   for (int64_t i = 0; i < size; ++i) {
     data[i] = static_cast<paddle::platform::bfloat16>(dist(*engine));
......
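The bfloat16 overload above exists because `std::uniform_real_distribution` is not defined for bfloat16: values are drawn as `float` and narrowed afterwards. A generic sketch of that workaround (`FillUniform` is illustrative, not the Paddle helper):

    #include <cstdint>
    #include <random>
    #include "paddle/phi/core/generator.h"

    template <typename T>
    void FillUniform(T* data, int64_t size, float min, float max,
                     unsigned int seed) {
      std::uniform_real_distribution<float> dist(min, max);  // draw as float
      auto engine = phi::GetCPURandomEngine(seed);
      for (int64_t i = 0; i < size; ++i) {
        data[i] = static_cast<T>(dist(*engine));  // narrow to T afterwards
      }
    }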
@@ -22,7 +22,7 @@
 #if defined(__NVCC__) || defined(__HIPCC__)
 #include <thrust/random.h>
 
-#include "paddle/fluid/framework/generator.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/full_kernel.h"
 #include "paddle/phi/kernels/funcs/distribution_helper.h"
 #include "paddle/phi/kernels/funcs/index_impl.cu.h"
......
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/mlu/mlu_baseop.h"
 #include "paddle/fluid/operators/uniform_random_op.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -68,7 +68,7 @@ class MLUUniformRandomKernel : public framework::OpKernel<T> {
         static_cast<T>(ctx.Attr<float>("min")),
         static_cast<T>(ctx.Attr<float>("max")));
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       data_cpu[i] = dist(*engine);
......
@@ -14,10 +14,10 @@ limitations under the License. */
 
 #include <string>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/uniform_random_op.h"
+#include "paddle/phi/core/generator.h"
 
 namespace paddle {
 namespace operators {
@@ -69,7 +69,7 @@ class NPUUniformRandomKernel : public framework::OpKernel<T> {
         static_cast<T>(ctx.Attr<float>("min")),
         static_cast<T>(ctx.Attr<float>("max")));
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-    auto engine = framework::GetCPURandomEngine(seed);
+    auto engine = phi::GetCPURandomEngine(seed);
     for (int64_t i = 0; i < size; ++i) {
       data_cpu[i] = dist(*engine);
......
@@ -19,7 +19,6 @@ limitations under the License. */
 #include <set>
 
 #include "glog/logging.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/memory/allocation/allocator_facade.h"
 #include "paddle/fluid/platform/device/device_wrapper.h"
 #include "paddle/fluid/platform/place.h"
@@ -27,6 +26,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/profiler/event_tracing.h"
 #include "paddle/phi/core/allocator.h"
 #include "paddle/phi/core/expect.h"
+#include "paddle/phi/core/generator.h"
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 #include "paddle/fluid/memory/allocation/cuda_device_context_allocator.h"
@@ -193,22 +193,20 @@ std::unique_ptr<DeviceContext> CreateDeviceContext(
         instance.GetAllocator(paddle::platform::CUDAPinnedPlace()).get());
     cuda_ctx->PartialInitWithAllocator();
-    dev_ctx->SetGenerator(
-        framework::DefaultCUDAGenerator(p.GetDeviceId()).get());
+    dev_ctx->SetGenerator(phi::DefaultCUDAGenerator(p.GetDeviceId()).get());
 #endif
   } else if (is_xpu_place(p)) {
 #if defined(PADDLE_WITH_XPU)
     dev_ctx->SetAllocator(
         memory::allocation::AllocatorFacade::Instance().GetAllocator(p).get());
-    dev_ctx->SetGenerator(
-        framework::DefaultXPUGenerator(p.GetDeviceId()).get());
+    dev_ctx->SetGenerator(phi::DefaultXPUGenerator(p.GetDeviceId()).get());
 #endif
   } else {
     dev_ctx->SetAllocator(
        memory::allocation::AllocatorFacade::Instance().GetAllocator(p).get());
-    dev_ctx->SetGenerator(framework::DefaultCPUGenerator().get());
+    dev_ctx->SetGenerator(phi::DefaultCPUGenerator().get());
   }
-  dev_ctx->SetHostGenerator(framework::DefaultCPUGenerator().get());
+  dev_ctx->SetHostGenerator(phi::DefaultCPUGenerator().get());
   dev_ctx->SetHostAllocator(memory::allocation::AllocatorFacade::Instance()
                                 .GetAllocator(platform::CPUPlace())
                                 .get());
......
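The wiring above gives every device context two generators: a device generator matching its place and a host (CPU) generator. A compressed sketch of the policy (`SetUpGenerators` is illustrative; `SetGenerator`/`SetHostGenerator` are the setters used in the hunk):

    #include "paddle/phi/core/generator.h"

    template <typename Context>
    void SetUpGenerators(Context* ctx, int device_id, bool is_gpu) {
      if (is_gpu) {
        ctx->SetGenerator(phi::DefaultCUDAGenerator(device_id).get());
      } else {
        ctx->SetGenerator(phi::DefaultCPUGenerator().get());
      }
      // The host-side generator is always the default CPU one.
      ctx->SetHostGenerator(phi::DefaultCPUGenerator().get());
    }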
@@ -21,10 +21,10 @@ limitations under the License. */
 #endif
 
 #include <memory>
+#include <sstream>
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/pybind/generator_py.h"
 
 namespace py = pybind11;
@@ -72,27 +72,24 @@ void BindGenerator(py::module* m_ptr) {
       });
   py::class_<std::mt19937_64>(m, "mt19937_64", "");
-  py::class_<framework::Generator, std::shared_ptr<framework::Generator>>(
-      m, "Generator")
+  py::class_<phi::Generator, std::shared_ptr<phi::Generator>>(m, "Generator")
       .def("__init__",
-           [](framework::Generator& self) {
-             new (&self) framework::Generator();
-           })
-      .def("get_state", &framework::Generator::GetState)
-      .def("set_state", &framework::Generator::SetState)
+           [](phi::Generator& self) { new (&self) phi::Generator(); })
+      .def("get_state", &phi::Generator::GetState)
+      .def("set_state", &phi::Generator::SetState)
       .def("manual_seed",
-           [](std::shared_ptr<framework::Generator>& self, uint64_t seed) {
+           [](std::shared_ptr<phi::Generator>& self, uint64_t seed) {
             self->SetCurrentSeed(seed);
            return self;
           })
-      .def("seed", &framework::Generator::Seed)
-      .def("initial_seed", &framework::Generator::GetCurrentSeed)
-      .def("random", &framework::Generator::Random64);
-  m.def("default_cpu_generator", &framework::DefaultCPUGenerator);
-  m.def("default_cuda_generator", &framework::DefaultCUDAGenerator);
-  m.def("default_xpu_generator", &framework::DefaultXPUGenerator);
-  m.def("set_random_seed_generator", &framework::SetRandomSeedGenerator);
-  m.def("get_random_seed_generator", &framework::GetRandomSeedGenerator);
+      .def("seed", &phi::Generator::Seed)
+      .def("initial_seed", &phi::Generator::GetCurrentSeed)
+      .def("random", &phi::Generator::Random64);
+  m.def("default_cpu_generator", &phi::DefaultCPUGenerator);
+  m.def("default_cuda_generator", &phi::DefaultCUDAGenerator);
+  m.def("default_xpu_generator", &phi::DefaultXPUGenerator);
+  m.def("set_random_seed_generator", &phi::SetRandomSeedGenerator);
+  m.def("get_random_seed_generator", &phi::GetRandomSeedGenerator);
 }
 }  // namespace pybind
 }  // namespace paddle
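The binding above shrinks because `phi::Generator` is now a concrete, default-constructible class. A stripped-down sketch of the same pybind11 pattern, assuming pybind11 is available (the module name `generator_sketch` is illustrative):

    #include <pybind11/pybind11.h>
    #include "paddle/phi/core/generator.h"

    namespace py = pybind11;

    PYBIND11_MODULE(generator_sketch, m) {
      py::class_<phi::Generator, std::shared_ptr<phi::Generator>>(m, "Generator")
          .def(py::init<>())  // concrete class: a plain init<> now suffices
          .def("seed", &phi::Generator::Seed)
          .def("initial_seed", &phi::Generator::GetCurrentSeed)
          .def("random", &phi::Generator::Random64);
      m.def("default_cpu_generator", &phi::DefaultCPUGenerator);
    }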
@@ -120,6 +120,11 @@ cc_library(
   SRCS mixed_vector.cc
   DEPS device_context place memory)
 
+cc_library(
+  generator
+  SRCS generator.cc
+  DEPS enforce place)
+
 # Will remove once we implemented MKLDNN_Tensor
 if(WITH_MKLDNN)
   add_dependencies(dense_tensor mkldnn)
......
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,19 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/framework/generator.h"
+#include "paddle/phi/core/generator.h"
 
 #include <glog/logging.h>
 
 #include <memory>
 #include <utility>
 
-#include "paddle/fluid/platform/device/gpu/gpu_info.h"
-#include "paddle/fluid/platform/device/xpu/xpu_info.h"
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/backends/gpu/gpu_info.h"
+#include "paddle/phi/backends/xpu/xpu_info.h"
+#include "paddle/phi/core/enforce.h"
 
-namespace paddle {
-namespace framework {
+namespace phi {
 
 const std::shared_ptr<Generator>& DefaultXPUGenerator(int64_t device_id) {
 #if defined(PADDLE_WITH_XPU)
@@ -35,13 +34,13 @@ const std::shared_ptr<Generator>& DefaultXPUGenerator(int64_t device_id) {
   static std::vector<std::shared_ptr<Generator>> default_xpu_generators;
 
   std::call_once(num_devices_init_flag, []() {
-    num_xpu_devices = paddle::platform::GetXPUDeviceCount();
+    num_xpu_devices = phi::backends::xpu::GetXPUDeviceCount();
     xpu_device_flags.resize(num_xpu_devices);
     default_xpu_generators.resize(num_xpu_devices);
   });
   if (device_id < 0) {
-    PADDLE_THROW(platform::errors::InvalidArgument(
-        "xpu device id shoule be greater than 0"));
+    PADDLE_THROW(
+        phi::errors::InvalidArgument("xpu device id shoule be greater than 0"));
   }
 
   std::call_once(xpu_device_flags[device_id], [device_id]() {
@@ -52,7 +51,7 @@ const std::shared_ptr<Generator>& DefaultXPUGenerator(int64_t device_id) {
   });
   return default_xpu_generators[device_id];
 #else
-  PADDLE_THROW(platform::errors::PermissionDenied(
+  PADDLE_THROW(phi::errors::PermissionDenied(
       "getDefaultXPUGenerator only support in XPU place"));
 #endif
 }
@@ -66,12 +65,12 @@ const std::shared_ptr<Generator>& DefaultCUDAGenerator(int64_t device_id) {
   static std::vector<std::shared_ptr<Generator>> default_cuda_generators;
 
   std::call_once(num_devices_init_flag, []() {
-    num_cuda_devices = paddle::platform::GetGPUDeviceCount();
+    num_cuda_devices = phi::backends::gpu::GetGPUDeviceCount();
    cuda_device_flags.resize(num_cuda_devices);
    default_cuda_generators.resize(num_cuda_devices);
   });
   if (device_id < 0) {
-    PADDLE_THROW(platform::errors::InvalidArgument(
+    PADDLE_THROW(phi::errors::InvalidArgument(
         "cuda device id shoule be greater than 0"));
   }
@@ -83,7 +82,7 @@ const std::shared_ptr<Generator>& DefaultCUDAGenerator(int64_t device_id) {
   });
   return default_cuda_generators[device_id];
 #else
-  PADDLE_THROW(platform::errors::PermissionDenied(
+  PADDLE_THROW(phi::errors::PermissionDenied(
       "getDefaultCUDAGenerator only support in CUDA place"));
 #endif
 }
@@ -107,7 +106,7 @@ const std::shared_ptr<Generator>& SetRandomSeedGenerator(
   auto iter = rng_map.find(name);
   PADDLE_ENFORCE_EQ(iter == rng_map.end(),
                     true,
-                    platform::errors::AlreadyExists(
+                    phi::errors::AlreadyExists(
                         "%s RandomSeedGenerator is already exist", name));
 
   auto generator = std::make_shared<Generator>(seed);
@@ -115,7 +114,7 @@ const std::shared_ptr<Generator>& SetRandomSeedGenerator(
   PADDLE_ENFORCE_EQ(
       emplace_success,
       true,
-      platform::errors::PermissionDenied(
+      phi::errors::PermissionDenied(
           "SetRandomSeedGenerator cannot emplace %s RandomSeedGenerator",
          name));
   return rng_map[name];
@@ -125,12 +124,12 @@ const std::shared_ptr<Generator>& GetRandomSeedGenerator(
     const std::string& name) {
   auto& rng_map = GetRandomSeedGeneratorMap();
   auto iter = rng_map.find(name);
-  PADDLE_ENFORCE_EQ(iter != rng_map.end(),
-                    true,
-                    platform::errors::NotFound(
-                        "%s RandomSeedGenerator is not found, please "
-                        "use `set_random_seed_generator` to set rng first",
-                        name));
+  PADDLE_ENFORCE_EQ(
+      iter != rng_map.end(),
+      true,
+      phi::errors::NotFound("%s RandomSeedGenerator is not found, please "
+                            "use `set_random_seed_generator` to set rng first",
+                            name));
   return iter->second;
 }
@@ -160,6 +159,43 @@ std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t seed) {
   }
 }
 
+Generator::Generator() {
+  auto seed = GetRandomSeed();
+  std::seed_seq seq({seed});
+  auto engine = std::make_shared<std::mt19937_64>(seq);
+  this->state_.cpu_engine = *engine;
+  this->state_.device = -1;
+  this->state_.current_seed = seed;
+  this->state_.thread_offset = 0;
+  this->engine_ = engine;
+  VLOG(4) << "initial seed: " << this->state_.current_seed
+          << ", cpu engine: " << &this->state_.cpu_engine;
+}
+
+Generator::Generator(uint64_t seed) {
+  std::seed_seq seq({seed});
+  auto engine = std::make_shared<std::mt19937_64>(seq);
+  this->state_.cpu_engine = *engine;
+  this->state_.device = -1;
+  this->state_.current_seed = seed;
+  this->state_.thread_offset = 0;
+  this->engine_ = engine;
+  VLOG(4) << "initial seed: " << this->state_.current_seed
+          << ", cpu engine: " << &this->state_.cpu_engine;
+}
+
+Generator::Generator(uint64_t seed, uint64_t device_id) {
+  std::seed_seq seq({seed});
+  auto engine = std::make_shared<std::mt19937_64>(seq);
+  this->state_.cpu_engine = *engine;
+  this->state_.device = device_id;
+  this->state_.current_seed = seed;
+  this->state_.thread_offset = 0;
+  this->engine_ = engine;
+  VLOG(4) << "initial seed: " << this->state_.current_seed
+          << ", cpu engine: " << &this->state_.cpu_engine;
+}
+
 phi::Generator::GeneratorState Generator::GetState() {
   std::lock_guard<std::mutex> lock(this->mu_);
   state_.cpu_engine = *engine_;
@@ -231,10 +267,9 @@ std::pair<uint64_t, uint64_t> Generator::IncrementOffset(
   this->state_.thread_offset += increament_offset;
   return std::make_pair(this->state_.current_seed, cur_offset);
 #else
-  PADDLE_THROW(platform::errors::PermissionDenied(
+  PADDLE_THROW(phi::errors::PermissionDenied(
       "Increment Offset only support in CUDA place"));
 #endif
 }
 
-}  // namespace framework
-}  // namespace paddle
+}  // namespace phi
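One behavioural note visible above: `IncrementOffset` is the device-side hand-off, reserving `increament_offset` counter states and returning the (seed, offset) pair that parameterizes a CUDA kernel's RNG; on non-CUDA builds it throws `PermissionDenied`. A usage sketch (CUDA build assumed; `ReserveRngStates` is illustrative):

    #include <cstdint>
    #include <utility>
    #include "paddle/phi/core/generator.h"

    std::pair<uint64_t, uint64_t> ReserveRngStates(uint64_t n, int device_id) {
      auto gen = phi::DefaultCUDAGenerator(device_id);
      // Returns {current_seed, offset before the increment}; throws on
      // CPU-only builds, as in the #else branch above.
      return gen->IncrementOffset(n);
    }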
@@ -14,12 +14,25 @@ limitations under the License. */
 
 #pragma once
 
-#include <cstdint>
+#include <stdint.h>
+
+#include <atomic>
+#include <deque>
+#include <iostream>  // temp for debug
 #include <memory>
+#include <mutex>  // NOLINT
 #include <random>
+#include <typeinfo>
+#include <utility>
 
 namespace phi {
 
+static uint64_t GetRandomSeed() {
+  std::random_device rd;
+  // double has 53 bit significant, so limit uint64 to 53 bits
+  return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
+}
+
 class Generator {
  public:
  struct GeneratorState {
@@ -29,27 +42,56 @@ class Generator {
     std::mt19937_64 cpu_engine;
   };
 
-  virtual ~Generator() = default;
+  Generator();
+
+  explicit Generator(uint64_t seed);
+
+  Generator(uint64_t seed, uint64_t device_id);
+
+  Generator(const Generator& other) = delete;
+
+  ~Generator() = default;
 
   // get random state
-  virtual GeneratorState GetState() = 0;
+  GeneratorState GetState();
   // set random state
-  virtual void SetState(const GeneratorState&) = 0;
+  void SetState(const GeneratorState&);
   // get current seed
-  virtual uint64_t GetCurrentSeed() = 0;
+  uint64_t GetCurrentSeed();
   // random a seed and get
-  virtual uint64_t Seed() = 0;
+  uint64_t Seed();
   // set seed
-  virtual void SetCurrentSeed(uint64_t seed) = 0;
+  void SetCurrentSeed(uint64_t seed);
   // get cpu engine
-  virtual std::shared_ptr<std::mt19937_64> GetCPUEngine() = 0;
+  std::shared_ptr<std::mt19937_64> GetCPUEngine();
   // set cpu engine
-  virtual void SetCPUEngine(std::shared_ptr<std::mt19937_64>) = 0;
 
-  virtual uint64_t Random64() = 0;
+  void SetCPUEngine(std::shared_ptr<std::mt19937_64>);
 
-  virtual std::pair<uint64_t, uint64_t> IncrementOffset(
-      uint64_t increament_offset) = 0;
+  uint64_t Random64();
 
-  virtual uint64_t get_device_id() = 0;
+  std::pair<uint64_t, uint64_t> IncrementOffset(uint64_t increament_offset);
+
+  uint64_t get_device_id() { return this->state_.device; }
+
+ private:
+  GeneratorState state_;
+  std::shared_ptr<std::mt19937_64> engine_;
+  mutable std::mutex mu_;
 };
 
+// The DefaultCPUGenerator is used in manual_seed()
+const std::shared_ptr<Generator>& DefaultCPUGenerator();
+
+const std::shared_ptr<Generator>& DefaultCUDAGenerator(int64_t device_id = -1);
+
+const std::shared_ptr<Generator>& DefaultXPUGenerator(int64_t device_id = -1);
+
+std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t);
+
+const std::shared_ptr<Generator>& SetRandomSeedGenerator(
+    const std::string& name, uint64_t seed);
+
+const std::shared_ptr<Generator>& GetRandomSeedGenerator(
+    const std::string& name);
 }  // namespace phi
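Since `GetState`/`SetState` snapshot and restore the full engine state, a sequence of draws can be replayed exactly. A short sketch against the header above:

    #include <cstdint>
    #include "paddle/phi/core/generator.h"

    void ReplayDraws() {
      phi::Generator gen(/*seed=*/7);
      auto snapshot = gen.GetState();    // copy of seed, offset, and engine
      uint64_t first = gen.Random64();
      gen.SetState(snapshot);            // rewind the engine
      uint64_t replay = gen.Random64();  // equal to `first`
      (void)first;
      (void)replay;
    }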
@@ -458,7 +458,7 @@ void ClassCenterSampleKernel(const Context& dev_ctx,
        (NumBlocks(num_classes) * kNumCUDAThreads * vec_size) +
        1) *
       vec_size;
-  // auto gen_cuda = paddle::framework::DefaultCUDAGenerator(device_id);
+  // auto gen_cuda = phi::DefaultCUDAGenerator(device_id);
   auto gen_cuda = dev_ctx.GetGenerator();
   if (!fix_seed) {
     auto seed_offset = gen_cuda->IncrementOffset(offset);
......