diff --git a/paddle/phi/kernels/cpu/class_center_sample_kernel.cc b/paddle/phi/kernels/cpu/class_center_sample_kernel.cc
index 6667ea05ab6ac8673d59118221f0e43bf47c214b..50ad819f91b3bbf4022438842fc1c275dd5a245f 100644
--- a/paddle/phi/kernels/cpu/class_center_sample_kernel.cc
+++ b/paddle/phi/kernels/cpu/class_center_sample_kernel.cc
@@ -16,8 +16,8 @@
 #include <...>
 #include <...>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 
 namespace phi {
@@ -83,7 +83,13 @@ void ClassCenterSampleKernel(const Context& dev_ctx,
     seed = rnd();
   }
   std::uniform_int_distribution<T> dist(0, num_classes - 1);
-  auto engine = paddle::framework::GetCPURandomEngine(seed);
+  std::shared_ptr<std::mt19937_64> engine;
+  if (seed) {
+    engine = std::make_shared<std::mt19937_64>();
+    engine->seed(seed);
+  } else {
+    engine = dev_ctx.GetGenerator()->GetCPUEngine();
+  }
   // sample negative class center randomly
   while (unique_label.size() < static_cast<size_t>(num_samples)) {
     T neg = dist(*engine);
diff --git a/paddle/phi/kernels/cpu/dropout_kernel.cc b/paddle/phi/kernels/cpu/dropout_kernel.cc
index 6f1dc1617989d162804ac66cd191ce920e30a75a..c49c9f96085ad6d5906ceae95793af146fcad9cd 100644
--- a/paddle/phi/kernels/cpu/dropout_kernel.cc
+++ b/paddle/phi/kernels/cpu/dropout_kernel.cc
@@ -14,8 +14,8 @@
 
 #include "paddle/phi/kernels/dropout_kernel.h"
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/expand_kernel.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
@@ -82,7 +82,13 @@ void DropoutRawKernel(const Context& dev_ctx,
     } else {
       seed_data = fix_seed ? seed : 0;
     }
-    auto engine = paddle::framework::GetCPURandomEngine(seed_data);
+    std::shared_ptr<std::mt19937_64> engine;
+    if (seed_data) {
+      engine = std::make_shared<std::mt19937_64>();
+      engine->seed(seed_data);
+    } else {
+      engine = dev_ctx.GetGenerator()->GetCPUEngine();
+    }
 
     std::uniform_real_distribution<float> dist(0, 1);
 
@@ -147,7 +153,13 @@ void DropoutNdKernel(const Context& dev_ctx,
     } else {
      seed_data = fix_seed ? seed : 0;
     }
-    auto engine = paddle::framework::GetCPURandomEngine(seed_data);
+    std::shared_ptr<std::mt19937_64> engine;
+    if (seed_data) {
+      engine = std::make_shared<std::mt19937_64>();
+      engine->seed(seed_data);
+    } else {
+      engine = dev_ctx.GetGenerator()->GetCPUEngine();
+    }
 
     std::uniform_real_distribution<float> dist(0, 1);
 
diff --git a/paddle/phi/kernels/cpu/gaussian_kernel.cc b/paddle/phi/kernels/cpu/gaussian_kernel.cc
index c4efc508aac83babc32ef2e0d618a351c1f430a1..2eb783c695b65f26c2076f4d63526caa0bd4fb0c 100644
--- a/paddle/phi/kernels/cpu/gaussian_kernel.cc
+++ b/paddle/phi/kernels/cpu/gaussian_kernel.cc
@@ -14,8 +14,8 @@
 
 #include "paddle/phi/kernels/gaussian_kernel.h"
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 
 namespace phi {
@@ -35,7 +35,13 @@ void GaussianKernel(const Context& dev_ctx,
   tensor->Resize(phi::make_ddim(shape.GetData()));
   int64_t size = tensor->numel();
   T* data = dev_ctx.template Alloc<T>(tensor);
-  auto engine = paddle::framework::GetCPURandomEngine(seed);
+  std::shared_ptr<std::mt19937_64> engine;
+  if (seed) {
+    engine = std::make_shared<std::mt19937_64>();
+    engine->seed(seed);
+  } else {
+    engine = dev_ctx.GetGenerator()->GetCPUEngine();
+  }
 
   for (int64_t i = 0; i < size; ++i) {
     data[i] = dist(*engine);
diff --git a/paddle/phi/kernels/cpu/rnn_functor.h b/paddle/phi/kernels/cpu/rnn_functor.h
index e6139b45272e9cea1f21e7678e805b83bbc854c5..cdf024d1ec960434066756a177096b786ef94aea 100644
--- a/paddle/phi/kernels/cpu/rnn_functor.h
+++ b/paddle/phi/kernels/cpu/rnn_functor.h
@@ -14,9 +14,9 @@
 
 #pragma once
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/empty_kernel.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
@@ -148,7 +148,13 @@ void DropoutCpuFunctionInplace(const CPUContext& dev_ctx,
   if (dropout_prob == 1.0f) {
     std::fill(mask_data, mask_data + size, static_cast<T>(0));
   } else {
-    auto engine = paddle::framework::GetCPURandomEngine(seed_number);
+    std::shared_ptr<std::mt19937_64> engine;
+    if (seed_number) {
+      engine = std::make_shared<std::mt19937_64>();
+      engine->seed(seed_number);
+    } else {
+      engine = dev_ctx.GetGenerator()->GetCPUEngine();
+    }
     std::uniform_real_distribution<float> dist(0, 1);
     for (size_t i = 0; i < size; ++i) {
       if (dist(*engine) < dropout_prob) {
diff --git a/paddle/phi/kernels/cpu/rrelu_kernel.cc b/paddle/phi/kernels/cpu/rrelu_kernel.cc
index 4c6e30beddfa3c7144e4178e88e63d21c1145c82..d5cf21b1131121d91c92ef5885641aa1d5742a58 100644
--- a/paddle/phi/kernels/cpu/rrelu_kernel.cc
+++ b/paddle/phi/kernels/cpu/rrelu_kernel.cc
@@ -14,8 +14,8 @@
 
 #include "paddle/phi/kernels/rrelu_kernel.h"
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 
 namespace phi {
@@ -50,7 +50,7 @@ void RReluKernel(const Context& dev_ctx,
     return;
   }
 
-  auto engine = paddle::framework::GetCPURandomEngine(0);
+  auto engine = dev_ctx.GetGenerator()->GetCPUEngine();
 
   std::uniform_real_distribution<float> dist(lower, upper);
 
diff --git a/paddle/phi/kernels/gpu/gaussian_kernel.cu b/paddle/phi/kernels/gpu/gaussian_kernel.cu
index 6caf56c3b51278bfda265642c42afa62c6960482..d0f839bd677d477297926f2cfbb63b810c5ae8c6 100644
--- a/paddle/phi/kernels/gpu/gaussian_kernel.cu
+++ b/paddle/phi/kernels/gpu/gaussian_kernel.cu
@@ -16,10 +16,10 @@
 
 #include <...>
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/distribution_helper.h"
 #include "paddle/phi/kernels/funcs/index_impl.cu.h"
diff --git a/paddle/phi/kernels/gpu/gumbel_softmax_kernel.cu b/paddle/phi/kernels/gpu/gumbel_softmax_kernel.cu
index 33bf0eba380e446104059f35e37fb8ba556af16e..072c38b1303070b1bbd56ac43a761ae7858867b6 100644
--- a/paddle/phi/kernels/gpu/gumbel_softmax_kernel.cu
+++ b/paddle/phi/kernels/gpu/gumbel_softmax_kernel.cu
@@ -27,8 +27,8 @@
 namespace cub = hipcub;
 #endif
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/tensor_util.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/funcs/distribution_helper.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
diff --git a/paddle/phi/kernels/gpu/rnn_kernel.cu.cc b/paddle/phi/kernels/gpu/rnn_kernel.cu.cc
index 0eb74303f41b41b79698a956807a11bbd570b4fc..079a159ee81e9f163e43f7e8bf3a762c63c044ae 100644
--- a/paddle/phi/kernels/gpu/rnn_kernel.cu.cc
+++ b/paddle/phi/kernels/gpu/rnn_kernel.cu.cc
@@ -14,9 +14,9 @@
 
 #include "paddle/phi/kernels/rnn_kernel.h"
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/empty_kernel.h"
 #include "paddle/phi/kernels/gpu/rnn_functor.h"
@@ -175,8 +175,7 @@ void RnnKernel(const Context &dev_ctx,
   if (!is_test) {
     if (seed == 0) {
       // If not specify seed, use global Generator to generate seed.
-      int device_id = dev_ctx.GetPlace().GetDeviceId();
-      auto gen_cuda = paddle::framework::DefaultCUDAGenerator(device_id);
+      auto gen_cuda = dev_ctx.GetGenerator();
       seed = static_cast<int>(gen_cuda->Random64());
     }
     // else use `ctx.Attr<int>("seed")` specified seed
diff --git a/paddle/phi/kernels/xpu/gaussian_kernel.cc b/paddle/phi/kernels/xpu/gaussian_kernel.cc
index a52a0d429ff80b83525ee779ae882c976476d9ca..a9c22632be9b3d1f1b36950f0cb76a885ff32039 100644
--- a/paddle/phi/kernels/xpu/gaussian_kernel.cc
+++ b/paddle/phi/kernels/xpu/gaussian_kernel.cc
@@ -14,9 +14,9 @@
 
 #include "paddle/phi/kernels/gaussian_kernel.h"
 
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 
 namespace phi {
@@ -36,7 +36,13 @@ void GaussianKernel(const Context& ctx,
   uint64_t seed_v = static_cast<uint64_t>(seed);
   // TODO(pangyoki): implement GetXPURandomEngine to set different seeds on
   // corresponding XPU device.
-  auto engine = paddle::framework::GetCPURandomEngine(seed_v);
+  std::shared_ptr<std::mt19937_64> engine;
+  if (seed_v) {
+    engine = std::make_shared<std::mt19937_64>();
+    engine->seed(seed_v);
+  } else {
+    engine = ctx.GetGenerator()->GetCPUEngine();
+  }
 
   std::unique_ptr<T[]> data_cpu(new T[size]);
   for (int64_t i = 0; i < size; ++i) {