Unverified commit 28c56d77 authored by Wang Xin, committed by GitHub

[PHI decoupling] remove fluid/framework/generator.h from phi (#47822)

* remove fluid/framework/generator.h from phi

* fix PR-CI-Kunlun-KP-Build failure
Parent 60ec3107
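
Each hunk below applies the same substitution: the fluid helper paddle::framework::GetCPURandomEngine(seed) is replaced by seed handling written directly against phi's Generator, obtained from the device context. A minimal sketch of that pattern, assuming a phi::CPUContext whose GetGenerator() returns a generator exposing GetCPUEngine(); the helper name GetEngine is hypothetical and only for illustration:

// Sketch of the seed-selection pattern repeated throughout this commit.
// Assumes: dev_ctx is a phi::CPUContext; a non-zero seed requests a
// deterministic, locally seeded engine, otherwise the engine owned by the
// context's global generator is used. GetEngine is a hypothetical helper.
#include <memory>
#include <random>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/generator.h"

std::shared_ptr<std::mt19937_64> GetEngine(const phi::CPUContext& dev_ctx,
                                           uint64_t seed) {
  std::shared_ptr<std::mt19937_64> engine;
  if (seed) {
    // Deterministic path: build and seed a local engine.
    engine = std::make_shared<std::mt19937_64>();
    engine->seed(seed);
  } else {
    // Default path: reuse the global generator's CPU engine.
    engine = dev_ctx.GetGenerator()->GetCPUEngine();
  }
  return engine;
}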
@@ -16,8 +16,8 @@
 #include <set>
 #include <vector>
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 namespace phi {
@@ -83,7 +83,13 @@ void ClassCenterSampleKernel(const Context& dev_ctx,
     seed = rnd();
   }
   std::uniform_int_distribution<T> dist(0, num_classes - 1);
-  auto engine = paddle::framework::GetCPURandomEngine(seed);
+  std::shared_ptr<std::mt19937_64> engine;
+  if (seed) {
+    engine = std::make_shared<std::mt19937_64>();
+    engine->seed(seed);
+  } else {
+    engine = dev_ctx.GetGenerator()->GetCPUEngine();
+  }
   // sample negative class center randomly
   while (unique_label.size() < static_cast<size_t>(num_samples)) {
     T neg = dist(*engine);
......
@@ -14,8 +14,8 @@
 #include "paddle/phi/kernels/dropout_kernel.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/expand_kernel.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
@@ -82,7 +82,13 @@ void DropoutRawKernel(const Context& dev_ctx,
     } else {
       seed_data = fix_seed ? seed : 0;
     }
-    auto engine = paddle::framework::GetCPURandomEngine(seed_data);
+    std::shared_ptr<std::mt19937_64> engine;
+    if (seed_data) {
+      engine = std::make_shared<std::mt19937_64>();
+      engine->seed(seed_data);
+    } else {
+      engine = dev_ctx.GetGenerator()->GetCPUEngine();
+    }
     std::uniform_real_distribution<float> dist(0, 1);
@@ -147,7 +153,13 @@ void DropoutNdKernel(const Context& dev_ctx,
     } else {
       seed_data = fix_seed ? seed : 0;
     }
-    auto engine = paddle::framework::GetCPURandomEngine(seed_data);
+    std::shared_ptr<std::mt19937_64> engine;
+    if (seed_data) {
+      engine = std::make_shared<std::mt19937_64>();
+      engine->seed(seed_data);
+    } else {
+      engine = dev_ctx.GetGenerator()->GetCPUEngine();
+    }
     std::uniform_real_distribution<float> dist(0, 1);
......
@@ -14,8 +14,8 @@
 #include "paddle/phi/kernels/gaussian_kernel.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 namespace phi {
@@ -35,7 +35,13 @@ void GaussianKernel(const Context& dev_ctx,
   tensor->Resize(phi::make_ddim(shape.GetData()));
   int64_t size = tensor->numel();
   T* data = dev_ctx.template Alloc<T>(tensor);
-  auto engine = paddle::framework::GetCPURandomEngine(seed);
+  std::shared_ptr<std::mt19937_64> engine;
+  if (seed) {
+    engine = std::make_shared<std::mt19937_64>();
+    engine->seed(seed);
+  } else {
+    engine = dev_ctx.GetGenerator()->GetCPUEngine();
+  }
   for (int64_t i = 0; i < size; ++i) {
     data[i] = dist(*engine);
......
@@ -14,9 +14,9 @@
 #pragma once
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/empty_kernel.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
@@ -148,7 +148,13 @@ void DropoutCpuFunctionInplace(const CPUContext& dev_ctx,
   if (dropout_prob == 1.0f) {
     std::fill(mask_data, mask_data + size, static_cast<uint8_t>(0));
   } else {
-    auto engine = paddle::framework::GetCPURandomEngine(seed_number);
+    std::shared_ptr<std::mt19937_64> engine;
+    if (seed_number) {
+      engine = std::make_shared<std::mt19937_64>();
+      engine->seed(seed_number);
+    } else {
+      engine = dev_ctx.GetGenerator()->GetCPUEngine();
+    }
     std::uniform_real_distribution<float> dist(0, 1);
     for (size_t i = 0; i < size; ++i) {
       if (dist(*engine) < dropout_prob) {
......
@@ -14,8 +14,8 @@
 #include "paddle/phi/kernels/rrelu_kernel.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 namespace phi {
@@ -50,7 +50,7 @@ void RReluKernel(const Context& dev_ctx,
     return;
   }
-  auto engine = paddle::framework::GetCPURandomEngine(0);
+  auto engine = dev_ctx.GetGenerator()->GetCPUEngine();
   std::uniform_real_distribution<float> dist(lower, upper);
......
@@ -16,10 +16,10 @@
 #include <thrust/random.h>
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/distribution_helper.h"
 #include "paddle/phi/kernels/funcs/index_impl.cu.h"
......
@@ -27,8 +27,8 @@
 namespace cub = hipcub;
 #endif
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/tensor_util.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/kernels/funcs/distribution_helper.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
......
@@ -14,9 +14,9 @@
 #include "paddle/phi/kernels/rnn_kernel.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/empty_kernel.h"
 #include "paddle/phi/kernels/gpu/rnn_functor.h"
@@ -175,8 +175,7 @@ void RnnKernel(const Context &dev_ctx,
   if (!is_test) {
     if (seed == 0) {
       // If not specify seed, use global Generator to generate seed.
-      int device_id = dev_ctx.GetPlace().GetDeviceId();
-      auto gen_cuda = paddle::framework::DefaultCUDAGenerator(device_id);
+      auto gen_cuda = dev_ctx.GetGenerator();
       seed = static_cast<int>(gen_cuda->Random64());
     }
     // else use `ctx.Attr<int>("seed")` specified seed
......
@@ -14,9 +14,9 @@
 #include "paddle/phi/kernels/gaussian_kernel.h"
-#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/generator.h"
 #include "paddle/phi/core/kernel_registry.h"
 namespace phi {
@@ -36,7 +36,13 @@ void GaussianKernel(const Context& ctx,
   uint64_t seed_v = static_cast<uint64_t>(seed);
   // TODO(pangyoki): implement GetXPURandomEngine to set different seeds on
   // corresponding XPU device.
-  auto engine = paddle::framework::GetCPURandomEngine(seed_v);
+  std::shared_ptr<std::mt19937_64> engine;
+  if (seed_v) {
+    engine = std::make_shared<std::mt19937_64>();
+    engine->seed(seed_v);
+  } else {
+    engine = ctx.GetGenerator()->GetCPUEngine();
+  }
   std::unique_ptr<T[]> data_cpu(new T[size]);
   for (int64_t i = 0; i < size; ++i) {
......
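
The RnnKernel hunk for GPU follows the same principle but needs only a fresh seed rather than an engine: when the caller leaves seed at 0, one is drawn from the context's generator via Random64(). A minimal sketch, assuming a phi::GPUContext; the helper name ResolveSeed is hypothetical and not part of the commit:

// Sketch of the GPU-side seed resolution from the RnnKernel hunk above.
// Assumes: dev_ctx is a phi::GPUContext; ResolveSeed is a hypothetical helper.
#include <cstdint>

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/generator.h"

int ResolveSeed(const phi::GPUContext& dev_ctx, int seed) {
  if (seed == 0) {
    // No seed specified: draw one from the generator owned by the device
    // context (replaces paddle::framework::DefaultCUDAGenerator(device_id)).
    seed = static_cast<int>(dev_ctx.GetGenerator()->Random64());
  }
  return seed;
}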