Unverified commit 3e7be9c9, authored by zhangyuqin1998 and committed by GitHub

Rename randint_raw and move it to legacy (#53157)

* Rename randint_raw and move it to legacy

* Update fetch_v2_op.cc

* Update randint_kernel.cc

* Update randint_kernel.cu

* Empty commit to set up deployments
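In short: the kernel formerly registered as "randint_raw" (the variant taking an explicit seed) is renamed to "randint_with_seed" and moved into the legacy kernel files added below, while the public "randint" kernel absorbs the seed = 0 path directly instead of forwarding. Schematically, using the signatures from the diff below:

// Before: the public kernel forwarded to the raw kernel with seed = 0.
void RandintRawKernel(const Context& dev_ctx, int low, int high,
                      const IntArray& shape, DataType dtype, int seed,
                      DenseTensor* out);  // registered as "randint_raw"

// After: the seeded variant lives on under a clearer name, and
// RandintKernel implements the seed = 0 path itself.
void RandintWithSeedKernel(const Context& dev_ctx, int low, int high,
                           const IntArray& shape, DataType dtype, int seed,
                           DenseTensor* out);  // "randint_with_seed"
void RandintKernel(const Context& dev_ctx, int low, int high,
                   const IntArray& shape, DataType dtype,
                   DenseTensor* out);  // "randint"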
Parent 5a44bf7e
@@ -22,13 +22,13 @@
 namespace phi {
 
 template <typename T, typename Context>
-void RandintRawKernel(const Context& dev_ctx,
-                      int low,
-                      int high,
-                      const IntArray& shape,
-                      DataType dtype,
-                      int seed,
-                      DenseTensor* out) {
+void RandintKernel(const Context& dev_ctx,
+                   int low,
+                   int high,
+                   const IntArray& shape,
+                   DataType dtype,
+                   DenseTensor* out) {
+  int seed = 0;
   out->Resize(phi::make_ddim(shape.GetData()));
   T* data = dev_ctx.template Alloc<T>(out);
   auto numel = out->numel();
@@ -45,20 +45,7 @@ void RandintRawKernel(const Context& dev_ctx,
   }
 }
 
-template <typename T, typename Context>
-void RandintKernel(const Context& dev_ctx,
-                   int low,
-                   int high,
-                   const IntArray& shape,
-                   DataType dtype,
-                   DenseTensor* out) {
-  RandintRawKernel<T>(dev_ctx, low, high, shape, dtype, 0, out);
-}
-
 }  // namespace phi
 
-PD_REGISTER_KERNEL(
-    randint_raw, CPU, ALL_LAYOUT, phi::RandintRawKernel, int, int64_t) {}
-
 PD_REGISTER_KERNEL(randint, CPU, ALL_LAYOUT, phi::RandintKernel, int, int64_t) {
 }
@@ -23,21 +23,6 @@
 namespace phi {
 
 template <typename T, typename Context>
-void RandintRawKernel(const Context& dev_ctx,
-                      int low,
-                      int high,
-                      const IntArray& shape,
-                      DataType dtype,
-                      int seed,
-                      DenseTensor* out) {
-  out->Resize(phi::make_ddim(shape.GetData()));
-  T* data = dev_ctx.template Alloc<T>(out);
-  funcs::uniform_distribution<uint32_t> dist;
-  funcs::uniform_int_transform<T, uint32_t> trans(low, high);
-  funcs::distribution_and_transform<T>(dev_ctx, out, dist, trans);
-}
-
-template <typename T, typename Context>
 void RandintKernel(const Context& dev_ctx,
                    int low,
@@ -45,13 +30,15 @@ void RandintKernel(const Context& dev_ctx,
                    const IntArray& shape,
                    DataType dtype,
                    DenseTensor* out) {
-  RandintRawKernel<T>(dev_ctx, low, high, shape, dtype, 0, out);
+  int seed = 0;
+  out->Resize(phi::make_ddim(shape.GetData()));
+  T* data = dev_ctx.template Alloc<T>(out);
+  funcs::uniform_distribution<uint32_t> dist;
+  funcs::uniform_int_transform<T, uint32_t> trans(low, high);
+  funcs::distribution_and_transform<T>(dev_ctx, out, dist, trans);
 }
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(
-    randint_raw, GPU, ALL_LAYOUT, phi::RandintRawKernel, int, int64_t) {}
-
 PD_REGISTER_KERNEL(randint, GPU, ALL_LAYOUT, phi::RandintKernel, int, int64_t) {
 }
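For intuition, the GPU path draws raw 32-bit integers via funcs::uniform_distribution and folds them onto the half-open interval [low, high) with funcs::uniform_int_transform. A rough, standalone sketch of that fold (an assumption based on the usage above, not the verbatim phi functor from paddle/phi/kernels/funcs/distribution_helper.h):

#include <cstdint>
#include <initializer_list>
#include <iostream>

// Fold a raw 32-bit draw onto [low, high). Simple modulo folding like this
// is the most direct scheme, though it carries a slight modulo bias.
template <typename T>
T UniformIntTransform(uint32_t raw, int low, int high) {
  const uint32_t range = static_cast<uint32_t>(high - low);
  return static_cast<T>(low + static_cast<int64_t>(raw % range));
}

int main() {
  for (uint32_t raw : {0u, 1234567890u, 4294967295u}) {
    std::cout << UniformIntTransform<int>(raw, 3, 7) << '\n';  // in [3, 7)
  }
}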
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <random>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

template <typename T, typename Context>
void RandintWithSeedKernel(const Context& dev_ctx,
                           int low,
                           int high,
                           const IntArray& shape,
                           DataType dtype,
                           int seed,
                           DenseTensor* out) {
  out->Resize(phi::make_ddim(shape.GetData()));
  T* data = dev_ctx.template Alloc<T>(out);
  auto numel = out->numel();
  std::shared_ptr<std::mt19937_64> engine;
  if (seed) {
    engine = std::make_shared<std::mt19937_64>();
    engine->seed(seed);
  } else {
    engine = dev_ctx.GetGenerator()->GetCPUEngine();
  }
  std::uniform_int_distribution<T> dist(low, high - 1);
  for (int64_t i = 0; i < numel; ++i) {
    data[i] = dist(*engine);
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(randint_with_seed,
                   CPU,
                   ALL_LAYOUT,
                   phi::RandintWithSeedKernel,
                   int,
                   int64_t) {}
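For reference, a minimal standalone sketch of the sampling scheme the CPU kernel above uses, with the phi context stripped out (std::random_device stands in for the global generator the kernel falls back to when seed == 0):

#include <cstdint>
#include <iostream>
#include <memory>
#include <random>

int main() {
  const int low = 0, high = 10, seed = 42;
  // Engine choice mirrors the kernel: a nonzero seed gets a private,
  // reproducible engine; seed == 0 would fall back to a shared engine
  // (dev_ctx.GetGenerator()->GetCPUEngine() in the kernel).
  std::shared_ptr<std::mt19937_64> engine;
  if (seed) {
    engine = std::make_shared<std::mt19937_64>();
    engine->seed(seed);
  } else {
    engine = std::make_shared<std::mt19937_64>(std::random_device{}());
  }
  // Note the high - 1: uniform_int_distribution is inclusive at both ends,
  // so this samples the half-open interval [low, high), as randint requires.
  std::uniform_int_distribution<int64_t> dist(low, high - 1);
  for (int i = 0; i < 4; ++i) {
    std::cout << dist(*engine) << '\n';
  }
}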
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <random>

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/distribution_helper.h"

namespace phi {

template <typename T, typename Context>
void RandintWithSeedKernel(const Context& dev_ctx,
                           int low,
                           int high,
                           const IntArray& shape,
                           DataType dtype,
                           int seed,
                           DenseTensor* out) {
  out->Resize(phi::make_ddim(shape.GetData()));
  T* data = dev_ctx.template Alloc<T>(out);
  funcs::uniform_distribution<uint32_t> dist;
  funcs::uniform_int_transform<T, uint32_t> trans(low, high);
  funcs::distribution_and_transform<T>(dev_ctx, out, dist, trans);
}

}  // namespace phi

PD_REGISTER_KERNEL(randint_with_seed,
                   GPU,
                   ALL_LAYOUT,
                   phi::RandintWithSeedKernel,
                   int,
                   int64_t) {}
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <random>

#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/generator.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

template <typename T, typename Context>
void RandintWithSeedKernel(const Context& dev_ctx,
                           int low,
                           int high,
                           const IntArray& shape,
                           DataType dtype,
                           int seed,
                           DenseTensor* out) {
  int64_t size = out->numel();
  out->Resize(phi::make_ddim(shape.GetData()));
  T* data = dev_ctx.template Alloc<T>(out);
  auto numel = out->numel();
  std::shared_ptr<std::mt19937_64> engine;
  if (seed) {
    engine = std::make_shared<std::mt19937_64>();
    engine->seed(seed);
  } else {
    engine = dev_ctx.GetGenerator()->GetCPUEngine();
  }
  std::unique_ptr<T[]> data_cpu(new T[size]);
  std::uniform_int_distribution<T> dist(low, high - 1);
  for (int64_t i = 0; i < numel; ++i) {
    data_cpu[i] = dist(*engine);
  }
  memory_utils::Copy(dev_ctx.GetPlace(),
                     data,
                     phi::CPUPlace(),
                     reinterpret_cast<void*>(data_cpu.get()),
                     size * sizeof(T));
}

}  // namespace phi

PD_REGISTER_KERNEL(randint_with_seed,
                   XPU,
                   ALL_LAYOUT,
                   phi::RandintWithSeedKernel,
                   int,
                   int64_t) {}
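The XPU kernel above has no device-side RNG path, so it samples on the host and copies the finished buffer over. A sketch of that staging pattern in plain C++ (std::memcpy and the fake_device vector stand in for memory_utils::Copy and the device allocation returned by Alloc):

#include <cstdint>
#include <cstring>
#include <memory>
#include <random>
#include <vector>

// Sample with a CPU engine into a temporary host buffer, then copy the whole
// buffer to the destination in one shot, as the XPU kernel does.
void FillRandintViaHost(int low, int high, int seed, int64_t size, int* dst) {
  std::mt19937_64 engine(seed);
  std::uniform_int_distribution<int> dist(low, high - 1);
  std::unique_ptr<int[]> staging(new int[size]);
  for (int64_t i = 0; i < size; ++i) {
    staging[i] = dist(engine);
  }
  std::memcpy(dst, staging.get(), size * sizeof(int));  // the device copy
}

int main() {
  std::vector<int> fake_device(8);
  FillRandintViaHost(0, 10, 42, fake_device.size(), fake_device.data());
}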
@@ -27,13 +27,4 @@ void RandintKernel(const Context& dev_ctx,
                    DataType dtype,
                    DenseTensor* out);
 
-template <typename T, typename Context>
-void RandintRawKernel(const Context& dev_ctx,
-                      int low,
-                      int high,
-                      const IntArray& shape,
-                      DataType dtype,
-                      int seed,
-                      DenseTensor* out);
-
 }  // namespace phi
@@ -24,13 +24,13 @@
 namespace phi {
 
 template <typename T, typename Context>
-void RandintRawKernel(const Context& dev_ctx,
-                      int low,
-                      int high,
-                      const IntArray& shape,
-                      DataType dtype,
-                      int seed,
-                      DenseTensor* out) {
+void RandintKernel(const Context& dev_ctx,
+                   int low,
+                   int high,
+                   const IntArray& shape,
+                   DataType dtype,
+                   DenseTensor* out) {
+  int seed = 0;
   int64_t size = out->numel();
   out->Resize(phi::make_ddim(shape.GetData()));
   T* data = dev_ctx.template Alloc<T>(out);
@@ -54,20 +54,7 @@ void RandintRawKernel(const Context& dev_ctx,
                      size * sizeof(T));
 }
 
-template <typename T, typename Context>
-void RandintKernel(const Context& dev_ctx,
-                   int low,
-                   int high,
-                   const IntArray& shape,
-                   DataType dtype,
-                   DenseTensor* out) {
-  RandintRawKernel<T>(dev_ctx, low, high, shape, dtype, 0, out);
-}
-
 }  // namespace phi
 
-PD_REGISTER_KERNEL(
-    randint_raw, XPU, ALL_LAYOUT, phi::RandintRawKernel, int, int64_t) {}
-
 PD_REGISTER_KERNEL(randint, XPU, ALL_LAYOUT, phi::RandintKernel, int, int64_t) {
 }
@@ -21,7 +21,7 @@ KernelSignature RandintOpArgumentMapping(const ArgumentMappingContext& ctx) {
   if (seed) {
     if (ctx.InputSize("ShapeTensorList") > 0) {
       return KernelSignature(
-          "randint_raw",
+          "randint_with_seed",
           {},
           {"low", "high", "ShapeTensorList", "seed", "dtype"},
           {"Out"});
@@ -29,12 +29,12 @@ KernelSignature RandintOpArgumentMapping(const ArgumentMappingContext& ctx) {
       const auto& shape =
           paddle::any_cast<std::vector<int64_t>>(ctx.Attr("shape"));
       if (ctx.HasInput("ShapeTensor") && shape.empty()) {
-        return KernelSignature("randint_raw",
+        return KernelSignature("randint_with_seed",
                                {},
                                {"low", "high", "ShapeTensor", "seed", "dtype"},
                                {"Out"});
       } else {
-        return KernelSignature("randint_raw",
+        return KernelSignature("randint_with_seed",
                                {},
                                {"low", "high", "shape", "seed", "dtype"},
                                {"Out"});