Unverified commit d1595c26, authored by zyfncg, committed by GitHub

[PHI] adjust the empty kernel and dev_api (#39958)

* remove the empty kernel in fluid and adjust the params of the empty dev_api

* polish code

* revert fluid empty kernel
Parent commit: 27536a32
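Taken together, the dev_api changes below remove the explicit DataType parameter from Empty/EmptyLike and Full/FullLike and derive the dtype from the template argument T instead. A minimal before/after sketch of a call site (dev_ctx setup elided; the calls mirror the updated tests at the end of this diff):

    // before: dtype was passed explicitly and could disagree with T
    auto out_old = phi::Empty<int>(dev_ctx, {3, 2}, phi::DataType::INT32);
    // after: dtype is deduced from T via CppTypeToDataType<T>::Type()
    auto out_new = phi::Empty<int>(dev_ctx, {3, 2});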
@@ -35,7 +35,7 @@ void FullKernel(const Context& dev_ctx,
                 const Scalar& val,
                 DataType dtype,
                 DenseTensor* out) {
-  out->ResizeAndAllocate(phi::make_ddim(shape.GetData()));
+  out->Resize(phi::make_ddim(shape.GetData()));
   FullValue<T>(dev_ctx, out, val.to<T>());
 }
...
@@ -69,7 +69,9 @@ PD_REGISTER_KERNEL(empty_like,
                    phi::dtype::float16,
                    phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
-                   phi::dtype::complex<double>) {}
+                   phi::dtype::complex<double>) {
+  kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
+}

 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 PD_REGISTER_KERNEL(empty,
@@ -101,5 +103,7 @@ PD_REGISTER_KERNEL(empty_like,
                    phi::dtype::float16,
                    phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
-                   phi::dtype::complex<double>) {}
+                   phi::dtype::complex<double>) {
+  kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
+}
 #endif
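A note on the newly added registration bodies (the annotation below is an inference, not text from the commit): empty_like only needs the shape and dtype of its input, never its data, so the input can plausibly be accepted on any backend without a device transfer.

    PD_REGISTER_KERNEL(empty_like, ...) {  // arguments elided, as above
      // Let the prototype input x live on any backend; empty_like reads
      // only its meta (shape/dtype), so no data movement is required.
      kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
    }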
@@ -54,22 +54,20 @@ DenseTensor Empty(const Context& dev_ctx) {
 }

 template <typename T, typename Context>
-DenseTensor Empty(const Context& dev_ctx,
-                  const ScalarArray& shape,
-                  DataType dtype = DataType::FLOAT32) {
+DenseTensor Empty(const Context& dev_ctx, const ScalarArray& shape) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateInferMeta(shape, dtype, &meta_out);
   EmptyKernel<T, Context>(dev_ctx, shape, dtype, &dense_out);
   return dense_out;
 }

 template <typename T, typename Context>
-DenseTensor EmptyLike(const Context& dev_ctx,
-                      const DenseTensor& x,
-                      DataType dtype = DataType::UNDEFINED) {
+DenseTensor EmptyLike(const Context& dev_ctx, const DenseTensor& x) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateLikeInferMeta(x, dtype, &meta_out);
   EmptyLikeKernel<T, Context>(dev_ctx, x, dtype, &dense_out);
   return dense_out;
...
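A hedged usage sketch of the new signatures (assuming a prepared CPUContext named dev_ctx, as in the tests below); note that the output dtype now always follows T, not the prototype tensor:

    phi::DenseTensor x = phi::Empty<float>(dev_ctx, {2, 3});
    // same shape as x; dtype is float32 because T = float
    phi::DenseTensor y = phi::EmptyLike<float>(dev_ctx, x);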
@@ -48,10 +48,10 @@ void FullLikeKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 DenseTensor Full(const Context& dev_ctx,
                  const ScalarArray& shape,
-                 const Scalar& val,
-                 DataType dtype = DataType::FLOAT32) {
+                 const Scalar& val) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateInferMeta(shape, dtype, &meta_out);
   FullKernel<T, Context>(dev_ctx, shape, val, dtype, &dense_out);
   return dense_out;
@@ -60,10 +60,10 @@ DenseTensor Full(const Context& dev_ctx,
 template <typename T, typename Context>
 DenseTensor FullLike(const Context& dev_ctx,
                      const DenseTensor& x,
-                     const Scalar& val,
-                     DataType dtype = DataType::UNDEFINED) {
+                     const Scalar& val) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateLikeInferMeta(x, dtype, &meta_out);
   FullLikeKernel<T, Context>(dev_ctx, x, val, dtype, &dense_out);
   return dense_out;
...
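A matching sketch for the full variants (again assuming a prepared dev_ctx; passing a literal relies on phi::Scalar's implicit conversion, just as the tests do with val):

    // fill a 3x2 tensor with 1.0; dtype float32 follows T
    auto ones = phi::Full<float>(dev_ctx, {3, 2}, 1.0f);
    // fill a tensor shaped like ones with 0.0
    auto zeros = phi::FullLike<float>(dev_ctx, ones, 0.0f);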
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/operators/eigen/eigen_function.h"

namespace phi {

template <typename T, typename Context, typename VType>
void FullValue(const Context& dev_ctx, DenseTensor* tensor, VType val) {
  dev_ctx.template Alloc<T>(tensor);
  auto t = phi::EigenVector<T>::Flatten(*tensor);
  t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(val));
}

template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
                const ScalarArray& shape,
                const Scalar& val,
                DenseTensor* out) {
  out->ResizeAndAllocate(phi::make_ddim(shape.GetData()));
  FullValue<T>(dev_ctx, out, val.to<T>());
}

template <typename T, typename Context>
void FullLikeKernel(const Context& dev_ctx,
                    const Scalar& val,
                    DenseTensor* out) {
  auto value = val.to<float>();
  using CommonType = typename std::common_type<
      float,
      typename std::conditional<std::is_same<T, phi::dtype::float16>::value,
                                float,
                                T>::type>::type;

  auto common_type_value = static_cast<CommonType>(value);

  PADDLE_ENFORCE_EQ(
      (common_type_value >=
       static_cast<CommonType>(std::numeric_limits<T>::lowest())) &&
          (common_type_value <=
           static_cast<CommonType>(std::numeric_limits<T>::max())),
      true,
      phi::errors::InvalidArgument(
          "The filled value is out of range for target type, "
          "current kernel type is %s, the range should between %f "
          "and %f, but now value is %f.",
          typeid(T).name(),
          static_cast<CommonType>(std::numeric_limits<T>::lowest()),
          static_cast<CommonType>(std::numeric_limits<T>::max()),
          static_cast<float>(value)));

  FullValue<T>(dev_ctx, out, value);
}

}  // namespace phi
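The PADDLE_ENFORCE_EQ guard in FullLikeKernel rejects fill values that T cannot represent, widening both the value and T's bounds to a common type first (and forcing that type to float when T is float16, so the comparison is not done in half precision). A self-contained sketch of the same check, assuming plain C++17 with no Paddle headers and a hypothetical helper name FitsInTarget:

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <type_traits>

    template <typename T>
    bool FitsInTarget(float value) {
      // Widen the value and the bounds of T to a common type before comparing,
      // mirroring the CommonType logic in FullLikeKernel above.
      using CommonType = typename std::common_type<float, T>::type;
      auto v = static_cast<CommonType>(value);
      return v >= static_cast<CommonType>(std::numeric_limits<T>::lowest()) &&
             v <= static_cast<CommonType>(std::numeric_limits<T>::max());
    }

    int main() {
      std::cout << FitsInTarget<int8_t>(300.0f) << "\n";  // 0: out of int8_t range
      std::cout << FitsInTarget<int8_t>(100.0f) << "\n";  // 1: fits
      return 0;
    }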
@@ -39,7 +39,7 @@ TEST(DEV_API, empty) {
   dev_ctx.Init();

   // 2. test API
-  auto out = phi::Empty<int>(dev_ctx, {3, 2}, phi::DataType::INT32);
+  auto out = phi::Empty<int>(dev_ctx, {3, 2});

   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
@@ -87,7 +87,7 @@ TEST(DEV_API, full) {
           .GetAllocator(paddle::platform::CPUPlace())
           .get());
   dev_ctx.Init();
-  auto out = phi::Full<float>(dev_ctx, {3, 2}, val, phi::DataType::FLOAT32);
+  auto out = phi::Full<float>(dev_ctx, {3, 2}, val);

   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
...