diff --git a/paddle/phi/kernels/cpu/full_kernel.cc b/paddle/phi/kernels/cpu/full_kernel.cc
index 6b0183d31c6ec3dc3e6712043f27678c3f3a6bb2..86576a861aa4834a4b39b50594565a2d4b3ac510 100644
--- a/paddle/phi/kernels/cpu/full_kernel.cc
+++ b/paddle/phi/kernels/cpu/full_kernel.cc
@@ -35,7 +35,7 @@ void FullKernel(const Context& dev_ctx,
                 const Scalar& val,
                 DataType dtype,
                 DenseTensor* out) {
-  out->ResizeAndAllocate(phi::make_ddim(shape.GetData()));
+  out->Resize(phi::make_ddim(shape.GetData()));
   FullValue<T>(dev_ctx, out, val.to<T>());
 }
 
diff --git a/paddle/phi/kernels/empty_kernel.cc b/paddle/phi/kernels/empty_kernel.cc
index a902bd605542c3072b1d7014d8bf51f8b9e843c8..6e5f15fe1692b473965f96f68fd86fad87f1892e 100644
--- a/paddle/phi/kernels/empty_kernel.cc
+++ b/paddle/phi/kernels/empty_kernel.cc
@@ -69,7 +69,9 @@ PD_REGISTER_KERNEL(empty_like,
                    phi::dtype::float16,
                    phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
-                   phi::dtype::complex<double>) {}
+                   phi::dtype::complex<double>) {
+  kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
+}
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 PD_REGISTER_KERNEL(empty,
@@ -101,5 +103,7 @@ PD_REGISTER_KERNEL(empty_like,
                    phi::dtype::float16,
                    phi::dtype::bfloat16,
                    phi::dtype::complex<float>,
-                   phi::dtype::complex<double>) {}
+                   phi::dtype::complex<double>) {
+  kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
+}
 #endif
diff --git a/paddle/phi/kernels/empty_kernel.h b/paddle/phi/kernels/empty_kernel.h
index 54ba8b16c1d7409915f11411e99abaac03586aec..0b8d95ee94fb5480684023ec6c71698ba06d9c13 100644
--- a/paddle/phi/kernels/empty_kernel.h
+++ b/paddle/phi/kernels/empty_kernel.h
@@ -54,22 +54,20 @@ DenseTensor Empty(const Context& dev_ctx) {
 }
 
 template <typename T, typename Context>
-DenseTensor Empty(const Context& dev_ctx,
-                  const ScalarArray& shape,
-                  DataType dtype = DataType::FLOAT32) {
+DenseTensor Empty(const Context& dev_ctx, const ScalarArray& shape) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateInferMeta(shape, dtype, &meta_out);
   EmptyKernel<T, Context>(dev_ctx, shape, dtype, &dense_out);
   return dense_out;
 }
 
 template <typename T, typename Context>
-DenseTensor EmptyLike(const Context& dev_ctx,
-                      const DenseTensor& x,
-                      DataType dtype = DataType::UNDEFINED) {
+DenseTensor EmptyLike(const Context& dev_ctx, const DenseTensor& x) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateLikeInferMeta(x, dtype, &meta_out);
   EmptyLikeKernel<T, Context>(dev_ctx, x, dtype, &dense_out);
   return dense_out;
diff --git a/paddle/phi/kernels/full_kernel.h b/paddle/phi/kernels/full_kernel.h
index 394aab8f96e1ad1e8f2fb53ee4a163e7ec874226..c7b1f9af0e3191ec217d2907677ff34edebc551b 100644
--- a/paddle/phi/kernels/full_kernel.h
+++ b/paddle/phi/kernels/full_kernel.h
@@ -48,10 +48,10 @@ void FullLikeKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 DenseTensor Full(const Context& dev_ctx,
                  const ScalarArray& shape,
-                 const Scalar& val,
-                 DataType dtype = DataType::FLOAT32) {
+                 const Scalar& val) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateInferMeta(shape, dtype, &meta_out);
   FullKernel<T, Context>(dev_ctx, shape, val, dtype, &dense_out);
   return dense_out;
@@ -60,10 +60,10 @@ DenseTensor Full(const Context& dev_ctx,
 template <typename T, typename Context>
 DenseTensor FullLike(const Context& dev_ctx,
                      const DenseTensor& x,
-                     const Scalar& val,
-                     DataType dtype = DataType::UNDEFINED) {
+                     const Scalar& val) {
   auto dense_out = Empty<T, Context>(dev_ctx);
   MetaTensor meta_out(&dense_out);
+  DataType dtype = paddle::experimental::CppTypeToDataType<T>::Type();
   CreateLikeInferMeta(x, dtype, &meta_out);
   FullLikeKernel<T, Context>(dev_ctx, x, val, dtype, &dense_out);
   return dense_out;
diff --git a/paddle/phi/kernels/impl/full_kernel_impl.h b/paddle/phi/kernels/impl/full_kernel_impl.h
deleted file mode 100644
index 8cced49906eccdc41ccfb02518dcd06d771d23c9..0000000000000000000000000000000000000000
--- a/paddle/phi/kernels/impl/full_kernel_impl.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include "paddle/phi/common/scalar.h"
-#include "paddle/phi/common/scalar_array.h"
-#include "paddle/phi/core/dense_tensor.h"
-#include "paddle/phi/kernels/funcs/eigen/common.h"
-
-// See Note [ Why still include the fluid headers? ]
-#include "paddle/fluid/operators/eigen/eigen_function.h"
-
-namespace phi {
-
-template <typename T, typename Context, typename VType>
-void FullValue(const Context& dev_ctx, DenseTensor* tensor, VType val) {
-  dev_ctx.template Alloc<T>(tensor);
-  auto t = phi::EigenVector<T>::Flatten(*tensor);
-  t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(val));
-}
-
-template <typename T, typename Context>
-void FullKernel(const Context& dev_ctx,
-                const ScalarArray& shape,
-                const Scalar& val,
-                DenseTensor* out) {
-  out->ResizeAndAllocate(phi::make_ddim(shape.GetData()));
-  FullValue<T>(dev_ctx, out, val.to<T>());
-}
-
-template <typename T, typename Context>
-void FullLikeKernel(const Context& dev_ctx,
-                    const Scalar& val,
-                    DenseTensor* out) {
-  auto value = val.to<float>();
-  using CommonType = typename std::common_type<
-      float,
-      typename std::conditional<std::is_same<T, phi::dtype::float16>::value,
-                                float,
-                                T>::type>::type;
-
-  auto common_type_value = static_cast<CommonType>(value);
-
-  PADDLE_ENFORCE_EQ(
-      (common_type_value >=
-       static_cast<CommonType>(std::numeric_limits<T>::lowest())) &&
-          (common_type_value <=
-           static_cast<CommonType>(std::numeric_limits<T>::max())),
-      true,
-      phi::errors::InvalidArgument(
-          "The filled value is out of range for target type, "
-          "current kernel type is %s, the range should between %f "
-          "and %f, but now value is %f.",
-          typeid(T).name(),
-          static_cast<CommonType>(std::numeric_limits<T>::lowest()),
-          static_cast<CommonType>(std::numeric_limits<T>::max()),
-          static_cast<float>(value)));
-  FullValue<T>(dev_ctx, out, value);
-}
-
-}  // namespace phi
diff --git a/paddle/phi/tests/kernels/test_creation_dev_api.cc b/paddle/phi/tests/kernels/test_creation_dev_api.cc
index e4f80a5bd19eba48a4b19e4b91d50649b5c40c61..8c2c8642ab9005472b74086e70457940b35f8619 100644
--- a/paddle/phi/tests/kernels/test_creation_dev_api.cc
+++ b/paddle/phi/tests/kernels/test_creation_dev_api.cc
@@ -39,7 +39,7 @@ TEST(DEV_API, empty) {
   dev_ctx.Init();
 
   // 2. test API
-  auto out = phi::Empty<int>(dev_ctx, {3, 2}, phi::DataType::INT32);
+  auto out = phi::Empty<int>(dev_ctx, {3, 2});
 
   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
@@ -87,7 +87,7 @@ TEST(DEV_API, full) {
           .GetAllocator(paddle::platform::CPUPlace())
          .get());
   dev_ctx.Init();
-  auto out = phi::Full<float>(dev_ctx, {3, 2}, val, phi::DataType::FLOAT32);
+  auto out = phi::Full<float>(dev_ctx, {3, 2}, val);
 
   // 3. check result
   ASSERT_EQ(out.dims().size(), 2);
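
Note: as a minimal sketch of the call pattern introduced by this patch, the hypothetical helper below (MakeOnesLike is not part of the change) shows how a caller uses the reworked dev APIs. The explicit DataType parameter is gone; the output dtype is now derived from the template argument T via paddle::experimental::CppTypeToDataType<T>::Type() inside Full, FullLike, Empty, and EmptyLike.

// Hypothetical helper, for illustration only (not part of this diff).
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/full_kernel.h"

namespace phi {

// Returns a tensor of ones with the same shape as `x`. The output dtype is
// implied by T, so no DataType argument appears at the call site anymore.
template <typename T, typename Context>
DenseTensor MakeOnesLike(const Context& dev_ctx, const DenseTensor& x) {
  // Before this patch the call also took an explicit DataType argument;
  // now the dtype is derived from T via CppTypeToDataType<T>::Type().
  return FullLike<T, Context>(dev_ctx, x, 1);
}

}  // namespace phi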