Unverified commit 8164b97a, authored by H huangjiyi, committed by GitHub

[PHI Decoupling] remove "paddle/fluid/platform/float16.h" and "paddle/fluid/platform/for_range.h" in phi. (#47817)

* rm "paddle/fluid/platform/float16.h" in phi

* rm "paddle/fluid/platform/for_range.h" in phi
Parent 4c375454
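
Both removals follow the same substitution: float16 now comes from paddle/phi/common/float16.h (as phi::dtype::float16) and the ForRange helper from paddle/phi/kernels/funcs/for_range.h (as phi::funcs::ForRange), so the phi kernels in the diff below no longer reach back into paddle/fluid/platform. As a minimal sketch of the post-migration pattern — the ScaleFunctor/ScaleInPlace names are illustrative, not code from this commit:

#include <cstdint>

#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/kernels/funcs/for_range.h"

namespace phi {

// Illustrative functor (not from this commit): scales each element in place.
// ForRange invokes operator()(int64_t) once per index, on CPU or GPU
// depending on the Context type.
template <typename T>
struct ScaleFunctor {
  ScaleFunctor(T* data, T factor) : data_(data), factor_(factor) {}
  HOSTDEVICE void operator()(int64_t i) const { data_[i] *= factor_; }
  T* data_;
  T factor_;
};

template <typename T, typename Context>
void ScaleInPlace(const Context& dev_ctx, T* data, int64_t n, T factor) {
  // Same call shape as the kernels touched by this commit: build a ForRange
  // over the element count, then apply the functor to every index.
  phi::funcs::ForRange<Context> for_range(dev_ctx, n);
  for_range(ScaleFunctor<T>(data, factor));
}

}  // namespace phi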

@@ -25,6 +25,6 @@ PD_REGISTER_KERNEL(fill_grad,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}

@@ -25,6 +25,6 @@ PD_REGISTER_KERNEL(fill,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}

@@ -27,9 +27,9 @@ limitations under the License. */
 #include <vector>

 #include "paddle/fluid/platform/bfloat16.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/math_function_impl.h"
 #include "unsupported/Eigen/CXX11/Tensor"

@@ -17,9 +17,9 @@ limitations under the License. */
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/bfloat16.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/math_function_impl.h"

@@ -24,4 +24,4 @@ PD_REGISTER_KERNEL(bmm_grad,
                    phi::BmmGradKernel,
                    float,
                    double,
-                   paddle::platform::float16) {}
+                   phi::dtype::float16) {}

@@ -18,10 +18,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/bmm_kernel_impl.h"

-PD_REGISTER_KERNEL(bmm,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::BmmKernel,
-                   float,
-                   double,
-                   paddle::platform::float16) {}
+PD_REGISTER_KERNEL(
+    bmm, GPU, ALL_LAYOUT, phi::BmmKernel, float, double, phi::dtype::float16) {}

@@ -23,10 +23,10 @@ limitations under the License. */
 #include <vector>

 #include "paddle/fluid/memory/memory.h"
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/backends/dynload/cusolver.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/for_range.h"

 namespace phi {

@@ -130,7 +130,7 @@ void CholeskyKernel(const Context& dev_ctx,
       upper ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;
   // potrf is in-place, so first copy the triangular part of the input
   // matrices to the output and set the other triangular part to 0
-  paddle::platform::ForRange<GPUContext> for_range(dev_ctx, tensor_size);
+  phi::funcs::ForRange<GPUContext> for_range(dev_ctx, tensor_size);
   if (upper) {
     MatrixBandPartFunctor<T> matrix_band_part_functor(m,
                                                       m,

@@ -26,6 +26,6 @@ PD_REGISTER_KERNEL(fill_grad,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}

@@ -26,6 +26,6 @@ PD_REGISTER_KERNEL(fill,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}

@@ -160,6 +160,6 @@ PD_REGISTER_KERNEL(overlap_add_grad,
                    int64_t,
                    float,
                    double,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::complex<float>,
                    paddle::platform::complex<double>) {}

@@ -146,6 +146,6 @@ PD_REGISTER_KERNEL(overlap_add,
                    int64_t,
                    float,
                    double,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::complex<float>,
                    paddle::platform::complex<double>) {}

@@ -28,7 +28,7 @@ PD_REGISTER_KERNEL(set_value,
                    int,
                    int64_t,
                    bool,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}

 PD_REGISTER_KERNEL(set_value_with_tensor,

@@ -40,6 +40,6 @@ PD_REGISTER_KERNEL(set_value_with_tensor,
                    int,
                    int64_t,
                    bool,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}

@@ -24,7 +24,6 @@
 #endif

 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/float16.h"

@@ -14,9 +14,9 @@
 #pragma once

-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/atan2_grad_kernel.h"
+#include "paddle/phi/kernels/funcs/for_range.h"

 namespace phi {

@@ -14,9 +14,9 @@
 #pragma once

-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/atan2_kernel.h"
+#include "paddle/phi/kernels/funcs/for_range.h"

 namespace phi {

 template <typename T>

@@ -14,9 +14,9 @@ limitations under the License. */
 #pragma once

-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/kernels/cholesky_grad_kernel.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
+#include "paddle/phi/kernels/funcs/for_range.h"

 namespace phi {

@@ -280,7 +280,7 @@ void CholeskyGradKernel(const Context& dev_ctx,
   blas.MatMul(l, trans_desc, l_grad, no_trans_desc, T(1), &middle, T(0));

   /*! phi.tril_().diagonal(0, -2, -1).mul_(0.5) */
-  paddle::platform::ForRange<Context> for_range(dev_ctx, tensor_size);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, tensor_size);
   MatrixBandPartScaleEndFunctor<T> matrix_band_part_scale_end_functor(
       m,
       m,

@@ -24,9 +24,9 @@
 #endif

 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/phi/backends/dynload/cudnn.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
 #include "paddle/phi/kernels/funcs/batch_norm_utils.h"
 #include "paddle/phi/kernels/funcs/padding.h"

@@ -14,8 +14,8 @@
 #pragma once

-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/common/scalar.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 #include "paddle/phi/kernels/funcs/math_function.h"

 namespace phi {

@@ -48,7 +48,7 @@ void EyeKernel(const Context& ctx,
   phi::funcs::SetConstant<Context, T> set_zero;
   set_zero(ctx, out, static_cast<T>(0));
   int64_t num_eyes = (std::min)(rows, columns);
-  paddle::platform::ForRange<Context> for_range(ctx, num_eyes);
+  phi::funcs::ForRange<Context> for_range(ctx, num_eyes);
   EyeFunctor<T> functor(columns, out_data);
   for_range(functor);
 }

@@ -28,7 +28,7 @@ void SeluGradKernel(const Context& dev_ctx,
   SeluGradFunctor<T> functor(
       out.data<T>(), dout.data<T>(), alpha, scale, dx_ptr);
   size_t limit = static_cast<size_t>(out.numel());
-  paddle::platform::ForRange<Context> for_range(dev_ctx, limit);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, limit);
   for_range(functor);
 }
 }  // namespace phi

@@ -16,8 +16,8 @@
 #include <string>

 #include "paddle/fluid/operators/math.h"
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/kernels/funcs/for_range.h"

 namespace phi {

@@ -86,7 +86,7 @@ void SeluKernel(const Context& dev_ctx,
   auto out_ptr = dev_ctx.template Alloc<T>(out);
   SeluFunctor<T> functor(x.data<T>(), alpha, scale, out_ptr);
   size_t limit = static_cast<size_t>(x.numel());
-  paddle::platform::ForRange<Context> for_range(dev_ctx, limit);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, limit);
   for_range(functor);
 }
 }  // namespace phi

@@ -27,7 +27,7 @@ void MatMul(const Context& dev_ctx,
             DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   xpu::Context* xpu_ctx = dev_ctx.x_context();
-  if (std::is_same<paddle::platform::float16, T>::value) {
+  if (std::is_same<phi::dtype::float16, T>::value) {
     MatMulXPUFunction<T, int16_t>(a, b, out, trans_a, trans_b, xpu_ctx);
   } else {
     if (std::getenv("XPU_PADDLE_FC_INT32") != nullptr) {

@@ -99,9 +99,6 @@ void BmmGradKernel(const Context& dev_ctx,
 }  // namespace phi

-PD_REGISTER_KERNEL(bmm_grad,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::BmmGradKernel,
-                   float,
-                   paddle::platform::float16) {}
+PD_REGISTER_KERNEL(
+    bmm_grad, XPU, ALL_LAYOUT, phi::BmmGradKernel, float, phi::dtype::float16) {
+}

@@ -62,7 +62,7 @@ void BmmKernel(const Context& dev_ctx,
                         y_dims[1]));

   xpu::Context* xpu_ctx = dev_ctx.x_context();
-  if (std::is_same<paddle::platform::float16, T>::value) {
+  if (std::is_same<phi::dtype::float16, T>::value) {
     MatMulXPUFunction<T, int16_t>(x, y, out, trans_x, trans_y, xpu_ctx);
   } else {
     if (std::getenv("XPU_PADDLE_FC_INT32") != nullptr) {

@@ -77,4 +77,4 @@ void BmmKernel(const Context& dev_ctx,
 }  // namespace phi

 PD_REGISTER_KERNEL(
-    bmm, XPU, ALL_LAYOUT, phi::BmmKernel, float, paddle::platform::float16) {}
+    bmm, XPU, ALL_LAYOUT, phi::BmmKernel, float, phi::dtype::float16) {}

@@ -34,7 +34,7 @@ enum XPUFCCalcType {

 template <typename T>
 XPUFCCalcType FCCalcType() {
-  if (std::is_same<paddle::platform::float16, T>::value ||
+  if (std::is_same<phi::dtype::float16, T>::value ||
       std::is_same<float16, T>::value) {
     return XPUFCCalcType::FC_INT16;
   } else if (std::getenv("XPU_PADDLE_FC_INT32") != nullptr) {