From e4670d8074b5d5f21e71d177e0f0dd9700a51853 Mon Sep 17 00:00:00 2001
From: huangjiyi <43315610+huangjiyi@users.noreply.github.com>
Date: Fri, 18 Nov 2022 10:33:52 +0800
Subject: [PATCH] rm "paddle/fluid/operators/amp/fp16_type_traits.h" in phi
 (#48051)

---
 paddle/fluid/operators/group_norm_op.cu           | 2 +-
 paddle/fluid/operators/uniform_random_op.h        | 2 +-
 paddle/phi/kernels/funcs/functors.h               | 8 ++++----
 paddle/phi/kernels/gpu/norm_grad_kernel.cu        | 4 ++--
 paddle/phi/kernels/gpu/norm_kernel.cu             | 4 ++--
 paddle/phi/kernels/gpu/sgd_kernel.cu              | 6 +++---
 paddle/phi/kernels/primitive/functor_primitives.h | 4 ++--
 7 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/operators/group_norm_op.cu b/paddle/fluid/operators/group_norm_op.cu
index 12a989bc82..08ea4d3278 100644
--- a/paddle/fluid/operators/group_norm_op.cu
+++ b/paddle/fluid/operators/group_norm_op.cu
@@ -324,7 +324,7 @@ class GroupNormKernel : public framework::OpKernel<T> {
     dim3 grid(group_size, groups, x_dims[0]);
     dim3 threads(block_size, 1, 1);
     if (data_layout == DataLayout::kNCHW) {
-      using AccT = typename details::MPTypeTrait<T>::Type;
+      using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
       constexpr int vec_size = sizeof(float4) / sizeof(T);
       int size = group_size * imsize;
       const int max_num_threads = 1024;
diff --git a/paddle/fluid/operators/uniform_random_op.h b/paddle/fluid/operators/uniform_random_op.h
index bf2666deda..3ddf6092f0 100644
--- a/paddle/fluid/operators/uniform_random_op.h
+++ b/paddle/fluid/operators/uniform_random_op.h
@@ -165,7 +165,7 @@ void UniformRandom(const framework::ExecutionContext& context,
 
   if (seed == 0) {
     // Use global Generator seed
-    using MT = typename details::MPTypeTrait<T>::Type;
+    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
     phi::funcs::uniform_distribution<MT> dist;
     phi::funcs::uniform_real_transform<MT> trans(min, max);
     phi::funcs::distribution_and_transform<T>(dev_cxt, tensor, dist, trans);
diff --git a/paddle/phi/kernels/funcs/functors.h b/paddle/phi/kernels/funcs/functors.h
index 2e6fe8b2d7..3c7ae5ed09 100644
--- a/paddle/phi/kernels/funcs/functors.h
+++ b/paddle/phi/kernels/funcs/functors.h
@@ -14,7 +14,7 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
@@ -38,7 +38,7 @@ struct AddGradFunctor {
 
 template <typename T>
 struct ScaleFunctor {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   explicit ScaleFunctor(const MT coeff) : coeff_(coeff) {}
 
   inline HOSTDEVICE T operator()(T ele) {
@@ -125,7 +125,7 @@ struct SigmoidGradFunctor {
 
 template <typename T>
 struct GeluFunctor {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   inline HOSTDEVICE T operator()(T x) {
     // this function is tanh approximation of gelu
     // actual gelu is:
@@ -141,7 +141,7 @@ struct GeluFunctor {
 
 template <typename T>
 struct GeluGradFunctor {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   inline HOSTDEVICE T UseX(T x) {
     MT mx = static_cast<MT>(x);
     MT tanh_out =
diff --git a/paddle/phi/kernels/gpu/norm_grad_kernel.cu b/paddle/phi/kernels/gpu/norm_grad_kernel.cu
index bbbb6e9c01..cb02cc7138 100644
--- a/paddle/phi/kernels/gpu/norm_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/norm_grad_kernel.cu
@@ -22,8 +22,8 @@
 #include <hipcub/hipcub.hpp>
 namespace cub = hipcub;
 #endif
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/common_shape.h"
@@ -38,7 +38,7 @@ __global__ void NormalizeGradient(const T* x,
                                   const int axis_n,
                                   const int post,
                                   T* x_grad) {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   typedef cub::BlockReduce<MT, BlockDim> BlockReduce;
   __shared__ typename BlockReduce::TempStorage temp_storage_sum;
   int num = pre * post;
diff --git a/paddle/phi/kernels/gpu/norm_kernel.cu b/paddle/phi/kernels/gpu/norm_kernel.cu
index bd9cffe796..4843831ebf 100644
--- a/paddle/phi/kernels/gpu/norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/norm_kernel.cu
@@ -22,8 +22,8 @@
 #include <hipcub/hipcub.hpp>
 namespace cub = hipcub;
 #endif
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/common_shape.h"
@@ -46,7 +46,7 @@ __global__ void Normalize(const T* x,
                           const T eps,
                           T* y,
                           T* out_norm) {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   typedef cub::BlockReduce<MT, BlockDim> BlockReduce;
   __shared__ typename BlockReduce::TempStorage temp_storage;
   int num = pre * post;
diff --git a/paddle/phi/kernels/gpu/sgd_kernel.cu b/paddle/phi/kernels/gpu/sgd_kernel.cu
index ea257ebd1c..e3f0bf968c 100644
--- a/paddle/phi/kernels/gpu/sgd_kernel.cu
+++ b/paddle/phi/kernels/gpu/sgd_kernel.cu
@@ -15,10 +15,10 @@
 #include "paddle/phi/kernels/sgd_kernel.h"
 
 #include "paddle/fluid/framework/mixed_vector.h"
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_helper.h"
 #include "paddle/phi/backends/gpu/gpu_primitives.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/core/kernel_registry.h"
 
 namespace phi {
@@ -72,7 +72,7 @@ void SGDDenseKernel(const Context& dev_ctx,
                     bool multi_precision,
                     DenseTensor* param_out,
                     DenseTensor* master_param_out) {
-  using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
   // do check here
   // if (multi_precision) {
   //   bool has_master =
@@ -109,7 +109,7 @@ void SGDDenseParamSparseGradKernel(
     bool multi_precision,
     DenseTensor* param_out,
     DenseTensor* master_param_out) {
-  using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
   // do some check here
   // if (multi_precision) {
   //   bool has_master =
diff --git a/paddle/phi/kernels/primitive/functor_primitives.h b/paddle/phi/kernels/primitive/functor_primitives.h
index 700ba00088..b0f3d62823 100644
--- a/paddle/phi/kernels/primitive/functor_primitives.h
+++ b/paddle/phi/kernels/primitive/functor_primitives.h
@@ -14,7 +14,7 @@
 
 #pragma once
 
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/kernels/funcs/eigen/extensions.h"
@@ -79,7 +79,7 @@ struct IdentityFunctor {
 template <typename Tx, typename Ty = Tx>
 struct DivideFunctor {
  private:
-  using MPType = typename ::paddle::operators::details::MPTypeTrait<Tx>::Type;
+  using MPType = typename ::phi::dtype::MPTypeTrait<Tx>::Type;
 
  public:
   HOSTDEVICE inline DivideFunctor() { n_inv = static_cast<MPType>(1.0f); }
--
GitLab
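
For context on the pattern this patch re-homes from `paddle::operators::details` to `phi::dtype`: `MPTypeTrait<T>::Type` names the "master precision" type a kernel should accumulate in, promoting reduced-precision float types to `float` while leaving other types unchanged. Below is a minimal self-contained sketch of that trait idea; the `float16` stand-in struct and the assertions are illustrative assumptions, not the actual contents of `paddle/phi/common/amp_type_traits.h`.

```cpp
#include <cstdint>
#include <type_traits>

// Hypothetical stand-in for phi::dtype::float16 (a storage-only 16-bit type).
struct float16 {
  uint16_t bits;
};

// Trait in the style of phi::dtype::MPTypeTrait: by default the math type is
// the element type itself...
template <typename T>
struct MPTypeTrait {
  using Type = T;
};

// ...but reduced-precision floats promote to float, so kernels can do their
// arithmetic and reductions at higher precision before casting back.
template <>
struct MPTypeTrait<float16> {
  using Type = float;
};

static_assert(std::is_same<MPTypeTrait<float>::Type, float>::value,
              "full-precision types keep their own type");
static_assert(std::is_same<MPTypeTrait<float16>::Type, float>::value,
              "float16 math is carried out in float");

int main() { return 0; }
```

A kernel then follows the same shape as the hunks above: `using MT = typename MPTypeTrait<T>::Type;`, cast inputs to `MT`, accumulate, and cast the result back to `T`.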