Unverified · Commit e4670d80 · Authored by huangjiyi · Committed by GitHub

rm "paddle/fluid/operators/amp/fp16_type_traits.h" in phi (#48051)

Parent: fafc7be2
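Background note: MPTypeTrait maps a low-precision element type (float16 or bfloat16) to the wider type used for intermediate accumulation in mixed-precision kernels; this commit only changes which header and namespace provide that trait. Below is a minimal, illustrative sketch of such a trait and of the call-site change, written under the assumption that the phi version maps float16/bfloat16 to float. The name MPTypeTraitSketch is hypothetical; this is not the exact contents of paddle/phi/common/amp_type_traits.h.

// Illustrative sketch only; the real trait is phi::dtype::MPTypeTrait in
// paddle/phi/common/amp_type_traits.h (assumed behavior, may differ in detail).
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"

namespace phi {
namespace dtype {

template <typename T>
struct MPTypeTraitSketch {
  using Type = T;  // full-precision types accumulate in their own type
};

template <>
struct MPTypeTraitSketch<float16> {
  using Type = float;  // float16 accumulates in float
};

template <>
struct MPTypeTraitSketch<bfloat16> {
  using Type = float;  // bfloat16 accumulates in float
};

}  // namespace dtype
}  // namespace phi

// The call sites touched by this diff change only the spelling of the trait:
//   before: using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
//   after:  using MT = typename phi::dtype::MPTypeTrait<T>::Type;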
@@ -324,7 +324,7 @@ class GroupNormKernel<phi::GPUContext, T> : public framework::OpKernel<T> {
     dim3 grid(group_size, groups, x_dims[0]);
     dim3 threads(block_size, 1, 1);
     if (data_layout == DataLayout::kNCHW) {
-      using AccT = typename details::MPTypeTrait<T>::Type;
+      using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
       constexpr int vec_size = sizeof(float4) / sizeof(T);
       int size = group_size * imsize;
       const int max_num_threads = 1024;
......
@@ -165,7 +165,7 @@ void UniformRandom(const framework::ExecutionContext& context,
   if (seed == 0) {
     // Use global Generator seed
-    using MT = typename details::MPTypeTrait<T>::Type;
+    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
     phi::funcs::uniform_distribution<MT> dist;
     phi::funcs::uniform_real_transform<MT> trans(min, max);
     phi::funcs::distribution_and_transform<T>(dev_cxt, tensor, dist, trans);
......
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/kernels/funcs/math.h"
 namespace phi {
@@ -38,7 +38,7 @@ struct AddGradFunctor {
 template <typename T>
 struct ScaleFunctor {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   explicit ScaleFunctor(const MT coeff) : coeff_(coeff) {}
   inline HOSTDEVICE T operator()(T ele) {
@@ -125,7 +125,7 @@ struct SigmoidGradFunctor {
 template <typename T>
 struct GeluFunctor {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   inline HOSTDEVICE T operator()(T x) {
     // this function is tanh approximation of gelu
     // actual gelu is:
@@ -141,7 +141,7 @@ struct GeluFunctor {
 template <typename T>
 struct GeluGradFunctor {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   inline HOSTDEVICE T UseX(T x) {
     MT mx = static_cast<MT>(x);
     MT tanh_out =
......
@@ -22,8 +22,8 @@
 #include <hipcub/hipcub.hpp>
 namespace cub = hipcub;
 #endif
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/common_shape.h"
@@ -38,7 +38,7 @@ __global__ void NormalizeGradient(const T* x,
                                   const int axis_n,
                                   const int post,
                                   T* x_grad) {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   typedef cub::BlockReduce<MT, BlockDim> BlockReduce;
   __shared__ typename BlockReduce::TempStorage temp_storage_sum;
   int num = pre * post;
......
@@ -22,8 +22,8 @@
 #include <hipcub/hipcub.hpp>
 namespace cub = hipcub;
 #endif
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/common_shape.h"
@@ -46,7 +46,7 @@ __global__ void Normalize(const T* x,
                           const T eps,
                           T* y,
                           T* out_norm) {
-  using MT = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
   typedef cub::BlockReduce<MT, BlockDim> BlockReduce;
   __shared__ typename BlockReduce::TempStorage temp_storage;
   int num = pre * post;
......
@@ -15,10 +15,10 @@
 #include "paddle/phi/kernels/sgd_kernel.h"
 #include "paddle/fluid/framework/mixed_vector.h"
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_helper.h"
 #include "paddle/phi/backends/gpu/gpu_primitives.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/core/kernel_registry.h"
 namespace phi {
@@ -72,7 +72,7 @@ void SGDDenseKernel(const Context& dev_ctx,
                     bool multi_precision,
                     DenseTensor* param_out,
                     DenseTensor* master_param_out) {
-  using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
   // do check here
   // if (multi_precision) {
   //   bool has_master =
@@ -109,7 +109,7 @@ void SGDDenseParamSparseGradKernel(
     bool multi_precision,
     DenseTensor* param_out,
     DenseTensor* master_param_out) {
-  using MPDType = typename paddle::operators::details::MPTypeTrait<T>::Type;
+  using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
   // do some check here
   // if (multi_precision) {
   //   bool has_master =
......
@@ -14,7 +14,7 @@
 #pragma once
-#include "paddle/fluid/operators/amp/fp16_type_traits.h"
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/kernels/funcs/eigen/extensions.h"
@@ -79,7 +79,7 @@ struct IdentityFunctor {
 template <typename Tx, typename Ty = Tx>
 struct DivideFunctor {
  private:
-  using MPType = typename ::paddle::operators::details::MPTypeTrait<Tx>::Type;
+  using MPType = typename ::phi::dtype::MPTypeTrait<Tx>::Type;
  public:
   HOSTDEVICE inline DivideFunctor() { n_inv = static_cast<MPType>(1.0f); }
......