Unverified commit 04c29558, authored by huangjiyi, committed by GitHub

[PHI decoupling] remove dependency on "paddle/fluid/operators/elementwise/xxx.h" in phi (#47870)

* rm "paddle/fluid/operators/elementwise/xxx.h" in phi

* fix bugs

* add LaunchElementwiseCudaKernel in phi

* Revert "add LaunchElementwiseCudaKernel in phi"

This reverts commit 588f45bbdad2372ec7bff0c567a29bff675d22e1.

* rm indirect dependence on "elementwise_op_impl.cu.h"

* rm LaunchSameDimsElementwiseCudaKernel and LaunchElementwiseCudaKernel in phi
Parent 25e63dca
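Note: the two fluid launch helpers removed by this commit and their phi replacements take near-identical arguments, so each call site changes mechanically. A minimal sketch of the substitution pattern (variable names follow the diffs below; the header attributions are inferred from the includes this commit adds):

// Same-dims case: fluid helper -> phi::funcs::ElementwiseKernel
// (declared in "paddle/phi/kernels/funcs/elementwise_base.h")
paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(ctx, ins, &outs, functor);  // before
phi::funcs::ElementwiseKernel<T>(ctx, ins, &outs, functor);                           // after

// Broadcasting case: fluid helper -> phi::funcs::BroadcastKernel
// (declared in "paddle/phi/kernels/funcs/broadcast_function.h")
paddle::operators::LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
    dev_ctx, ins, &outs, /*axis=*/0, functor);  // before
phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>(
    dev_ctx, ins, &outs, /*axis=*/0, functor);  // after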
@@ -14,9 +14,9 @@
 #include "paddle/phi/kernels/label_smooth_grad_kernel.h"
 
-#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/elementwise_base.h"
 
 namespace phi {
 
 template <typename T>
@@ -42,8 +42,7 @@ void LabelSmoothGradKernel(const Context& ctx,
   std::vector<const DenseTensor*> ins = {&out_grad};
   std::vector<DenseTensor*> outs = {label_grad};
   auto functor = LabelSmoothGradFunctor<T>(epsilon);
-  paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
-      ctx, ins, &outs, functor);
+  phi::funcs::ElementwiseKernel<T>(ctx, ins, &outs, functor);
 }
 
 } // namespace phi
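As a usage reference for the new call, here is a self-contained sketch of a same-shape elementwise launch through phi::funcs::ElementwiseKernel, modeled on the hunk above. The functor and kernel names are illustrative, not part of the commit; the actual LabelSmoothGradFunctor scales the incoming gradient by (1 - epsilon), since label smoothing computes y = (1 - epsilon) * x + epsilon / K.

#include <vector>

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"

namespace phi {

// Illustrative functor: scales every element by (1 - epsilon),
// mirroring what LabelSmoothGradFunctor does for the gradient.
template <typename T>
struct ScaleByOneMinusEps {
  T coeff;
  explicit ScaleByOneMinusEps(float epsilon)
      : coeff(static_cast<T>(1.0f - epsilon)) {}
  HOSTDEVICE inline T operator()(const T x) const { return coeff * x; }
};

// Illustrative kernel body: one input, one output, identical shapes,
// so the same-dims ElementwiseKernel path applies.
template <typename T, typename Context>
void ScaleGradImpl(const Context& ctx,
                   const DenseTensor& out_grad,
                   float epsilon,
                   DenseTensor* in_grad) {
  ctx.template Alloc<T>(in_grad);
  std::vector<const DenseTensor*> ins = {&out_grad};
  std::vector<DenseTensor*> outs = {in_grad};
  funcs::ElementwiseKernel<T>(ctx, ins, &outs, ScaleByOneMinusEps<T>(epsilon));
}

}  // namespace phi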
@@ -16,9 +16,9 @@
 #include <vector>
 
-#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/elementwise_base.h"
 
 namespace phi {
@@ -77,8 +77,7 @@ void LabelSmoothKernel(const Context& ctx,
     std::vector<const DenseTensor*> ins = {&label};
     std::vector<DenseTensor*> outs = {out};
     auto functor = LabelSmoothFunctor<T>(epsilon, label_dim);
-    paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
-        ctx, ins, &outs, functor);
+    phi::funcs::ElementwiseKernel<T>(ctx, ins, &outs, functor);
   }
 }
@@ -14,9 +14,9 @@
 #include "paddle/phi/kernels/p_norm_kernel.h"
 
-#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h"
 #include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/elementwise_base.h"
 #include "paddle/phi/kernels/funcs/reduce_function.h"
 #include "paddle/phi/kernels/gpu/reduce.h"
@@ -30,13 +30,14 @@ namespace cub = hipcub;
 
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/empty_kernel.h"
+#include "paddle/phi/kernels/funcs/broadcast_function.h"
 #include "paddle/phi/kernels/funcs/compare_functors.h"
 #include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
+#include "paddle/phi/kernels/funcs/elementwise_base.h"
 #include "paddle/phi/kernels/funcs/elementwise_functor.h"
 #include "paddle/phi/kernels/funcs/gather.cu.h"
 #include "paddle/phi/kernels/funcs/viterbi_decode_functor.h"
@@ -90,9 +91,8 @@ struct BinaryOperation {
                   DenseTensor* output) {
     std::vector<const DenseTensor*> ins{&lhs, &rhs};
     std::vector<DenseTensor*> outs{output};
-    paddle::operators::
-        LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
-            dev_ctx, ins, &outs, 0, BinaryFunctor<T>());
+    phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>(
+        dev_ctx, ins, &outs, 0, BinaryFunctor<T>());
   }
 };
@@ -107,7 +107,7 @@ struct GetMask {
                   DenseTensor* mask) {
     std::vector<const DenseTensor*> ins = {&lhs, &rhs};
     std::vector<DenseTensor*> outs = {mask};
-    paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>(
+    phi::funcs::ElementwiseKernel<T>(
         dev_ctx, ins, &outs, CompareFunctor<int64_t, T>());
   }
 };
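And the broadcasting counterpart: a sketch of a binary launch through phi::funcs::BroadcastKernel, modeled on the BinaryOperation hunk above. The AddFunctor and wrapper names are illustrative; kBinary tells the launcher to expect two inputs, and axis 0 matches the call in the diff.

#include <vector>

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"

namespace phi {

// Illustrative binary functor; BinaryOperation in the diff is templated
// over its functor in the same way.
template <typename T>
struct AddFunctor {
  HOSTDEVICE inline T operator()(const T a, const T b) const { return a + b; }
};

// Illustrative wrapper: broadcasts rhs against lhs starting at axis 0,
// then applies the functor elementwise, writing into output.
template <typename T, typename Context>
void BroadcastAdd(const Context& dev_ctx,
                  const DenseTensor& lhs,
                  const DenseTensor& rhs,
                  DenseTensor* output) {
  dev_ctx.template Alloc<T>(output);
  std::vector<const DenseTensor*> ins{&lhs, &rhs};
  std::vector<DenseTensor*> outs{output};
  funcs::BroadcastKernel<ElementwiseType::kBinary, T, T>(
      dev_ctx, ins, &outs, 0, AddFunctor<T>());
}

}  // namespace phi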