Unverified commit c7cd8d98, authored by jzhang533, committed by GitHub

removing dependent to fluid/framework/eigen.h in phi (#47675)

* removing dependent to fluid/framework/eigen.h in phi

* more fix according to PR-CI-Py3 fail
Parent: ef21b58b
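
The change below is mechanical throughout: every paddle::framework::EigenVector/EigenMatrix/EigenTensor adaptor becomes its phi:: counterpart, and includes of "paddle/fluid/framework/eigen.h" are replaced by (or dropped in favor of) "paddle/phi/kernels/funcs/eigen/common.h", so code under phi no longer depends on the fluid framework. A minimal sketch of the pattern, assuming a Paddle source tree (Example is a hypothetical function for illustration only, not part of this commit):

// Before: the Eigen adaptors came from the fluid framework.
//   #include "paddle/fluid/framework/eigen.h"
//   auto v = paddle::framework::EigenVector<float>::Flatten(t);
// After: phi ships its own adaptors in eigen/common.h.
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"

void Example(phi::DenseTensor& t) {
  auto v = phi::EigenVector<float>::Flatten(t);  // 1-D Eigen view of t's data
  v = v.constant(0.0f);  // evaluate an Eigen expression through the view
}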
@@ -154,7 +154,7 @@ class BlockingQueue {
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+using EigenVector = phi::EigenVector<T, MajorType, IndexType>;
 
 template <typename T>
 inline void MergeVars(const std::string &var_name,
......
@@ -206,13 +206,13 @@ void TensorAdd(const VarType& src, VarType* dst) {
 #endif
 }
 
-#define TENSOR_ADD_EIGEN(T)                                             \
-  auto cpu_ctx = static_cast<phi::CPUContext*>(                         \
-      platform::DeviceContextPool::Instance().Get(place));              \
-  auto in = paddle::framework::EigenVector<T>::Flatten(src_tensor);     \
-  auto out = paddle::framework::EigenVector<T>::Flatten(*dst_tensor);   \
-  auto& p = *(cpu_ctx->eigen_device());                                 \
-  out.device(p) = out + in;                                             \
+#define TENSOR_ADD_EIGEN(T)                                \
+  auto cpu_ctx = static_cast<phi::CPUContext*>(            \
+      platform::DeviceContextPool::Instance().Get(place)); \
+  auto in = phi::EigenVector<T>::Flatten(src_tensor);      \
+  auto out = phi::EigenVector<T>::Flatten(*dst_tensor);    \
+  auto& p = *(cpu_ctx->eigen_device());                    \
+  out.device(p) = out + in;                                \
   return;
 
   if (platform::is_cpu_place(place)) {
......
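
What the macro computes is plain Eigen; here is a standalone sketch of the same flatten-and-add pattern using Eigen's tensor module directly (no Paddle required; the buffers and sizes are made up for the example):

#include <unsupported/Eigen/CXX11/Tensor>

#include <iostream>
#include <vector>

int main() {
  std::vector<float> src(8, 1.0f), dst(8, 2.0f);
  // phi::EigenVector<T>::Flatten(t) is essentially a 1-D TensorMap over
  // the tensor's buffer, like these:
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> in(src.data(), 8);
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> out(dst.data(), 8);
  Eigen::DefaultDevice place;    // the kernel uses cpu_ctx->eigen_device()
  out.device(place) = out + in;  // the same expression as in TENSOR_ADD_EIGEN
  std::cout << dst[0] << "\n";   // prints 3
  return 0;
}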
@@ -70,7 +70,7 @@ class AnchorGeneratorOpKernel : public framework::OpKernel<T> {
     anchors->mutable_data<T>(ctx.GetPlace());
     vars->mutable_data<T>(ctx.GetPlace());
 
-    auto e_anchors = framework::EigenTensor<T, 4>::From(*anchors);
+    auto e_anchors = phi::EigenTensor<T, 4>::From(*anchors);
     for (int h_idx = 0; h_idx < feature_height; ++h_idx) {
       for (int w_idx = 0; w_idx < feature_width; ++w_idx) {
         T x_ctr = (w_idx * stride_width) + offset * (stride_width - 1);
@@ -110,7 +110,7 @@ class AnchorGeneratorOpKernel : public framework::OpKernel<T> {
       var_t.mutable_data<T>(
           phi::make_ddim({1, static_cast<int>(variances.size())}),
           ctx.GetPlace());
-      auto var_et = framework::EigenTensor<T, 2>::From(var_t);
+      auto var_et = phi::EigenTensor<T, 2>::From(var_t);
       for (size_t i = 0; i < variances.size(); ++i) {
         var_et(0, i) = variances[i];
       }
@@ -119,7 +119,7 @@ class AnchorGeneratorOpKernel : public framework::OpKernel<T> {
 
       auto var_dim = vars->dims();
       vars->Resize({anchor_num, static_cast<int>(variances.size())});
-      auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
+      auto e_vars = phi::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
       e_vars = var_et.broadcast(Eigen::DSizes<int, 2>(anchor_num, 1));
       vars->Resize(var_dim);
......
@@ -66,7 +66,7 @@ class DensityPriorBoxOpKernel : public framework::OpKernel<T> {
     auto box_dim = vars->dims();
     boxes->Resize({feature_height, feature_width, num_priors, 4});
-    auto e_boxes = framework::EigenTensor<T, 4>::From(*boxes).setConstant(0.0);
+    auto e_boxes = phi::EigenTensor<T, 4>::From(*boxes).setConstant(0.0);
 
     int step_average = static_cast<int>((step_width + step_height) * 0.5);
 
     std::vector<float> sqrt_fixed_ratios;
@@ -126,7 +126,7 @@ class DensityPriorBoxOpKernel : public framework::OpKernel<T> {
           phi::make_ddim({1, static_cast<int>(variances.size())}),
           ctx.GetPlace());
-      auto var_et = framework::EigenTensor<T, 2>::From(var_t);
+      auto var_et = phi::EigenTensor<T, 2>::From(var_t);
       for (size_t i = 0; i < variances.size(); ++i) {
         var_et(0, i) = variances[i];
@@ -136,7 +136,7 @@ class DensityPriorBoxOpKernel : public framework::OpKernel<T> {
       auto var_dim = vars->dims();
       vars->Resize({box_num, static_cast<int>(variances.size())});
-      auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
+      auto e_vars = phi::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
 #ifdef PADDLE_WITH_MKLML
 #pragma omp parallel for collapse(2)
 #endif
......
@@ -182,7 +182,7 @@ class PriorBoxOpKernel : public framework::OpKernel<T> {
       var_t.mutable_data<K>(
           phi::make_ddim({1, static_cast<int>(variances.size())}),
           ctx.GetPlace());
-      auto var_et = framework::EigenTensor<K, 2>::From(var_t);
+      auto var_et = phi::EigenTensor<K, 2>::From(var_t);
 #ifdef PADDLE_WITH_MKLML
 #pragma omp parallel for
@@ -195,7 +195,7 @@ class PriorBoxOpKernel : public framework::OpKernel<T> {
       auto var_dim = vars->dims();
       vars->Resize({box_num, static_cast<int>(variances.size())});
-      auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
+      auto e_vars = phi::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
 #ifdef PADDLE_WITH_MKLML
 #pragma omp parallel for collapse(2)
......
@@ -83,8 +83,8 @@ void IndexSelectInner(const framework::ExecutionContext& context,
   input->Resize(phi::make_ddim({outer_nums, input_dim[dim], slice_size}));
   output->Resize(phi::make_ddim({outer_nums, index_size, slice_size}));
 
-  auto input_tensor = framework::EigenTensor<T, 3>::From(*input);
-  auto output_tensor = framework::EigenTensor<T, 3>::From(*output);
+  auto input_tensor = phi::EigenTensor<T, 3>::From(*input);
+  auto output_tensor = phi::EigenTensor<T, 3>::From(*output);
 
   auto& place =
       *context.template device_context<DeviceContext>().eigen_device();
......
@@ -25,7 +25,7 @@ template <typename T,
           size_t D,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
+using EigenTensor = phi::EigenTensor<T, D, MajorType, IndexType>;
 
 using Tensor = phi::DenseTensor;
 using DataLayout = phi::DataLayout;
......
@@ -29,11 +29,11 @@ using LoDTensor = phi::DenseTensor;
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+using EigenVector = phi::EigenVector<T, MajorType, IndexType>;
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+using EigenMatrix = phi::EigenMatrix<T, MajorType, IndexType>;
 
 template <typename T, bool is_test>
 class MaxSeqPoolFunctor {
......
@@ -26,7 +26,7 @@ using LoDTensor = phi::DenseTensor;
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+using EigenMatrix = phi::EigenMatrix<T, MajorType, IndexType>;
 
 template <typename DeviceContext, typename T>
 struct SequenceExpandFunctor {
......
@@ -45,10 +45,10 @@ void NormGradKernel(const Context& ctx,
 
   auto* place = ctx.eigen_device();
 
-  auto x_e = paddle::framework::EigenVector<T>::Flatten(*in_x);
-  auto dy_e = paddle::framework::EigenVector<T>::Flatten(*in_dy);
-  auto norm_e = paddle::framework::EigenVector<T>::Flatten(*in_norm);
-  auto dx_e = paddle::framework::EigenVector<T>::Flatten(*out_dx);
+  auto x_e = phi::EigenVector<T>::Flatten(*in_x);
+  auto dy_e = phi::EigenVector<T>::Flatten(*in_dy);
+  auto norm_e = phi::EigenVector<T>::Flatten(*in_norm);
+  auto dx_e = phi::EigenVector<T>::Flatten(*out_dx);
 
   Eigen::DSizes<int, 3> shape(pre, n, post);
   Eigen::DSizes<int, 3> rshape(pre, 1, post);
@@ -60,7 +60,7 @@ void NormGradKernel(const Context& ctx,
 
   DenseTensor rsum;
   rsum.Resize({pre, post});
   ctx.template Alloc<T>(&rsum);
-  auto sum = paddle::framework::EigenTensor<T, 2>::From(rsum);
+  auto sum = phi::EigenTensor<T, 2>::From(rsum);
   Eigen::DSizes<int, 1> rdim(1);
   Eigen::DSizes<int, 3> bcast(1, n, 1);
......
@@ -55,9 +55,9 @@ void NormKernel(const Context& ctx,
   Eigen::DSizes<int, 3> shape(pre, n, post);
   Eigen::DSizes<int, 2> norm_shape(pre, post);
 
-  auto x_e = paddle::framework::EigenVector<T>::Flatten(x);
-  auto y_e = paddle::framework::EigenVector<T>::Flatten(*out);
-  auto norm_e = paddle::framework::EigenVector<T>::Flatten(*out_norm);
+  auto x_e = phi::EigenVector<T>::Flatten(x);
+  auto y_e = phi::EigenVector<T>::Flatten(*out);
+  auto norm_e = phi::EigenVector<T>::Flatten(*out_norm);
   auto x_r = x_e.reshape(shape);
   auto y = y_e.reshape(shape);
   auto norm_reshape = norm_e.reshape(norm_shape);
......
@@ -15,9 +15,9 @@ limitations under the License. */
 
 #pragma once
 #include <type_traits>
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/phi/kernels/funcs/activation_functor.h"
 #include "paddle/phi/kernels/funcs/detail/activation_functions.h"
+#include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/gru_compute.h"
 
 namespace phi {
@@ -27,7 +27,7 @@ using Array1 = Eigen::DSizes<int64_t, 1>;
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenVector = paddle::framework::EigenVector<T, MajorType, IndexType>;
+using EigenVector = phi::EigenVector<T, MajorType, IndexType>;
 
 #if !defined(__NVCC__) && !defined(__HIPCC___)  // @{ Group for GRU CPU
 template <class OpResetOutput, typename T>
......
@@ -15,9 +15,9 @@ limitations under the License. */
 
 #pragma once
 #include <type_traits>
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/phi/kernels/funcs/activation_functor.h"
 #include "paddle/phi/kernels/funcs/detail/activation_functions.h"
+#include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/lstm_compute.h"
 
 #if defined(_WIN32)
@@ -34,7 +34,7 @@ using Array1 = Eigen::DSizes<int64_t, 1>;
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenVector = paddle::framework::EigenVector<T, MajorType, IndexType>;
+using EigenVector = phi::EigenVector<T, MajorType, IndexType>;
 
 #if !defined(__NVCC__) && !defined(__HIPCC___)  // @{ Group LSTM CPU
......
@@ -299,9 +299,9 @@ struct RowwiseAdd<phi::CPUContext, T> {
                        in_dims_cstr,
                        out_dims_cstr));
 
-    auto in = paddle::framework::EigenMatrix<T>::From(input);
-    auto vec = paddle::framework::EigenVector<T>::Flatten(vector);
-    auto out = paddle::framework::EigenMatrix<T>::From(*output);
+    auto in = phi::EigenMatrix<T>::From(input);
+    auto vec = phi::EigenVector<T>::Flatten(vector);
+    auto out = phi::EigenMatrix<T>::From(*output);
 
     for (int64_t i = 0; i < in_dims[0]; ++i) {
       out.chip(i, 0) = in.chip(i, 0) + vec;
......
@@ -18,7 +18,6 @@ limitations under the License. */
 #include <vector>
 
 #include "paddle/fluid/framework/convert_utils.h"
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/framework/tensor_util.h"
@@ -26,6 +25,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/utils/data_type.h"
+#include "paddle/phi/kernels/funcs/eigen/common.h"
 
 namespace phi {
 namespace funcs {
......
@@ -22,13 +22,13 @@ limitations under the License. */
 
 namespace phi {
 namespace funcs {
 
-using paddle::framework::To32BitIndex;
+using phi::To32BitIndex;
 
 template <typename DeviceContext, typename T>
 void SetConstant<DeviceContext, T>::operator()(const DeviceContext& context,
                                                phi::DenseTensor* tensor,
                                                T num) {
-  auto t = paddle::framework::EigenVector<T>::Flatten(*tensor);
+  auto t = phi::EigenVector<T>::Flatten(*tensor);
   t.device(*context.eigen_device()) = t.constant(static_cast<T>(num));
 }
@@ -60,8 +60,8 @@ void Transpose<DeviceContext, T, Rank>::operator()(
   for (int i = 0; i < Rank; i++) {
     permute[i] = axis[i];
   }
-  auto eigen_in = paddle::framework::EigenTensor<T, Rank>::From(in);
-  auto eigen_out = paddle::framework::EigenTensor<T, Rank>::From(*out);
+  auto eigen_in = phi::EigenTensor<T, Rank>::From(in);
+  auto eigen_out = phi::EigenTensor<T, Rank>::From(*out);
   auto* dev = context.eigen_device();
   // use 32bit index to speed up computation
   bool use_32bit_index = eigen_out.size() < Eigen::NumTraits<int>::highest();
@@ -89,8 +89,8 @@ void ColwiseSum<DeviceContext, T>::operator()(const DeviceContext& context,
                         size,
                         out->numel()));
 
-  auto in = paddle::framework::EigenMatrix<T>::From(input);
-  auto vec = paddle::framework::EigenVector<T>::Flatten(*out);
+  auto in = phi::EigenMatrix<T>::From(input);
+  auto vec = phi::EigenVector<T>::Flatten(*out);
 
   vec.device(*context.eigen_device()) = in.sum(Eigen::array<int, 1>({{0}}));
 }
@@ -151,8 +151,8 @@ void RowwiseMean<DeviceContext, T>::operator()(const DeviceContext& context,
                         in_dims[0],
                         out->numel()));
 
-  auto in = paddle::framework::EigenMatrix<T>::From(input);
-  auto vec = paddle::framework::EigenVector<T>::Flatten(*out);
+  auto in = phi::EigenMatrix<T>::From(input);
+  auto vec = phi::EigenVector<T>::Flatten(*out);
 
   vec.device(*context.eigen_device()) = in.mean(Eigen::array<int, 1>({{1}}));
 }
@@ -217,8 +217,8 @@ void RowwiseSum<DeviceContext, T>::operator()(const DeviceContext& context,
                         in_dims[0],
                         out->numel()));
 
-  auto in = paddle::framework::EigenMatrix<T>::From(input);
-  auto vec = paddle::framework::EigenVector<T>::Flatten(*out);
+  auto in = phi::EigenMatrix<T>::From(input);
+  auto vec = phi::EigenVector<T>::Flatten(*out);
 
   vec.device(*context.eigen_device()) = in.sum(Eigen::array<int, 1>({{1}}));
 }
......
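
The reduction functors above all follow the same shape: map the input as a 2-D Eigen matrix, map the output as a 1-D vector, and reduce along one dimension on the context's device. A standalone sketch of the RowwiseSum case with plain Eigen (no Paddle required; the 2x3 data is made up for the example):

#include <unsupported/Eigen/CXX11/Tensor>

#include <iostream>

int main() {
  float data[6] = {1, 2, 3, 4, 5, 6};  // a 2 x 3 row-major matrix
  float out[2];
  Eigen::TensorMap<Eigen::Tensor<float, 2, Eigen::RowMajor>> in(data, 2, 3);
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> vec(out, 2);
  Eigen::DefaultDevice dev;  // kernels use *context.eigen_device()
  // Sum along dimension 1 (the columns), producing one value per row.
  vec.device(dev) = in.sum(Eigen::array<int, 1>({{1}}));
  std::cout << out[0] << " " << out[1] << "\n";  // prints 6 15
  return 0;
}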
@@ -16,10 +16,10 @@ limitations under the License. */
 #include <algorithm>
 #include <vector>
 
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
+#include "paddle/phi/kernels/funcs/eigen/common.h"
 
 namespace phi {
 namespace funcs {
@@ -27,7 +27,7 @@ namespace funcs {
 template <typename T,
           int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
-using EigenMatrix = paddle::framework::EigenMatrix<T, MajorType, IndexType>;
+using EigenMatrix = phi::EigenMatrix<T, MajorType, IndexType>;
 
 template <typename DeviceContext, typename T>
 class CopyMatrixRowsFunctor {
......
@@ -14,7 +14,6 @@
 
 #include "paddle/phi/kernels/conv_grad_kernel.h"
 
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/kernel_registry.h"
......
@@ -14,7 +14,6 @@
 
 #pragma once
 
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/kernel_registry.h"
......
@@ -16,7 +16,6 @@
 
 #include <string>
 
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/segment_pooling.h"
......
@@ -16,7 +16,6 @@
 
 #include <string>
 
-#include "paddle/fluid/framework/eigen.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/segment_pooling.h"
......