diff --git a/lite/fluid/eigen.h b/lite/fluid/eigen.h
index 4314a6c492be19a7213a7f438bcbc86ae2f85492..eac5332b53c857b05aacbfa95ee2e4b9fcd98a93 100644
--- a/lite/fluid/eigen.h
+++ b/lite/fluid/eigen.h
@@ -118,8 +118,8 @@ struct EigenScalar {
   using ConstType = Eigen::TensorMap<
       Eigen::TensorFixedSize<const T, Eigen::Sizes<>, MajorType, IndexType>>;
 
-  static Type From(const Tensor& tensor) {
-    return Type(const_cast<T*>(tensor.data<T>()));
+  static Type From(Tensor* tensor) {
+    return Type(const_cast<T*>(tensor->data<T>()));
   }  // NOLINT
 
   static ConstType From(const Tensor& tensor) {
diff --git a/lite/kernels/x86/reduce_compute.h b/lite/kernels/x86/reduce_compute.h
index faace5e24ecc58ca0a518c64a598a105dc00f93e..655f104ce65906f1904a7cf02d703069b0a7a2bf 100644
--- a/lite/kernels/x86/reduce_compute.h
+++ b/lite/kernels/x86/reduce_compute.h
@@ -56,7 +56,7 @@ class ReduceSumCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     if (reduce_all) {
       // Flatten and reduce 1-D tensor
       auto x = lite::fluid::EigenVector<T>::Flatten(*input);
-      auto out = lite::fluid::EigenScalar<T>::From(*output);
+      auto out = lite::fluid::EigenScalar<T>::From(output);
       // auto& place = *platform::CPUDeviceContext().eigen_device();
       auto reduce_dim = Eigen::array<int, 1>({{0}});
       SumFunctor functor;
diff --git a/lite/kernels/x86/reduce_op_function.h b/lite/kernels/x86/reduce_op_function.h
index be3ef6ed0d79b7aac7c0ff2f307dec58c3368ca6..b3ddab64e4bf8dc72cec3b86398f42269c5a947c 100644
--- a/lite/kernels/x86/reduce_op_function.h
+++ b/lite/kernels/x86/reduce_op_function.h
@@ -70,7 +70,7 @@ void ReduceFunctor(const lite::Tensor& input,
   Functor functor;
 
   if (D == 1) {
-    auto out = EigenScalar<T>::From(*output);
+    auto out = EigenScalar<T>::From(output);
     functor(&x, &out, reduce_dim);
   } else {
     auto out = EigenTensor<T, (D - R_D)>::From(*output, out_dims);
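
Note on the signature change: the deleted overload static Type From(const Tensor& tensor) had the same parameter list as the read-only static ConstType From(const Tensor& tensor) that remains below it, and C++ does not allow overloads that differ only in their return type. Taking Tensor* for the mutable accessor gives the two From functions distinct signatures, which is also why the call sites now pass output instead of *output. The following is a minimal standalone sketch of that rule; the stand-in types (Tensor, EigenScalarSketch) are hypothetical and not the Paddle-Lite headers.

// sketch.cc -- minimal illustration only, not the Paddle-Lite sources.
struct Tensor {};  // hypothetical stand-in for lite::Tensor

struct EigenScalarSketch {
  using Type = float*;             // stand-in for the mutable Eigen::TensorMap
  using ConstType = const float*;  // stand-in for the read-only Eigen::TensorMap

  // static Type From(const Tensor& tensor);  // would not compile next to the
  //                                          // ConstType overload below: overloads
  //                                          // cannot differ only in return type.

  static Type From(Tensor* tensor) { return nullptr; }             // mutable view
  static ConstType From(const Tensor& tensor) { return nullptr; }  // read-only view
};

int main() {
  Tensor t;
  auto mutable_view = EigenScalarSketch::From(&t);   // resolves to the Tensor* overload
  auto read_only_view = EigenScalarSketch::From(t);  // resolves to the const Tensor& overload
  (void)mutable_view;
  (void)read_only_view;
  return 0;
}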