diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc
index 29c7f5d0ce73cbf1af18e6f5869d59d2200917ad..f57674d5601813cbc4f10b7ad74d18b00622a0bb 100644
--- a/paddle/fluid/framework/infershape_utils.cc
+++ b/paddle/fluid/framework/infershape_utils.cc
@@ -249,13 +249,13 @@ class CompatMetaTensor : public phi::MetaTensor {
   }
 
   void share_meta(const MetaTensor& meta_tensor) override {
+    share_dims(meta_tensor);
     set_dtype(meta_tensor.dtype());
     // VarDesc doesn't contains layout, so we cannot share layout
     // set_layout(meta_tensor.layout());
 
-    // special case 1: share lod of LoDTensor
+    // special case: share lod of LoDTensor
     share_lod(meta_tensor);
-    share_dims(meta_tensor);
   }
 
  private:
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index af90baf27d3f5bd79faf143319bfaf361992e649..3840b99dd176d5b348533f3e50f7f90fc3250ea1 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -215,7 +215,7 @@ REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                   ops::SoftmaxOpGradMaker<paddle::framework::OpDesc>,
                   ops::SoftmaxOpGradMaker<paddle::imperative::OpBase>,
                   ops::SoftmaxInplaceInferer, SoftmaxInferShapeFunctor);
-DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradnferShapeFunctor,
+DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradInferShapeFunctor,
                             PD_INFER_META(phi::GeneralUnaryGradInferMeta));
 REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad,
-                  SoftmaxGradnferShapeFunctor);
+                  SoftmaxGradInferShapeFunctor);
diff --git a/paddle/phi/core/meta_tensor.cc b/paddle/phi/core/meta_tensor.cc
index eb114304f53ea08b05d36792330cf5bd3ebbee5d..38a6e09a61ef83aa313a67a5de1ee21ce16038eb 100644
--- a/paddle/phi/core/meta_tensor.cc
+++ b/paddle/phi/core/meta_tensor.cc
@@ -110,7 +110,7 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) {
   }
 }
 
-TensorBase* MetaTensor::get_tensor() const { return tensor_; }
+TensorBase* MetaTensor::tensor() const { return tensor_; }
 
 void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   bool is_dense_tensor = phi::DenseTensor::classof(tensor_);
@@ -118,7 +118,7 @@ void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   if (is_dense_tensor || is_selected_rows) {
     set_dims(meta_tensor.dims());
     if (is_selected_rows) {
-      const auto in_tensor_base = meta_tensor.get_tensor();
+      const auto in_tensor_base = meta_tensor.tensor();
       PADDLE_ENFORCE_EQ(
           phi::SelectedRows::classof(in_tensor_base),
           true,
diff --git a/paddle/phi/core/meta_tensor.h b/paddle/phi/core/meta_tensor.h
index 3971a9f7e99e0282cae5e4d1e61ee6eb28c4b9a7..79f8d1c057e85b11a46a90652c769459db178e14 100644
--- a/paddle/phi/core/meta_tensor.h
+++ b/paddle/phi/core/meta_tensor.h
@@ -66,7 +66,7 @@ class MetaTensor {
   // Because the lod in compiletime and runtime is different,
   // so `LoD` cannot in public methods
   const LoD& lod() const;
-  TensorBase* get_tensor() const;
+  TensorBase* tensor() const;
 
   TensorBase* tensor_;
 };
diff --git a/paddle/phi/kernels/funcs/matrix_inverse.h b/paddle/phi/kernels/funcs/matrix_inverse.h
index c5b04a8106561962b6916907d86450a63c763830..1c6756f1720a23ada5bb4ff2fdb4f4840660ed58 100644
--- a/paddle/phi/kernels/funcs/matrix_inverse.h
+++ b/paddle/phi/kernels/funcs/matrix_inverse.h
@@ -39,7 +39,7 @@ void ComputeInverseEigen(const Context& dev_ctx,
   int batch_size = rank > 2 ? a.numel() / (n * n) : 1;
 
   const T* a_ptr = a.data<T>();
-  T* a_inv_ptr = a_inv->mutable_data<T>(dev_ctx.GetPlace());
+  T* a_inv_ptr = dev_ctx.template Alloc<T>(a_inv);
 
   for (int i = 0; i < batch_size; ++i) {
     ConstEigenMatrixMap mat(a_ptr + i * n * n, n, n);