diff --git a/paddle/pten/api/lib/creation.cc b/paddle/pten/api/lib/creation.cc index 088ff919596258fc990af4422a3809cd63282869..de44f96a99392f95015975e51e02e696407d2ab8 100644 --- a/paddle/pten/api/lib/creation.cc +++ b/paddle/pten/api/lib/creation.cc @@ -52,8 +52,8 @@ PD_DLL_DECL Tensor full(const ScalarArray& shape, kernel_context.EmplaceBackAttr(pten::ScalarArray(shape)); kernel_context.EmplaceBackAttr(pten::Scalar(value)); - // 4. InferShape - auto out_meta = pten::FullInferShape(shape, dtype, layout); + // 4. InferMeta + auto out_meta = pten::FullInferMeta(shape, dtype, layout); // 5. Prepare outputs const auto allocator = @@ -97,8 +97,8 @@ PD_DLL_DECL Tensor full_like(const Tensor& x, auto dense_x = std::dynamic_pointer_cast(x.impl()); kernel_context.EmplaceBackAttr(pten::Scalar(value)); - // 4. InferShape - auto out_meta = FullLikeInferShape(dense_x->meta(), dtype, layout); + // 4. InferMeta + auto out_meta = FullLikeInferMeta(dense_x->meta(), dtype, layout); // 5. Prepare outputs Tensor out; diff --git a/paddle/pten/api/lib/linalg.cc b/paddle/pten/api/lib/linalg.cc index 3b7d4994ab75ab4655835b69349ec737958877fa..3255b3b1b5eaa3013d2f83c3e4704640b1523f78 100644 --- a/paddle/pten/api/lib/linalg.cc +++ b/paddle/pten/api/lib/linalg.cc @@ -55,8 +55,8 @@ PD_DLL_DECL Tensor dot(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); // TODO(chenweihang): add transform impl - // 4. InferShape - auto out_meta = DotInferShape(dense_x->meta(), dense_y->meta()); + // 4. InferMeta + auto out_meta = DotInferMeta(dense_x->meta(), dense_y->meta()); // 5. Prepare outputs Tensor out; @@ -95,8 +95,8 @@ PD_DLL_DECL Tensor matmul(const Tensor& x, kernel_context.EmplaceBackAttr(transpose_y); // TODO(chenweihang): add transform impl - // 4. InferShape - auto out_meta = MatmulInferShape( + // 4. InferMeta + auto out_meta = MatmulInferMeta( dense_x->meta(), dense_y->meta(), transpose_x, transpose_y); // 5. 
Prepare outputs diff --git a/paddle/pten/api/lib/manipulation.cc b/paddle/pten/api/lib/manipulation.cc index d06602d721d712a91443d7c6926ed09f4ef9b256..51a7702d9fc6e75417f8fcc2b96730b5e43d18e5 100644 --- a/paddle/pten/api/lib/manipulation.cc +++ b/paddle/pten/api/lib/manipulation.cc @@ -50,8 +50,8 @@ PD_DLL_DECL Tensor flatten(const Tensor& x, int start_axis, int stop_axis) { kernel_context.EmplaceBackAttr(start_axis); kernel_context.EmplaceBackAttr(stop_axis); - // 4. InferShape - auto out_meta = FlattenInferShape(dense_x->meta(), start_axis, stop_axis); + // 4. InferMeta + auto out_meta = FlattenInferMeta(dense_x->meta(), start_axis, stop_axis); // 5. Prepare outputs Tensor out; @@ -84,7 +84,7 @@ PD_DLL_DECL Tensor cast(const Tensor& x, DataType out_dtype) { kernel_context.EmplaceBackAttr(out_dtype); kernel_context.EmplaceBackAttr(dense_x->meta().dtype); - // 4. InferShape + // 4. InferMeta auto out_meta = CastInferMeta(dense_x->meta(), out_dtype); // 5. Prepare outputs @@ -117,8 +117,8 @@ PD_DLL_DECL Tensor reshape(const Tensor& x, const std::vector& shape) { kernel_context.EmplaceBackInput(dense_x); kernel_context.EmplaceBackAttr(shape); - // 4. InferShape - auto out_meta = InferShapeFromVecValue(dense_x->meta(), shape); + // 4. InferMeta + auto out_meta = InferMetaFromVecValue(dense_x->meta(), shape); // 5. Prepare outputs Tensor out; diff --git a/paddle/pten/api/lib/math.cc b/paddle/pten/api/lib/math.cc index 56a2cddae2a06cbd07a50c354f1430c1343729f2..cbae7dcfc8abb437bc8012a54dc527f5cab58420 100644 --- a/paddle/pten/api/lib/math.cc +++ b/paddle/pten/api/lib/math.cc @@ -50,8 +50,8 @@ PD_DLL_DECL Tensor mean(const Tensor& x) { auto dense_x = std::dynamic_pointer_cast(x.impl()); kernel_context.EmplaceBackInput(dense_x); - // 4. InferShape - auto out_meta = ReductionInferShape(dense_x->meta()); + // 4. InferMeta + auto out_meta = ReductionInferMeta(dense_x->meta()); // 5. 
Prepare outputs Tensor out; @@ -86,8 +86,8 @@ PD_DLL_DECL Tensor add(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; @@ -121,8 +121,8 @@ PD_DLL_DECL Tensor subtract(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; @@ -156,8 +156,8 @@ PD_DLL_DECL Tensor divide(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; @@ -191,8 +191,8 @@ PD_DLL_DECL Tensor multiply(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; diff --git a/paddle/pten/api/lib/utils.cc b/paddle/pten/api/lib/utils.cc index 90ea1c357ffcdf8fc24c45f8a91c306b56809286..c17e2e33a6d2d03a8ca1fdaf66ab8558b6396d4b 100644 --- a/paddle/pten/api/lib/utils.cc +++ b/paddle/pten/api/lib/utils.cc @@ -55,7 +55,7 @@ PD_DLL_DECL Tensor copy_to(const Tensor& x, Backend backend, bool blocking) { kernel_context.EmplaceBackAttr(blocking); // 4. 
InferMeta - auto out_meta = UnchangedInferShape(dense_x->meta()); + auto out_meta = UnchangedInferMeta(dense_x->meta()); // 5. Prepare outputs const auto allocator = diff --git a/paddle/pten/include/creation.h b/paddle/pten/include/creation.h index 9ddc9b30f6b2005f351799949a1fa8c53417efab..ee6b9c11063019c7f1a774f86a46eefb2368aa66 100644 --- a/paddle/pten/include/creation.h +++ b/paddle/pten/include/creation.h @@ -31,7 +31,7 @@ DenseTensor FillAnyLike( DataType dtype = DataType::UNDEFINED, Backend backend = Backend::UNDEFINED, // Is backend needed here? DataLayout layout = DataLayout::UNDEFINED) { - auto out_meta = FullLikeInferShape(x.meta(), dtype, layout); + auto out_meta = FullLikeInferMeta(x.meta(), dtype, layout); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/include/linalg.h b/paddle/pten/include/linalg.h index 70eebac5b684154e5dd9d8242aed562391562c38..6d5b51675e815a4c8a1dfb74d41edaf3a7779f8c 100644 --- a/paddle/pten/include/linalg.h +++ b/paddle/pten/include/linalg.h @@ -26,7 +26,7 @@ template DenseTensor Dot(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y) { - auto out_meta = DotInferShape(x.meta(), y.meta()); + auto out_meta = DotInferMeta(x.meta(), y.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/include/manipulation.h b/paddle/pten/include/manipulation.h index f6a7fcd3882f015fe1b8d7ddac8151cf970951fa..7a7ecb589842b3f7c7d1e0e2fefa24d37bddcdd5 100644 --- a/paddle/pten/include/manipulation.h +++ b/paddle/pten/include/manipulation.h @@ -28,7 +28,7 @@ DenseTensor Flatten(const ContextT& dev_ctx, const DenseTensor& x, int start_axis, int stop_axis) { - auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis); + auto out_meta = FlattenInferMeta(x.meta(), start_axis, stop_axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -55,7 +55,7 @@ template DenseTensor Reshape(const ContextT& dev_ctx, const DenseTensor& x, const 
std::vector& shape) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/include/math.h b/paddle/pten/include/math.h index cc4c4f33f74c929fdde0e17fe3c718219961f104..8fc4e27ee758f1f941c318a0b4575731a41efdc5 100644 --- a/paddle/pten/include/math.h +++ b/paddle/pten/include/math.h @@ -24,7 +24,7 @@ namespace pten { template DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) { - auto out_meta = UnchangedInferShape(x.meta()); + auto out_meta = UnchangedInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -35,7 +35,7 @@ DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) { template DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) { - auto out_meta = ReductionInferShape(x.meta()); + auto out_meta = ReductionInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -50,7 +50,7 @@ DenseTensor Scale(const ContextT& dev_ctx, float scale, float bias, bool bias_after_scale) { - auto out_meta = UnchangedInferShape(x.meta()); + auto out_meta = UnchangedInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -65,7 +65,7 @@ DenseTensor Scale(const ContextT& dev_ctx, const DenseTensor& scale, float bias, bool bias_after_scale) { - auto out_meta = UnchangedInferShape(x.meta()); + auto out_meta = UnchangedInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -79,7 +79,7 @@ DenseTensor Add(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -93,7 +93,7 @@ DenseTensor Subtract(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) 
{ - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -107,7 +107,7 @@ DenseTensor Divide(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -121,7 +121,7 @@ DenseTensor Multiply(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/infermeta/binary.cc b/paddle/pten/infermeta/binary.cc index 838e450007fcd458ae97ca0e00bb10dbe2e6c55f..a0d4cba90dae7b13b978a3d9c28d5968bde56cc4 100644 --- a/paddle/pten/infermeta/binary.cc +++ b/paddle/pten/infermeta/binary.cc @@ -18,8 +18,8 @@ limitations under the License. 
*/ namespace pten { -DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta) { +DenseTensorMeta DotInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta) { auto x_dims = x_meta.dims; auto x_rank = static_cast(x_dims.size()); PADDLE_ENFORCE_EQ(true, @@ -60,10 +60,10 @@ DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta, return return_meta; } -DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - bool trans_x, - bool trans_y) { +DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + bool trans_x, + bool trans_y) { std::vector dims_x = paddle::framework::vectorize(x_meta.dims); std::vector dims_y = paddle::framework::vectorize(y_meta.dims); auto ndims_x = dims_x.size(); @@ -130,9 +130,9 @@ DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta, return {x_meta.dtype, ddim_out, x_meta.layout}; } -DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - int axis) { +DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + int axis) { DenseTensorMeta return_meta(x_meta.dtype, x_meta.dims, x_meta.layout); if (x_meta.dims != y_meta.dims) { auto x_dims = x_meta.dims; diff --git a/paddle/pten/infermeta/binary.h b/paddle/pten/infermeta/binary.h index 35749ab61b3661a5fbdfde227eed76b71ef657db..c86fc12a20abef6db422b93c1aa258e008688e0c 100644 --- a/paddle/pten/infermeta/binary.h +++ b/paddle/pten/infermeta/binary.h @@ -19,29 +19,29 @@ limitations under the License. */ namespace pten { -// Common InferShape Functions for binary operators, The format like: +// Common InferMeta Functions for binary operators, The format like: // -// 1. DenseTensorMeta [OpName]InferShape(const DenseTensorMeta& x_meta, ...) +// 1. DenseTensorMeta [OpName]InferMeta(const DenseTensorMeta& x_meta, ...) // {} -// 2. 
std::pair [OpName]InferShape(const +// 2. std::pair [OpName]InferMeta(const // DenseTensorMeta& // x_meta, ...) {} // 3. std::tuple -// [OpName]InferShape(const +// [OpName]InferMeta(const // DenseTensorMeta& x_meta, ...) -// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good. +// NOTE: The name "InferShape" was not appropriate; "InferMeta" is used instead. // Because functions in this file // not only can infer shape, but alse need infer lod or other useful data. -DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta); +DenseTensorMeta DotInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta); -DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - bool trans_x, - bool trans_y); +DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + bool trans_x, + bool trans_y); -DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - int axis); +DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + int axis); } // namespace pten diff --git a/paddle/pten/infermeta/nary.cc b/paddle/pten/infermeta/nary.cc index d79945a384ac84686ee73d2faba323887bab93c6..8b12a88f10fc011b7faa373f896ba4acdc1398da 100644 --- a/paddle/pten/infermeta/nary.cc +++ b/paddle/pten/infermeta/nary.cc @@ -17,16 +17,16 @@ limitations under the License.
*/ namespace pten { -DenseTensorMeta FullInferShape(const std::vector& shape, - DataType dtype, - DataLayout layout) { +DenseTensorMeta FullInferMeta(const std::vector& shape, + DataType dtype, + DataLayout layout) { const auto& out_dims = paddle::framework::make_ddim(shape); return {dtype, out_dims, layout}; } -DenseTensorMeta FullInferShape(const ScalarArray& shape, - DataType dtype, - DataLayout layout) { +DenseTensorMeta FullInferMeta(const ScalarArray& shape, + DataType dtype, + DataLayout layout) { const auto& out_dims = paddle::framework::make_ddim(shape.GetData()); return {dtype, out_dims, layout}; } diff --git a/paddle/pten/infermeta/nary.h b/paddle/pten/infermeta/nary.h index c526583d7ba75ca887e1c1b6ca59c64e6515dd47..010accd2e79e54a223122526df71d6cef0c98b3d 100644 --- a/paddle/pten/infermeta/nary.h +++ b/paddle/pten/infermeta/nary.h @@ -19,20 +19,20 @@ limitations under the License. */ namespace pten { -// Common InferShape Functions for 0-nary operators(no input tensor), The format +// Common InferMeta Functions for 0-nary operators(no input tensor), The format // like: // -// 1. DenseTensorMeta [OpName]InferShape( ...) -// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good. +// 1. DenseTensorMeta [OpName]InferMeta( ...) +// NOTE: The name "InferShape" was not appropriate; "InferMeta" is used instead. // Because functions in this file // not only can infer shape, but alse need infer lod or other useful data.
-DenseTensorMeta FullInferShape(const std::vector& shape, - DataType dtype, - DataLayout layout); +DenseTensorMeta FullInferMeta(const std::vector& shape, + DataType dtype, + DataLayout layout); -DenseTensorMeta FullInferShape(const ScalarArray& shape, - DataType dtype, - DataLayout layout); +DenseTensorMeta FullInferMeta(const ScalarArray& shape, + DataType dtype, + DataLayout layout); } // namespace pten diff --git a/paddle/pten/infermeta/unary.cc b/paddle/pten/infermeta/unary.cc index 945a0b4e23f0eb7ac6493111a39018f5baac6da3..1ffdd1fed2815159a3888309df01a1b6ed39aa60 100644 --- a/paddle/pten/infermeta/unary.cc +++ b/paddle/pten/infermeta/unary.cc @@ -17,19 +17,19 @@ limitations under the License. */ namespace pten { -DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta) { +DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta) { return x_meta; } -DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta) { +DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta) { const auto& out_dims = paddle::framework::make_ddim({1}); DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout); return return_meta; } -DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta, - int start_axis, - int stop_axis) { +DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta, + int start_axis, + int stop_axis) { auto& x_dims = x_meta.dims; int in_dims_size = x_dims.size(); if (start_axis < 0) { @@ -80,9 +80,9 @@ DenseTensorMeta CastInferMeta(const DenseTensorMeta& x_meta, return out_meta; } -DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta, - DataType dtype, - DataLayout layout) { +DenseTensorMeta FullLikeInferMeta(const DenseTensorMeta& x_meta, + DataType dtype, + DataLayout layout) { return {dtype == DataType::UNDEFINED ? x_meta.dtype : dtype, x_meta.dims, layout == DataLayout::UNDEFINED ? 
x_meta.layout : layout}; @@ -208,8 +208,8 @@ static paddle::framework::DDim ValidateShape( return paddle::framework::make_ddim(output_shape); } -DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta, - const std::vector& shape) { +DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta, + const std::vector& shape) { PADDLE_ENFORCE_EQ(!shape.empty(), true, paddle::platform::errors::InvalidArgument( diff --git a/paddle/pten/infermeta/unary.h b/paddle/pten/infermeta/unary.h index 92c14d43ea94b4673cea0f915f5265a38ced3421..696e2b06df5f83b2b9e819a47374fd1021bf02f5 100644 --- a/paddle/pten/infermeta/unary.h +++ b/paddle/pten/infermeta/unary.h @@ -19,34 +19,34 @@ limitations under the License. */ namespace pten { -// Common InferShape Functions for unary operators, The format like: +// Common InferMeta Functions for unary operators, The format like: // -// 1. DenseTensorMeta [OpName]InferShape(const DenseTensorMeta& x_meta, ...) +// 1. DenseTensorMeta [OpName]InferMeta(const DenseTensorMeta& x_meta, ...) // {} -// 2. std::pair [OpName]InferShape(const +// 2. std::pair [OpName]InferMeta(const // DenseTensorMeta& // x_meta, ...) {} // 3. std::tuple -// [OpName]InferShape(const +// [OpName]InferMeta(const // DenseTensorMeta& x_meta, ...) -// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good. +// NOTE: The name "InferShape" was not appropriate; "InferMeta" is used instead. // Because functions in this file // not only can infer shape, but alse need infer lod or other useful data.
-DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta); +DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta); -DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta); +DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta); -DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta, - int start_axis, - int stop_axis); +DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta, + int start_axis, + int stop_axis); DenseTensorMeta CastInferMeta(const DenseTensorMeta& x_meta, const DataType out_dtype); -DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta, - DataType dtype, - DataLayout layout); +DenseTensorMeta FullLikeInferMeta(const DenseTensorMeta& x_meta, + DataType dtype, + DataLayout layout); -DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta, - const std::vector& shape); +DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta, + const std::vector& shape); } // namespace pten diff --git a/paddle/pten/kernels/cpu/manipulation.cc b/paddle/pten/kernels/cpu/manipulation.cc index c7027e487b0e384084ebad33f9de6b0f15e82354..8f559b01b3bcb37275a4c0f24146b0fa323c35f1 100644 --- a/paddle/pten/kernels/cpu/manipulation.cc +++ b/paddle/pten/kernels/cpu/manipulation.cc @@ -50,7 +50,7 @@ void ReshapeFromVectorVal(const CPUContext& dev_ctx, const DenseTensor& x, const std::vector& shape, DenseTensor* out) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); if (&x == out) { out->Resize(out_meta.dims); return; diff --git a/paddle/pten/kernels/cuda/manipulation.cu b/paddle/pten/kernels/cuda/manipulation.cu index 9b8f18dab4ee6825032cec07b59b4d4ad765ec3e..f4bf9322047b442b4a78a1d67faa51cfabadfb56 100644 --- a/paddle/pten/kernels/cuda/manipulation.cu +++ b/paddle/pten/kernels/cuda/manipulation.cu @@ -50,7 +50,7 @@ void ReshapeFromVectorVal(const CUDAContext& dev_ctx, const DenseTensor& x, const std::vector& shape, 
DenseTensor* out) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); if (&x == out) { out->Resize(out_meta.dims); return; diff --git a/paddle/pten/kernels/xpu/manipulation.cc b/paddle/pten/kernels/xpu/manipulation.cc index d55def8b8a7b43193df5254dc783c2a2c9ba6369..352d21e6e9bf94a96bca544e12a73dbaf8dfc14f 100644 --- a/paddle/pten/kernels/xpu/manipulation.cc +++ b/paddle/pten/kernels/xpu/manipulation.cc @@ -55,7 +55,7 @@ void ReshapeFromVectorVal(const XPUContext& dev_ctx, const DenseTensor& x, const std::vector& shape, DenseTensor* out) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); if (&x == out) { out->Resize(out_meta.dims); return;