From 2a905f6bd4d3f3ca84c991c31d87df61adf4f9f9 Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Wed, 24 Nov 2021 20:16:29 -0600 Subject: [PATCH] infershape func to infermeta (#37524) --- paddle/pten/api/lib/creation.cc | 8 +++---- paddle/pten/api/lib/linalg.cc | 8 +++---- paddle/pten/api/lib/manipulation.cc | 10 ++++---- paddle/pten/api/lib/math.cc | 20 ++++++++-------- paddle/pten/api/lib/utils.cc | 2 +- paddle/pten/include/creation.h | 2 +- paddle/pten/include/linalg.h | 2 +- paddle/pten/include/manipulation.h | 4 ++-- paddle/pten/include/math.h | 16 ++++++------- paddle/pten/infermeta/binary.cc | 18 +++++++------- paddle/pten/infermeta/binary.h | 28 +++++++++++----------- paddle/pten/infermeta/nary.cc | 12 +++++----- paddle/pten/infermeta/nary.h | 18 +++++++------- paddle/pten/infermeta/unary.cc | 20 ++++++++-------- paddle/pten/infermeta/unary.h | 30 ++++++++++++------------ paddle/pten/kernels/cpu/manipulation.cc | 2 +- paddle/pten/kernels/cuda/manipulation.cu | 2 +- paddle/pten/kernels/xpu/manipulation.cc | 2 +- 18 files changed, 102 insertions(+), 102 deletions(-) diff --git a/paddle/pten/api/lib/creation.cc b/paddle/pten/api/lib/creation.cc index 088ff91959..de44f96a99 100644 --- a/paddle/pten/api/lib/creation.cc +++ b/paddle/pten/api/lib/creation.cc @@ -52,8 +52,8 @@ PD_DLL_DECL Tensor full(const ScalarArray& shape, kernel_context.EmplaceBackAttr(pten::ScalarArray(shape)); kernel_context.EmplaceBackAttr(pten::Scalar(value)); - // 4. InferShape - auto out_meta = pten::FullInferShape(shape, dtype, layout); + // 4. InferMeta + auto out_meta = pten::FullInferMeta(shape, dtype, layout); // 5. Prepare outputs const auto allocator = @@ -97,8 +97,8 @@ PD_DLL_DECL Tensor full_like(const Tensor& x, auto dense_x = std::dynamic_pointer_cast(x.impl()); kernel_context.EmplaceBackAttr(pten::Scalar(value)); - // 4. InferShape - auto out_meta = FullLikeInferShape(dense_x->meta(), dtype, layout); + // 4. 
InferMeta + auto out_meta = FullLikeInferMeta(dense_x->meta(), dtype, layout); // 5. Prepare outputs Tensor out; diff --git a/paddle/pten/api/lib/linalg.cc b/paddle/pten/api/lib/linalg.cc index 3b7d4994ab..3255b3b1b5 100644 --- a/paddle/pten/api/lib/linalg.cc +++ b/paddle/pten/api/lib/linalg.cc @@ -55,8 +55,8 @@ PD_DLL_DECL Tensor dot(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); // TODO(chenweihang): add transform impl - // 4. InferShape - auto out_meta = DotInferShape(dense_x->meta(), dense_y->meta()); + // 4. InferMeta + auto out_meta = DotInferMeta(dense_x->meta(), dense_y->meta()); // 5. Prepare outputs Tensor out; @@ -95,8 +95,8 @@ PD_DLL_DECL Tensor matmul(const Tensor& x, kernel_context.EmplaceBackAttr(transpose_y); // TODO(chenweihang): add transform impl - // 4. InferShape - auto out_meta = MatmulInferShape( + // 4. InferMeta + auto out_meta = MatmulInferMeta( dense_x->meta(), dense_y->meta(), transpose_x, transpose_y); // 5. Prepare outputs diff --git a/paddle/pten/api/lib/manipulation.cc b/paddle/pten/api/lib/manipulation.cc index d06602d721..51a7702d9f 100644 --- a/paddle/pten/api/lib/manipulation.cc +++ b/paddle/pten/api/lib/manipulation.cc @@ -50,8 +50,8 @@ PD_DLL_DECL Tensor flatten(const Tensor& x, int start_axis, int stop_axis) { kernel_context.EmplaceBackAttr(start_axis); kernel_context.EmplaceBackAttr(stop_axis); - // 4. InferShape - auto out_meta = FlattenInferShape(dense_x->meta(), start_axis, stop_axis); + // 4. InferMeta + auto out_meta = FlattenInferMeta(dense_x->meta(), start_axis, stop_axis); // 5. Prepare outputs Tensor out; @@ -84,7 +84,7 @@ PD_DLL_DECL Tensor cast(const Tensor& x, DataType out_dtype) { kernel_context.EmplaceBackAttr(out_dtype); kernel_context.EmplaceBackAttr(dense_x->meta().dtype); - // 4. InferShape + // 4. InferMeta auto out_meta = CastInferMeta(dense_x->meta(), out_dtype); // 5. 
Prepare outputs @@ -117,8 +117,8 @@ PD_DLL_DECL Tensor reshape(const Tensor& x, const std::vector& shape) { kernel_context.EmplaceBackInput(dense_x); kernel_context.EmplaceBackAttr(shape); - // 4. InferShape - auto out_meta = InferShapeFromVecValue(dense_x->meta(), shape); + // 4. InferMeta + auto out_meta = InferMetaFromVecValue(dense_x->meta(), shape); // 5. Prepare outputs Tensor out; diff --git a/paddle/pten/api/lib/math.cc b/paddle/pten/api/lib/math.cc index 56a2cddae2..cbae7dcfc8 100644 --- a/paddle/pten/api/lib/math.cc +++ b/paddle/pten/api/lib/math.cc @@ -50,8 +50,8 @@ PD_DLL_DECL Tensor mean(const Tensor& x) { auto dense_x = std::dynamic_pointer_cast(x.impl()); kernel_context.EmplaceBackInput(dense_x); - // 4. InferShape - auto out_meta = ReductionInferShape(dense_x->meta()); + // 4. InferMeta + auto out_meta = ReductionInferMeta(dense_x->meta()); // 5. Prepare outputs Tensor out; @@ -86,8 +86,8 @@ PD_DLL_DECL Tensor add(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; @@ -121,8 +121,8 @@ PD_DLL_DECL Tensor subtract(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; @@ -156,8 +156,8 @@ PD_DLL_DECL Tensor divide(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. 
InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; @@ -191,8 +191,8 @@ PD_DLL_DECL Tensor multiply(const Tensor& x, const Tensor& y) { kernel_context.EmplaceBackInput(dense_y); kernel_context.EmplaceBackAttr(-1); - // 4. InferShape - auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1); + // 4. InferMeta + auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1); // 5. Prepare outputs Tensor out; diff --git a/paddle/pten/api/lib/utils.cc b/paddle/pten/api/lib/utils.cc index 90ea1c357f..c17e2e33a6 100644 --- a/paddle/pten/api/lib/utils.cc +++ b/paddle/pten/api/lib/utils.cc @@ -55,7 +55,7 @@ PD_DLL_DECL Tensor copy_to(const Tensor& x, Backend backend, bool blocking) { kernel_context.EmplaceBackAttr(blocking); // 4. InferMeta - auto out_meta = UnchangedInferShape(dense_x->meta()); + auto out_meta = UnchangedInferMeta(dense_x->meta()); // 5. Prepare outputs const auto allocator = diff --git a/paddle/pten/include/creation.h b/paddle/pten/include/creation.h index 9ddc9b30f6..ee6b9c1106 100644 --- a/paddle/pten/include/creation.h +++ b/paddle/pten/include/creation.h @@ -31,7 +31,7 @@ DenseTensor FillAnyLike( DataType dtype = DataType::UNDEFINED, Backend backend = Backend::UNDEFINED, // Is backend needed here? 
DataLayout layout = DataLayout::UNDEFINED) { - auto out_meta = FullLikeInferShape(x.meta(), dtype, layout); + auto out_meta = FullLikeInferMeta(x.meta(), dtype, layout); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/include/linalg.h b/paddle/pten/include/linalg.h index 70eebac5b6..6d5b51675e 100644 --- a/paddle/pten/include/linalg.h +++ b/paddle/pten/include/linalg.h @@ -26,7 +26,7 @@ template DenseTensor Dot(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y) { - auto out_meta = DotInferShape(x.meta(), y.meta()); + auto out_meta = DotInferMeta(x.meta(), y.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/include/manipulation.h b/paddle/pten/include/manipulation.h index f6a7fcd388..7a7ecb5898 100644 --- a/paddle/pten/include/manipulation.h +++ b/paddle/pten/include/manipulation.h @@ -28,7 +28,7 @@ DenseTensor Flatten(const ContextT& dev_ctx, const DenseTensor& x, int start_axis, int stop_axis) { - auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis); + auto out_meta = FlattenInferMeta(x.meta(), start_axis, stop_axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -55,7 +55,7 @@ template DenseTensor Reshape(const ContextT& dev_ctx, const DenseTensor& x, const std::vector& shape) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/include/math.h b/paddle/pten/include/math.h index cc4c4f33f7..8fc4e27ee7 100644 --- a/paddle/pten/include/math.h +++ b/paddle/pten/include/math.h @@ -24,7 +24,7 @@ namespace pten { template DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) { - auto out_meta = UnchangedInferShape(x.meta()); + auto out_meta = UnchangedInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -35,7 +35,7 @@ DenseTensor Sign(const 
ContextT& dev_ctx, const DenseTensor& x) { template DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) { - auto out_meta = ReductionInferShape(x.meta()); + auto out_meta = ReductionInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -50,7 +50,7 @@ DenseTensor Scale(const ContextT& dev_ctx, float scale, float bias, bool bias_after_scale) { - auto out_meta = UnchangedInferShape(x.meta()); + auto out_meta = UnchangedInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -65,7 +65,7 @@ DenseTensor Scale(const ContextT& dev_ctx, const DenseTensor& scale, float bias, bool bias_after_scale) { - auto out_meta = UnchangedInferShape(x.meta()); + auto out_meta = UnchangedInferMeta(x.meta()); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -79,7 +79,7 @@ DenseTensor Add(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -93,7 +93,7 @@ DenseTensor Subtract(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -107,7 +107,7 @@ DenseTensor Divide(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); @@ -121,7 +121,7 @@ DenseTensor Multiply(const ContextT& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis) { - auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis); + auto out_meta = 
ElementwiseInferMeta(x.meta(), y.meta(), axis); const auto allocator = std::make_shared( dev_ctx.GetPlace()); diff --git a/paddle/pten/infermeta/binary.cc b/paddle/pten/infermeta/binary.cc index 838e450007..a0d4cba90d 100644 --- a/paddle/pten/infermeta/binary.cc +++ b/paddle/pten/infermeta/binary.cc @@ -18,8 +18,8 @@ limitations under the License. */ namespace pten { -DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta) { +DenseTensorMeta DotInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta) { auto x_dims = x_meta.dims; auto x_rank = static_cast(x_dims.size()); PADDLE_ENFORCE_EQ(true, @@ -60,10 +60,10 @@ DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta, return return_meta; } -DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - bool trans_x, - bool trans_y) { +DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + bool trans_x, + bool trans_y) { std::vector dims_x = paddle::framework::vectorize(x_meta.dims); std::vector dims_y = paddle::framework::vectorize(y_meta.dims); auto ndims_x = dims_x.size(); @@ -130,9 +130,9 @@ DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta, return {x_meta.dtype, ddim_out, x_meta.layout}; } -DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - int axis) { +DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + int axis) { DenseTensorMeta return_meta(x_meta.dtype, x_meta.dims, x_meta.layout); if (x_meta.dims != y_meta.dims) { auto x_dims = x_meta.dims; diff --git a/paddle/pten/infermeta/binary.h b/paddle/pten/infermeta/binary.h index 35749ab61b..c86fc12a20 100644 --- a/paddle/pten/infermeta/binary.h +++ b/paddle/pten/infermeta/binary.h @@ -19,29 +19,29 @@ limitations under the License. 
*/ namespace pten { -// Common InferShape Functions for binary operators, The format like: +// Common InferMeta Functions for binary operators, The format like: // -// 1. DenseTensorMeta [OpName]InferShape(const DenseTensorMeta& x_meta, ...) +// 1. DenseTensorMeta [OpName]InferMeta(const DenseTensorMeta& x_meta, ...) // {} -// 2. std::pair [OpName]InferShape(const +// 2. std::pair [OpName]InferMeta(const // DenseTensorMeta& // x_meta, ...) {} // 3. std::tuple -// [OpName]InferShape(const +// [OpName]InferMeta(const // DenseTensorMeta& x_meta, ...) -// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good. +// NOTE: The name "InferMeta" is more appropriate than "InferShape". // Because functions in this file // not only can infer shape, but alse need infer lod or other useful data. -DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta); +DenseTensorMeta DotInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta); -DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - bool trans_x, - bool trans_y); +DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + bool trans_x, + bool trans_y); -DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta, - const DenseTensorMeta& y_meta, - int axis); +DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta, + const DenseTensorMeta& y_meta, + int axis); } // namespace pten diff --git a/paddle/pten/infermeta/nary.cc b/paddle/pten/infermeta/nary.cc index d79945a384..8b12a88f10 100644 --- a/paddle/pten/infermeta/nary.cc +++ b/paddle/pten/infermeta/nary.cc @@ -17,16 +17,16 @@ limitations under the License. 
*/ namespace pten { -DenseTensorMeta FullInferShape(const std::vector& shape, - DataType dtype, - DataLayout layout) { +DenseTensorMeta FullInferMeta(const std::vector& shape, + DataType dtype, + DataLayout layout) { const auto& out_dims = paddle::framework::make_ddim(shape); return {dtype, out_dims, layout}; } -DenseTensorMeta FullInferShape(const ScalarArray& shape, - DataType dtype, - DataLayout layout) { +DenseTensorMeta FullInferMeta(const ScalarArray& shape, + DataType dtype, + DataLayout layout) { const auto& out_dims = paddle::framework::make_ddim(shape.GetData()); return {dtype, out_dims, layout}; } diff --git a/paddle/pten/infermeta/nary.h b/paddle/pten/infermeta/nary.h index c526583d7b..010accd2e7 100644 --- a/paddle/pten/infermeta/nary.h +++ b/paddle/pten/infermeta/nary.h @@ -19,20 +19,20 @@ limitations under the License. */ namespace pten { -// Common InferShape Functions for 0-nary operators(no input tensor), The format +// Common InferMeta Functions for 0-nary operators(no input tensor), The format // like: // -// 1. DenseTensorMeta [OpName]InferShape( ...) -// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good. +// 1. DenseTensorMeta [OpName]InferMeta( ...) +// NOTE: The name "InferMeta" is more appropriate than "InferShape". // Because functions in this file // not only can infer shape, but alse need infer lod or other useful data. 
-DenseTensorMeta FullInferShape(const std::vector& shape, - DataType dtype, - DataLayout layout); +DenseTensorMeta FullInferMeta(const std::vector& shape, + DataType dtype, + DataLayout layout); -DenseTensorMeta FullInferShape(const ScalarArray& shape, - DataType dtype, - DataLayout layout); +DenseTensorMeta FullInferMeta(const ScalarArray& shape, + DataType dtype, + DataLayout layout); } // namespace pten diff --git a/paddle/pten/infermeta/unary.cc b/paddle/pten/infermeta/unary.cc index 945a0b4e23..1ffdd1fed2 100644 --- a/paddle/pten/infermeta/unary.cc +++ b/paddle/pten/infermeta/unary.cc @@ -17,19 +17,19 @@ limitations under the License. */ namespace pten { -DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta) { +DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta) { return x_meta; } -DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta) { +DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta) { const auto& out_dims = paddle::framework::make_ddim({1}); DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout); return return_meta; } -DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta, - int start_axis, - int stop_axis) { +DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta, + int start_axis, + int stop_axis) { auto& x_dims = x_meta.dims; int in_dims_size = x_dims.size(); if (start_axis < 0) { @@ -80,9 +80,9 @@ DenseTensorMeta CastInferMeta(const DenseTensorMeta& x_meta, return out_meta; } -DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta, - DataType dtype, - DataLayout layout) { +DenseTensorMeta FullLikeInferMeta(const DenseTensorMeta& x_meta, + DataType dtype, + DataLayout layout) { return {dtype == DataType::UNDEFINED ? x_meta.dtype : dtype, x_meta.dims, layout == DataLayout::UNDEFINED ? 
x_meta.layout : layout}; @@ -208,8 +208,8 @@ static paddle::framework::DDim ValidateShape( return paddle::framework::make_ddim(output_shape); } -DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta, - const std::vector& shape) { +DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta, + const std::vector& shape) { PADDLE_ENFORCE_EQ(!shape.empty(), true, paddle::platform::errors::InvalidArgument( diff --git a/paddle/pten/infermeta/unary.h b/paddle/pten/infermeta/unary.h index 92c14d43ea..696e2b06df 100644 --- a/paddle/pten/infermeta/unary.h +++ b/paddle/pten/infermeta/unary.h @@ -19,34 +19,34 @@ limitations under the License. */ namespace pten { -// Common InferShape Functions for unary operators, The format like: +// Common InferMeta Functions for unary operators, The format like: // -// 1. DenseTensorMeta [OpName]InferShape(const DenseTensorMeta& x_meta, ...) +// 1. DenseTensorMeta [OpName]InferMeta(const DenseTensorMeta& x_meta, ...) // {} -// 2. std::pair [OpName]InferShape(const +// 2. std::pair [OpName]InferMeta(const // DenseTensorMeta& // x_meta, ...) {} // 3. std::tuple -// [OpName]InferShape(const +// [OpName]InferMeta(const // DenseTensorMeta& x_meta, ...) -// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good. +// NOTE: The name "InferMeta" is more appropriate than "InferShape". // Because functions in this file // not only can infer shape, but alse need infer lod or other useful data. 
-DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta); +DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta); -DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta); +DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta); -DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta, - int start_axis, - int stop_axis); +DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta, + int start_axis, + int stop_axis); DenseTensorMeta CastInferMeta(const DenseTensorMeta& x_meta, const DataType out_dtype); -DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta, - DataType dtype, - DataLayout layout); +DenseTensorMeta FullLikeInferMeta(const DenseTensorMeta& x_meta, + DataType dtype, + DataLayout layout); -DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta, - const std::vector& shape); +DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta, + const std::vector& shape); } // namespace pten diff --git a/paddle/pten/kernels/cpu/manipulation.cc b/paddle/pten/kernels/cpu/manipulation.cc index c7027e487b..8f559b01b3 100644 --- a/paddle/pten/kernels/cpu/manipulation.cc +++ b/paddle/pten/kernels/cpu/manipulation.cc @@ -50,7 +50,7 @@ void ReshapeFromVectorVal(const CPUContext& dev_ctx, const DenseTensor& x, const std::vector& shape, DenseTensor* out) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); if (&x == out) { out->Resize(out_meta.dims); return; diff --git a/paddle/pten/kernels/cuda/manipulation.cu b/paddle/pten/kernels/cuda/manipulation.cu index 9b8f18dab4..f4bf932204 100644 --- a/paddle/pten/kernels/cuda/manipulation.cu +++ b/paddle/pten/kernels/cuda/manipulation.cu @@ -50,7 +50,7 @@ void ReshapeFromVectorVal(const CUDAContext& dev_ctx, const DenseTensor& x, const std::vector& shape, DenseTensor* out) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = 
InferMetaFromVecValue(x.meta(), shape); if (&x == out) { out->Resize(out_meta.dims); return; diff --git a/paddle/pten/kernels/xpu/manipulation.cc b/paddle/pten/kernels/xpu/manipulation.cc index d55def8b8a..352d21e6e9 100644 --- a/paddle/pten/kernels/xpu/manipulation.cc +++ b/paddle/pten/kernels/xpu/manipulation.cc @@ -55,7 +55,7 @@ void ReshapeFromVectorVal(const XPUContext& dev_ctx, const DenseTensor& x, const std::vector& shape, DenseTensor* out) { - auto out_meta = InferShapeFromVecValue(x.meta(), shape); + auto out_meta = InferMetaFromVecValue(x.meta(), shape); if (&x == out) { out->Resize(out_meta.dims); return; -- GitLab