Unverified commit 2a905f6b, authored by Chen Weihang, committed by GitHub

infershape func to infermeta (#37524)

Parent 171da2ce
@@ -52,8 +52,8 @@ PD_DLL_DECL Tensor full(const ScalarArray& shape,
kernel_context.EmplaceBackAttr(pten::ScalarArray(shape));
kernel_context.EmplaceBackAttr(pten::Scalar(value));
// 4. InferShape
auto out_meta = pten::FullInferShape(shape, dtype, layout);
// 4. InferMeta
auto out_meta = pten::FullInferMeta(shape, dtype, layout);
// 5. Prepare outputs
const auto allocator =
@@ -97,8 +97,8 @@ PD_DLL_DECL Tensor full_like(const Tensor& x,
auto dense_x = std::dynamic_pointer_cast<pten::DenseTensor>(x.impl());
kernel_context.EmplaceBackAttr(pten::Scalar(value));
// 4. InferShape
auto out_meta = FullLikeInferShape(dense_x->meta(), dtype, layout);
// 4. InferMeta
auto out_meta = FullLikeInferMeta(dense_x->meta(), dtype, layout);
// 5. Prepare outputs
Tensor out;
......
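The hunks above (and those that follow) all share the same call shape: attributes are packed into the kernel context, the renamed InferMeta function computes the output's metadata on the host, and that metadata is used to allocate the output before any kernel runs. Below is a minimal standalone sketch of that flow; `TensorMeta` and `FullInferMeta` here are simplified stand-ins, not the real pten types.

```cpp
// Minimal sketch of the "InferMeta first, allocate, then compute" flow.
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

struct TensorMeta {
  std::vector<int64_t> dims;  // dtype/layout omitted for brevity
};

// Nullary InferMeta: the output metadata depends only on attributes.
TensorMeta FullInferMeta(const std::vector<int64_t>& shape) {
  return TensorMeta{shape};
}

int main() {
  TensorMeta out_meta = FullInferMeta({2, 3});
  int64_t numel = std::accumulate(out_meta.dims.begin(), out_meta.dims.end(),
                                  int64_t{1}, std::multiplies<int64_t>());
  std::vector<float> out(static_cast<size_t>(numel), 1.0f);  // "full" of 1.0
  // ... a device kernel would write into `out` here ...
  return 0;
}
```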
@@ -55,8 +55,8 @@ PD_DLL_DECL Tensor dot(const Tensor& x, const Tensor& y) {
kernel_context.EmplaceBackInput(dense_y);
// TODO(chenweihang): add transform impl
// 4. InferShape
auto out_meta = DotInferShape(dense_x->meta(), dense_y->meta());
// 4. InferMeta
auto out_meta = DotInferMeta(dense_x->meta(), dense_y->meta());
// 5. Prepare outputs
Tensor out;
@@ -95,8 +95,8 @@ PD_DLL_DECL Tensor matmul(const Tensor& x,
kernel_context.EmplaceBackAttr(transpose_y);
// TODO(chenweihang): add transform impl
// 4. InferShape
auto out_meta = MatmulInferShape(
// 4. InferMeta
auto out_meta = MatmulInferMeta(
dense_x->meta(), dense_y->meta(), transpose_x, transpose_y);
// 5. Prepare outputs
......
@@ -50,8 +50,8 @@ PD_DLL_DECL Tensor flatten(const Tensor& x, int start_axis, int stop_axis) {
kernel_context.EmplaceBackAttr(start_axis);
kernel_context.EmplaceBackAttr(stop_axis);
// 4. InferShape
auto out_meta = FlattenInferShape(dense_x->meta(), start_axis, stop_axis);
// 4. InferMeta
auto out_meta = FlattenInferMeta(dense_x->meta(), start_axis, stop_axis);
// 5. Prepare outputs
Tensor out;
@@ -84,7 +84,7 @@ PD_DLL_DECL Tensor cast(const Tensor& x, DataType out_dtype) {
kernel_context.EmplaceBackAttr(out_dtype);
kernel_context.EmplaceBackAttr(dense_x->meta().dtype);
// 4. InferShape
// 4. InferMeta
auto out_meta = CastInferMeta(dense_x->meta(), out_dtype);
// 5. Prepare outputs
@@ -117,8 +117,8 @@ PD_DLL_DECL Tensor reshape(const Tensor& x, const std::vector<int64_t>& shape) {
kernel_context.EmplaceBackInput(dense_x);
kernel_context.EmplaceBackAttr(shape);
// 4. InferShape
auto out_meta = InferShapeFromVecValue(dense_x->meta(), shape);
// 4. InferMeta
auto out_meta = InferMetaFromVecValue(dense_x->meta(), shape);
// 5. Prepare outputs
Tensor out;
......
@@ -50,8 +50,8 @@ PD_DLL_DECL Tensor mean(const Tensor& x) {
auto dense_x = std::dynamic_pointer_cast<pten::DenseTensor>(x.impl());
kernel_context.EmplaceBackInput(dense_x);
// 4. InferShape
auto out_meta = ReductionInferShape(dense_x->meta());
// 4. InferMeta
auto out_meta = ReductionInferMeta(dense_x->meta());
// 5. Prepare outputs
Tensor out;
@@ -86,8 +86,8 @@ PD_DLL_DECL Tensor add(const Tensor& x, const Tensor& y) {
kernel_context.EmplaceBackInput(dense_y);
kernel_context.EmplaceBackAttr(-1);
// 4. InferShape
auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1);
// 4. InferMeta
auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1);
// 5. Prepare outputs
Tensor out;
@@ -121,8 +121,8 @@ PD_DLL_DECL Tensor subtract(const Tensor& x, const Tensor& y) {
kernel_context.EmplaceBackInput(dense_y);
kernel_context.EmplaceBackAttr(-1);
// 4. InferShape
auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1);
// 4. InferMeta
auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1);
// 5. Prepare outputs
Tensor out;
@@ -156,8 +156,8 @@ PD_DLL_DECL Tensor divide(const Tensor& x, const Tensor& y) {
kernel_context.EmplaceBackInput(dense_y);
kernel_context.EmplaceBackAttr(-1);
// 4. InferShape
auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1);
// 4. InferMeta
auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1);
// 5. Prepare outputs
Tensor out;
@@ -191,8 +191,8 @@ PD_DLL_DECL Tensor multiply(const Tensor& x, const Tensor& y) {
kernel_context.EmplaceBackInput(dense_y);
kernel_context.EmplaceBackAttr(-1);
// 4. InferShape
auto out_meta = ElementwiseInferShape(dense_x->meta(), dense_y->meta(), -1);
// 4. InferMeta
auto out_meta = ElementwiseInferMeta(dense_x->meta(), dense_y->meta(), -1);
// 5. Prepare outputs
Tensor out;
......
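Note that add, subtract, divide, and multiply all pass axis = -1 into ElementwiseInferMeta. In Paddle's elementwise convention, axis = -1 means y's dimensions are right-aligned against the trailing dimensions of x. The sketch below models that trailing alignment with plain vectors as a general two-sided broadcast; it illustrates the convention and is not the pten implementation.

```cpp
// Trailing-dimension alignment used when axis == -1 (illustrative only):
// dims are compared from the back; each pair must be equal or one must be 1.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> BroadcastDims(const std::vector<int64_t>& x,
                                   const std::vector<int64_t>& y) {
  size_t rank = std::max(x.size(), y.size());
  std::vector<int64_t> out(rank);
  for (size_t i = 0; i < rank; ++i) {  // walk from the trailing dim
    int64_t xd = i < x.size() ? x[x.size() - 1 - i] : 1;
    int64_t yd = i < y.size() ? y[y.size() - 1 - i] : 1;
    assert(xd == yd || xd == 1 || yd == 1);
    out[rank - 1 - i] = std::max(xd, yd);
  }
  return out;
}
// Example: BroadcastDims({8, 16, 32}, {16, 32}) == {8, 16, 32}.
```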
@@ -55,7 +55,7 @@ PD_DLL_DECL Tensor copy_to(const Tensor& x, Backend backend, bool blocking) {
kernel_context.EmplaceBackAttr(blocking);
// 4. InferMeta
auto out_meta = UnchangedInferShape(dense_x->meta());
auto out_meta = UnchangedInferMeta(dense_x->meta());
// 5. Prepare outputs
const auto allocator =
......
@@ -31,7 +31,7 @@ DenseTensor FillAnyLike(
DataType dtype = DataType::UNDEFINED,
Backend backend = Backend::UNDEFINED, // Is backend needed here?
DataLayout layout = DataLayout::UNDEFINED) {
auto out_meta = FullLikeInferShape(x.meta(), dtype, layout);
auto out_meta = FullLikeInferMeta(x.meta(), dtype, layout);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
......
@@ -26,7 +26,7 @@ template <typename T, typename ContextT>
DenseTensor Dot(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
auto out_meta = DotInferShape(x.meta(), y.meta());
auto out_meta = DotInferMeta(x.meta(), y.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
......
@@ -28,7 +28,7 @@ DenseTensor Flatten(const ContextT& dev_ctx,
const DenseTensor& x,
int start_axis,
int stop_axis) {
auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
auto out_meta = FlattenInferMeta(x.meta(), start_axis, stop_axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -55,7 +55,7 @@ template <typename T, typename ContextT>
DenseTensor Reshape(const ContextT& dev_ctx,
const DenseTensor& x,
const std::vector<int64_t>& shape) {
auto out_meta = InferShapeFromVecValue(x.meta(), shape);
auto out_meta = InferMetaFromVecValue(x.meta(), shape);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
......
@@ -24,7 +24,7 @@ namespace pten {
template <typename T, typename ContextT>
DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = UnchangedInferShape(x.meta());
auto out_meta = UnchangedInferMeta(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -35,7 +35,7 @@ DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) {
template <typename T, typename ContextT>
DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = ReductionInferShape(x.meta());
auto out_meta = ReductionInferMeta(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -50,7 +50,7 @@ DenseTensor Scale(const ContextT& dev_ctx,
float scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
auto out_meta = UnchangedInferMeta(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -65,7 +65,7 @@ DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
auto out_meta = UnchangedInferMeta(x.meta());
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -79,7 +79,7 @@ DenseTensor Add(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis) {
auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis);
auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -93,7 +93,7 @@ DenseTensor Subtract(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis) {
auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis);
auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -107,7 +107,7 @@ DenseTensor Divide(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis) {
auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis);
auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
@@ -121,7 +121,7 @@ DenseTensor Multiply(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis) {
auto out_meta = ElementwiseInferShape(x.meta(), y.meta(), axis);
auto out_meta = ElementwiseInferMeta(x.meta(), y.meta(), axis);
const auto allocator =
std::make_shared<paddle::experimental::DefaultAllocator>(
dev_ctx.GetPlace());
......
@@ -18,8 +18,8 @@ limitations under the License. */
namespace pten {
DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta) {
DenseTensorMeta DotInferMeta(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta) {
auto x_dims = x_meta.dims;
auto x_rank = static_cast<size_t>(x_dims.size());
PADDLE_ENFORCE_EQ(true,
@@ -60,10 +60,10 @@ DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta,
return return_meta;
}
DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
bool trans_x,
bool trans_y) {
DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
bool trans_x,
bool trans_y) {
std::vector<int64_t> dims_x = paddle::framework::vectorize(x_meta.dims);
std::vector<int64_t> dims_y = paddle::framework::vectorize(y_meta.dims);
auto ndims_x = dims_x.size();
@@ -130,9 +130,9 @@ DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta,
return {x_meta.dtype, ddim_out, x_meta.layout};
}
DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
int axis) {
DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
int axis) {
DenseTensorMeta return_meta(x_meta.dtype, x_meta.dims, x_meta.layout);
if (x_meta.dims != y_meta.dims) {
auto x_dims = x_meta.dims;
......
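MatmulInferMeta has the most involved shape logic in this file: the transpose flags swap the last two dims of the corresponding operand, the result takes M from x and N from y, and any leading batch dims broadcast numpy-style. A standalone sketch of that computation, assuming both operands have rank >= 2 (the real function also handles rank-1 inputs):

```cpp
// Illustrative matmul output-dims computation (rank >= 2 assumed).
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

std::vector<int64_t> MatmulOutDims(std::vector<int64_t> x,
                                   std::vector<int64_t> y,
                                   bool trans_x, bool trans_y) {
  if (trans_x) std::swap(x[x.size() - 1], x[x.size() - 2]);
  if (trans_y) std::swap(y[y.size() - 1], y[y.size() - 2]);
  const int64_t m = x[x.size() - 2];
  const int64_t n = y[y.size() - 1];
  // Broadcast the leading (batch) dims, right-aligned.
  size_t bx = x.size() - 2, by = y.size() - 2;
  std::vector<int64_t> out;
  for (size_t i = std::max(bx, by); i > 0; --i) {
    int64_t xd = i <= bx ? x[bx - i] : 1;
    int64_t yd = i <= by ? y[by - i] : 1;
    out.push_back(std::max(xd, yd));  // assumes dims match or are 1
  }
  out.push_back(m);
  out.push_back(n);
  return out;
}
// Example: x = {4, 2, 3}, y = {3, 5} -> {4, 2, 5}.
```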
@@ -19,29 +19,29 @@ limitations under the License. */
namespace pten {
// Common InferShape Functions for binary operators, The format like:
// Common InferMeta functions for binary operators. The format is like:
//
// 1. DenseTensorMeta [OpName]InferShape(const DenseTensorMeta& x_meta, ...)
// 1. DenseTensorMeta [OpName]InferMeta(const DenseTensorMeta& x_meta, ...)
// {}
// 2. std::pair<DenseTensorMeta, DenseTensorMeta> [OpName]InferShape(const
// 2. std::pair<DenseTensorMeta, DenseTensorMeta> [OpName]InferMeta(const
// DenseTensorMeta&
// x_meta, ...) {}
// 3. std::tuple<DenseTensorMeta, DenseTensorMeta, DenseTensorMeta>
// [OpName]InferShape(const
// [OpName]InferMeta(const
// DenseTensorMeta& x_meta, ...)
// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good.
// NOTE: The name "InferMeta" may be not appropriate. "InferMeta" may be good.
// Because functions in this file
// not only can infer shape, but alse need infer lod or other useful data.
DenseTensorMeta DotInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta);
DenseTensorMeta DotInferMeta(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta);
DenseTensorMeta MatmulInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
bool trans_x,
bool trans_y);
DenseTensorMeta MatmulInferMeta(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
bool trans_x,
bool trans_y);
DenseTensorMeta ElementwiseInferShape(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
int axis);
DenseTensorMeta ElementwiseInferMeta(const DenseTensorMeta& x_meta,
const DenseTensorMeta& y_meta,
int axis);
} // namespace pten
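The header comment above enumerates three return formats, but every function in this commit uses format 1 (a single DenseTensorMeta). For completeness, here is a hypothetical example of format 2 with simplified stand-in types; `TopKInferMeta` is invented for illustration and is not part of this commit.

```cpp
// Hypothetical format-2 InferMeta: one input meta in, two output metas out.
#include <cstdint>
#include <utility>
#include <vector>

struct Meta {  // simplified stand-in for pten::DenseTensorMeta
  std::vector<int64_t> dims;
};

std::pair<Meta, Meta> TopKInferMeta(const Meta& x_meta, int64_t k) {
  Meta values = x_meta;
  values.dims.back() = k;  // values: [..., k]
  Meta indices = values;   // indices share the values' shape
  return {values, indices};
}
```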
@@ -17,16 +17,16 @@ limitations under the License. */
namespace pten {
DenseTensorMeta FullInferShape(const std::vector<int64_t>& shape,
DataType dtype,
DataLayout layout) {
DenseTensorMeta FullInferMeta(const std::vector<int64_t>& shape,
DataType dtype,
DataLayout layout) {
const auto& out_dims = paddle::framework::make_ddim(shape);
return {dtype, out_dims, layout};
}
DenseTensorMeta FullInferShape(const ScalarArray& shape,
DataType dtype,
DataLayout layout) {
DenseTensorMeta FullInferMeta(const ScalarArray& shape,
DataType dtype,
DataLayout layout) {
const auto& out_dims = paddle::framework::make_ddim(shape.GetData());
return {dtype, out_dims, layout};
}
......
@@ -19,20 +19,20 @@ limitations under the License. */
namespace pten {
// Common InferShape Functions for 0-nary operators(no input tensor), The format
// Common InferMeta functions for 0-nary operators (no input tensor). The format is
// like:
//
// 1. DenseTensorMeta [OpName]InferShape( ...)
// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good.
// 1. DenseTensorMeta [OpName]InferMeta( ...)
// NOTE: The name "InferMeta" may be not appropriate. "InferMeta" may be good.
// Because functions in this file
// not only can infer shape, but alse need infer lod or other useful data.
DenseTensorMeta FullInferShape(const std::vector<int64_t>& shape,
DataType dtype,
DataLayout layout);
DenseTensorMeta FullInferMeta(const std::vector<int64_t>& shape,
DataType dtype,
DataLayout layout);
DenseTensorMeta FullInferShape(const ScalarArray& shape,
DataType dtype,
DataLayout layout);
DenseTensorMeta FullInferMeta(const ScalarArray& shape,
DataType dtype,
DataLayout layout);
} // namespace pten
@@ -17,19 +17,19 @@ limitations under the License. */
namespace pten {
DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta) {
DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta) {
return x_meta;
}
DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta) {
DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta) {
const auto& out_dims = paddle::framework::make_ddim({1});
DenseTensorMeta return_meta(x_meta.dtype, out_dims, x_meta.layout);
return return_meta;
}
DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta,
int start_axis,
int stop_axis) {
DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta,
int start_axis,
int stop_axis) {
auto& x_dims = x_meta.dims;
int in_dims_size = x_dims.size();
if (start_axis < 0) {
@@ -80,9 +80,9 @@ DenseTensorMeta CastInferMeta(const DenseTensorMeta& x_meta,
return out_meta;
}
DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta,
DataType dtype,
DataLayout layout) {
DenseTensorMeta FullLikeInferMeta(const DenseTensorMeta& x_meta,
DataType dtype,
DataLayout layout) {
return {dtype == DataType::UNDEFINED ? x_meta.dtype : dtype,
x_meta.dims,
layout == DataLayout::UNDEFINED ? x_meta.layout : layout};
@@ -208,8 +208,8 @@ static paddle::framework::DDim ValidateShape(
return paddle::framework::make_ddim(output_shape);
}
DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta,
const std::vector<int64_t>& shape) {
DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta,
const std::vector<int64_t>& shape) {
PADDLE_ENFORCE_EQ(!shape.empty(),
true,
paddle::platform::errors::InvalidArgument(
......
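FlattenInferMeta's hunk is elided above, but its job is to collapse the dims in [start_axis, stop_axis] into a single dim, with negative axes counting from the back (as the visible `start_axis < 0` branch shows). A standalone sketch of that shape computation, using plain vectors rather than pten's DDim:

```cpp
// Illustrative flatten shape inference: merge dims [start_axis, stop_axis].
#include <cstdint>
#include <vector>

std::vector<int64_t> FlattenDims(const std::vector<int64_t>& x,
                                 int start_axis, int stop_axis) {
  const int rank = static_cast<int>(x.size());
  if (start_axis < 0) start_axis += rank;  // negative axes count from the back
  if (stop_axis < 0) stop_axis += rank;
  std::vector<int64_t> out(x.begin(), x.begin() + start_axis);
  int64_t merged = 1;
  for (int i = start_axis; i <= stop_axis; ++i) merged *= x[i];
  out.push_back(merged);
  out.insert(out.end(), x.begin() + stop_axis + 1, x.end());
  return out;
}
// Example: FlattenDims({2, 3, 4, 5}, 1, 2) == {2, 12, 5}.
```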
@@ -19,34 +19,34 @@ limitations under the License. */
namespace pten {
// Common InferShape Functions for unary operators, The format like:
// Common InferMeta functions for unary operators. The format is like:
//
// 1. DenseTensorMeta [OpName]InferShape(const DenseTensorMeta& x_meta, ...)
// 1. DenseTensorMeta [OpName]InferMeta(const DenseTensorMeta& x_meta, ...)
// {}
// 2. std::pair<DenseTensorMeta, DenseTensorMeta> [OpName]InferShape(const
// 2. std::pair<DenseTensorMeta, DenseTensorMeta> [OpName]InferMeta(const
// DenseTensorMeta&
// x_meta, ...) {}
// 3. std::tuple<DenseTensorMeta, DenseTensorMeta, DenseTensorMeta>
// [OpName]InferShape(const
// [OpName]InferMeta(const
// DenseTensorMeta& x_meta, ...)
// NOTE: The name "InferShape" may be not appropriate. "InferMeta" may be good.
// NOTE: The name "InferMeta" may be not appropriate. "InferMeta" may be good.
// Because functions in this file
// not only can infer shape, but alse need infer lod or other useful data.
DenseTensorMeta UnchangedInferShape(const DenseTensorMeta& x_meta);
DenseTensorMeta UnchangedInferMeta(const DenseTensorMeta& x_meta);
DenseTensorMeta ReductionInferShape(const DenseTensorMeta& x_meta);
DenseTensorMeta ReductionInferMeta(const DenseTensorMeta& x_meta);
DenseTensorMeta FlattenInferShape(const DenseTensorMeta& x_meta,
int start_axis,
int stop_axis);
DenseTensorMeta FlattenInferMeta(const DenseTensorMeta& x_meta,
int start_axis,
int stop_axis);
DenseTensorMeta CastInferMeta(const DenseTensorMeta& x_meta,
const DataType out_dtype);
DenseTensorMeta FullLikeInferShape(const DenseTensorMeta& x_meta,
DataType dtype,
DataLayout layout);
DenseTensorMeta FullLikeInferMeta(const DenseTensorMeta& x_meta,
DataType dtype,
DataLayout layout);
DenseTensorMeta InferShapeFromVecValue(const DenseTensorMeta& x_meta,
const std::vector<int64_t>& shape);
DenseTensorMeta InferMetaFromVecValue(const DenseTensorMeta& x_meta,
const std::vector<int64_t>& shape);
} // namespace pten
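InferMetaFromVecValue (declared just above) resolves a user-supplied shape vector via ValidateShape, which in Paddle's reshape convention treats 0 as "copy the corresponding input dim" and a single -1 as "infer from the remaining element count". A minimal sketch of that resolution logic under those assumptions:

```cpp
// Illustrative shape-vector resolution (0 = copy input dim, -1 = infer).
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

std::vector<int64_t> ResolveShape(const std::vector<int64_t>& in_dims,
                                  std::vector<int64_t> shape) {
  const int64_t in_numel =
      std::accumulate(in_dims.begin(), in_dims.end(), int64_t{1},
                      std::multiplies<int64_t>());
  int64_t known = 1;
  int infer_at = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == 0) shape[i] = in_dims.at(i);  // 0: keep input dim i
    if (shape[i] == -1) {
      assert(infer_at < 0);  // at most one -1 is allowed
      infer_at = static_cast<int>(i);
    } else {
      known *= shape[i];
    }
  }
  if (infer_at >= 0) shape[infer_at] = in_numel / known;  // -1: infer
  return shape;
}
// Example: ResolveShape({2, 3, 4}, {0, -1}) == {2, 12}.
```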
@@ -50,7 +50,7 @@ void ReshapeFromVectorVal(const CPUContext& dev_ctx,
const DenseTensor& x,
const std::vector<int64_t>& shape,
DenseTensor* out) {
auto out_meta = InferShapeFromVecValue(x.meta(), shape);
auto out_meta = InferMetaFromVecValue(x.meta(), shape);
if (&x == out) {
out->Resize(out_meta.dims);
return;
......
@@ -50,7 +50,7 @@ void ReshapeFromVectorVal(const CUDAContext& dev_ctx,
const DenseTensor& x,
const std::vector<int64_t>& shape,
DenseTensor* out) {
auto out_meta = InferShapeFromVecValue(x.meta(), shape);
auto out_meta = InferMetaFromVecValue(x.meta(), shape);
if (&x == out) {
out->Resize(out_meta.dims);
return;
......
@@ -55,7 +55,7 @@ void ReshapeFromVectorVal(const XPUContext& dev_ctx,
const DenseTensor& x,
const std::vector<int64_t>& shape,
DenseTensor* out) {
auto out_meta = InferShapeFromVecValue(x.meta(), shape);
auto out_meta = InferMetaFromVecValue(x.meta(), shape);
if (&x == out) {
out->Resize(out_meta.dims);
return;
......
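All three device variants of ReshapeFromVectorVal (CPU, CUDA, XPU) open with the same guard: when the call is in place (x and out alias), only the shape metadata is updated and no data moves. A simplified standalone model of that guard, with a stand-in type rather than pten::DenseTensor:

```cpp
// Simplified model of the in-place reshape guard shown above.
#include <cstdint>
#include <vector>

struct SimpleTensor {  // stand-in type, not pten::DenseTensor
  std::vector<int64_t> dims;
  std::vector<float> data;
};

void Reshape(const SimpleTensor& x, const std::vector<int64_t>& shape,
             SimpleTensor* out) {
  if (&x == out) {    // in place: metadata-only update, buffer untouched
    out->dims = shape;
    return;
  }
  out->dims = shape;  // out of place: copy the buffer as well
  out->data = x.data;
}
```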