Commit 080024f0 (unverified)
refactor unary infermeta (#40365)
Authored by zyfncg on Mar 13, 2022; committed via GitHub on Mar 13, 2022.
Parent: ec09ef26
Showing 2 changed files with 827 additions and 826 deletions (+827 -826):

paddle/phi/infermeta/unary.cc  (+730 -730)
paddle/phi/infermeta/unary.h   (+97 -96)
paddle/phi/infermeta/unary.cc

@@ -26,6 +26,82 @@ limitations under the License. */
namespace phi {

void ArgMinMaxInferMeta(const MetaTensor& x,
                        int64_t axis,
                        bool keepdims,
                        bool flatten,
                        int dtype,
                        MetaTensor* out,
                        MetaConfig config) {
  const auto& x_dims = x.dims();

  PADDLE_ENFORCE_GE(
      axis,
      -x_dims.size(),
      phi::errors::InvalidArgument("'axis'(%d) must be greater than or equal to"
                                   " -Rank(X)(%d).",
                                   axis,
                                   -x_dims.size()));
  PADDLE_ENFORCE_LT(axis,
                    x_dims.size(),
                    phi::errors::InvalidArgument(
                        "'axis'(%d) must be less than Rank(X)(%d) of Input(X).",
                        axis,
                        x_dims.size()));

  PADDLE_ENFORCE_EQ(
      (dtype < 0 || dtype == 2 || dtype == 3),
      true,
      phi::errors::InvalidArgument(
          "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
          "received [%s]",
          paddle::framework::DataTypeToString(
              paddle::framework::proto::VarType::INT32),
          paddle::framework::DataTypeToString(
              paddle::framework::proto::VarType::INT64),
          paddle::framework::DataTypeToString(
              static_cast<paddle::framework::proto::VarType::Type>(dtype))));

  auto x_rank = x_dims.size();
  if (axis < 0) axis += x_rank;
  if (config.is_runtime) {
    if (dtype == paddle::framework::proto::VarType::INT32) {
      int64_t all_element_num = 0;
      if (flatten) {
        all_element_num = phi::product(x_dims);
      } else {
        all_element_num = x_dims[axis];
      }
      PADDLE_ENFORCE_LE(
          all_element_num,
          INT_MAX,
          phi::errors::InvalidArgument(
              "The element num of the argmin/argmax input at axis is "
              "%d, is larger than int32 maximum value:%d, you must "
              "set the dtype of argmin/argmax to 'int64'.",
              all_element_num,
              INT_MAX));
    }
  }

  std::vector<int64_t> vec;
  if (flatten) {
    vec.emplace_back(static_cast<int64_t>(1));
  } else {
    for (int64_t i = 0; i < axis; i++) vec.emplace_back(x_dims[i]);
    if (keepdims) {
      vec.emplace_back(static_cast<int64_t>(1));
    }
    for (int64_t i = axis + 1; i < x_rank; i++) vec.emplace_back(x_dims[i]);
  }
  out->set_dims(phi::make_ddim(vec));
  if (dtype == 2) {
    out->set_dtype(DataType::INT32);
  } else if (dtype == 3) {
    out->set_dtype(DataType::INT64);
  }
}

void ArgsortInferMeta(const MetaTensor& input,
                      int axis,
                      bool descending,
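The shape rule added above (drop the reduced axis, keep it as 1 under keepdims, or collapse everything when flatten is set) can be exercised in isolation. Below is a minimal self-contained sketch of that rule; the helper name compute_argminmax_dims and the use of plain std::vector in place of phi::DDim are illustrative assumptions, not part of the patch.

#include <cassert>
#include <cstdint>
#include <vector>

// Mirrors the dim logic of ArgMinMaxInferMeta: flatten collapses the result
// to a single element; otherwise the reduced axis is dropped, or replaced by
// 1 when keepdims is set.
std::vector<int64_t> compute_argminmax_dims(std::vector<int64_t> x_dims,
                                            int64_t axis,
                                            bool keepdims,
                                            bool flatten) {
  int64_t rank = static_cast<int64_t>(x_dims.size());
  if (axis < 0) axis += rank;  // normalize a negative axis, as in the patch
  std::vector<int64_t> vec;
  if (flatten) {
    vec.push_back(1);
  } else {
    for (int64_t i = 0; i < axis; i++) vec.push_back(x_dims[i]);
    if (keepdims) vec.push_back(1);
    for (int64_t i = axis + 1; i < rank; i++) vec.push_back(x_dims[i]);
  }
  return vec;
}

int main() {
  // argmax over axis 1 of a [2, 3, 4] tensor drops that axis: [2, 4].
  assert((compute_argminmax_dims({2, 3, 4}, 1, false, false) ==
          std::vector<int64_t>{2, 4}));
  // keepdims retains it as 1: [2, 1, 4].
  assert((compute_argminmax_dims({2, 3, 4}, 1, true, false) ==
          std::vector<int64_t>{2, 1, 4}));
  // flatten reduces over all elements: [1].
  assert((compute_argminmax_dims({2, 3, 4}, -1, false, true) ==
          std::vector<int64_t>{1}));
  return 0;
}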
@@ -54,96 +130,6 @@ void ArgsortInferMeta(const MetaTensor& input,
  indices->share_lod(input);
}

void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(out_dtype);
@@ -203,73 +189,275 @@ void CumsumInferMeta(const MetaTensor& x,
  out->share_lod(x);
}

void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out) {
  auto x_dims = x.dims();

  if (x_dims.size() == 1UL) {
    int64_t size_ = x_dims[0] + std::abs(offset);
    out->set_dims({size_, size_});
    out->set_dtype(x.dtype());
  } else if (x_dims.size() == 2UL) {
    int64_t size_ = 0;
    if (offset >= 0) {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] < x_dims[1] - offset) {
        size_ = x_dims[0];
      } else {
        size_ = x_dims[1] - offset;
      }
    } else {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] + offset < x_dims[1]) {
        size_ = x_dims[0] + offset;
      } else {
        size_ = x_dims[1];
      }
    }
    out->set_dims({size_});
    out->set_dtype(x.dtype());
  } else {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The input tensor X's dimensions of DiagV2Op should be either 1 or "
        "2, but received %d.",
        x_dims.size()));
  }
}

void DiagonalInferMeta(const MetaTensor& input,
                       int offset,
                       int axis1,
                       int axis2,
                       MetaTensor* out) {
  auto x_dims = input.dims();
  int offset_ = offset;
  int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1;
  int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2;

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      phi::errors::OutOfRange("Input's dim is out of range (expected at "
                              "least 2 dimensions, but got %ld).",
                              x_dims.size()));
  PADDLE_ENFORCE_LT(
      axis1_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis1) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis1));
  PADDLE_ENFORCE_LT(
      axis2_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis2) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis2));
  PADDLE_ENFORCE_NE(axis1_,
                    axis2_,
                    phi::errors::InvalidArgument(
                        "The dimensions should not be identical "
                        "%d vs %d.",
                        axis1,
                        axis2));

  auto out_dims = vectorize(x_dims);
  // from out_dims get the dim size of axis1_.
  auto axis1_size = out_dims[axis1_];
  auto axis2_size = out_dims[axis2_];
  // delete two dims by attr axis1 and axis2 from out_dims.
  /* example:
     out_dim = [2, 3, 4];
     axis1 = 0;
     axis2 = 1;
     according to the attr of axis1 and axis2, we get:
     out_dim = [4].
  */
  out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_));
  out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_));

  if (offset_ == 0) {
    out_dims.push_back(std::min(axis1_size, axis2_size));
  } else if (offset_ > 0) {
    if ((axis2_size - offset_) > 0) {
      out_dims.push_back(std::min(axis1_size, axis2_size - offset_));
    } else {
      out_dims.push_back(0);
    }
  } else {
    if ((axis1_size + offset_) > 0) {
      out_dims.push_back(std::min(axis1_size + offset_, axis2_size));
    } else {
      out_dims.push_back(0);
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
}

void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v) {
  auto input_dim = x.dims();
  auto rank = input_dim.size();

  PADDLE_ENFORCE_GE(rank,
                    2,
                    phi::errors::InvalidArgument(
                        "The Input(X) should have at least 2 dimensions."
                        "But received a %d dimension tensor.",
                        rank));
  PADDLE_ENFORCE_EQ(
      input_dim[rank - 2],
      input_dim[rank - 1],
      phi::errors::InvalidArgument(
          "Eigh op is designed for square matrix, consequently"
          "inner-most 2 dimensions of Input(X) should be symmetric."
          "But received X's shape[-2] = %d and shape[-1] = %d.",
          input_dim[rank - 2],
          input_dim[rank - 1]));

  std::vector<int64_t> values_dim;

  for (auto i = 0; i < rank - 1; i++) {
    values_dim.emplace_back(input_dim[i]);
  }

  out_w->set_dims(phi::make_ddim(values_dim));
  out_v->set_dims(input_dim);
}

void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out) {
  auto x_dims = x.dims();
  int in_dims_size = x_dims.size();
  if (start_axis < 0) {
    start_axis = start_axis + in_dims_size;
  }
  if (stop_axis < 0) {
    stop_axis = stop_axis + in_dims_size;
  }
  PADDLE_ENFORCE_GE(stop_axis,
                    start_axis,
                    phi::errors::InvalidArgument(
                        "The stop_axis should be greater"
                        "than or equal to start_axis."));

  int64_t outer = 1;
  std::vector<int32_t> out_shape;
  out_shape.reserve(in_dims_size - stop_axis + start_axis);

  for (int i = 0; i < start_axis; ++i) {
    out_shape.push_back(x_dims[i]);
  }
  for (int i = start_axis; i <= stop_axis; i++) {
    if (x_dims[i] == -1 || outer == -1) {
      outer = -1;
    } else {
      outer *= x_dims[i];
    }
  }
  out_shape.push_back(outer);
  for (int i = stop_axis + 1; i < in_dims_size; i++) {
    out_shape.push_back(x_dims[i]);
  }
  const auto& out_dims = phi::make_ddim(out_shape);
  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());

  if (x_dims[0] == out_dims[0]) {
    // Only pass LoD when the first dimension of output and Input(X)
    // are the same.
    out->share_lod(x);
  }
}

void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out) {
  UnchangedInferMetaCheckAxis(x, axis, out);
}

void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) {
  PADDLE_ENFORCE_EQ(
      product(x.dims()),
      1UL,
      errors::InvalidArgument("The number of elements in Input(X) should be 1."
                              "Now the number is %d.",
                              product(x.dims())));
  out->set_dims(x.dims());
  out->share_lod(x);
  out->set_dtype(x.dtype());
}

static phi::DDim ValidateShape(const std::vector<int64_t> shape,
                               const phi::DDim& in_dims) {
  const int64_t in_size = phi::product(in_dims);
  auto in_dims_vec = phi::vectorize(in_dims);
  bool all_positive = std::all_of(in_dims_vec.cbegin(),
                                  in_dims_vec.cend(),
                                  [](int64_t i) { return i > 0; });
  // only one dimension can be set to -1, whose size will be automatically
  // infered.
  const int64_t unk_dim_val = -1;
  const int64_t copy_dim_val = 0;

  std::vector<int64_t> output_shape(shape.size(), 0);
  int64_t capacity = 1;
  int unk_dim_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == unk_dim_val) {
      PADDLE_ENFORCE_EQ(
          unk_dim_idx,
          -1,
          phi::errors::InvalidArgument(
              "Only one dimension value of 'shape' in ReshapeOp can "
              "be -1. But received shape = [%s], shape[%d] is also -1.",
              phi::make_ddim(shape),
              i));
      unk_dim_idx = i;
    } else if (shape[i] == copy_dim_val) {
      PADDLE_ENFORCE_LT(
          static_cast<int>(i),
          in_dims.size(),
          phi::errors::InvalidArgument(
              "The index of 0 in `shape` must be less than "
              "the input tensor X's dimensions. "
              "But received shape = [%s], shape[%d] = 0, X's shape = [%s], "
              "X's dimensions = %d.",
              phi::make_ddim(shape),
              i,
              in_dims,
              in_dims.size()));
    } else {
      PADDLE_ENFORCE_GT(
          shape[i],
          0,
          phi::errors::InvalidArgument(
              "Each dimension value of 'shape' in ReshapeOp must not "
              "be negative except one unknown dimension. "
              "But received shape = [%s], shape[%d] = %d.",
              phi::make_ddim(shape),
              i,
              shape[i]));
    }

    // NOTE all non-zero values will be converted to True (include negative
    // value)
    capacity *= (shape[i] ? shape[i] : in_dims[i]);
    output_shape[i] = (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
  }
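The -1/0 conventions enforced by ValidateShape above (at most one -1, whose size is inferred from the remaining element count; 0 copies the input dim at the same index) can be summarized in a small sketch. The hunk is cut off before the actual -1 inference, so that final division is an assumption based on the visible unk_dim_idx/capacity bookkeeping; infer_reshape is a hypothetical standalone helper assuming all input dims are positive, not the patch's API.

#include <cassert>
#include <cstdint>
#include <vector>

// Resolves a reshape target: 0 copies the input dim at that index, a single
// -1 absorbs whatever element count remains. Assumes positive input dims.
std::vector<int64_t> infer_reshape(const std::vector<int64_t>& in_dims,
                                   std::vector<int64_t> shape) {
  int64_t in_size = 1;
  for (int64_t d : in_dims) in_size *= d;
  int64_t capacity = 1;
  int unk_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      unk_idx = static_cast<int>(i);
    } else if (shape[i] == 0) {
      shape[i] = in_dims[i];  // copy the corresponding input dim
    }
    if (shape[i] > 0) capacity *= shape[i];
  }
  // Assumed final step (not shown in the hunk): the -1 dim absorbs the rest.
  if (unk_idx >= 0) shape[unk_idx] = in_size / capacity;
  return shape;
}

int main() {
  // [4, 6] reshaped with {0, -1, 2}: 0 keeps 4, -1 becomes 24 / (4*2) = 3.
  assert((infer_reshape({4, 6}, {0, -1, 2}) == std::vector<int64_t>{4, 3, 2}));
  return 0;
}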
@@ -360,6 +548,11 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dtype(DataType::BOOL);
}

void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(DataType::BOOL);
}

void MultinomialInferMeta(const MetaTensor& x,
                          int num_samples,
                          bool replacement,
@@ -395,124 +588,97 @@ void MultinomialInferMeta(const MetaTensor& x,
  out->set_dtype(DataType::INT64);
}

void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config) {
  auto x_dim = input.dims();
  PADDLE_ENFORCE_EQ(
      static_cast<int>(paddings.size()),
      x_dim.size() * 2,
      phi::errors::InvalidArgument(
          "Size of 'paddings' dimension should be equal to 2 * size of "
          "Input(X)'s dimension, but received (size of 'paddings' dimension "
          "is) %d vs (2 * size of Input(X)'s dimension is) %d.",
          static_cast<int>(paddings.size()),
          x_dim.size() * 2));
  for (size_t i = 0; i < paddings.size(); ++i) {
    PADDLE_ENFORCE_GE(paddings[i],
                      0,
                      phi::errors::InvalidArgument(
                          "The element of 'paddings' should >= 0, but "
                          "received %d for index %d.",
                          paddings[i],
                          static_cast<int>(i)));
  }
  std::vector<int64_t> out_dims(x_dim.size());
  for (int i = 0; i < x_dim.size(); ++i) {
    if ((!config.is_runtime) && (x_dim[i] == -1)) {
      out_dims[i] = -1;
    } else {
      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
  if (out_dims[0] == x_dim[0]) {
    // Only pass LoD when the first dimension is equal between
    // output and input.
    out->share_lod(input);
  }
  out->set_dtype(input.dtype());
}

void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out) {
  auto input_dims = x.dims();
  PADDLE_ENFORCE_EQ(input_dims.size(),
                    4,
                    phi::errors::InvalidArgument(
                        "Input should be a 4-D tensor of format [N, C, H, W] "
                        "or [N, H, W, C], but got %u.",
                        input_dims.size()));

  const bool channel_last = (data_format == "NHWC");

  if (!channel_last) {
    PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[1]));
  } else {
    PADDLE_ENFORCE_EQ(input_dims[3] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[3]));
  }
  auto output_dims = input_dims;
  output_dims[0] = input_dims[0];
  if (!channel_last) {
    output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor);
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] * upscale_factor;
  } else {
    output_dims[1] = input_dims[1] * upscale_factor;
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor);
  }
  out->set_dtype(x.dtype());
  out->set_dims(output_dims);
}

void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(dtype::ToReal(x.dtype()));
  out->set_layout(x.layout());
}

DDim ReduceInferDim(const MetaTensor& x,
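The NCHW/NHWC branches of PixelShuffleInferMeta above trade channels for spatial resolution by a factor of upscale_factor squared. Below is a minimal sketch of the NCHW case; pixel_shuffle_nchw_dims is a hypothetical helper name, not Paddle API.

#include <array>
#include <cassert>
#include <cstdint>

// NCHW pixel shuffle: C shrinks by r*r while H and W each grow by r, so the
// total element count is preserved.
std::array<int64_t, 4> pixel_shuffle_nchw_dims(std::array<int64_t, 4> in,
                                               int64_t r) {
  return {{in[0], in[1] / (r * r), in[2] * r, in[3] * r}};
}

int main() {
  // [N=1, C=16, H=8, W=8] with r=2 -> [1, 4, 16, 16].
  auto out = pixel_shuffle_nchw_dims({{1, 16, 8, 8}}, 2);
  assert((out == std::array<int64_t, 4>{{1, 4, 16, 16}}));
  return 0;
}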
@@ -584,29 +750,12 @@ DDim ReduceInferDim(const MetaTensor& x,
  return out_dim;
}

void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out) {
  bool reduce_all = false;
  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
}

void ReduceInferMetaBase(const MetaTensor& x,
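ReduceInferDim itself appears only as truncated context here, but the reduced-shape rule it feeds into is the standard one: reduced axes are dropped, or kept as 1 when keep_dim is set, and reduce_all collapses every axis. A hedged sketch follows; reduce_dims is an illustrative helper, and the real function also handles negative axes and validation not visible in this hunk.

#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

// Standard reduce-shape rule: reduced axes vanish, or become 1 with keep_dim;
// reduce_all reduces every axis regardless of the axis set.
std::vector<int64_t> reduce_dims(const std::vector<int64_t>& dims,
                                 const std::set<int64_t>& axes,
                                 bool keep_dim,
                                 bool reduce_all) {
  std::vector<int64_t> out;
  for (int64_t i = 0; i < static_cast<int64_t>(dims.size()); ++i) {
    bool reduced = reduce_all || axes.count(i) > 0;
    if (!reduced) {
      out.push_back(dims[i]);
    } else if (keep_dim) {
      out.push_back(1);
    }
  }
  return out;
}

int main() {
  // sum over axis 1 of [2, 3, 4]: [2, 4]; with keep_dim: [2, 1, 4].
  assert((reduce_dims({2, 3, 4}, {1}, false, false) ==
          std::vector<int64_t>{2, 4}));
  assert((reduce_dims({2, 3, 4}, {1}, true, false) ==
          std::vector<int64_t>{2, 1, 4}));
  return 0;
}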
@@ -620,33 +769,109 @@ void ReduceInferMetaBase(const MetaTensor& x,
  out->set_layout(x.layout());
}

void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
                      MetaConfig config) {
  auto& shape_data = shape.GetData();
  PADDLE_ENFORCE_NOT_NULL(out,
                          phi::errors::InvalidArgument(
                              "Output(Out) of ReshapeOp should not be null."));
  if (!config.is_runtime && shape.FromTensor()) {
    out->set_dims(phi::make_ddim(shape_data));
    out->share_lod(x);
    return;
  }
  PADDLE_ENFORCE_GT(shape_data.size(),
                    0,
                    phi::errors::InvalidArgument(
                        "The shape's size in ReshapeOp can't be zero."));
  InferMetaFromVecValue(x, shape_data, out);
}

void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                const ScalarArray& shape,
                                MetaTensor* xshape,
                                MetaTensor* out,
                                MetaConfig config) {
  PADDLE_ENFORCE_NOT_NULL(
      xshape,
      phi::errors::InvalidArgument(
          "Output(XShape) of ReshapeOp should not be null."));
  const auto& x_dims = x.dims();
  std::vector<int64_t> xshape_dims(x_dims.size() + 1);
  xshape_dims[0] = 0;
  for (int i = 0; i < x_dims.size(); ++i) {
    xshape_dims[i + 1] = x_dims[i];
  }
  xshape->set_dims(phi::make_ddim(xshape_dims));
  xshape->share_lod(x);
  ReshapeInferMeta(x, shape, out, config);
}

void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config) {
  auto x_dims = in.dims();
  PADDLE_ENFORCE_GE(x_dims.size(),
                    2,
                    phi::errors::InvalidArgument(
                        "Rank of Input(X) should be at least 2, "
                        "but the value given is %d.",
                        x_dims.size()));
  if (config.is_runtime || x_dims[x_dims.size() - 1] > 0) {
    PADDLE_ENFORCE_EQ(x_dims[x_dims.size() - 1],
                      1U,
                      phi::errors::InvalidArgument(
                          "The last dimension of Input(X) should be 1, "
                          "but the value given is %d.",
                          x_dims[x_dims.size() - 1]));
  }
  out->set_dims(x_dims);
  out->share_lod(in);
  out->set_dtype(in.dtype());
}

void SizeInferMeta(const MetaTensor& input, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
  out->set_dims({1});
}

void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) {
  auto dim_x = x.dims();
  auto rank_x = dim_x.size();
  PADDLE_ENFORCE_GE(axis,
                    -rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));
  PADDLE_ENFORCE_LT(axis,
                    rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));

  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->share_lod(x);
}

void SplitInferMeta(const MetaTensor& x,
                    const ScalarArray& num_or_sections,
                    const Scalar& axis,
                    std::vector<MetaTensor*> out,
                    MetaConfig config) {
  int axis_value = axis.to<int>();
  int rank = x.dims().size();
  PADDLE_ENFORCE_EQ(
      axis_value >= -rank && axis_value < rank,
      true,
      phi::errors::InvalidArgument(
          "The axis is expected to be in range of [%d, %d), but got %d",
          -rank,
          rank,
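SplitInferMeta's body is cut off by the next hunk, but its num_or_sections argument follows the usual split convention: a single integer divides the axis evenly, while an explicit section list may carry one -1 that absorbs the remaining length. The sketch below illustrates only that convention; resolve_sections is a hypothetical helper, and the -1 handling is an assumption based on the operator's documented behavior rather than code visible in this diff.

#include <cassert>
#include <cstdint>
#include <vector>

// Expands an explicit section list along an axis of length axis_len.
// One entry may be -1 and absorbs whatever length the others leave over.
std::vector<int64_t> resolve_sections(int64_t axis_len,
                                      std::vector<int64_t> sections) {
  int64_t known = 0;
  int infer_idx = -1;
  for (size_t i = 0; i < sections.size(); ++i) {
    if (sections[i] == -1) {
      infer_idx = static_cast<int>(i);
    } else {
      known += sections[i];
    }
  }
  if (infer_idx >= 0) sections[infer_idx] = axis_len - known;
  return sections;
}

int main() {
  // Splitting an axis of length 9 with sections {2, -1, 3}: -1 becomes 4.
  assert((resolve_sections(9, {2, -1, 3}) == std::vector<int64_t>{2, 4, 3}));
  return 0;
}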
@@ -767,22 +992,108 @@ void SplitInferMeta(const MetaTensor& x,
  }
}

/* Why not use SumRawInferMeta directly?
   Because we need make InferMetaFunction's args follow the design of api.yaml
*/
void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
                  DataType dtype,
                  bool keep_dim,
                  MetaTensor* out) {
  bool reduce_all = false;
  SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out);
}

void SumRawInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     bool reduce_all,
                     DataType dtype,
                     MetaTensor* out) {
  DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);

  DataType out_dtype;
  if (dtype != DataType::UNDEFINED) {
    out_dtype = dtype;
  } else {
    if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 ||
        x.dtype() == DataType::INT64) {
      out_dtype = DataType::INT64;
    } else {
      out_dtype = x.dtype();
    }
  }

  out->set_dims(out_dim);
  out->set_dtype(out_dtype);
  out->set_layout(x.layout());
}

void TileInferMeta(const MetaTensor& x,
                   const ScalarArray& repeat_times,
                   MetaTensor* out,
                   MetaConfig config) {
#define MAX_RANK_SUPPORTED 6

  auto repeat_times_data = repeat_times.GetData();
  auto x_dims = x.dims();
  if (repeat_times_data.size() == 0) {
    repeat_times_data = std::vector<int64_t>(x_dims.size(), -1);
  }

  PADDLE_ENFORCE_LE(
      x_dims.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The rank of the input 'x' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          x_dims.size()));
  PADDLE_ENFORCE_LE(
      repeat_times_data.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          repeat_times_data.size()));
  PADDLE_ENFORCE_GE(
      repeat_times_data.size(),
      1,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must be positive integers, but the value received is %d.",
          repeat_times_data.size()));

  auto out_rank =
      std::max(static_cast<size_t>(x_dims.size()), repeat_times_data.size());
  std::vector<int64_t> out_shape(out_rank);
  auto x_dim_vec = phi::vectorize<int>(x_dims);
  if (x_dim_vec.size() > repeat_times_data.size()) {
    auto diff = x_dim_vec.size() - repeat_times_data.size();
    repeat_times_data.insert(repeat_times_data.begin(), diff, -1);
  } else {
    auto diff = repeat_times_data.size() - x_dim_vec.size();
    x_dim_vec.insert(x_dim_vec.begin(), diff, -1);
  }
  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    if (x_dim_vec[i] == -1 || repeat_times_data[i] == -1) {
      out_shape[i] = -1;
    } else {
      PADDLE_ENFORCE_GT(
          repeat_times_data[i],
          0,
          errors::InvalidArgument(
              "Every element of the input 'repeat_times' for tile op must be "
              "greater than 0, but the value given is %d.",
              repeat_times_data[i]));
      out_shape[i] = x_dim_vec[i] * repeat_times_data[i];
    }
  }
  out->set_dims(phi::make_ddim(out_shape));
  if (out_shape[0] == x_dims[0]) {
    out->share_lod(x);
  }
}
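TileInferMeta above right-aligns x's dims with repeat_times by front-padding the shorter of the two with -1, then multiplies entrywise, propagating -1 as "unknown". A standalone sketch of that rule; tile_out_shape is an illustrative name, not the patch's API.

#include <cassert>
#include <cstdint>
#include <vector>

// Right-aligns dims and repeats by front-padding the shorter vector with -1
// (unknown), then multiplies entrywise; -1 on either side stays -1.
std::vector<int64_t> tile_out_shape(std::vector<int64_t> dims,
                                    std::vector<int64_t> repeats) {
  if (dims.size() > repeats.size()) {
    repeats.insert(repeats.begin(), dims.size() - repeats.size(), -1);
  } else {
    dims.insert(dims.begin(), repeats.size() - dims.size(), -1);
  }
  std::vector<int64_t> out(dims.size());
  for (size_t i = 0; i < dims.size(); ++i) {
    out[i] = (dims[i] == -1 || repeats[i] == -1) ? -1 : dims[i] * repeats[i];
  }
  return out;
}

int main() {
  // x of shape [2, 3] tiled with repeat_times [4, 1, 2] -> [-1, 2, 6]:
  // the leading padded dim is unknown, matching the -1 propagation above.
  assert((tile_out_shape({2, 3}, {4, 1, 2}) ==
          std::vector<int64_t>{-1, 2, 6}));
  // [2, 3] with repeats [3, 2] -> [6, 6].
  assert((tile_out_shape({2, 3}, {3, 2}) == std::vector<int64_t>{6, 6}));
  return 0;
}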
@@ -840,79 +1151,112 @@ void TraceInferMeta(
  out->set_dtype(x.dtype());
}

void TransferLayoutInferMeta(const MetaTensor& x,
                             DataLayout layout,
                             MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->set_layout(layout);
}

void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out) {
  auto x_dims = x.dims();
  size_t x_rank = x_dims.size();
  size_t axis_size = axis.size();

  PADDLE_ENFORCE_EQ(
      x_rank,
      axis_size,
      errors::InvalidArgument("The input tensor's dimension "
                              "should be equal to the axis's size. "
                              "But received input tensor's dimension is %d, "
                              "axis's size is %d",
                              x_rank,
                              axis_size));

  std::vector<int> count(axis_size, 0);
  for (size_t i = 0; i < axis_size; i++) {
    PADDLE_ENFORCE_GE(
        axis[i],
        0,
        errors::InvalidArgument("The axis should be greater than or equal to 0."
                                "But received %d of axis[%d]",
                                axis[i],
                                i));

    PADDLE_ENFORCE_EQ(
        axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
        true,
        errors::InvalidArgument(
            "Each element of Attribute axis should "
            "be a unique value range from 0 to (dims - 1), "
            "where the dims is the axis's size, "
            "unique value means this axis value can appear only once. "
            "But received axis[%d] is %d, axis_size is %d, "
            "count[axis[%d]] is %d",
            i,
            axis[i],
            axis_size,
            i,
            count[axis[i]]));
  }

  phi::DDim out_dims(x_dims);
  for (size_t i = 0; i < axis_size; ++i) {
    out_dims[i] = x_dims[axis[i]];
  }

  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
}

void UnbindInferMeta(const MetaTensor& x,
                     int axis,
                     std::vector<MetaTensor>* outs) {
  auto in_dims = x.dims();
  std::vector<int> out_dim;
  axis = axis < 0 ? in_dims.size() + axis : axis;
  for (int i = 0; i < in_dims.size(); ++i) {
    if (i != axis) out_dim.push_back(in_dims[i]);
  }
  auto out_dims = phi::make_ddim(out_dim);

  for (size_t i = 0; i < outs->size(); ++i) {
    (*outs)[i].set_dtype(x.dtype());
    (*outs)[i].set_dims(out_dims);
    (*outs)[i].set_layout(x.layout());
    (*outs)[i].share_lod(x);
  }
}

void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->share_meta(x);
}

// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void UnchangedInferMetaCheckAxis(const MetaTensor& x,
                                 int axis,
                                 MetaTensor* out) {
  auto rank = x.dims().size();
  PADDLE_ENFORCE_GE(
      axis,
      -rank,
      errors::InvalidArgument(
          "Attr(axis) value should be in range [-R, R-1], "
          "R is the rank of Input(X). But received axis: %d, R: %d.",
          axis,
          rank));
  PADDLE_ENFORCE_LT(
      axis,
      rank,
      phi::errors::InvalidArgument(
          "Attr(axis) value should be in range [-R, R-1], "
          "R is the rank of Input(X). But received axis: %d, R: %d.",
          axis,
          rank));
  out->share_meta(x);
}

void UnfoldInferMeta(const MetaTensor& x,
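TransposeInferMeta above validates that axis is a permutation (each value in [0, rank) appears exactly once, via the count[] trick) before permuting dims. A compact sketch of both steps; is_permutation and permute_dims are illustrative names, not Paddle API.

#include <cassert>
#include <cstdint>
#include <vector>

// True iff axis contains each value in [0, axis.size()) exactly once,
// mirroring the count[] check in TransposeInferMeta.
bool is_permutation(const std::vector<int>& axis) {
  std::vector<int> count(axis.size(), 0);
  for (int a : axis) {
    if (a < 0 || a >= static_cast<int>(axis.size()) || ++count[a] != 1) {
      return false;
    }
  }
  return true;
}

// out[i] takes the input dim selected by axis[i].
std::vector<int64_t> permute_dims(const std::vector<int64_t>& dims,
                                  const std::vector<int>& axis) {
  std::vector<int64_t> out(dims.size());
  for (size_t i = 0; i < axis.size(); ++i) out[i] = dims[axis[i]];
  return out;
}

int main() {
  assert(is_permutation({2, 0, 1}));
  assert(!is_permutation({0, 0, 2}));  // duplicate axis value rejected
  // [2, 3, 4] permuted by (2, 0, 1) -> [4, 2, 3].
  assert((permute_dims({2, 3, 4}, {2, 0, 1}) ==
          std::vector<int64_t>{4, 2, 3}));
  return 0;
}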
@@ -1073,303 +1417,6 @@ void UnfoldInferMeta(const MetaTensor& x,
  out->set_dims(phi::make_ddim(out_dims));
}

void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
  auto rank = condition.dims().size();
  PADDLE_ENFORCE_GE(

@@ -1381,53 +1428,6 @@ void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
}

}  // namespace phi

PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
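The .cc file ends by binding an infermeta function to an op name through PD_REGISTER_INFER_META_FN, as the visible copy_to line shows. A hedged sketch of what a registration for one of the functions touched here might look like, assuming the macro takes (op_name, function) as in that line; whether this commit adds any such registration is not visible in the diff.

// Hypothetical registration, modeled on the copy_to line above; the diff
// does not show registrations being added for the moved functions.
PD_REGISTER_INFER_META_FN(softmax, phi::SoftmaxInferMeta);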
paddle/phi/infermeta/unary.h

@@ -32,32 +32,20 @@ class MetaConfig;
// Because functions in this file not only can infer shape, but also need
// infer lod or other useful data.

void ArgMinMaxInferMeta(const MetaTensor& x,
                        int64_t axis,
                        bool keepdims,
                        bool flatten,
                        int dtype,
                        MetaTensor* out,
                        MetaConfig config = MetaConfig());

void ArgsortInferMeta(const MetaTensor& input,
                      int axis,
                      bool descending,
                      MetaTensor* output,
                      MetaTensor* indices);

void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out);

void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out);
@@ -76,6 +64,30 @@ void CumsumInferMeta(const MetaTensor& x,
                     bool reverse,
                     MetaTensor* out);

void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out);

void DiagonalInferMeta(
    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out);

void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v);

void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out);

void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out);

void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);

void InferMetaFromVecValue(const MetaTensor& x,
@@ -84,11 +96,37 @@ void InferMetaFromVecValue(const MetaTensor& x,

void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);

void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);

void MultinomialInferMeta(const MetaTensor& x,
                          int num_samples,
                          bool replacement,
                          MetaTensor* out);

void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config = MetaConfig());

void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out);

void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);

void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out);

void ReduceInferMetaBase(const MetaTensor& x,
                         const std::vector<int64_t>& axis,
                         bool keep_dim,
                         bool reduce_all,
                         MetaTensor* out);

void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
@@ -100,28 +138,23 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                MetaTensor* out,
                                MetaConfig config = MetaConfig());

void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config = MetaConfig());

void SizeInferMeta(const MetaTensor& input, MetaTensor* out);

void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);

void SplitInferMeta(const MetaTensor& x_meta,
                    const ScalarArray& num_or_sections,
                    const Scalar& axis,
                    std::vector<MetaTensor*> out,
                    MetaConfig config = MetaConfig());

void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
@@ -129,21 +162,39 @@ void SumInferMeta(const MetaTensor& x,
                  bool keep_dim,
                  MetaTensor* out);

void SumRawInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     bool reduce_all,
                     DataType dtype,
                     MetaTensor* out);

void TileInferMeta(const MetaTensor& x,
                   const ScalarArray& repeat_times,
                   MetaTensor* out,
                   MetaConfig config = MetaConfig());

void TraceInferMeta(
    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out);

void TransferLayoutInferMeta(const MetaTensor& x,
                             DataLayout layout,
                             MetaTensor* out);

void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out);

void UnbindInferMeta(const MetaTensor& x,
                     int axis,
                     std::vector<MetaTensor>* outs);

void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out);

// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void UnchangedInferMetaCheckAxis(const MetaTensor& x,
                                 int axis,
                                 MetaTensor* out);

void UnfoldInferMeta(const MetaTensor& x,
                     const std::vector<int>& kernel_sizes,
@@ -153,56 +204,6 @@ void UnfoldInferMeta(const MetaTensor& x,
                     MetaTensor* out,
                     MetaConfig config = MetaConfig());

void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);

}  // namespace phi