Commit f7765991 (PaddlePaddle / Paddle)

Author: phlrain
Date: March 13, 2022
Parents: 1694bcc8, 1b0cecb7

    Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_some_yaml_config

Showing 16 changed files with 1,566 additions and 1,562 deletions.
paddle/fluid/framework/infershape_utils.cc   +2    -2
paddle/fluid/operators/gather_nd_op.cc       +0    -1
paddle/fluid/operators/softmax_op.cc         +2    -2
paddle/phi/core/meta_tensor.cc               +2    -2
paddle/phi/core/meta_tensor.h                +1    -1
paddle/phi/infermeta/backward.cc             +15   -14
paddle/phi/infermeta/backward.h              +6    -1
paddle/phi/infermeta/binary.cc               +452  -451
paddle/phi/infermeta/binary.h                +57   -57
paddle/phi/infermeta/nullary.cc              +18   -18
paddle/phi/infermeta/nullary.h               +9    -9
paddle/phi/infermeta/ternary.cc              +153  -152
paddle/phi/infermeta/ternary.h               +21   -25
paddle/phi/infermeta/unary.cc                +730  -730
paddle/phi/infermeta/unary.h                 +97   -96
paddle/phi/kernels/funcs/matrix_inverse.h    +1    -1
paddle/fluid/framework/infershape_utils.cc

@@ -249,13 +249,13 @@ class CompatMetaTensor : public phi::MetaTensor {
   }

   void share_meta(const MetaTensor& meta_tensor) override {
-    share_dims(meta_tensor);
     set_dtype(meta_tensor.dtype());
     // VarDesc doesn't contains layout, so we cannot share layout
     // set_layout(meta_tensor.layout());

-    // special case 1: share lod of LoDTensor
+    // special case: share lod of LoDTensor
     share_lod(meta_tensor);
+    share_dims(meta_tensor);
   }

 private:
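The change above only reorders how CompatMetaTensor propagates metadata: share_meta now copies dtype first, then LoD, and shares dims last. A minimal standalone sketch of that propagation order (a hypothetical MiniMeta type for illustration, not Paddle's real MetaTensor or CompatMetaTensor):

// Minimal standalone sketch of the share_meta propagation order above.
// MiniMeta is hypothetical; build with: g++ -std=c++17 sketch.cc
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct MiniMeta {
  std::vector<int64_t> dims;
  std::string dtype;
  std::vector<size_t> lod;  // stand-in for phi's LoD offsets

  void share_meta(const MiniMeta& src) {
    dtype = src.dtype;  // set_dtype(meta_tensor.dtype());
    lod = src.lod;      // share_lod(meta_tensor);
    dims = src.dims;    // share_dims(meta_tensor);  (now done last)
  }
};

int main() {
  MiniMeta x{{2, 3}, "float32", {0, 2}};
  MiniMeta out;
  out.share_meta(x);
  std::cout << out.dtype << " rank=" << out.dims.size() << "\n";
}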
paddle/fluid/operators/gather_nd_op.cc

@@ -16,7 +16,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
-#include "paddle/phi/infermeta/ternary.h"

 namespace paddle {
 namespace operators {
paddle/fluid/operators/softmax_op.cc

@@ -215,7 +215,7 @@ REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                   ops::SoftmaxOpGradMaker<paddle::framework::OpDesc>,
                   ops::SoftmaxOpGradMaker<paddle::imperative::OpBase>,
                   ops::SoftmaxInplaceInferer, SoftmaxInferShapeFunctor);
-DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradnferShapeFunctor,
+DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradInferShapeFunctor,
                             PD_INFER_META(phi::GeneralUnaryGradInferMeta));
 REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad,
-                  SoftmaxGradnferShapeFunctor);
+                  SoftmaxGradInferShapeFunctor);
paddle/phi/core/meta_tensor.cc

@@ -110,7 +110,7 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) {
   }
 }

-TensorBase* MetaTensor::get_tensor() const { return tensor_; }
+TensorBase* MetaTensor::tensor() const { return tensor_; }

 void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   bool is_dense_tensor = phi::DenseTensor::classof(tensor_);
@@ -118,7 +118,7 @@ void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   if (is_dense_tensor || is_selected_rows) {
     set_dims(meta_tensor.dims());
     if (is_selected_rows) {
-      const auto in_tensor_base = meta_tensor.get_tensor();
+      const auto in_tensor_base = meta_tensor.tensor();
       PADDLE_ENFORCE_EQ(
           phi::SelectedRows::classof(in_tensor_base),
           true,
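share_dims above dispatches on the runtime kind of the wrapped tensor through static classof() tests (phi::DenseTensor::classof, phi::SelectedRows::classof). A self-contained sketch of that LLVM-style classof pattern, using hypothetical stand-in types rather than phi's real hierarchy:

// Standalone sketch of the classof() dispatch used by share_dims.
// TensorBaseLike / DenseLike / SelectedRowsLike are hypothetical.
#include <iostream>

struct TensorBaseLike {
  enum class Kind { kDense, kSelectedRows };
  explicit TensorBaseLike(Kind k) : kind(k) {}
  virtual ~TensorBaseLike() = default;
  Kind kind;
};

struct DenseLike : TensorBaseLike {
  DenseLike() : TensorBaseLike(Kind::kDense) {}
  static bool classof(const TensorBaseLike* t) {
    return t->kind == Kind::kDense;
  }
};

struct SelectedRowsLike : TensorBaseLike {
  SelectedRowsLike() : TensorBaseLike(Kind::kSelectedRows) {}
  static bool classof(const TensorBaseLike* t) {
    return t->kind == Kind::kSelectedRows;
  }
};

int main() {
  SelectedRowsLike rows;
  TensorBaseLike* t = &rows;
  // Mirrors: bool is_dense_tensor = phi::DenseTensor::classof(tensor_);
  std::cout << DenseLike::classof(t) << " " << SelectedRowsLike::classof(t)
            << "\n";  // prints: 0 1
}

This gives cheap, virtual-call-free type tests, which is why the code queries a static classof rather than dynamic_cast.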
paddle/phi/core/meta_tensor.h

@@ -66,7 +66,7 @@ class MetaTensor {
   // Because the lod in compiletime and runtime is different,
   // so `LoD` cannot in public methods
   const LoD& lod() const;

-  TensorBase* get_tensor() const;
+  TensorBase* tensor() const;

   TensorBase* tensor_;
 };
paddle/phi/infermeta/backward.cc

@@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
   }
 }

-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
-  if (dx) {
-    dx->share_meta(x);
-  }
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad) {
+  const auto& dtype = out_grad.dtype();
+  x_grad->set_dims(x.dims());
+  x_grad->share_lod(x);
+  x_grad->set_dtype(dtype);
 }

 void GeneralBinaryGradInferMeta(const MetaTensor& x,
@@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
   }
 }

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
+  if (dx) {
+    dx->share_meta(x);
+  }
+}
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
@@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
       dout.dims(),
       errors::InvalidArgument(
           "Input(Out) and its gradients should have the same shape."));

   dx->share_meta(dout);
 }

-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad) {
-  const auto& dtype = out_grad.dtype();
-  x_grad->set_dims(x.dims());
-  x_grad->share_lod(x);
-  x_grad->set_dtype(dtype);
-}
-
 void PsroiPoolGradInferMeta(const MetaTensor& x,
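The two relocated functions illustrate two different grad-meta strategies: GeneralUnaryGradInferMeta mirrors every piece of metadata from x into dx (guarded by a null check), while GatherNdGradInferMeta takes shape and LoD from the forward input but dtype from the incoming gradient. A standalone sketch of the contrast, using a hypothetical Meta struct rather than phi::MetaTensor:

// Standalone sketch contrasting the two grad-meta strategies above.
#include <cstdint>
#include <string>
#include <vector>

struct Meta {
  std::vector<int64_t> dims;
  std::string dtype;
};

// GeneralUnaryGradInferMeta pattern: dx mirrors x entirely, if dx exists.
void general_unary_grad(const Meta& x, Meta* dx) {
  if (dx) *dx = x;  // dx->share_meta(x);
}

// GatherNdGradInferMeta pattern: shape follows the forward input x,
// dtype follows the incoming gradient out_grad.
void gather_nd_grad(const Meta& x, const Meta& out_grad, Meta* x_grad) {
  x_grad->dims = x.dims;           // x_grad->set_dims(x.dims());
  x_grad->dtype = out_grad.dtype;  // x_grad->set_dtype(out_grad.dtype());
}

int main() {
  Meta x{{4, 3}, "float32"}, og{{4}, "float32"}, gx;
  gather_nd_grad(x, og, &gx);
  return gx.dims.size() == 2 ? 0 : 1;
}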
paddle/phi/infermeta/backward.h

@@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                         MetaTensor* dweight,
                                         MetaTensor* dbias);

-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad);

 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
@@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
                                  MetaTensor* dy,
                                  MetaTensor* dz);

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
paddle/phi/infermeta/binary.cc

@@ -22,6 +22,153 @@ limitations under the License. */
 namespace phi {

+void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
+  out->share_meta(x);
+}
+
+void BCELossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      MetaTensor* out,
+                      MetaConfig config) {
+  auto input_dims = input.dims();
+  auto label_dims = label.dims();
+  int rank = input_dims.size();
+  PADDLE_ENFORCE_EQ(rank,
+                    label_dims.size(),
+                    phi::errors::InvalidArgument(
+                        "Input(X) and Input(Label) shall have the same rank."
+                        "But received: the rank of Input(X) is [%d], "
+                        "the rank of Input(Label) is [%d].",
+                        rank,
+                        label_dims.size()));
+
+  bool check = true;
+  if ((!config.is_runtime) &&
+      (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) {
+    check = false;
+  }
+
+  if (check) {
+    PADDLE_ENFORCE_EQ(input_dims,
+                      label_dims,
+                      phi::errors::InvalidArgument(
+                          "Input(X) and Input(Label) shall have the same "
+                          "shape. But received: the shape of Input(X) is "
+                          "[%s], the shape of Input(Label) is [%s].",
+                          input_dims,
+                          label_dims));
+  }
+
+  out->set_dims(input_dims);
+  out->set_dtype(input.dtype());
+  out->share_lod(input);
+}
+
+void BincountInferMeta(const MetaTensor& x,
+                       const paddle::optional<const MetaTensor&> weights,
+                       int minlength,
+                       MetaTensor* out) {
+  auto input_dim = x.dims();
+
+  PADDLE_ENFORCE_GE(minlength,
+                    0,
+                    phi::errors::InvalidArgument(
+                        "The minlength should be greater than or equal to 0."
+                        "But received minlength is %d",
+                        minlength));
+
+  PADDLE_ENFORCE_EQ(input_dim.size(),
+                    1,
+                    phi::errors::InvalidArgument(
+                        "The 'shape' of Input(X) must be 1-D tensor."
+                        "But the dimension of Input(X) is [%d]",
+                        input_dim.size()));
+
+  if (weights.is_initialized()) {
+    auto weights_dim = weights->dims();
+    PADDLE_ENFORCE_EQ(weights_dim.size(),
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The 'shape' of Input(Weights) must be 1-D tensor."
+                          "But the dimension of Input(Weights) is [%d]",
+                          weights_dim.size()));
+
+    PADDLE_ENFORCE_EQ(
+        weights_dim[0],
+        input_dim[0],
+        phi::errors::InvalidArgument(
+            "The 'shape' of Input(Weights) must be equal to the 'shape' of "
+            "Input(X)."
+            "But received: the 'shape' of Input(Weights) is [%s],"
+            "the 'shape' of Input(X) is [%s]",
+            weights_dim,
+            input_dim));
+  }
+  out->set_dims(phi::make_ddim({-1}));
+  if (weights.is_initialized()) {
+    out->set_dtype(weights->dtype());
+  } else {
+    out->set_dtype(x.dtype());
+  }
+
+  out->share_lod(x);
+}
+
+void CholeskySolveInferMeta(const MetaTensor& x,
+                            const MetaTensor& y,
+                            bool upper,
+                            MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+
+  auto x_dims_n = x_dims.size();
+  auto y_dims_n = y_dims.size();
+
+  PADDLE_ENFORCE_GE(x_dims_n,
+                    2,
+                    phi::errors::InvalidArgument(
+                        "the rank of input Y must greater or equal to 2"));
+  PADDLE_ENFORCE_GE(y_dims_n,
+                    2,
+                    phi::errors::InvalidArgument(
+                        "the rank of input X must greater or equal to 2"));
+  PADDLE_ENFORCE_EQ(
+      y_dims[y_dims_n - 1],
+      y_dims[y_dims_n - 2],
+      phi::errors::InvalidArgument("input Matrix Y should be square matrix,"
+                                   "But Got last shape of %ld x %ld",
+                                   y_dims[y_dims_n - 1],
+                                   y_dims[y_dims_n - 2]));
+  PADDLE_ENFORCE_EQ(
+      x_dims[x_dims_n - 2],
+      y_dims[y_dims_n - 2],
+      phi::errors::InvalidArgument("the first dim of Matrix X must be equal to "
+                                   "the fisrt dim of Matrix Y,"
+                                   "But Got %ld and %ld",
+                                   x_dims[x_dims_n - 2],
+                                   y_dims[y_dims_n - 2]));
+
+  std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
+  std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
+
+  std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(),
+                                      x_dims_vec.end() - 2);
+  std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(),
+                                      y_dims_vec.end() - 2);
+
+  std::vector<int64_t> expand_batch_portion =
+      funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
+
+  std::vector<int64_t> x_broadcast_dims({expand_batch_portion});
+  x_broadcast_dims.insert(x_broadcast_dims.end(),
+                          {x_dims_vec[x_dims_n - 2],
+                           x_dims_vec[x_dims_n - 1]});
+
+  // dim of 'out' is the same with 'X' after broadcast
+  out->set_dims(phi::make_ddim(x_broadcast_dims));
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+  out->share_lod(x);
+}
+
 void CompareInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
                       int axis,
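CholeskySolveInferMeta above derives the output's leading batch dimensions with funcs::MatrixGetBroadcastBatchPortion before appending the matrix dims of X. Assuming it follows the usual right-aligned, size-1-stretches broadcasting rule, a plausible standalone reimplementation (an illustration, not Paddle's source; mismatched non-1 dims are not validated here) looks like:

// Standalone sketch of numpy-style batch-dim broadcasting.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> broadcast_batch(std::vector<int64_t> a,
                                     std::vector<int64_t> b) {
  if (a.size() < b.size()) std::swap(a, b);  // a is the longer shape
  std::vector<int64_t> out(a);
  size_t offset = a.size() - b.size();       // right-align the shorter shape
  for (size_t i = 0; i < b.size(); ++i) {
    int64_t x = a[offset + i], y = b[i];
    out[offset + i] = (x == 1) ? y : x;      // size-1 dims stretch
  }
  return out;
}

int main() {
  // X batch dims {2, 1}, Y batch dims {3}: broadcast to {2, 3}
  for (int64_t d : broadcast_batch({2, 1}, {3})) std::cout << d << " ";
  std::cout << "\n";
}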
@@ -67,6 +214,74 @@ void CompareAllInferMeta(const MetaTensor& x,
   out->set_dtype(DataType::BOOL);
 }

+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out) {
+  auto x_dim = x.dims();
+  auto y_dim = y.dims();
+  auto dim = axis;
+
+  bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
+  PADDLE_ENFORCE_EQ(
+      dims_match,
+      true,
+      phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to "
+                                   "the 'shape' of Input(Y). But received "
+                                   "Input(X).dimensions = [%s], "
+                                   "Input(Y).dimensions = [%s]",
+                                   x_dim,
+                                   y_dim));
+
+  if (dim != DDim::kMaxRank) {
+    PADDLE_ENFORCE_EQ(
+        dim < x_dim.size() && dim >= (0 - x_dim.size()),
+        true,
+        phi::errors::OutOfRange(
+            "Attr(dim) is out of range, It's expected "
+            "to be in range of [-%d, %d]. But received Attr(dim) = %d.",
+            x_dim.size(),
+            x_dim.size() - 1,
+            dim));
+    if (dim < 0) {
+      dim += x_dim.size();
+    }
+    PADDLE_ENFORCE_EQ(
+        x_dim[dim] == 3 && y_dim[dim] == 3,
+        true,
+        phi::errors::InvalidArgument(
+            "Input(X/Y).dims()[dim] should be equal to 3."
+            "But received Input(X/Y).dims()[dim] = %d.",
+            x_dim[dim]));
+  }
+  out->set_dims(x_dim);
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+  out->share_lod(x);
+}
+
+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+
+  PADDLE_ENFORCE_NE(phi::product(x_dims),
+                    0,
+                    phi::errors::InvalidArgument(
+                        "The Input(X) has not been initialized properly. The "
+                        "shape of Input(X) = [%s].",
+                        x_dims));
+  PADDLE_ENFORCE_NE(phi::product(y_dims),
+                    0,
+                    phi::errors::InvalidArgument(
+                        "The Input(Y) has not been initialized properly. The "
+                        "shape of Input(Y) = [%s].",
+                        y_dims));
+  out->set_dims({1});
+  out->set_dtype(x.dtype());
+}
+
 void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
   auto x_dims = x.dims();
   auto x_rank = static_cast<size_t>(x_dims.size());
@@ -109,84 +324,11 @@ void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
   out->set_layout(x.layout());
 }

-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out) {
-  ...  // body unchanged; the full definition is re-added in alphabetical
-       // position further down
-}
-
 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out) {
   return ElementwiseRawInferMeta(x, y, -1, std::move(out));
 }

 void ElementwiseRawInferMeta(const MetaTensor& x,
                              const MetaTensor& y,
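The MatmulInferMeta body removed here (and re-added in alphabetical position further down) encodes the standard matmul shape rule: 1-D operands are temporarily promoted, batch dims broadcast elementwise, and M/N are picked according to the transpose flags. A distilled standalone version with a usage check (illustrative only; the real function also sets dtype/layout and reports errors through PADDLE_ENFORCE):

// Distilled standalone version of the matmul shape rule shown in this diff.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> matmul_shape(std::vector<int64_t> x,
                                  std::vector<int64_t> y,
                                  bool trans_x, bool trans_y) {
  bool x_vec = x.size() == 1, y_vec = y.size() == 1;
  if (x_vec) x.insert(x.begin(), 1);  // promote vector to 1 x K
  if (y_vec) y.push_back(1);          // promote vector to K x 1
  int64_t m = trans_x ? x[x.size() - 1] : x[x.size() - 2];
  int64_t n = trans_y ? y[y.size() - 2] : y[y.size() - 1];

  std::vector<int64_t> out;
  if (x.size() > y.size()) {
    out.assign(x.begin(), x.end() - 2);  // batch dims come from x
  } else if (x.size() < y.size()) {
    out.assign(y.begin(), y.end() - 2);  // batch dims come from y
  } else {
    for (size_t i = 0; i + 2 < x.size(); ++i)
      out.push_back(std::max(x[i], y[i]));  // broadcast batch dims
  }
  if (!x_vec) out.push_back(m);
  if (!y_vec) out.push_back(n);
  if (x_vec && y_vec) out.push_back(1);  // dot-product-like result
  return out;
}

int main() {
  // [8, 2, 3] x [8, 3, 5] -> [8, 2, 5]
  assert((matmul_shape({8, 2, 3}, {8, 3, 5}, false, false) ==
          std::vector<int64_t>{8, 2, 5}));
}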
@@ -223,383 +365,19 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
     funcs::GetBroadcastDimsArrays(x_dims,
                                   y_dims,
                                   x_dims_array.data(),
                                   y_dims_array.data(),
                                   out_dims_array.data(),
                                   max_dim,
                                   axis);
     auto out_dims = phi::make_ddim(out_dims_array);
     out->set_dims(out_dims);
   } else {
     out->set_dims(x.dims());
   }

   out->set_dtype(x.dtype());
   out->set_layout(x.layout());
   out->share_lod(x);
 }

-void HuberLossInferMeta(const MetaTensor& input,
-                        const MetaTensor& label,
-                        float delta,
-                        MetaTensor* out,
-                        MetaTensor* residual,
-                        MetaConfig config) {
-  ...  // body unchanged; relocated after GatherTreeMeta below
-}
-
-void CholeskySolveInferMeta(const MetaTensor& x,
-                            const MetaTensor& y,
-                            bool upper,
-                            MetaTensor* out) {
-  ...  // body unchanged; relocated near the top of the file
-}
-
-void TriangularSolveInferMeta(const MetaTensor& x,
-                              const MetaTensor& y,
-                              bool upper,
-                              bool transpose,
-                              bool unitriangular,
-                              MetaTensor* out) {
-  ...  // body unchanged; relocated to the end of the file
-}
-
-void IndexSampleInferMeta(const MetaTensor& x,
-                          const MetaTensor& y,
-                          MetaTensor* out,
-                          MetaConfig config) {
-  ...  // body unchanged; relocated below
-}
-
-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out) {
-  ...  // body unchanged; relocated above
-}
-
-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
-  out->share_meta(x);
-}
-
-void SegmentPoolInferMeta(const MetaTensor& x,
-                          const MetaTensor& segment_ids,
-                          const std::string& pooltype,
-                          MetaTensor* out,
-                          MetaTensor* summed_ids,
-                          MetaConfig config) {
-  ...  // body unchanged; relocated below
-}
-
-void BCELossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      MetaTensor* out,
-                      MetaConfig config) {
-  ...  // body unchanged; relocated near the top of the file
-}
-
-void BincountInferMeta(const MetaTensor& x,
-                       const paddle::optional<const MetaTensor&> weights,
-                       int minlength,
-                       MetaTensor* out) {
-  ...  // body unchanged; relocated near the top of the file
-}
-
-void DistInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   float p,
-                   MetaTensor* out) {
-  ...  // body unchanged; relocated above
-}
-
 void GatherNdInferMeta(const MetaTensor& x,
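The surviving context above ends ElementwiseRawInferMeta, which resolves broadcast shapes through funcs::GetBroadcastDimsArrays with an axis offset. A simplified standalone sketch of that rule, assuming x carries the larger rank and treating -1 as an unknown compile-time dim (an illustration, not Paddle's implementation):

// Simplified standalone sketch of axis-offset elementwise broadcasting.
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>

std::vector<int64_t> broadcast_dims(const std::vector<int64_t>& x,
                                    const std::vector<int64_t>& y,
                                    int axis) {
  // axis == -1 means "align y to the trailing dims of x"
  if (axis == -1) axis = static_cast<int>(x.size() - y.size());
  std::vector<int64_t> out(x);
  for (size_t i = 0; i < y.size(); ++i) {
    int64_t a = x[axis + i], b = y[i];
    if (a == -1 || b == -1) out[axis + i] = -1;  // unknown at compile time
    else if (a == 1) out[axis + i] = b;          // broadcast x's dim
    else if (b != 1 && a != b) std::abort();     // incompatible shapes
  }
  return out;
}

int main() {
  // x: [2, 3, 4, 5], y: [3, 4], axis 1 -> out: [2, 3, 4, 5]
  for (int64_t d : broadcast_dims({2, 3, 4, 5}, {3, 4}, 1))
    std::cout << d << " ";
  std::cout << "\n";
}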
@@ -648,6 +426,78 @@ void GatherTreeMeta(const MetaTensor& ids,
   out->set_dims(ids_dims);
 }

+void HuberLossInferMeta(const MetaTensor& input,
+                        const MetaTensor& label,
+                        float delta,
+                        MetaTensor* out,
+                        MetaTensor* residual,
+                        MetaConfig config) {
+  auto input_dims = input.dims();
+  auto label_dims = label.dims();
+
+  PADDLE_ENFORCE_EQ(input_dims.size(),
+                    label_dims.size(),
+                    phi::errors::InvalidArgument(
+                        "Input(input) rank and Input(label) rank should be "
+                        "same, but received input rank(%d) != label rank(%d)",
+                        input_dims.size(),
+                        label_dims.size()));
+
+  bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) ||
+                             phi::contain_unknown_dim(label_dims);
+  if (config.is_runtime || !contain_unknown_dim) {
+    PADDLE_ENFORCE_EQ(
+        input_dims,
+        label_dims,
+        phi::errors::InvalidArgument(
+            "The Input(input) and Input(label) should have the same "
+            "shape, but received input shape [%s] != label shape [%s]",
+            input_dims,
+            label_dims));
+  }
+
+  auto out_dims = label_dims;
+  residual->set_dims(out_dims);
+  out->set_dims(out_dims);
+  out->share_lod(input);
+}
+
+void IndexSampleInferMeta(const MetaTensor& x,
+                          const MetaTensor& y,
+                          MetaTensor* out,
+                          MetaConfig config) {
+  auto input_dims = x.dims();
+  PADDLE_ENFORCE_EQ(input_dims.size(),
+                    2,
+                    errors::InvalidArgument(
+                        "Inputs(X) shape of IndexSample op should be 2-D, but "
+                        "got X's shape = [%s], please check X shape.",
+                        input_dims));
+
+  auto index_dims = y.dims();
+  PADDLE_ENFORCE_EQ(
+      index_dims.size(),
+      2,
+      errors::InvalidArgument(
+          "Inputs(Index) shape of IndexSample op should be 2-D, but "
+          "got Index's shape [%s] , please check index shape.",
+          input_dims));
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(input_dims[0],
+                      index_dims[0],
+                      errors::InvalidArgument(
+                          "Inputs(X)'s value of dimension 0 must same with "
+                          "Inputs(Index)'s value of dimension 0, but "
+                          "got %d of Inputs(X), and got %d of Inputs(Index), "
+                          "please check Inputs shape.",
+                          input_dims[0],
+                          index_dims[0]));
+  }
+  out->set_dtype(x.dtype());
+  out->set_dims(index_dims);
+  out->share_lod(y);
+}
+
 void LogLossInferMeta(const MetaTensor& input,
                       const MetaTensor& label,
                       float epsilon,
@@ -690,6 +540,79 @@ void LogLossInferMeta(const MetaTensor& input,
   out->share_lod(input);
 }

+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out) {
+  std::vector<int64_t> dims_x = phi::vectorize(x.dims());
+  std::vector<int64_t> dims_y = phi::vectorize(y.dims());
+  auto ndims_x = dims_x.size();
+  auto ndims_y = dims_y.size();
+  PADDLE_ENFORCE_GT(ndims_x,
+                    0UL,
+                    phi::errors::InvalidArgument(
+                        "The Input(x) dims size must be greater than 0,"
+                        " but reviced dims size is 0. "));
+  PADDLE_ENFORCE_GT(ndims_y,
+                    0UL,
+                    phi::errors::InvalidArgument(
+                        "The Input(y) dims size must be greater than 0,"
+                        " but reviced dims size is 0. "));
+
+  bool x_broadcasted = false, y_broadcasted = false;
+  if (ndims_x == 1) {
+    dims_x.insert(dims_x.begin(), 1);
+    ndims_x = 2;
+    x_broadcasted = true;
+  }
+
+  if (ndims_y == 1) {
+    dims_y.push_back(1);
+    ndims_y = 2;
+    y_broadcasted = true;
+  }
+
+  size_t M, N;
+  if (trans_x) {
+    M = dims_x[ndims_x - 1];
+  } else {
+    M = dims_x[ndims_x - 2];
+  }
+  if (trans_y) {
+    N = dims_y[ndims_y - 2];
+  } else {
+    N = dims_y[ndims_y - 1];
+  }
+
+  std::vector<int64_t> new_dims;
+  if (ndims_x > ndims_y) {
+    new_dims.assign(dims_x.begin(), dims_x.end() - 2);
+  } else if (ndims_x < ndims_y) {
+    new_dims.assign(dims_y.begin(), dims_y.end() - 2);
+  } else {
+    new_dims.reserve(ndims_x);
+    for (size_t i = 0; i < ndims_x - 2; ++i) {
+      new_dims.push_back(std::max(dims_x[i], dims_y[i]));
+    }
+  }
+  if (!x_broadcasted) {
+    new_dims.push_back(M);
+  }
+  if (!y_broadcasted) {
+    new_dims.push_back(N);
+  }
+  if (x_broadcasted && y_broadcasted) {
+    new_dims.push_back(1);
+  }
+
+  auto ddim_out = phi::make_ddim(new_dims);
+
+  out->set_dims(ddim_out);
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+}
+
 void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
   auto dim_x = x.dims();
   auto dim_vec = vec.dims();
@@ -720,6 +643,25 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
   out->share_lod(x);
 }

+void SegmentPoolInferMeta(const MetaTensor& x,
+                          const MetaTensor& segment_ids,
+                          const std::string& pooltype,
+                          MetaTensor* out,
+                          MetaTensor* summed_ids,
+                          MetaConfig config) {
+  auto dims = x.dims();
+  dims[0] = -1;
+  out->set_dims(dims);
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+
+  if (pooltype == "MEAN") {
+    summed_ids->set_dims({-1, 1});
+    summed_ids->set_dtype(x.dtype());
+    summed_ids->set_layout(x.layout());
+  }
+}
+
 void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             const MetaTensor& label,
                                             bool normalize,
@@ -761,4 +703,63 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
   out->share_lod(x);
 }

+void TriangularSolveInferMeta(const MetaTensor& x,
+                              const MetaTensor& y,
+                              bool upper,
+                              bool transpose,
+                              bool unitriangular,
+                              MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+
+  auto x_dims_n = x_dims.size();
+  auto y_dims_n = y_dims.size();
+
+  PADDLE_ENFORCE_GE(x_dims_n,
+                    2,
+                    phi::errors::InvalidArgument(
+                        "The input tensor X's dimensions of TriangularSolveOp "
+                        "should be >= 2. But received X's "
+                        "dimensions = %d, X's shape = [%s]",
+                        x_dims.size(),
+                        x_dims));
+
+  PADDLE_ENFORCE_GE(y_dims_n,
+                    2,
+                    phi::errors::InvalidArgument(
+                        "The input tensor Y's dimensions of TriangularSolveOp "
+                        "should be >=2. But received Y's "
+                        "dimensions = %d, Y's shape = [%s]",
+                        y_dims.size(),
+                        y_dims));
+
+  PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2],
+                    x_dims[x_dims_n - 1],
+                    phi::errors::InvalidArgument(
+                        "The inner-most 2 dimensions of Input(X) all should "
+                        "be square matrices "
+                        "But received X's shape[-2] = %d and shape[-1] = %d.",
+                        x_dims[x_dims_n - 2],
+                        x_dims[x_dims_n - 1]));
+
+  std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
+  std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
+
+  std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(),
+                                      x_dims_vec.end() - 2);
+  std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(),
+                                      y_dims_vec.end() - 2);
+
+  std::vector<int64_t> expand_batch_portion =
+      funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
+
+  std::vector<int64_t> y_broadcast_dims({expand_batch_portion});
+  y_broadcast_dims.insert(y_broadcast_dims.end(),
+                          {y_dims_vec[y_dims_n - 2],
+                           y_dims_vec[y_dims_n - 1]});
+
+  // dim of 'out' is the same with 'Y' after broadcast
+  out->set_dims(phi::make_ddim(y_broadcast_dims));
+  out->set_dtype(y.dtype());
+  out->set_layout(y.layout());
+  out->share_lod(y);
+}
+
 }  // namespace phi
paddle/phi/infermeta/binary.h

@@ -29,22 +29,43 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.

+void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+
+void BCELossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());
+
+void BincountInferMeta(const MetaTensor& x,
+                       const paddle::optional<const MetaTensor&> weights,
+                       int minlength,
+                       MetaTensor* out);
+
+void CholeskySolveInferMeta(const MetaTensor& x,
+                            const MetaTensor& y,
+                            bool upper,
+                            MetaTensor* out);
+
+void CompareAllInferMeta(const MetaTensor& x,
+                         const MetaTensor& y,
+                         MetaTensor* out);
+
 void CompareInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
                       int axis,
                       MetaTensor* out);

-void CompareAllInferMeta(const MetaTensor& x,
-                         const MetaTensor& y,
-                         MetaTensor* out);
+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out);

-void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out);

-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out);
+void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);

 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
@@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
                              int axis,
                              MetaTensor* out);

+void GatherNdInferMeta(const MetaTensor& x,
+                       const MetaTensor& index,
+                       MetaTensor* out);
+
+void GatherTreeMeta(const MetaTensor& ids,
+                    const MetaTensor& parents,
+                    MetaTensor* out);
+
 void HuberLossInferMeta(const MetaTensor& input_meta,
                         const MetaTensor& label_meta,
                         float delta,
@@ -62,29 +91,24 @@ void HuberLossInferMeta(const MetaTensor& input_meta,
                         MetaTensor* residual,
                         MetaConfig config = MetaConfig());

-void CholeskySolveInferMeta(const MetaTensor& x,
-                            const MetaTensor& y,
-                            bool upper,
-                            MetaTensor* out);
-
-void TriangularSolveInferMeta(const MetaTensor& x,
-                              const MetaTensor& y,
-                              bool upper,
-                              bool transpose,
-                              bool unitriangular,
-                              MetaTensor* out);
-
 void IndexSampleInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out,
                           MetaConfig config = MetaConfig());

-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out);
+void LogLossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      float epsilon,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());

-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out);
+
+void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);

 void SegmentPoolInferMeta(const MetaTensor& x,
                           const MetaTensor& segment_ids,
@@ -93,37 +117,6 @@ void SegmentPoolInferMeta(const MetaTensor& x,
                           MetaTensor* summed_ids,
                           MetaConfig config = MetaConfig());

-void BCELossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      MetaTensor* out,
-                      MetaConfig config = MetaConfig());
-
-void BincountInferMeta(const MetaTensor& x,
-                       const paddle::optional<const MetaTensor&> weights,
-                       int minlength,
-                       MetaTensor* out);
-
-void DistInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   float p,
-                   MetaTensor* out);
-
-void GatherNdInferMeta(const MetaTensor& x,
-                       const MetaTensor& index,
-                       MetaTensor* out);
-
-void GatherTreeMeta(const MetaTensor& ids,
-                    const MetaTensor& parents,
-                    MetaTensor* out);
-
-void LogLossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      float epsilon,
-                      MetaTensor* out,
-                      MetaConfig config = MetaConfig());
-
-void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
-
 void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             const MetaTensor& label,
                                             bool normalize,
@@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             MetaTensor* out,
                                             MetaConfig config = MetaConfig());

+void TriangularSolveInferMeta(const MetaTensor& x,
+                              const MetaTensor& y,
+                              bool upper,
+                              bool transpose,
+                              bool unitriangular,
+                              MetaTensor* out);
+
 }  // namespace phi
paddle/phi/infermeta/nullary.cc

@@ -16,6 +16,12 @@ limitations under the License. */
 namespace phi {

+void CreateInferMeta(const ScalarArray& shape,
+                     DataType dtype,
+                     MetaTensor* out) {
+  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
+}
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
@@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector<int64_t>& shape,
   out->set_layout(layout);
 }

-void CreateInferMeta(const ScalarArray& shape,
-                     DataType dtype,
-                     MetaTensor* out) {
-  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
-}
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
@@ -41,18 +41,6 @@ void EyeInferMeta(int64_t num_rows,
   out->set_dtype(dtype);
 }

-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out) {
-  auto out_dims = phi::make_ddim(shape);
-  out->set_dims(out_dims);
-  out->set_dtype(dtype);
-  out->set_layout(DataLayout::NCHW);
-}
-
 void GaussianRandomInferMeta(const ScalarArray& shape,
                              float mean,
                              float std,
@@ -65,4 +53,16 @@ void GaussianRandomInferMeta(const ScalarArray& shape,
   out->set_layout(DataLayout::NCHW);
 }

+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
+                                      DataType dtype,
+                                      MetaTensor* out) {
+  auto out_dims = phi::make_ddim(shape);
+  out->set_dims(out_dims);
+  out->set_dtype(dtype);
+  out->set_layout(DataLayout::NCHW);
+}
+
 }  // namespace phi
paddle/phi/infermeta/nullary.h

@@ -28,25 +28,18 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.

+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
                          MetaTensor* out);

-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
                   MetaTensor* out);

-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out);
-
 void GaussianRandomInferMeta(const ScalarArray& shape,
                              float mean,
                              float std,
@@ -54,4 +47,11 @@ void GaussianRandomInferMeta(const ScalarArray& shape,
                              DataType dtype,
                              MetaTensor* out);

+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
+                                      DataType dtype,
+                                      MetaTensor* out);
+
 }  // namespace phi
paddle/phi/infermeta/ternary.cc
浏览文件 @
f7765991
...
@@ -18,6 +18,58 @@ limitations under the License. */
...
@@ -18,6 +18,58 @@ limitations under the License. */
namespace
phi
{
namespace
phi
{
void
AccuracyInferMeta
(
const
MetaTensor
&
out
,
const
MetaTensor
&
indice
,
const
MetaTensor
&
label
,
MetaTensor
*
accuracy
,
MetaTensor
*
correct
,
MetaTensor
*
total
,
MetaConfig
config
)
{
auto
inference_dim
=
out
.
dims
();
auto
label_dim
=
label
.
dims
();
// Assume indices has same shape as inference, because
// it's the output of topk.
PADDLE_ENFORCE_EQ
(
label_dim
.
size
(),
2
,
phi
::
errors
::
InvalidArgument
(
"ShapeError: label's dimensions of AccuracyOp must be 2. "
"But received label's dimensions = %d, label's shape = [%s]"
,
label_dim
.
size
(),
label_dim
));
if
(
config
.
is_runtime
)
{
PADDLE_ENFORCE_EQ
(
label_dim
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"ShapeError: label's second dimension of "
"AccuracyOp must be 1. But received label's "
"second dimension is = %d, label's shape = [%s]"
,
label_dim
[
1
],
label_dim
));
PADDLE_ENFORCE_EQ
(
inference_dim
[
0
],
label_dim
[
0
],
phi
::
errors
::
InvalidArgument
(
"ShapeError: the output's num_rows of AccuracyOp must be"
" the same as label's num_rows. But received output's "
"shape = [%s], label's shape = [%s], output's num_rows = %d, "
"label's "
"num_rows = %d"
,
inference_dim
,
label_dim
,
inference_dim
[
0
],
label_dim
[
0
]));
}
accuracy
->
set_dims
({
1
});
accuracy
->
set_dtype
(
out
.
dtype
());
correct
->
set_dims
({
1
});
correct
->
set_dtype
(
out
.
dtype
());
total
->
set_dims
({
1
});
total
->
set_dtype
(
out
.
dtype
());
accuracy
->
share_lod
(
out
);
}
void
AddmmInferMeta
(
const
MetaTensor
&
input
,
void
AddmmInferMeta
(
const
MetaTensor
&
input
,
const
MetaTensor
&
x
,
const
MetaTensor
&
x
,
const
MetaTensor
&
y
,
const
MetaTensor
&
y
,
...
@@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input,
...
@@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input,
out
->
set_dtype
(
input
.
dtype
());
out
->
set_dtype
(
input
.
dtype
());
}
}
void
GraphSendRecvInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
src_index
,
const
MetaTensor
&
dst_index
,
const
std
::
string
&
pool_type
,
MetaTensor
*
out
,
MetaTensor
*
dst_count
)
{
auto
src_index_dims
=
src_index
.
dims
();
if
(
src_index_dims
.
size
()
==
2
)
{
PADDLE_ENFORCE_EQ
(
src_index_dims
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"The last dim of Src_index should be 1 when it "
"is 2D, but we get %d"
,
src_index_dims
[
1
]));
}
else
{
PADDLE_ENFORCE_EQ
(
src_index_dims
.
size
(),
1
,
phi
::
errors
::
InvalidArgument
(
"The Src_index should be 1D, when it is not 2D, but we get %d"
,
src_index_dims
.
size
()));
}
auto
dst_index_dims
=
dst_index
.
dims
();
if
(
dst_index_dims
.
size
()
==
2
)
{
PADDLE_ENFORCE_EQ
(
dst_index_dims
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"The last dim of Dst_index should be 1 when it "
"is 2D, but we get %d"
,
dst_index_dims
[
1
]));
}
else
{
PADDLE_ENFORCE_EQ
(
dst_index_dims
.
size
(),
1
,
phi
::
errors
::
InvalidArgument
(
"The Dst_index should be 1D, "
"when it is not 2D, but we get %d"
,
dst_index_dims
.
size
()));
}
PADDLE_ENFORCE_EQ
(
src_index_dims
[
0
],
dst_index_dims
[
0
],
phi
::
errors
::
InvalidArgument
(
"Src_index and Dst_index should have the same shape."
));
auto
dims
=
x
.
dims
();
out
->
set_dims
(
dims
);
out
->
set_dtype
(
x
.
dtype
());
if
(
pool_type
==
"MEAN"
)
{
dst_count
->
set_dims
({
dims
[
0
]});
dst_count
->
set_dtype
(
DataType
::
INT32
);
}
}
void
LerpInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
y
,
const
MetaTensor
&
weight
,
MetaTensor
*
out
)
{
auto
x_dims
=
x
.
dims
();
auto
y_dims
=
y
.
dims
();
auto
w_dims
=
weight
.
dims
();
DDim
out_dims
;
out_dims
=
funcs
::
GetOutputDims
(
x_dims
,
y_dims
);
if
(
w_dims
.
size
()
>
1
||
w_dims
[
0
]
!=
1
)
{
out_dims
=
funcs
::
GetOutputDims
(
out_dims
,
w_dims
);
}
out
->
set_dims
(
out_dims
);
out
->
set_dtype
(
x
.
dtype
());
out
->
share_lod
(
x
);
}
void
LinspaceInferMeta
(
const
MetaTensor
&
start
,
const
MetaTensor
&
stop
,
const
MetaTensor
&
number
,
MetaTensor
*
out
)
{
auto
s_dims
=
start
.
dims
();
PADDLE_ENFORCE_EQ
(
(
s_dims
.
size
()
==
1
)
&&
(
s_dims
[
0
]
==
1
),
true
,
phi
::
errors
::
InvalidArgument
(
"The shape of Input(Start) must be [1],"
"but received input shape is [%s]."
,
s_dims
));
auto
e_dims
=
stop
.
dims
();
PADDLE_ENFORCE_EQ
(
(
e_dims
.
size
()
==
1
)
&&
(
e_dims
[
0
]
==
1
),
true
,
phi
::
errors
::
InvalidArgument
(
"The shape of Input(Stop) must be [1],"
"but received input shape is [%s]."
,
e_dims
));
auto
step_dims
=
number
.
dims
();
PADDLE_ENFORCE_EQ
(
(
step_dims
.
size
()
==
1
)
&&
(
step_dims
[
0
]
==
1
),
true
,
phi
::
errors
::
InvalidArgument
(
"The shape of Input(Num) must be [1],"
"but received input shape is [%s]."
,
step_dims
));
out
->
set_dims
(
phi
::
make_ddim
({
-
1
}));
out
->
set_dtype
(
start
.
dtype
());
}
void
NllLossRawInferMeta
(
const
MetaTensor
&
input
,
void
NllLossRawInferMeta
(
const
MetaTensor
&
input
,
const
MetaTensor
&
label
,
const
MetaTensor
&
label
,
paddle
::
optional
<
const
MetaTensor
&>
weight
,
paddle
::
optional
<
const
MetaTensor
&>
weight
,
...
@@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
...
@@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
scores
->
set_dtype
(
length
.
dtype
());
scores
->
set_dtype
(
length
.
dtype
());
}
}
void
LerpInferMeta
(
const
MetaTensor
&
x
,
const
MetaTensor
&
y
,
const
MetaTensor
&
weight
,
MetaTensor
*
out
)
{
auto
x_dims
=
x
.
dims
();
auto
y_dims
=
y
.
dims
();
auto
w_dims
=
weight
.
dims
();
DDim
out_dims
;
out_dims
=
funcs
::
GetOutputDims
(
x_dims
,
y_dims
);
if
(
w_dims
.
size
()
>
1
||
w_dims
[
0
]
!=
1
)
{
out_dims
=
funcs
::
GetOutputDims
(
out_dims
,
w_dims
);
}
out
->
set_dims
(
out_dims
);
out
->
set_dtype
(
x
.
dtype
());
out
->
share_lod
(
x
);
}
void
LinspaceInferMeta
(
const
MetaTensor
&
start
,
const
MetaTensor
&
stop
,
const
MetaTensor
&
number
,
MetaTensor
*
out
)
{
auto
s_dims
=
start
.
dims
();
PADDLE_ENFORCE_EQ
(
(
s_dims
.
size
()
==
1
)
&&
(
s_dims
[
0
]
==
1
),
true
,
phi
::
errors
::
InvalidArgument
(
"The shape of Input(Start) must be [1],"
"but received input shape is [%s]."
,
s_dims
));
auto
e_dims
=
stop
.
dims
();
PADDLE_ENFORCE_EQ
(
(
e_dims
.
size
()
==
1
)
&&
(
e_dims
[
0
]
==
1
),
true
,
phi
::
errors
::
InvalidArgument
(
"The shape of Input(Stop) must be [1],"
"but received input shape is [%s]."
,
e_dims
));
auto
step_dims
=
number
.
dims
();
PADDLE_ENFORCE_EQ
(
(
step_dims
.
size
()
==
1
)
&&
(
step_dims
[
0
]
==
1
),
true
,
phi
::
errors
::
InvalidArgument
(
"The shape of Input(Num) must be [1],"
"but received input shape is [%s]."
,
step_dims
));
out
->
set_dims
(
phi
::
make_ddim
({
-
1
}));
out
->
set_dtype
(
start
.
dtype
());
}
void
AccuracyInferMeta
(
const
MetaTensor
&
out
,
const
MetaTensor
&
indice
,
const
MetaTensor
&
label
,
MetaTensor
*
accuracy
,
MetaTensor
*
correct
,
MetaTensor
*
total
,
MetaConfig
config
)
{
auto
inference_dim
=
out
.
dims
();
auto
label_dim
=
label
.
dims
();
// Assume indices has same shape as inference, because
// it's the output of topk.
PADDLE_ENFORCE_EQ
(
label_dim
.
size
(),
2
,
phi
::
errors
::
InvalidArgument
(
"ShapeError: label's dimensions of AccuracyOp must be 2. "
"But received label's dimensions = %d, label's shape = [%s]"
,
label_dim
.
size
(),
label_dim
));
if
(
config
.
is_runtime
)
{
PADDLE_ENFORCE_EQ
(
label_dim
[
1
],
1
,
phi
::
errors
::
InvalidArgument
(
"ShapeError: label's second dimension of "
"AccuracyOp must be 1. But received label's "
"second dimension is = %d, label's shape = [%s]"
,
label_dim
[
1
],
label_dim
));
PADDLE_ENFORCE_EQ
(
inference_dim
[
0
],
label_dim
[
0
],
phi
::
errors
::
InvalidArgument
(
"ShapeError: the output's num_rows of AccuracyOp must be"
" the same as label's num_rows. But received output's "
"shape = [%s], label's shape = [%s], output's num_rows = %d, "
"label's "
"num_rows = %d"
,
inference_dim
,
label_dim
,
inference_dim
[
0
],
label_dim
[
0
]));
}
accuracy
->
set_dims
({
1
});
accuracy
->
set_dtype
(
out
.
dtype
());
correct
->
set_dims
({
1
});
correct
->
set_dtype
(
out
.
dtype
());
total
->
set_dims
({
1
});
total
->
set_dtype
(
out
.
dtype
());
accuracy
->
share_lod
(
out
);
}
void GraphSendRecvInferMeta(const MetaTensor& x,
                            const MetaTensor& src_index,
                            const MetaTensor& dst_index,
                            const std::string& pool_type,
                            MetaTensor* out,
                            MetaTensor* dst_count) {
  auto src_index_dims = src_index.dims();
  if (src_index_dims.size() == 2) {
    PADDLE_ENFORCE_EQ(src_index_dims[1],
                      1,
                      phi::errors::InvalidArgument(
                          "The last dim of Src_index should be 1 when it "
                          "is 2D, but we get %d",
                          src_index_dims[1]));
  } else {
    PADDLE_ENFORCE_EQ(
        src_index_dims.size(),
        1,
        phi::errors::InvalidArgument(
            "The Src_index should be 1D, when it is not 2D, but we get %d",
            src_index_dims.size()));
  }

  auto dst_index_dims = dst_index.dims();
  if (dst_index_dims.size() == 2) {
    PADDLE_ENFORCE_EQ(dst_index_dims[1],
                      1,
                      phi::errors::InvalidArgument(
                          "The last dim of Dst_index should be 1 when it "
                          "is 2D, but we get %d",
                          dst_index_dims[1]));
  } else {
    PADDLE_ENFORCE_EQ(
        dst_index_dims.size(),
        1,
        phi::errors::InvalidArgument("The Dst_index should be 1D, "
                                     "when it is not 2D, but we get %d",
                                     dst_index_dims.size()));
  }

  PADDLE_ENFORCE_EQ(src_index_dims[0],
                    dst_index_dims[0],
                    phi::errors::InvalidArgument(
                        "Src_index and Dst_index should have the same shape."));

  auto dims = x.dims();
  out->set_dims(dims);
  out->set_dtype(x.dtype());

  if (pool_type == "MEAN") {
    dst_count->set_dims({dims[0]});
    dst_count->set_dtype(DataType::INT32);
  }
}

}  // namespace phi
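As a quick cross-check of GraphSendRecvInferMeta above: the two index tensors may be [n] or [n, 1], must agree in n, the output inherits x's dims, and MEAN pooling additionally gets a per-row count. A standalone sketch of that contract (illustrative only; `Shape` and the helper names are made up, not phi code):

#include <cassert>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of GraphSendRecvInferMeta's shape logic.
void GraphSendRecvShapes(const Shape& x, const Shape& src_index,
                         const Shape& dst_index, const std::string& pool_type,
                         Shape* out, Shape* dst_count) {
  auto check_index = [](const Shape& idx) {
    if (!(idx.size() == 1 || (idx.size() == 2 && idx[1] == 1)))
      throw std::invalid_argument("index must be [n] or [n, 1]");
  };
  check_index(src_index);
  check_index(dst_index);
  if (src_index[0] != dst_index[0])
    throw std::invalid_argument("src/dst index length mismatch");
  *out = x;                                      // output inherits x's dims
  if (pool_type == "MEAN") *dst_count = {x[0]};  // one count per output row
}

int main() {
  Shape out, cnt;
  GraphSendRecvShapes({10, 16}, {30}, {30, 1}, "MEAN", &out, &cnt);
  assert((out == Shape{10, 16} && cnt == Shape{10}));
}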
paddle/phi/infermeta/ternary.h
@@ -45,16 +45,22 @@ void AddmmInferMeta(const MetaTensor& input,
                     float beta,
                     MetaTensor* out);
 
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad);
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count);
 
-void ScatterInferMeta(const MetaTensor& x,
-                      const MetaTensor& index,
-                      const MetaTensor& updates,
-                      bool overwrite,
-                      MetaTensor* out);
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out);
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out);
 
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
@@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input,
                          MetaTensor* total_weight,
                          MetaConfig config = MetaConfig());
 
+void ScatterInferMeta(const MetaTensor& x,
+                      const MetaTensor& index,
+                      const MetaTensor& updates,
+                      bool overwrite,
+                      MetaTensor* out);
+
 void ScatterNdAddInferMeta(const MetaTensor& x,
                            const MetaTensor& index,
                            const MetaTensor& updates,
@@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
                             MetaTensor* path,
                             MetaConfig config = MetaConfig());
 
-void LerpInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   const MetaTensor& weight,
-                   MetaTensor* out);
-
-void LinspaceInferMeta(const MetaTensor& start,
-                       const MetaTensor& stop,
-                       const MetaTensor& number,
-                       MetaTensor* out);
-
-void GraphSendRecvInferMeta(const MetaTensor& x,
-                            const MetaTensor& src_index,
-                            const MetaTensor& dst_index,
-                            const std::string& pool_type,
-                            MetaTensor* out,
-                            MetaTensor* dst_count);
-
 }  // namespace phi
paddle/phi/infermeta/unary.cc
@@ -26,6 +26,82 @@ limitations under the License. */

namespace phi {

void ArgMinMaxInferMeta(const MetaTensor& x,
                        int64_t axis,
                        bool keepdims,
                        bool flatten,
                        int dtype,
                        MetaTensor* out,
                        MetaConfig config) {
  const auto& x_dims = x.dims();
  PADDLE_ENFORCE_GE(
      axis,
      -x_dims.size(),
      phi::errors::InvalidArgument("'axis'(%d) must be greater than or equal to"
                                   " -Rank(X)(%d).",
                                   axis,
                                   -x_dims.size()));
  PADDLE_ENFORCE_LT(axis,
                    x_dims.size(),
                    phi::errors::InvalidArgument(
                        "'axis'(%d) must be less than Rank(X)(%d) of Input(X).",
                        axis,
                        x_dims.size()));
  PADDLE_ENFORCE_EQ(
      (dtype < 0 || dtype == 2 || dtype == 3),
      true,
      phi::errors::InvalidArgument(
          "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
          "received [%s]",
          paddle::framework::DataTypeToString(
              paddle::framework::proto::VarType::INT32),
          paddle::framework::DataTypeToString(
              paddle::framework::proto::VarType::INT64),
          paddle::framework::DataTypeToString(
              static_cast<paddle::framework::proto::VarType::Type>(dtype))));

  auto x_rank = x_dims.size();
  if (axis < 0) axis += x_rank;
  if (config.is_runtime) {
    if (dtype == paddle::framework::proto::VarType::INT32) {
      int64_t all_element_num = 0;
      if (flatten) {
        all_element_num = phi::product(x_dims);
      } else {
        all_element_num = x_dims[axis];
      }
      PADDLE_ENFORCE_LE(
          all_element_num,
          INT_MAX,
          phi::errors::InvalidArgument(
              "The element num of the argmin/argmax input at axis is "
              "%d, is larger than int32 maximum value:%d, you must "
              "set the dtype of argmin/argmax to 'int64'.",
              all_element_num,
              INT_MAX));
    }
  }
  std::vector<int64_t> vec;
  if (flatten) {
    vec.emplace_back(static_cast<int64_t>(1));
  } else {
    for (int64_t i = 0; i < axis; i++) vec.emplace_back(x_dims[i]);
    if (keepdims) {
      vec.emplace_back(static_cast<int64_t>(1));
    }
    for (int64_t i = axis + 1; i < x_rank; i++) vec.emplace_back(x_dims[i]);
  }
  out->set_dims(phi::make_ddim(vec));
  if (dtype == 2) {
    out->set_dtype(DataType::INT32);
  } else if (dtype == 3) {
    out->set_dtype(DataType::INT64);
  }
}
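A compact cross-check of the output-shape rule above (a standalone sketch, not phi code): flatten yields [1]; otherwise the reduced axis is dropped, or kept as 1 under keepdims.

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of ArgMinMaxInferMeta's output shape.
Shape ArgMinMaxOutShape(const Shape& x, int64_t axis, bool keepdims,
                        bool flatten) {
  if (flatten) return {1};
  if (axis < 0) axis += static_cast<int64_t>(x.size());
  Shape out(x.begin(), x.begin() + axis);   // dims before the reduced axis
  if (keepdims) out.push_back(1);           // reduced axis kept as size 1
  out.insert(out.end(), x.begin() + axis + 1, x.end());  // dims after it
  return out;
}

int main() {
  assert((ArgMinMaxOutShape({2, 3, 4}, 1, false, false) == Shape{2, 4}));
  assert((ArgMinMaxOutShape({2, 3, 4}, -1, true, false) == Shape{2, 3, 1}));
  assert((ArgMinMaxOutShape({2, 3, 4}, 0, false, true) == Shape{1}));
}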
void ArgsortInferMeta(const MetaTensor& input,
                      int axis,
                      bool descending,
@@ -54,96 +130,6 @@ void ArgsortInferMeta(const MetaTensor& input,
  indices->share_lod(input);
}

(deleted here: UnchangedInferMeta, UnchangedInferMetaCheckAxis,
RealAndImagInferMeta, FlattenInferMeta and GumbelSoftmaxInferMeta; identical
definitions reappear below in the new alphabetical order)

void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(out_dtype);

@@ -203,73 +189,275 @@ void CumsumInferMeta(const MetaTensor& x,
  out->share_lod(x);
}
void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out) {
  auto x_dims = x.dims();

  if (x_dims.size() == 1UL) {
    int64_t size_ = x_dims[0] + std::abs(offset);
    out->set_dims({size_, size_});
    out->set_dtype(x.dtype());
  } else if (x_dims.size() == 2UL) {
    int64_t size_ = 0;
    if (offset >= 0) {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] < x_dims[1] - offset) {
        size_ = x_dims[0];
      } else {
        size_ = x_dims[1] - offset;
      }
    } else {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] + offset < x_dims[1]) {
        size_ = x_dims[0] + offset;
      } else {
        size_ = x_dims[1];
      }
    }
    out->set_dims({size_});
    out->set_dtype(x.dtype());
  } else {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The input tensor X's dimensions of DiagV2Op should be either 1 or "
        "2, but received %d.",
        x_dims.size()));
  }
}
void DiagonalInferMeta(const MetaTensor& input,
                       int offset,
                       int axis1,
                       int axis2,
                       MetaTensor* out) {
  auto x_dims = input.dims();
  int offset_ = offset;
  int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1;
  int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2;

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      phi::errors::OutOfRange("Input's dim is out of range (expected at "
                              "least 2 dimensions, but got %ld).",
                              x_dims.size()));
  PADDLE_ENFORCE_LT(
      axis1_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis1) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis1));
  PADDLE_ENFORCE_LT(
      axis2_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis2) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis2));
  PADDLE_ENFORCE_NE(
      axis1_,
      axis2_,
      phi::errors::InvalidArgument("The dimensions should not be identical "
                                   "%d vs %d.",
                                   axis1,
                                   axis2));

  auto out_dims = vectorize(x_dims);
  // from out_dims get the dim size of axis1_.
  auto axis1_size = out_dims[axis1_];
  auto axis2_size = out_dims[axis2_];
  // delete two dims by attr axis1 and axis2 from out_dims.
  /* example:
     out_dim = [2, 3, 4];
     axis1 = 0;
     axis2 = 1;
     according to the attr of axis1 and axis2, we get:
     out_dim = [4].
  */
  out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_));
  out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_));

  if (offset_ == 0) {
    out_dims.push_back(std::min(axis1_size, axis2_size));
  } else if (offset_ > 0) {
    if ((axis2_size - offset_) > 0) {
      out_dims.push_back(std::min(axis1_size, axis2_size - offset_));
    } else {
      out_dims.push_back(0);
    }
  } else {
    if ((axis1_size + offset_) > 0) {
      out_dims.push_back(std::min(axis1_size + offset_, axis2_size));
    } else {
      out_dims.push_back(0);
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
}
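The offset clipping above is easy to get wrong, so here is a standalone model of the rule with the example from the code comment worked through (illustrative only, not phi code):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of DiagonalInferMeta's output shape: drop axis1/axis2 and
// append the offset-clipped diagonal length.
Shape DiagonalOutShape(const Shape& x, int64_t offset, int axis1, int axis2) {
  if (axis1 < 0) axis1 += static_cast<int>(x.size());
  if (axis2 < 0) axis2 += static_cast<int>(x.size());
  int64_t a = x[axis1], b = x[axis2];
  Shape out(x);
  out.erase(out.begin() + std::max(axis1, axis2));
  out.erase(out.begin() + std::min(axis1, axis2));
  int64_t len = offset >= 0 ? std::min(a, std::max<int64_t>(b - offset, 0))
                            : std::min(std::max<int64_t>(a + offset, 0), b);
  out.push_back(len);
  return out;
}

int main() {
  // x = [2, 3, 4], axis1 = 0, axis2 = 1: keep [4], then append min(2, 3) = 2.
  assert((DiagonalOutShape({2, 3, 4}, 0, 0, 1) == Shape{4, 2}));
  assert((DiagonalOutShape({2, 3, 4}, 2, 0, 1) == Shape{4, 1}));
}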
void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v) {
  auto input_dim = x.dims();
  auto rank = input_dim.size();

  PADDLE_ENFORCE_GE(rank,
                    2,
                    phi::errors::InvalidArgument(
                        "The Input(X) should have at least 2 dimensions."
                        "But received a %d dimension tensor.",
                        rank));
  PADDLE_ENFORCE_EQ(
      input_dim[rank - 2],
      input_dim[rank - 1],
      phi::errors::InvalidArgument(
          "Eigh op is designed for square matrix, consequently"
          "inner-most 2 dimensions of Input(X) should be symmetric."
          "But received X's shape[-2] = %d and shape[-1] = %d.",
          input_dim[rank - 2],
          input_dim[rank - 1]));

  std::vector<int64_t> values_dim;
  for (auto i = 0; i < rank - 1; i++) {
    values_dim.emplace_back(input_dim[i]);
  }
  out_w->set_dims(phi::make_ddim(values_dim));
  out_v->set_dims(input_dim);
}
void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out) {
  auto x_dims = x.dims();
  int in_dims_size = x_dims.size();
  if (start_axis < 0) {
    start_axis = start_axis + in_dims_size;
  }
  if (stop_axis < 0) {
    stop_axis = stop_axis + in_dims_size;
  }
  PADDLE_ENFORCE_GE(
      stop_axis,
      start_axis,
      phi::errors::InvalidArgument("The stop_axis should be greater"
                                   "than or equal to start_axis."));

  int64_t outer = 1;
  std::vector<int32_t> out_shape;
  out_shape.reserve(in_dims_size - stop_axis + start_axis);

  for (int i = 0; i < start_axis; ++i) {
    out_shape.push_back(x_dims[i]);
  }
  for (int i = start_axis; i <= stop_axis; i++) {
    if (x_dims[i] == -1 || outer == -1) {
      outer = -1;
    } else {
      outer *= x_dims[i];
    }
  }
  out_shape.push_back(outer);
  for (int i = stop_axis + 1; i < in_dims_size; i++) {
    out_shape.push_back(x_dims[i]);
  }
  const auto& out_dims = phi::make_ddim(out_shape);
  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());

  if (x_dims[0] == out_dims[0]) {
    // Only pass LoD when the first dimension of output and Input(X)
    // are the same.
    out->share_lod(x);
  }
}
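A standalone model of the flatten rule above (illustrative only, not phi code): dims in [start_axis, stop_axis] collapse into one, and any -1 in that range poisons the collapsed dim to -1.

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of FlattenInferMeta's output shape.
Shape FlattenOutShape(const Shape& x, int start, int stop) {
  int n = static_cast<int>(x.size());
  if (start < 0) start += n;
  if (stop < 0) stop += n;
  Shape out(x.begin(), x.begin() + start);
  int64_t outer = 1;
  for (int i = start; i <= stop; ++i)
    outer = (x[i] == -1 || outer == -1) ? -1 : outer * x[i];  // -1 poisons
  out.push_back(outer);
  out.insert(out.end(), x.begin() + stop + 1, x.end());
  return out;
}

int main() {
  assert((FlattenOutShape({2, 3, 4, 5}, 1, 2) == Shape{2, 12, 5}));
  assert((FlattenOutShape({2, -1, 4, 5}, 1, -1) == Shape{2, -1}));
}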
void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out) {
  UnchangedInferMetaCheckAxis(x, axis, out);
}

void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) {
  PADDLE_ENFORCE_EQ(
      product(x.dims()),
      1UL,
      errors::InvalidArgument("The number of elements in Input(X) should be 1."
                              "Now the number is %d.",
                              product(x.dims())));
  out->set_dims(x.dims());
  out->share_lod(x);
  out->set_dtype(x.dtype());
}

static phi::DDim ValidateShape(const std::vector<int64_t> shape,
                               const phi::DDim& in_dims) {
  const int64_t in_size = phi::product(in_dims);
  auto in_dims_vec = phi::vectorize(in_dims);
  bool all_positive = std::all_of(in_dims_vec.cbegin(),
                                  in_dims_vec.cend(),
                                  [](int64_t i) { return i > 0; });
  // only one dimension can be set to -1, whose size will be automatically
  // infered.
  const int64_t unk_dim_val = -1;
  const int64_t copy_dim_val = 0;

  std::vector<int64_t> output_shape(shape.size(), 0);
  int64_t capacity = 1;
  int unk_dim_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == unk_dim_val) {
      PADDLE_ENFORCE_EQ(
          unk_dim_idx,
          -1,
          phi::errors::InvalidArgument(
              "Only one dimension value of 'shape' in ReshapeOp can "
              "be -1. But received shape = [%s], shape[%d] is also -1.",
              phi::make_ddim(shape),
              i));
      unk_dim_idx = i;
    } else if (shape[i] == copy_dim_val) {
      PADDLE_ENFORCE_LT(
          static_cast<int>(i),
          in_dims.size(),
          phi::errors::InvalidArgument(
              "The index of 0 in `shape` must be less than "
              "the input tensor X's dimensions. "
              "But received shape = [%s], shape[%d] = 0, X's shape = [%s], "
              "X's dimensions = %d.",
              phi::make_ddim(shape),
              i,
              in_dims,
              in_dims.size()));
    } else {
      PADDLE_ENFORCE_GT(
          shape[i],
          0,
          phi::errors::InvalidArgument(
              "Each dimension value of 'shape' in ReshapeOp must not "
              "be negative except one unknown dimension. "
              "But received shape = [%s], shape[%d] = %d.",
              phi::make_ddim(shape),
              i,
              shape[i]));
    }

    // NOTE all non-zero values will be converted to True (include negative
    // value)
    capacity *= (shape[i] ? shape[i] : in_dims[i]);
    output_shape[i] = (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
  }
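The hunk cuts off before ValidateShape resolves the -1 entry, so here is a standalone model of the complete -1/0 convention under the assumption that all input dims are known (illustrative only, not phi code): 0 copies the input dim at the same index, and the single -1 absorbs the remaining element count.

#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of the reshape shape resolution sketched in ValidateShape.
Shape ResolveReshape(const Shape& shape, const Shape& in) {
  int64_t in_size =
      std::accumulate(in.begin(), in.end(), int64_t{1}, std::multiplies<>());
  Shape out(shape.size());
  int64_t capacity = 1;
  int unk = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) unk = static_cast<int>(i);
    out[i] = shape[i] ? shape[i] : in[i];   // 0 means "copy input dim i"
    capacity *= (shape[i] ? shape[i] : in[i]);
  }
  // capacity is negative iff a -1 was seen; divide it out of the total.
  if (unk >= 0) out[unk] = in_size / (-capacity);
  return out;
}

int main() {
  assert((ResolveReshape({0, -1}, {4, 3, 5}) == Shape{4, 15}));
  assert((ResolveReshape({-1, 6}, {4, 3, 5}) == Shape{10, 6}));
}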
@@ -360,6 +548,11 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dtype(DataType::BOOL);
}

void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(DataType::BOOL);
}

void MultinomialInferMeta(const MetaTensor& x,
                          int num_samples,
                          bool replacement,

@@ -395,124 +588,97 @@ void MultinomialInferMeta(const MetaTensor& x,
  out->set_dtype(DataType::INT64);
}
(deleted here: TileInferMeta; an identical definition reappears below in the
new alphabetical order)

void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config) {
  auto x_dim = input.dims();
  PADDLE_ENFORCE_EQ(
      static_cast<int>(paddings.size()),
      x_dim.size() * 2,
      phi::errors::InvalidArgument(
          "Size of 'paddings' dimension should be equal to 2 * size of "
          "Input(X)'s dimension, but received (size of 'paddings' dimension "
          "is) %d vs (2 * size of Input(X)'s dimension is) %d.",
          static_cast<int>(paddings.size()),
          x_dim.size() * 2));
  for (size_t i = 0; i < paddings.size(); ++i) {
    PADDLE_ENFORCE_GE(paddings[i],
                      0,
                      phi::errors::InvalidArgument(
                          "The element of 'paddings' should >= 0, but "
                          "received %d for index %d.",
                          paddings[i],
                          static_cast<int>(i)));
  }
  std::vector<int64_t> out_dims(x_dim.size());
  for (int i = 0; i < x_dim.size(); ++i) {
    if ((!config.is_runtime) && (x_dim[i] == -1)) {
      out_dims[i] = -1;
    } else {
      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
  if (out_dims[0] == x_dim[0]) {
    // Only pass LoD when the first dimension is equal between
    // output and input.
    out->share_lod(input);
  }
  out->set_dtype(input.dtype());
}
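The paddings layout above is (before, after) pairs per dim; a standalone model with a worked case (illustrative only, not phi code):

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of PadInferMeta's output shape; -1 dims stay -1 at
// compile time.
Shape PadOutShape(const Shape& x, const std::vector<int>& paddings,
                  bool is_runtime) {
  Shape out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    if (!is_runtime && x[i] == -1)
      out[i] = -1;
    else
      out[i] = x[i] + paddings[2 * i] + paddings[2 * i + 1];
  }
  return out;
}

int main() {
  // Pad a [2, 3] tensor by 1 on each side of dim 0 and by 2 after dim 1.
  assert((PadOutShape({2, 3}, {1, 1, 0, 2}, true) == Shape{4, 5}));
}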
(deleted here: ReshapeInferMeta and ReshapeWithXShapeInferMeta; identical
definitions reappear below in the new alphabetical order)

void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out) {
  auto input_dims = x.dims();
  PADDLE_ENFORCE_EQ(input_dims.size(),
                    4,
                    phi::errors::InvalidArgument(
                        "Input should be a 4-D tensor of format [N, C, H, W] "
                        "or [N, H, W, C], but got %u.",
                        input_dims.size()));
  const bool channel_last = (data_format == "NHWC");
  if (!channel_last) {
    PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[1]));
  } else {
    PADDLE_ENFORCE_EQ(input_dims[3] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[3]));
  }
  auto output_dims = input_dims;
  output_dims[0] = input_dims[0];
  if (!channel_last) {
    output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor);
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] * upscale_factor;
  } else {
    output_dims[1] = input_dims[1] * upscale_factor;
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor);
  }
  out->set_dtype(x.dtype());
  out->set_dims(output_dims);
}
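In both layouts, r*r channels are traded for an r-times larger spatial grid. A standalone model (illustrative only, not phi code):

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of PixelShuffleInferMeta's NCHW/NHWC output shape.
Shape PixelShuffleOutShape(const Shape& d, int r, const std::string& fmt) {
  if (fmt == "NHWC") {
    return {d[0], d[1] * r, d[2] * r, d[3] / (r * r)};
  }
  return {d[0], d[1] / (r * r), d[2] * r, d[3] * r};  // NCHW
}

int main() {
  assert((PixelShuffleOutShape({1, 9, 4, 4}, 3, "NCHW") ==
          Shape{1, 1, 12, 12}));
  assert((PixelShuffleOutShape({1, 4, 4, 9}, 3, "NHWC") ==
          Shape{1, 12, 12, 1}));
}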
(deleted here: the "Why not use SumRawInferMeta directly?" comment block and
SumInferMeta; both reappear below in the new alphabetical order)

void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(dtype::ToReal(x.dtype()));
  out->set_layout(x.layout());
}

DDim ReduceInferDim(const MetaTensor& x,

@@ -584,29 +750,12 @@ DDim ReduceInferDim(const MetaTensor& x,
  return out_dim;
}
(deleted here: SumRawInferMeta; an identical definition reappears below in the
new alphabetical order)

void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out) {
  bool reduce_all = false;
  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
}

void ReduceInferMetaBase(const MetaTensor& x,

@@ -620,33 +769,109 @@ void ReduceInferMetaBase(const MetaTensor& x,
  out->set_layout(x.layout());
}
(deleted here: ReduceInferMeta and TransferLayoutInferMeta; ReduceInferMeta
now sits earlier in the file and TransferLayoutInferMeta later, in the new
alphabetical order)

void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
                      MetaConfig config) {
  auto& shape_data = shape.GetData();
  PADDLE_ENFORCE_NOT_NULL(out,
                          phi::errors::InvalidArgument(
                              "Output(Out) of ReshapeOp should not be null."));
  if (!config.is_runtime && shape.FromTensor()) {
    out->set_dims(phi::make_ddim(shape_data));
    out->share_lod(x);
    return;
  }
  PADDLE_ENFORCE_GT(shape_data.size(),
                    0,
                    phi::errors::InvalidArgument(
                        "The shape's size in ReshapeOp can't be zero."));
  InferMetaFromVecValue(x, shape_data, out);
}

void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                const ScalarArray& shape,
                                MetaTensor* xshape,
                                MetaTensor* out,
                                MetaConfig config) {
  PADDLE_ENFORCE_NOT_NULL(
      xshape,
      phi::errors::InvalidArgument(
          "Output(XShape) of ReshapeOp should not be null."));
  const auto& x_dims = x.dims();
  std::vector<int64_t> xshape_dims(x_dims.size() + 1);
  xshape_dims[0] = 0;
  for (int i = 0; i < x_dims.size(); ++i) {
    xshape_dims[i + 1] = x_dims[i];
  }
  xshape->set_dims(phi::make_ddim(xshape_dims));
  xshape->share_lod(x);
  ReshapeInferMeta(x, shape, out, config);
}
void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config) {
  auto x_dims = in.dims();
  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      phi::errors::InvalidArgument("Rank of Input(X) should be at least 2, "
                                   "but the value given is %d.",
                                   x_dims.size()));
  if (config.is_runtime || x_dims[x_dims.size() - 1] > 0) {
    PADDLE_ENFORCE_EQ(x_dims[x_dims.size() - 1],
                      1U,
                      phi::errors::InvalidArgument(
                          "The last dimension of Input(X) should be 1, "
                          "but the value given is %d.",
                          x_dims[x_dims.size() - 1]));
  }
  out->set_dims(x_dims);
  out->share_lod(in);
  out->set_dtype(in.dtype());
}

void SizeInferMeta(const MetaTensor& input, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
  out->set_dims({1});
}

void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) {
  auto dim_x = x.dims();
  auto rank_x = dim_x.size();
  PADDLE_ENFORCE_GE(axis,
                    -rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));
  PADDLE_ENFORCE_LT(axis,
                    rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));

  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->share_lod(x);
}

void SplitInferMeta(const MetaTensor& x,
                    const ScalarArray& num_or_sections,
                    const Scalar& axis,
                    std::vector<MetaTensor*> out,
                    MetaConfig config) {
  int axis_value = axis.to<int>();
  int rank = x.dims().size();
  PADDLE_ENFORCE_EQ(
      axis_value >= -rank && axis_value < rank,
      true,
      phi::errors::InvalidArgument(
          "The axis is expected to be in range of [%d, %d), but got %d",
          -rank,
          rank,

@@ -767,22 +992,108 @@ void SplitInferMeta(const MetaTensor& x,
  }
}
(deleted here: UnbindInferMeta; an identical definition reappears below in the
new alphabetical order)

/* Why not use SumRawInferMeta directly?
   Because we need make InferMetaFunction's args follow the design of api.yaml
*/
void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
                  DataType dtype,
                  bool keep_dim,
                  MetaTensor* out) {
  bool reduce_all = false;
  SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out);
}

void SumRawInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     bool reduce_all,
                     DataType dtype,
                     MetaTensor* out) {
  DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);

  DataType out_dtype;
  if (dtype != DataType::UNDEFINED) {
    out_dtype = dtype;
  } else {
    if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 ||
        x.dtype() == DataType::INT64) {
      out_dtype = DataType::INT64;
    } else {
      out_dtype = x.dtype();
    }
  }

  out->set_dims(out_dim);
  out->set_dtype(out_dtype);
  out->set_layout(x.layout());
}
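The dtype branch above promotes narrow integral sums to int64 when no explicit dtype is requested, presumably to avoid overflow. A standalone model of just that rule (illustrative only; the enum here is made up, not phi's DataType):

#include <cassert>

enum class DT { BOOL, INT32, INT64, FLOAT32, UNDEFINED };

// Standalone model of SumRawInferMeta's output-dtype promotion.
DT SumOutDtype(DT x, DT requested) {
  if (requested != DT::UNDEFINED) return requested;  // caller wins
  if (x == DT::BOOL || x == DT::INT32 || x == DT::INT64) return DT::INT64;
  return x;  // floating types keep their own dtype
}

int main() {
  assert(SumOutDtype(DT::INT32, DT::UNDEFINED) == DT::INT64);
  assert(SumOutDtype(DT::FLOAT32, DT::UNDEFINED) == DT::FLOAT32);
  assert(SumOutDtype(DT::INT32, DT::FLOAT32) == DT::FLOAT32);
}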
void TileInferMeta(const MetaTensor& x,
                   const ScalarArray& repeat_times,
                   MetaTensor* out,
                   MetaConfig config) {
#define MAX_RANK_SUPPORTED 6

  auto repeat_times_data = repeat_times.GetData();
  auto x_dims = x.dims();
  if (repeat_times_data.size() == 0) {
    repeat_times_data = std::vector<int64_t>(x_dims.size(), -1);
  }

  PADDLE_ENFORCE_LE(
      x_dims.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The rank of the input 'x' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          x_dims.size()));
  PADDLE_ENFORCE_LE(
      repeat_times_data.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          repeat_times_data.size()));
  PADDLE_ENFORCE_GE(
      repeat_times_data.size(),
      1,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must be positive integers, but the value received is %d.",
          repeat_times_data.size()));

  auto out_rank =
      std::max(static_cast<size_t>(x_dims.size()), repeat_times_data.size());
  std::vector<int64_t> out_shape(out_rank);
  auto x_dim_vec = phi::vectorize<int>(x_dims);
  if (x_dim_vec.size() > repeat_times_data.size()) {
    auto diff = x_dim_vec.size() - repeat_times_data.size();
    repeat_times_data.insert(repeat_times_data.begin(), diff, -1);
  } else {
    auto diff = repeat_times_data.size() - x_dim_vec.size();
    x_dim_vec.insert(x_dim_vec.begin(), diff, -1);
  }
  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    if (x_dim_vec[i] == -1 || repeat_times_data[i] == -1) {
      out_shape[i] = -1;
    } else {
      PADDLE_ENFORCE_GT(
          repeat_times_data[i],
          0,
          errors::InvalidArgument(
              "Every element of the input 'repeat_times' for tile op must be "
              "greater than 0, but the value given is %d.",
              repeat_times_data[i]));
      out_shape[i] = x_dim_vec[i] * repeat_times_data[i];
    }
  }

  out->set_dims(phi::make_ddim(out_shape));
  if (out_shape[0] == x_dims[0]) {
    out->share_lod(x);
  }
}
@@ -840,79 +1151,112 @@ void TraceInferMeta(
...
@@ -840,79 +1151,112 @@ void TraceInferMeta(
out
->
set_dtype
(
x
.
dtype
());
out
->
set_dtype
(
x
.
dtype
());
}
}
(deleted here: DiagonalInferMeta; an identical definition now sits earlier in
the file in the new alphabetical order)

void TransferLayoutInferMeta(const MetaTensor& x,
                             DataLayout layout,
                             MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->set_layout(layout);
}

void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out) {
  auto x_dims = x.dims();
  size_t x_rank = x_dims.size();
  size_t axis_size = axis.size();

  PADDLE_ENFORCE_EQ(x_rank,
                    axis_size,
                    errors::InvalidArgument(
                        "The input tensor's dimension "
                        "should be equal to the axis's size. "
                        "But received input tensor's dimension is %d, "
                        "axis's size is %d",
                        x_rank,
                        axis_size));

  std::vector<int> count(axis_size, 0);
  for (size_t i = 0; i < axis_size; i++) {
    PADDLE_ENFORCE_GE(
        axis[i],
        0,
        errors::InvalidArgument("The axis should be greater than or equal to 0."
                                "But received %d of axis[%d]",
                                axis[i],
                                i));
    PADDLE_ENFORCE_EQ(
        axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
        true,
        errors::InvalidArgument(
            "Each element of Attribute axis should "
            "be a unique value range from 0 to (dims - 1), "
            "where the dims is the axis's size, "
            "unique value means this axis value can appear only once. "
            "But received axis[%d] is %d, axis_size is %d, "
            "count[axis[%d]] is %d",
            i,
            axis[i],
            axis_size,
            i,
            count[axis[i]]));
  }

  phi::DDim out_dims(x_dims);
  for (size_t i = 0; i < axis_size; ++i) {
    out_dims[i] = x_dims[axis[i]];
  }

  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
}

void UnbindInferMeta(const MetaTensor& x,
                     int axis,
                     std::vector<MetaTensor>* outs) {
  auto in_dims = x.dims();
  std::vector<int> out_dim;
  axis = axis < 0 ? in_dims.size() + axis : axis;
  for (int i = 0; i < in_dims.size(); ++i) {
    if (i != axis) out_dim.push_back(in_dims[i]);
  }
  auto out_dims = phi::make_ddim(out_dim);
  for (size_t i = 0; i < outs->size(); ++i) {
    (*outs)[i].set_dtype(x.dtype());
    (*outs)[i].set_dims(out_dims);
    (*outs)[i].set_layout(x.layout());
    (*outs)[i].share_lod(x);
  }
}

void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->share_meta(x);
}

// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void UnchangedInferMetaCheckAxis(const MetaTensor& x,
                                 int axis,
                                 MetaTensor* out) {
  auto rank = x.dims().size();
  PADDLE_ENFORCE_GE(
      axis,
      -rank,
      errors::InvalidArgument(
          "Attr(axis) value should be in range [-R, R-1], "
          "R is the rank of Input(X). But received axis: %d, R: %d.",
          axis,
          rank));
  PADDLE_ENFORCE_LT(
      axis,
      rank,
      phi::errors::InvalidArgument(
          "Attr(axis) value should be in range [-R, R-1], "
          "R is the rank of Input(X). But received axis: %d, R: %d.",
          axis,
          rank));
  out->share_meta(x);
}
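Once axis is validated as a permutation of 0..rank-1, the transpose shape rule is simply out[i] = x[axis[i]]. A standalone model (illustrative only, not phi code):

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

// Standalone model of TransposeInferMeta's output shape (axis assumed valid).
Shape TransposeOutShape(const Shape& x, const std::vector<int>& axis) {
  Shape out(x.size());
  for (size_t i = 0; i < axis.size(); ++i) out[i] = x[axis[i]];
  return out;
}

int main() {
  assert((TransposeOutShape({2, 3, 4}, {2, 0, 1}) == Shape{4, 2, 3}));
}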
void UnfoldInferMeta(const MetaTensor& x,

@@ -1073,303 +1417,6 @@ void UnfoldInferMeta(const MetaTensor& x,
  out->set_dims(phi::make_ddim(out_dims));
}
(deleted here: DiagInferMeta, ArgMinMaxInferMeta, SizeInferMeta, PadInferMeta,
IsfiniteInferMeta, PixelShuffleInferMeta, TransposeInferMeta and
EighInferMeta; identical definitions now sit earlier in the file in the new
alphabetical order)
void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
  auto rank = condition.dims().size();
  PADDLE_ENFORCE_GE(

@@ -1381,53 +1428,6 @@ void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
}

(deleted here: ShardIndexInferMeta and SoftmaxInferMeta; identical definitions
now sit earlier in the file in the new alphabetical order)

}  // namespace phi

PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
paddle/phi/infermeta/unary.h
...
@@ -32,32 +32,20 @@ class MetaConfig;
...
@@ -32,32 +32,20 @@ class MetaConfig;
// Because functions in this file not only can infer shape, but also need
// Because functions in this file not only can infer shape, but also need
// infer lod or other useful data.
// infer lod or other useful data.
void
ArgMinMaxInferMeta
(
const
MetaTensor
&
x
,
int64_t
axis
,
bool
keepdims
,
bool
flatten
,
int
dtype
,
MetaTensor
*
out
,
MetaConfig
config
=
MetaConfig
());
void
ArgsortInferMeta
(
const
MetaTensor
&
input
,
void
ArgsortInferMeta
(
const
MetaTensor
&
input
,
int
axis
,
int
axis
,
bool
descending
,
bool
descending
,
MetaTensor
*
output
,
MetaTensor
*
output
,
MetaTensor
*
indices
);
MetaTensor
*
indices
);
void
UnchangedInferMeta
(
const
MetaTensor
&
x
,
MetaTensor
*
out
);
// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void
UnchangedInferMetaCheckAxis
(
const
MetaTensor
&
x
,
int
axis
,
MetaTensor
*
out
);
void
RealAndImagInferMeta
(
const
MetaTensor
&
x
,
MetaTensor
*
out
);
void
FlattenInferMeta
(
const
MetaTensor
&
x
,
int
start_axis
,
int
stop_axis
,
MetaTensor
*
out
);
void
GumbelSoftmaxInferMeta
(
const
MetaTensor
&
x
,
float
temperature
,
bool
hard
,
int
axis
,
MetaTensor
*
out
);
void
CastInferMeta
(
const
MetaTensor
&
x
,
DataType
out_dtype
,
MetaTensor
*
out
);
void
CastInferMeta
(
const
MetaTensor
&
x
,
DataType
out_dtype
,
MetaTensor
*
out
);
void
CholeskyInferMeta
(
const
MetaTensor
&
x
,
bool
upper
,
MetaTensor
*
out
);
void
CholeskyInferMeta
(
const
MetaTensor
&
x
,
bool
upper
,
MetaTensor
*
out
);
...
@@ -76,6 +64,30 @@ void CumsumInferMeta(const MetaTensor& x,
...
@@ -76,6 +64,30 @@ void CumsumInferMeta(const MetaTensor& x,
bool
reverse
,
bool
reverse
,
MetaTensor
*
out
);
MetaTensor
*
out
);
void
DiagInferMeta
(
const
MetaTensor
&
x
,
int
offset
,
float
padding_value
,
MetaTensor
*
out
);
void
DiagonalInferMeta
(
const
MetaTensor
&
input
,
int
offset
,
int
axis1
,
int
axis2
,
MetaTensor
*
out
);
void
EighInferMeta
(
const
MetaTensor
&
x
,
const
std
::
string
&
uplo
,
MetaTensor
*
out_w
,
MetaTensor
*
out_v
);
void
FlattenInferMeta
(
const
MetaTensor
&
x
,
int
start_axis
,
int
stop_axis
,
MetaTensor
*
out
);
void
GumbelSoftmaxInferMeta
(
const
MetaTensor
&
x
,
float
temperature
,
bool
hard
,
int
axis
,
MetaTensor
*
out
);
void
IncrementInferMeta
(
const
MetaTensor
&
x
,
float
value
,
MetaTensor
*
out
);
void
IncrementInferMeta
(
const
MetaTensor
&
x
,
float
value
,
MetaTensor
*
out
);
void
InferMetaFromVecValue
(
const
MetaTensor
&
x
,
void
InferMetaFromVecValue
(
const
                      MetaTensor& x,
...
@@ -84,11 +96,37 @@ void InferMetaFromVecValue(const MetaTensor& x,
 void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);
+void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
 void MultinomialInferMeta(const MetaTensor& x,
                           int num_samples,
                           bool replacement,
                           MetaTensor* out);
+void PadInferMeta(const MetaTensor& input,
+                  const std::vector<int>& paddings,
+                  float pad_value,
+                  MetaTensor* out,
+                  MetaConfig config = MetaConfig());
+void PixelShuffleInferMeta(const MetaTensor& x,
+                           int upscale_factor,
+                           const std::string& data_format,
+                           MetaTensor* out);
+void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);
+void ReduceInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     MetaTensor* out);
+void ReduceInferMetaBase(const MetaTensor& x,
+                         const std::vector<int64_t>& axis,
+                         bool keep_dim,
+                         bool reduce_all,
+                         MetaTensor* out);
 void ReshapeInferMeta(const MetaTensor& x,
                       const ScalarArray& shape,
                       MetaTensor* out,
...
@@ -100,28 +138,23 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                 MetaTensor* out,
                                 MetaConfig config = MetaConfig());
-void TileInferMeta(const MetaTensor& x,
-                   const ScalarArray& repeat_times,
-                   MetaTensor* out,
-                   MetaConfig config = MetaConfig());
+void ShardIndexInferMeta(const MetaTensor& in,
+                         int index_num,
+                         int nshards,
+                         int shard_id,
+                         int ignore_value,
+                         MetaTensor* out,
+                         MetaConfig config = MetaConfig());
-void SumRawInferMeta(const MetaTensor& x,
-                     const std::vector<int64_t>& axis,
-                     bool keep_dim,
-                     bool reduce_all,
-                     DataType dtype,
-                     MetaTensor* out);
+void SizeInferMeta(const MetaTensor& input, MetaTensor* out);
-void ReduceInferMetaBase(const MetaTensor& x,
-                         const std::vector<int64_t>& axis,
-                         bool keep_dim,
-                         bool reduce_all,
-                         MetaTensor* out);
+void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);
-void ReduceInferMeta(const MetaTensor& x,
-                     const std::vector<int64_t>& axis,
-                     bool keep_dim,
-                     MetaTensor* out);
+void SplitInferMeta(const MetaTensor& x_meta,
+                    const ScalarArray& num_or_sections,
+                    const Scalar& axis,
+                    std::vector<MetaTensor*> out,
+                    MetaConfig config = MetaConfig());
 void SumInferMeta(const MetaTensor& x,
                   const std::vector<int64_t>& axis,
...
@@ -129,21 +162,39 @@ void SumInferMeta(const MetaTensor& x,
                   bool keep_dim,
                   MetaTensor* out);
+void SumRawInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     bool reduce_all,
+                     DataType dtype,
+                     MetaTensor* out);
+void TileInferMeta(const MetaTensor& x,
+                   const ScalarArray& repeat_times,
+                   MetaTensor* out,
+                   MetaConfig config = MetaConfig());
+void TraceInferMeta(
+    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out);
 void TransferLayoutInferMeta(const MetaTensor& x,
                              DataLayout layout,
                              MetaTensor* out);
-void SplitInferMeta(const MetaTensor& x_meta,
-                    const ScalarArray& num_or_sections,
-                    const Scalar& axis,
-                    std::vector<MetaTensor*> out,
-                    MetaConfig config = MetaConfig());
+void TransposeInferMeta(const MetaTensor& x,
+                        const std::vector<int>& axis,
+                        MetaTensor* out);
 void UnbindInferMeta(const MetaTensor& x,
                      int axis,
                      std::vector<MetaTensor>* outs);
-void TraceInferMeta(
-    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out);
+void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out);
+// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
+void UnchangedInferMetaCheckAxis(const MetaTensor& x,
+                                 int axis,
+                                 MetaTensor* out);
 void UnfoldInferMeta(const MetaTensor& x,
                      const std::vector<int>& kernel_sizes,
...
@@ -153,56 +204,6 @@ void UnfoldInferMeta(const MetaTensor& x,
                      MetaTensor* out,
                      MetaConfig config = MetaConfig());
-void DiagInferMeta(
-    const MetaTensor& x, int offset, float padding_value, MetaTensor* out);
-void ArgMinMaxInferMeta(const MetaTensor& x,
-                        int64_t axis,
-                        bool keepdims,
-                        bool flatten,
-                        int dtype,
-                        MetaTensor* out,
-                        MetaConfig config = MetaConfig());
-void SizeInferMeta(const MetaTensor& input, MetaTensor* out);
-void PadInferMeta(const MetaTensor& input,
-                  const std::vector<int>& paddings,
-                  float pad_value,
-                  MetaTensor* out,
-                  MetaConfig config = MetaConfig());
-void DiagonalInferMeta(
-    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out);
-void PixelShuffleInferMeta(const MetaTensor& x,
-                           int upscale_factor,
-                           const std::string& data_format,
-                           MetaTensor* out);
-void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
-void TransposeInferMeta(const MetaTensor& x,
-                        const std::vector<int>& axis,
-                        MetaTensor* out);
-void EighInferMeta(const MetaTensor& x,
-                   const std::string& uplo,
-                   MetaTensor* out_w,
-                   MetaTensor* out_v);
 void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);
-void ShardIndexInferMeta(const MetaTensor& in,
-                         int index_num,
-                         int nshards,
-                         int shard_id,
-                         int ignore_value,
-                         MetaTensor* out,
-                         MetaConfig config = MetaConfig());
-void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);

 }  // namespace phi
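Note on the reshuffle above: the declarations are only being reordered alphabetically and reformatted; every one of them follows the same phi InferMeta contract, in which the function derives the output tensor's metadata (dims, dtype, layout, LoD) from its inputs without allocating or reading any data. A minimal sketch of that contract, assuming the phi::MetaTensor API from paddle/phi/core/meta_tensor.h — the function name is suffixed "Sketch" because this is an illustration, not the body shipped in unary.cc, and it elides the axis-range validation the real SoftmaxInferMeta performs:

#include "paddle/phi/core/meta_tensor.h"

namespace phi {

// Shape-preserving InferMeta sketch: softmax keeps x's shape, so the output
// only mirrors the input's metadata.
void SoftmaxInferMetaSketch(const MetaTensor& x, int axis, MetaTensor* out) {
  out->set_dims(x.dims());    // same shape as the input
  out->set_dtype(x.dtype());  // same element type
  out->share_lod(x);          // propagate LoD for LoDTensor inputs
}

}  // namespace phi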
paddle/phi/kernels/funcs/matrix_inverse.h
View file @ f7765991
...
@@ -39,7 +39,7 @@ void ComputeInverseEigen(const Context& dev_ctx,
   int batch_size = rank > 2 ? a.numel() / (n * n) : 1;

   const T* a_ptr = a.data<T>();
-  T* a_inv_ptr = a_inv->mutable_data<T>(dev_ctx.GetPlace());
+  T* a_inv_ptr = dev_ctx.template Alloc<T>(a_inv);

   for (int i = 0; i < batch_size; ++i) {
     ConstEigenMatrixMap mat(a_ptr + i * n * n, n, n);
...
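The one-line change above is part of the broader fluid-to-phi allocation migration: output buffers are requested through the device context rather than through Tensor::mutable_data. A hedged sketch of the new idiom, assuming a phi-style Context type exposing the member template Alloc<T> (as phi::DeviceContext does); the AllocOutput helper is invented here for illustration and is not part of the Paddle source:

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

// Illustrative helper: allocate out's buffer of element type T through the
// device context. The `template` keyword is required before Alloc because it
// is a dependent member template of the deduced Context type.
template <typename T, typename Context>
T* AllocOutput(const Context& dev_ctx, DenseTensor* out) {
  return dev_ctx.template Alloc<T>(out);
}

}  // namespace phi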