PaddlePaddle / Paddle

Commit f7765991
Authored Mar 13, 2022 by phlrain

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_some_yaml_config

Parents: 1694bcc8 1b0cecb7
Showing 16 changed files with 1,566 additions and 1,562 deletions.
paddle/fluid/framework/infershape_utils.cc   +2    -2
paddle/fluid/operators/gather_nd_op.cc       +0    -1
paddle/fluid/operators/softmax_op.cc         +2    -2
paddle/phi/core/meta_tensor.cc               +2    -2
paddle/phi/core/meta_tensor.h                +1    -1
paddle/phi/infermeta/backward.cc             +15   -14
paddle/phi/infermeta/backward.h              +6    -1
paddle/phi/infermeta/binary.cc               +452  -451
paddle/phi/infermeta/binary.h                +57   -57
paddle/phi/infermeta/nullary.cc              +18   -18
paddle/phi/infermeta/nullary.h               +9    -9
paddle/phi/infermeta/ternary.cc              +153  -152
paddle/phi/infermeta/ternary.h               +21   -25
paddle/phi/infermeta/unary.cc                +730  -730
paddle/phi/infermeta/unary.h                 +97   -96
paddle/phi/kernels/funcs/matrix_inverse.h    +1    -1
paddle/fluid/framework/infershape_utils.cc

@@ -249,13 +249,13 @@ class CompatMetaTensor : public phi::MetaTensor {
   }

   void share_meta(const MetaTensor& meta_tensor) override {
-    share_dims(meta_tensor);
     set_dtype(meta_tensor.dtype());
     // VarDesc doesn't contains layout, so we cannot share layout
     // set_layout(meta_tensor.layout());

-    // special case 1: share lod of LoDTensor
+    // special case: share lod of LoDTensor
     share_lod(meta_tensor);
+    share_dims(meta_tensor);
   }

  private:
paddle/fluid/operators/gather_nd_op.cc

@@ -16,7 +16,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
-#include "paddle/phi/infermeta/ternary.h"

 namespace paddle {
 namespace operators {
paddle/fluid/operators/softmax_op.cc

@@ -215,7 +215,7 @@ REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                   ops::SoftmaxOpGradMaker<paddle::framework::OpDesc>,
                   ops::SoftmaxOpGradMaker<paddle::imperative::OpBase>,
                   ops::SoftmaxInplaceInferer, SoftmaxInferShapeFunctor);
-DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradnferShapeFunctor,
+DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradInferShapeFunctor,
                             PD_INFER_META(phi::GeneralUnaryGradInferMeta));
 REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad,
-                  SoftmaxGradnferShapeFunctor);
+                  SoftmaxGradInferShapeFunctor);
paddle/phi/core/meta_tensor.cc

@@ -110,7 +110,7 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) {
   }
 }

-TensorBase* MetaTensor::get_tensor() const { return tensor_; }
+TensorBase* MetaTensor::tensor() const { return tensor_; }

 void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   bool is_dense_tensor = phi::DenseTensor::classof(tensor_);
@@ -118,7 +118,7 @@ void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   if (is_dense_tensor || is_selected_rows) {
     set_dims(meta_tensor.dims());
     if (is_selected_rows) {
-      const auto in_tensor_base = meta_tensor.get_tensor();
+      const auto in_tensor_base = meta_tensor.tensor();
       PADDLE_ENFORCE_EQ(phi::SelectedRows::classof(in_tensor_base),
                         true,
paddle/phi/core/meta_tensor.h

@@ -66,7 +66,7 @@ class MetaTensor {
   // Because the lod in compiletime and runtime is different,
   // so `LoD` cannot in public methods
   const LoD& lod() const;

-  TensorBase* get_tensor() const;
+  TensorBase* tensor() const;

   TensorBase* tensor_;
 };
paddle/phi/infermeta/backward.cc

@@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
   }
 }

-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
-  if (dx) {
-    dx->share_meta(x);
-  }
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad) {
+  const auto& dtype = out_grad.dtype();
+  x_grad->set_dims(x.dims());
+  x_grad->share_lod(x);
+  x_grad->set_dtype(dtype);
 }

 void GeneralBinaryGradInferMeta(const MetaTensor& x,
@@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
   }
 }

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
+  if (dx) {
+    dx->share_meta(x);
+  }
+}
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
@@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                     dout.dims(),
                     errors::InvalidArgument(
                         "Input(Out) and its gradients should have the same shape."));
   dx->share_meta(dout);
 }

-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad) {
-  const auto& dtype = out_grad.dtype();
-  x_grad->set_dims(x.dims());
-  x_grad->share_lod(x);
-  x_grad->set_dtype(dtype);
-}
-
 void PsroiPoolGradInferMeta(const MetaTensor& x,
paddle/phi/infermeta/backward.h

@@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                         MetaTensor* dweight,
                                         MetaTensor* dbias);

-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad);

 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
@@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
                                  MetaTensor* dy,
                                  MetaTensor* dz);

+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
paddle/phi/infermeta/binary.cc

@@ -22,6 +22,153 @@ limitations under the License. */
 namespace phi {

+void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
+  out->share_meta(x);
+}
+
+void BCELossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      MetaTensor* out,
+                      MetaConfig config) {
+  auto input_dims = input.dims();
+  auto label_dims = label.dims();
+  int rank = input_dims.size();
+  PADDLE_ENFORCE_EQ(rank, label_dims.size(),
+                    phi::errors::InvalidArgument(
+                        "Input(X) and Input(Label) shall have the same rank."
+                        "But received: the rank of Input(X) is [%d], "
+                        "the rank of Input(Label) is [%d].",
+                        rank, label_dims.size()));
+
+  bool check = true;
+  if ((!config.is_runtime) &&
+      (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) {
+    check = false;
+  }
+
+  if (check) {
+    PADDLE_ENFORCE_EQ(input_dims, label_dims,
+                      phi::errors::InvalidArgument(
+                          "Input(X) and Input(Label) shall have the same "
+                          "shape. But received: the shape of Input(X) is "
+                          "[%s], the shape of Input(Label) is [%s].",
+                          input_dims, label_dims));
+  }
+
+  out->set_dims(input_dims);
+  out->set_dtype(input.dtype());
+  out->share_lod(input);
+}
+
+void BincountInferMeta(const MetaTensor& x,
+                       const paddle::optional<const MetaTensor&> weights,
+                       int minlength,
+                       MetaTensor* out) {
+  auto input_dim = x.dims();
+
+  PADDLE_ENFORCE_GE(minlength, 0,
+                    phi::errors::InvalidArgument(
+                        "The minlength should be greater than or equal to 0."
+                        "But received minlength is %d",
+                        minlength));
+
+  PADDLE_ENFORCE_EQ(input_dim.size(), 1,
+                    phi::errors::InvalidArgument(
+                        "The 'shape' of Input(X) must be 1-D tensor."
+                        "But the dimension of Input(X) is [%d]",
+                        input_dim.size()));
+
+  if (weights.is_initialized()) {
+    auto weights_dim = weights->dims();
+    PADDLE_ENFORCE_EQ(weights_dim.size(), 1,
+                      phi::errors::InvalidArgument(
+                          "The 'shape' of Input(Weights) must be 1-D tensor."
+                          "But the dimension of Input(Weights) is [%d]",
+                          weights_dim.size()));
+
+    PADDLE_ENFORCE_EQ(weights_dim[0], input_dim[0],
+                      phi::errors::InvalidArgument(
+                          "The 'shape' of Input(Weights) must be equal to the "
+                          "'shape' of Input(X)."
+                          "But received: the 'shape' of Input(Weights) is [%s],"
+                          "the 'shape' of Input(X) is [%s]",
+                          weights_dim, input_dim));
+  }
+  out->set_dims(phi::make_ddim({-1}));
+  if (weights.is_initialized()) {
+    out->set_dtype(weights->dtype());
+  } else {
+    out->set_dtype(x.dtype());
+  }
+  out->share_lod(x);
+}
+
+void CholeskySolveInferMeta(const MetaTensor& x,
+                            const MetaTensor& y,
+                            bool upper,
+                            MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  auto x_dims_n = x_dims.size();
+  auto y_dims_n = y_dims.size();
+
+  PADDLE_ENFORCE_GE(x_dims_n, 2,
+                    phi::errors::InvalidArgument(
+                        "the rank of input Y must greater or equal to 2"));
+  PADDLE_ENFORCE_GE(y_dims_n, 2,
+                    phi::errors::InvalidArgument(
+                        "the rank of input X must greater or equal to 2"));
+  PADDLE_ENFORCE_EQ(y_dims[y_dims_n - 1], y_dims[y_dims_n - 2],
+                    phi::errors::InvalidArgument(
+                        "input Matrix Y should be square matrix,"
+                        "But Got last shape of %ld x %ld",
+                        y_dims[y_dims_n - 1], y_dims[y_dims_n - 2]));
+  PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], y_dims[y_dims_n - 2],
+                    phi::errors::InvalidArgument(
+                        "the first dim of Matrix X must be equal to "
+                        "the fisrt dim of Matrix Y,"
+                        "But Got %ld and %ld",
+                        x_dims[x_dims_n - 2], y_dims[y_dims_n - 2]));
+
+  std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
+  std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
+
+  std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
+  std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
+
+  std::vector<int64_t> expand_batch_portion =
+      funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
+
+  std::vector<int64_t> x_broadcast_dims({expand_batch_portion});
+  x_broadcast_dims.insert(x_broadcast_dims.end(),
+                          {x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]});
+
+  // dim of 'out' is the same with 'X' after broadcast
+  out->set_dims(phi::make_ddim(x_broadcast_dims));
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+  out->share_lod(x);
+}
+
 void CompareInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
                       int axis,
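CholeskySolveInferMeta (and TriangularSolveInferMeta later in this file) combine the operands' leading batch dimensions with funcs::MatrixGetBroadcastBatchPortion before appending the matrix dimensions. A minimal standalone sketch of the batch-broadcast rule implied by the surrounding checks; BroadcastBatchPortion is a hypothetical stand-in, not Paddle's actual helper:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Right-align the two batch-shape vectors, treat size-1 dims as
// broadcastable, and take the max extent elsewhere.
std::vector<int64_t> BroadcastBatchPortion(std::vector<int64_t> x,
                                           std::vector<int64_t> y) {
  if (x.size() < y.size()) std::swap(x, y);  // x is now the longer vector
  size_t offset = x.size() - y.size();
  std::vector<int64_t> out(x);
  for (size_t i = 0; i < y.size(); ++i) {
    int64_t a = x[offset + i], b = y[i];
    assert(a == b || a == 1 || b == 1);  // must be broadcast-compatible
    out[offset + i] = std::max(a, b);
  }
  return out;
}

int main() {
  // Batch dims {4, 1} and {3} broadcast to {4, 3}; the meta function then
  // appends the trailing {n, n} / {n, k} matrix dims to this result.
  auto r = BroadcastBatchPortion({4, 1}, {3});
  assert((r == std::vector<int64_t>{4, 3}));
  return 0;
}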
@@ -67,6 +214,74 @@ void CompareAllInferMeta(const MetaTensor& x,
   out->set_dtype(DataType::BOOL);
 }

+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out) {
+  auto x_dim = x.dims();
+  auto y_dim = y.dims();
+  auto dim = axis;
+
+  bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
+  PADDLE_ENFORCE_EQ(dims_match, true,
+                    phi::errors::InvalidArgument(
+                        "The 'shape' of Input(X) should be equal to "
+                        "the 'shape' of Input(Y). But received "
+                        "Input(X).dimensions = [%s], "
+                        "Input(Y).dimensions = [%s]",
+                        x_dim, y_dim));
+
+  if (dim != DDim::kMaxRank) {
+    PADDLE_ENFORCE_EQ(dim < x_dim.size() && dim >= (0 - x_dim.size()), true,
+                      phi::errors::OutOfRange(
+                          "Attr(dim) is out of range, It's expected "
+                          "to be in range of [-%d, %d]. But received "
+                          "Attr(dim) = %d.",
+                          x_dim.size(), x_dim.size() - 1, dim));
+    if (dim < 0) {
+      dim += x_dim.size();
+    }
+    PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, true,
+                      phi::errors::InvalidArgument(
+                          "Input(X/Y).dims()[dim] should be equal to 3."
+                          "But received Input(X/Y).dims()[dim] = %d.",
+                          x_dim[dim]));
+  }
+  out->set_dims(x_dim);
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+  out->share_lod(x);
+}
+
+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+
+  PADDLE_ENFORCE_NE(phi::product(x_dims), 0,
+                    phi::errors::InvalidArgument(
+                        "The Input(X) has not been initialized properly. The "
+                        "shape of Input(X) = [%s].",
+                        x_dims));
+  PADDLE_ENFORCE_NE(phi::product(y_dims), 0,
+                    phi::errors::InvalidArgument(
+                        "The Input(Y) has not been initialized properly. The "
+                        "shape of Input(Y) = [%s].",
+                        y_dims));
+  out->set_dims({1});
+  out->set_dtype(x.dtype());
+}
+
 void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
   auto x_dims = x.dims();
   auto x_rank = static_cast<size_t>(x_dims.size());
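CrossInferMeta wraps a negative axis around the rank and then requires that the chosen dimension has extent 3 on both inputs. A small self-contained sketch of that check, with plain asserts standing in for PADDLE_ENFORCE; NormalizeCrossDim is a hypothetical name used only for illustration:

#include <cassert>
#include <cstdint>
#include <vector>

// Axis handling from CrossInferMeta above: a negative dim wraps around,
// and the chosen dim must have extent 3 (cross products need 3-vectors).
int NormalizeCrossDim(const std::vector<int64_t>& x, int dim) {
  int rank = static_cast<int>(x.size());
  assert(dim < rank && dim >= -rank);
  if (dim < 0) dim += rank;
  assert(x[dim] == 3);
  return dim;
}

int main() {
  std::vector<int64_t> shape = {4, 3, 5};
  assert(NormalizeCrossDim(shape, -2) == 1);  // -2 wraps to axis 1, extent 3
  return 0;
}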
@@ -109,106 +324,33 @@ void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
   out->set_layout(x.layout());
 }

-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out) {
-  std::vector<int64_t> dims_x = phi::vectorize(x.dims());
-  std::vector<int64_t> dims_y = phi::vectorize(y.dims());
-  auto ndims_x = dims_x.size();
-  auto ndims_y = dims_y.size();
-  PADDLE_ENFORCE_GT(ndims_x, 0UL,
-                    phi::errors::InvalidArgument(
-                        "The Input(x) dims size must be greater than 0,"
-                        " but reviced dims size is 0. "));
-  PADDLE_ENFORCE_GT(ndims_y, 0UL,
-                    phi::errors::InvalidArgument(
-                        "The Input(y) dims size must be greater than 0,"
-                        " but reviced dims size is 0. "));
-
-  bool x_broadcasted = false, y_broadcasted = false;
-  if (ndims_x == 1) {
-    dims_x.insert(dims_x.begin(), 1);
-    ndims_x = 2;
-    x_broadcasted = true;
-  }
-  if (ndims_y == 1) {
-    dims_y.push_back(1);
-    ndims_y = 2;
-    y_broadcasted = true;
-  }
-
-  size_t M, N;
-  if (trans_x) {
-    M = dims_x[ndims_x - 1];
-  } else {
-    M = dims_x[ndims_x - 2];
-  }
-  if (trans_y) {
-    N = dims_y[ndims_y - 2];
-  } else {
-    N = dims_y[ndims_y - 1];
-  }
-
-  std::vector<int64_t> new_dims;
-  if (ndims_x > ndims_y) {
-    new_dims.assign(dims_x.begin(), dims_x.end() - 2);
-  } else if (ndims_x < ndims_y) {
-    new_dims.assign(dims_y.begin(), dims_y.end() - 2);
-  } else {
-    new_dims.reserve(ndims_x);
-    for (size_t i = 0; i < ndims_x - 2; ++i) {
-      new_dims.push_back(std::max(dims_x[i], dims_y[i]));
-    }
-  }
-  if (!x_broadcasted) {
-    new_dims.push_back(M);
-  }
-  if (!y_broadcasted) {
-    new_dims.push_back(N);
-  }
-  if (x_broadcasted && y_broadcasted) {
-    new_dims.push_back(1);
-  }
-
-  auto ddim_out = phi::make_ddim(new_dims);
-
-  out->set_dims(ddim_out);
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-}
-
 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out) {
   return ElementwiseRawInferMeta(x, y, -1, std::move(out));
 }

 void ElementwiseRawInferMeta(const MetaTensor& x,
                              const MetaTensor& y,
                              int axis,
                              MetaTensor* out) {
   if (x.dims() != y.dims()) {
     auto x_dims = x.dims();
     auto y_dims = y.dims();
     int max_dim = std::max(x_dims.size(), y_dims.size());
     if (x_dims.size() == y_dims.size()) {
       PADDLE_ENFORCE_EQ((axis == -1) || (axis == 0), true,
                         phi::errors::InvalidArgument(
                             "axis should be -1 or 0 while the dimension of "
                             "tensor X (%s) is equal to the dimension of "
                             "tensor Y (%s), but received axis: %s",
                             x_dims.size(), y_dims.size(), axis));
     }
     PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
                       phi::errors::InvalidArgument(
                           "The axis range must be [%s, %s), but axis is %s. "
                           "Please set the axis again.",
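ElementwiseRawInferMeta only accepts axis values of -1 or 0 when the two operands have the same rank, and in every case requires axis to fall in [-max_dim, max_dim). A minimal sketch of those two checks with plain asserts in place of PADDLE_ENFORCE; CheckElementwiseAxis is a hypothetical name:

#include <cassert>
#include <cstddef>

// Mirrors the axis validation shown in ElementwiseRawInferMeta above.
void CheckElementwiseAxis(size_t rank_x, size_t rank_y, int axis) {
  int max_dim = static_cast<int>(rank_x > rank_y ? rank_x : rank_y);
  if (rank_x == rank_y) {
    assert(axis == -1 || axis == 0);  // equal ranks: only -1 or 0 allowed
  }
  assert(axis >= -1 * max_dim && axis < max_dim);  // general range check
}

int main() {
  CheckElementwiseAxis(3, 3, -1);  // same rank, default axis: fine
  CheckElementwiseAxis(4, 2, 1);   // ranks differ, 1 < max_dim = 4: fine
  return 0;
}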
@@ -223,383 +365,19 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
     funcs::GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(),
                                   y_dims_array.data(), out_dims_array.data(),
                                   max_dim, axis);
     auto out_dims = phi::make_ddim(out_dims_array);
     out->set_dims(out_dims);
   } else {
     out->set_dims(x.dims());
   }

   out->set_dtype(x.dtype());
   out->set_layout(x.layout());
   out->share_lod(x);
 }

-void HuberLossInferMeta(const MetaTensor& input, const MetaTensor& label,
-                        float delta, MetaTensor* out, MetaTensor* residual,
-                        MetaConfig config) {
-  auto input_dims = input.dims();
-  auto label_dims = label.dims();
-  PADDLE_ENFORCE_EQ(input_dims.size(), label_dims.size(),
-                    phi::errors::InvalidArgument(
-                        "Input(input) rank and Input(label) rank should be "
-                        "same, but received input rank(%d) != label rank(%d)",
-                        input_dims.size(), label_dims.size()));
-  bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) ||
-                             phi::contain_unknown_dim(label_dims);
-  if (config.is_runtime || !contain_unknown_dim) {
-    PADDLE_ENFORCE_EQ(input_dims, label_dims,
-                      phi::errors::InvalidArgument(
-                          "The Input(input) and Input(label) should have the "
-                          "same shape, but received input shape [%s] != label "
-                          "shape [%s]",
-                          input_dims, label_dims));
-  }
-  auto out_dims = label_dims;
-  residual->set_dims(out_dims);
-  out->set_dims(out_dims);
-  out->share_lod(input);
-}
-
-void CholeskySolveInferMeta(const MetaTensor& x, const MetaTensor& y,
-                            bool upper, MetaTensor* out) {
-  auto x_dims = x.dims();
-  auto y_dims = y.dims();
-  auto x_dims_n = x_dims.size();
-  auto y_dims_n = y_dims.size();
-  PADDLE_ENFORCE_GE(x_dims_n, 2,
-                    phi::errors::InvalidArgument(
-                        "the rank of input Y must greater or equal to 2"));
-  PADDLE_ENFORCE_GE(y_dims_n, 2,
-                    phi::errors::InvalidArgument(
-                        "the rank of input X must greater or equal to 2"));
-  PADDLE_ENFORCE_EQ(y_dims[y_dims_n - 1], y_dims[y_dims_n - 2],
-                    phi::errors::InvalidArgument(
-                        "input Matrix Y should be square matrix,"
-                        "But Got last shape of %ld x %ld",
-                        y_dims[y_dims_n - 1], y_dims[y_dims_n - 2]));
-  PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], y_dims[y_dims_n - 2],
-                    phi::errors::InvalidArgument(
-                        "the first dim of Matrix X must be equal to "
-                        "the fisrt dim of Matrix Y,"
-                        "But Got %ld and %ld",
-                        x_dims[x_dims_n - 2], y_dims[y_dims_n - 2]));
-  std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
-  std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
-  std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
-  std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
-  std::vector<int64_t> expand_batch_portion =
-      funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
-  std::vector<int64_t> x_broadcast_dims({expand_batch_portion});
-  x_broadcast_dims.insert(x_broadcast_dims.end(),
-                          {x_dims_vec[x_dims_n - 2], x_dims_vec[x_dims_n - 1]});
-  // dim of 'out' is the same with 'X' after broadcast
-  out->set_dims(phi::make_ddim(x_broadcast_dims));
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-  out->share_lod(x);
-}
-
-void TriangularSolveInferMeta(const MetaTensor& x, const MetaTensor& y,
-                              bool upper, bool transpose, bool unitriangular,
-                              MetaTensor* out) {
-  auto x_dims = x.dims();
-  auto y_dims = y.dims();
-  auto x_dims_n = x_dims.size();
-  auto y_dims_n = y_dims.size();
-  PADDLE_ENFORCE_GE(x_dims_n, 2,
-                    phi::errors::InvalidArgument(
-                        "The input tensor X's dimensions of TriangularSolveOp "
-                        "should be >= 2. But received X's "
-                        "dimensions = %d, X's shape = [%s]",
-                        x_dims.size(), x_dims));
-  PADDLE_ENFORCE_GE(y_dims_n, 2,
-                    phi::errors::InvalidArgument(
-                        "The input tensor Y's dimensions of TriangularSolveOp "
-                        "should be >=2. But received Y's "
-                        "dimensions = %d, Y's shape = [%s]",
-                        y_dims.size(), y_dims));
-  PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], x_dims[x_dims_n - 1],
-                    phi::errors::InvalidArgument(
-                        "The inner-most 2 dimensions of Input(X) all should "
-                        "be square matrices "
-                        "But received X's shape[-2] = %d and shape[-1] = %d.",
-                        x_dims[x_dims_n - 2], x_dims[x_dims_n - 1]));
-  std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
-  std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
-  std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
-  std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
-  std::vector<int64_t> expand_batch_portion =
-      funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
-  std::vector<int64_t> y_broadcast_dims({expand_batch_portion});
-  y_broadcast_dims.insert(y_broadcast_dims.end(),
-                          {y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]});
-  // dim of 'out' is the same with 'Y' after broadcast
-  out->set_dims(phi::make_ddim(y_broadcast_dims));
-  out->set_dtype(y.dtype());
-  out->set_layout(y.layout());
-  out->share_lod(y);
-}
-
-void IndexSampleInferMeta(const MetaTensor& x, const MetaTensor& y,
-                          MetaTensor* out, MetaConfig config) {
-  auto input_dims = x.dims();
-  PADDLE_ENFORCE_EQ(input_dims.size(), 2,
-                    errors::InvalidArgument(
-                        "Inputs(X) shape of IndexSample op should be 2-D, but "
-                        "got X's shape = [%s], please check X shape.",
-                        input_dims));
-  auto index_dims = y.dims();
-  PADDLE_ENFORCE_EQ(index_dims.size(), 2,
-                    errors::InvalidArgument(
-                        "Inputs(Index) shape of IndexSample op should be 2-D, but "
-                        "got Index's shape [%s] , please check index shape.",
-                        input_dims));
-  if (config.is_runtime) {
-    PADDLE_ENFORCE_EQ(input_dims[0], index_dims[0],
-                      errors::InvalidArgument(
-                          "Inputs(X)'s value of dimension 0 must same with "
-                          "Inputs(Index)'s value of dimension 0, but "
-                          "got %d of Inputs(X), and got %d of Inputs(Index), "
-                          "please check Inputs shape.",
-                          input_dims[0], index_dims[0]));
-  }
-  out->set_dtype(x.dtype());
-  out->set_dims(index_dims);
-  out->share_lod(y);
-}
-
-void CrossInferMeta(const MetaTensor& x, const MetaTensor& y, int axis,
-                    MetaTensor* out) {
-  auto x_dim = x.dims();
-  auto y_dim = y.dims();
-  auto dim = axis;
-  bool dims_match = phi::funcs::CheckDims(x_dim, y_dim);
-  PADDLE_ENFORCE_EQ(dims_match, true,
-                    phi::errors::InvalidArgument(
-                        "The 'shape' of Input(X) should be equal to "
-                        "the 'shape' of Input(Y). But received "
-                        "Input(X).dimensions = [%s], "
-                        "Input(Y).dimensions = [%s]",
-                        x_dim, y_dim));
-  if (dim != DDim::kMaxRank) {
-    PADDLE_ENFORCE_EQ(dim < x_dim.size() && dim >= (0 - x_dim.size()), true,
-                      phi::errors::OutOfRange(
-                          "Attr(dim) is out of range, It's expected "
-                          "to be in range of [-%d, %d]. But received "
-                          "Attr(dim) = %d.",
-                          x_dim.size(), x_dim.size() - 1, dim));
-    if (dim < 0) {
-      dim += x_dim.size();
-    }
-    PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, true,
-                      phi::errors::InvalidArgument(
-                          "Input(X/Y).dims()[dim] should be equal to 3."
-                          "But received Input(X/Y).dims()[dim] = %d.",
-                          x_dim[dim]));
-  }
-  out->set_dims(x_dim);
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-  out->share_lod(x);
-}
-
-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
-  out->share_meta(x);
-}
-
-void SegmentPoolInferMeta(const MetaTensor& x, const MetaTensor& segment_ids,
-                          const std::string& pooltype, MetaTensor* out,
-                          MetaTensor* summed_ids, MetaConfig config) {
-  auto dims = x.dims();
-  dims[0] = -1;
-  out->set_dims(dims);
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-  if (pooltype == "MEAN") {
-    summed_ids->set_dims({-1, 1});
-    summed_ids->set_dtype(x.dtype());
-    summed_ids->set_layout(x.layout());
-  }
-}
-
-void BCELossInferMeta(const MetaTensor& input, const MetaTensor& label,
-                      MetaTensor* out, MetaConfig config) {
-  auto input_dims = input.dims();
-  auto label_dims = label.dims();
-  int rank = input_dims.size();
-  PADDLE_ENFORCE_EQ(rank, label_dims.size(),
-                    phi::errors::InvalidArgument(
-                        "Input(X) and Input(Label) shall have the same rank."
-                        "But received: the rank of Input(X) is [%d], "
-                        "the rank of Input(Label) is [%d].",
-                        rank, label_dims.size()));
-  bool check = true;
-  if ((!config.is_runtime) &&
-      (phi::product(input_dims) <= 0 || phi::product(label_dims) <= 0)) {
-    check = false;
-  }
-  if (check) {
-    PADDLE_ENFORCE_EQ(input_dims, label_dims,
-                      phi::errors::InvalidArgument(
-                          "Input(X) and Input(Label) shall have the same "
-                          "shape. But received: the shape of Input(X) is "
-                          "[%s], the shape of Input(Label) is [%s].",
-                          input_dims, label_dims));
-  }
-  out->set_dims(input_dims);
-  out->set_dtype(input.dtype());
-  out->share_lod(input);
-}
-
-void BincountInferMeta(const MetaTensor& x,
-                       const paddle::optional<const MetaTensor&> weights,
-                       int minlength, MetaTensor* out) {
-  auto input_dim = x.dims();
-  PADDLE_ENFORCE_GE(minlength, 0,
-                    phi::errors::InvalidArgument(
-                        "The minlength should be greater than or equal to 0."
-                        "But received minlength is %d",
-                        minlength));
-  PADDLE_ENFORCE_EQ(input_dim.size(), 1,
-                    phi::errors::InvalidArgument(
-                        "The 'shape' of Input(X) must be 1-D tensor."
-                        "But the dimension of Input(X) is [%d]",
-                        input_dim.size()));
-  if (weights.is_initialized()) {
-    auto weights_dim = weights->dims();
-    PADDLE_ENFORCE_EQ(weights_dim.size(), 1,
-                      phi::errors::InvalidArgument(
-                          "The 'shape' of Input(Weights) must be 1-D tensor."
-                          "But the dimension of Input(Weights) is [%d]",
-                          weights_dim.size()));
-    PADDLE_ENFORCE_EQ(weights_dim[0], input_dim[0],
-                      phi::errors::InvalidArgument(
-                          "The 'shape' of Input(Weights) must be equal to the "
-                          "'shape' of Input(X)."
-                          "But received: the 'shape' of Input(Weights) is [%s],"
-                          "the 'shape' of Input(X) is [%s]",
-                          weights_dim, input_dim));
-  }
-  out->set_dims(phi::make_ddim({-1}));
-  if (weights.is_initialized()) {
-    out->set_dtype(weights->dtype());
-  } else {
-    out->set_dtype(x.dtype());
-  }
-  out->share_lod(x);
-}
-
-void DistInferMeta(const MetaTensor& x, const MetaTensor& y, float p,
-                   MetaTensor* out) {
-  auto x_dims = x.dims();
-  auto y_dims = y.dims();
-  PADDLE_ENFORCE_NE(phi::product(x_dims), 0,
-                    phi::errors::InvalidArgument(
-                        "The Input(X) has not been initialized properly. The "
-                        "shape of Input(X) = [%s].",
-                        x_dims));
-  PADDLE_ENFORCE_NE(phi::product(y_dims), 0,
-                    phi::errors::InvalidArgument(
-                        "The Input(Y) has not been initialized properly. The "
-                        "shape of Input(Y) = [%s].",
-                        y_dims));
-  out->set_dims({1});
-  out->set_dtype(x.dtype());
-  out->set_layout(x.layout());
-  out->share_lod(x);
-}
-
 void GatherNdInferMeta(const MetaTensor& x,
@@ -648,6 +426,78 @@ void GatherTreeMeta(const MetaTensor& ids,
   out->set_dims(ids_dims);
 }

+void HuberLossInferMeta(const MetaTensor& input,
+                        const MetaTensor& label,
+                        float delta,
+                        MetaTensor* out,
+                        MetaTensor* residual,
+                        MetaConfig config) {
+  auto input_dims = input.dims();
+  auto label_dims = label.dims();
+
+  PADDLE_ENFORCE_EQ(input_dims.size(), label_dims.size(),
+                    phi::errors::InvalidArgument(
+                        "Input(input) rank and Input(label) rank should be "
+                        "same, but received input rank(%d) != label rank(%d)",
+                        input_dims.size(), label_dims.size()));
+
+  bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) ||
+                             phi::contain_unknown_dim(label_dims);
+  if (config.is_runtime || !contain_unknown_dim) {
+    PADDLE_ENFORCE_EQ(input_dims, label_dims,
+                      phi::errors::InvalidArgument(
+                          "The Input(input) and Input(label) should have the "
+                          "same shape, but received input shape [%s] != label "
+                          "shape [%s]",
+                          input_dims, label_dims));
+  }
+
+  auto out_dims = label_dims;
+  residual->set_dims(out_dims);
+  out->set_dims(out_dims);
+  out->share_lod(input);
+}
+
+void IndexSampleInferMeta(const MetaTensor& x,
+                          const MetaTensor& y,
+                          MetaTensor* out,
+                          MetaConfig config) {
+  auto input_dims = x.dims();
+  PADDLE_ENFORCE_EQ(input_dims.size(), 2,
+                    errors::InvalidArgument(
+                        "Inputs(X) shape of IndexSample op should be 2-D, but "
+                        "got X's shape = [%s], please check X shape.",
+                        input_dims));
+
+  auto index_dims = y.dims();
+  PADDLE_ENFORCE_EQ(index_dims.size(), 2,
+                    errors::InvalidArgument(
+                        "Inputs(Index) shape of IndexSample op should be 2-D, but "
+                        "got Index's shape [%s] , please check index shape.",
+                        input_dims));
+
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(input_dims[0], index_dims[0],
+                      errors::InvalidArgument(
+                          "Inputs(X)'s value of dimension 0 must same with "
+                          "Inputs(Index)'s value of dimension 0, but "
+                          "got %d of Inputs(X), and got %d of Inputs(Index), "
+                          "please check Inputs shape.",
+                          input_dims[0], index_dims[0]));
+  }
+  out->set_dtype(x.dtype());
+  out->set_dims(index_dims);
+  out->share_lod(y);
+}
+
 void LogLossInferMeta(const MetaTensor& input,
                       const MetaTensor& label,
                       float epsilon,
@@ -690,6 +540,79 @@ void LogLossInferMeta(const MetaTensor& input,
   out->share_lod(input);
 }

+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out) {
+  std::vector<int64_t> dims_x = phi::vectorize(x.dims());
+  std::vector<int64_t> dims_y = phi::vectorize(y.dims());
+  auto ndims_x = dims_x.size();
+  auto ndims_y = dims_y.size();
+  PADDLE_ENFORCE_GT(ndims_x, 0UL,
+                    phi::errors::InvalidArgument(
+                        "The Input(x) dims size must be greater than 0,"
+                        " but reviced dims size is 0. "));
+  PADDLE_ENFORCE_GT(ndims_y, 0UL,
+                    phi::errors::InvalidArgument(
+                        "The Input(y) dims size must be greater than 0,"
+                        " but reviced dims size is 0. "));
+
+  bool x_broadcasted = false, y_broadcasted = false;
+  if (ndims_x == 1) {
+    dims_x.insert(dims_x.begin(), 1);
+    ndims_x = 2;
+    x_broadcasted = true;
+  }
+
+  if (ndims_y == 1) {
+    dims_y.push_back(1);
+    ndims_y = 2;
+    y_broadcasted = true;
+  }
+
+  size_t M, N;
+  if (trans_x) {
+    M = dims_x[ndims_x - 1];
+  } else {
+    M = dims_x[ndims_x - 2];
+  }
+  if (trans_y) {
+    N = dims_y[ndims_y - 2];
+  } else {
+    N = dims_y[ndims_y - 1];
+  }
+
+  std::vector<int64_t> new_dims;
+  if (ndims_x > ndims_y) {
+    new_dims.assign(dims_x.begin(), dims_x.end() - 2);
+  } else if (ndims_x < ndims_y) {
+    new_dims.assign(dims_y.begin(), dims_y.end() - 2);
+  } else {
+    new_dims.reserve(ndims_x);
+    for (size_t i = 0; i < ndims_x - 2; ++i) {
+      new_dims.push_back(std::max(dims_x[i], dims_y[i]));
+    }
+  }
+  if (!x_broadcasted) {
+    new_dims.push_back(M);
+  }
+  if (!y_broadcasted) {
+    new_dims.push_back(N);
+  }
+  if (x_broadcasted && y_broadcasted) {
+    new_dims.push_back(1);
+  }
+
+  auto ddim_out = phi::make_ddim(new_dims);
+
+  out->set_dims(ddim_out);
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+}
+
 void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
   auto dim_x = x.dims();
   auto dim_vec = vec.dims();
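MatmulInferMeta derives the output shape by padding 1-D operands to matrices, picking M and N according to the transpose flags, and broadcasting the leading batch dims. A condensed, dependency-free sketch of the same arithmetic; MatmulOutDims is a hypothetical name and plain std::vector stands in for phi::DDim:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Condensed version of the shape logic in MatmulInferMeta above.
std::vector<int64_t> MatmulOutDims(std::vector<int64_t> x,
                                   std::vector<int64_t> y,
                                   bool trans_x, bool trans_y) {
  bool x_bc = false, y_bc = false;
  if (x.size() == 1) { x.insert(x.begin(), 1); x_bc = true; }  // pad to row
  if (y.size() == 1) { y.push_back(1); y_bc = true; }          // pad to column
  size_t nx = x.size(), ny = y.size();
  int64_t M = trans_x ? x[nx - 1] : x[nx - 2];
  int64_t N = trans_y ? y[ny - 2] : y[ny - 1];

  std::vector<int64_t> out;
  if (nx > ny) {
    out.assign(x.begin(), x.end() - 2);        // x supplies the batch dims
  } else if (nx < ny) {
    out.assign(y.begin(), y.end() - 2);        // y supplies the batch dims
  } else {
    for (size_t i = 0; i + 2 < nx; ++i) out.push_back(std::max(x[i], y[i]));
  }
  if (!x_bc) out.push_back(M);
  if (!y_bc) out.push_back(N);
  if (x_bc && y_bc) out.push_back(1);          // vector dot vector -> {1}
  return out;
}

int main() {
  // [2, 3, 4] x [4, 5] -> [2, 3, 5]
  assert((MatmulOutDims({2, 3, 4}, {4, 5}, false, false) ==
          std::vector<int64_t>{2, 3, 5}));
  // [4] . [4] -> [1]
  assert((MatmulOutDims({4}, {4}, false, false) == std::vector<int64_t>{1}));
  return 0;
}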
@@ -720,6 +643,25 @@ void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
   out->share_lod(x);
 }

+void SegmentPoolInferMeta(const MetaTensor& x,
+                          const MetaTensor& segment_ids,
+                          const std::string& pooltype,
+                          MetaTensor* out,
+                          MetaTensor* summed_ids,
+                          MetaConfig config) {
+  auto dims = x.dims();
+  dims[0] = -1;
+  out->set_dims(dims);
+  out->set_dtype(x.dtype());
+  out->set_layout(x.layout());
+
+  if (pooltype == "MEAN") {
+    summed_ids->set_dims({-1, 1});
+    summed_ids->set_dtype(x.dtype());
+    summed_ids->set_layout(x.layout());
+  }
+}
+
 void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             const MetaTensor& label,
                                             bool normalize,
@@ -761,4 +703,63 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
   out->share_lod(x);
 }

+void TriangularSolveInferMeta(const MetaTensor& x,
+                              const MetaTensor& y,
+                              bool upper,
+                              bool transpose,
+                              bool unitriangular,
+                              MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  auto x_dims_n = x_dims.size();
+  auto y_dims_n = y_dims.size();
+
+  PADDLE_ENFORCE_GE(x_dims_n, 2,
+                    phi::errors::InvalidArgument(
+                        "The input tensor X's dimensions of TriangularSolveOp "
+                        "should be >= 2. But received X's "
+                        "dimensions = %d, X's shape = [%s]",
+                        x_dims.size(), x_dims));
+  PADDLE_ENFORCE_GE(y_dims_n, 2,
+                    phi::errors::InvalidArgument(
+                        "The input tensor Y's dimensions of TriangularSolveOp "
+                        "should be >=2. But received Y's "
+                        "dimensions = %d, Y's shape = [%s]",
+                        y_dims.size(), y_dims));
+  PADDLE_ENFORCE_EQ(x_dims[x_dims_n - 2], x_dims[x_dims_n - 1],
+                    phi::errors::InvalidArgument(
+                        "The inner-most 2 dimensions of Input(X) all should "
+                        "be square matrices "
+                        "But received X's shape[-2] = %d and shape[-1] = %d.",
+                        x_dims[x_dims_n - 2], x_dims[x_dims_n - 1]));
+
+  std::vector<int64_t> x_dims_vec = phi::vectorize(x_dims);
+  std::vector<int64_t> y_dims_vec = phi::vectorize(y_dims);
+
+  std::vector<int64_t> x_dims_vec_cut(x_dims_vec.begin(), x_dims_vec.end() - 2);
+  std::vector<int64_t> y_dims_vec_cut(y_dims_vec.begin(), y_dims_vec.end() - 2);
+
+  std::vector<int64_t> expand_batch_portion =
+      funcs::MatrixGetBroadcastBatchPortion(x_dims_vec_cut, y_dims_vec_cut);
+
+  std::vector<int64_t> y_broadcast_dims({expand_batch_portion});
+  y_broadcast_dims.insert(y_broadcast_dims.end(),
+                          {y_dims_vec[y_dims_n - 2], y_dims_vec[y_dims_n - 1]});
+
+  // dim of 'out' is the same with 'Y' after broadcast
+  out->set_dims(phi::make_ddim(y_broadcast_dims));
+  out->set_dtype(y.dtype());
+  out->set_layout(y.layout());
+  out->share_lod(y);
+}
+
 }  // namespace phi
paddle/phi/infermeta/binary.h

@@ -29,23 +29,44 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.

-void CompareInferMeta(const MetaTensor& x,
-                      const MetaTensor& y,
-                      int axis,
-                      MetaTensor* out);
+void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+
+void BCELossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());
+
+void BincountInferMeta(const MetaTensor& x,
+                       const paddle::optional<const MetaTensor&> weights,
+                       int minlength,
+                       MetaTensor* out);
+
+void CholeskySolveInferMeta(const MetaTensor& x,
+                            const MetaTensor& y,
+                            bool upper,
+                            MetaTensor* out);

 void CompareAllInferMeta(const MetaTensor& x,
                          const MetaTensor& y,
                          MetaTensor* out);

-void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void CompareInferMeta(const MetaTensor& x,
+                      const MetaTensor& y,
+                      int axis,
+                      MetaTensor* out);

-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out);
+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out);
+
+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out);
+
+void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);

 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out);
@@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
                              int axis,
                              MetaTensor* out);

+void GatherNdInferMeta(const MetaTensor& x,
+                       const MetaTensor& index,
+                       MetaTensor* out);
+
+void GatherTreeMeta(const MetaTensor& ids,
+                    const MetaTensor& parents,
+                    MetaTensor* out);
+
 void HuberLossInferMeta(const MetaTensor& input_meta,
                         const MetaTensor& label_meta,
                         float delta,
@@ -62,68 +91,32 @@ void HuberLossInferMeta(const MetaTensor& input_meta,
                         MetaTensor* residual,
                         MetaConfig config = MetaConfig());

-void CholeskySolveInferMeta(const MetaTensor& x,
-                            const MetaTensor& y,
-                            bool upper,
-                            MetaTensor* out);
-
-void TriangularSolveInferMeta(const MetaTensor& x,
-                              const MetaTensor& y,
-                              bool upper,
-                              bool transpose,
-                              bool unitriangular,
-                              MetaTensor* out);
-
 void IndexSampleInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out,
                           MetaConfig config = MetaConfig());

-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out);
-
-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
-
-void SegmentPoolInferMeta(const MetaTensor& x,
-                          const MetaTensor& segment_ids,
-                          const std::string& pooltype,
-                          MetaTensor* out,
-                          MetaTensor* summed_ids,
-                          MetaConfig config = MetaConfig());
-
-void BCELossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      MetaTensor* out,
-                      MetaConfig config = MetaConfig());
-
-void BincountInferMeta(const MetaTensor& x,
-                       const paddle::optional<const MetaTensor&> weights,
-                       int minlength,
-                       MetaTensor* out);
-
-void DistInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   float p,
-                   MetaTensor* out);
-
-void GatherNdInferMeta(const MetaTensor& x,
-                       const MetaTensor& index,
-                       MetaTensor* out);
-
-void GatherTreeMeta(const MetaTensor& ids,
-                    const MetaTensor& parents,
-                    MetaTensor* out);
-
-void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
-
 void LogLossInferMeta(const MetaTensor& input,
                       const MetaTensor& label,
                       float epsilon,
                       MetaTensor* out,
                       MetaConfig config = MetaConfig());

+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out);
+
+void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
+
+void SegmentPoolInferMeta(const MetaTensor& x,
+                          const MetaTensor& segment_ids,
+                          const std::string& pooltype,
+                          MetaTensor* out,
+                          MetaTensor* summed_ids,
+                          MetaConfig config = MetaConfig());
+
 void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             const MetaTensor& label,
                                             bool normalize,
@@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             MetaTensor* out,
                                             MetaConfig config = MetaConfig());

+void TriangularSolveInferMeta(const MetaTensor& x,
+                              const MetaTensor& y,
+                              bool upper,
+                              bool transpose,
+                              bool unitriangular,
+                              MetaTensor* out);
+
 }  // namespace phi
paddle/phi/infermeta/nullary.cc

@@ -16,6 +16,12 @@ limitations under the License. */
 namespace phi {

+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out) {
+  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
+}
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
@@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector<int64_t>& shape,
   out->set_layout(layout);
 }

-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out) {
-  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
-}
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
@@ -41,25 +41,25 @@ void EyeInferMeta(int64_t num_rows,
   out->set_dtype(dtype);
 }

-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out) {
-  auto out_dims = phi::make_ddim(shape);
+void GaussianRandomInferMeta(const ScalarArray& shape,
+                             float mean,
+                             float std,
+                             int seed,
+                             DataType dtype,
+                             MetaTensor* out) {
+  auto out_dims = phi::make_ddim(shape.GetData());
   out->set_dims(out_dims);
   out->set_dtype(dtype);
   out->set_layout(DataLayout::NCHW);
 }

-void GaussianRandomInferMeta(const ScalarArray& shape,
-                             float mean,
-                             float std,
-                             int seed,
-                             DataType dtype,
-                             MetaTensor* out) {
-  auto out_dims = phi::make_ddim(shape.GetData());
+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
+                                      DataType dtype,
+                                      MetaTensor* out) {
+  auto out_dims = phi::make_ddim(shape);
   out->set_dims(out_dims);
   out->set_dtype(dtype);
   out->set_layout(DataLayout::NCHW);
paddle/phi/infermeta/nullary.h

@@ -28,26 +28,26 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.

+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
                          MetaTensor* out);

-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
                   MetaTensor* out);

-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+void GaussianRandomInferMeta(const ScalarArray& shape,
                              float mean,
                              float std,
                              int seed,
                              DataType dtype,
                              MetaTensor* out);

-void GaussianRandomInferMeta(const ScalarArray& shape,
+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
                              float mean,
                              float std,
                              int seed,
paddle/phi/infermeta/ternary.cc

@@ -18,6 +18,58 @@ limitations under the License. */
 namespace phi {

+void AccuracyInferMeta(const MetaTensor& out,
+                       const MetaTensor& indice,
+                       const MetaTensor& label,
+                       MetaTensor* accuracy,
+                       MetaTensor* correct,
+                       MetaTensor* total,
+                       MetaConfig config) {
+  auto inference_dim = out.dims();
+  auto label_dim = label.dims();
+  // Assume indices has same shape as inference, because
+  // it's the output of topk.
+  PADDLE_ENFORCE_EQ(label_dim.size(), 2,
+                    phi::errors::InvalidArgument(
+                        "ShapeError: label's dimensions of AccuracyOp must be 2. "
+                        "But received label's dimensions = %d, label's shape = [%s]",
+                        label_dim.size(), label_dim));
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(label_dim[1], 1,
+                      phi::errors::InvalidArgument(
+                          "ShapeError: label's second dimension of "
+                          "AccuracyOp must be 1. But received label's "
+                          "second dimension is = %d, label's shape = [%s]",
+                          label_dim[1], label_dim));
+    PADDLE_ENFORCE_EQ(
+        inference_dim[0], label_dim[0],
+        phi::errors::InvalidArgument(
+            "ShapeError: the output's num_rows of AccuracyOp must be"
+            " the same as label's num_rows. But received output's "
+            "shape = [%s], label's shape = [%s], output's num_rows = %d, "
+            "label's num_rows = %d",
+            inference_dim, label_dim, inference_dim[0], label_dim[0]));
+  }
+
+  accuracy->set_dims({1});
+  accuracy->set_dtype(out.dtype());
+  correct->set_dims({1});
+  correct->set_dtype(out.dtype());
+  total->set_dims({1});
+  total->set_dtype(out.dtype());
+  accuracy->share_lod(out);
+}
+
 void AddmmInferMeta(const MetaTensor& input,
                     const MetaTensor& x,
                     const MetaTensor& y,
@@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input,
   out->set_dtype(input.dtype());
 }

+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count) {
+  auto src_index_dims = src_index.dims();
+  if (src_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(src_index_dims[1], 1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Src_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          src_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(
+        src_index_dims.size(), 1,
+        phi::errors::InvalidArgument(
+            "The Src_index should be 1D, when it is not 2D, but we get %d",
+            src_index_dims.size()));
+  }
+
+  auto dst_index_dims = dst_index.dims();
+  if (dst_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(dst_index_dims[1], 1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Dst_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          dst_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(dst_index_dims.size(), 1,
+                      phi::errors::InvalidArgument(
+                          "The Dst_index should be 1D, "
+                          "when it is not 2D, but we get %d",
+                          dst_index_dims.size()));
+  }
+
+  PADDLE_ENFORCE_EQ(src_index_dims[0], dst_index_dims[0],
+                    phi::errors::InvalidArgument(
+                        "Src_index and Dst_index should have the same shape."));
+
+  auto dims = x.dims();
+  out->set_dims(dims);
+  out->set_dtype(x.dtype());
+
+  if (pool_type == "MEAN") {
+    dst_count->set_dims({dims[0]});
+    dst_count->set_dtype(DataType::INT32);
+  }
+}
+
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  auto w_dims = weight.dims();
+  DDim out_dims;
+  out_dims = funcs::GetOutputDims(x_dims, y_dims);
+  if (w_dims.size() > 1 || w_dims[0] != 1) {
+    out_dims = funcs::GetOutputDims(out_dims, w_dims);
+  }
+  out->set_dims(out_dims);
+  out->set_dtype(x.dtype());
+  out->share_lod(x);
+}
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out) {
+  auto s_dims = start.dims();
+  PADDLE_ENFORCE_EQ(
+      (s_dims.size() == 1) && (s_dims[0] == 1), true,
+      phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
+                                   "but received input shape is [%s].",
+                                   s_dims));
+  auto e_dims = stop.dims();
+  PADDLE_ENFORCE_EQ(
+      (e_dims.size() == 1) && (e_dims[0] == 1), true,
+      phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
+                                   "but received input shape is [%s].",
+                                   e_dims));
+  auto step_dims = number.dims();
+  PADDLE_ENFORCE_EQ(
+      (step_dims.size() == 1) && (step_dims[0] == 1), true,
+      phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
+                                   "but received input shape is [%s].",
+                                   step_dims));
+  out->set_dims(phi::make_ddim({-1}));
+  out->set_dtype(start.dtype());
+}
+
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
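LerpInferMeta broadcasts x against y first, then broadcasts the result against weight unless weight is a single-element 1-D tensor. A minimal sketch of that two-step rule, where a generic right-aligned broadcast stands in for funcs::GetOutputDims (an assumption; the real helper may differ in error handling):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Generic right-aligned broadcast, standing in for funcs::GetOutputDims.
std::vector<int64_t> Broadcast(std::vector<int64_t> a, std::vector<int64_t> b) {
  if (a.size() < b.size()) std::swap(a, b);
  size_t off = a.size() - b.size();
  for (size_t i = 0; i < b.size(); ++i) {
    assert(a[off + i] == b[i] || a[off + i] == 1 || b[i] == 1);
    a[off + i] = std::max(a[off + i], b[i]);
  }
  return a;
}

std::vector<int64_t> LerpOutDims(const std::vector<int64_t>& x,
                                 const std::vector<int64_t>& y,
                                 const std::vector<int64_t>& w) {
  std::vector<int64_t> out = Broadcast(x, y);
  // Same condition as the diff: a scalar-like {1} weight is skipped.
  if (w.size() > 1 || w[0] != 1) out = Broadcast(out, w);
  return out;
}

int main() {
  assert((LerpOutDims({2, 1, 4}, {3, 1}, {1}) == std::vector<int64_t>{2, 3, 4}));
  return 0;
}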
@@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
   scores->set_dtype(length.dtype());
 }

-void LerpInferMeta(const MetaTensor& x, const MetaTensor& y,
-                   const MetaTensor& weight, MetaTensor* out) {
-  auto x_dims = x.dims();
-  auto y_dims = y.dims();
-  auto w_dims = weight.dims();
-  DDim out_dims;
-  out_dims = funcs::GetOutputDims(x_dims, y_dims);
-  if (w_dims.size() > 1 || w_dims[0] != 1) {
-    out_dims = funcs::GetOutputDims(out_dims, w_dims);
-  }
-  out->set_dims(out_dims);
-  out->set_dtype(x.dtype());
-  out->share_lod(x);
-}
-
-void LinspaceInferMeta(const MetaTensor& start, const MetaTensor& stop,
-                       const MetaTensor& number, MetaTensor* out) {
-  auto s_dims = start.dims();
-  PADDLE_ENFORCE_EQ(
-      (s_dims.size() == 1) && (s_dims[0] == 1), true,
-      phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
-                                   "but received input shape is [%s].",
-                                   s_dims));
-  auto e_dims = stop.dims();
-  PADDLE_ENFORCE_EQ(
-      (e_dims.size() == 1) && (e_dims[0] == 1), true,
-      phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
-                                   "but received input shape is [%s].",
-                                   e_dims));
-  auto step_dims = number.dims();
-  PADDLE_ENFORCE_EQ(
-      (step_dims.size() == 1) && (step_dims[0] == 1), true,
-      phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
-                                   "but received input shape is [%s].",
-                                   step_dims));
-  out->set_dims(phi::make_ddim({-1}));
-  out->set_dtype(start.dtype());
-}
-
-void AccuracyInferMeta(const MetaTensor& out, const MetaTensor& indice,
-                       const MetaTensor& label, MetaTensor* accuracy,
-                       MetaTensor* correct, MetaTensor* total,
-                       MetaConfig config) {
-  auto inference_dim = out.dims();
-  auto label_dim = label.dims();
-  // Assume indices has same shape as inference, because
-  // it's the output of topk.
-  PADDLE_ENFORCE_EQ(label_dim.size(), 2,
-                    phi::errors::InvalidArgument(
-                        "ShapeError: label's dimensions of AccuracyOp must be 2. "
-                        "But received label's dimensions = %d, label's shape = [%s]",
-                        label_dim.size(), label_dim));
-  if (config.is_runtime) {
-    PADDLE_ENFORCE_EQ(label_dim[1], 1,
-                      phi::errors::InvalidArgument(
-                          "ShapeError: label's second dimension of "
-                          "AccuracyOp must be 1. But received label's "
-                          "second dimension is = %d, label's shape = [%s]",
-                          label_dim[1], label_dim));
-    PADDLE_ENFORCE_EQ(
-        inference_dim[0], label_dim[0],
-        phi::errors::InvalidArgument(
-            "ShapeError: the output's num_rows of AccuracyOp must be"
-            " the same as label's num_rows. But received output's "
-            "shape = [%s], label's shape = [%s], output's num_rows = %d, "
-            "label's num_rows = %d",
-            inference_dim, label_dim, inference_dim[0], label_dim[0]));
-  }
-  accuracy->set_dims({1});
-  accuracy->set_dtype(out.dtype());
-  correct->set_dims({1});
-  correct->set_dtype(out.dtype());
-  total->set_dims({1});
-  total->set_dtype(out.dtype());
-  accuracy->share_lod(out);
-}
-
-void GraphSendRecvInferMeta(const MetaTensor& x, const MetaTensor& src_index,
-                            const MetaTensor& dst_index,
-                            const std::string& pool_type, MetaTensor* out,
-                            MetaTensor* dst_count) {
-  auto src_index_dims = src_index.dims();
-  if (src_index_dims.size() == 2) {
-    PADDLE_ENFORCE_EQ(src_index_dims[1], 1,
-                      phi::errors::InvalidArgument(
-                          "The last dim of Src_index should be 1 when it "
-                          "is 2D, but we get %d",
-                          src_index_dims[1]));
-  } else {
-    PADDLE_ENFORCE_EQ(
-        src_index_dims.size(), 1,
-        phi::errors::InvalidArgument(
-            "The Src_index should be 1D, when it is not 2D, but we get %d",
-            src_index_dims.size()));
-  }
-  auto dst_index_dims = dst_index.dims();
-  if (dst_index_dims.size() == 2) {
-    PADDLE_ENFORCE_EQ(dst_index_dims[1], 1,
-                      phi::errors::InvalidArgument(
-                          "The last dim of Dst_index should be 1 when it "
-                          "is 2D, but we get %d",
-                          dst_index_dims[1]));
-  } else {
-    PADDLE_ENFORCE_EQ(dst_index_dims.size(), 1,
-                      phi::errors::InvalidArgument(
-                          "The Dst_index should be 1D, "
-                          "when it is not 2D, but we get %d",
-                          dst_index_dims.size()));
-  }
-  PADDLE_ENFORCE_EQ(src_index_dims[0], dst_index_dims[0],
-                    phi::errors::InvalidArgument(
-                        "Src_index and Dst_index should have the same shape."));
-  auto dims = x.dims();
-  out->set_dims(dims);
-  out->set_dtype(x.dtype());
-  if (pool_type == "MEAN") {
-    dst_count->set_dims({dims[0]});
-    dst_count->set_dtype(DataType::INT32);
-  }
-}
-
 }  // namespace phi
paddle/phi/infermeta/ternary.h

@@ -45,15 +45,21 @@ void AddmmInferMeta(const MetaTensor& input,
                     float beta,
                     MetaTensor* out);

-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad);
-
-void ScatterInferMeta(const MetaTensor& x,
-                      const MetaTensor& index,
-                      const MetaTensor& updates,
-                      bool overwrite,
-                      MetaTensor* out);
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count);
+
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out);
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out);

 void NllLossRawInferMeta(const MetaTensor& input,
@@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input,
                          MetaTensor* total_weight,
                          MetaConfig config = MetaConfig());

+void ScatterInferMeta(const MetaTensor& x,
+                      const MetaTensor& index,
+                      const MetaTensor& updates,
+                      bool overwrite,
+                      MetaTensor* out);
+
 void ScatterNdAddInferMeta(const MetaTensor& x,
                            const MetaTensor& index,
                            const MetaTensor& updates,
@@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
                             MetaTensor* path,
                             MetaConfig config = MetaConfig());

-void LerpInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   const MetaTensor& weight,
-                   MetaTensor* out);
-
-void LinspaceInferMeta(const MetaTensor& start,
-                       const MetaTensor& stop,
-                       const MetaTensor& number,
-                       MetaTensor* out);
-
-void GraphSendRecvInferMeta(const MetaTensor& x,
-                            const MetaTensor& src_index,
-                            const MetaTensor& dst_index,
-                            const std::string& pool_type,
-                            MetaTensor* out,
-                            MetaTensor* dst_count);
-
 }  // namespace phi
paddle/phi/infermeta/unary.cc

@@ -26,6 +26,82 @@ limitations under the License. */
 namespace phi {

+void ArgMinMaxInferMeta(const MetaTensor& x,
+                        int64_t axis,
+                        bool keepdims,
+                        bool flatten,
+                        int dtype,
+                        MetaTensor* out,
+                        MetaConfig config) {
+  const auto& x_dims = x.dims();
+
+  PADDLE_ENFORCE_GE(axis, -x_dims.size(),
+                    phi::errors::InvalidArgument(
+                        "'axis'(%d) must be greater than or equal to"
+                        " -Rank(X)(%d).",
+                        axis, -x_dims.size()));
+  PADDLE_ENFORCE_LT(
+      axis, x_dims.size(),
+      phi::errors::InvalidArgument(
+          "'axis'(%d) must be less than Rank(X)(%d) of Input(X).",
+          axis, x_dims.size()));
+
+  PADDLE_ENFORCE_EQ(
+      (dtype < 0 || dtype == 2 || dtype == 3), true,
+      phi::errors::InvalidArgument(
+          "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
+          "received [%s]",
+          paddle::framework::DataTypeToString(
+              paddle::framework::proto::VarType::INT32),
+          paddle::framework::DataTypeToString(
+              paddle::framework::proto::VarType::INT64),
+          paddle::framework::DataTypeToString(
+              static_cast<paddle::framework::proto::VarType::Type>(dtype))));
+
+  auto x_rank = x_dims.size();
+  if (axis < 0) axis += x_rank;
+  if (config.is_runtime) {
+    if (dtype == paddle::framework::proto::VarType::INT32) {
+      int64_t all_element_num = 0;
+      if (flatten) {
+        all_element_num = phi::product(x_dims);
+      } else {
+        all_element_num = x_dims[axis];
+      }
+      PADDLE_ENFORCE_LE(
+          all_element_num, INT_MAX,
+          phi::errors::InvalidArgument(
+              "The element num of the argmin/argmax input at axis is "
+              "%d, is larger than int32 maximum value:%d, you must "
+              "set the dtype of argmin/argmax to 'int64'.",
+              all_element_num, INT_MAX));
+    }
+  }
+  std::vector<int64_t> vec;
+  if (flatten) {
+    vec.emplace_back(static_cast<int64_t>(1));
+  } else {
+    for (int64_t i = 0; i < axis; i++) vec.emplace_back(x_dims[i]);
+    if (keepdims) {
+      vec.emplace_back(static_cast<int64_t>(1));
+    }
+    for (int64_t i = axis + 1; i < x_rank; i++) vec.emplace_back(x_dims[i]);
+  }
+  out->set_dims(phi::make_ddim(vec));
+  if (dtype == 2) {
+    out->set_dtype(DataType::INT32);
+  } else if (dtype == 3) {
+    out->set_dtype(DataType::INT64);
+  }
+}
+
 void ArgsortInferMeta(const MetaTensor& input,
                       int axis,
                       bool descending,
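ArgMinMaxInferMeta builds the output shape by dropping the reduced axis (or keeping it as 1 when keepdims is set), or collapsing everything to {1} when flatten is set; the dtype codes 2 and 3 checked above are the proto enum values for INT32 and INT64. A standalone sketch of just the shape rule; ArgMinMaxOutDims is a hypothetical name:

#include <cassert>
#include <cstdint>
#include <vector>

// Shape rule from ArgMinMaxInferMeta above: flatten -> {1}; otherwise drop
// the reduced axis, keeping it as extent 1 when keepdims is set.
std::vector<int64_t> ArgMinMaxOutDims(const std::vector<int64_t>& x,
                                      int64_t axis, bool keepdims,
                                      bool flatten) {
  std::vector<int64_t> vec;
  if (flatten) {
    vec.push_back(1);
    return vec;
  }
  int64_t rank = static_cast<int64_t>(x.size());
  if (axis < 0) axis += rank;
  for (int64_t i = 0; i < axis; ++i) vec.push_back(x[i]);
  if (keepdims) vec.push_back(1);
  for (int64_t i = axis + 1; i < rank; ++i) vec.push_back(x[i]);
  return vec;
}

int main() {
  assert((ArgMinMaxOutDims({2, 5, 3}, 1, false, false) ==
          std::vector<int64_t>{2, 3}));
  assert((ArgMinMaxOutDims({2, 5, 3}, -1, true, false) ==
          std::vector<int64_t>{2, 5, 1}));
  return 0;
}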
@@ -54,96 +130,6 @@ void ArgsortInferMeta(const MetaTensor& input,
indices
->
share_lod
(
input
);
}
void
UnchangedInferMeta
(
const
MetaTensor
&
x
,
MetaTensor
*
out
)
{
out
->
share_meta
(
x
);
}
// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void
UnchangedInferMetaCheckAxis
(
const
MetaTensor
&
x
,
int
axis
,
MetaTensor
*
out
)
{
auto
rank
=
x
.
dims
().
size
();
PADDLE_ENFORCE_GE
(
axis
,
-
rank
,
errors
::
InvalidArgument
(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(X). But received axis: %d, R: %d."
,
axis
,
rank
));
PADDLE_ENFORCE_LT
(
axis
,
rank
,
phi
::
errors
::
InvalidArgument
(
"Attr(axis) value should be in range [-R, R-1], "
"R is the rank of Input(X). But received axis: %d, R: %d."
,
axis
,
rank
));
out
->
share_meta
(
x
);
}
void
RealAndImagInferMeta
(
const
MetaTensor
&
x
,
MetaTensor
*
out
)
{
out
->
set_dims
(
x
.
dims
());
out
->
set_dtype
(
dtype
::
ToReal
(
x
.
dtype
()));
out
->
set_layout
(
x
.
layout
());
}
void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out) {
  auto x_dims = x.dims();
  int in_dims_size = x_dims.size();
  if (start_axis < 0) {
    start_axis = start_axis + in_dims_size;
  }
  if (stop_axis < 0) {
    stop_axis = stop_axis + in_dims_size;
  }
  PADDLE_ENFORCE_GE(stop_axis,
                    start_axis,
                    phi::errors::InvalidArgument(
                        "The stop_axis should be greater "
                        "than or equal to start_axis."));

  int64_t outer = 1;
  std::vector<int32_t> out_shape;
  out_shape.reserve(in_dims_size - stop_axis + start_axis);

  for (int i = 0; i < start_axis; ++i) {
    out_shape.push_back(x_dims[i]);
  }
  for (int i = start_axis; i <= stop_axis; i++) {
    if (x_dims[i] == -1 || outer == -1) {
      outer = -1;
    } else {
      outer *= x_dims[i];
    }
  }
  out_shape.push_back(outer);
  for (int i = stop_axis + 1; i < in_dims_size; i++) {
    out_shape.push_back(x_dims[i]);
  }
  const auto& out_dims = phi::make_ddim(out_shape);
  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());

  if (x_dims[0] == out_dims[0]) {
    // Only pass LoD when the first dimension of output and Input(X)
    // are the same.
    out->share_lod(x);
  }
}
void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out) {
  UnchangedInferMetaCheckAxis(x, axis, out);
}

void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(out_dtype);
...
...
@@ -203,50 +189,252 @@ void CumsumInferMeta(const MetaTensor& x,
  out->share_lod(x);
}
void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) {
  PADDLE_ENFORCE_EQ(
      product(x.dims()),
      1UL,
      errors::InvalidArgument("The number of elements in Input(X) should be 1. "
                              "Now the number is %d.",
                              product(x.dims())));
  out->set_dims(x.dims());
  out->share_lod(x);
}

void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out) {
  auto x_dims = x.dims();

  if (x_dims.size() == 1UL) {
    int64_t size_ = x_dims[0] + std::abs(offset);
    out->set_dims({size_, size_});
    out->set_dtype(x.dtype());
  } else if (x_dims.size() == 2UL) {
    int64_t size_ = 0;
    if (offset >= 0) {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] < x_dims[1] - offset) {
        size_ = x_dims[0];
      } else {
        size_ = x_dims[1] - offset;
      }
    } else {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] + offset < x_dims[1]) {
        size_ = x_dims[0] + offset;
      } else {
        size_ = x_dims[1];
      }
    }
    out->set_dims({size_});
    out->set_dtype(x.dtype());
  } else {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The input tensor X's dimensions of DiagV2Op should be either 1 or "
        "2, but received %d.",
        x_dims.size()));
  }
}
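The diag shape rule above: a 1-D input of length n yields an (n+|offset|) x (n+|offset|) matrix, while a 2-D input yields the length of the chosen diagonal. A standalone sketch of that arithmetic (illustrative helper, plain std types), mirroring the branchy min used above:

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>

std::vector<int64_t> DiagOutShape(const std::vector<int64_t>& x_dims,
                                  int offset) {
  if (x_dims.size() == 1) {
    int64_t n = x_dims[0] + std::abs(offset);
    return {n, n};
  }
  // 2-D case: length of the diagonal selected by `offset`.
  int64_t size = 0;
  if (offset >= 0) {
    size = x_dims[0] < x_dims[1] - offset ? x_dims[0] : x_dims[1] - offset;
  } else {
    size = x_dims[0] + offset < x_dims[1] ? x_dims[0] + offset : x_dims[1];
  }
  return {size};
}

int main() {
  std::cout << DiagOutShape({4}, 1)[0] << '\n';     // 5 (a 5x5 matrix)
  std::cout << DiagOutShape({3, 5}, 2)[0] << '\n';  // 3
}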
static phi::DDim ValidateShape(const std::vector<int64_t> shape,
                               const phi::DDim& in_dims) {
  const int64_t in_size = phi::product(in_dims);
  auto in_dims_vec = phi::vectorize(in_dims);
  bool all_positive = std::all_of(in_dims_vec.cbegin(),
                                  in_dims_vec.cend(),
                                  [](int64_t i) { return i > 0; });
  // only one dimension can be set to -1, whose size will be automatically
  // infered.
  const int64_t unk_dim_val = -1;
  const int64_t copy_dim_val = 0;

  std::vector<int64_t> output_shape(shape.size(), 0);
  int64_t capacity = 1;
  int unk_dim_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == unk_dim_val) {
      PADDLE_ENFORCE_EQ(
          unk_dim_idx,
          -1,
          phi::errors::InvalidArgument(
              "Only one dimension value of 'shape' in ReshapeOp can "
              "be -1. But received shape = [%s], shape[%d] is also -1.",
              phi::make_ddim(shape),
              i));
      unk_dim_idx = i;
    } else if (shape[i] == copy_dim_val) {
      PADDLE_ENFORCE_LT(
          static_cast<int>(i),
          in_dims.size(),
          phi::errors::InvalidArgument(
              "The index of 0 in `shape` must be less than "
...

void DiagonalInferMeta(
    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out) {
  auto x_dims = input.dims();
  int offset_ = offset;
  int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1;
  int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2;

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      phi::errors::OutOfRange("Input's dim is out of range (expected at "
                              "least 2 dimensions, but got %ld).",
                              x_dims.size()));
  PADDLE_ENFORCE_LT(
      axis1_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis1) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis1));
  PADDLE_ENFORCE_LT(
      axis2_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis2) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis2));
  PADDLE_ENFORCE_NE(
      axis1_,
      axis2_,
      phi::errors::InvalidArgument("The dimensions should not be identical "
                                   "%d vs %d.",
                                   axis1,
                                   axis2));

  auto out_dims = vectorize(x_dims);
  // from out_dims get the dim size of axis1_.
  auto axis1_size = out_dims[axis1_];
  auto axis2_size = out_dims[axis2_];
  // delete two dims by attr axis1 and axis2 from out_dims.
  /* example:
     out_dim = [2, 3, 4];
     axis1 = 0;
     axis2 = 1;
     according to the attr of axis1 and axis2, we get:
     out_dim = [4].
  */
  out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_));
  out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_));

  if (offset_ == 0) {
    out_dims.push_back(std::min(axis1_size, axis2_size));
  } else if (offset_ > 0) {
    if ((axis2_size - offset_) > 0) {
      out_dims.push_back(std::min(axis1_size, axis2_size - offset_));
    } else {
      out_dims.push_back(0);
    }
  } else {
    if ((axis1_size + offset_) > 0) {
      out_dims.push_back(std::min(axis1_size + offset_, axis2_size));
    } else {
      out_dims.push_back(0);
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
}
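In shape terms, DiagonalInferMeta removes axis1 and axis2 and appends the diagonal length, clamped at zero when the offset walks off the matrix. A standalone sketch of that arithmetic (illustrative helper, plain std types):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Remove axis1/axis2 from the shape, then append the diagonal length,
// mirroring the offset handling in DiagonalInferMeta above.
std::vector<int64_t> DiagonalOutShape(std::vector<int64_t> dims,
                                      int axis1, int axis2, int offset) {
  int64_t a = dims[axis1], b = dims[axis2];
  dims.erase(dims.begin() + std::max(axis1, axis2));
  dims.erase(dims.begin() + std::min(axis1, axis2));
  int64_t diag = 0;
  if (offset == 0)
    diag = std::min(a, b);
  else if (offset > 0)
    diag = std::max<int64_t>(std::min(a, b - offset), 0);
  else
    diag = std::max<int64_t>(std::min(a + offset, b), 0);
  dims.push_back(diag);
  return dims;
}

int main() {
  for (int64_t d : DiagonalOutShape({2, 3, 4}, 0, 1, 0))
    std::cout << d << ' ';  // prints: 4 2
}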
void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v) {
  auto input_dim = x.dims();
  auto rank = input_dim.size();

  PADDLE_ENFORCE_GE(rank,
                    2,
                    phi::errors::InvalidArgument(
                        "The Input(X) should have at least 2 dimensions. "
                        "But received a %d dimension tensor.",
                        rank));
  PADDLE_ENFORCE_EQ(
      input_dim[rank - 2],
      input_dim[rank - 1],
      phi::errors::InvalidArgument(
          "Eigh op is designed for square matrix, consequently "
          "inner-most 2 dimensions of Input(X) should be symmetric. "
          "But received X's shape[-2] = %d and shape[-1] = %d.",
          input_dim[rank - 2],
          input_dim[rank - 1]));

  std::vector<int64_t> values_dim;
  for (auto i = 0; i < rank - 1; i++) {
    values_dim.emplace_back(input_dim[i]);
  }
  out_w->set_dims(phi::make_ddim(values_dim));
  out_v->set_dims(input_dim);
}
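Shape-wise: for a batched square input [..., n, n], the eigenvalues output takes every dimension but the last, and the eigenvectors output keeps the input shape. A shape-only sketch (illustrative, plain std types):

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> x = {5, 3, 3};  // a batch of five 3x3 matrices
  std::vector<int64_t> w(x.begin(), x.end() - 1);  // eigenvalues: [5, 3]
  std::vector<int64_t> v = x;                      // eigenvectors: [5, 3, 3]
  for (int64_t d : w) std::cout << d << ' ';       // prints: 5 3
}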
void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out) {
  auto x_dims = x.dims();
  int in_dims_size = x_dims.size();
  if (start_axis < 0) {
    start_axis = start_axis + in_dims_size;
  }
  if (stop_axis < 0) {
    stop_axis = stop_axis + in_dims_size;
  }
  PADDLE_ENFORCE_GE(stop_axis,
                    start_axis,
                    phi::errors::InvalidArgument(
                        "The stop_axis should be greater "
                        "than or equal to start_axis."));

  int64_t outer = 1;
  std::vector<int32_t> out_shape;
  out_shape.reserve(in_dims_size - stop_axis + start_axis);

  for (int i = 0; i < start_axis; ++i) {
    out_shape.push_back(x_dims[i]);
  }
  for (int i = start_axis; i <= stop_axis; i++) {
    if (x_dims[i] == -1 || outer == -1) {
      outer = -1;
    } else {
      outer *= x_dims[i];
    }
  }
  out_shape.push_back(outer);
  for (int i = stop_axis + 1; i < in_dims_size; i++) {
    out_shape.push_back(x_dims[i]);
  }
  const auto& out_dims = phi::make_ddim(out_shape);
  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());

  if (x_dims[0] == out_dims[0]) {
    // Only pass LoD when the first dimension of output and Input(X)
    // are the same.
    out->share_lod(x);
  }
}

void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out) {
  UnchangedInferMetaCheckAxis(x, axis, out);
}

void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) {
  PADDLE_ENFORCE_EQ(
      product(x.dims()),
      1UL,
      errors::InvalidArgument("The number of elements in Input(X) should be 1. "
                              "Now the number is %d.",
                              product(x.dims())));
  out->set_dims(x.dims());
  out->share_lod(x);
  out->set_dtype(x.dtype());
}
static phi::DDim ValidateShape(const std::vector<int64_t> shape,
                               const phi::DDim& in_dims) {
  const int64_t in_size = phi::product(in_dims);
  auto in_dims_vec = phi::vectorize(in_dims);
  bool all_positive = std::all_of(in_dims_vec.cbegin(),
                                  in_dims_vec.cend(),
                                  [](int64_t i) { return i > 0; });
  // only one dimension can be set to -1, whose size will be automatically
  // infered.
  const int64_t unk_dim_val = -1;
  const int64_t copy_dim_val = 0;

  std::vector<int64_t> output_shape(shape.size(), 0);
  int64_t capacity = 1;
  int unk_dim_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == unk_dim_val) {
      PADDLE_ENFORCE_EQ(
          unk_dim_idx,
          -1,
          phi::errors::InvalidArgument(
              "Only one dimension value of 'shape' in ReshapeOp can "
              "be -1. But received shape = [%s], shape[%d] is also -1.",
              phi::make_ddim(shape),
              i));
      unk_dim_idx = i;
    } else if (shape[i] == copy_dim_val) {
      PADDLE_ENFORCE_LT(
          static_cast<int>(i),
          in_dims.size(),
          phi::errors::InvalidArgument(
              "The index of 0 in `shape` must be less than "
              "the input tensor X's dimensions. "
              "But received shape = [%s], shape[%d] = 0, X's shape = [%s], "
              "X's dimensions = %d.",
...
...
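The two sentinel values handled above are the whole reshape contract: -1 marks the single dimension to infer from the element count, 0 copies the input dimension at the same index. A standalone sketch of that resolution (illustrative helper, plain std types; validation reduced to an assert):

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> ResolveShape(const std::vector<int64_t>& shape,
                                  const std::vector<int64_t>& in_dims) {
  int64_t in_size = 1;
  for (int64_t d : in_dims) in_size *= d;
  std::vector<int64_t> out(shape);
  int unk = -1;
  int64_t capacity = 1;
  for (size_t i = 0; i < out.size(); ++i) {
    if (out[i] == -1) {
      assert(unk == -1);  // only one -1 is allowed
      unk = static_cast<int>(i);
    } else if (out[i] == 0) {
      out[i] = in_dims[i];  // copy the input dimension at the same index
    }
    if (out[i] > 0) capacity *= out[i];
  }
  if (unk >= 0) out[unk] = in_size / capacity;  // infer the marked dimension
  return out;
}

int main() {
  for (int64_t d : ResolveShape({0, -1, 4}, {2, 3, 8}))
    std::cout << d << ' ';  // prints: 2 6 4
}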
@@ -360,6 +548,11 @@ void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dtype(DataType::BOOL);
}

void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(DataType::BOOL);
}

void MultinomialInferMeta(const MetaTensor& x,
                          int num_samples,
                          bool replacement,
...
...
@@ -395,124 +588,97 @@ void MultinomialInferMeta(const MetaTensor& x,
  out->set_dtype(DataType::INT64);
}
void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config) {
  auto x_dim = input.dims();
  PADDLE_ENFORCE_EQ(
      static_cast<int>(paddings.size()),
      x_dim.size() * 2,
      phi::errors::InvalidArgument(
          "Size of 'paddings' dimension should be equal to 2 * size of "
          "Input(X)'s dimension, but received (size of 'paddings' dimension "
          "is) %d vs (2 * size of Input(X)'s dimension is) %d.",
          static_cast<int>(paddings.size()),
          x_dim.size() * 2));
  for (size_t i = 0; i < paddings.size(); ++i) {
    PADDLE_ENFORCE_GE(paddings[i],
                      0,
                      phi::errors::InvalidArgument(
                          "The element of 'paddings' should >= 0, but "
                          "received %d for index %d.",
                          paddings[i],
                          static_cast<int>(i)));
  }
  std::vector<int64_t> out_dims(x_dim.size());
  for (int i = 0; i < x_dim.size(); ++i) {
    if ((!config.is_runtime) && (x_dim[i] == -1)) {
      out_dims[i] = -1;
    } else {
      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
  if (out_dims[0] == x_dim[0]) {
    // Only pass LoD when the first dimension is equal between
    // output and input.
    out->share_lod(input);
  }
  out->set_dtype(input.dtype());
}
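The padding arithmetic above is simply out[i] = x[i] + paddings[2*i] + paddings[2*i + 1], with 'paddings' stored as flattened (before, after) pairs. A standalone sketch (illustrative, plain std types):

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> x_dim = {2, 3};
  std::vector<int> paddings = {1, 1, 0, 2};  // (before, after) per dimension
  std::vector<int64_t> out_dims(x_dim.size());
  for (size_t i = 0; i < x_dim.size(); ++i)
    out_dims[i] = x_dim[i] + paddings[2 * i] + paddings[2 * i + 1];
  for (int64_t d : out_dims) std::cout << d << ' ';  // prints: 4 5
}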
void TileInferMeta(const MetaTensor& x,
                   const ScalarArray& repeat_times,
                   MetaTensor* out,
                   MetaConfig config) {
#define MAX_RANK_SUPPORTED 6

  auto repeat_times_data = repeat_times.GetData();
  auto x_dims = x.dims();
  if (repeat_times_data.size() == 0) {
    repeat_times_data = std::vector<int64_t>(x_dims.size(), -1);
  }

  PADDLE_ENFORCE_LE(
      x_dims.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The rank of the input 'x' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          x_dims.size()));
  PADDLE_ENFORCE_LE(
      repeat_times_data.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          repeat_times_data.size()));
  PADDLE_ENFORCE_GE(
      repeat_times_data.size(),
      1,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must be positive integers, but the value received is %d.",
          repeat_times_data.size()));

  auto out_rank =
      std::max(static_cast<size_t>(x_dims.size()), repeat_times_data.size());
  std::vector<int64_t> out_shape(out_rank);
  auto x_dim_vec = phi::vectorize<int>(x_dims);
  if (x_dim_vec.size() > repeat_times_data.size()) {
    auto diff = x_dim_vec.size() - repeat_times_data.size();
    repeat_times_data.insert(repeat_times_data.begin(), diff, -1);
  } else {
    auto diff = repeat_times_data.size() - x_dim_vec.size();
    x_dim_vec.insert(x_dim_vec.begin(), diff, -1);
  }
  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    if (x_dim_vec[i] == -1 || repeat_times_data[i] == -1) {
      out_shape[i] = -1;
    } else {
      PADDLE_ENFORCE_GT(
          repeat_times_data[i],
          0,
          errors::InvalidArgument(
              "Every element of the input 'repeat_times' for tile op must be "
              "greater than 0, but the value given is %d.",
              repeat_times_data[i]));
      out_shape[i] = x_dim_vec[i] * repeat_times_data[i];
    }
  }
  out->set_dims(phi::make_ddim(out_shape));
  if (out_shape[0] == x_dims[0]) {
    out->share_lod(x);
  }
}
void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
                      MetaConfig config) {
  auto& shape_data = shape.GetData();
  PADDLE_ENFORCE_NOT_NULL(out,
                          phi::errors::InvalidArgument(
                              "Output(Out) of ReshapeOp should not be null."));
  if (!config.is_runtime && shape.FromTensor()) {
    out->set_dims(phi::make_ddim(shape_data));
    out->share_lod(x);
    return;
  }
  PADDLE_ENFORCE_GT(shape_data.size(),
                    0,
                    phi::errors::InvalidArgument(
                        "The shape's size in ReshapeOp can't be zero."));
  InferMetaFromVecValue(x, shape_data, out);
}
void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                const ScalarArray& shape,
                                MetaTensor* xshape,
                                MetaTensor* out,
                                MetaConfig config) {
  PADDLE_ENFORCE_NOT_NULL(
      xshape,
      phi::errors::InvalidArgument(
          "Output(XShape) of ReshapeOp should not be null."));
  const auto& x_dims = x.dims();
  std::vector<int64_t> xshape_dims(x_dims.size() + 1);
  xshape_dims[0] = 0;
  for (int i = 0; i < x_dims.size(); ++i) {
    xshape_dims[i + 1] = x_dims[i];
  }
  xshape->set_dims(phi::make_ddim(xshape_dims));
  xshape->share_lod(x);
  ReshapeInferMeta(x, shape, out, config);
}
void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out) {
  auto input_dims = x.dims();
  PADDLE_ENFORCE_EQ(input_dims.size(),
                    4,
                    phi::errors::InvalidArgument(
                        "Input should be a 4-D tensor of format [N, C, H, W] "
                        "or [N, H, W, C], but got %u.",
                        input_dims.size()));

  const bool channel_last = (data_format == "NHWC");

  if (!channel_last) {
    PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[1]));
  } else {
    PADDLE_ENFORCE_EQ(input_dims[3] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[3]));
  }
  auto output_dims = input_dims;
  output_dims[0] = input_dims[0];
  if (!channel_last) {
    output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor);
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] * upscale_factor;
  } else {
    output_dims[1] = input_dims[1] * upscale_factor;
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor);
  }
  out->set_dtype(x.dtype());
  out->set_dims(output_dims);
}
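With upscale factor r, pixel shuffle trades channels for spatial resolution: NCHW [N, C, H, W] becomes [N, C/(r*r), H*r, W*r], and NHWC moves the channel division to the last axis instead. A shape-only sketch (illustrative, plain std types):

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const int r = 2;
  std::vector<int64_t> in = {1, 8, 4, 4};  // NCHW, C divisible by r*r
  std::vector<int64_t> out = {in[0], in[1] / (r * r), in[2] * r, in[3] * r};
  for (int64_t d : out) std::cout << d << ' ';  // prints: 1 2 8 8
}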
/* Why not use SumRawInferMeta directly?
   Because we need make InferMetaFunction's args follow the design of api.yaml
*/
void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
                  DataType dtype,
                  bool keep_dim,
                  MetaTensor* out) {
  bool reduce_all = false;
  SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out);
}
void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(dtype::ToReal(x.dtype()));
  out->set_layout(x.layout());
}
DDim ReduceInferDim(const MetaTensor& x,
...
@@ -584,29 +750,12 @@ DDim ReduceInferDim(const MetaTensor& x,
  return out_dim;
}
void SumRawInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     bool reduce_all,
                     DataType dtype,
                     MetaTensor* out) {
  DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);

  DataType out_dtype;
  if (dtype != DataType::UNDEFINED) {
    out_dtype = dtype;
  } else {
    if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 ||
        x.dtype() == DataType::INT64) {
      out_dtype = DataType::INT64;
    } else {
      out_dtype = x.dtype();
    }
  }

  out->set_dims(out_dim);
  out->set_dtype(out_dtype);
  out->set_layout(x.layout());
}

void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out) {
  bool reduce_all = false;
  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
}

void ReduceInferMetaBase(const MetaTensor& x,
...
...
@@ -620,20 +769,96 @@ void ReduceInferMetaBase(const MetaTensor& x,
  out->set_layout(x.layout());
}
void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out) {
  bool reduce_all = false;
  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
}

void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
                      MetaConfig config) {
  auto& shape_data = shape.GetData();
  PADDLE_ENFORCE_NOT_NULL(out,
                          phi::errors::InvalidArgument(
                              "Output(Out) of ReshapeOp should not be null."));
  if (!config.is_runtime && shape.FromTensor()) {
    out->set_dims(phi::make_ddim(shape_data));
    out->share_lod(x);
    return;
  }
  PADDLE_ENFORCE_GT(shape_data.size(),
                    0,
                    phi::errors::InvalidArgument(
                        "The shape's size in ReshapeOp can't be zero."));
  InferMetaFromVecValue(x, shape_data, out);
}
void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                const ScalarArray& shape,
                                MetaTensor* xshape,
                                MetaTensor* out,
                                MetaConfig config) {
  PADDLE_ENFORCE_NOT_NULL(
      xshape,
      phi::errors::InvalidArgument(
          "Output(XShape) of ReshapeOp should not be null."));
  const auto& x_dims = x.dims();
  std::vector<int64_t> xshape_dims(x_dims.size() + 1);
  xshape_dims[0] = 0;
  for (int i = 0; i < x_dims.size(); ++i) {
    xshape_dims[i + 1] = x_dims[i];
  }
  xshape->set_dims(phi::make_ddim(xshape_dims));
  xshape->share_lod(x);
  ReshapeInferMeta(x, shape, out, config);
}
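The xshape output above carries the input dims behind a leading 0; as I read the Paddle convention, the reshape gradient uses it to recover the input shape without holding the input tensor itself (treat that reading as an assumption, not something this diff states). A sketch of the layout:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> x_dims = {2, 3, 4};
  // A leading 0 followed by the input dims: x [2, 3, 4] -> xshape [0, 2, 3, 4].
  std::vector<int64_t> xshape_dims(x_dims.size() + 1);
  xshape_dims[0] = 0;
  for (size_t i = 0; i < x_dims.size(); ++i) xshape_dims[i + 1] = x_dims[i];
  for (int64_t d : xshape_dims) std::cout << d << ' ';  // prints: 0 2 3 4
}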
void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config) {
  auto x_dims = in.dims();
  PADDLE_ENFORCE_GE(x_dims.size(),
                    2,
                    phi::errors::InvalidArgument(
                        "Rank of Input(X) should be at least 2, "
                        "but the value given is %d.",
                        x_dims.size()));
  if (config.is_runtime || x_dims[x_dims.size() - 1] > 0) {
    PADDLE_ENFORCE_EQ(x_dims[x_dims.size() - 1],
                      1U,
                      phi::errors::InvalidArgument(
                          "The last dimension of Input(X) should be 1, "
                          "but the value given is %d.",
                          x_dims[x_dims.size() - 1]));
  }
  out->set_dims(x_dims);
  out->share_lod(in);
  out->set_dtype(in.dtype());
}

void SizeInferMeta(const MetaTensor& input, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
  out->set_dims({1});
}
void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) {
  auto dim_x = x.dims();
  auto rank_x = dim_x.size();
  PADDLE_ENFORCE_GE(axis,
                    -rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));
  PADDLE_ENFORCE_LT(axis,
                    rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));

  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->share_lod(x);
}

void SplitInferMeta(const MetaTensor& x,
...
@@ -767,22 +992,108 @@ void SplitInferMeta(const MetaTensor& x,
  }
}
void UnbindInferMeta(const MetaTensor& x,
                     int axis,
                     std::vector<MetaTensor>* outs) {
  auto in_dims = x.dims();
  std::vector<int> out_dim;
  axis = axis < 0 ? in_dims.size() + axis : axis;
  for (int i = 0; i < in_dims.size(); ++i) {
    if (i != axis) out_dim.push_back(in_dims[i]);
  }
  auto out_dims = phi::make_ddim(out_dim);

  for (size_t i = 0; i < outs->size(); ++i) {
    (*outs)[i].set_dtype(x.dtype());
    (*outs)[i].set_dims(out_dims);
    (*outs)[i].set_layout(x.layout());
    (*outs)[i].share_lod(x);
  }
}

/* Why not use SumRawInferMeta directly?
   Because we need make InferMetaFunction's args follow the design of api.yaml
*/
void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
                  DataType dtype,
                  bool keep_dim,
                  MetaTensor* out) {
  bool reduce_all = false;
  SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out);
}
void SumRawInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     bool reduce_all,
                     DataType dtype,
                     MetaTensor* out) {
  DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);

  DataType out_dtype;
  if (dtype != DataType::UNDEFINED) {
    out_dtype = dtype;
  } else {
    if (x.dtype() == DataType::BOOL || x.dtype() == DataType::INT32 ||
        x.dtype() == DataType::INT64) {
      out_dtype = DataType::INT64;
    } else {
      out_dtype = x.dtype();
    }
  }

  out->set_dims(out_dim);
  out->set_dtype(out_dtype);
  out->set_layout(x.layout());
}
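The dtype rule above: an explicit dtype always wins; otherwise bool/int32/int64 inputs promote to an int64 sum, and everything else keeps its own dtype. A standalone sketch with an illustrative enum (not phi's DataType):

#include <iostream>

enum class DType { UNDEFINED, BOOL, INT32, INT64, FLOAT32 };

DType SumOutDType(DType x, DType requested) {
  if (requested != DType::UNDEFINED) return requested;  // explicit dtype wins
  if (x == DType::BOOL || x == DType::INT32 || x == DType::INT64)
    return DType::INT64;  // integral sums accumulate in int64
  return x;
}

int main() {
  std::cout << (SumOutDType(DType::INT32, DType::UNDEFINED) == DType::INT64)
            << '\n';  // prints: 1
}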
void TileInferMeta(const MetaTensor& x,
                   const ScalarArray& repeat_times,
                   MetaTensor* out,
                   MetaConfig config) {
#define MAX_RANK_SUPPORTED 6

  auto repeat_times_data = repeat_times.GetData();
  auto x_dims = x.dims();
  if (repeat_times_data.size() == 0) {
    repeat_times_data = std::vector<int64_t>(x_dims.size(), -1);
  }

  PADDLE_ENFORCE_LE(
      x_dims.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The rank of the input 'x' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          x_dims.size()));
  PADDLE_ENFORCE_LE(
      repeat_times_data.size(),
      MAX_RANK_SUPPORTED,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must not be greater than %d, but the value received is %d.",
          MAX_RANK_SUPPORTED,
          repeat_times_data.size()));
  PADDLE_ENFORCE_GE(
      repeat_times_data.size(),
      1,
      errors::InvalidArgument(
          "The size of the shape of input 'repeat_times' for tile op "
          "must be positive integers, but the value received is %d.",
          repeat_times_data.size()));

  auto out_rank =
      std::max(static_cast<size_t>(x_dims.size()), repeat_times_data.size());
  std::vector<int64_t> out_shape(out_rank);
  auto x_dim_vec = phi::vectorize<int>(x_dims);
  if (x_dim_vec.size() > repeat_times_data.size()) {
    auto diff = x_dim_vec.size() - repeat_times_data.size();
    repeat_times_data.insert(repeat_times_data.begin(), diff, -1);
  } else {
    auto diff = repeat_times_data.size() - x_dim_vec.size();
    x_dim_vec.insert(x_dim_vec.begin(), diff, -1);
  }
  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    if (x_dim_vec[i] == -1 || repeat_times_data[i] == -1) {
      out_shape[i] = -1;
    } else {
      PADDLE_ENFORCE_GT(
          repeat_times_data[i],
          0,
          errors::InvalidArgument(
              "Every element of the input 'repeat_times' for tile op must be "
              "greater than 0, but the value given is %d.",
              repeat_times_data[i]));
      out_shape[i] = x_dim_vec[i] * repeat_times_data[i];
    }
  }
  out->set_dims(phi::make_ddim(out_shape));
  if (out_shape[0] == x_dims[0]) {
    out->share_lod(x);
  }
}
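The tile shape rule above: shape and repeat_times are right-aligned to a common rank by padding the shorter one with -1 on the left, -1 propagates as "unknown", and known entries multiply. A standalone sketch (illustrative, plain std types):

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> x = {-1, 3, 2, 4};   // already padded to rank 4
  std::vector<int64_t> rep = {2, 1, 2, 2};  // repeat_times, rank 4
  std::vector<int64_t> out(x.size());
  for (size_t i = 0; i < x.size(); ++i)
    out[i] = (x[i] == -1 || rep[i] == -1) ? -1 : x[i] * rep[i];
  for (int64_t d : out) std::cout << d << ' ';  // prints: -2 3 4 8
}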
...
...
@@ -840,97 +1151,130 @@ void TraceInferMeta(
  out->set_dtype(x.dtype());
}
void DiagonalInferMeta(
    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out) {
  auto x_dims = input.dims();
  int offset_ = offset;
  int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1;
  int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2;

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      phi::errors::OutOfRange("Input's dim is out of range (expected at "
                              "least 2 dimensions, but got %ld).",
                              x_dims.size()));
  PADDLE_ENFORCE_LT(
      axis1_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis1) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis1));
  PADDLE_ENFORCE_LT(
      axis2_,
      x_dims.size(),
      phi::errors::OutOfRange(
          "Attr(axis2) is out of range (expected to be in range of [%ld, "
          "%ld], but got %ld).",
          -(x_dims.size()),
          (x_dims.size() - 1),
          axis2));
  PADDLE_ENFORCE_NE(
      axis1_,
      axis2_,
      phi::errors::InvalidArgument("The dimensions should not be identical "
                                   "%d vs %d.",
                                   axis1,
                                   axis2));

  auto out_dims = vectorize(x_dims);
  // from out_dims get the dim size of axis1_.
  auto axis1_size = out_dims[axis1_];
  auto axis2_size = out_dims[axis2_];
  // delete two dims by attr axis1 and axis2 from out_dims.
  /* example:
     out_dim = [2, 3, 4];
     axis1 = 0;
     axis2 = 1;
     according to the attr of axis1 and axis2, we get:
     out_dim = [4].
  */
  out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_));
  out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_));

  if (offset_ == 0) {
    out_dims.push_back(std::min(axis1_size, axis2_size));
  } else if (offset_ > 0) {
    if ((axis2_size - offset_) > 0) {
      out_dims.push_back(std::min(axis1_size, axis2_size - offset_));
    } else {
      out_dims.push_back(0);
    }
  } else {
    if ((axis1_size + offset_) > 0) {
      out_dims.push_back(std::min(axis1_size + offset_, axis2_size));
    } else {
      out_dims.push_back(0);
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
}

void TransferLayoutInferMeta(const MetaTensor& x,
                             DataLayout layout,
                             MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->set_layout(layout);
}

void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out) {
  auto x_dims = x.dims();
  size_t x_rank = x_dims.size();
  size_t axis_size = axis.size();

  PADDLE_ENFORCE_EQ(
      x_rank,
      axis_size,
      errors::InvalidArgument("The input tensor's dimension "
                              "should be equal to the axis's size. "
                              "But received input tensor's dimension is %d, "
                              "axis's size is %d",
                              x_rank,
                              axis_size));

  std::vector<int> count(axis_size, 0);
  for (size_t i = 0; i < axis_size; i++) {
    PADDLE_ENFORCE_GE(
        axis[i],
        0,
        errors::InvalidArgument("The axis should be greater than or equal "
                                "to 0. But received %d of axis[%d]",
                                axis[i],
                                i));
    PADDLE_ENFORCE_EQ(
        axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
        true,
        errors::InvalidArgument(
            "Each element of Attribute axis should "
            "be a unique value range from 0 to (dims - 1), "
            "where the dims is the axis's size, "
            "unique value means this axis value can appear only once. "
            "But received axis[%d] is %d, axis_size is %d, "
            "count[axis[%d]] is %d",
            i,
            axis[i],
            axis_size,
            i,
            count[axis[i]]));
  }

  phi::DDim out_dims(x_dims);
  for (size_t i = 0; i < axis_size; ++i) {
    out_dims[i] = x_dims[axis[i]];
  }

  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
}
UnbindInferMeta
(
const
MetaTensor
&
x
,
int
axis
,
std
::
vector
<
MetaTensor
>*
outs
)
{
auto
in_dims
=
x
.
dims
();
std
::
vector
<
int
>
out_dim
;
axis
=
axis
<
0
?
in_dims
.
size
()
+
axis
:
axis
;
for
(
int
i
=
0
;
i
<
in_dims
.
size
();
++
i
)
{
if
(
i
!=
axis
)
out_dim
.
push_back
(
in_dims
[
i
]);
}
auto
out_dims
=
phi
::
make_ddim
(
out_dim
);
for
(
size_t
i
=
0
;
i
<
outs
->
size
();
++
i
)
{
(
*
outs
)[
i
].
set_dtype
(
x
.
dtype
());
(
*
outs
)[
i
].
set_dims
(
out_dims
);
(
*
outs
)[
i
].
set_layout
(
x
.
layout
());
(
*
outs
)[
i
].
share_lod
(
x
);
}
out
->
set_dims
(
phi
::
make_ddim
(
out_dims
));
}
void UnfoldInferMeta(const MetaTensor& x,
                     const std::vector<int>& kernel_sizes,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations,
                     MetaTensor* out,
                     MetaConfig config) {
  auto in_dims = x.dims();
  // Only [N, C, H, W] input supported now
  PADDLE_ENFORCE_EQ(
      in_dims.size(),
      4,
      phi::errors::InvalidArgument(
          "Input should be 4-D tensor of format [N, C, H, W], but get %u",
          in_dims.size()));
...
void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->share_meta(x);
}

// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void UnchangedInferMetaCheckAxis(const MetaTensor& x,
                                 int axis,
                                 MetaTensor* out) {
  auto rank = x.dims().size();
  PADDLE_ENFORCE_GE(
      axis,
      -rank,
      errors::InvalidArgument(
          "Attr(axis) value should be in range [-R, R-1], "
          "R is the rank of Input(X). But received axis: %d, R: %d.",
          axis,
          rank));
  PADDLE_ENFORCE_LT(
      axis,
      rank,
      phi::errors::InvalidArgument(
          "Attr(axis) value should be in range [-R, R-1], "
          "R is the rank of Input(X). But received axis: %d, R: %d.",
          axis,
          rank));
  out->share_meta(x);
}
void UnfoldInferMeta(const MetaTensor& x,
                     const std::vector<int>& kernel_sizes,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations,
                     MetaTensor* out,
                     MetaConfig config) {
  auto in_dims = x.dims();
  // Only [N, C, H, W] input supported now
  PADDLE_ENFORCE_EQ(
      in_dims.size(),
      4,
      phi::errors::InvalidArgument(
          "Input should be 4-D tensor of format [N, C, H, W], but get %u",
          in_dims.size()));
  PADDLE_ENFORCE_EQ(
      in_dims.size() - kernel_sizes.size(),
      2U,
      phi::errors::InvalidArgument(
...
...
@@ -1073,303 +1417,6 @@ void UnfoldInferMeta(const MetaTensor& x,
  out->set_dims(phi::make_ddim(out_dims));
}
void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out) {
  auto x_dims = x.dims();

  if (x_dims.size() == 1UL) {
    int64_t size_ = x_dims[0] + std::abs(offset);
    out->set_dims({size_, size_});
    out->set_dtype(x.dtype());
  } else if (x_dims.size() == 2UL) {
    int64_t size_ = 0;
    if (offset >= 0) {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] < x_dims[1] - offset) {
        size_ = x_dims[0];
      } else {
        size_ = x_dims[1] - offset;
      }
    } else {
      // Note(LutaoChu): Do not use std::min here, otherwise the calculation
      // of `size_` will have unexpected result on Windows Python3.8
      if (x_dims[0] + offset < x_dims[1]) {
        size_ = x_dims[0] + offset;
      } else {
        size_ = x_dims[1];
      }
    }
    out->set_dims({size_});
    out->set_dtype(x.dtype());
  } else {
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The input tensor X's dimensions of DiagV2Op should be either 1 or "
        "2, but received %d.",
        x_dims.size()));
  }
}
void ArgMinMaxInferMeta(const MetaTensor& x,
                        int64_t axis,
                        bool keepdims,
                        bool flatten,
                        int dtype,
                        MetaTensor* out,
                        MetaConfig config) {
  const auto& x_dims = x.dims();
  PADDLE_ENFORCE_GE(
      axis,
      -x_dims.size(),
      phi::errors::InvalidArgument("'axis'(%d) must be greater than or equal to"
                                   " -Rank(X)(%d).",
                                   axis,
                                   -x_dims.size()));
  PADDLE_ENFORCE_LT(axis,
                    x_dims.size(),
                    phi::errors::InvalidArgument(
                        "'axis'(%d) must be less than Rank(X)(%d) of Input(X).",
                        axis,
                        x_dims.size()));
  PADDLE_ENFORCE_EQ(
      (dtype < 0 || dtype == 2 || dtype == 3),
      true,
      phi::errors::InvalidArgument(
          "The attribute of dtype in argmin/argmax must be [%s] or [%s], but "
          "received [%s]",
          paddle::framework::DataTypeToString(
              paddle::framework::proto::VarType::INT32),
          paddle::framework::DataTypeToString(
              paddle::framework::proto::VarType::INT64),
          paddle::framework::DataTypeToString(
              static_cast<paddle::framework::proto::VarType::Type>(dtype))));

  auto x_rank = x_dims.size();
  if (axis < 0) axis += x_rank;
  if (config.is_runtime) {
    if (dtype == paddle::framework::proto::VarType::INT32) {
      int64_t all_element_num = 0;
      if (flatten) {
        all_element_num = phi::product(x_dims);
      } else {
        all_element_num = x_dims[axis];
      }
      PADDLE_ENFORCE_LE(
          all_element_num,
          INT_MAX,
          phi::errors::InvalidArgument(
              "The element num of the argmin/argmax input at axis is "
              "%d, is larger than int32 maximum value:%d, you must "
              "set the dtype of argmin/argmax to 'int64'.",
              all_element_num,
              INT_MAX));
    }
  }

  std::vector<int64_t> vec;
  if (flatten) {
    vec.emplace_back(static_cast<int64_t>(1));
  } else {
    for (int64_t i = 0; i < axis; i++) vec.emplace_back(x_dims[i]);
    if (keepdims) {
      vec.emplace_back(static_cast<int64_t>(1));
    }
    for (int64_t i = axis + 1; i < x_rank; i++) vec.emplace_back(x_dims[i]);
  }
  out->set_dims(phi::make_ddim(vec));
  if (dtype == 2) {
    out->set_dtype(DataType::INT32);
  } else if (dtype == 3) {
    out->set_dtype(DataType::INT64);
  }
}
void SizeInferMeta(const MetaTensor& input, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
  out->set_dims({1});
}
void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config) {
  auto x_dim = input.dims();
  PADDLE_ENFORCE_EQ(
      static_cast<int>(paddings.size()),
      x_dim.size() * 2,
      phi::errors::InvalidArgument(
          "Size of 'paddings' dimension should be equal to 2 * size of "
          "Input(X)'s dimension, but received (size of 'paddings' dimension "
          "is) %d vs (2 * size of Input(X)'s dimension is) %d.",
          static_cast<int>(paddings.size()),
          x_dim.size() * 2));
  for (size_t i = 0; i < paddings.size(); ++i) {
    PADDLE_ENFORCE_GE(paddings[i],
                      0,
                      phi::errors::InvalidArgument(
                          "The element of 'paddings' should >= 0, but "
                          "received %d for index %d.",
                          paddings[i],
                          static_cast<int>(i)));
  }
  std::vector<int64_t> out_dims(x_dim.size());
  for (int i = 0; i < x_dim.size(); ++i) {
    if ((!config.is_runtime) && (x_dim[i] == -1)) {
      out_dims[i] = -1;
    } else {
      out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
    }
  }
  out->set_dims(phi::make_ddim(out_dims));
  if (out_dims[0] == x_dim[0]) {
    // Only pass LoD when the first dimension is equal between
    // output and input.
    out->share_lod(input);
  }
  out->set_dtype(input.dtype());
}
void IsfiniteInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(DataType::BOOL);
}
void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out) {
  auto input_dims = x.dims();
  PADDLE_ENFORCE_EQ(input_dims.size(),
                    4,
                    phi::errors::InvalidArgument(
                        "Input should be a 4-D tensor of format [N, C, H, W] "
                        "or [N, H, W, C], but got %u.",
                        input_dims.size()));

  const bool channel_last = (data_format == "NHWC");

  if (!channel_last) {
    PADDLE_ENFORCE_EQ(input_dims[1] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[1]));
  } else {
    PADDLE_ENFORCE_EQ(input_dims[3] % (upscale_factor * upscale_factor),
                      0,
                      phi::errors::InvalidArgument(
                          "The square of upscale_factor[%u] should divide the "
                          "number of channel[%u]",
                          upscale_factor * upscale_factor,
                          input_dims[3]));
  }
  auto output_dims = input_dims;
  output_dims[0] = input_dims[0];
  if (!channel_last) {
    output_dims[1] = input_dims[1] / (upscale_factor * upscale_factor);
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] * upscale_factor;
  } else {
    output_dims[1] = input_dims[1] * upscale_factor;
    output_dims[2] = input_dims[2] * upscale_factor;
    output_dims[3] = input_dims[3] / (upscale_factor * upscale_factor);
  }
  out->set_dtype(x.dtype());
  out->set_dims(output_dims);
}
void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out) {
  auto x_dims = x.dims();
  size_t x_rank = x_dims.size();
  size_t axis_size = axis.size();

  PADDLE_ENFORCE_EQ(
      x_rank,
      axis_size,
      errors::InvalidArgument("The input tensor's dimension "
                              "should be equal to the axis's size. "
                              "But received input tensor's dimension is %d, "
                              "axis's size is %d",
                              x_rank,
                              axis_size));

  std::vector<int> count(axis_size, 0);
  for (size_t i = 0; i < axis_size; i++) {
    PADDLE_ENFORCE_GE(
        axis[i],
        0,
        errors::InvalidArgument("The axis should be greater than or equal "
                                "to 0. But received %d of axis[%d]",
                                axis[i],
                                i));
    PADDLE_ENFORCE_EQ(
        axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
        true,
        errors::InvalidArgument(
            "Each element of Attribute axis should "
            "be a unique value range from 0 to (dims - 1), "
            "where the dims is the axis's size, "
            "unique value means this axis value can appear only once. "
            "But received axis[%d] is %d, axis_size is %d, "
            "count[axis[%d]] is %d",
            i,
            axis[i],
            axis_size,
            i,
            count[axis[i]]));
  }

  phi::DDim out_dims(x_dims);
  for (size_t i = 0; i < axis_size; ++i) {
    out_dims[i] = x_dims[axis[i]];
  }

  out->set_dims(out_dims);
  out->set_dtype(x.dtype());
}
void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v) {
  auto input_dim = x.dims();
  auto rank = input_dim.size();

  PADDLE_ENFORCE_GE(rank,
                    2,
                    phi::errors::InvalidArgument(
                        "The Input(X) should have at least 2 dimensions. "
                        "But received a %d dimension tensor.",
                        rank));
  PADDLE_ENFORCE_EQ(
      input_dim[rank - 2],
      input_dim[rank - 1],
      phi::errors::InvalidArgument(
          "Eigh op is designed for square matrix, consequently "
          "inner-most 2 dimensions of Input(X) should be symmetric. "
          "But received X's shape[-2] = %d and shape[-1] = %d.",
          input_dim[rank - 2],
          input_dim[rank - 1]));

  std::vector<int64_t> values_dim;
  for (auto i = 0; i < rank - 1; i++) {
    values_dim.emplace_back(input_dim[i]);
  }
  out_w->set_dims(phi::make_ddim(values_dim));
  out_v->set_dims(input_dim);
}
void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
  auto rank = condition.dims().size();
  PADDLE_ENFORCE_GE(
...
...
@@ -1381,53 +1428,6 @@ void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
  out->set_dtype(DataType::INT64);
}
void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config) {
  auto x_dims = in.dims();
  PADDLE_ENFORCE_GE(x_dims.size(),
                    2,
                    phi::errors::InvalidArgument(
                        "Rank of Input(X) should be at least 2, "
                        "but the value given is %d.",
                        x_dims.size()));
  if (config.is_runtime || x_dims[x_dims.size() - 1] > 0) {
    PADDLE_ENFORCE_EQ(x_dims[x_dims.size() - 1],
                      1U,
                      phi::errors::InvalidArgument(
                          "The last dimension of Input(X) should be 1, "
                          "but the value given is %d.",
                          x_dims[x_dims.size() - 1]));
  }
  out->set_dims(x_dims);
  out->share_lod(in);
  out->set_dtype(in.dtype());
}
void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out) {
  auto dim_x = x.dims();
  auto rank_x = dim_x.size();
  PADDLE_ENFORCE_GE(axis,
                    -rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));
  PADDLE_ENFORCE_LT(axis,
                    rank_x,
                    phi::errors::InvalidArgument(
                        "Attr(axis) value should be in range [-R, R-1], "
                        "R is the rank of Input(X)."));

  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->share_lod(x);
}

}  // namespace phi

PD_REGISTER_INFER_META_FN(copy_to, phi::CopyToInferMeta);
...
...
paddle/phi/infermeta/unary.h View file @ f7765991
...
...
@@ -32,32 +32,20 @@ class MetaConfig;
// Because functions in this file not only can infer shape, but also need
// infer lod or other useful data.
void ArgMinMaxInferMeta(const MetaTensor& x,
                        int64_t axis,
                        bool keepdims,
                        bool flatten,
                        int dtype,
                        MetaTensor* out,
                        MetaConfig config = MetaConfig());

void ArgsortInferMeta(const MetaTensor& input,
                      int axis,
                      bool descending,
                      MetaTensor* output,
                      MetaTensor* indices);

void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out);

// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void UnchangedInferMetaCheckAxis(const MetaTensor& x, int axis, MetaTensor* out);

void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);

void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out);

void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out);

void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out);

void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out);
...
...
@@ -76,6 +64,30 @@ void CumsumInferMeta(const MetaTensor& x,
                     bool reverse,
                     MetaTensor* out);

void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out);

void DiagonalInferMeta(
    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out);

void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v);

void FlattenInferMeta(const MetaTensor& x,
                      int start_axis,
                      int stop_axis,
                      MetaTensor* out);

void GumbelSoftmaxInferMeta(const MetaTensor& x,
                            float temperature,
                            bool hard,
                            int axis,
                            MetaTensor* out);

void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);

void InferMetaFromVecValue(const MetaTensor& x,
...
...
@@ -84,11 +96,37 @@ void InferMetaFromVecValue(const MetaTensor& x,
void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);

void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);

void MultinomialInferMeta(const MetaTensor& x,
                          int num_samples,
                          bool replacement,
                          MetaTensor* out);

void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config = MetaConfig());

void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out);

void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);

void ReduceInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     MetaTensor* out);

void ReduceInferMetaBase(const MetaTensor& x,
                         const std::vector<int64_t>& axis,
                         bool keep_dim,
                         bool reduce_all,
                         MetaTensor* out);

void ReshapeInferMeta(const MetaTensor& x,
                      const ScalarArray& shape,
                      MetaTensor* out,
...
...
@@ -100,50 +138,63 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                MetaTensor* out,
                                MetaConfig config = MetaConfig());

void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config = MetaConfig());

void SizeInferMeta(const MetaTensor& input, MetaTensor* out);

void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);

void SplitInferMeta(const MetaTensor& x_meta,
                    const ScalarArray& num_or_sections,
                    const Scalar& axis,
                    std::vector<MetaTensor*> out,
                    MetaConfig config = MetaConfig());

void SumInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& axis,
                  DataType dtype,
                  bool keep_dim,
                  MetaTensor* out);

void SumRawInferMeta(const MetaTensor& x,
                     const std::vector<int64_t>& axis,
                     bool keep_dim,
                     bool reduce_all,
                     DataType dtype,
                     MetaTensor* out);

void TileInferMeta(const MetaTensor& x,
                   const ScalarArray& repeat_times,
                   MetaTensor* out,
                   MetaConfig config = MetaConfig());

void TraceInferMeta(
    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out);

void TransferLayoutInferMeta(const MetaTensor& x,
                             DataLayout layout,
                             MetaTensor* out);

void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out);

void UnbindInferMeta(const MetaTensor& x,
                     int axis,
                     std::vector<MetaTensor>* outs);

void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out);

// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
void UnchangedInferMetaCheckAxis(const MetaTensor& x, int axis, MetaTensor* out);

void UnfoldInferMeta(const MetaTensor& x,
                     const std::vector<int>& kernel_sizes,
...
...
@@ -153,56 +204,6 @@ void UnfoldInferMeta(const MetaTensor& x,
                     MetaTensor* out,
                     MetaConfig config = MetaConfig());

void DiagInferMeta(const MetaTensor& x,
                   int offset,
                   float padding_value,
                   MetaTensor* out);

void ArgMinMaxInferMeta(const MetaTensor& x,
                        int64_t axis,
                        bool keepdims,
                        bool flatten,
                        int dtype,
                        MetaTensor* out,
                        MetaConfig config = MetaConfig());

void SizeInferMeta(const MetaTensor& input, MetaTensor* out);

void PadInferMeta(const MetaTensor& input,
                  const std::vector<int>& paddings,
                  float pad_value,
                  MetaTensor* out,
                  MetaConfig config = MetaConfig());

void DiagonalInferMeta(
    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out);

void PixelShuffleInferMeta(const MetaTensor& x,
                           int upscale_factor,
                           const std::string& data_format,
                           MetaTensor* out);

void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);

void TransposeInferMeta(const MetaTensor& x,
                        const std::vector<int>& axis,
                        MetaTensor* out);

void EighInferMeta(const MetaTensor& x,
                   const std::string& uplo,
                   MetaTensor* out_w,
                   MetaTensor* out_v);

void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);

void ShardIndexInferMeta(const MetaTensor& in,
                         int index_num,
                         int nshards,
                         int shard_id,
                         int ignore_value,
                         MetaTensor* out,
                         MetaConfig config = MetaConfig());

void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);

}  // namespace phi
paddle/phi/kernels/funcs/matrix_inverse.h View file @ f7765991
...
...
@@ -39,7 +39,7 @@ void ComputeInverseEigen(const Context& dev_ctx,
  int batch_size = rank > 2 ? a.numel() / (n * n) : 1;

  const T* a_ptr = a.data<T>();
  T* a_inv_ptr = a_inv->mutable_data<T>(dev_ctx.GetPlace());
  T* a_inv_ptr = dev_ctx.template Alloc<T>(a_inv);

  for (int i = 0; i < batch_size; ++i) {
    ConstEigenMatrixMap mat(a_ptr + i * n * n, n, n);
...
...