Unverified commit f3f27d25, authored Mar 13, 2022 by zyfncg, committed via GitHub on Mar 13, 2022.
[PHI] Refactor infermeta files (Part2) (#40367)
* refactor infermeta files
* update
Parent: 080024f0
Showing 9 changed files with 731 additions and 728 deletions (+731, -728).
paddle/fluid/operators/gather_nd_op.cc    +0    -1
paddle/phi/infermeta/backward.cc          +15   -14
paddle/phi/infermeta/backward.h           +6    -1
paddle/phi/infermeta/binary.cc            +452  -451
paddle/phi/infermeta/binary.h             +57   -57
paddle/phi/infermeta/nullary.cc           +18   -18
paddle/phi/infermeta/nullary.h            +9    -9
paddle/phi/infermeta/ternary.cc           +153  -152
paddle/phi/infermeta/ternary.h            +21   -25
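All nine files belong to phi's InferMeta layer, which fills in output metadata (dims, dtype, layout, LoD) without touching tensor data; as the diffs below show, this commit mainly reorders the functions alphabetically within each file and moves a few to the header matching their input arity. As a rough, self-contained sketch of the pattern these functions follow (illustrative only; MetaTensorLike is a stand-in, not phi::MetaTensor):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for phi::MetaTensor: only the metadata an InferMeta rule touches.
struct MetaTensorLike {
  std::vector<int64_t> dims;
  std::string dtype;
};

// Illustrative unary rule in the spirit of GeneralUnaryGradInferMeta:
// the output simply shares the input's metadata; no data is allocated.
void UnaryInferMetaSketch(const MetaTensorLike& x, MetaTensorLike* out) {
  if (out != nullptr) {
    out->dims = x.dims;
    out->dtype = x.dtype;
  }
}

int main() {
  MetaTensorLike x{{8, 16}, "float32"};
  MetaTensorLike dx;
  UnaryInferMetaSketch(x, &dx);
  std::cout << dx.dims[0] << "x" << dx.dims[1] << " " << dx.dtype << "\n";
}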
paddle/fluid/operators/gather_nd_op.cc

@@ -16,7 +16,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
-#include "paddle/phi/infermeta/ternary.h"
 
 namespace paddle {
 namespace operators {
paddle/phi/infermeta/backward.cc

@@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
   }
 }
 
-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
-  if (dx) {
-    dx->share_meta(x);
-  }
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad) {
+  const auto& dtype = out_grad.dtype();
+  x_grad->set_dims(x.dims());
+  x_grad->share_lod(x);
+  x_grad->set_dtype(dtype);
 }
 
 void GeneralBinaryGradInferMeta(const MetaTensor& x,
@@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
   }
 }
 
+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
+  if (dx) {
+    dx->share_meta(x);
+  }
+}
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
@@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
       dout.dims(),
       errors::InvalidArgument(
           "Input(Out) and its gradients should have the same shape."));
 
   dx->share_meta(dout);
 }
 
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad) {
-  const auto& dtype = out_grad.dtype();
-  x_grad->set_dims(x.dims());
-  x_grad->share_lod(x);
-  x_grad->set_dtype(dtype);
-}
-
 void PsroiPoolGradInferMeta(const MetaTensor& x,
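GatherNdGradInferMeta, now grouped with the other backward rules, gives x_grad the dims and LoD of x but the dtype of out_grad. The same rule restated as a small stand-alone sketch (plain structs as stand-ins for phi::MetaTensor, illustrative only):

#include <cstdint>
#include <string>
#include <vector>

// Stand-in for the metadata carried by phi::MetaTensor (illustrative only).
struct Meta {
  std::vector<int64_t> dims;
  std::string dtype;
  std::vector<std::vector<size_t>> lod;
};

// Mirrors the rule shown above: x_grad takes x's dims and LoD, but inherits
// out_grad's dtype, since the gradient is produced in that type.
void GatherNdGradMetaSketch(const Meta& x, const Meta& out_grad, Meta* x_grad) {
  x_grad->dims = x.dims;
  x_grad->lod = x.lod;
  x_grad->dtype = out_grad.dtype;
}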
paddle/phi/infermeta/backward.h

@@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                         MetaTensor* dweight,
                                         MetaTensor* dbias);
 
-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad);
 
 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
@@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
                                  MetaTensor* dy,
                                  MetaTensor* dz);
 
+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
paddle/phi/infermeta/binary.cc

This diff is collapsed. Click to expand.
paddle/phi/infermeta/binary.h

@@ -29,23 +29,44 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.
 
-void CompareInferMeta(const MetaTensor& x,
-                      const MetaTensor& y,
-                      int axis,
-                      MetaTensor* out);
+void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+
+void BCELossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());
+
+void BincountInferMeta(const MetaTensor& x,
+                       const paddle::optional<const MetaTensor&> weights,
+                       int minlength,
+                       MetaTensor* out);
+
+void CholeskySolveInferMeta(const MetaTensor& x,
+                            const MetaTensor& y,
+                            bool upper,
+                            MetaTensor* out);
 
 void CompareAllInferMeta(const MetaTensor& x,
                          const MetaTensor& y,
                          MetaTensor* out);
 
-void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void CompareInferMeta(const MetaTensor& x,
+                      const MetaTensor& y,
+                      int axis,
+                      MetaTensor* out);
 
-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out);
+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out);
+
+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out);
+
+void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
 
 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out);
@@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
                              int axis,
                              MetaTensor* out);
 
+void GatherNdInferMeta(const MetaTensor& x,
+                       const MetaTensor& index,
+                       MetaTensor* out);
+
+void GatherTreeMeta(const MetaTensor& ids,
+                    const MetaTensor& parents,
+                    MetaTensor* out);
+
 void HuberLossInferMeta(const MetaTensor& input_meta,
                         const MetaTensor& label_meta,
                         float delta,
@@ -62,68 +91,32 @@ void HuberLossInferMeta(const MetaTensor& input_meta,
                         MetaTensor* residual,
                         MetaConfig config = MetaConfig());
 
-void CholeskySolveInferMeta(const MetaTensor& x,
-                            const MetaTensor& y,
-                            bool upper,
-                            MetaTensor* out);
-
-void TriangularSolveInferMeta(const MetaTensor& x,
-                              const MetaTensor& y,
-                              bool upper,
-                              bool transpose,
-                              bool unitriangular,
-                              MetaTensor* out);
-
 void IndexSampleInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out,
                           MetaConfig config = MetaConfig());
 
-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out);
-
-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
-
-void SegmentPoolInferMeta(const MetaTensor& x,
-                          const MetaTensor& segment_ids,
-                          const std::string& pooltype,
-                          MetaTensor* out,
-                          MetaTensor* summed_ids,
-                          MetaConfig config = MetaConfig());
-
-void BCELossInferMeta(const MetaTensor& input,
+void LogLossInferMeta(const MetaTensor& input,
                       const MetaTensor& label,
+                      float epsilon,
                       MetaTensor* out,
                       MetaConfig config = MetaConfig());
 
-void BincountInferMeta(const MetaTensor& x,
-                       const paddle::optional<const MetaTensor&> weights,
-                       int minlength,
-                       MetaTensor* out);
-
-void DistInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   float p,
-                   MetaTensor* out);
-
-void GatherNdInferMeta(const MetaTensor& x,
-                       const MetaTensor& index,
-                       MetaTensor* out);
-
-void GatherTreeMeta(const MetaTensor& ids,
-                    const MetaTensor& parents,
-                    MetaTensor* out);
-
-void LogLossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      float epsilon,
-                      MetaTensor* out,
-                      MetaConfig config = MetaConfig());
+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out);
 
 void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
 
+void SegmentPoolInferMeta(const MetaTensor& x,
+                          const MetaTensor& segment_ids,
+                          const std::string& pooltype,
+                          MetaTensor* out,
+                          MetaTensor* summed_ids,
+                          MetaConfig config = MetaConfig());
+
 void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             const MetaTensor& label,
                                             bool normalize,
@@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             MetaTensor* out,
                                             MetaConfig config = MetaConfig());
 
+void TriangularSolveInferMeta(const MetaTensor& x,
+                              const MetaTensor& y,
+                              bool upper,
+                              bool transpose,
+                              bool unitriangular,
+                              MetaTensor* out);
+
 }  // namespace phi
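The header above only reorders declarations; the corresponding definitions live in binary.cc, whose diff is collapsed. For orientation, a matmul-style shape rule of the kind MatmulInferMeta declares might look like the following sketch (a hypothetical helper over plain dimension vectors, not Paddle's implementation):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative 2-D matmul shape rule (hypothetical helper, not phi code):
// out is [M, N], where M comes from x (transposed or not) and N from y.
std::vector<int64_t> MatmulOutDimsSketch(const std::vector<int64_t>& x_dims,
                                         const std::vector<int64_t>& y_dims,
                                         bool trans_x,
                                         bool trans_y) {
  assert(x_dims.size() == 2 && y_dims.size() == 2);
  const int64_t m = trans_x ? x_dims[1] : x_dims[0];
  const int64_t kx = trans_x ? x_dims[0] : x_dims[1];
  const int64_t ky = trans_y ? y_dims[1] : y_dims[0];
  const int64_t n = trans_y ? y_dims[0] : y_dims[1];
  assert(kx == ky);  // inner dimensions must match
  return {m, n};
}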
paddle/phi/infermeta/nullary.cc

@@ -16,6 +16,12 @@ limitations under the License. */
 
 namespace phi {
 
+void CreateInferMeta(const ScalarArray& shape,
+                     DataType dtype,
+                     MetaTensor* out) {
+  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
+}
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
@@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector<int64_t>& shape,
   out->set_layout(layout);
 }
 
-void CreateInferMeta(const ScalarArray& shape,
-                     DataType dtype,
-                     MetaTensor* out) {
-  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
-}
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
@@ -41,25 +41,25 @@ void EyeInferMeta(int64_t num_rows,
   out->set_dtype(dtype);
 }
 
-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out) {
-  auto out_dims = phi::make_ddim(shape);
+void GaussianRandomInferMeta(const ScalarArray& shape,
+                             float mean,
+                             float std,
+                             int seed,
+                             DataType dtype,
+                             MetaTensor* out) {
+  auto out_dims = phi::make_ddim(shape.GetData());
   out->set_dims(out_dims);
   out->set_dtype(dtype);
   out->set_layout(DataLayout::NCHW);
 }
 
-void GaussianRandomInferMeta(const ScalarArray& shape,
-                             float mean,
-                             float std,
-                             int seed,
-                             DataType dtype,
-                             MetaTensor* out) {
-  auto out_dims = phi::make_ddim(shape.GetData());
+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
+                                      DataType dtype,
+                                      MetaTensor* out) {
+  auto out_dims = phi::make_ddim(shape);
   out->set_dims(out_dims);
   out->set_dtype(dtype);
   out->set_layout(DataLayout::NCHW);
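The only behavioral detail worth noting here is that CreateInferMeta stays a thin wrapper that forwards to CreateInferMetaBase with a default NCHW layout. The same wrapper-with-default pattern, sketched with stand-in types rather than the phi API (illustrative only):

#include <cstdint>
#include <vector>

enum class Layout { kNCHW, kNHWC };
enum class DType { kFloat32, kInt64 };

struct OutMeta {
  std::vector<int64_t> dims;
  DType dtype;
  Layout layout;
};

// "Base" rule: every piece of metadata is passed explicitly.
void CreateMetaBaseSketch(const std::vector<int64_t>& shape,
                          DType dtype,
                          Layout layout,
                          OutMeta* out) {
  out->dims = shape;
  out->dtype = dtype;
  out->layout = layout;
}

// Thin wrapper mirroring CreateInferMeta: supplies the NCHW default.
void CreateMetaSketch(const std::vector<int64_t>& shape,
                      DType dtype,
                      OutMeta* out) {
  CreateMetaBaseSketch(shape, dtype, Layout::kNCHW, out);
}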
paddle/phi/infermeta/nullary.h

@@ -28,26 +28,26 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.
 
+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
                          MetaTensor* out);
 
-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
                   MetaTensor* out);
 
-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out);
+void GaussianRandomInferMeta(const ScalarArray& shape,
+                             float mean,
+                             float std,
+                             int seed,
+                             DataType dtype,
+                             MetaTensor* out);
 
-void GaussianRandomInferMeta(const ScalarArray& shape,
-                             float mean,
-                             float std,
-                             int seed,
+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
paddle/phi/infermeta/ternary.cc

@@ -18,6 +18,58 @@ limitations under the License. */
 
 namespace phi {
 
+void AccuracyInferMeta(const MetaTensor& out,
+                       const MetaTensor& indice,
+                       const MetaTensor& label,
+                       MetaTensor* accuracy,
+                       MetaTensor* correct,
+                       MetaTensor* total,
+                       MetaConfig config) {
+  auto inference_dim = out.dims();
+  auto label_dim = label.dims();
+  // Assume indices has same shape as inference, because
+  // it's the output of topk.
+  PADDLE_ENFORCE_EQ(
+      label_dim.size(),
+      2,
+      phi::errors::InvalidArgument(
+          "ShapeError: label's dimensions of AccuracyOp must be 2. "
+          "But received label's dimensions = %d, label's shape = [%s]",
+          label_dim.size(),
+          label_dim));
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(label_dim[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "ShapeError: label's second dimension of "
+                          "AccuracyOp must be 1. But received label's "
+                          "second dimension is = %d, label's shape = [%s]",
+                          label_dim[1],
+                          label_dim));
+    PADDLE_ENFORCE_EQ(
+        inference_dim[0],
+        label_dim[0],
+        phi::errors::InvalidArgument(
+            "ShapeError: the output's num_rows of AccuracyOp must be"
+            " the same as label's num_rows. But received output's "
+            "shape = [%s], label's shape = [%s], output's num_rows = %d, "
+            "label's "
+            "num_rows = %d",
+            inference_dim,
+            label_dim,
+            inference_dim[0],
+            label_dim[0]));
+  }
+
+  accuracy->set_dims({1});
+  accuracy->set_dtype(out.dtype());
+  correct->set_dims({1});
+  correct->set_dtype(out.dtype());
+  total->set_dims({1});
+  total->set_dtype(out.dtype());
+  accuracy->share_lod(out);
+}
+
 void AddmmInferMeta(const MetaTensor& input,
                     const MetaTensor& x,
                     const MetaTensor& y,
@@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input,
   out->set_dtype(input.dtype());
 }
 
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count) {
+  auto src_index_dims = src_index.dims();
+  if (src_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(src_index_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Src_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          src_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(
+        src_index_dims.size(),
+        1,
+        phi::errors::InvalidArgument(
+            "The Src_index should be 1D, when it is not 2D, but we get %d",
+            src_index_dims.size()));
+  }
+
+  auto dst_index_dims = dst_index.dims();
+  if (dst_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(dst_index_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Dst_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          dst_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(
+        dst_index_dims.size(),
+        1,
+        phi::errors::InvalidArgument("The Dst_index should be 1D, "
+                                     "when it is not 2D, but we get %d",
+                                     dst_index_dims.size()));
+  }
+
+  PADDLE_ENFORCE_EQ(src_index_dims[0],
+                    dst_index_dims[0],
+                    phi::errors::InvalidArgument(
+                        "Src_index and Dst_index should have the same shape."));
+
+  auto dims = x.dims();
+  out->set_dims(dims);
+  out->set_dtype(x.dtype());
+
+  if (pool_type == "MEAN") {
+    dst_count->set_dims({dims[0]});
+    dst_count->set_dtype(DataType::INT32);
+  }
+}
+
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  auto w_dims = weight.dims();
+  DDim out_dims;
+  out_dims = funcs::GetOutputDims(x_dims, y_dims);
+  if (w_dims.size() > 1 || w_dims[0] != 1) {
+    out_dims = funcs::GetOutputDims(out_dims, w_dims);
+  }
+  out->set_dims(out_dims);
+  out->set_dtype(x.dtype());
+  out->share_lod(x);
+}
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out) {
+  auto s_dims = start.dims();
+  PADDLE_ENFORCE_EQ(
+      (s_dims.size() == 1) && (s_dims[0] == 1),
+      true,
+      phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
+                                   "but received input shape is [%s].",
+                                   s_dims));
+  auto e_dims = stop.dims();
+  PADDLE_ENFORCE_EQ(
+      (e_dims.size() == 1) && (e_dims[0] == 1),
+      true,
+      phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
+                                   "but received input shape is [%s].",
+                                   e_dims));
+  auto step_dims = number.dims();
+  PADDLE_ENFORCE_EQ(
+      (step_dims.size() == 1) && (step_dims[0] == 1),
+      true,
+      phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
+                                   "but received input shape is [%s].",
+                                   step_dims));
+  out->set_dims(phi::make_ddim({-1}));
+  out->set_dtype(start.dtype());
+}
+
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
@@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
   scores->set_dtype(length.dtype());
 }
 
-void LerpInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   const MetaTensor& weight,
-                   MetaTensor* out) {
-  auto x_dims = x.dims();
-  auto y_dims = y.dims();
-  auto w_dims = weight.dims();
-  DDim out_dims;
-  out_dims = funcs::GetOutputDims(x_dims, y_dims);
-  if (w_dims.size() > 1 || w_dims[0] != 1) {
-    out_dims = funcs::GetOutputDims(out_dims, w_dims);
-  }
-  out->set_dims(out_dims);
-  out->set_dtype(x.dtype());
-  out->share_lod(x);
-}
-
-void LinspaceInferMeta(const MetaTensor& start,
-                       const MetaTensor& stop,
-                       const MetaTensor& number,
-                       MetaTensor* out) {
-  auto s_dims = start.dims();
-  PADDLE_ENFORCE_EQ(
-      (s_dims.size() == 1) && (s_dims[0] == 1),
-      true,
-      phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
-                                   "but received input shape is [%s].",
-                                   s_dims));
-  auto e_dims = stop.dims();
-  PADDLE_ENFORCE_EQ(
-      (e_dims.size() == 1) && (e_dims[0] == 1),
-      true,
-      phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
-                                   "but received input shape is [%s].",
-                                   e_dims));
-  auto step_dims = number.dims();
-  PADDLE_ENFORCE_EQ(
-      (step_dims.size() == 1) && (step_dims[0] == 1),
-      true,
-      phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
-                                   "but received input shape is [%s].",
-                                   step_dims));
-  out->set_dims(phi::make_ddim({-1}));
-  out->set_dtype(start.dtype());
-}
-
-void AccuracyInferMeta(const MetaTensor& out,
-                       const MetaTensor& indice,
-                       const MetaTensor& label,
-                       MetaTensor* accuracy,
-                       MetaTensor* correct,
-                       MetaTensor* total,
-                       MetaConfig config) {
-  auto inference_dim = out.dims();
-  auto label_dim = label.dims();
-  // Assume indices has same shape as inference, because
-  // it's the output of topk.
-  PADDLE_ENFORCE_EQ(
-      label_dim.size(),
-      2,
-      phi::errors::InvalidArgument(
-          "ShapeError: label's dimensions of AccuracyOp must be 2. "
-          "But received label's dimensions = %d, label's shape = [%s]",
-          label_dim.size(),
-          label_dim));
-  if (config.is_runtime) {
-    PADDLE_ENFORCE_EQ(label_dim[1],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "ShapeError: label's second dimension of "
-                          "AccuracyOp must be 1. But received label's "
-                          "second dimension is = %d, label's shape = [%s]",
-                          label_dim[1],
-                          label_dim));
-    PADDLE_ENFORCE_EQ(
-        inference_dim[0],
-        label_dim[0],
-        phi::errors::InvalidArgument(
-            "ShapeError: the output's num_rows of AccuracyOp must be"
-            " the same as label's num_rows. But received output's "
-            "shape = [%s], label's shape = [%s], output's num_rows = %d, "
-            "label's "
-            "num_rows = %d",
-            inference_dim,
-            label_dim,
-            inference_dim[0],
-            label_dim[0]));
-  }
-
-  accuracy->set_dims({1});
-  accuracy->set_dtype(out.dtype());
-  correct->set_dims({1});
-  correct->set_dtype(out.dtype());
-  total->set_dims({1});
-  total->set_dtype(out.dtype());
-  accuracy->share_lod(out);
-}
-
-void GraphSendRecvInferMeta(const MetaTensor& x,
-                            const MetaTensor& src_index,
-                            const MetaTensor& dst_index,
-                            const std::string& pool_type,
-                            MetaTensor* out,
-                            MetaTensor* dst_count) {
-  auto src_index_dims = src_index.dims();
-  if (src_index_dims.size() == 2) {
-    PADDLE_ENFORCE_EQ(src_index_dims[1],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "The last dim of Src_index should be 1 when it "
-                          "is 2D, but we get %d",
-                          src_index_dims[1]));
-  } else {
-    PADDLE_ENFORCE_EQ(
-        src_index_dims.size(),
-        1,
-        phi::errors::InvalidArgument(
-            "The Src_index should be 1D, when it is not 2D, but we get %d",
-            src_index_dims.size()));
-  }
-
-  auto dst_index_dims = dst_index.dims();
-  if (dst_index_dims.size() == 2) {
-    PADDLE_ENFORCE_EQ(dst_index_dims[1],
-                      1,
-                      phi::errors::InvalidArgument(
-                          "The last dim of Dst_index should be 1 when it "
-                          "is 2D, but we get %d",
-                          dst_index_dims[1]));
-  } else {
-    PADDLE_ENFORCE_EQ(
-        dst_index_dims.size(),
-        1,
-        phi::errors::InvalidArgument("The Dst_index should be 1D, "
-                                     "when it is not 2D, but we get %d",
-                                     dst_index_dims.size()));
-  }
-
-  PADDLE_ENFORCE_EQ(src_index_dims[0],
-                    dst_index_dims[0],
-                    phi::errors::InvalidArgument(
-                        "Src_index and Dst_index should have the same shape."));
-
-  auto dims = x.dims();
-  out->set_dims(dims);
-  out->set_dtype(x.dtype());
-
-  if (pool_type == "MEAN") {
-    dst_count->set_dims({dims[0]});
-    dst_count->set_dtype(DataType::INT32);
-  }
-}
-
 }  // namespace phi
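Among the moved definitions, LinspaceInferMeta is a good example of a rule whose output shape cannot be known statically: it only validates that start, stop, and number each have shape [1] and marks the output length as -1 until runtime. A compact stand-alone restatement of that check (plain vectors, illustrative only, not the phi API):

#include <cstdint>
#include <stdexcept>
#include <vector>

// Mirrors LinspaceInferMeta's checks: each scalar input must have shape [1];
// the output length is only known at runtime, so its dim is reported as -1.
std::vector<int64_t> LinspaceOutDimsSketch(
    const std::vector<int64_t>& start_dims,
    const std::vector<int64_t>& stop_dims,
    const std::vector<int64_t>& num_dims) {
  auto is_shape_one = [](const std::vector<int64_t>& d) {
    return d.size() == 1 && d[0] == 1;
  };
  if (!is_shape_one(start_dims) || !is_shape_one(stop_dims) ||
      !is_shape_one(num_dims)) {
    throw std::invalid_argument("start/stop/num must each have shape [1]");
  }
  return {-1};  // dynamic length, resolved when the op actually runs
}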
paddle/phi/infermeta/ternary.h

@@ -45,15 +45,21 @@ void AddmmInferMeta(const MetaTensor& input,
                     float beta,
                     MetaTensor* out);
 
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad);
-
-void ScatterInferMeta(const MetaTensor& x,
-                      const MetaTensor& index,
-                      const MetaTensor& updates,
-                      bool overwrite,
-                      MetaTensor* out);
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count);
+
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out);
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out);
 
 void NllLossRawInferMeta(const MetaTensor& input,
@@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input,
                          MetaTensor* total_weight,
                          MetaConfig config = MetaConfig());
 
+void ScatterInferMeta(const MetaTensor& x,
+                      const MetaTensor& index,
+                      const MetaTensor& updates,
+                      bool overwrite,
+                      MetaTensor* out);
+
 void ScatterNdAddInferMeta(const MetaTensor& x,
                            const MetaTensor& index,
                            const MetaTensor& updates,
@@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
                             MetaTensor* path,
                             MetaConfig config = MetaConfig());
 
-void LerpInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   const MetaTensor& weight,
-                   MetaTensor* out);
-
-void LinspaceInferMeta(const MetaTensor& start,
-                       const MetaTensor& stop,
-                       const MetaTensor& number,
-                       MetaTensor* out);
-
-void GraphSendRecvInferMeta(const MetaTensor& x,
-                            const MetaTensor& src_index,
-                            const MetaTensor& dst_index,
-                            const std::string& pool_type,
-                            MetaTensor* out,
-                            MetaTensor* dst_count);
-
 }  // namespace phi