PaddlePaddle / Paddle, commit f7765991

Authored Mar 13, 2022 by phlrain

    Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_some_yaml_config

Parents: 1694bcc8, 1b0cecb7

Showing 16 changed files with 1566 additions and 1562 deletions.

The merged develop-side changes are concentrated in paddle/phi/infermeta: InferMeta functions and declarations are reordered alphabetically, GatherNdGradInferMeta moves from ternary.h to backward.h, MetaTensor's private accessor get_tensor() is renamed to tensor(), a misspelled functor name in softmax_op.cc is corrected, and matrix_inverse.h switches from mutable_data to the device context's Alloc.

Changed files:

    paddle/fluid/framework/infershape_utils.cc   +2    -2
    paddle/fluid/operators/gather_nd_op.cc       +0    -1
    paddle/fluid/operators/softmax_op.cc         +2    -2
    paddle/phi/core/meta_tensor.cc               +2    -2
    paddle/phi/core/meta_tensor.h                +1    -1
    paddle/phi/infermeta/backward.cc             +15   -14
    paddle/phi/infermeta/backward.h              +6    -1
    paddle/phi/infermeta/binary.cc               +452  -451
    paddle/phi/infermeta/binary.h                +57   -57
    paddle/phi/infermeta/nullary.cc              +18   -18
    paddle/phi/infermeta/nullary.h               +9    -9
    paddle/phi/infermeta/ternary.cc              +153  -152
    paddle/phi/infermeta/ternary.h               +21   -25
    paddle/phi/infermeta/unary.cc                +730  -730
    paddle/phi/infermeta/unary.h                 +97   -96
    paddle/phi/kernels/funcs/matrix_inverse.h    +1    -1

paddle/fluid/framework/infershape_utils.cc

@@ -249,13 +249,13 @@ class CompatMetaTensor : public phi::MetaTensor {
   }
 
   void share_meta(const MetaTensor& meta_tensor) override {
-    share_dims(meta_tensor);
     set_dtype(meta_tensor.dtype());
     // VarDesc doesn't contains layout, so we cannot share layout
     // set_layout(meta_tensor.layout());
-    // special case 1: share lod of LoDTensor
+    // special case: share lod of LoDTensor
     share_lod(meta_tensor);
+    share_dims(meta_tensor);
   }
 
  private:

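The hunk reorders CompatMetaTensor::share_meta so that share_dims runs last, after the dtype and LoD have been copied, and trims the comment from "special case 1" to "special case". For orientation, the resulting method condensed from the + side of the hunk (a sketch, not additional code in the commit):

```cpp
// Resulting call order in CompatMetaTensor::share_meta, per the + side above.
void share_meta(const MetaTensor& meta_tensor) override {
  set_dtype(meta_tensor.dtype());
  // VarDesc doesn't contains layout, so we cannot share layout
  // set_layout(meta_tensor.layout());
  // special case: share lod of LoDTensor
  share_lod(meta_tensor);
  share_dims(meta_tensor);  // moved here: dims are shared after dtype and lod
}
```
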
paddle/fluid/operators/gather_nd_op.cc

@@ -16,7 +16,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
-#include "paddle/phi/infermeta/ternary.h"
 
 namespace paddle {
 namespace operators {

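The dropped include lines up with the header moves elsewhere in this commit: the declaration of GatherNdGradInferMeta leaves paddle/phi/infermeta/ternary.h and reappears in backward.h (see the backward.h and ternary.h diffs below), so gather_nd_op.cc no longer needs ternary.h. The op's remaining infermeta includes, copied from the unchanged side of this hunk, with explanatory comments added here:

```cpp
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/backward.h"  // GatherNdGradInferMeta now declared here
#include "paddle/phi/infermeta/binary.h"    // GatherNdInferMeta (forward op)
```
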
paddle/fluid/operators/softmax_op.cc

@@ -215,7 +215,7 @@ REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
                   ops::SoftmaxOpGradMaker<paddle::framework::OpDesc>,
                   ops::SoftmaxOpGradMaker<paddle::imperative::OpBase>,
                   ops::SoftmaxInplaceInferer, SoftmaxInferShapeFunctor);
-DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradnferShapeFunctor,
+DECLARE_INFER_SHAPE_FUNCTOR(softmax_grad, SoftmaxGradInferShapeFunctor,
                             PD_INFER_META(phi::GeneralUnaryGradInferMeta));
 REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad,
-                  SoftmaxGradnferShapeFunctor);
+                  SoftmaxGradInferShapeFunctor);

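The two changed lines fix a misspelled functor name (SoftmaxGradnferShapeFunctor becomes SoftmaxGradInferShapeFunctor) in both the declaration and the registration. For readers unfamiliar with the pattern, a minimal sketch of the idiom this file uses: the macro names, PD_INFER_META, and phi::GeneralUnaryGradInferMeta all appear in the diff, while my_op_grad, MyOpGrad, and MyOpGradInferShapeFunctor are hypothetical placeholders:

```cpp
// Sketch only: DECLARE_INFER_SHAPE_FUNCTOR generates an InferShape functor that
// forwards to the phi InferMeta function wrapped by PD_INFER_META, and
// REGISTER_OPERATOR attaches that functor to the operator, so fluid ops reuse
// the shape/dtype inference implemented once in paddle/phi/infermeta.
DECLARE_INFER_SHAPE_FUNCTOR(my_op_grad, MyOpGradInferShapeFunctor,
                            PD_INFER_META(phi::GeneralUnaryGradInferMeta));
REGISTER_OPERATOR(my_op_grad, ops::MyOpGrad, MyOpGradInferShapeFunctor);
```
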
paddle/phi/core/meta_tensor.cc

@@ -110,7 +110,7 @@ void MetaTensor::share_meta(const MetaTensor& meta_tensor) {
   }
 }
 
-TensorBase* MetaTensor::get_tensor() const { return tensor_; }
+TensorBase* MetaTensor::tensor() const { return tensor_; }
 
 void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   bool is_dense_tensor = phi::DenseTensor::classof(tensor_);
@@ -118,7 +118,7 @@ void MetaTensor::share_dims(const MetaTensor& meta_tensor) {
   if (is_dense_tensor || is_selected_rows) {
     set_dims(meta_tensor.dims());
     if (is_selected_rows) {
-      const auto in_tensor_base = meta_tensor.get_tensor();
+      const auto in_tensor_base = meta_tensor.tensor();
       PADDLE_ENFORCE_EQ(
           phi::SelectedRows::classof(in_tensor_base),
           true,

paddle/phi/core/meta_tensor.h

@@ -66,7 +66,7 @@ class MetaTensor {
   // Because the lod in compiletime and runtime is different,
   // so `LoD` cannot in public methods
   const LoD& lod() const;
 
-  TensorBase* get_tensor() const;
+  TensorBase* tensor() const;
 
   TensorBase* tensor_;
 };

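The header change mirrors the .cc rename: the private accessor get_tensor() becomes tensor(). The one call site updated in this commit sits inside MetaTensor::share_dims; condensed from the meta_tensor.cc hunk above (not new code, and src_is_selected_rows is a name introduced only for this sketch):

```cpp
// Inside MetaTensor::share_dims (condensed from the meta_tensor.cc hunk above):
// the renamed accessor exposes the wrapped TensorBase* of the source MetaTensor
// so the implementation can verify it is also a SelectedRows before copying
// SelectedRows-specific metadata.
const auto in_tensor_base = meta_tensor.tensor();  // previously get_tensor()
const bool src_is_selected_rows = phi::SelectedRows::classof(in_tensor_base);
```
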
paddle/phi/infermeta/backward.cc

@@ -64,10 +64,14 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
   }
 }
 
-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
-  if (dx) {
-    dx->share_meta(x);
-  }
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad) {
+  const auto& dtype = out_grad.dtype();
+  x_grad->set_dims(x.dims());
+  x_grad->share_lod(x);
+  x_grad->set_dtype(dtype);
 }
 
 void GeneralBinaryGradInferMeta(const MetaTensor& x,
@@ -99,6 +103,12 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
   }
 }
 
+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx) {
+  if (dx) {
+    dx->share_meta(x);
+  }
+}
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,
@@ -108,17 +118,8 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                     dout.dims(),
                     errors::InvalidArgument(
                         "Input(Out) and its gradients should have the same shape."));
 
   dx->share_meta(dout);
 }
 
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad) {
-  const auto& dtype = out_grad.dtype();
-  x_grad->set_dims(x.dims());
-  x_grad->share_lod(x);
-  x_grad->set_dtype(dtype);
-}
-
 void PsroiPoolGradInferMeta(const MetaTensor& x,

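Both relocated functions keep their bodies unchanged. As a quick reading aid for GatherNdGradInferMeta, a standalone sketch (no phi dependency; the Meta struct, the GatherNdGradMeta helper, and the shapes below are made up for illustration) of the rule it encodes: the gradient w.r.t. x takes x's dims and LoD but out_grad's dtype, and index contributes nothing to the gradient's meta.

```cpp
// Standalone illustration of the rule in GatherNdGradInferMeta: x_grad gets
// x's dims (and lod) and out_grad's dtype. Meta and GatherNdGradMeta are
// hypothetical stand-ins, not phi types.
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct Meta {
  std::vector<int64_t> dims;
  std::string dtype;
};

Meta GatherNdGradMeta(const Meta& x, const Meta& out_grad) {
  return Meta{x.dims, out_grad.dtype};
}

int main() {
  Meta x{{8, 4, 16}, "float32"};
  Meta out_grad{{8, 16}, "float32"};  // shape is illustrative only
  Meta x_grad = GatherNdGradMeta(x, out_grad);
  assert(x_grad.dims == x.dims);
  assert(x_grad.dtype == out_grad.dtype);
  return 0;
}
```
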
paddle/phi/infermeta/backward.h

@@ -30,7 +30,10 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
                                         MetaTensor* dweight,
                                         MetaTensor* dbias);
 
-void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+void GatherNdGradInferMeta(const MetaTensor& x,
+                           const MetaTensor& index,
+                           const MetaTensor& out_grad,
+                           MetaTensor* x_grad);
 
 void GeneralBinaryGradInferMeta(const MetaTensor& x,
                                 const MetaTensor& y,
@@ -44,6 +47,8 @@ void GeneralTernaryGradInferMeta(const MetaTensor& x,
                                  MetaTensor* dy,
                                  MetaTensor* dz);
 
+void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
+
 void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
                                 const MetaTensor& dout,
                                 int axis,

paddle/phi/infermeta/binary.cc

(This diff is collapsed on the original page and was not expanded in the capture: +452 additions, -451 deletions.)

paddle/phi/infermeta/binary.h

@@ -29,22 +29,43 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.
 
+void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+
+void BCELossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());
+
+void BincountInferMeta(const MetaTensor& x,
+                       const paddle::optional<const MetaTensor&> weights,
+                       int minlength,
+                       MetaTensor* out);
+
+void CholeskySolveInferMeta(const MetaTensor& x,
+                            const MetaTensor& y,
+                            bool upper,
+                            MetaTensor* out);
+
+void CompareAllInferMeta(const MetaTensor& x,
+                         const MetaTensor& y,
+                         MetaTensor* out);
+
 void CompareInferMeta(const MetaTensor& x,
                       const MetaTensor& y,
                       int axis,
                       MetaTensor* out);
 
-void CompareAllInferMeta(const MetaTensor& x,
-                         const MetaTensor& y,
-                         MetaTensor* out);
+void CrossInferMeta(const MetaTensor& x,
+                    const MetaTensor& y,
+                    int axis,
+                    MetaTensor* out);
 
-void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void DistInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   float p,
+                   MetaTensor* out);
 
-void MatmulInferMeta(const MetaTensor& x,
-                     const MetaTensor& y,
-                     bool trans_x,
-                     bool trans_y,
-                     MetaTensor* out);
+void DotInferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
 
 void ElementwiseInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
@@ -55,6 +76,14 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
                              int axis,
                              MetaTensor* out);
 
+void GatherNdInferMeta(const MetaTensor& x,
+                       const MetaTensor& index,
+                       MetaTensor* out);
+
+void GatherTreeMeta(const MetaTensor& ids,
+                    const MetaTensor& parents,
+                    MetaTensor* out);
+
 void HuberLossInferMeta(const MetaTensor& input_meta,
                         const MetaTensor& label_meta,
                         float delta,
@@ -62,29 +91,24 @@ void HuberLossInferMeta(const MetaTensor& input_meta,
                         MetaTensor* residual,
                         MetaConfig config = MetaConfig());
 
-void CholeskySolveInferMeta(const MetaTensor& x,
-                            const MetaTensor& y,
-                            bool upper,
-                            MetaTensor* out);
-
-void TriangularSolveInferMeta(const MetaTensor& x,
-                              const MetaTensor& y,
-                              bool upper,
-                              bool transpose,
-                              bool unitriangular,
-                              MetaTensor* out);
-
 void IndexSampleInferMeta(const MetaTensor& x,
                           const MetaTensor& y,
                           MetaTensor* out,
                           MetaConfig config = MetaConfig());
 
-void CrossInferMeta(const MetaTensor& x,
-                    const MetaTensor& y,
-                    int axis,
-                    MetaTensor* out);
+void LogLossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      float epsilon,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());
 
-void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out);
+void MatmulInferMeta(const MetaTensor& x,
+                     const MetaTensor& y,
+                     bool trans_x,
+                     bool trans_y,
+                     MetaTensor* out);
+
+void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
 
 void SegmentPoolInferMeta(const MetaTensor& x,
                           const MetaTensor& segment_ids,
@@ -93,37 +117,6 @@ void SegmentPoolInferMeta(const MetaTensor& x,
                           MetaTensor* summed_ids,
                           MetaConfig config = MetaConfig());
 
-void BCELossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      MetaTensor* out,
-                      MetaConfig config = MetaConfig());
-
-void BincountInferMeta(const MetaTensor& x,
-                       const paddle::optional<const MetaTensor&> weights,
-                       int minlength,
-                       MetaTensor* out);
-
-void DistInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   float p,
-                   MetaTensor* out);
-
-void GatherNdInferMeta(const MetaTensor& x,
-                       const MetaTensor& index,
-                       MetaTensor* out);
-
-void GatherTreeMeta(const MetaTensor& ids,
-                    const MetaTensor& parents,
-                    MetaTensor* out);
-
-void LogLossInferMeta(const MetaTensor& input,
-                      const MetaTensor& label,
-                      float epsilon,
-                      MetaTensor* out,
-                      MetaConfig config = MetaConfig());
-
-void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
-
 void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             const MetaTensor& label,
                                             bool normalize,
@@ -131,4 +124,11 @@ void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
                                             MetaTensor* out,
                                             MetaConfig config = MetaConfig());
 
+void TriangularSolveInferMeta(const MetaTensor& x,
+                              const MetaTensor& y,
+                              bool upper,
+                              bool transpose,
+                              bool unitriangular,
+                              MetaTensor* out);
+
 }  // namespace phi

paddle/phi/infermeta/nullary.cc

@@ -16,6 +16,12 @@ limitations under the License. */
 
 namespace phi {
 
+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out) {
+  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
+}
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
@@ -26,12 +32,6 @@ void CreateInferMetaBase(const std::vector<int64_t>& shape,
   out->set_layout(layout);
 }
 
-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out) {
-  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
-}
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
@@ -41,18 +41,6 @@ void EyeInferMeta(int64_t num_rows,
   out->set_dtype(dtype);
 }
 
-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out) {
-  auto out_dims = phi::make_ddim(shape);
-  out->set_dims(out_dims);
-  out->set_dtype(dtype);
-  out->set_layout(DataLayout::NCHW);
-}
-
 void GaussianRandomInferMeta(const ScalarArray& shape,
                              float mean,
                              float std,
@@ -65,4 +53,16 @@ void GaussianRandomInferMeta(const ScalarArray& shape,
   out->set_layout(DataLayout::NCHW);
 }
 
+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
+                                      DataType dtype,
+                                      MetaTensor* out) {
+  auto out_dims = phi::make_ddim(shape);
+  out->set_dims(out_dims);
+  out->set_dtype(dtype);
+  out->set_layout(DataLayout::NCHW);
+}
+
 }  // namespace phi

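Both hunks in nullary.cc are moves: CreateInferMeta now precedes CreateInferMetaBase, and TruncatedGaussianRandomInferMeta moves below GaussianRandomInferMeta; the bodies are untouched. The relationship the first pair encodes, restated as a sketch taken straight from the diff (nothing new added, only the comment):

```cpp
// CreateInferMeta is a thin wrapper: it unpacks the ScalarArray and delegates
// to CreateInferMetaBase with a fixed NCHW layout, which then fills the output
// MetaTensor's dims, dtype and layout.
void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out) {
  CreateInferMetaBase(shape.GetData(), dtype, DataLayout::NCHW, out);
}
```
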
paddle/phi/infermeta/nullary.h

@@ -28,25 +28,18 @@ namespace phi {
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.
 
+void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
+
 void CreateInferMetaBase(const std::vector<int64_t>& shape,
                          DataType dtype,
                          DataLayout layout,
                          MetaTensor* out);
 
-void CreateInferMeta(const ScalarArray& shape, DataType dtype, MetaTensor* out);
-
 void EyeInferMeta(int64_t num_rows,
                   int64_t num_columns,
                   DataType dtype,
                   MetaTensor* out);
 
-void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
-                                      float mean,
-                                      float std,
-                                      int seed,
-                                      DataType dtype,
-                                      MetaTensor* out);
-
 void GaussianRandomInferMeta(const ScalarArray& shape,
                              float mean,
                              float std,
@@ -54,4 +47,11 @@ void GaussianRandomInferMeta(const ScalarArray& shape,
                              DataType dtype,
                              MetaTensor* out);
 
+void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
+                                      float mean,
+                                      float std,
+                                      int seed,
+                                      DataType dtype,
+                                      MetaTensor* out);
+
 }  // namespace phi

paddle/phi/infermeta/ternary.cc

@@ -18,6 +18,58 @@ limitations under the License. */
 
 namespace phi {
 
+void AccuracyInferMeta(const MetaTensor& out,
+                       const MetaTensor& indice,
+                       const MetaTensor& label,
+                       MetaTensor* accuracy,
+                       MetaTensor* correct,
+                       MetaTensor* total,
+                       MetaConfig config) {
+  auto inference_dim = out.dims();
+  auto label_dim = label.dims();
+  // Assume indices has same shape as inference, because
+  // it's the output of topk.
+  PADDLE_ENFORCE_EQ(
+      label_dim.size(),
+      2,
+      phi::errors::InvalidArgument(
+          "ShapeError: label's dimensions of AccuracyOp must be 2. "
+          "But received label's dimensions = %d, label's shape = [%s]",
+          label_dim.size(),
+          label_dim));
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(label_dim[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "ShapeError: label's second dimension of "
+                          "AccuracyOp must be 1. But received label's "
+                          "second dimension is = %d, label's shape = [%s]",
+                          label_dim[1],
+                          label_dim));
+    PADDLE_ENFORCE_EQ(
+        inference_dim[0],
+        label_dim[0],
+        phi::errors::InvalidArgument(
+            "ShapeError: the output's num_rows of AccuracyOp must be"
+            " the same as label's num_rows. But received output's "
+            "shape = [%s], label's shape = [%s], output's num_rows = %d, "
+            "label's "
+            "num_rows = %d",
+            inference_dim,
+            label_dim,
+            inference_dim[0],
+            label_dim[0]));
+  }
+
+  accuracy->set_dims({1});
+  accuracy->set_dtype(out.dtype());
+  correct->set_dims({1});
+  correct->set_dtype(out.dtype());
+  total->set_dims({1});
+  total->set_dtype(out.dtype());
+  accuracy->share_lod(out);
+}
+
 void AddmmInferMeta(const MetaTensor& input,
                     const MetaTensor& x,
                     const MetaTensor& y,
@@ -89,6 +141,107 @@ void AddmmInferMeta(const MetaTensor& input,
   out->set_dtype(input.dtype());
 }
 
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count) {
+  auto src_index_dims = src_index.dims();
+  if (src_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(src_index_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Src_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          src_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(
+        src_index_dims.size(),
+        1,
+        phi::errors::InvalidArgument(
+            "The Src_index should be 1D, when it is not 2D, but we get %d",
+            src_index_dims.size()));
+  }
+
+  auto dst_index_dims = dst_index.dims();
+  if (dst_index_dims.size() == 2) {
+    PADDLE_ENFORCE_EQ(dst_index_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "The last dim of Dst_index should be 1 when it "
+                          "is 2D, but we get %d",
+                          dst_index_dims[1]));
+  } else {
+    PADDLE_ENFORCE_EQ(
+        dst_index_dims.size(),
+        1,
+        phi::errors::InvalidArgument("The Dst_index should be 1D, "
+                                     "when it is not 2D, but we get %d",
+                                     dst_index_dims.size()));
+  }
+
+  PADDLE_ENFORCE_EQ(src_index_dims[0],
+                    dst_index_dims[0],
+                    phi::errors::InvalidArgument(
+                        "Src_index and Dst_index should have the same shape."));
+
+  auto dims = x.dims();
+  out->set_dims(dims);
+  out->set_dtype(x.dtype());
+
+  if (pool_type == "MEAN") {
+    dst_count->set_dims({dims[0]});
+    dst_count->set_dtype(DataType::INT32);
+  }
+}
+
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out) {
+  auto x_dims = x.dims();
+  auto y_dims = y.dims();
+  auto w_dims = weight.dims();
+  DDim out_dims;
+  out_dims = funcs::GetOutputDims(x_dims, y_dims);
+  if (w_dims.size() > 1 || w_dims[0] != 1) {
+    out_dims = funcs::GetOutputDims(out_dims, w_dims);
+  }
+  out->set_dims(out_dims);
+  out->set_dtype(x.dtype());
+  out->share_lod(x);
+}
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out) {
+  auto s_dims = start.dims();
+  PADDLE_ENFORCE_EQ(
+      (s_dims.size() == 1) && (s_dims[0] == 1),
+      true,
+      phi::errors::InvalidArgument("The shape of Input(Start) must be [1],"
+                                   "but received input shape is [%s].",
+                                   s_dims));
+  auto e_dims = stop.dims();
+  PADDLE_ENFORCE_EQ(
+      (e_dims.size() == 1) && (e_dims[0] == 1),
+      true,
+      phi::errors::InvalidArgument("The shape of Input(Stop) must be [1],"
+                                   "but received input shape is [%s].",
+                                   e_dims));
+  auto step_dims = number.dims();
+  PADDLE_ENFORCE_EQ(
+      (step_dims.size() == 1) && (step_dims[0] == 1),
+      true,
+      phi::errors::InvalidArgument("The shape of Input(Num) must be [1],"
+                                   "but received input shape is [%s].",
+                                   step_dims));
+  out->set_dims(phi::make_ddim({-1}));
+  out->set_dtype(start.dtype());
+}
+
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
@@ -319,156 +472,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
   scores->set_dtype(length.dtype());
 }
 
(152 removed lines: the original definitions of LerpInferMeta, LinspaceInferMeta, AccuracyInferMeta, and GraphSendRecvInferMeta, with bodies identical to the ones added above, are deleted from their old position at the end of the file.)
 
 }  // namespace phi

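All three hunks are moves; the four function bodies are the same ones removed at the end of the file. As a reading aid for GraphSendRecvInferMeta, a standalone sketch (no phi dependency; names and shapes are made up) of its output rule: out inherits x's dims and dtype, and dst_count only receives a meta (shape [N], dtype INT32) when pool_type is "MEAN".

```cpp
// Standalone illustration of GraphSendRecvInferMeta's output rule.
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

int main() {
  std::vector<int64_t> x_dims = {16, 32};  // x is [N, D] with N = 16, D = 32
  std::string pool_type = "MEAN";

  std::vector<int64_t> out_dims = x_dims;  // out: same dims (and dtype) as x
  std::vector<int64_t> dst_count_dims;
  if (pool_type == "MEAN") {
    dst_count_dims = {x_dims[0]};          // dst_count: [N], dtype INT32
  }

  assert(out_dims == x_dims);
  assert(dst_count_dims == std::vector<int64_t>({16}));
  return 0;
}
```
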
paddle/phi/infermeta/ternary.h

@@ -45,16 +45,22 @@ void AddmmInferMeta(const MetaTensor& input,
                     float beta,
                     MetaTensor* out);
 
-void GatherNdGradInferMeta(const MetaTensor& x,
-                           const MetaTensor& index,
-                           const MetaTensor& out_grad,
-                           MetaTensor* x_grad);
-
-void ScatterInferMeta(const MetaTensor& x,
-                      const MetaTensor& index,
-                      const MetaTensor& updates,
-                      bool overwrite,
-                      MetaTensor* out);
+void GraphSendRecvInferMeta(const MetaTensor& x,
+                            const MetaTensor& src_index,
+                            const MetaTensor& dst_index,
+                            const std::string& pool_type,
+                            MetaTensor* out,
+                            MetaTensor* dst_count);
+
+void LerpInferMeta(const MetaTensor& x,
+                   const MetaTensor& y,
+                   const MetaTensor& weight,
+                   MetaTensor* out);
+
+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       MetaTensor* out);
 
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
@@ -65,6 +71,12 @@ void NllLossRawInferMeta(const MetaTensor& input,
                          MetaTensor* total_weight,
                          MetaConfig config = MetaConfig());
 
+void ScatterInferMeta(const MetaTensor& x,
+                      const MetaTensor& index,
+                      const MetaTensor& updates,
+                      bool overwrite,
+                      MetaTensor* out);
+
 void ScatterNdAddInferMeta(const MetaTensor& x,
                            const MetaTensor& index,
                            const MetaTensor& updates,
@@ -78,20 +90,4 @@ void ViterbiDecodeInferMeta(const MetaTensor& input,
                             MetaTensor* path,
                             MetaConfig config = MetaConfig());
 
-void LerpInferMeta(const MetaTensor& x,
-                   const MetaTensor& y,
-                   const MetaTensor& weight,
-                   MetaTensor* out);
-
-void LinspaceInferMeta(const MetaTensor& start,
-                       const MetaTensor& stop,
-                       const MetaTensor& number,
-                       MetaTensor* out);
-
-void GraphSendRecvInferMeta(const MetaTensor& x,
-                            const MetaTensor& src_index,
-                            const MetaTensor& dst_index,
-                            const std::string& pool_type,
-                            MetaTensor* out,
-                            MetaTensor* dst_count);
-
 }  // namespace phi

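ternary.h mirrors the .cc reordering: GatherNdGradInferMeta leaves this header (its declaration moves to backward.h), GraphSendRecvInferMeta, LerpInferMeta, and LinspaceInferMeta are declared in alphabetical position, and ScatterInferMeta only changes position. One detail worth noting from LerpInferMeta's body (see the ternary.cc hunk above): the output dims are the broadcast of x and y, further broadcast with weight only when weight is not a scalar. A standalone sketch of that rule (funcs::GetOutputDims is phi's broadcast helper in the real code; the broadcast() function and shapes below are made up for illustration):

```cpp
// Standalone illustration of LerpInferMeta's dim rule: a scalar weight is
// skipped, a non-scalar weight participates in the broadcast.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using Dims = std::vector<int64_t>;

// Minimal right-aligned broadcast of two compatible shapes (illustrative only).
Dims broadcast(const Dims& a, const Dims& b) {
  Dims out(std::max(a.size(), b.size()));
  for (size_t i = 0; i < out.size(); ++i) {
    int64_t x = i < out.size() - a.size() ? 1 : a[i - (out.size() - a.size())];
    int64_t y = i < out.size() - b.size() ? 1 : b[i - (out.size() - b.size())];
    out[i] = std::max(x, y);
  }
  return out;
}

int main() {
  Dims x{4, 1}, y{1, 5}, w{1};
  Dims out = broadcast(x, y);
  if (w.size() > 1 || w[0] != 1) out = broadcast(out, w);  // scalar weight: skipped
  assert(out == Dims({4, 5}));
  return 0;
}
```
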
paddle/phi/infermeta/unary.cc

(This diff is collapsed on the original page and was not expanded in the capture: +730 additions, -730 deletions.)

paddle/phi/infermeta/unary.h

@@ -32,32 +32,20 @@ class MetaConfig;
 // Because functions in this file not only can infer shape, but also need
 // infer lod or other useful data.
 
+void ArgMinMaxInferMeta(const MetaTensor& x,
+                        int64_t axis,
+                        bool keepdims,
+                        bool flatten,
+                        int dtype,
+                        MetaTensor* out,
+                        MetaConfig config = MetaConfig());
+
 void ArgsortInferMeta(const MetaTensor& input,
                       int axis,
                       bool descending,
                       MetaTensor* output,
                       MetaTensor* indices);
 
-void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out);
-
-// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
-void UnchangedInferMetaCheckAxis(const MetaTensor& x, int axis, MetaTensor* out);
-
-void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);
-
-void FlattenInferMeta(const MetaTensor& x,
-                      int start_axis,
-                      int stop_axis,
-                      MetaTensor* out);
-
-void GumbelSoftmaxInferMeta(const MetaTensor& x,
-                            float temperature,
-                            bool hard,
-                            int axis,
-                            MetaTensor* out);
-
 void CastInferMeta(const MetaTensor& x, DataType out_dtype, MetaTensor* out);
 
 void CholeskyInferMeta(const MetaTensor& x, bool upper, MetaTensor* out);
@@ -76,6 +64,30 @@ void CumsumInferMeta(const MetaTensor& x,
                      bool reverse,
                      MetaTensor* out);
 
+void DiagInferMeta(const MetaTensor& x,
+                   int offset,
+                   float padding_value,
+                   MetaTensor* out);
+
+void DiagonalInferMeta(
+    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out);
+
+void EighInferMeta(const MetaTensor& x,
+                   const std::string& uplo,
+                   MetaTensor* out_w,
+                   MetaTensor* out_v);
+
+void FlattenInferMeta(const MetaTensor& x,
+                      int start_axis,
+                      int stop_axis,
+                      MetaTensor* out);
+
+void GumbelSoftmaxInferMeta(const MetaTensor& x,
+                            float temperature,
+                            bool hard,
+                            int axis,
+                            MetaTensor* out);
+
 void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);
 
 void InferMetaFromVecValue(const MetaTensor& x,
@@ -84,11 +96,37 @@ void InferMetaFromVecValue(const MetaTensor& x,
 
 void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);
 
+void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
+
 void MultinomialInferMeta(const MetaTensor& x,
                           int num_samples,
                           bool replacement,
                           MetaTensor* out);
+
+void PadInferMeta(const MetaTensor& input,
+                  const std::vector<int>& paddings,
+                  float pad_value,
+                  MetaTensor* out,
+                  MetaConfig config = MetaConfig());
+
+void PixelShuffleInferMeta(const MetaTensor& x,
+                           int upscale_factor,
+                           const std::string& data_format,
+                           MetaTensor* out);
+
+void RealAndImagInferMeta(const MetaTensor& x, MetaTensor* out);
+
+void ReduceInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     MetaTensor* out);
+
+void ReduceInferMetaBase(const MetaTensor& x,
+                         const std::vector<int64_t>& axis,
+                         bool keep_dim,
+                         bool reduce_all,
+                         MetaTensor* out);
 
 void ReshapeInferMeta(const MetaTensor& x,
                       const ScalarArray& shape,
                       MetaTensor* out,
@@ -100,28 +138,23 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                 MetaTensor* out,
                                 MetaConfig config = MetaConfig());
 
-void TileInferMeta(const MetaTensor& x,
-                   const ScalarArray& repeat_times,
-                   MetaTensor* out,
-                   MetaConfig config = MetaConfig());
+void ShardIndexInferMeta(const MetaTensor& in,
+                         int index_num,
+                         int nshards,
+                         int shard_id,
+                         int ignore_value,
+                         MetaTensor* out,
+                         MetaConfig config = MetaConfig());
 
-void SumRawInferMeta(const MetaTensor& x,
-                     const std::vector<int64_t>& axis,
-                     bool keep_dim,
-                     bool reduce_all,
-                     DataType dtype,
-                     MetaTensor* out);
+void SizeInferMeta(const MetaTensor& input, MetaTensor* out);
 
-void ReduceInferMetaBase(const MetaTensor& x,
-                         const std::vector<int64_t>& axis,
-                         bool keep_dim,
-                         bool reduce_all,
-                         MetaTensor* out);
+void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);
 
-void ReduceInferMeta(const MetaTensor& x,
-                     const std::vector<int64_t>& axis,
-                     bool keep_dim,
-                     MetaTensor* out);
+void SplitInferMeta(const MetaTensor& x_meta,
+                    const ScalarArray& num_or_sections,
+                    const Scalar& axis,
+                    std::vector<MetaTensor*> out,
+                    MetaConfig config = MetaConfig());
 
 void SumInferMeta(const MetaTensor& x,
                   const std::vector<int64_t>& axis,
@@ -129,21 +162,39 @@ void SumInferMeta(const MetaTensor& x,
                   bool keep_dim,
                   MetaTensor* out);
 
+void SumRawInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     bool reduce_all,
+                     DataType dtype,
+                     MetaTensor* out);
+
+void TileInferMeta(const MetaTensor& x,
+                   const ScalarArray& repeat_times,
+                   MetaTensor* out,
+                   MetaConfig config = MetaConfig());
+
+void TraceInferMeta(
+    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out);
+
 void TransferLayoutInferMeta(const MetaTensor& x,
                              DataLayout layout,
                              MetaTensor* out);
 
-void SplitInferMeta(const MetaTensor& x_meta,
-                    const ScalarArray& num_or_sections,
-                    const Scalar& axis,
-                    std::vector<MetaTensor*> out,
-                    MetaConfig config = MetaConfig());
+void TransposeInferMeta(const MetaTensor& x,
+                        const std::vector<int>& axis,
+                        MetaTensor* out);
 
 void UnbindInferMeta(const MetaTensor& x,
                      int axis,
                      std::vector<MetaTensor>* outs);
 
-void TraceInferMeta(
-    const MetaTensor& x, int offset, int axis1, int axis2, MetaTensor* out);
+void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out);
+
+// meta x -> out without change, check if axis in range [-Rank(x), Rank(x)-1]
+void UnchangedInferMetaCheckAxis(const MetaTensor& x, int axis, MetaTensor* out);
 
 void UnfoldInferMeta(const MetaTensor& x,
                      const std::vector<int>& kernel_sizes,
@@ -153,56 +204,6 @@ void UnfoldInferMeta(const MetaTensor& x,
                      MetaTensor* out,
                      MetaConfig config = MetaConfig());
 
-void DiagInferMeta(const MetaTensor& x,
-                   int offset,
-                   float padding_value,
-                   MetaTensor* out);
-
-void ArgMinMaxInferMeta(const MetaTensor& x,
-                        int64_t axis,
-                        bool keepdims,
-                        bool flatten,
-                        int dtype,
-                        MetaTensor* out,
-                        MetaConfig config = MetaConfig());
-
-void SizeInferMeta(const MetaTensor& input, MetaTensor* out);
-
-void PadInferMeta(const MetaTensor& input,
-                  const std::vector<int>& paddings,
-                  float pad_value,
-                  MetaTensor* out,
-                  MetaConfig config = MetaConfig());
-
-void DiagonalInferMeta(
-    const MetaTensor& input, int offset, int axis1, int axis2, MetaTensor* out);
-
-void PixelShuffleInferMeta(const MetaTensor& x,
-                           int upscale_factor,
-                           const std::string& data_format,
-                           MetaTensor* out);
-
-void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
-
-void TransposeInferMeta(const MetaTensor& x,
-                        const std::vector<int>& axis,
-                        MetaTensor* out);
-
-void EighInferMeta(const MetaTensor& x,
-                   const std::string& uplo,
-                   MetaTensor* out_w,
-                   MetaTensor* out_v);
-
 void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);
 
-void ShardIndexInferMeta(const MetaTensor& in,
-                         int index_num,
-                         int nshards,
-                         int shard_id,
-                         int ignore_value,
-                         MetaTensor* out,
-                         MetaConfig config = MetaConfig());
-
-void SoftmaxInferMeta(const MetaTensor& x, int axis, MetaTensor* out);
-
 }  // namespace phi

paddle/phi/kernels/funcs/matrix_inverse.h

@@ -39,7 +39,7 @@ void ComputeInverseEigen(const Context& dev_ctx,
   int batch_size = rank > 2 ? a.numel() / (n * n) : 1;
 
   const T* a_ptr = a.data<T>();
-  T* a_inv_ptr = a_inv->mutable_data<T>(dev_ctx.GetPlace());
+  T* a_inv_ptr = dev_ctx.template Alloc<T>(a_inv);
 
   for (int i = 0; i < batch_size; ++i) {
     ConstEigenMatrixMap mat(a_ptr + i * n * n, n, n);

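The single changed line swaps the legacy tensor-side allocation call for the device context's typed allocator. Side by side, taken from the - and + lines of this hunk (only the explanatory comments are added; Context and T are the template parameters of the surrounding ComputeInverseEigen):

```cpp
// Old idiom: allocate output storage through the tensor, passing the place.
T* a_inv_ptr = a_inv->mutable_data<T>(dev_ctx.GetPlace());

// New idiom: let the device context allocate typed storage for the tensor.
// The extra "template" keyword disambiguates the dependent member-template
// call inside the function template.
T* a_inv_ptr = dev_ctx.template Alloc<T>(a_inv);
```
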