机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 44d94e11 (unverified), authored Sep 22, 2022 by YuanRisheng; committed by GitHub on Sep 22, 2022
Remove code that used in yaml's invoke (#46317)
* remove invoke yaml
* fix ci bugs
Parent: 0a144ca1
Showing 6 changed files with 14 additions and 327 deletions (+14 −327)
paddle/phi/api/lib/api_custom_impl.cc   (+0 −268)
paddle/phi/api/lib/api_custom_impl.h    (+0 −23)
paddle/phi/api/yaml/legacy_ops.yaml     (+14 −2)
paddle/phi/infermeta/binary.cc          (+0 −27)
paddle/phi/infermeta/binary.h           (+0 −6)
paddle/phi/infermeta/multiary.cc        (+0 −1)
paddle/phi/api/lib/api_custom_impl.cc
...
@@ -129,276 +129,8 @@ Tensor copy_to_impl(const Tensor& x, Place place, bool blocking) {
  return out;
}

Tensor embedding_impl(const Tensor& x,
                      const Tensor& weight,
                      int64_t padding_idx,
                      bool sparse) {
  DataType kernel_data_type = ParseDataType(weight);
  auto kernel_key_set = ParseKernelKeyByInputArgs(weight);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
  VLOG(6) << "embedding API kernel key: [" << kernel_key.backend() << ", "
          << kernel_key.layout() << ", " << kernel_data_type << "]";

  auto* dev_ctx = GetDeviceContextByBackend(kernel_key.backend());

  Tensor api_output;

  if (phi::DenseTensor::classof(weight.impl().get())) {
    auto kernel_result =
        phi::KernelFactory::Instance().SelectKernelOrThrowError(
            "embedding",
            {kernel_key.backend(), kernel_key.layout(), kernel_data_type});
    const auto& kernel = kernel_result.kernel;
    VLOG(6) << "embedding API kernel: " << kernel;

    auto input_x = PrepareData(x, kernel.InputAt(0), {});
    auto input_weight = PrepareData(weight, kernel.InputAt(1), {});

    auto* kernel_out = SetKernelOutput(&api_output);
    phi::MetaTensor meta_out(kernel_out);
    phi::EmbeddingInferMeta(MakeMetaTensor(*input_x),
                            MakeMetaTensor(*input_weight),
                            padding_idx,
                            sparse,
                            &meta_out);

    using kernel_signature = void (*)(const platform::DeviceContext&,
                                      const phi::DenseTensor&,
                                      const phi::DenseTensor&,
                                      int64_t,
                                      phi::DenseTensor*);
    auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
    {
      (*kernel_fn)(
          *dev_ctx, *input_x, *input_weight, padding_idx, kernel_out);
    }
  } else {
    auto kernel_result =
        phi::KernelFactory::Instance().SelectKernelOrThrowError(
            "sparse_weight_embedding",
            {kernel_key.backend(), kernel_key.layout(), kernel_data_type});
    const auto& kernel = kernel_result.kernel;
    VLOG(6) << "sparse_weight_embedding API kernel: " << kernel;

    auto input_x = PrepareData(x, kernel.InputAt(0), {});
    auto input_weight = TensorToSelectedRows(weight);

    auto* kernel_out = SetKernelOutput(&api_output);
    phi::MetaTensor meta_out(kernel_out);
    phi::EmbeddingInferMeta(MakeMetaTensor(*input_x),
                            MakeMetaTensor(*input_weight),
                            padding_idx,
                            sparse,
                            &meta_out);

    using kernel_signature = void (*)(const platform::DeviceContext&,
                                      const phi::DenseTensor&,
                                      const phi::SelectedRows&,
                                      int64_t,
                                      phi::DenseTensor*);
    auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
    {
      (*kernel_fn)(
          *dev_ctx, *input_x, *input_weight, padding_idx, kernel_out);
    }
  }
  return api_output;
}

std::vector<Tensor> split_impl(const Tensor& x,
                               const IntArray& num_or_sections,
                               const Scalar& axis) {
  auto kernel_key_set = ParseKernelKeyByInputArgs(x);
  auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();

  Backend kernel_backend = kernel_key.backend();
  DataLayout kernel_layout = kernel_key.layout();
  DataType kernel_data_type = kernel_key.dtype();

  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "split", {kernel_backend, kernel_layout, kernel_data_type});
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "split API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "split API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto dense_x = PrepareData(x, kernel.InputAt(0), {});

  // Calculate the number of out tensors
  size_t out_number;
  if (num_or_sections.size() == 1) {
    if (num_or_sections.GetData()[0] < 0) {
      out_number = 1;
    } else {
      out_number = num_or_sections.GetData()[0];
    }
  } else {
    out_number = num_or_sections.size();
  }

  std::vector<Tensor> out;
  auto dense_outs = SetKernelOutput(out_number, &out);
  std::vector<phi::MetaTensor> meta_outs;
  meta_outs.reserve(out_number);
  std::vector<phi::MetaTensor*> meta_out_ptrs;
  meta_out_ptrs.reserve(out_number);
  for (size_t i = 0; i < out_number; ++i) {
    meta_outs.push_back(dense_outs[i]);
    meta_out_ptrs.push_back(&meta_outs.back());
  }
  phi::SplitInferMeta(
      MakeMetaTensor(*dense_x), num_or_sections, axis, meta_out_ptrs);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::IntArray&,
                                    const phi::Scalar&,
                                    std::vector<phi::DenseTensor*>&);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx,
               *dense_x,
               phi::IntArray(num_or_sections),
               phi::Scalar(axis),
               dense_outs);

  return out;
}
////////////////// Backward(grad) api impls //////////////////////

std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> batch_norm_impl(
    const Tensor& x,
    const Tensor& scale,
    const Tensor& bias,
    const Tensor& mean,
    const Tensor& variance,
    float momentum,
    float epsilon,
    const std::string& data_layout,
    bool is_test,
    bool use_global_stats,
    bool trainable_statistics,
    bool fuse_with_relu) {
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;

  kernel_data_type = ParseDataType(x);

  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(x);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      "batch_norm", {kernel_backend, kernel_layout, kernel_data_type});
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << "batch_norm API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << "batch_norm API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  auto input_x = PrepareData(x, kernel.InputAt(0), {});
  auto input_scale = PrepareData(scale, kernel.InputAt(1), {});
  auto input_bias = PrepareData(bias, kernel.InputAt(2), {});
  auto input_mean = PrepareData(mean, kernel.InputAt(3), {});
  auto input_variance = PrepareData(variance, kernel.InputAt(4), {});

  std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> api_output;
  auto kernel_out_0 = SetKernelOutput(&std::get<0>(api_output));
  std::get<1>(api_output).set_impl(mean.impl());
  std::get<2>(api_output).set_impl(variance.impl());
  auto kernel_out_1 = SetKernelOutput(&std::get<1>(api_output));
  auto kernel_out_2 = SetKernelOutput(&std::get<2>(api_output));
  auto kernel_out_3 = SetKernelOutput(&std::get<3>(api_output));
  auto kernel_out_4 = SetKernelOutput(&std::get<4>(api_output));
  auto kernel_out_5 = SetKernelOutput(&std::get<5>(api_output));

  phi::MetaTensor meta_out_0(kernel_out_0);
  phi::MetaTensor meta_out_1(kernel_out_1);
  phi::MetaTensor meta_out_2(kernel_out_2);
  phi::MetaTensor meta_out_3(kernel_out_3);
  phi::MetaTensor meta_out_4(kernel_out_4);
  phi::MetaTensor meta_out_5(kernel_out_5);

  phi::BatchNormInferMeta(MakeMetaTensor(*input_x),
                          MakeMetaTensor(*input_scale),
                          MakeMetaTensor(*input_bias),
                          MakeMetaTensor(*input_mean),
                          MakeMetaTensor(*input_variance),
                          momentum,
                          epsilon,
                          data_layout,
                          is_test,
                          use_global_stats,
                          trainable_statistics,
                          fuse_with_relu,
                          &meta_out_0,
                          &meta_out_1,
                          &meta_out_2,
                          &meta_out_3,
                          &meta_out_4,
                          &meta_out_5);

  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    float,
                                    float,
                                    const std::string&,
                                    bool,
                                    bool,
                                    bool,
                                    bool,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  {
    (*kernel_fn)(*dev_ctx,
                 *input_x,
                 *input_scale,
                 *input_bias,
                 *input_mean,
                 *input_variance,
                 momentum,
                 epsilon,
                 data_layout,
                 is_test,
                 use_global_stats,
                 trainable_statistics,
                 fuse_with_relu,
                 kernel_out_0,
                 kernel_out_1,
                 kernel_out_2,
                 kernel_out_3,
                 kernel_out_4,
                 kernel_out_5);
  }

  return api_output;
}

void imag_grad_impl(const Tensor& out_grad, Tensor* x_grad) {
  phi::KernelKey kernel_key{ParseBackend(out_grad),
                            out_grad.layout(),
...
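One detail of the removed split_impl above is easy to misread: the output-count rule for num_or_sections. The following standalone sketch restates exactly that branch (a plain std::vector<int64_t> stands in for phi::IntArray, and SplitOutNumber is a hypothetical helper, not part of the diff):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Restates the out_number branch of the removed split_impl: one negative
// entry means a single output, one non-negative entry N means N outputs,
// and a multi-entry list means one output per listed section.
size_t SplitOutNumber(const std::vector<int64_t>& num_or_sections) {
  if (num_or_sections.size() == 1) {
    return num_or_sections[0] < 0 ? 1u
                                  : static_cast<size_t>(num_or_sections[0]);
  }
  return num_or_sections.size();
}

int main() {
  assert(SplitOutNumber({3}) == 3);        // split into 3 sections
  assert(SplitOutNumber({-1}) == 1);       // single section, size inferred
  assert(SplitOutNumber({2, 3, 5}) == 3);  // explicit section sizes
  return 0;
}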
paddle/phi/api/lib/api_custom_impl.h
...
@@ -33,31 +33,8 @@ namespace experimental {
Tensor add_n_impl(const std::vector<Tensor>& x);

std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> batch_norm_impl(
    const Tensor& x,
    const Tensor& scale,
    const Tensor& bias,
    const Tensor& mean,
    const Tensor& variance,
    float momentum,
    float epsilon,
    const std::string& data_layout,
    bool is_test,
    bool use_global_stats,
    bool trainable_statistics,
    bool fuse_with_relu);

Tensor copy_to_impl(const Tensor& x, Place place, bool blocking);

Tensor embedding_impl(const Tensor& x,
                      const Tensor& weight,
                      int64_t padding_idx,
                      bool sparse);

std::vector<Tensor> split_impl(const Tensor& x,
                               const IntArray& num_or_sections,
                               const Scalar& axis);

////////////////// Backward(grad) api impls //////////////////////

void imag_grad_impl(const Tensor& out_grad, Tensor* x_grad);
...
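Every implementation behind these declarations followed the same hand-written dispatch recipe that generated code now provides: select a kernel by (backend, layout, dtype), run the InferMeta function on the outputs, then cast phi's type-erased kernel entry back to a concrete signature via GetVariadicKernelFn<kernel_signature>(). A minimal self-contained sketch of that cast-and-call idiom (KernelEntry and the registry here are stand-ins, not the phi API):

#include <cassert>
#include <map>
#include <string>

// Stand-in kernel table. Like phi's registry, it stores kernels
// type-erased and lets the call site re-cast them to the true signature,
// which is what GetVariadicKernelFn<kernel_signature>() does in the
// removed embedding/split/batch_norm impls.
struct KernelEntry {
  using AnyFn = void (*)();
  AnyFn fn = nullptr;
  template <typename Signature>
  Signature Get() const {
    // Casting a function pointer back to its original type is well-defined.
    return reinterpret_cast<Signature>(fn);
  }
};

static double AddKernel(double a, double b) { return a + b; }

int main() {
  std::map<std::string, KernelEntry> registry;
  registry["add"] = {reinterpret_cast<KernelEntry::AnyFn>(&AddKernel)};

  using kernel_signature = double (*)(double, double);
  auto* kernel_fn = registry["add"].Get<kernel_signature>();
  assert((*kernel_fn)(1.5, 2.5) == 4.0);
  return 0;
}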
paddle/phi/api/yaml/legacy_ops.yaml
...
@@ -328,7 +328,12 @@
 - op : batch_norm
   args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
   output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
-  invoke : batch_norm_impl(x, scale, bias, mean, variance, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics, fuse_with_relu)
+  infer_meta :
+    func : BatchNormInferMeta
+  kernel :
+    func : batch_norm
+    data_type : x
+  view : (mean -> mean_out), (variance -> variance_out)
   backward : batch_norm_grad

 - op : bce_loss
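The added view : (mean -> mean_out), (variance -> variance_out) entry declares what the removed batch_norm_impl did imperatively with std::get<1>(api_output).set_impl(mean.impl()): the running-statistics outputs alias the input tensors' storage instead of allocating fresh buffers. A shared_ptr sketch of that aliasing (TensorLike is a hypothetical stand-in for paddle's Tensor):

#include <cassert>
#include <memory>
#include <vector>

// TensorLike stands in for a tensor whose data lives behind a shared impl
// pointer; sharing the impl makes two tensors views of one buffer, which
// is what `view : (mean -> mean_out)` declares.
struct TensorLike {
  std::shared_ptr<std::vector<float>> impl;
};

int main() {
  TensorLike mean{std::make_shared<std::vector<float>>(3, 0.0f)};
  TensorLike mean_out;
  mean_out.impl = mean.impl;         // the "view": same underlying storage
  (*mean_out.impl)[0] = 42.0f;       // write through the view...
  assert((*mean.impl)[0] == 42.0f);  // ...visible through the original
  return 0;
}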
...
@@ -798,7 +803,14 @@
 - op : embedding
   args : (Tensor x, Tensor weight, int64_t padding_idx=-1, bool sparse=false)
   output : Tensor
-  invoke : embedding_impl(x, weight, padding_idx, sparse)
+  infer_meta :
+    func : EmbeddingInferMeta
+    param : [x, weight, padding_idx]
+  kernel :
+    func : embedding {dense, dense -> dense}
+           sparse_weight_embedding {dense, selected_rows -> dense}
+    param : [x, weight, padding_idx]
+    data_type : weight
   backward : embedding_grad

 - op : empty
...
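The two func lines under embedding's kernel entry register one kernel per weight storage type; which one runs is decided by the weight tensor's runtime type, as the removed embedding_impl did with phi::DenseTensor::classof(weight.impl().get()). A minimal sketch of that selection rule (everything except the two kernel names is hypothetical):

#include <cassert>
#include <string>

// Storage types named in the yaml kernel signatures:
//   embedding               {dense, dense -> dense}
//   sparse_weight_embedding {dense, selected_rows -> dense}
enum class WeightStorage { kDenseTensor, kSelectedRows };

std::string SelectEmbeddingKernel(WeightStorage weight) {
  // Mirrors the branch on phi::DenseTensor::classof(weight.impl().get())
  // in the removed embedding_impl.
  return weight == WeightStorage::kDenseTensor ? "embedding"
                                               : "sparse_weight_embedding";
}

int main() {
  assert(SelectEmbeddingKernel(WeightStorage::kDenseTensor) == "embedding");
  assert(SelectEmbeddingKernel(WeightStorage::kSelectedRows) ==
         "sparse_weight_embedding");
  return 0;
}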
paddle/phi/infermeta/binary.cc
...
@@ -90,32 +90,6 @@ void AllValueCompareInferMeta(const MetaTensor& x,
  out->set_dtype(DataType::BOOL);
}

void EmbeddingInferMeta(const MetaTensor& input,
                        const MetaTensor& weight,
                        int64_t padding_idx,
                        MetaTensor* out) {
  auto table_dims = weight.dims();
  auto ids_dims = input.dims();
  int ids_rank = ids_dims.size();
  VLOG(5) << "ids rank is " << ids_rank << std::endl;
  PADDLE_ENFORCE_EQ(
      table_dims.size(),
      2,
      phi::errors::InvalidArgument(
          "ShapeError: The dimensions of the 'lookup table' must be 2. "
          "But received lookup table's dimensions = %d, "
          "lookup table's shape = [%s].",
          table_dims.size(),
          table_dims));

  auto output_dims = phi::vectorize(ids_dims);
  output_dims.push_back(table_dims[1]);
  out->set_dims(phi::make_ddim(output_dims));
  out->set_dtype(weight.dtype());
  out->share_lod(input);
}

void KLDivInferMeta(const MetaTensor& x,
                    const MetaTensor& label,
                    const std::string& reduction,
...
@@ -1196,7 +1170,6 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
void EmbeddingInferMeta(const MetaTensor& x,
                        const MetaTensor& weight,
                        int64_t padding_idx,
                        bool sparse,
                        MetaTensor* out) {
  const auto& table_dims = weight.dims();
  const auto& ids_dims = x.dims();
...
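Both the deleted four-parameter EmbeddingInferMeta and the surviving five-parameter overload implement the same shape rule: the ids shape with the embedding width (table_dims[1]) appended. A small self-contained check of that rule (std::vector<int64_t> stands in for phi::DDim; EmbeddingOutDims is a hypothetical helper):

#include <cassert>
#include <cstdint>
#include <vector>

// Shape rule from EmbeddingInferMeta: out_dims = ids_dims + [table_dims[1]].
std::vector<int64_t> EmbeddingOutDims(const std::vector<int64_t>& ids_dims,
                                      const std::vector<int64_t>& table_dims) {
  assert(table_dims.size() == 2);  // the lookup table must be 2-D
  std::vector<int64_t> out = ids_dims;
  out.push_back(table_dims[1]);
  return out;
}

int main() {
  // ids of shape [batch=4, seq=16] with a [vocab=1000, dim=128] table
  // produce an output of shape [4, 16, 128].
  auto out = EmbeddingOutDims({4, 16}, {1000, 128});
  assert((out == std::vector<int64_t>{4, 16, 128}));
  return 0;
}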
paddle/phi/infermeta/binary.h
...
@@ -38,11 +38,6 @@ void AllValueCompareInferMeta(const MetaTensor& x,
                              MetaTensor* out,
                              MetaConfig config = MetaConfig());

void EmbeddingInferMeta(const MetaTensor& input,
                        const MetaTensor& weight,
                        int64_t padding_idx,
                        MetaTensor* out);

void KLDivInferMeta(const MetaTensor& x,
                    const MetaTensor& label,
                    const std::string& reduction,
...
@@ -201,7 +196,6 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
void EmbeddingInferMeta(const MetaTensor& x,
                        const MetaTensor& weight,
                        int64_t padding_idx,
                        bool sparse,
                        MetaTensor* out);

void ExpandAsInferMeta(const MetaTensor& x,
...
paddle/phi/infermeta/multiary.cc
...
@@ -2901,5 +2901,4 @@ void GraphSendUVInferMeta(const MetaTensor& x,
 }  // namespace phi

-PD_REGISTER_INFER_META_FN(batch_norm, phi::BatchNormInferMeta);
 PD_REGISTER_INFER_META_FN(batch_norm_infer, phi::BatchNormInferInferMeta);
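The manual batch_norm registration above becomes redundant once legacy_ops.yaml names BatchNormInferMeta under infer_meta, presumably because the yaml-driven code generator now emits the registration. A hypothetical sketch of the self-registration pattern such macros typically rely on (not phi's actual PD_REGISTER_INFER_META_FN implementation):

#include <cassert>
#include <functional>
#include <map>
#include <string>

using InferMetaFn = std::function<void()>;

std::map<std::string, InferMetaFn>& InferMetaMap() {
  static std::map<std::string, InferMetaFn> m;
  return m;
}

struct InferMetaRegistrar {
  InferMetaRegistrar(const std::string& op, InferMetaFn fn) {
    InferMetaMap().emplace(op, std::move(fn));
  }
};

// A static registrar object runs its constructor before main(), inserting
// the op's InferMeta function into the global table; registering the same
// op twice (by hand and via generated code) is what the deletion avoids.
#define REGISTER_INFER_META_FN(op, fn) \
  static InferMetaRegistrar registrar_##op(#op, fn)

void BatchNormInferInferMetaStub() {}  // hypothetical stand-in

REGISTER_INFER_META_FN(batch_norm_infer, &BatchNormInferInferMetaStub);

int main() {
  assert(InferMetaMap().count("batch_norm_infer") == 1);
  return 0;
}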