Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
642f6df9
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
642f6df9
编写于
8月 16, 2022
作者:
C
Charles-hit
提交者:
GitHub
8月 16, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
support momentum op auto generation (#45163)
上级
59241336
变更
4
隐藏空白更改
内联
并排
Showing
4 changed files
with
8 additions
and
146 deletions
+8
-146
paddle/phi/api/lib/api_custom_impl.cc
paddle/phi/api/lib/api_custom_impl.cc
+0
-130
paddle/phi/api/lib/api_custom_impl.h
paddle/phi/api/lib/api_custom_impl.h
+0
-13
paddle/phi/api/yaml/legacy_api.yaml
paddle/phi/api/yaml/legacy_api.yaml
+7
-2
python/paddle/optimizer/momentum.py
python/paddle/optimizer/momentum.py
+1
-1
未找到文件。
paddle/phi/api/lib/api_custom_impl.cc
浏览文件 @
642f6df9
...
...
@@ -178,136 +178,6 @@ std::vector<Tensor> split_impl(const Tensor& x,
return
out
;
}
// Hand-written forward API for the momentum optimizer op (legacy path,
// superseded by the auto-generated API). Selects the proper phi kernel,
// runs InferMeta, and invokes the kernel in-place on param/velocity (and
// master_param when multi-precision is enabled).
//
// NOTE(review): the returned tuple `api_output` is never wired to the
// kernel outputs — the op updates its inputs in place, so callers are
// presumably expected to keep using param/velocity; confirm against the
// yaml `inplace` mapping.
std::tuple<Tensor, Tensor, Tensor> momentum_impl(
    const Tensor& param,
    const Tensor& grad,
    const Tensor& velocity,
    const Tensor& learning_rate,
    const paddle::optional<Tensor>& master_param,
    float mu,
    bool use_nesterov,
    const std::string& regularization_method,
    float regularization_coeff,
    bool multi_precision,
    float rescale_grad) {
  // Kernel key starts fully undefined and is derived from `param`.
  Backend kernel_backend = Backend::UNDEFINED;
  DataLayout kernel_layout = DataLayout::UNDEFINED;
  DataType kernel_data_type = DataType::UNDEFINED;
  if (kernel_backend == Backend::UNDEFINED ||
      kernel_layout == DataLayout::UNDEFINED ||
      kernel_data_type == DataType::UNDEFINED) {
    auto kernel_key_set = ParseKernelKeyByInputArgs(param);
    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
    if (kernel_backend == Backend::UNDEFINED) {
      kernel_backend = kernel_key.backend();
    }
    if (kernel_layout == DataLayout::UNDEFINED) {
      kernel_layout = kernel_key.layout();
    }
    if (kernel_data_type == DataType::UNDEFINED) {
      kernel_data_type = kernel_key.dtype();
    }
  }

  // Sparse gradients dispatch to a dedicated kernel variant.
  std::string kernel_name = "momentum";
  if (grad.is_selected_rows()) {
    kernel_name = "momentum_dense_param_sparse_grad";
  }
  auto kernel_result = phi::KernelFactory::Instance().SelectKernelOrThrowError(
      kernel_name, {kernel_backend, kernel_layout, kernel_data_type});
  const auto& kernel = kernel_result.kernel;
  VLOG(6) << kernel_name << " API kernel key: [" << kernel_backend << ", "
          << kernel_layout << ", " << kernel_data_type << "]";
  VLOG(6) << kernel_name << " API kernel: " << kernel;

  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);

  // Transform each input to the layout/dtype/place the kernel expects.
  auto input_param = PrepareData(param, kernel.InputAt(0), {});
  auto input_grad = PrepareData(grad, kernel.InputAt(1), {});
  auto input_velocity = PrepareData(velocity, kernel.InputAt(2), {});
  auto input_learning_rate = PrepareData(learning_rate, kernel.InputAt(3), {});
  auto input_master_param = PrepareData(master_param, kernel.InputAt(4), {});

  std::tuple<Tensor, Tensor, Tensor> api_output;
  // In-place op: outputs alias the prepared inputs.
  auto kernel_out_0 = input_param.get();
  auto kernel_out_1 = input_velocity.get();
  phi::DenseTensor* kernel_out_2 = nullptr;
  if (input_master_param) {
    kernel_out_2 = input_master_param.get_ptr();
  }

  auto input_meta_ref_master_param = MakeMetaTensor(input_master_param);
  phi::MetaTensor meta_out_0(kernel_out_0);
  phi::MetaTensor meta_out_1(kernel_out_1);
  // Run shape/dtype inference; the master_param output is optional.
  if (kernel_out_2) {
    phi::MetaTensor meta_out_2(kernel_out_2);
    phi::MomentumInferMeta(MakeMetaTensor(*input_param),
                           MakeMetaTensor(*input_grad),
                           MakeMetaTensor(*input_velocity),
                           MakeMetaTensor(*input_learning_rate),
                           input_meta_ref_master_param,
                           mu,
                           use_nesterov,
                           regularization_method,
                           regularization_coeff,
                           multi_precision,
                           rescale_grad,
                           &meta_out_0,
                           &meta_out_1,
                           &meta_out_2);
  } else {
    phi::MomentumInferMeta(MakeMetaTensor(*input_param),
                           MakeMetaTensor(*input_grad),
                           MakeMetaTensor(*input_velocity),
                           MakeMetaTensor(*input_learning_rate),
                           input_meta_ref_master_param,
                           mu,
                           use_nesterov,
                           regularization_method,
                           regularization_coeff,
                           multi_precision,
                           rescale_grad,
                           &meta_out_0,
                           &meta_out_1,
                           nullptr);
  }

  // Signature of the selected variadic kernel.
  using kernel_signature = void (*)(const platform::DeviceContext&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const phi::DenseTensor&,
                                    const paddle::optional<phi::DenseTensor>&,
                                    float,
                                    bool,
                                    const std::string&,
                                    float,
                                    bool,
                                    float,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*,
                                    phi::DenseTensor*);
  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
  (*kernel_fn)(*dev_ctx,
               *input_param,
               *input_grad,
               *input_velocity,
               *input_learning_rate,
               input_master_param,
               mu,
               use_nesterov,
               regularization_method,
               regularization_coeff,
               multi_precision,
               rescale_grad,
               kernel_out_0,
               kernel_out_1,
               kernel_out_2);

  return api_output;
}
////////////////// Backward(grad) api impls //////////////////////
std
::
tuple
<
Tensor
,
Tensor
,
Tensor
,
Tensor
,
Tensor
,
Tensor
>
batch_norm_impl
(
...
...
paddle/phi/api/lib/api_custom_impl.h
浏览文件 @
642f6df9
...
...
@@ -56,19 +56,6 @@ std::vector<Tensor> split_impl(const Tensor& x,
const
IntArray
&
num_or_sections
,
const
Scalar
&
axis
);
/// Hand-written forward API declaration for the momentum optimizer op.
/// Updates param/velocity (and master_param when multi_precision is set)
/// using the phi momentum kernel; see the definition in api_custom_impl.cc.
std::tuple<Tensor, Tensor, Tensor> momentum_impl(
    const Tensor& param,
    const Tensor& grad,
    const Tensor& velocity,
    const Tensor& learning_rate,
    const paddle::optional<Tensor>& master_param,
    float mu,
    bool use_nesterov,
    const std::string& regularization_method,
    float regularization_coeff,
    bool multi_precision,
    float rescale_grad);
////////////////// Backward(grad) api impls //////////////////////
/// Hand-written backward API declaration for the `imag` op: writes the
/// gradient w.r.t. the input into `x_grad` given the output gradient.
void imag_grad_impl(const Tensor& out_grad, Tensor* x_grad);
...
...
paddle/phi/api/yaml/legacy_api.yaml
浏览文件 @
642f6df9
...
...
@@ -1795,11 +1795,16 @@
func
:
modulo
backward
:
modulo_grad
-
api
:
momentum
-
api
:
momentum
_
args
:
(Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov =
false
, str regularization_method = "", float regularization_coeff = 0.0, bool multi_precision =
false
, float rescale_grad = 1.0f)
output
:
Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
invoke
:
momentum_impl(param, grad, velocity, learning_rate, master_param, mu, use_nesterov, regularization_method, regularization_coeff, multi_precision, rescale_grad)
infer_meta
:
func
:
MomentumInferMeta
kernel
:
func
:
momentum
data_type
:
param
optional
:
master_param
inplace
:
(param -> param_out), (velocity -> velocity_out), (master_param -> master_param_out)
-
api
:
multi_dot
args
:
(Tensor[] x)
...
...
python/paddle/optimizer/momentum.py
浏览文件 @
642f6df9
...
...
@@ -327,7 +327,7 @@ class Momentum(Optimizer):
if
in_dygraph_mode
():
if
isinstance
(
param_and_grad
,
dict
):
self
.
_update_regularization
(
param_and_grad
[
'weight_decay'
])
return
_C_ops
.
final_state_momentum
(
return
_C_ops
.
final_state_momentum
_
(
param_and_grad
[
0
],
param_and_grad
[
1
],
velocity_acc
,
lr
,
master_weight
,
self
.
_momentum
,
self
.
_use_nesterov
,
regularization_method
,
regularization_coeff
,
find_master
,
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录