Unverified commit 445d7337, authored by hong, committed by GitHub

fix new ir optimizer bug (#55910)

Parent 37c487e0
@@ -9,7 +9,7 @@
   kernel :
     func : adadelta
     data_type : param
-  optional : master_param
+  optional : master_param, master_param_out
   inplace : (param -> param_out), (avg_squared_grad -> moment_out), (avg_squared_update -> inf_norm_out), (master_param -> master_param_out)
 
 - op : add
...
@@ -44,13 +44,19 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
-- op : adagrad_
+- op : adadelta_ (adadelta)
+  inputs :
+    {param : Param, grad: Grad, avg_squared_grad : AvgSquaredGrad, avg_squared_update : AvgSquaredUpdate, learning_rate : LearningRate, master_param : MasterParam }
+  outputs :
+    {param_out : ParamOut, moment_out : AvgSquaredGradOut, inf_norm_out : AvgSquaredUpdateOut, master_param_out : MasterParamOut}
+
+- op : adagrad_ (adagrad)
   inputs :
     { param : Param, grad : Grad, moment : Moment, learning_rate : LearningRate, master_param : MasterParam }
   outputs :
     { param_out : ParamOut, moment_out : MomentOut, master_param_out : MasterParamOut }
 
-- op : adam_
+- op : adam_ (adam)
   inputs :
     {param: Param, grad: Grad, learning_rate: LearningRate, moment1: Moment1, moment2: Moment2, beta1_pow: Beta1Pow, beta2_pow: Beta2Pow, master_param: MasterParam, skip_update: SkipUpdate}
   outputs :
@@ -67,13 +73,13 @@
       tensor_name : EpsilonTensor
   manual_signature : [adam_]
 
-- op : adamax_
+- op : adamax_ (adamax)
   inputs :
     {param : Param, grad: Grad, learning_rate : LearningRate, moment : Moment, inf_norm : InfNorm, beta1_pow : Beta1Pow, master_param : MasterParam}
   outputs :
     {param_out : ParamOut, moment_out : MomentOut, inf_norm_out : InfNormOut, master_param_out : MasterParamOut}
 
-- op : adamw_
+- op : adamw_ (adamw)
   inputs :
     {param: Param, grad: Grad, learning_rate: LearningRate, moment1: Moment1, moment2: Moment2, beta1_pow: Beta1Pow, beta2_pow: Beta2Pow, master_param: MasterParam, skip_update: SkipUpdate}
   outputs :
@@ -1454,7 +1460,7 @@
   outputs :
     out : Out
 
-- op : lamb_
+- op : lamb_ (lamb)
   inputs :
     {param : Param, grad : Grad, learning_rate : LearningRate, moment1 : Moment1, moment2 : Moment2, beta1_pow : Beta1Pow, beta2_pow : Beta2Pow, master_param : MasterParam, skip_update : SkipUpdate}
   outputs :
@@ -1859,7 +1865,7 @@
   outputs :
     {out : Out, indices : Indices}
 
-- op : momentum_
+- op : momentum_ (momentum)
   inputs :
     {param : Param, grad : Grad, velocity : Velocity, learning_rate : LearningRate, master_param : MasterParam}
   outputs :
@@ -2264,7 +2270,7 @@
       support_tensor : true
   manual_signature : [reverse]
 
-- op : rmsprop_
+- op : rmsprop_ (rmsprop)
   inputs :
     {param: Param, mean_square: MeanSquare, mean_grad: MeanGrad, learning_rate: LearningRate, grad: Grad, moment: Moment, master_param: MasterParam}
   outputs :
@@ -2402,7 +2408,7 @@
   extra :
     attrs : [str data_format = "AnyLayout"]
 
-- op : sgd_
+- op : sgd_ (sgd)
   inputs :
     {param : Param, learning_rate : LearningRate, grad : Grad, master_param : MasterParam}
   outputs :
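For context: each entry in this compatibility YAML maps the new-IR argument names (snake_case, left of each colon) to the legacy Fluid operator's variable names (CamelCase, right of each colon), and the parenthesized name after `op :` records the legacy op name that the trailing-underscore inplace op corresponds to. The first hunk presumably also marks `master_param_out` optional so the `(master_param -> master_param_out)` inplace pair stays valid when no master parameter is passed. A minimal sketch of the entry shape, modeled on the `sgd_` hunk above; the `outputs` dict is truncated in the diff, so the one shown here is an assumption:

# Sketch of one compatibility entry (assumed layout, modeled on sgd_ above).
# Left-hand names are new-IR argument names; right-hand names are legacy
# Fluid Var names; "(sgd)" is the legacy op name behind the inplace op sgd_.
- op : sgd_ (sgd)
  inputs :
    {param : Param, learning_rate : LearningRate, grad : Grad, master_param : MasterParam}
  outputs :
    {param_out : ParamOut, master_param_out : MasterParamOut}  # assumed; truncated in the diff above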