Unverified commit 64a21f71, authored by 姜永久, committed by GitHub

modify optimizer tests (#51084)

* modify optimizer tests

* lint
Parent b93e5119
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -23,10 +23,47 @@ from paddle.fluid import core
 from paddle.fluid.op import Operator
+def adam_wrapper(
+    param,
+    grad,
+    LearningRate,
+    moment1,
+    moment2,
+    beta1_pow,
+    beta2_pow,
+    master_weight=None,
+    find_inf=None,
+    beta1=0.78,
+    beta2=0.836,
+    epsilon=1e-4,
+    lazy_mode=False,
+):
+    _, _, _, _, _, _ = paddle._C_ops.adam_(
+        param,
+        grad,
+        LearningRate,
+        moment1,
+        moment2,
+        beta1_pow,
+        beta2_pow,
+        master_weight,
+        find_inf,
+        beta1,
+        beta2,
+        epsilon,
+        lazy_mode,
+        1000,
+        False,
+        False,
+    )
 class TestAdamOp1(OpTest):
     def setUp(self):
         '''Test Adam Op with supplied attributes'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -73,6 +110,8 @@ class TestAdamOp2(OpTest):
     def setUp(self):
         '''Test Adam Op with supplied attributes'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         self.set_shape()
         param = np.random.uniform(-1, 1, self.shape).astype("float32")
         grad = np.random.uniform(-1, 1, self.shape).astype("float32")
@@ -122,6 +161,8 @@ class TestAdamOpMultipleSteps(OpTest):
     def setUp(self):
         '''Test Adam Operator with supplied attributes'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         self.num_steps = 10
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -414,6 +455,8 @@ class TestAdamOpBetaVariable(OpTest):
     def setUp(self):
         '''Test Adam Op with beta as Variable'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -459,6 +502,8 @@ class TestAdamOpBetaEpsilonVariable(OpTest):
     def setUp(self):
         '''Test Adam Op with beta/epsilon as Variable'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -505,6 +550,8 @@ class TestAdamOpWithGlobalBetaPow(OpTest):
     def setUp(self):
         '''Test Adam Op with global_beta_pow'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -554,6 +601,8 @@ class TestAdamOpWithSkipUpdate(OpTest):
     def setUp(self):
         '''Test Adam Op with skip_update'''
         self.op_type = "adam"
+        self.python_api = adam_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
...
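For orientation: `python_api` points the eager test harness at a Python function whose positional signature matches the op's inputs and attributes, and `python_out_sig` names the outputs to compare. Below is a minimal sketch (not part of the commit) of driving the same in-place eager kernel that `adam_wrapper` wraps; the tensor setup and values are illustrative assumptions, and the argument order is copied from the wrapper above.

```python
# Sketch only: invoke the in-place eager Adam kernel the way adam_wrapper
# does. All tensor values here are illustrative, not from the tests.
import numpy as np
import paddle

shape = (102, 105)
param = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
grad = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
lr = paddle.to_tensor(np.array([0.004], dtype="float32"))
moment1 = paddle.to_tensor(np.zeros(shape, dtype="float32"))
moment2 = paddle.to_tensor(
    np.abs(np.random.uniform(-1, 1, shape)).astype("float32")
)
beta1_pow = paddle.to_tensor(np.array([0.78], dtype="float32"))
beta2_pow = paddle.to_tensor(np.array([0.836], dtype="float32"))

# master_weight/find_inf stay None, as in the tests; the trailing scalars
# (1000, False, False) are forwarded unchanged from the wrapper above.
paddle._C_ops.adam_(
    param, grad, lr, moment1, moment2, beta1_pow, beta2_pow,
    None, None, 0.78, 0.836, 1e-4, False, 1000, False, False,
)
print(float(param.mean()))  # param was updated in place
```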
@@ -15,7 +15,33 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
+import paddle
+
+def adamx_wrapper(
+    param,
+    grad,
+    lr,
+    moment,
+    inf_norm,
+    beta1_pow=None,
+    beta1=0.78,
+    beta2=0.899,
+    epsilon=1e-5,
+):
+    return paddle._C_ops.adamax_(
+        param,
+        grad,
+        lr,
+        moment,
+        inf_norm,
+        beta1_pow,
+        beta1,
+        beta2,
+        epsilon,
+    )
 import paddle
@@ -24,6 +50,8 @@ class TestAdamaxOp1(OpTest):
     def setUp(self):
         '''Test Adamax Operator with supplied attributes'''
         self.op_type = "adamax"
+        self.python_api = adamx_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -66,6 +94,8 @@ class TestAdamaxOp2(OpTest):
     def setUp(self):
         self.op_type = "adamax"
+        self.python_api = adamx_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -104,6 +134,8 @@ class TestAdamaxOpMultipleSteps(OpTest):
     def setUp(self):
         '''Test Adamax Operator with supplied attributes'''
         self.op_type = "adamax"
+        self.python_api = adamx_wrapper
+        self.python_out_sig = ['Out']
         self.num_steps = 10
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
...
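Unlike adam_wrapper, adamx_wrapper returns the kernel's result directly. A hedged usage sketch of the eager call it forwards to follows; the tensor setup is an illustrative assumption, while the argument order comes from the wrapper above.

```python
# Sketch only: the eager Adamax update that adamx_wrapper forwards to.
import numpy as np
import paddle

shape = (102, 105)
param = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
grad = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
lr = paddle.to_tensor(np.array([0.002], dtype="float32"))
moment = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
# inf_norm holds the running max of |grad|, so it should start positive.
inf_norm = paddle.to_tensor(
    np.random.uniform(0.1, 1, shape).astype("float32")
)
beta1_pow = paddle.to_tensor(np.array([0.78], dtype="float32"))

# Updates param, moment, and inf_norm in place and returns the results.
paddle._C_ops.adamax_(
    param, grad, lr, moment, inf_norm, beta1_pow, 0.78, 0.899, 1e-5
)
```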
@@ -17,7 +17,7 @@ import unittest
 from functools import partial
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 import paddle.fluid as fluid
@@ -60,10 +60,53 @@ def adamw_step(inputs, attributes):
     return param_out, moment1_out, moment2_out
+def adamw_wrapper(
+    param,
+    grad,
+    lr,
+    moment1,
+    moment2,
+    beta1_pow,
+    beta2_pow,
+    master_weight=None,
+    found_inf=None,
+    beta1=0.78,
+    beta2=0.836,
+    epsilon=1e-4,
+    lr_ratio=1.0,
+    weight_decay=0.01,
+    with_decay=True,
+    lazy_mode=False,
+):
+    _, _, _, _, _, _ = paddle._C_ops.adamw_(
+        param,
+        grad,
+        lr,
+        moment1,
+        moment2,
+        beta1_pow,
+        beta2_pow,
+        master_weight,
+        found_inf,
+        beta1,
+        beta2,
+        epsilon,
+        lr_ratio,
+        weight_decay,
+        with_decay,
+        lazy_mode,
+        1000,
+        False,
+        False,
+    )
 class TestAdamW(OpTest):
     def setUp(self):
         '''Test AdamW Op with supplied attributes'''
         self.op_type = "adamw"
+        self.python_api = adamw_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
@@ -118,6 +161,8 @@ class TestAdamW2(OpTest):
     def setUp(self):
         '''Test AdamW Op with supplied attributes'''
         self.op_type = "adamw"
+        self.python_api = adamw_wrapper
+        self.python_out_sig = ['Out']
         param = np.random.uniform(-1, 1, (2, 2)).astype("float32")
         grad = np.random.uniform(-1, 1, (2, 2)).astype("float32")
         moment1 = np.random.uniform(-1, 1, (2, 2)).astype("float32")
...
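The reference `adamw_step` (elided in the hunk above) is what the wrapper's outputs are checked against. For orientation, here is a NumPy sketch of the standard decoupled-weight-decay update it computes; this mirrors the usual AdamW formulation, not the test file's exact code.

```python
# Sketch of one decoupled-weight-decay Adam step (standard AdamW math).
import numpy as np

def adamw_ref(param, grad, m1, m2, lr, t,
              beta1=0.78, beta2=0.836, eps=1e-4, wd=0.01):
    # Decoupled decay: shrink the parameter independently of the gradient.
    param = param * (1.0 - lr * wd)
    # Ordinary Adam moment updates.
    m1 = beta1 * m1 + (1.0 - beta1) * grad
    m2 = beta2 * m2 + (1.0 - beta2) * grad * grad
    # Bias-correct the step size using the beta powers at step t.
    lr_t = lr * np.sqrt(1.0 - beta2**t) / (1.0 - beta1**t)
    param = param - lr_t * m1 / (np.sqrt(m2) + eps)
    return param, m1, m2
```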
@@ -15,7 +15,7 @@
 import unittest
 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 import paddle
 from paddle.fluid import core
@@ -32,6 +32,8 @@ def lamb_wrapper(
     moment2,
     beta1Pow,
     beta2Pow,
+    master_weight=None,
+    found_inf=None,
     epsilon=1e-8,
     beta1=0.9,
     beta2=0.999,
@@ -45,8 +47,8 @@ def lamb_wrapper(
         moment2,
         beta1Pow,
         beta2Pow,
-        None,
-        None,
+        master_weight,
+        found_inf,
         weight_decay,
         beta1,
         beta2,
...
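The lamb_wrapper change is pure plumbing: the two previously hard-coded None arguments now forward the new optional master_weight and found_inf parameters, so the default call is unchanged. A hedged sketch of the resulting call pattern, assuming the wrapper's elided leading parameters are (param, grad, learning_rate, moment1) and that the wrapper is in scope:

```python
# Sketch only: default call keeps the old behavior (both extras None).
# The first four parameter names are assumptions; the diff elides them.
import numpy as np
import paddle

shape = (10, 10)
param = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
grad = paddle.to_tensor(np.random.uniform(-1, 1, shape).astype("float32"))
lr = paddle.to_tensor(np.array([0.001], dtype="float32"))
m1 = paddle.to_tensor(np.zeros(shape, dtype="float32"))
m2 = paddle.to_tensor(np.zeros(shape, dtype="float32"))
b1_pow = paddle.to_tensor(np.array([0.9], dtype="float32"))
b2_pow = paddle.to_tensor(np.array([0.999], dtype="float32"))

lamb_wrapper(param, grad, lr, m1, m2, b1_pow, b2_pow)
# An AMP-style test could now also pass real tensors, e.g.
# lamb_wrapper(..., master_weight=mw, found_inf=flag),
# instead of the previously hard-coded None values.
```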