未验证 提交 86a22ad4 编写于 作者: 姜永久 提交者: GitHub

Migrating from the old dynamic graph to the new dynamic graph for the argmin/argmax/adadelta tests (#50093)

* more ops

* revert some ops

* reset some ops
上级 a5f2e1f7
......@@ -16,18 +16,24 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def adamgrad_wrapper(param, grad, moment, learning_rate, epsilon):
    """Python-API wrapper registered as ``self.python_api`` so OpTest can
    exercise the legacy "adagrad" operator through the new dynamic-graph
    C++ entry point.

    NOTE(review): the trailing underscore in ``adagrad_`` suggests the op
    updates ``param``/``moment`` in place and the return value is
    intentionally discarded — confirm against ``paddle._C_ops``.
    The function name "adamgrad" looks like a typo for "adagrad", but it
    is the name referenced by the test classes in this file, so it is
    kept unchanged.
    """
    paddle._C_ops.adagrad_(param, grad, moment, learning_rate, epsilon)
class TestAdagradOp1(OpTest):
'''Test Adagrad operator with explicit attributes'''
def setUp(self):
self.op_type = "adagrad"
self.python_api = adamgrad_wrapper
self.python_out_sig = ['out']
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
moment = np.zeros((123, 321)).astype("float32")
......@@ -57,6 +63,8 @@ class TestAdagradOp2(OpTest):
def setUp(self):
self.op_type = "adagrad"
self.python_api = adamgrad_wrapper
self.python_out_sig = ['out']
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -43,19 +43,19 @@ class TestAddMMOp(OpTest):
pass
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['Input', 'X', 'Y'], 'Out', check_eager=False)
self.check_grad(['Input', 'X', 'Y'], 'Out')
def test_check_grad_x(self):
self.check_grad(['X'], 'Out', no_grad_set=None, check_eager=False)
self.check_grad(['X'], 'Out', no_grad_set=None)
def test_check_grad_y(self):
self.check_grad(['Y'], 'Out', no_grad_set=None, check_eager=False)
self.check_grad(['Y'], 'Out', no_grad_set=None)
def test_check_grad_input(self):
self.check_grad(['Input'], 'Out', no_grad_set=None, check_eager=False)
self.check_grad(['Input'], 'Out', no_grad_set=None)
class TestAddMMOpError(unittest.TestCase):
......@@ -186,6 +186,7 @@ class TestAddMMOp3(OpTest):
# test broadcast
def setUp(self):
self.op_type = "addmm"
self.python_api = paddle.addmm
self.dtype = np.float64
self.init_dtype_type()
self.inputs = {
......@@ -225,6 +226,7 @@ class TestAddMMOp4(OpTest):
# test broadcast
def setUp(self):
self.op_type = "addmm"
self.python_api = paddle.addmm
self.dtype = np.float64
self.init_dtype_type()
self.inputs = {
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_attribute_var import UnittestBase
import paddle
......@@ -27,6 +27,7 @@ from paddle.fluid import Program, program_guard
class BaseTestCase(OpTest):
def initTestCase(self):
self.op_type = 'arg_min'
self.python_api = paddle.tensor.argmin
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
......@@ -48,6 +49,7 @@ class BaseTestCase(OpTest):
class TestCase0(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
......@@ -56,6 +58,7 @@ class TestCase0(BaseTestCase):
class TestCase1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.python_api = paddle.tensor.argmin
self.dims = (3, 4)
self.dtype = 'float64'
self.axis = 1
......@@ -64,6 +67,7 @@ class TestCase1(BaseTestCase):
class TestCase2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (3, 4)
self.dtype = 'int64'
self.axis = 0
......@@ -75,6 +79,7 @@ class TestCase2(BaseTestCase):
class TestCase0FP16(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (3, 4, 5)
self.dtype = np.float16
self.axis = 0
......@@ -86,6 +91,7 @@ class TestCase0FP16(BaseTestCase):
class TestCase1FP16(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.python_api = paddle.tensor.argmin
self.dims = (3, 4)
self.dtype = np.float16
self.axis = 1
......@@ -94,6 +100,7 @@ class TestCase1FP16(BaseTestCase):
class TestCase2_1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (3, 4)
self.dtype = 'int64'
self.axis = -1
......@@ -102,6 +109,7 @@ class TestCase2_1(BaseTestCase):
class TestCase3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (3,)
self.dtype = 'int64'
self.axis = 0
......@@ -110,6 +118,7 @@ class TestCase3(BaseTestCase):
class TestCase4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.python_api = paddle.tensor.argmin
self.dims = (1,)
self.dtype = 'int32'
self.axis = 0
......@@ -118,6 +127,7 @@ class TestCase4(BaseTestCase):
class TestCase3_(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (3,)
self.axis = 0
......@@ -125,6 +135,7 @@ class TestCase3_(BaseTestCase):
class BaseTestComplex1_1(OpTest):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (4, 5, 6)
self.dtype = 'int32'
self.axis = 2
......@@ -148,6 +159,7 @@ class BaseTestComplex1_1(OpTest):
class BaseTestComplex1_2(OpTest):
def initTestCase(self):
self.op_type = 'arg_min'
self.python_api = paddle.tensor.argmin
self.dims = (4, 5, 6)
self.dtype = 'int32'
self.axis = 2
......@@ -171,6 +183,7 @@ class BaseTestComplex1_2(OpTest):
class BaseTestComplex2_1(OpTest):
def initTestCase(self):
self.op_type = 'arg_max'
self.python_api = paddle.tensor.argmax
self.dims = (4, 5, 6)
self.dtype = 'int32'
self.axis = 2
......@@ -199,6 +212,7 @@ class BaseTestComplex2_1(OpTest):
class BaseTestComplex2_2(OpTest):
def initTestCase(self):
self.op_type = 'arg_min'
self.python_api = paddle.tensor.argmin
self.dims = (4, 5, 6)
self.dtype = 'int32'
self.axis = 2
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -33,6 +33,10 @@ def create_kernel_case(op_type, numpy_op_type):
def setUp(self):
np.random.seed(123)
self.initTestCase()
if op_type == 'arg_min':
self.python_api = paddle.tensor.argmin
else:
self.python_api = paddle.tensor.argmax
self.dims = (4, 5, 6)
self.dtype = "float64"
self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
......@@ -72,6 +76,10 @@ def create_kernel_case(op_type, numpy_op_type):
class ArgMinMaxKernelCase4(ArgMinMaxKernelBaseCase):
def setUp(self):
self.initTestCase()
if op_type == 'arg_min':
self.python_api = paddle.tensor.argmin
else:
self.python_api = paddle.tensor.argmax
self.dims = (4, 5, 6)
self.dtype = "float64"
self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
......@@ -85,6 +93,10 @@ def create_kernel_case(op_type, numpy_op_type):
class ArgMinMaxKernelCase5(ArgMinMaxKernelBaseCase):
def setUp(self):
self.initTestCase()
if op_type == 'arg_min':
self.python_api = paddle.tensor.argmin
else:
self.python_api = paddle.tensor.argmax
self.dims = 4
self.dtype = "float64"
self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
......@@ -98,6 +110,10 @@ def create_kernel_case(op_type, numpy_op_type):
class ArgMinMaxKernelCase6(ArgMinMaxKernelBaseCase):
def setUp(self):
self.initTestCase()
if op_type == 'arg_min':
self.python_api = paddle.tensor.argmin
else:
self.python_api = paddle.tensor.argmax
self.dims = 4
self.dtype = "float64"
self.x = 1000 * np.random.random(self.dims).astype(self.dtype)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册