Unverified · Commit 814315b4 authored by taixiurong, committed by GitHub

add matmul & adamw unittest test=kunlun (#41186)

Parent 229e91bf
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("..")
import unittest
import paddle
import random
import numpy as np
import paddle.fluid as fluid
from functools import partial
from paddle.framework import core
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


def adamw_step(inputs, attributes):
    param = inputs['Param']
    grad = inputs['Grad']
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']
    epsilon = attributes['epsilon']

    if 'lr_ratio' in attributes:
        lr = lr * attributes['lr_ratio']

    if attributes["with_decay"]:
        coeff = attributes["coeff"]
        decay = 1.0 - lr * coeff
        param2 = param * decay
        param = param2.copy()

    if 'beta1' in attributes:
        beta1 = attributes['beta1']
    else:
        beta1 = inputs['Beta1Tensor'][0]
    if 'beta2' in attributes:
        beta2 = attributes['beta2']
    else:
        beta2 = inputs['Beta2Tensor'][0]

    moment1_out = beta1 * moment1 + (1 - beta1) * grad
    moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
    return param_out, moment1_out, moment2_out
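
# The helper above applies AdamW's decoupled weight decay before the Adam
# update proper; in equations, with the t-th beta powers supplied as
# Beta1Pow/Beta2Pow:
#   param <- param * (1 - lr * coeff)
#   m1    <- beta1 * m1 + (1 - beta1) * grad
#   m2    <- beta2 * m2 + (1 - beta2) * grad**2
#   lr_t  <- lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
#   param <- param - lr_t * m1 / (sqrt(m2) + epsilon)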


def simple_lr_setting(param, decay_rate, n_layers):
    if "fc_0" in param.name or "linear_1" in param.name:
        depth = int(param.name.split("_")[2]) + 1
    elif "fc_1" in param.name or "linear_2" in param.name:
        depth = int(param.name.split("_")[2]) + 2
    else:
        depth = 0

    return decay_rate**(n_layers + 2 - depth)
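
# Layer-wise lr ratio: with decay_rate < 1, a larger depth gives a smaller
# exponent and hence a larger ratio, so layers closer to the output keep
# more of the base learning rate than layers near the input.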


class XPUTestAdamwOp1(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'adamw'
        self.use_dynamic_create_class = False

    class TestAdamW(XPUOpTest):
        def setUp(self):
            # Test AdamW Op with supplied attributes
            self.op_type = "adamw"
            self.init_shape()
            self.dtype = self.in_type_str
            param = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            grad = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            moment1 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The second moment is positive
            moment2 = np.random.random(self.shape).astype(self.dtype)

            learning_rate = 0.004
            beta1 = 0.78
            beta2 = 0.836
            epsilon = 1e-4
            beta1_pow = beta1**10
            beta2_pow = beta2**10

            self.inputs = {
                'Param': param,
                'Grad': grad,
                'Moment1': moment1,
                'Moment2': moment2,
                'LearningRate': np.array([learning_rate]).astype(self.dtype),
                'Beta1Pow': np.array([beta1_pow]).astype(self.dtype),
                'Beta2Pow': np.array([beta2_pow]).astype(self.dtype)
            }

            self.attrs = {
                'epsilon': epsilon,
                'beta1': beta1,
                'beta2': beta2,
                "coeff": 0.5,
                "with_decay": True
            }

            param_out, moment1_out, \
                moment2_out = adamw_step(self.inputs, self.attrs)

            self.outputs = {
                'Moment1Out': moment1_out,
                'Moment2Out': moment2_out,
                'ParamOut': param_out,
                'Beta1PowOut': np.array([beta1_pow]).astype(self.dtype) * beta1,
                'Beta2PowOut': np.array([beta2_pow]).astype(self.dtype) * beta2
            }

        def init_shape(self):
            self.shape = [102, 105]

        def test_check_output(self):
            paddle.enable_static()
            self.check_output_with_place(place=paddle.XPUPlace(0))

    class TestAdamW2(TestAdamW):
        def init_shape(self):
            self.shape = [1000, ]

    class TestAdamW3(TestAdamW):
        def init_shape(self):
            self.shape = [200, 3000]


class XPUTestAdamwOp2(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'adamw'
        self.use_dynamic_create_class = False

    class TestAdamWOp(unittest.TestCase):
        def test_adamw_op_dygraph(self):
            paddle.disable_static()
            value = np.arange(26).reshape(2, 13).astype(self.in_type_str)
            a = paddle.to_tensor(value)
            linear = paddle.nn.Linear(13, 5)
            adam = paddle.optimizer.AdamW(
                learning_rate=0.01,
                parameters=linear.parameters(),
                apply_decay_param_fun=lambda name: True,
                weight_decay=0.01)

            for _ in range(2):
                out = linear(a)
                out.backward()
                adam.step()
                adam.clear_gradients()

        def test_adamw_op_coverage(self):
            paddle.disable_static()
            value = np.arange(26).reshape(2, 13).astype(self.in_type_str)
            a = paddle.to_tensor(value)
            linear = paddle.nn.Linear(13, 5)
            adam = paddle.optimizer.AdamW(
                learning_rate=0.0,
                parameters=linear.parameters(),
                apply_decay_param_fun=lambda name: True,
                weight_decay=0.01)
            assert (adam.__str__() is not None)

        def test_adamw_op(self):
            paddle.enable_static()
            place = fluid.XPUPlace(0)
            shape = [2, 3, 8, 8]
            exe = fluid.Executor(place)
            train_prog = fluid.Program()
            startup = fluid.Program()
            with fluid.program_guard(train_prog, startup):
                with fluid.unique_name.guard():
                    data = fluid.data(name="data", shape=shape)
                    conv = fluid.layers.conv2d(data, 8, 3)
                    loss = paddle.mean(conv)

                    beta1 = fluid.layers.create_global_var(
                        shape=[1],
                        value=0.85,
                        dtype=self.in_type_str,
                        persistable=True)
                    beta2 = fluid.layers.create_global_var(
                        shape=[1],
                        value=0.95,
                        dtype=self.in_type_str,
                        persistable=True)
                    betas = [beta1, beta2]
                    opt = paddle.optimizer.AdamW(
                        learning_rate=1e-5,
                        beta1=beta1,
                        beta2=beta2,
                        weight_decay=0.01,
                        epsilon=1e-8)
                    opt.minimize(loss)

            exe.run(startup)
            data_np = np.random.random(shape).astype(self.in_type_str)
            rets = exe.run(train_prog,
                           feed={"data": data_np},
                           fetch_list=[loss])
            assert rets[0] is not None
            paddle.disable_static()

        def test_adamw_op_invalid_input(self):
            paddle.disable_static()
            linear = paddle.nn.Linear(10, 10)
            with self.assertRaises(ValueError):
                adam = paddle.optimizer.AdamW(
                    0.1, beta1=-1, parameters=linear.parameters())
            with self.assertRaises(ValueError):
                adam = paddle.optimizer.AdamW(
                    0.1, beta2=-1, parameters=linear.parameters())
            with self.assertRaises(ValueError):
                adam = paddle.optimizer.AdamW(
                    0.1, epsilon=-1, parameters=linear.parameters())

    class TestAdamWOpGroup(TestAdamWOp):
        def test_adamw_op_dygraph(self):
            paddle.disable_static()
            value = np.arange(26).reshape(2, 13).astype(self.in_type_str)
            a = paddle.to_tensor(value)
            linear_1 = paddle.nn.Linear(13, 5)
            linear_2 = paddle.nn.Linear(5, 3)
            adam = paddle.optimizer.AdamW(
                learning_rate=0.01,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001
                }],
                apply_decay_param_fun=lambda name: True,
                weight_decay=0.01)

            for _ in range(2):
                out = linear_1(a)
                out = linear_2(out)
                out.backward()
                adam.step()
                adam.clear_gradients()

    class TestAdamWOpGroupWithLR(TestAdamWOp):
        def test_adamw_op_dygraph(self):
            paddle.disable_static()
            value = np.arange(26).reshape(2, 13).astype(self.in_type_str)
            a = paddle.to_tensor(value)
            linear_1 = paddle.nn.Linear(13, 5)
            linear_2 = paddle.nn.Linear(5, 3)
            adam = paddle.optimizer.AdamW(
                learning_rate=paddle.optimizer.lr.PiecewiseDecay(
                    boundaries=[3, 6], values=[0.1, 0.2, 0.3]),
                parameters=[{
                    'params': linear_1.parameters(),
                    'learning_rate': 0.1,
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                }],
                apply_decay_param_fun=lambda name: True,
                weight_decay=0.01)

            for _ in range(2):
                out = linear_1(a)
                out = linear_2(out)
                out.backward()
                adam.step()
                adam.clear_gradients()


support_types = get_xpu_op_support_types('adamw')
for stype in support_types:
    create_test_class(globals(), XPUTestAdamwOp1, stype)
    create_test_class(globals(), XPUTestAdamwOp2, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
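
A note on the harness used throughout this commit: create_test_class and get_xpu_op_support_types come from xpu/get_test_cover_info.py and stamp one concrete test class into the module per dtype the XPU kernel supports. The sketch below only illustrates that registration pattern; register_dtype_cases and its body are hypothetical stand-ins, not the real helper:

import unittest

# Hypothetical stand-in for create_test_class (illustration only).
def register_dtype_cases(test_globals, wrapper_cls, dtype_str):
    wrapper = wrapper_cls()  # e.g. wrapper.op_name == 'adamw'
    for name in dir(wrapper_cls):
        nested = getattr(wrapper_cls, name)
        # Pick up the unittest classes nested on the wrapper.
        if isinstance(nested, type) and issubclass(nested, unittest.TestCase):
            case_name = '{}_{}_{}'.format(wrapper.op_name, name, dtype_str)
            # Expose the dtype under test the way setUp expects to read it.
            test_globals[case_name] = type(case_name, (nested, ),
                                           {'in_type_str': dtype_str})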

@@ -24,7 +24,46 @@ import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
    """Reference forward implementation using np.matmul."""
    # np.matmul does not support the transpose flags, so we manually
    # transpose X and Y appropriately.
    if transpose_X:
        if X.ndim == 1:
            X = X.reshape((X.size, 1))
        elif X.ndim == 2:
            X = X.T
        else:
            dim = [i for i in range(len(X.shape))]
            dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
            X = np.transpose(X, tuple(dim))
    if transpose_Y:
        if Y.ndim == 1:
            Y = Y.reshape((1, Y.size))
        elif Y.ndim == 2:
            Y = Y.T
        else:
            dim = [i for i in range(len(Y.shape))]
            dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
            Y = np.transpose(Y, tuple(dim))
    if X.ndim == 3 and Y.ndim == 2:
        x_dims = X.shape
        X = X.reshape((x_dims[0] * x_dims[1], x_dims[2]))
    if Y.ndim == 3 and X.ndim == 2:
        y_dims = Y.shape
        Y = Y.reshape((y_dims[0] * y_dims[1], y_dims[2]))
    Out = np.matmul(X, Y)
    if not Out.shape:
        # We do not support 0-dimensional Tensors (scalars). So where
        # np.matmul outputs a scalar, we must convert to a Tensor of
        # shape (1, ) instead.
        # Everywhere else, we are compatible with np.matmul.
        Out = np.array([Out], dtype="float32")
    return Out
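
# Shape bookkeeping above, by example: with transpose_X=True, a rank-3
# X of shape (2, 3, 4) only has its last two axes swapped, giving
# (2, 4, 3), while a 1-D X of shape (4,) is promoted to a (4, 1) column.
# The final branch keeps the op's convention that a vector-vector product
# returns a shape-(1,) tensor rather than a 0-d scalar.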


def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y,

@@ -72,96 +111,26 @@ def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y,
    return shape_X, shape_Y


def generate_compatible_shapes_2(dim, transpose_X, transpose_Y):
    M = 2
    N = 4
    K = 3
    shape_X = [2 for _ in range(dim - 2)]
    shape_Y = [2 for _ in range(dim - 2)]

    if transpose_X:
        shape_X += [K, M]
    else:
        shape_X += [M, K]

    if transpose_Y:
        shape_Y += [N, K]
    else:
        shape_Y += [K, N]

    return shape_X, shape_Y
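
# For example, generate_compatible_shapes_2(4, True, False) returns
# shape_X == [2, 2, 3, 2] and shape_Y == [2, 2, 3, 4]: the leading [2, 2]
# batch dims broadcast, and after transpose_X the trailing
# (M, K) x (K, N) = (2, 3) x (3, 4) pair is what matmul contracts.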


def generate_negative_dims(in_shape):
    from itertools import combinations
    size = len(in_shape)

@@ -175,16 +144,15 @@ def generate_negative_dims(in_shape):
    return shapes


def test_negative_dims_program(obj):
    for shape_x in generate_negative_dims(obj.shape_X):
        for shape_y in generate_negative_dims(obj.shape_Y):
            X = np.random.random(obj.shape_X).astype(obj.in_type)
            Y = np.random.random(obj.shape_Y).astype(obj.in_type)
            Ref = reference_matmul(X, Y, obj.transpose_X, obj.transpose_Y)
            with program_guard(Program(), Program()):
                x = fluid.data(name='x', shape=shape_x, dtype=obj.in_type_str)
                y = fluid.data(name='y', shape=shape_y, dtype=obj.in_type_str)
                output = fluid.layers.matmul(x, y, obj.transpose_X,
                                             obj.transpose_Y)
                obj.assertEqual(len(Ref.shape), len(output.shape))

@@ -196,167 +164,252 @@ def test_negative_dims_program(obj):
                              feed={'x': X,
                                    'y': Y},
                              fetch_list=[output])
                np.allclose(res, Ref, atol=1e-3)


class XPUTestMatmulOpErr(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "matmul"
        self.use_dynamic_create_class = False

    class TestMatmulOpError(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                # The inputs type of matmul_op must be Variable.
                input1 = 12
                self.assertRaises(TypeError, fluid.layers.matmul, input1,
                                  input1)
                # The inputs dtype of matmul_op must be float32, float16
                input2 = fluid.layers.data(
                    name='input2', shape=[10, 10], dtype="int32")
                self.assertRaises(TypeError, fluid.layers.matmul, input2,
                                  input2)
                input3 = fluid.layers.data(
                    name='input3', shape=[2, 2], dtype="float16")
                fluid.layers.matmul(input3, input3)

    class API_TestMm(unittest.TestCase):
        def test_out(self):
            with fluid.program_guard(fluid.Program()):
                x = fluid.data(name="x", shape=[2], dtype=self.in_type)
                y = fluid.data(name='y', shape=[2], dtype=self.in_type)
                res = fluid.data(name="output", shape=[1], dtype=self.in_type)
                result = paddle.mm(x, y)
                exe = fluid.Executor(fluid.XPUPlace(0))
                data1 = np.random.rand(2).astype(self.in_type)
                data2 = np.random.rand(2).astype(self.in_type)
                np_res = exe.run(feed={'x': data1,
                                       'y': data2},
                                 fetch_list=[result])
                expected_result = np.matmul(
                    data1.reshape(1, 2), data2.reshape(2, 1))
                self.assertTrue(
                    np.allclose(
                        np_res, expected_result, atol=1e-3),
                    "two value is\
                    {}\n{}, check diff!".format(np_res, expected_result))

        def test_dygraph_without_out(self):
            device = fluid.XPUPlace(0)
            with fluid.dygraph.guard(device):
                input_array1 = np.random.rand(3, 4).astype(self.in_type)
                input_array2 = np.random.rand(4, 3).astype(self.in_type)
                data1 = fluid.dygraph.to_variable(input_array1)
                data2 = fluid.dygraph.to_variable(input_array2)
                out = paddle.mm(data1, data2)
                expected_result = np.matmul(input_array1, input_array2)
                self.assertTrue(
                    np.allclose(
                        expected_result, out.numpy(), atol=1e-3))

    class Test_API_Matmul(unittest.TestCase):
        def test_dygraph_without_out(self):
            device = fluid.XPUPlace(0)
            with fluid.dygraph.guard(device):
                input_array1 = np.random.rand(3, 4).astype(self.in_type)
                input_array2 = np.random.rand(4, 3).astype(self.in_type)
                data1 = fluid.dygraph.to_variable(input_array1).astype(
                    self.in_type)
                data2 = fluid.dygraph.to_variable(input_array2).astype(
                    self.in_type)
                out = paddle.matmul(data1, data2)
                expected_result = np.matmul(input_array1, input_array2)
                self.assertTrue(
                    np.allclose(
                        expected_result, out.numpy(), atol=1e-3))

    class API_TestMmError(unittest.TestCase):
        def test_errors(self):
            def test_error1():
                with fluid.program_guard(fluid.Program(), fluid.Program()):
                    data1 = fluid.data(
                        name="data1", shape=[10, 2], dtype="float32")
                    data2 = fluid.data(
                        name="data2", shape=[3, 10], dtype="float32")
                    paddle.mm(data1, data2)

            self.assertRaises(ValueError, test_error1)

            def test_error2():
                with fluid.program_guard(fluid.Program(), fluid.Program()):
                    data1 = fluid.data(
                        name="data1", shape=[-1, 10, 2], dtype="float32")
                    data2 = fluid.data(
                        name="data2", shape=[-1, 2, 10], dtype="float32")
                    paddle.mm(data1, data2)

            test_error2()

            def test_error3():
                with fluid.program_guard(fluid.Program(), fluid.Program()):
                    data1 = fluid.data(
                        name="data1", shape=[10, 10, 2], dtype="float32")
                    data2 = fluid.data(
                        name="data2", shape=[3, 2, 10], dtype="float32")
                    paddle.mm(data1, data2)

            self.assertRaises(ValueError, test_error3)


class TestMatmulBaseGenerator(XPUOpTest):
    def setUp(self):
        self.op_type = "matmul"
        self.dtype = np.float32 if not hasattr(self,
                                               'in_type') else self.in_type
        shape_X = [4, 5] if not hasattr(self, 'shape_X') else self.shape_X
        shape_Y = [5, 6] if not hasattr(self, 'shape_Y') else self.shape_Y
        transpose_X = False if not hasattr(
            self, 'transpose_X') else self.transpose_X
        transpose_Y = False if not hasattr(
            self, 'transpose_Y') else self.transpose_Y

        X = np.random.random(shape_X).astype(self.dtype)
        Y = np.random.random(shape_Y).astype(self.dtype)
        Out = reference_matmul(X, Y, transpose_X, transpose_Y)
        self.inputs = {'X': X, 'Y': Y}
        self.attrs = {'transpose_X': transpose_X, 'transpose_Y': transpose_Y}
        self.outputs = {'Out': Out}

    def test_check_output(self):
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place, atol=1e-3)

    def test_check_grad_normal(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out', max_relative_error=5e-2)

    def test_check_grad_ignore_x(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', max_relative_error=5e-2, no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=5e-2, no_grad_set=set('Y'))


class XPUTestMatmulOp1(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "matmul"
        self.use_dynamic_create_class = True

    def dynamic_create_class(self):
        base_class = TestMatmulBaseGenerator
        classes = []
        xpu_support_dims_list = [[1, 1], [2, 2], [3, 3]]
        batch_size = [2, 4, 5, 10, 50, 100, 300]
        for dims in xpu_support_dims_list:
            dim_X = dims[0]
            dim_Y = dims[1]
            for transose_x in [True, False]:
                for transose_y in [True, False]:
                    for batch in batch_size:
                        class_name = (
                            'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}'.
                            format(dim_X, dim_Y, transose_x, transose_y,
                                   batch))
                        shape_x, shape_y = generate_compatible_shapes(
                            dim_X, dim_Y, transose_x, transose_y, batch)
                        attr_dict = {
                            'shape_X': shape_x,
                            'shape_Y': shape_y,
                            'transpose_X': transose_x,
                            'transpose_Y': transose_y,
                            'op_type': "matmul"
                        }
                        classes.append([class_name, attr_dict])
        return base_class, classes


class XPUTestMatmulOp2(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "matmul"
        self.use_dynamic_create_class = True

    def dynamic_create_class(self):
        base_class = unittest.TestCase
        classes = []
        xpu_support_dims_list = [[1, 1], [2, 2], [3, 3]]
        batch_size = [2, 4, 5, 10, 50, 100, 300]
        for dims in xpu_support_dims_list:
            dim_X = dims[0]
            dim_Y = dims[1]
            for transose_x in [True, False]:
                for transose_y in [True, False]:
                    for batch in batch_size:
                        class_name = (
                            'TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}_batch_{}'.
                            format(dim_X, dim_Y, transose_x, transose_y,
                                   batch))
                        shape_x, shape_y = generate_compatible_shapes(
                            dim_X, dim_Y, transose_x, transose_y, batch)
                        attr_dict = {
                            'shape_X': shape_x,
                            'shape_Y': shape_y,
                            'transpose_X': transose_x,
                            'transpose_Y': transose_y,
                            'test_propram': test_negative_dims_program,
                        }
                        classes.append([class_name, attr_dict])
        return base_class, classes


class XPUTestMatmulOp3(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "matmul"
        self.use_dynamic_create_class = True

    def dynamic_create_class(self):
        base_class = TestMatmulBaseGenerator
        classes = []
        for dim in [4]:
            for transpose_X in [False, True]:
                for transpose_Y in [False, True]:
                    class_name = (
                        'TestMatMulOp2_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.
                        format(dim, dim, transpose_X, transpose_Y))
                    shape_X, shape_Y = generate_compatible_shapes_2(
                        dim, transpose_X, transpose_Y)
                    attr_dict = {
                        'shape_X': shape_X,
                        'shape_Y': shape_Y,
                        'transpose_X': transpose_X,
                        'transpose_Y': transpose_Y,
                        'op_type': "matmul"
                    }
                    classes.append([class_name, attr_dict])
        return base_class, classes


support_types = get_xpu_op_support_types('matmul')
for stype in support_types:
    create_test_class(globals(), XPUTestMatmulOpErr, stype)
    create_test_class(globals(), XPUTestMatmulOp1, stype)
    create_test_class(globals(), XPUTestMatmulOp2, stype)
    create_test_class(globals(), XPUTestMatmulOp3, stype)

if __name__ == "__main__":
    paddle.enable_static()
    unittest.main()
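
For scale, each dynamic matmul wrapper above enumerates three dim pairs, both transpose flags for X and Y, and seven batch sizes, so every supported dtype gets 3 x 2 x 2 x 7 = 84 generated classes per wrapper. A quick check of that count:

dims = [[1, 1], [2, 2], [3, 3]]
batches = [2, 4, 5, 10, 50, 100, 300]
assert len(dims) * 2 * 2 * len(batches) == 84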

@@ -23,6 +23,9 @@ import paddle.fluid.core as core
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
from paddle.fluid.framework import _test_eager_guard
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper


def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):

@@ -55,273 +58,239 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
    return Out


class XPUTestMatmulV2Op(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = "matmul_v2"
        self.use_dynamic_create_class = False

    class TestMatMulV2Op(XPUOpTest):
        """
        case 1
        """

        def config(self):
            self.x_shape = (100, )
            self.y_shape = (100, )
            self.trans_x = False
            self.trans_y = False

        def setUp(self):
            self.dtype = self.in_type
            self.config()
            self.op_type = "matmul_v2"
            x = np.random.random(self.x_shape).astype(self.dtype)
            y = np.random.random(self.y_shape).astype(self.dtype)
            # -0.1 ~ 0.1
            x = -0.1 + 0.2 * x
            y = -0.1 + 0.2 * y
            result = reference_matmul(x, y, self.trans_x, self.trans_y)
            result = result.astype(self.dtype)
            self.inputs = {
                'X': x,
                'Y': y,
            }
            self.attrs = {'trans_x': self.trans_x, 'trans_y': self.trans_y}
            self.outputs = {'Out': result}

        def test_check_output(self):
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place)

        def test_check_grad(self):
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(place, ['X', 'Y'], 'Out')

    class TestMatMulOp2(TestMatMulV2Op):
        """
        case 2
        """

        def config(self):
            self.x_shape = (100)
            self.y_shape = (100, 3)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp3(TestMatMulV2Op):
        """
        case 3
        """

        def config(self):
            self.x_shape = (100, )
            self.y_shape = (1, 1, 100, 2)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp4(TestMatMulV2Op):
        """
        case 4
        """

        def config(self):
            self.x_shape = (1, 1, 100, 1)
            self.y_shape = (1, 100)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp5(TestMatMulV2Op):
        """
        case 5
        """

        def config(self):
            self.x_shape = (1, 1, 100, 1)
            self.y_shape = (100, )
            self.trans_x = True
            self.trans_y = False

    class TestMatMulOp6(TestMatMulV2Op):
        """
        case 6
        """

        def config(self):
            self.x_shape = (1, 2, 102, 10)
            self.y_shape = (2, 10, 111)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp7(TestMatMulV2Op):
        """
        case 7
        """

        def config(self):
            self.x_shape = (1, 2, 100, 1)
            self.y_shape = (2, 100, 12)
            self.trans_x = True
            self.trans_y = False

    class TestMatMulOp8(TestMatMulV2Op):
        """
        case 8
        """

        def config(self):
            self.x_shape = (1, 1, 2, 100)
            self.y_shape = (1, 1, 100, 2)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp9(TestMatMulV2Op):
        """
        case 9
        """

        def config(self):
            self.x_shape = (100, 20, 100)
            self.y_shape = (100, 100, 100)
            self.trans_x = False
            self.trans_y = True

    class TestMatMulOp10(TestMatMulV2Op):
        """
        case 10
        """

        def config(self):
            self.x_shape = (100, 20, 100)
            self.y_shape = (100, 20, 100)
            self.trans_x = True
            self.trans_y = False

    class TestMatMulOp11(TestMatMulV2Op):
        """
        case 11
        """

        def config(self):
            self.x_shape = (2, 20, 100)
            self.y_shape = (100, 30)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp12(TestMatMulV2Op):
        """
        case 12
        """

        def config(self):
            self.x_shape = (1, 20, 100)
            self.y_shape = (100, )
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp13(TestMatMulV2Op):
        """
        case 13
        """

        def config(self):
            self.x_shape = (2, 2, 10, 10)
            self.y_shape = (2, 2, 10, 10)
            self.trans_x = True
            self.trans_y = False

    class TestMatMulOp14(TestMatMulV2Op):
        """
        case 14_1
        """

        def config(self):
            self.x_shape = (100, 2, 100, 10)
            self.y_shape = (100, 2, 10, 90)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp15(TestMatMulV2Op):
        """
        case 14_2
        """

        def config(self):
            self.x_shape = (100, 2, 100, 10)
            self.y_shape = (100, 2, 100, 10)
            self.trans_x = False
            self.trans_y = True

    class TestMatMulOp16(TestMatMulV2Op):
        """
        case 16 : to check the big data
        """

        def config(self):
            self.x_shape = (1000, 2, 100, 100)
            self.y_shape = (1000, 2, 100, 900)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp17(TestMatMulV2Op):
        """
        case 17 : to check the gradient for special case
        """

        def config(self):
            self.x_shape = (2, 1, 100)
            self.y_shape = (100)
            self.trans_x = False
            self.trans_y = False

    class TestMatMulOp18(TestMatMulV2Op):
        """
        case 18 : for ppyoloe model
        """

        def config(self):
            self.x_shape = (8, 111, 4, 17)
            self.y_shape = (17)
            self.trans_x = False
            self.trans_y = False


support_types = get_xpu_op_support_types('matmul_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestMatmulV2Op, stype)

if __name__ == "__main__":
    paddle.enable_static()
    ...
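
Case 18 above exercises matmul's vector broadcasting: multiplying an (8, 111, 4, 17) tensor by a 17-element vector contracts the last axis and drops it. The numpy reference behaves the same way:

import numpy as np

x = np.random.rand(8, 111, 4, 17).astype("float32")
y = np.random.rand(17).astype("float32")
# np.matmul treats the 1-D y as a column vector and removes that axis.
assert np.matmul(x, y).shape == (8, 111, 4)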