Unverified commit 83dd7e47 authored by houj04 and committed by GitHub

update unittests for activation ops on xpu test=kunlun (#39677)

* update unittests for activation ops on xpu. test=kunlun

* update input data range. test=kunlun

* update input data range. test=kunlun
Parent 45dd4a5f
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,32 +14,31 @@
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import numpy as np
import paddle.fluid.core as core

import paddle
from op_test import OpTest
from op_test_xpu import XPUOpTest
from scipy.special import expit, erf
import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.fluid import compiler, Program, program_guard
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
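# OpTest-style kernel checks build and run static programs, so static mode is
# enabled once at import time.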
class TestActivationOPBase(XPUOpTest):
    def setUp(self):
        self.place = paddle.XPUPlace(0)
        self.init_dtype()
        self.set_case()

    def set_case(self):
        self.op_type = 'exp'

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)
        self.attrs = {'use_xpu': True}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

@@ -48,79 +47,87 @@ class TestXPUActivation(XPUOpTest):
    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ['X'], 'Out')
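# Each op's test below derives from TestActivationOPBase and only overrides
# set_case() to build the inputs, reference outputs and attributes; the output
# and gradient checks always run on XPUPlace(0) via the base class.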
class XPUTestExpOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'exp'
        self.use_dynamic_create_class = False

    class XPUTestExp(TestActivationOPBase):
        def set_case(self):
            self.op_type = 'exp'
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.exp(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('exp')
for stype in support_types:
    create_test_class(globals(), XPUTestExpOP, stype)
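# get_xpu_op_support_types() lists the dtypes the XPU kernel registers for an op,
# and create_test_class() generates one concrete test class per dtype, passing the
# dtype to the test case as self.in_type (copied into self.dtype in set_case above).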
class XPUTestSigmoidOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sigmoid'
        self.use_dynamic_create_class = False

    class XPUTestSigmoid(TestActivationOPBase):
        def set_case(self):
            self.op_type = "sigmoid"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = 1 / (1 + np.exp(-x))
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('sigmoid')
for stype in support_types:
    create_test_class(globals(), XPUTestSigmoidOP, stype)
class XPUTestTanhOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'tanh'
        self.use_dynamic_create_class = False

    class XPUTestTanh(TestActivationOPBase):
        def set_case(self):
            self.op_type = "tanh"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.tanh(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('tanh')
for stype in support_types:
    create_test_class(globals(), XPUTestTanhOP, stype)
class XPUTestSqrtOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'sqrt'
        self.use_dynamic_create_class = False

    class XPUTestSqrt(TestActivationOPBase):
        def set_case(self):
            self.op_type = "sqrt"
            self.dtype = self.in_type

            x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
            out = np.sqrt(x)
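            # sqrt (and its gradient) is only well-defined for positive inputs,
            # so the samples stay in (0.1, 1) instead of the (-1, 1) range used
            # for most other ops in this file.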
@@ -129,18 +136,21 @@ class TestXPUSqrt(TestXPUActivation):
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('sqrt')
for stype in support_types:
    create_test_class(globals(), XPUTestSqrtOP, stype)
class XPUTestAbsOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'abs'
        self.use_dynamic_create_class = False

    class XPUTestAbs(TestActivationOPBase):
        def set_case(self):
            self.op_type = "abs"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
            # Because we set delta = 0.005 in calculating numeric gradient,
@@ -154,18 +164,21 @@ class TestXPUAbs(TestXPUActivation):
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('abs')
for stype in support_types:
    create_test_class(globals(), XPUTestAbsOP, stype)
class XPUTestReluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'relu'
        self.use_dynamic_create_class = False

    class XPUTestRelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "relu"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            # The same reason as in TestAbs
@@ -176,36 +189,22 @@ class TestXPURelu(TestXPUActivation):
            self.inputs = {'X': x}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('relu')
for stype in support_types:
    create_test_class(globals(), XPUTestReluOP, stype)
class XPUTestGeluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'gelu'
        self.use_dynamic_create_class = False

    class XPUTestGelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "gelu"
            self.dtype = self.in_type

            approximate = False
            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = gelu(x, approximate)
@@ -214,16 +213,14 @@ class TestXPUGelu(TestXPUActivation):
            self.outputs = {'Out': out}
            self.attrs = {"approximate": approximate, 'use_xpu': True}


support_types = get_xpu_op_support_types('gelu')
for stype in support_types:
    create_test_class(globals(), XPUTestGeluOP, stype)
def gelu(x, approximate):
    from scipy.special import erf
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
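        # Tanh-based approximation of gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)));
        # the exact form is evaluated with scipy's erf, imported above.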
@@ -232,12 +229,16 @@ def gelu(x, approximate):
    return y_ref.astype(x.dtype)
class XPUTestHardSwishOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'hard_swish'
        self.use_dynamic_create_class = False

    class XPUTestHardSwish(TestActivationOPBase):
        def set_case(self):
            self.op_type = "hard_swish"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            offset = 3.0
            threshold = 6.0
@@ -248,10 +249,10 @@ class TestXPUHardSwish(TestXPUActivation):
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('hard_swish')
for stype in support_types:
    create_test_class(globals(), XPUTestHardSwishOP, stype)


def hard_swish(x, offset, threshold, scale):
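    # Reference formula (Paddle's hard_swish definition; offset=3, threshold=6,
    # scale=6 in the test above): out = x * min(max(x + offset, 0), threshold) / scale.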
@@ -259,12 +260,15 @@ def hard_swish(x, offset, threshold, scale):
    return y_ref.astype(x.dtype)
class XPUTestLogOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'log'
        self.use_dynamic_create_class = False

    class XPUTestLog(TestActivationOPBase):
        def set_case(self):
            self.op_type = "log"
            self.dtype = self.in_type

            x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
            out = np.log(x)
@@ -274,32 +278,43 @@ class TestXPULog(TestXPUActivation):
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('log')
for stype in support_types:
    create_test_class(globals(), XPUTestLogOP, stype)


class XPUTestSquareOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'square'
        self.use_dynamic_create_class = False

    class XPUTestSquare(TestActivationOPBase):
        def set_case(self):
            self.op_type = "square"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            out = np.square(x)
            self.attrs = {'use_xpu': True}
            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('square')
for stype in support_types:
    create_test_class(globals(), XPUTestSquareOP, stype)
class XPUTestPowOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'pow'
        self.use_dynamic_create_class = False

    class XPUTestPow(TestActivationOPBase):
        def set_case(self):
            self.op_type = "pow"
            self.dtype = self.in_type

            x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
            out = np.power(x, 3)
@@ -309,12 +324,21 @@ class TestXPUPow(TestXPUActivation):
            self.outputs = {'Out': out}


support_types = get_xpu_op_support_types('pow')
for stype in support_types:
    create_test_class(globals(), XPUTestPowOP, stype)


class XPUTestLeakyReluOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'leaky_relu'
        self.use_dynamic_create_class = False

    class XPUTestLeakyRelu(TestActivationOPBase):
        def set_case(self):
            self.op_type = "leaky_relu"
            self.dtype = self.in_type

            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
            alpha = np.random.uniform(
                0,
@@ -325,10 +349,10 @@ class TestXPULeakyRelu(TestXPUActivation):
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True, 'alpha': alpha}


support_types = get_xpu_op_support_types('leaky_relu')
for stype in support_types:
    create_test_class(globals(), XPUTestLeakyReluOP, stype)


def leaky_relu(x, alpha):
@@ -339,10 +363,15 @@ def leaky_relu(x, alpha):
    return y_ref.astype(x.dtype)
class XPUTestReciprocalOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'reciprocal'
        self.use_dynamic_create_class = False

    class XPUTestReciprocal(TestActivationOPBase):
        def set_case(self):
            self.op_type = "reciprocal"
            self.dtype = self.in_type

            np.random.seed(1024)
            x = np.random.uniform(1, 2, [1111, 1117]).astype(self.dtype)
@@ -352,20 +381,23 @@ class TestXPUReciprocal(TestXPUActivation):
            self.outputs = {'Out': out}
            self.attrs = {'use_xpu': True}


support_types = get_xpu_op_support_types('reciprocal')
for stype in support_types:
    create_test_class(globals(), XPUTestReciprocalOP, stype)
class XPUTestSoftPlusOP(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'softplus'
        self.use_dynamic_create_class = False

    class XPUTestSoftPlusBase(TestActivationOPBase):
        def set_case(self):
            self.op_type = "softplus"
            self.dtype = self.in_type
            self.init_config()

            beta = np.random.uniform(0, 1)
            threshold = np.random.uniform(0, 1)
            out = ref_softplus(self.x, beta, threshold)
@@ -377,31 +409,24 @@
        def init_config(self):
            self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

    class XPUTestSoftPlus2(XPUTestSoftPlusBase):
        def init_config(self):
            self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype)

    class XPUTestSoftPlus3(XPUTestSoftPlusBase):
        def init_config(self):
            self.x = np.random.uniform(-2, 2,
                                       [4, 512, 15, 15]).astype(self.dtype)

    class XPUTestSoftPlus4(XPUTestSoftPlusBase):
        def init_config(self):
            self.x = np.random.uniform(-2, 2,
                                       [4, 256, 22, 22]).astype(self.dtype)


support_types = get_xpu_op_support_types('softplus')
for stype in support_types:
    create_test_class(globals(), XPUTestSoftPlusOP, stype)


def ref_softplus(x, beta=1, threshold=20):
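    # Reference softplus (Paddle's definition): out = (1 / beta) * log(1 + exp(beta * x)),
    # falling back to the linear form x when beta * x exceeds threshold to avoid overflow.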
@@ -412,5 +437,4 @@ def ref_softplus(x, beta=1, threshold=20):
if __name__ == "__main__":
paddle.enable_static()
unittest.main()