From 83dd7e473546322bae99cde67664720c8053e53e Mon Sep 17 00:00:00 2001
From: houj04 <35131887+houj04@users.noreply.github.com>
Date: Mon, 21 Feb 2022 12:46:41 +0800
Subject: [PATCH] update unittests for activation ops on xpu test=kunlun
 (#39677)

* update unittests for activation ops on xpu. test=kunlun

* update input data range. test=kunlun

* update input data range. test=kunlun
---
 .../unittests/xpu/test_activation_op_xpu.py   | 570 +++++++++---------
 1 file changed, 297 insertions(+), 273 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
index 57af5739f5..3436e443ab 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,32 +14,31 @@
 from __future__ import print_function

-import sys
-sys.path.append("..")
 import unittest
 import numpy as np
-import paddle.fluid.core as core
+import sys
+sys.path.append("..")
+
+import paddle
+
 from op_test import OpTest
 from op_test_xpu import XPUOpTest
-from scipy.special import expit, erf
-import paddle
-import paddle.fluid as fluid
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.fluid import compiler, Program, program_guard
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

 paddle.enable_static()

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUActivation(XPUOpTest):
+
+class TestActivationOPBase(XPUOpTest):
     def setUp(self):
-        self.op_type = "exp"
+        self.place = paddle.XPUPlace(0)
         self.init_dtype()
-        self.init_kernel_type()
+        self.set_case()

-        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
-        out = np.exp(x)
+    def set_case(self):
+        self.op_type = 'exp'

+        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+        out = np.exp(x)
         self.attrs = {'use_xpu': True}
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.outputs = {'Out': out}
@@ -48,182 +47,180 @@ class TestXPUActivation(XPUOpTest):
         self.dtype = np.float32

     def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place, atol=1e-3)
+        self.check_output_with_place(self.place)

-    def init_kernel_type(self):
-        pass
+    def test_check_grad(self):
+        self.check_grad_with_place(self.place, ['X'], 'Out')

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUSigmoid(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "sigmoid"
-        self.init_dtype()
+class XPUTestExpOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'exp'
+        self.use_dynamic_create_class = False

-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = 1 / (1 + np.exp(-x))
+    class XPUTestExp(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = 'exp'
+            self.dtype = self.in_type

-        self.attrs = {'use_xpu': True}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
+            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+            out = np.exp(x)
+            self.attrs = {'use_xpu': True}
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+
+support_types = get_xpu_op_support_types('exp')
+for stype in support_types:
+    create_test_class(globals(), XPUTestExpOP, stype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUTanh(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "tanh"
-        self.init_dtype()
-        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
-        out = np.tanh(x)

-        self.attrs = {'use_xpu': True}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
+class XPUTestSigmoidOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'sigmoid'
+        self.use_dynamic_create_class = False

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+    class XPUTestSigmoid(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "sigmoid"
+            self.dtype = self.in_type

+            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+            out = 1 / (1 + np.exp(-x))
+            self.attrs = {'use_xpu': True}
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}

-class TestXPUTanhFP16(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "tanh"
-        self.init_dtype()
-        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
-        out = np.tanh(x)

-        self.attrs = {'use_xpu': True}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
+support_types = get_xpu_op_support_types('sigmoid')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSigmoidOP, stype)

-    def init_dtype(self):
-        self.dtype = np.float16

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+class XPUTestTanhOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'tanh'
+        self.use_dynamic_create_class = False

+    class XPUTestTanh(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "tanh"
+            self.dtype = self.in_type

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUSqrt(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "sqrt"
-        self.init_dtype()
+            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+            out = np.tanh(x)
+            self.attrs = {'use_xpu': True}
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}

-        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
-        out = np.sqrt(x)

-        self.attrs = {'use_xpu': True}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
+support_types = get_xpu_op_support_types('tanh')
+for stype in support_types:
+    create_test_class(globals(), XPUTestTanhOP, stype)

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+
+class XPUTestSqrtOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'sqrt'
+        self.use_dynamic_create_class = False

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUAbs(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "abs"
-        self.init_dtype()
+    class XPUTestSqrt(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "sqrt"
+            self.dtype = self.in_type

-        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
-        # Because we set delta = 0.005 in calculating numeric gradient,
-        # if x is too small, such as 0.002, x_neg will be -0.003
-        # x_pos will be 0.007, so the numeric gradient is inaccurate.
-        # we should avoid this
-        x[np.abs(x) < 0.005] = 0.02
-        out = np.abs(x)
+            x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
+            out = np.sqrt(x)

-        self.attrs = {'use_xpu': True}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
+            self.attrs = {'use_xpu': True}
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+
+support_types = get_xpu_op_support_types('sqrt')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSqrtOP, stype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPURelu(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "relu"
-        self.init_dtype()
+class XPUTestAbsOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'abs'
+        self.use_dynamic_create_class = False

-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        # The same reason with TestAbs
-        x[np.abs(x) < 0.005] = 0.02
-        out = np.maximum(x, 0)
+    class XPUTestAbs(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "abs"
+            self.dtype = self.in_type

-        self.attrs = {'use_xpu': True}
-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
+            x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
+            # Because we set delta = 0.005 in calculating numeric gradient,
+            # if x is too small, such as 0.002, x_neg will be -0.003
+            # x_pos will be 0.007, so the numeric gradient is inaccurate.
+            # we should avoid this
+            x[np.abs(x) < 0.005] = 0.02
+            out = np.abs(x)
+            self.attrs = {'use_xpu': True}
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+
+support_types = get_xpu_op_support_types('abs')
+for stype in support_types:
+    create_test_class(globals(), XPUTestAbsOP, stype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUGelu(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "gelu"
-        self.init_dtype()
-        approximate = False
-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = gelu(x, approximate)
-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
-        self.attrs = {"approximate": approximate, 'use_xpu': True}
+class XPUTestReluOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'relu'
+        self.use_dynamic_create_class = False

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+    class XPUTestRelu(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "relu"
+            self.dtype = self.in_type

-class TestXPUGelu(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "gelu"
-        self.init_dtype()
-        approximate = False
-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        out = gelu(x, approximate)
+            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+            # The same reason with TestAbs
+            x[np.abs(x) < 0.005] = 0.02
+            out = np.maximum(x, 0)

-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
-        self.attrs = {"approximate": approximate, 'use_xpu': True}
+            self.attrs = {'use_xpu': True}
+            self.inputs = {'X': x}
+            self.outputs = {'Out': out}

-    def init_dtype(self):
-        self.dtype = np.float16
+
+support_types = get_xpu_op_support_types('relu')
+for stype in support_types:
+    create_test_class(globals(), XPUTestReluOP, stype)

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+
+class XPUTestGeluOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'gelu'
+        self.use_dynamic_create_class = False
+
+    class XPUTestGelu(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "gelu"
+            self.dtype = self.in_type
+
+            approximate = False
+            x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+            out = gelu(x, approximate)
+
+            self.inputs = {'X': x}
+            self.outputs = {'Out': out}
+            self.attrs = {"approximate": approximate, 'use_xpu': True}

+
+support_types = get_xpu_op_support_types('gelu')
+for stype in support_types:
+    create_test_class(globals(), XPUTestGeluOP, stype)

 def gelu(x, approximate):
+    from scipy.special import erf
     if approximate:
         y_ref = 0.5 * x * (1.0 + np.tanh(
             np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
@@ -232,26 +229,30 @@ def gelu(x, approximate):
     return y_ref.astype(x.dtype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUHardSwish(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "hard_swish"
-        self.init_dtype()
-        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
-        offset = 3.0
-        threshold = 6.0
-        scale = 6.0
-        out = hard_swish(x, offset, threshold, scale)
+class XPUTestHardSwishOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'hard_swish'
+        self.use_dynamic_create_class = False

-        self.inputs = {'X': x}
-        self.outputs = {'Out': out}
-        self.attrs = {'use_xpu': True}
+    class XPUTestHardSwish(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "hard_swish"
"hard_swish" + self.dtype = self.in_type - def test_check_grad(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], 'Out') + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + offset = 3.0 + threshold = 6.0 + scale = 6.0 + out = hard_swish(x, offset, threshold, scale) + + self.inputs = {'X': x} + self.outputs = {'Out': out} + self.attrs = {'use_xpu': True} + + +support_types = get_xpu_op_support_types('hard_swish') +for stype in support_types: + create_test_class(globals(), XPUTestHardSwishOP, stype) def hard_swish(x, offset, threshold, scale): @@ -259,76 +260,99 @@ def hard_swish(x, offset, threshold, scale): return y_ref.astype(x.dtype) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestXPULog(TestXPUActivation): - def setUp(self): - self.op_type = "log" - self.init_dtype() +class XPUTestLogOP(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'log' + self.use_dynamic_create_class = False - x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) - out = np.log(x) + class XPUTestLog(TestActivationOPBase): + def set_case(self): + self.op_type = "log" + self.dtype = self.in_type - self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} - self.outputs = {'Out': out} + x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) + out = np.log(x) + self.attrs = {'use_xpu': True} + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestXPUSquare(TestXPUActivation): - def setUp(self): - self.op_type = "square" - self.init_dtype() - x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) - out = np.square(x) +support_types = get_xpu_op_support_types('log') +for stype in support_types: + create_test_class(globals(), XPUTestLogOP, stype) - self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} - self.outputs = {'Out': out} - def test_check_grad(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X'], 'Out') +class XPUTestSquareOP(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'square' + self.use_dynamic_create_class = False + class XPUTestSquare(TestActivationOPBase): + def set_case(self): + self.op_type = "square" + self.dtype = self.in_type -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestXPUPow(TestXPUActivation): - def setUp(self): - self.op_type = "pow" - self.init_dtype() + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + out = np.square(x) - x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) - out = np.power(x, 3) + self.attrs = {'use_xpu': True} + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} - self.attrs = {'factor': 3.0, 'use_xpu': True} - self.outputs = {'Out': out} +support_types = get_xpu_op_support_types('square') +for stype in support_types: + create_test_class(globals(), XPUTestSquareOP, stype) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestXPULeakyRelu(TestXPUActivation): - def setUp(self): - self.op_type = "leaky_relu" - self.init_dtype() - x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) - alpha = np.random.uniform( - 0, - 1, ) - out = leaky_relu(x, alpha) - self.inputs = 
-        self.outputs = {'Out': out}
-        self.attrs = {'use_xpu': True, 'alpha': alpha}
+            self.inputs = {'X': x}
+            self.outputs = {'Out': out}
+            self.attrs = {'use_xpu': True, 'alpha': alpha}

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+
+support_types = get_xpu_op_support_types('leaky_relu')
+for stype in support_types:
+    create_test_class(globals(), XPUTestLeakyReluOP, stype)

 def leaky_relu(x, alpha):
@@ -339,69 +363,70 @@ def leaky_relu(x, alpha):
     return y_ref.astype(x.dtype)

-class TestXPUReciprocal(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "reciprocal"
-        self.init_dtype()
+class XPUTestReciprocalOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'reciprocal'
+        self.use_dynamic_create_class = False

-        np.random.seed(1024)
-        x = np.random.uniform(1, 2, [1111, 1117]).astype(self.dtype)
-        out = np.reciprocal(x)
+    class XPUTestRecipocal(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "reciprocal"
+            self.dtype = self.in_type

-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
-        self.outputs = {'Out': out}
-        self.attrs = {'use_xpu': True}
+            np.random.seed(1024)
+            x = np.random.uniform(1, 2, [1111, 1117]).astype(self.dtype)
+            out = np.reciprocal(x)

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+            self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+            self.outputs = {'Out': out}
+            self.attrs = {'use_xpu': True}

+
+support_types = get_xpu_op_support_types('reciprocal')
+for stype in support_types:
+    create_test_class(globals(), XPUTestReciprocalOP, stype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUSoftPlus(TestXPUActivation):
-    def setUp(self):
-        self.op_type = "softplus"
-        self.init_dtype()
-        self.init_config()
+class XPUTestSoftPlusOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'softplus'
+        self.use_dynamic_create_class = False

-        beta = np.random.uniform(0, 1)
-        threshold = np.random.uniform(0, 1)
-        out = ref_softplus(self.x, beta, threshold)
+    class XPUTestSoftPlusBase(TestActivationOPBase):
+        def set_case(self):
+            self.op_type = "softplus"
+            self.dtype = self.in_type

-        self.inputs = {'X': self.x}
-        self.outputs = {'Out': out}
-        self.attrs = {'use_xpu': True, 'beta': beta, 'threshold': threshold}
+            self.init_config()
+            beta = np.random.uniform(0, 1)
+            threshold = np.random.uniform(0, 1)
+            out = ref_softplus(self.x, beta, threshold)

-    def init_config(self):
-        self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
+            self.inputs = {'X': self.x}
+            self.outputs = {'Out': out}
+            self.attrs = {'use_xpu': True, 'beta': beta, 'threshold': threshold}

-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['X'], 'Out')
+        def init_config(self):
+            self.x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUSoftPlus2(TestXPUSoftPlus):
-    def init_config(self):
-        self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype)
+    class XPUTestSoftPlus2(XPUTestSoftPlusBase):
+        def init_config(self):
+            self.x = np.random.uniform(-2, 2, [1024, 8]).astype(self.dtype)

+    class XPUTestSoftPlus3(XPUTestSoftPlusBase):
+        def init_config(self):
+            self.x = np.random.uniform(-2, 2,
+                                       [4, 512, 15, 15]).astype(self.dtype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUSoftPlus3(TestXPUSoftPlus):
-    def init_config(self):
-        self.x = np.random.uniform(-2, 2, [4, 512, 15, 15]).astype(self.dtype)
+    class XPUTestSoftPlus4(XPUTestSoftPlusBase):
+        def init_config(self):
+            self.x = np.random.uniform(-2, 2,
+                                       [4, 256, 22, 22]).astype(self.dtype)

-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUSoftPlus4(TestXPUSoftPlus):
-    def init_config(self):
-        self.x = np.random.uniform(-2, 2, [4, 256, 22, 22]).astype(self.dtype)
+
+support_types = get_xpu_op_support_types('softplus')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSoftPlusOP, stype)

 def ref_softplus(x, beta=1, threshold=20):
@@ -412,5 +437,4 @@ def ref_softplus(x, beta=1, threshold=20):

 if __name__ == "__main__":
-    paddle.enable_static()
     unittest.main()
--
GitLab