Unverified commit 457defe7, authored by Charles-hit, committed by GitHub

[OpTest] support prim test in OpTest (#50509)

* support prim test in OpTest

* fix cmake

* fix op test

* fix test_input_spec

* disable cinn in reduce_sum unit test

* add bfloat16 dtype for sum

* polish code

* add clear jit program function

* convert grad out from tensor to numpy

* remove unnecessary code

* add only_prim flag

* fix flag

* fix op test

* fix optest comp inplace error

* fix op test

* fix op test with guard

* add initialization of check_comp flag

* fix comp inplace error in op test

* rename check_comp to check_prim and add bfloat16 dtype conversion

* rename comp_op_type to prim_op_type

* rename comp to prim

* remove useless code

* skip CI check for only_prim

* add no_grad_vars and grad_outputs in prim test

* fix var_dict

* fix op test for only_prim

* fix dy2static bugs

* polish some code
Parent 2135020a
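The thread running through all the diffs below is one pattern: a test declares prim_op_type and passes check_prim=True, and the rewritten OpTest harness (its own diff is collapsed further down) compares the primitive-operator decomposition against the legacy kernel, optionally through CINN. A minimal sketch of that pattern; the class name and shapes here are invented for illustration, while prim_op_type, check_prim, and enable_cinn all come from the diffs themselves:

import numpy as np
import paddle
from op_test import OpTest  # resolvable when run from the unittests directory


class TestExpandV2Prim(OpTest):  # hypothetical minimal case
    def setUp(self):
        self.op_type = "expand_v2"       # legacy operator under test
        self.prim_op_type = "prim"       # also check the primitive decomposition
        self.python_api = paddle.expand  # eager API the prim path traces
        self.inputs = {'X': np.random.random([100]).astype("float64")}
        self.attrs = {'shape': [2, 100]}
        self.outputs = {'Out': np.tile(self.inputs['X'], (2, 1))}
        self.enable_cinn = False         # skip the CINN backend for this case

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)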
@@ -1204,6 +1204,14 @@ if($ENV{USE_STANDALONE_EXECUTOR})
     PROPERTIES ENVIRONMENT FLAGS_USE_STANDALONE_EXECUTOR=0)
 endif()

+set(TEST_CINN_OPS test_softmax_op test_expand_v2_op test_reduce_op)
+
+foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
+  if(WITH_CINN)
+    set_tests_properties(${TEST_CINN_OPS} PROPERTIES LABELS "RUN_TYPE=CINN")
+  endif()
+endforeach()
+
 if(WITH_CINN AND WITH_TESTING)
   set_tests_properties(
     test_resnet50_with_cinn
......
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np

TOLERANCE = {
np.dtype('float64'): {
"jit_comp": {"rtol": 1e-15, "atol": 1e-15},
"fw_comp": {"rtol": 1e-15, "atol": 1e-15},
"rev_comp": {"rtol": 1e-15, "atol": 1e-15},
"cinn": {"rtol": 1e-14, "atol": 1e-14},
},
np.dtype('float32'): {
"jit_comp": {"rtol": 1e-6, "atol": 1e-6},
"fw_comp": {"rtol": 1e-6, "atol": 1e-6},
"rev_comp": {"rtol": 1e-6, "atol": 1e-6},
"cinn": {"rtol": 1e-5, "atol": 1e-5},
},
np.dtype('float16'): {
"jit_comp": {"rtol": 1e-3, "atol": 1e-3},
"fw_comp": {"rtol": 1e-3, "atol": 1e-3},
"rev_comp": {"rtol": 1e-3, "atol": 1e-3},
"cinn": {"rtol": 1e-2, "atol": 1e-2},
},
np.dtype('uint16'): {
"jit_comp": {"rtol": 1e-2, "atol": 1e-2},
"fw_comp": {"rtol": 1e-2, "atol": 1e-2},
"rev_comp": {"rtol": 1e-2, "atol": 1e-2},
"cinn": {"rtol": 1e-1, "atol": 1e-1},
},
}
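The table is keyed by numpy dtype (np.dtype('uint16') stands in for bfloat16, which numpy cannot express natively) and then by check kind, presumably jit_comp for the jit-compiled composite program, fw_comp for the composite forward, rev_comp for the composite backward, and cinn for the CINN-compiled program. A sketch of the lookup a checker might perform; the helper is hypothetical, only TOLERANCE itself is from the commit:

def get_prim_tolerance(dtype, check_kind):
    # Hypothetical helper: returns (rtol, atol) for one kind of prim check.
    cfg = TOLERANCE[np.dtype(dtype)][check_kind]
    return cfg["rtol"], cfg["atol"]

rtol, atol = get_prim_tolerance("float32", "rev_comp")
assert (rtol, atol) == (1e-6, 1e-6)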
This diff is collapsed (not expanded in the original view).
@@ -28,13 +28,14 @@ from paddle.fluid import Program, core, program_guard
 class TestExpandV2OpRank1(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.prim_op_type = "prim"
         self.init_data()
         self.python_api = paddle.expand
         self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
         self.attrs = {'shape': self.shape}
         output = np.tile(self.inputs['X'], self.expand_times)
         self.outputs = {'Out': output}
+        self.enable_cinn = False

     def init_data(self):
         self.ori_shape = [100]
@@ -42,10 +43,10 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_prim=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
@@ -80,6 +81,7 @@ class TestExpandV2OpRank4(TestExpandV2OpRank1):
 class TestExpandV2OpRank1_tensor_attr(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.prim_op_type = "prim"
         self.python_api = paddle.expand
         self.init_data()
         expand_shapes_tensor = []
@@ -103,10 +105,10 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
         self.infer_expand_shape = [-1]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_prim=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
@@ -121,6 +123,7 @@ class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
 class TestExpandV2OpRank1_tensor(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.prim_op_type = "prim"
         self.python_api = paddle.expand
         self.init_data()
@@ -148,6 +151,7 @@ class TestExpandV2OpRank1_tensor(OpTest):
 class TestExpandV2OpInteger(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.prim_op_type = "prim"
         self.python_api = paddle.expand
         self.inputs = {
             'X': np.random.randint(10, size=(2, 4, 5)).astype("int32")
@@ -160,10 +164,11 @@ class TestExpandV2OpInteger(OpTest):
         self.check_output()


 # Situation 5: input x is Bool
 class TestExpandV2OpBoolean(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.prim_op_type = "prim"
         self.python_api = paddle.expand
         self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")}
         self.attrs = {'shape': [2, 4, 5]}
@@ -174,10 +179,11 @@ class TestExpandV2OpBoolean(OpTest):
         self.check_output()


 # Situation 6: input x is Integer
 class TestExpandV2OpInt64_t(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.prim_op_type = "prim"
         self.python_api = paddle.expand
         self.inputs = {
             'X': np.random.randint(10, size=(2, 4, 5)).astype("int64")
......
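Only the float64 cases above turn on check_prim; the integer and boolean variants gain prim_op_type but keep a plain check_output(), presumably because gradients, and with them the composite backward check, are undefined for those dtypes. The backward rule the prim check verifies for expand is a sum-reduction over the broadcast axes; a numpy paraphrase of that rule (mine, not code from the PR):

import numpy as np

x = np.random.random((100,))   # input, as in the Rank1 case above
out_grad = np.ones((2, 100))   # upstream gradient of the expanded output
x_grad = out_grad.sum(axis=0)  # expand's gradient sums over the broadcast axes
assert x_grad.shape == x.shape and np.allclose(x_grad, 2.0)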
@@ -76,10 +76,6 @@ class TestInputSpec(unittest.TestCase):
         with self.assertRaises(TypeError):
             tensor_spec = InputSpec(4, dtype='int8')

-        # 3. len(shape) should be greater than 0.
-        with self.assertRaises(ValueError):
-            tensor_spec = InputSpec([], dtype='int8')
-
     def test_batch_and_unbatch(self):
         tensor_spec = InputSpec([10])
         # insert batch_size
@@ -90,15 +86,11 @@ class TestInputSpec(unittest.TestCase):
         unbatch_spec = batch_tensor_spec.unbatch()
         self.assertEqual(unbatch_spec.shape, (10,))

-        # 1. `unbatch` requires len(shape) > 1
-        with self.assertRaises(ValueError):
-            unbatch_spec.unbatch()
-
-        # 2. `batch` requires len(batch_size) == 1
+        # 1. `batch` requires len(batch_size) == 1
         with self.assertRaises(ValueError):
             tensor_spec.batch([16, 12])

-        # 3. `batch` requires type(batch_size) == int
+        # 2. `batch` requires type(batch_size) == int
         with self.assertRaises(TypeError):
             tensor_spec.batch('16')
......
@@ -28,36 +28,25 @@ class TestSumOp(OpTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         self.attrs = {'dim': [0]}
+        # reduce doesn't support float64 in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
-
-
-class TestSumOp_ZeroDim(OpTest):
-    def setUp(self):
-        self.python_api = paddle.sum
-        self.op_type = "reduce_sum"
-        self.inputs = {'X': np.random.random([]).astype("float64")}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
-        self.attrs = {'dim': [], 'reduce_all': True}
-
-    def test_check_output(self):
-        self.check_output(check_eager=True)
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


-class TestSumOp_fp16(OpTest):
+class TestSumOpFp32(OpTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
         }
@@ -66,6 +55,8 @@ class TestSumOp_fp16(OpTest):
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
         self.gradient = self.calc_gradient()
+        # error occurred in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -77,10 +68,33 @@ class TestSumOp_fp16(OpTest):
     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
+            ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True,
+            check_prim=True,
         )


+class TestSumOp_ZeroDim(OpTest):
+    def setUp(self):
+        self.python_api = paddle.sum
+        self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
+        self.inputs = {'X': np.random.random([]).astype("float64")}
+        self.outputs = {'Out': self.inputs['X'].sum(axis=None)}
+        self.attrs = {'dim': [], 'reduce_all': True}
+        # reduce doesn't support float64 in cinn.
+        # 0-D tensors aren't supported in cinn either
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        self.check_output(check_eager=True)
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_eager=True)
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
@@ -89,6 +103,7 @@ class TestSumOp_bf16(OpTest):
         np.random.seed(100)
         self.python_api = paddle.sum
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
         self.attrs = {'dim': [0, 1, 2]}
@@ -98,6 +113,7 @@ class TestSumOp_bf16(OpTest):
         self.inputs = {'X': convert_float_to_uint16(self.x)}
         self.outputs = {'Out': convert_float_to_uint16(self.out)}
         self.gradient = self.calc_gradient()
+        self.enable_cinn = False

     def test_check_output(self):
         place = core.CUDAPlace(0)
@@ -111,6 +127,7 @@ class TestSumOp_bf16(OpTest):
             'Out',
             user_defined_grads=self.gradient,
             check_eager=True,
+            check_prim=True,
         )

     def calc_gradient(self):
@@ -123,6 +140,7 @@ class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
             # Precision limitations on integer values between 0 and 2048 can be exactly represented
@@ -133,6 +151,7 @@ class TestSumOp_fp16_withInt(OpTest):
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
         self.gradient = self.calc_gradient()
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -144,7 +163,11 @@ class TestSumOp_fp16_withInt(OpTest):
     def test_check_grad(self):
         self.check_grad(
-            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True
+            ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True,
+            check_prim=True,
         )
@@ -152,34 +175,40 @@ class TestSumOp5D(OpTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        # error occurred in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


 class TestSumOp6D(OpTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        # error occurred in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)


 class TestSumOp8D(OpTest):
@@ -193,7 +222,7 @@ class TestSumOp8D(OpTest):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_eager=True)
@@ -633,72 +662,100 @@ class TestAnyOpError(unittest.TestCase):
 class Test1DReduce(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random(120).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


 class Test2DReduce0(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.attrs = {'dim': [0]}
         self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class Test2DReduce1(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.attrs = {'dim': [1]}
         self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class Test3DReduce0(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.attrs = {'dim': [1]}
         self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class Test3DReduce1(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.attrs = {'dim': [2]}
         self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class Test3DReduce2(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.attrs = {'dim': [-2]}
         self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class Test3DReduce3(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.attrs = {'dim': [1, 2]}
         self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class Test8DReduce0(Test1DReduce):
@@ -712,10 +769,18 @@ class Test8DReduce0(Test1DReduce):
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+

 class TestKeepDimReduce(Test1DReduce):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.attrs = {'dim': [1], 'keep_dim': True}
         self.outputs = {
@@ -723,6 +788,8 @@ class TestKeepDimReduce(Test1DReduce):
                 axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
             )
         }
+        # reduce doesn't support float64 in cinn.
+        self.enable_cinn = False


 class TestKeepDim8DReduce(Test1DReduce):
@@ -738,6 +805,12 @@ class TestKeepDim8DReduce(Test1DReduce):
             )
         }

+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+

 @skip_check_grad_ci(
     reason="reduce_max is discontinuous non-derivable function,"
@@ -782,6 +855,8 @@ class TestReduceMinOpMultiAxises(OpTest):
 class TestKeepDimReduceSumMultiAxises(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.attrs = {'dim': [-2, -1], 'keep_dim': True}
         self.outputs = {
@@ -794,12 +869,15 @@ class TestKeepDimReduceSumMultiAxises(OpTest):
         self.check_output()

     def test_check_grad(self):
+        # rev_comp error
         self.check_grad(['X'], 'Out')


 class TestReduceSumWithDimOne(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
         self.attrs = {'dim': [1, 2], 'keep_dim': True}
         self.outputs = {
@@ -807,17 +885,21 @@ class TestReduceSumWithDimOne(OpTest):
                 axis=tuple(self.attrs['dim']), keepdims=True
             )
         }
+        # reduce doesn't support float64 in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestReduceSumWithNumelOne(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
         self.attrs = {'dim': [1], 'keep_dim': False}
         self.outputs = {
@@ -825,45 +907,74 @@ class TestReduceSumWithNumelOne(OpTest):
                 axis=tuple(self.attrs['dim']), keepdims=False
             )
         }
+        # reduce doesn't support float64 in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=False)


 class TestReduceAll(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
         self.attrs = {'reduce_all': True, 'keep_dim': False}
         self.outputs = {'Out': self.inputs['X'].sum()}
+        # reduce doesn't support float64 in cinn
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


+class TestReduceAllFp32(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
+        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
+        self.attrs = {'reduce_all': True, 'keep_dim': False}
+        self.outputs = {'Out': self.inputs['X'].sum()}
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+
 class Test1DReduceWithAxes1(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random(100).astype("float64")}
         self.attrs = {'dim': [0], 'keep_dim': False}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        self.enable_cinn = False

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_prim=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestReduceWithDtype(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = paddle.sum
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
         self.attrs = {'reduce_all': True}
@@ -873,17 +984,26 @@ class TestReduceWithDtype(OpTest):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
+        self.enable_cinn = False

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_prim=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


+def reduce_sum_wrapper(
+    x, axis=None, dtype_rename=None, keepdim=False, name=None
+):
+    return paddle.sum(x, axis, "float64", keepdim, name)
+
+
 class TestReduceWithDtype1(TestReduceWithDtype):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.python_api = reduce_sum_wrapper
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=1)}
         self.attrs = {'dim': [1]}
@@ -893,11 +1013,20 @@ class TestReduceWithDtype1(TestReduceWithDtype):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestReduceWithDtype2(TestReduceWithDtype):
     def setUp(self):
         self.op_type = "reduce_sum"
+        self.prim_op_type = "prim"
+        self.python_api = reduce_sum_wrapper
         self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
         self.attrs = {'dim': [1], 'keep_dim': True}
@@ -907,6 +1036,13 @@ class TestReduceWithDtype2(TestReduceWithDtype):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestReduceSumOpError(unittest.TestCase):
......
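The reduce_sum_wrapper defined in this diff exists because OpTest calls self.python_api to build the eager and prim reference, and the legacy reduce_sum op carries an out_dtype attribute that paddle.sum expresses through its dtype argument; pinning dtype to "float64" keeps the wrapper's signature aligned with the op's attributes. Assuming the wrapper from the diff above is in scope, it behaves like this:

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((6, 2, 10)), dtype='float64')
out = reduce_sum_wrapper(x, axis=1)  # same as paddle.sum(x, 1, "float64")
assert out.dtype == paddle.float64 and out.shape == [6, 10]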
@@ -43,12 +43,6 @@ def ref_softmax(x, axis=None, dtype=None):
     return np.apply_along_axis(stable_softmax, axis, x_t)


-def softmax_wrapper(
-    x, axis=-1, dtype=None, name=None, use_cudnn=False, use_mkldnn=False
-):
-    return paddle.nn.functional.softmax(x, axis=axis, dtype=dtype)
-
-
 class TestSoftmaxOp(OpTest):
     def get_x_shape(self):
         return [10, 10]
@@ -58,7 +52,8 @@ class TestSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = "softmax"
-        self.python_api = softmax_wrapper
+        self.prim_op_type = "comp"
+        self.python_api = F.softmax
         self.use_cudnn = False
         self.use_mkldnn = False
         # explicitly use float32 for ROCm, as MIOpen does not yet support float64
@@ -78,6 +73,7 @@ class TestSoftmaxOp(OpTest):
             'use_cudnn': self.use_cudnn,
             'use_mkldnn': self.use_mkldnn,
         }
+        self.enable_cinn = False

     def init_kernel_type(self):
         pass
@@ -86,11 +82,9 @@ class TestSoftmaxOp(OpTest):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
         if self.use_cudnn:
             place = core.CUDAPlace(0)
-            self.check_output_with_place(
-                place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
-            )
+            self.check_output_with_place(place, atol=1e-5)
         else:
-            self.check_output(check_dygraph=(not self.use_mkldnn))
+            self.check_output(check_prim=True)

     def test_check_grad(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
@@ -110,13 +104,20 @@ class TestSoftmaxOp(OpTest):
                 "Out",
                 max_relative_error=0.01,
                 check_dygraph=(not self.use_mkldnn),
+                check_prim=True,
             )


+class TestSoftmaxOpfp32(TestSoftmaxOp):
+    def init_kernel_type(self):
+        self.dtype = np.float32
+
+
 class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
     def setUp(self):
         self.op_type = "softmax"
-        self.python_api = softmax_wrapper
+        self.prim_op_type = "comp"
+        self.python_api = F.softmax
         self.use_cudnn = False
         self.use_mkldnn = False
         # explicitly use float32 for ROCm, as MIOpen does not yet support float64
@@ -133,6 +134,15 @@ class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
             'use_cudnn': self.use_cudnn,
             'use_mkldnn': self.use_mkldnn,
         }
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        # TODO(wangzhongpu): support mkldnn op in dygraph mode
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+        else:
+            self.check_output(check_prim=True)


 @unittest.skipIf(
@@ -141,7 +151,7 @@ class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
 class TestSoftmaxOp_ZeroDim2(TestSoftmaxOp):
     def setUp(self):
         self.op_type = "softmax"
-        self.python_api = softmax_wrapper
+        self.python_api = F.softmax
         self.use_cudnn = True
         self.use_mkldnn = False
         # explicitly use float32 for ROCm, as MIOpen does not yet support float64
@@ -158,6 +168,15 @@ class TestSoftmaxOp_ZeroDim2(TestSoftmaxOp):
             'use_cudnn': self.use_cudnn,
             'use_mkldnn': self.use_mkldnn,
         }
+        self.enable_cinn = False
+
+    def test_check_output(self):
+        # TODO(wangzhongpu): support mkldnn op in dygraph mode
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+        else:
+            self.check_output(check_prim=True)


 class TestSoftmaxOp2(TestSoftmaxOp):
@@ -375,7 +394,7 @@ class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
 class TestSoftmaxBF16Op(OpTest):
     def setUp(self):
         self.op_type = "softmax"
-        self.python_api = softmax_wrapper
+        self.python_api = F.softmax
         self.use_cudnn = self.init_cudnn()
         self.use_mkldnn = False
         self.dtype = np.uint16
......
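Note that the softmax tests use prim_op_type = "comp" where reduce and expand use "prim". My hedged reading, since the authoritative logic lives in the collapsed harness diff above: "prim" decomposes only the backward pass into primitive ops, while "comp" also swaps in a composite forward rule, which is why check_output(check_prim=True) appears here. The rewritten tests also assume the usual F alias for paddle.nn.functional, and the reference they are checked against is ordinary stable softmax:

import numpy as np
import paddle
import paddle.nn.functional as F  # the alias the rewritten tests rely on

x = np.random.uniform(0.1, 1.0, [10, 10]).astype("float32")
ref = np.exp(x - x.max(axis=-1, keepdims=True))  # stable softmax in numpy
ref /= ref.sum(axis=-1, keepdims=True)
out = F.softmax(paddle.to_tensor(x)).numpy()
np.testing.assert_allclose(out, ref, rtol=1e-6, atol=1e-6)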
@@ -1243,6 +1243,9 @@ class ProgramCache:
     def concrete_programs(self):
         return [cp for key, (cp, _) in self._caches.items()]

+    def clear(self):
+        self._caches = collections.OrderedDict()
+

 class ProgramTranslator:
     """
......
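ProgramCache.clear() backs the "add clear jit program function" bullet in the commit message: the prim tests need a to_static function to be re-traced after lowering flags change, rather than served from the cache. A hedged usage sketch; get_program_cache() is the accessor ProgramTranslator already exposes, only clear() is new:

import paddle

prog_trans = paddle.jit.ProgramTranslator()
prog_trans.get_program_cache().clear()  # the next to_static call re-traces from scratch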
@@ -298,12 +298,6 @@ class InputSpec:
                     type(shape).__name__
                 )
             )
-        if len(shape) == 0:
-            raise ValueError(
-                "`shape` in InputSpec should contain at least 1 element, but received {}.".format(
-                    shape
-                )
-            )

         for i, ele in enumerate(shape):
             if ele is not None:
......
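Dropping the empty-shape guard lets InputSpec describe 0-D (scalar) tensors, matching the test deletion earlier in this commit that removed the assertion that an empty shape raises. For instance (my example, not from the PR):

from paddle.static import InputSpec

spec = InputSpec(shape=[], dtype='float32', name='scalar_x')  # now legal: a 0-D tensor
print(spec.shape)  # ()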
@@ -1265,6 +1265,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
         'x',
         [
             'bool',
+            'uint16',
             'float16',
             'float32',
             'float64',
......
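'uint16' is how the framework spells bfloat16 inside static-graph dtype checks, so this one-line change lets the type check in sum accept bfloat16 inputs, which the new TestSumOp_bf16 prim test exercises. A hedged sketch, assuming a build and device where bfloat16 kernels are available:

import paddle

x = paddle.ones([2, 3], dtype='bfloat16')  # carried as uint16 internally
y = paddle.sum(x)                          # passes the widened dtype check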