Unverified commit 97fe79a9, authored by Fisher, committed by GitHub

[CINN] Enable check_cinn on some tests (#53710)

* Enable check_cinn on some tests

Tests: bitwise, compare, shape, assign_value, sum, expand_v2,
lookup_table, lookup_table_v2

* Enable more CINN tests

Tests with CINN: expand_v2, matmul, matmul_v2, mul, norm, one_hot_v2
Add target selection in cinn_launch_op

* Revert test_mul_op

* Improve op unit tests
Parent commit: 89da2f19
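The recurring change in the diffs below is a single flag: passing check_cinn=True to OpTest.check_output or OpTest.check_grad (or their *_with_place variants) makes the test additionally compile the op through CINN and compare the result against the regular executor's output. A minimal sketch of the pattern follows; the op choice, shapes, and python_api are illustrative assumptions, not part of this commit.

# Hypothetical example of the check_cinn pattern; assumes Paddle's unit test
# directory is on the path so that eager_op_test.OpTest is importable.
import numpy as np
import paddle
from eager_op_test import OpTest


class TestExpandV2OpWithCinn(OpTest):
    def setUp(self):
        self.op_type = 'expand_v2'
        self.python_api = paddle.expand
        self.inputs = {'X': np.random.random((2, 100)).astype('float64')}
        # Expanding to the input's own shape keeps the reference output trivial.
        self.attrs = {'shape': [2, 100]}
        self.outputs = {'Out': self.inputs['X']}

    def test_check_output(self):
        # Also build and run the op through CINN and compare the results.
        self.check_output(check_cinn=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_cinn=True)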
@@ -17,6 +17,7 @@
 #include <functional>
 #include <vector>
+#include "cinn/common/target.h"
 #include "cinn/hlir/framework/graph_compiler.h"
 #include "cinn/runtime/cinn_runtime.h"
 #include "cinn/runtime/flags.h"
@@ -94,6 +95,11 @@ void SetCinnRandomSeed<phi::CPUContext>() {
   ::cinn::runtime::RandomSeed::GetOrSet(seed);
 }

+void SetCinnTarget(const ::cinn::common::Target& target) {
+  VLOG(4) << "Set CINN compile target to " << target;
+  ::cinn::runtime::CurrentTarget::SetCurrentTarget(target);
+}
+
 }  // namespace details

 class CinnLaunchOp : public framework::OperatorWithKernel {
......
@@ -58,6 +58,9 @@ void SetCinnRuntimeFlags();
 template <typename DeviceContext>
 void SetCinnRandomSeed();

+// set CINN compile target
+void SetCinnTarget(const ::cinn::common::Target& target);
+
 }  // namespace details

 template <typename T, typename DeviceContext>
@@ -115,6 +118,7 @@ class CinnLaunchOpKernel : public framework::OpKernel<T> {
                          "Step 2. Get compilation result of the graph");
     // Step 2. Get compilation result of the graph
     auto target = details::PlaceToCinnTarget(place);
+    details::SetCinnTarget(target);
     using ClockType = std::chrono::steady_clock;
     std::chrono::time_point<ClockType> start_t, end_t;
     if (VLOG_IS_ON(1)) {
......
@@ -1116,7 +1116,15 @@ set(TEST_CINN_OPS
     test_tile_op
     test_roll_op
     test_sum_op
-    test_elementwise_min_op)
+    test_elementwise_min_op
+    test_bitwise_op
+    test_compare_op
+    test_shape_op
+    test_assign_value_op
+    test_lookup_table_op
+    test_lookup_table_v2_op
+    test_norm_op
+    test_one_hot_v2_op)

 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
......
@@ -49,7 +49,7 @@ class TestAssignValueOp(eager_op_test.OpTest):
         self.attrs["fp32_values"] = [float(v) for v in self.value.flat]

     def test_forward(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestAssignValueOp2(TestAssignValueOp):
......
@@ -43,7 +43,7 @@ class TestBitwiseAnd(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         pass
@@ -150,7 +150,7 @@ class TestBitwiseOr(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         pass
@@ -258,7 +258,7 @@ class TestBitwiseXor(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         pass
@@ -363,7 +363,7 @@ class TestBitwiseNot(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         pass
......
@@ -35,7 +35,7 @@ def create_test_class(op_type, typename, callback):
            self.op_type = op_type

        def test_output(self):
-            self.check_output()
+            self.check_output(check_cinn=True)

        def test_errors(self):
            paddle.enable_static()
@@ -460,7 +460,7 @@ def create_bf16_case(op_type, callback):
            self.outputs = {'Out': real_result}

        def test_check_output(self):
-            self.check_output()
+            self.check_output(check_cinn=True)

    cls_name = f"BF16TestCase_{op_type}"
    TestCompareOpBF16Op.__name__ = cls_name
......
@@ -44,7 +44,7 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=self.enable_cinn)

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
@@ -107,10 +107,10 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
         self.infer_expand_shape = [-1]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_cinn=True)


 class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
@@ -144,10 +144,10 @@ class TestExpandV2OpRank1_tensor(OpTest):
         self.expand_shape = [2, 100]

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_cinn=True)


 # Situation 4: input x is Integer
@@ -165,7 +165,7 @@ class TestExpandV2OpInteger(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 # Situation 5: input x is Bool
@@ -181,7 +181,7 @@ class TestExpandV2OpBoolean(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 # Situation 6: input x is Integer
@@ -199,7 +199,7 @@ class TestExpandV2OpInt64_t(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 # Situation 7: input x is Float16
@@ -218,7 +218,7 @@ class TestExpandV2FP16Op(OpTest):
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
@@ -245,7 +245,7 @@ class TestExpandV2BF16Op(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_cinn=True)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
......
@@ -39,10 +39,10 @@ class TestLookupTableOp(OpTest):
         self.outputs = {'Out': table[ids]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)


 class TestLookupTableOpWithTensorIds(OpTest):
@@ -56,10 +56,10 @@ class TestLookupTableOpWithTensorIds(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)


 @skip_check_grad_ci(
@@ -73,7 +73,7 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)


 @skip_check_grad_ci(
@@ -88,7 +88,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestLookupTableWIsSelectedRows(unittest.TestCase):
@@ -212,7 +212,7 @@ class TestLookupTableOpInt8(OpTest):
         self.outputs = {'Out': table[ids]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         # since int8 type only be used in test and inference, there is
@@ -233,7 +233,7 @@ class TestLookupTableOpWithTensorIdsInt8(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         # since int8 type only be used in test and inference, there is
@@ -247,7 +247,7 @@ class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         # Since paddings are not trainable and fixed in forward, the gradient of
@@ -264,7 +264,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt8(
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
         # Since paddings are not trainable and fixed in forward, the gradient of
@@ -354,7 +354,7 @@ class TestLookupTableOpInt16(OpTest):
         self.outputs = {'Out': table[ids]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 @skip_check_grad_ci(reason="Int16 type only be used in test and inference.")
@@ -371,7 +371,7 @@ class TestLookupTableOpWithTensorIdsInt16(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 @skip_check_grad_ci(reason="Int16 type only be used in test and inference.")
@@ -381,7 +381,7 @@ class TestLookupTableOpWithPaddingInt16(TestLookupTableOpInt16):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)


 @skip_check_grad_ci(reason="Int16 type only be used in test and inference.")
@@ -394,7 +394,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt16(
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestLookupTableWIsSelectedRowsInt16(unittest.TestCase):
......
@@ -56,10 +56,10 @@ class TestLookupTableOp(OpTest):
         return "int64"

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)


 class TestLookupTableOpInt16(OpTest):
@@ -87,10 +87,10 @@ class TestLookupTableOpWithTensorIds(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)


 @skip_check_grad_ci(
@@ -104,7 +104,7 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)


 @skip_check_grad_ci(
@@ -119,7 +119,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestLookupTableWIsSelectedRows(unittest.TestCase):
......
@@ -100,19 +100,29 @@ class Generator:
         self.outputs = {'Out': Out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-3)
+        self.check_grad(
+            ['X', 'Y'], 'Out', max_relative_error=1e-3, check_cinn=True
+        )

     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=1e-3, no_grad_set=set("X")
+            ['Y'],
+            'Out',
+            max_relative_error=1e-3,
+            no_grad_set=set("X"),
+            check_cinn=True,
         )

     def test_check_grad_ignore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y')
+            ['X'],
+            'Out',
+            max_relative_error=1e-3,
+            no_grad_set=set('Y'),
+            check_cinn=True,
         )
......
@@ -103,13 +103,28 @@ class TestMatMulV2Op(OpTest):
         self.outputs = {'Out': result}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(
+            check_cinn=self.check_cinn if hasattr(self, 'check_cinn') else True
+        )

     def test_check_grad(self):
         if core.is_compiled_with_rocm():
-            self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-2)
+            self.check_grad(
+                ['X', 'Y'],
+                'Out',
+                max_relative_error=1e-2,
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
+            )
         else:
-            self.check_grad(['X', 'Y'], 'Out')
+            self.check_grad(
+                ['X', 'Y'],
+                'Out',
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
+            )


 class TestMatMulOp2(TestMatMulV2Op):
@@ -290,6 +305,7 @@ class TestMatMulOp16(TestMatMulV2Op):
         self.y_shape = (1, 2, 2, 100, 2)
         self.trans_x = False
         self.trans_y = False
+        self.check_cinn = False


 class TestMatMulOp17(TestMatMulV2Op):
@@ -343,7 +359,13 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0):
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
                if core.is_float16_supported(place):
-                    self.check_output_with_place(place, atol=atol)
+                    self.check_output_with_place(
+                        place,
+                        atol=atol,
+                        check_cinn=self.check_cinn
+                        if hasattr(self, 'check_cinn')
+                        else True,
+                    )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
@@ -353,6 +375,9 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0):
                    ['X', 'Y'],
                    'Out',
                    max_relative_error=max_relative_error,
+                    check_cinn=self.check_cinn
+                    if hasattr(self, 'check_cinn')
+                    else True,
                )

    cls_name = "{}_{}".format(parent.__name__, "Fp16")
@@ -405,7 +430,13 @@ def create_test_bf16_class(parent, atol=0.01):
        def test_check_output(self):
            place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=atol)
+            self.check_output_with_place(
+                place,
+                atol=atol,
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
+            )

        def test_check_grad_x(self):
            place = core.CUDAPlace(0)
@@ -416,6 +447,9 @@ def create_test_bf16_class(parent, atol=0.01):
                'Out',
                no_grad_set={'Y'},
                user_defined_grads=[numeric_grads],
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
            )

        def test_check_grad_y(self):
@@ -427,6 +461,9 @@ def create_test_bf16_class(parent, atol=0.01):
                'Out',
                no_grad_set={'X'},
                user_defined_grads=[numeric_grads],
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
            )

        def test_check_grad(self):
@@ -596,7 +633,7 @@ class TestComplexMatMulOp(OpTest):
         self.grad_y = np.matmul(np.conj(self.x).T, self.grad_out)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=False)

     def test_check_grad_normal(self):
         self.check_grad(
@@ -604,6 +641,7 @@ class TestComplexMatMulOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )

     def test_check_grad_ingore_x(self):
@@ -613,6 +651,7 @@ class TestComplexMatMulOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )

     def test_check_grad_ingore_y(self):
@@ -622,6 +661,7 @@ class TestComplexMatMulOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
@@ -662,7 +702,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
         )

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=False)

     def test_check_grad_normal(self):
         self.check_grad(
@@ -670,6 +710,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )

     def test_check_grad_ingore_x(self):
@@ -679,6 +720,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )

     def test_check_grad_ingore_y(self):
@@ -688,6 +730,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
......
@@ -48,10 +48,10 @@ class TestNormOp(OpTest):
         self.python_out_sig = ['Out']

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_cinn=True)

     def init_test_case(self):
         self.shape = [2, 3, 4, 5]
@@ -109,7 +109,7 @@ class TestNormOp6(TestNormOp):
         self.dtype = "float32"

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008, check_cinn=True)


 @unittest.skipIf(
@@ -120,11 +120,17 @@ class TestNormOp7(TestNormOp):
         self.dtype = "float16"

     def test_check_output(self):
-        self.check_output_with_place(fluid.core.CUDAPlace(0), atol=5e-2)
+        self.check_output_with_place(
+            fluid.core.CUDAPlace(0), atol=5e-2, check_cinn=True
+        )

     def test_check_grad(self):
         self.check_grad_with_place(
-            fluid.core.CUDAPlace(0), ['X'], 'Out', max_relative_error=0.05
+            fluid.core.CUDAPlace(0),
+            ['X'],
+            'Out',
+            max_relative_error=0.05,
+            check_cinn=True,
         )
@@ -147,7 +153,7 @@ class TestNormTestOp(OpTest):
     def test_check_output(self):
         # dynamic graph just supports float tensor
-        self.check_output(check_dygraph=True)
+        self.check_output(check_dygraph=True, check_cinn=True)

     def test_check_grad(self):
         pass
@@ -176,11 +182,17 @@ class TestNormBF16Op(OpTest):
         self.python_out_sig = ['Out']

     def test_check_output(self):
-        self.check_output_with_place(core.CUDAPlace(0), atol=1e-1)
+        self.check_output_with_place(
+            core.CUDAPlace(0), atol=1e-1, check_cinn=True
+        )

     def test_check_grad(self):
         self.check_grad_with_place(
-            core.CUDAPlace(0), ['X'], 'Out', max_relative_error=1e-2
+            core.CUDAPlace(0),
+            ['X'],
+            'Out',
+            max_relative_error=1e-2,
+            check_cinn=True,
         )

     def init_test_case(self):
......
@@ -49,7 +49,7 @@ class TestOneHotOp(OpTest):
         self.outputs = {'Out': (out, x_lod)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestOneHotOp_attr(OpTest):
@@ -57,6 +57,7 @@ class TestOneHotOp_attr(OpTest):
         self.op_type = 'one_hot_v2'
         self.python_api = one_hot_wrapper
         depth = 10
+        depth_np = np.array(10).astype('int32')
         dimension = 12
         x_lod = [[4, 1, 3, 3]]
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
@@ -69,12 +70,12 @@ class TestOneHotOp_attr(OpTest):
         for i in range(np.product(x.shape)):
             out[i, 0, x[i]] = 1.0

-        self.inputs = {'X': (x, x_lod)}
+        self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
         self.attrs = {'dtype': int(core.VarDesc.VarType.FP32), 'depth': depth}
         self.outputs = {'Out': (out, x_lod)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestOneHotOp_default_dtype(OpTest):
@@ -98,7 +99,7 @@ class TestOneHotOp_default_dtype(OpTest):
         self.outputs = {'Out': (out, x_lod)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 class TestOneHotOp_default_dtype_attr(OpTest):
@@ -106,6 +107,7 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.op_type = 'one_hot_v2'
         self.python_api = one_hot_wrapper
         depth = 10
+        depth_np = np.array(depth).astype('int32')
         dimension = 12
         x_lod = [[4, 1, 3, 3]]
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
......
@@ -36,7 +36,7 @@ class TestShapeOp(OpTest):
         self.dtype = np.float32

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)


 class case1(TestShapeOp):
@@ -125,7 +125,7 @@ class TestShapeOpBf16(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_cinn=True)


 class case1Bf16(TestShapeOpBf16):
......
@@ -62,10 +62,10 @@ class TestSumOp(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output(check_prim=True, check_cinn=True)

     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out', check_prim=True)
+        self.check_grad(['x0'], 'Out', check_prim=True, check_cinn=True)


 class TestSelectedRowsSumOp(unittest.TestCase):
@@ -299,14 +299,14 @@ class TestFP16SumOp(TestSumOp):
     def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_output_with_place(place)
+            self.check_output_with_place(place, check_cinn=True)

     # FIXME: Because of the precision fp16, max_relative_error
     # should be 0.15 here.
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_grad(['x0'], 'Out')
+            self.check_grad(['x0'], 'Out', check_cinn=True)


 def create_test_sum_fp16_class(parent):
......